Columns: repo_name (string, length 8-130), hexsha (sequence), file_path (sequence), code (sequence), apis (sequence)
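Each record below pairs one repository with parallel per-file sequences: entry i of hexsha, file_path, code, and apis all describe the same source file. A minimal sketch of iterating such records, assuming they are stored as JSON Lines with these field names (the file name samples.jsonl is illustrative, not part of the dataset):

```python
import json

# Sketch only: assumes one JSON object per line with the fields listed above.
with open("samples.jsonl") as fh:
    for line in fh:
        record = json.loads(line)
        # hexsha, file_path, code and apis are parallel sequences;
        # zip them to look at one source file at a time.
        for sha, path, code, apis in zip(
            record["hexsha"], record["file_path"], record["code"], record["apis"]
        ):
            print(record["repo_name"], path, sha[:8], len(code), sorted(apis))
```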
apaleyes/xfer
[ "99cd83424bc7e76a2c2def9d5b1dacd06f6e9eb5" ]
[ "xfer/contrib/xfer_leap/synthetic_data.py" ]
[ "# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n# ==============================================================================\nimport os\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom mxnet.gluon.data import ArrayDataset\nimport mxnet\n\nfrom .data import MetaTaskDataContainer, TaskDataContainer\nfrom .config import DEFAULT_CONFIG_SYNTHETIC\n\n\nclass MetaTaskSynthetic(MetaTaskDataContainer):\n def __init__(self, config=None, weights=None, bias=None, seed=1, context=None):\n\n \"\"\"\n :param config: If None, DEFAULT_CONFIG_SYNTHETIC is loaded.\n :param weights: Tasks' weights matrix. Row k corresponds to the weight parameters of task k. If None, w is\n sampled from a N(0,1).\n :param bias: Tasks' biases vector. Row k corresponds to the bias parameters of task k. If None, w is sampled\n from a N(0,1).\n :param seed: Seed for random generator.\n \"\"\"\n\n if config is None:\n config = DEFAULT_CONFIG_SYNTHETIC\n\n self.config = config\n self.weights = weights\n self.bias = bias\n\n if context is None:\n context = mxnet.cpu()\n self.context = context\n\n self.seed = seed\n random.seed(self.seed)\n\n num_tasks_train = config[\"num_tasks_train\"]\n num_tasks_test = config[\"num_tasks_test\"]\n num_tasks_val = config[\"num_tasks_val\"]\n num_tasks = num_tasks_train + num_tasks_test + num_tasks_val\n\n self.num_tasks = num_tasks\n\n self._generate_parameters()\n self._validate_parameters()\n\n num_examples = config[\"num_examples_per_task\"]\n std_x = config[\"std_x\"]\n hold_out = config[\"hold_out\"]\n noise = config[\"std_noise\"]\n\n # Generate the training/test/val dataset.\n # Each dataset is a list of TaskSynthetic objects (one per task)\n data_train = [TaskSynthetic(self.weights[t, :], self.bias[t], num_examples, std_x, noise, hold_out,\n context=context)\n for t in np.arange(0, num_tasks_train)]\n data_test = [TaskSynthetic(self.weights[t, :], self.bias[t], num_examples, std_x, noise, hold_out,\n context=context)\n for t in np.arange(num_tasks_train, num_tasks_train + num_tasks_test)]\n data_val = [TaskSynthetic(self.weights[t, :], self.bias[t], num_examples, std_x, noise, hold_out,\n context=context)\n for t in np.arange(num_tasks_train + num_tasks_test, num_tasks)]\n\n super().__init__(data_train, data_test, data_val, context=context)\n\n def plot_sample(self, root=\"./sample_synth\"):\n\n \"\"\"Plot N images from each alphabet and store the images in root.\"\"\"\n\n if self.weights.shape[1] != 2:\n raise ValueError(\"Only 2D datasets can be plot.\")\n\n if not os.path.exists(root):\n os.makedirs(root)\n\n fig_train = self._plot([dd._train_dataset for dd in self.train_tasks],\n \"Training Samples for Training Tasks\")\n fig_train.savefig(os.path.join(root, \"sample_train_train_tasks.png\"))\n del fig_train\n fig_test = self._plot([dd._train_dataset for dd in self.test_tasks],\n \"Training Samples for Test Tasks\")\n fig_test.savefig(os.path.join(root, \"sample_train_test_tasks.png\"))\n del fig_test\n fig_val = 
self._plot([dd._train_dataset for dd in self.val_tasks],\n \"Training Samples for Validation Tasks\")\n fig_val.savefig(os.path.join(root, \"sample_train_val_tasks.png\"))\n del fig_val\n\n if self.config[\"hold_out\"] > 0:\n fig_train = self._plot([dd._val_dataset for dd in self.train_tasks],\n \"Validation Samples for Training Tasks\")\n fig_train.savefig(os.path.join(root, \"sample_val_train_tasks.png\"))\n del fig_train\n fig_test = self._plot([dd._val_dataset for dd in self.test_tasks],\n \"Validation Samples for Test Tasks\")\n fig_test.savefig(os.path.join(root, \"sample_val_test_tasks.png\"))\n del fig_test\n fig_val = self._plot([dd._val_dataset for dd in self.val_tasks],\n \"Validation Samples for Validation Tasks\")\n fig_val.savefig(os.path.join(root, \"sample_val_val_tasks.png\"))\n del fig_val\n\n def _plot(self, data, title):\n\n \"\"\"Helper function for plotting.\"\"\"\n\n num_tasks = len(data)\n fig, ax = plt.subplots(1, num_tasks, figsize=(num_tasks*5, 5))\n fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.5, hspace=0.5)\n for mm in range(num_tasks):\n X, y = data[mm][:]\n X = X.asnumpy()\n y = y.asnumpy()\n ax[mm].scatter(X[:, 0], X[:, 1], c=y.flatten())\n fig.suptitle(title, size=18)\n return fig\n\n def _validate_parameters(self):\n if self.weights.shape[0] != self.num_tasks:\n raise ValueError(\"Number of rows in w must be equal to the total number of tasks\")\n\n if len(self.bias) != self.num_tasks:\n raise ValueError(\"Length of b must be equal to the total number of tasks\")\n\n def _generate_parameters(self):\n if self.weights is None:\n dim = self.config[\"dim\"]\n self.weights = self.config[\"global_bias\"] + mxnet.nd.random_normal(shape=(self.num_tasks, dim),\n ctx=self.context)\n\n if self.bias is None:\n if self.config[\"task_bias\"]:\n self.bias = mxnet.nd.random_normal(shape=self.num_tasks, ctx=self.context)\n else:\n self.bias = mxnet.nd.zeros(num_tasks, ctx=self.context)\n\n\nclass TaskSynthetic(TaskDataContainer):\n\n \"\"\"\n Synthetic Task Container: Linear Regression.\n \"\"\"\n\n def __init__(self, w, b, num_examples, std_x, noise, hold_out=None, seed=None, context=None):\n\n \"\"\"\n :param w: Task's weights vector.\n :param b: Task's bias.\n :param num_examples: Total number of examples per task.\n :param std_x: The covariates are sampled from a zero mean normal distribution with\n standard deviation equal to std_x.\n :param hold_out: Number of examples to hold out for validation\n :param seed: seed for the random generator\n \"\"\"\n\n self.w = w\n self.b = b\n self.num_examples = num_examples\n self.seed = seed\n\n if context is None:\n context = mxnet.cpu()\n self.context = context\n\n if seed:\n random.seed(seed)\n if hold_out and hold_out < num_examples:\n Xtr, Ytr = self._real_fn(std_x * mxnet.nd.random_normal(shape=(num_examples - hold_out, len(w)),\n ctx=context), noise)\n train_dataset = ArrayDataset(Xtr, Ytr)\n Xval, Yval = self._real_fn(std_x * mxnet.nd.random_normal(shape=(hold_out, len(w)), ctx=context), noise)\n val_dataset = ArrayDataset(Xval, Yval)\n else:\n Xtr, Ytr = self._real_fn(std_x * mxnet.nd.random_normal(shape=(num_examples, len(w)), ctx=context), noise)\n train_dataset = ArrayDataset(Xtr, Ytr)\n val_dataset = None\n\n super().__init__(train_dataset, val_dataset, context=context)\n\n def _real_fn(self, X, noise):\n y = mxnet.nd.dot(X, mxnet.nd.expand_dims(self.w, axis=1)) + self.b\n if noise > 0.0:\n y += mxnet.nd.expand_dims(noise * mxnet.nd.random_normal(shape=(X.shape[0],)), axis=1)\n return X, 
y\n\n\nif __name__ == '__main__':\n\n s1 = MetaTaskSynthetic()\n s1.plot_sample()\n\n batch_size = 20\n train_tasks = s1.train_tasks\n\n assert len(s1.train_tasks) == 3\n for task in train_tasks:\n tr_iterator = task.get_train_iterator(batch_size)\n for data in tr_iterator:\n assert (data[0].shape == (batch_size, 2))\n assert (data[1].shape == (batch_size, 1))\n assert (data[1].asnumpy().dtype == np.float32)\n break\n val_iterator = task.get_val_iterator(batch_size)\n for data in val_iterator:\n assert (data[0].shape == (batch_size, 2))\n assert (data[1].shape == (batch_size, 1))\n assert (data[1].asnumpy().dtype == np.float32)\n break\n\n dim = 2\n num_tasks = 15\n w = mxnet.nd.random_normal(shape=(num_tasks, dim))\n b = mxnet.nd.random_normal(shape=num_tasks)\n\n s2 = MetaTaskSynthetic(weights=w, bias=b)\n s2.plot_sample(root=\"./sample_synth_w_b_given\")\n\n batch_size = 20\n train_tasks = s2.train_tasks\n\n assert len(train_tasks) == 3\n for task in train_tasks:\n tr_iterator = task.get_train_iterator(batch_size)\n for data in tr_iterator:\n assert (data[0].shape == (batch_size, 2))\n assert (data[1].shape == (batch_size, 1))\n assert (data[1].asnumpy().dtype == np.float32)\n break\n val_iterator = task.get_val_iterator(batch_size)\n for data in val_iterator:\n assert (data[0].shape == (batch_size, 2))\n assert (data[1].shape == (batch_size, 1))\n assert (data[1].asnumpy().dtype == np.float32)\n break\n" ]
[ [ "numpy.arange", "matplotlib.pyplot.subplots" ] ]
idharmateja/tensorflow
[ "1712002ad02f044f7569224bf465e0ea00e6a6c4" ]
[ "tensorflow/contrib/data/python/ops/readers.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Python wrappers for reader Datasets.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.contrib.data.python.ops import interleave_ops\nfrom tensorflow.contrib.data.python.ops import shuffle_ops\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.data.ops import readers as core_readers\nfrom tensorflow.python.data.util import nest\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.ops import gen_dataset_ops\nfrom tensorflow.python.ops import parsing_ops\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.util import deprecation\n\n_ACCEPTABLE_CSV_TYPES = (dtypes.float32, dtypes.float64, dtypes.int32,\n dtypes.int64, dtypes.string)\n\n\ndef make_csv_dataset(\n file_pattern,\n batch_size,\n column_keys,\n column_defaults,\n label_key=None,\n field_delim=\",\",\n use_quote_delim=True,\n skip=0,\n filter_fn=None,\n num_epochs=None,\n shuffle=True,\n shuffle_buffer_size=10000,\n shuffle_seed=None,\n prefetch_buffer_size=1,\n):\n \"\"\"Reads CSV files into a dataset.\n\n Reads CSV files into a dataset, where each element is a (features, labels)\n tuple that corresponds to a batch of CSV rows. The features dictionary\n maps feature column names to `Tensor`s containing the corresponding\n feature data, and labels is a `Tensor` containing the batch's label data.\n\n Args:\n file_pattern: List of files or patterns of file paths containing CSV\n records. See @{tf.gfile.Glob} for pattern rules.\n batch_size: An int representing the number of consecutive elements of this\n dataset to combine in a single batch.\n column_keys: A list of strings that corresponds to the CSV columns, in\n order. One per column of the input record.\n column_defaults: A list of default values for the CSV fields. One item per\n column of the input record. Each item in the list is either one of the\n following dtypes: float32, float64, int32, int64, or string, or a\n `Tensor` with one of the aforementioned types. One item per column of\n the input record, with either scalar default value for that column if it\n is required, or, if the column is required, an empty `Tensor` or a dtype.\n label_key: A optional string corresponding to the label column. If provided,\n the data for this column is returned as a separate `Tensor` from the\n features dictionary, so that the dataset complies with the format expected\n by a `tf.Estimator.train` or `tf.Estimator.evaluate` input function.\n field_delim: An optional `string`. Defaults to `\",\"`. Char delimiter to\n separate fields in a record.\n use_quote_delim: An optional bool. 
Defaults to `True`. If false, treats\n double quotation marks as regular characters inside of the string fields.\n skip: An integer that corresponds to the number of lines to skip at the\n head of each CSV file. Defaults to 0.\n filter_fn: A callable function that takes in a CSV string and returns a\n boolean that corresponds to whether the record should be included. If\n None, does not filter records.\n num_epochs: An int specifying the number of times this dataset is repeated.\n If None, cycles through the dataset forever.\n shuffle: A bool that indicates whether the input should be shuffled.\n shuffle_buffer_size: Buffer size to use for shuffling. A large buffer size\n ensures better shuffling, but would increase memory usage and startup\n time.\n shuffle_seed: Randomization seed to use for shuffling.\n prefetch_buffer_size: An int specifying the number of feature batches to\n prefetch for performance improvement. Recommended value is the number of\n batches consumed per training step.\n\n Returns:\n A dataset, where each element is a (features, labels) tuple that corresponds\n to a batch of `batch_size` CSV rows. The features dictionary maps feature\n column names to `Tensor`s containing the corresponding column data, and\n labels is a `Tensor` containing the column data for the label column\n specified by `label_key`.\n \"\"\"\n filenames = _get_file_names(file_pattern, False)\n column_defaults = [\n constant_op.constant([], dtype=x) if x in _ACCEPTABLE_CSV_TYPES else x\n for x in column_defaults\n ]\n\n dataset = dataset_ops.Dataset.from_tensor_slices(filenames)\n if label_key is not None:\n assert label_key in column_keys\n\n def filename_to_dataset(filename):\n ds = core_readers.TextLineDataset(filename)\n if skip > 0:\n ds = ds.skip(skip)\n if filter_fn is not None:\n ds = ds.filter(filter_fn)\n return ds\n\n def decode_csv(line):\n \"\"\"Decodes csv line into features.\n\n Args:\n line: String tensor corresponding to one csv record.\n Returns:\n A dictionary of feature names to values for that particular record. 
If\n label_key is provided, extracts the label feature to be returned as the\n second element of the tuple.\n \"\"\"\n columns = parsing_ops.decode_csv(\n line,\n column_defaults,\n field_delim=field_delim,\n use_quote_delim=use_quote_delim)\n features = dict(zip(column_keys, columns))\n if label_key is not None:\n label = features.pop(label_key)\n return features, label\n return features\n\n # TODO(rachelim): interleave records from files for better shuffling\n dataset = dataset.flat_map(filename_to_dataset)\n # TODO(rachelim): use fused shuffle_and_repeat for perf\n if shuffle:\n dataset = dataset.shuffle(shuffle_buffer_size, shuffle_seed)\n if num_epochs != 1:\n dataset = dataset.repeat(num_epochs)\n\n dataset = dataset.batch(batch_size)\n dataset = dataset.map(decode_csv)\n dataset = dataset.prefetch(prefetch_buffer_size)\n return dataset\n\n\ndef make_batched_features_dataset(file_pattern,\n batch_size,\n features,\n reader=core_readers.TFRecordDataset,\n reader_args=None,\n num_epochs=None,\n shuffle=True,\n shuffle_buffer_size=10000,\n shuffle_seed=None,\n prefetch_buffer_size=1,\n reader_num_threads=1,\n parser_num_threads=2,\n sloppy_ordering=False):\n \"\"\"Returns a `Dataset` of feature dictionaries from `Example` protos.\n\n Example:\n\n ```\n serialized_examples = [\n features {\n feature { key: \"age\" value { int64_list { value: [ 0 ] } } }\n feature { key: \"gender\" value { bytes_list { value: [ \"f\" ] } } }\n feature { key: \"kws\" value { bytes_list { value: [ \"code\", \"art\" ] } } }\n },\n features {\n feature { key: \"age\" value { int64_list { value: [] } } }\n feature { key: \"gender\" value { bytes_list { value: [ \"f\" ] } } }\n feature { key: \"kws\" value { bytes_list { value: [ \"sports\" ] } } }\n }\n ]\n ```\n\n We can use arguments:\n\n ```\n features: {\n \"age\": FixedLenFeature([], dtype=tf.int64, default_value=-1),\n \"gender\": FixedLenFeature([], dtype=tf.string),\n \"kws\": VarLenFeature(dtype=tf.string),\n }\n ```\n\n And the expected output is:\n\n ```python\n {\n \"age\": [[0], [-1]],\n \"gender\": [[\"f\"], [\"f\"]],\n \"kws\": SparseTensor(\n indices=[[0, 0], [0, 1], [1, 0]],\n values=[\"code\", \"art\", \"sports\"]\n dense_shape=[2, 2]),\n }\n ```\n\n Args:\n file_pattern: List of files or patterns of file paths containing\n `Example` records. See `tf.gfile.Glob` for pattern rules.\n batch_size: An int representing the number of consecutive elements of this\n dataset to combine in a single batch.\n features: A `dict` mapping feature keys to `FixedLenFeature` or\n `VarLenFeature` values. See `tf.parse_example`.\n reader: A function or class that can be\n called with a `filenames` tensor and (optional) `reader_args` and returns\n a `Dataset` of `Example` tensors. Defaults to `tf.data.TFRecordDataset`.\n reader_args: Additional arguments to pass to the reader class.\n num_epochs: Integer specifying the number of times to read through the\n dataset. If None, cycles through the dataset forever. Defaults to `None`.\n shuffle: A boolean, indicates whether the input should be shuffled. Defaults\n to `True`.\n shuffle_buffer_size: Buffer size of the ShuffleDataset. A large capacity\n ensures better shuffling but would increase memory usage and startup time.\n shuffle_seed: Randomization seed to use for shuffling.\n prefetch_buffer_size: Number of feature batches to prefetch in order to\n improve performance. 
Recommended value is the number of batches consumed\n per training step (default is 1).\n reader_num_threads: Number of threads used to read `Example` records. If >1,\n the results will be interleaved.\n parser_num_threads: Number of threads to use for parsing `Example` tensors\n into a dictionary of `Feature` tensors.\n sloppy_ordering: If `True`, reading performance will be improved at\n the cost of non-deterministic ordering. If `False`, the order of elements\n produced is deterministic prior to shuffling (elements are still\n randomized if `shuffle=True`. Note that if the seed is set, then order\n of elements after shuffling is deterministic). Defaults to `False`.\n\n Returns:\n A dataset of `dict` elements. Each `dict` maps feature keys to\n `Tensor` or `SparseTensor` objects.\n \"\"\"\n # Create dataset of all matching filenames\n if shuffle:\n dataset = dataset_ops.Dataset.list_files(file_pattern, shuffle=True)\n else:\n # TODO(b/73959787): Use Dataset.list_files() once ordering is deterministic.\n filenames = _get_file_names(file_pattern, shuffle)\n dataset = dataset_ops.Dataset.from_tensor_slices(filenames)\n\n # Read `Example` records from files as tensor objects.\n if reader_args is None:\n reader_args = []\n\n # Read files sequentially (if reader_num_threads=1) or in parallel\n dataset = dataset.apply(\n interleave_ops.parallel_interleave(\n lambda filename: reader(filename, *reader_args),\n cycle_length=reader_num_threads,\n sloppy=sloppy_ordering))\n\n # Extract values if the `Example` tensors are stored as key-value tuples.\n if dataset.output_types == (dtypes.string, dtypes.string):\n dataset = dataset.map(lambda _, v: v)\n\n # Apply dataset repeat and shuffle transformations.\n repeat_dataset = (num_epochs != 1)\n if repeat_dataset and shuffle:\n # Used fused shuffle_and_repeat operation for better performance\n dataset = dataset.apply(\n shuffle_ops.shuffle_and_repeat(shuffle_buffer_size, num_epochs,\n shuffle_seed))\n elif repeat_dataset:\n dataset = dataset.repeat(num_epochs)\n elif shuffle:\n dataset = dataset.shuffle(shuffle_buffer_size, shuffle_seed)\n\n dataset = dataset.batch(batch_size)\n\n # Parse `Example` tensors to a dictionary of `Feature` tensors.\n dataset = dataset.map(\n lambda x: parsing_ops.parse_example(x, features),\n num_parallel_calls=parser_num_threads)\n\n # TODO(rachelim): Add an optional label_key argument for extracting the label\n # from the features dictionary, to comply with the type expected by the\n # input_fn to a `tf.Estimator.train` or `tf.Estimator.evaluate` function.\n dataset = dataset.prefetch(prefetch_buffer_size)\n return dataset\n\n\[email protected](None,\n \"Use `tf.contrib.data.make_batched_features_dataset`\")\ndef read_batch_features(file_pattern,\n batch_size,\n features,\n reader=core_readers.TFRecordDataset,\n reader_args=None,\n randomize_input=True,\n num_epochs=None,\n capacity=10000):\n \"\"\"Reads batches of Examples.\n\n Example:\n\n ```\n serialized_examples = [\n features {\n feature { key: \"age\" value { int64_list { value: [ 0 ] } } }\n feature { key: \"gender\" value { bytes_list { value: [ \"f\" ] } } }\n feature { key: \"kws\" value { bytes_list { value: [ \"code\", \"art\" ] } } }\n },\n features {\n feature { key: \"age\" value { int64_list { value: [] } } }\n feature { key: \"gender\" value { bytes_list { value: [ \"f\" ] } } }\n feature { key: \"kws\" value { bytes_list { value: [ \"sports\" ] } } }\n }\n ]\n ```\n\n We can use arguments:\n\n ```\n features: {\n \"age\": FixedLenFeature([], 
dtype=tf.int64, default_value=-1),\n \"gender\": FixedLenFeature([], dtype=tf.string),\n \"kws\": VarLenFeature(dtype=tf.string),\n }\n ```\n\n And the expected output is:\n\n ```python\n {\n \"age\": [[0], [-1]],\n \"gender\": [[\"f\"], [\"f\"]],\n \"kws\": SparseTensor(\n indices=[[0, 0], [0, 1], [1, 0]],\n values=[\"code\", \"art\", \"sports\"]\n dense_shape=[2, 2]),\n }\n ```\n\n Args:\n file_pattern: List of files or patterns of file paths containing\n `Example` records. See `tf.gfile.Glob` for pattern rules.\n batch_size: An int representing the number of consecutive elements of this\n dataset to combine in a single batch.\n features: A `dict` mapping feature keys to `FixedLenFeature` or\n `VarLenFeature` values. See `tf.parse_example`.\n reader: A function or class that can be\n called with a `filenames` tensor and (optional) `reader_args` and returns\n a `Dataset` of `Example` tensors. Defaults to `tf.data.TFRecordDataset`.\n reader_args: Additional arguments to pass to the reader class.\n randomize_input: Whether the input should be randomized.\n num_epochs: Integer specifying the number of times to read through the\n dataset. If None, cycles through the dataset forever.\n capacity: Buffer size of the ShuffleDataset. A large capacity ensures better\n shuffling but would increase memory usage and startup time.\n Returns:\n A dict from keys in features to `Tensor` or `SparseTensor` objects.\n \"\"\"\n dataset = make_batched_features_dataset(\n file_pattern,\n batch_size,\n features,\n reader=reader,\n reader_args=reader_args,\n shuffle=randomize_input,\n num_epochs=num_epochs,\n shuffle_buffer_size=capacity)\n iterator = dataset.make_one_shot_iterator()\n outputs = iterator.get_next()\n return outputs\n\n\ndef _get_file_names(file_pattern, shuffle):\n \"\"\"Parse list of file names from pattern, optionally shuffled.\n\n Args:\n file_pattern: File glob pattern, or list of glob patterns.\n shuffle: Whether to shuffle the order of file names.\n\n Returns:\n List of file names matching `file_pattern`.\n\n Raises:\n ValueError: If `file_pattern` is empty, or pattern matches no files.\n \"\"\"\n if isinstance(file_pattern, list):\n if not file_pattern:\n raise ValueError(\"File pattern is empty.\")\n file_names = []\n for entry in file_pattern:\n file_names.extend(gfile.Glob(entry))\n else:\n file_names = list(gfile.Glob(file_pattern))\n\n if not file_names:\n raise ValueError(\"No files match %s.\" % file_pattern)\n\n # Sort files so it will be deterministic for unit tests.\n if not shuffle:\n file_names = sorted(file_names)\n return file_names\n\n\nclass SqlDataset(dataset_ops.Dataset):\n \"\"\"A `Dataset` consisting of the results from a SQL query.\"\"\"\n\n def __init__(self, driver_name, data_source_name, query, output_types):\n \"\"\"Creates a `SqlDataset`.\n\n `SqlDataset` allows a user to read data from the result set of a SQL query.\n For example:\n\n ```python\n dataset = tf.contrib.data.SqlDataset(\"sqlite\", \"/foo/bar.sqlite3\",\n \"SELECT name, age FROM people\",\n (tf.string, tf.int32))\n iterator = dataset.make_one_shot_iterator()\n next_element = iterator.get_next()\n # Prints the rows of the result set of the above query.\n while True:\n try:\n print(sess.run(next_element))\n except tf.errors.OutOfRangeError:\n break\n ```\n\n Args:\n driver_name: A 0-D `tf.string` tensor containing the database type.\n Currently, the only supported value is 'sqlite'.\n data_source_name: A 0-D `tf.string` tensor containing a connection string\n to connect to the database.\n query: A 0-D 
`tf.string` tensor containing the SQL query to execute.\n output_types: A tuple of `tf.DType` objects representing the types of the\n columns returned by `query`.\n \"\"\"\n super(SqlDataset, self).__init__()\n self._driver_name = ops.convert_to_tensor(\n driver_name, dtype=dtypes.string, name=\"driver_name\")\n self._data_source_name = ops.convert_to_tensor(\n data_source_name, dtype=dtypes.string, name=\"data_source_name\")\n self._query = ops.convert_to_tensor(\n query, dtype=dtypes.string, name=\"query\")\n self._output_types = output_types\n\n def _as_variant_tensor(self):\n return gen_dataset_ops.sql_dataset(self._driver_name,\n self._data_source_name, self._query,\n nest.flatten(self.output_types),\n nest.flatten(self.output_shapes))\n\n @property\n def output_classes(self):\n return nest.map_structure(lambda _: ops.Tensor, self._output_types)\n\n @property\n def output_shapes(self):\n return nest.map_structure(lambda _: tensor_shape.TensorShape([]),\n self._output_types)\n\n @property\n def output_types(self):\n return self._output_types\n" ]
[ [ "tensorflow.python.util.deprecation.deprecated", "tensorflow.python.data.ops.dataset_ops.Dataset.list_files", "tensorflow.python.data.util.nest.flatten", "tensorflow.python.data.ops.dataset_ops.Dataset.from_tensor_slices", "tensorflow.python.platform.gfile.Glob", "tensorflow.python.ops.parsing_ops.parse_example", "tensorflow.python.ops.parsing_ops.decode_csv", "tensorflow.contrib.data.python.ops.shuffle_ops.shuffle_and_repeat", "tensorflow.python.framework.tensor_shape.TensorShape", "tensorflow.python.data.ops.readers.TextLineDataset", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.data.util.nest.map_structure", "tensorflow.python.framework.constant_op.constant" ] ]
yicrane/Real-SR
[ "a6e380b791129b80fe58bf282089c0cfd9159b36" ]
[ "codes/preprocess/collect_noise.py" ]
[ "from PIL import Image\nimport numpy as np\nimport os.path as osp\nimport glob\nimport os\nimport argparse\nimport yaml\n\nparser = argparse.ArgumentParser(description='create a dataset')\nparser.add_argument('--dataset', default='df2k', type=str, help='selecting different datasets')\nparser.add_argument('--artifacts', default='', type=str, help='selecting different artifacts type')\nparser.add_argument('--cleanup_factor', default=2, type=int, help='downscaling factor for image cleanup')\nparser.add_argument('--upscale_factor', default=4, type=int, choices=[4], help='super resolution upscale factor')\nopt = parser.parse_args()\n\n# define input and target directories\nwith open('./preprocess/paths.yml', 'r') as stream:\n PATHS = yaml.load(stream)\n\n\ndef noise_patch(rgb_img, sp, max_var, min_mean):\n img = rgb_img.convert('L')\n rgb_img = np.array(rgb_img)\n img = np.array(img)\n\n w, h = img.shape\n collect_patchs = []\n\n for i in range(0, w - sp, sp):\n for j in range(0, h - sp, sp):\n patch = img[i:i + sp, j:j + sp]\n var_global = np.var(patch)\n mean_global = np.mean(patch)\n if var_global < max_var and mean_global > min_mean:\n rgb_patch = rgb_img[i:i + sp, j:j + sp, :]\n collect_patchs.append(rgb_patch)\n\n return collect_patchs\n\n\nif __name__ == '__main__':\n\n if opt.dataset == 'df2k':\n img_dir = PATHS[opt.dataset][opt.artifacts]['source']\n noise_dir = PATHS['datasets']['df2k'] + '/Corrupted_noise'\n sp = 256\n max_var = 20\n min_mean = 0\n else:\n img_dir = PATHS[opt.dataset][opt.artifacts]['hr']['train']\n noise_dir = PATHS['datasets']['dped'] + '/DPEDiphone_noise_sp32v20m50'\n sp = 256\n max_var = 20\n min_mean = 50\n\n assert not os.path.exists(noise_dir)\n os.mkdir(noise_dir)\n\n img_paths = sorted(glob.glob(osp.join(img_dir, '*.png')))\n cnt = 0\n for path in img_paths:\n img_name = osp.splitext(osp.basename(path))[0]\n print('**********', img_name, '**********')\n img = Image.open(path).convert('RGB')\n patchs = noise_patch(img, sp, max_var, min_mean)\n for idx, patch in enumerate(patchs):\n save_path = osp.join(noise_dir, '{}_{:03}.png'.format(img_name, idx))\n cnt += 1\n print('collect:', cnt, save_path)\n Image.fromarray(patch).save(save_path)\n" ]
[ [ "numpy.array", "numpy.var", "numpy.mean" ] ]
jalavery/gnomeR
[ "4f165774eb3c5f442881a915ee70e18a5f33b387", "4f165774eb3c5f442881a915ee70e18a5f33b387" ]
[ "inst/CnaAnnotator.py", "inst/AnnotatorCore.py" ]
[ "import argparse\n# from AnnotatorCore import *\nimport sys\nimport csv\nimport requests\nimport os.path\nimport logging\nimport re\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom datetime import date\nimport logging\nlogging.basicConfig(level=logging.INFO)\nlog = logging.getLogger('CnaAnnotator')\n\n\ndef main(argv):\n if argv.help:\n log.info('\\n'\n 'CnaAnnotator.py -i <input CNA file> -o <output CNA file> [-p previous results] [-c <input clinical file>] [-s sample list filter] [-t <default tumor type>] [-u oncokb-base-url] [-b oncokb_api_bear_token] [-z annotate_gain_loss]\\n'\n ' Input CNA file should follow the GISTIC output (https://docs.cbioportal.org/5.1-data-loading/data-loading/file-formats#data-file-1)\\n'\n ' Essential clinical columns:\\n'\n ' SAMPLE_ID: sample ID\\n'\n ' Cancer type will be assigned based on the following priority:\\n'\n ' 1) ONCOTREE_CODE in clinical data file\\n'\n ' 2) ONCOTREE_CODE exist in MAF\\n'\n ' 3) default tumor type (-t)\\n'\n ' We do not annotate Gain and Loss by default, add -z to include the analysis. See https://github.com/oncokb/oncokb-annotator/issues/51 for more information.\\n'\n ' Default OncoKB base url is https://www.oncokb.org')\n sys.exit()\n if argv.input_file == '' or argv.output_file == '' or argv.oncokb_api_bearer_token == '':\n log.info('for help: python CnaAnnotator.py -h')\n sys.exit(2)\n if argv.sample_ids_filter:\n setsampleidsfileterfile(argv.sample_ids_filter)\n if argv.oncokb_api_url:\n setoncokbbaseurl(argv.oncokb_api_url)\n setoncokbapitoken(argv.oncokb_api_bearer_token)\n\n cancertypemap = {}\n if argv.input_clinical_file:\n readCancerTypes(argv.input_clinical_file, cancertypemap)\n\n log.info('annotating %s ...' % argv.input_file)\n processcnagisticdata(argv.input_file, argv.output_file, argv.previous_result_file, argv.default_cancer_type,\n cancertypemap, argv.annotate_gain_loss)\n\n log.info('done!')\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(add_help=False)\n parser.add_argument('-h', dest='help', action=\"store_true\", default=False)\n parser.add_argument('-i', dest='input_file', default='', type=str)\n parser.add_argument('-o', dest='output_file', default='', type=str)\n parser.add_argument('-p', dest='previous_result_file', default='', type=str)\n parser.add_argument('-c', dest='input_clinical_file', default='', type=str)\n parser.add_argument('-s', dest='sample_ids_filter', default='', type=str)\n parser.add_argument('-t', dest='default_cancer_type', default='', type=str)\n parser.add_argument('-u', dest='oncokb_api_url', default='', type=str)\n parser.add_argument('-b', dest='oncokb_api_bearer_token', default='', type=str)\n parser.add_argument('-z', dest='annotate_gain_loss', action=\"store_true\", default=False)\n parser.set_defaults(func=main)\n\n args = parser.parse_args()\n args.func(args)\n", "import json\nimport sys\nimport csv\nfrom enum import Enum\n\nimport requests\nimport os.path\nimport logging\nimport re\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom datetime import date\nimport ctypes as ct\n\nlogging.basicConfig(level=logging.INFO)\nlogging.getLogger(\"requests\").setLevel(logging.WARNING)\nlogging.getLogger(\"urllib3\").setLevel(logging.WARNING)\n\nlog = logging.getLogger('AnnotatorCore')\n\ncsv.field_size_limit(int(ct.c_ulong(-1).value // 2)) # Deal with overflow problem on Windows, https://stackoverflow.com/questions/15063936/csv-error-field-larger-than-field-limit-131072\nsizeLimit = 
csv.field_size_limit()\ncsv.field_size_limit(2**(31)-1) # for reading large files\n\noncokbapiurl = \"https://www.oncokb.org/api/v1\"\noncokbapibearertoken = \"\"\n\n\ndef setoncokbbaseurl(u):\n global oncokbapiurl\n oncokbapiurl = u.rstrip('/') + '/api/v1'\n\ndef setoncokbapitoken(t):\n global oncokbapibearertoken\n oncokbapibearertoken = t.strip()\n\ncancerhotspotsbaseurl = \"http://www.cancerhotspots.org\"\ndef setcancerhotspotsbaseurl(u):\n global cancerhotspotsbaseurl\n cancerhotspotsbaseurl = u\n\n_3dhotspotsbaseurl = \"http://www.3dhotspots.org\"\ndef set3dhotspotsbaseurl(u):\n global _3dhotspotsbaseurl\n _3dhotspotsbaseurl = u\n\nsampleidsfilter = None\ndef setsampleidsfileterfile(f):\n global sampleidsfilter\n content = [line.rstrip() for line in open(f)]\n sampleidsfilter = set(content)\n log.info(len(sampleidsfilter))\n\n\nGENE_IN_ONCOKB_HEADER = 'GENE_IN_ONCOKB'\nVARIANT_IN_ONCOKB_HEADER = 'VARIANT_IN_ONCOKB'\n\nGENE_IN_ONCOKB_DEFAULT = 'False'\nVARIANT_IN_ONCOKB_DEFAULT = 'False'\n\nlevels = [\n 'LEVEL_1',\n 'LEVEL_2',\n 'LEVEL_3A',\n 'LEVEL_3B',\n 'LEVEL_4',\n 'LEVEL_R1',\n 'LEVEL_R2',\n 'LEVEL_R3'\n]\n\ndxLevels = [\n 'LEVEL_Dx1',\n 'LEVEL_Dx2',\n 'LEVEL_Dx3'\n]\n\npxLevels = [\n 'LEVEL_Px1',\n 'LEVEL_Px2',\n 'LEVEL_Px3'\n]\n\nmutationtypeconsequencemap = {\n '3\\'Flank': ['any'],\n '5\\'Flank ': ['any'],\n 'Targeted_Region': ['inframe_deletion', 'inframe_insertion'],\n 'COMPLEX_INDEL': ['inframe_deletion', 'inframe_insertion'],\n 'ESSENTIAL_SPLICE_SITE': ['feature_truncation'],\n 'Exon skipping': ['inframe_deletion'],\n 'Frameshift deletion': ['frameshift_variant'],\n 'Frameshift insertion': ['frameshift_variant'],\n 'FRAMESHIFT_CODING': ['frameshift_variant'],\n 'Frame_Shift_Del': ['frameshift_variant'],\n 'Frame_Shift_Ins': ['frameshift_variant'],\n 'Fusion': ['fusion'],\n 'Indel': ['frameshift_variant', 'inframe_deletion', 'inframe_insertion'],\n 'In_Frame_Del': ['inframe_deletion'],\n 'In_Frame_Ins': ['inframe_insertion'],\n 'Missense': ['missense_variant'],\n 'Missense_Mutation': ['missense_variant'],\n 'Nonsense_Mutation': ['stop_gained'],\n 'Nonstop_Mutation': ['stop_lost'],\n 'Splice_Site': ['splice_region_variant'],\n 'Splice_Site_Del': ['splice_region_variant'],\n 'Splice_Site_SNP': ['splice_region_variant'],\n 'splicing': ['splice_region_variant'],\n 'Translation_Start_Site': ['start_lost'],\n 'vIII deletion': ['any']\n}\n\n\n# column headers\nHUGO_HEADERS = ['HUGO_SYMBOL', 'HUGO_GENE_SYMBOL', 'GENE']\nCONSEQUENCE_HEADERS = ['VARIANT_CLASSIFICATION', 'MUTATION_TYPE']\nALTERATION_HEADER = 'ALTERATION'\nHGVSP_SHORT_HEADER = 'HGVSP_SHORT'\nHGVSP_HEADER = 'HGVSP'\nHGVSG_HEADER = 'HGVSG'\nHGVS_HEADERS = [ALTERATION_HEADER, HGVSP_SHORT_HEADER, HGVSP_HEADER, HGVSG_HEADER, 'AMINO_ACID_CHANGE', 'FUSION']\nSAMPLE_HEADERS = ['SAMPLE_ID', 'TUMOR_SAMPLE_BARCODE']\nPROTEIN_START_HEADERS = ['PROTEIN_START']\nPROTEIN_END_HEADERS = ['PROTEIN_END']\nPROTEIN_POSITION_HEADERS = ['PROTEIN_POSITION']\nCANCER_TYPE_HEADERS = ['ONCOTREE_CODE', 'CANCER_TYPE']\nFUSION_HEADERS = ['FUSION']\nREFERENCE_GENOME_HEADERS = ['NCBI_BUILD', 'REFERENCE_GENOME']\n\n# columns for genomic change annotation\nGC_CHROMOSOME_HEADER = 'CHROMOSOME'\nGC_START_POSITION_HEADER = 'START_POSITION'\nGC_END_POSITION_HEADER = 'END_POSITION'\nGC_REF_ALLELE_HEADER = 'REFERENCE_ALLELE'\nGC_VAR_ALLELE_1_HEADER = 'TUMOR_SEQ_ALLELE1'\nGC_VAR_ALLELE_2_HEADER = 'TUMOR_SEQ_ALLELE2'\nGENOMIC_CHANGE_HEADERS = [GC_CHROMOSOME_HEADER, GC_START_POSITION_HEADER, GC_END_POSITION_HEADER, GC_REF_ALLELE_HEADER, GC_VAR_ALLELE_1_HEADER, 
GC_VAR_ALLELE_2_HEADER]\n\n\nclass QueryType(Enum):\n HGVSP_SHORT = 'HGVSP_SHORT'\n HGVSP = 'HGVSP'\n HGVSG = 'HGVSG'\n GENOMIC_CHANGE = 'GENOMIC_CHANGE'\n\n\nclass ReferenceGenome(Enum):\n GRCH37 = 'GRCh37'\n GRCH38 = 'GRCh38'\n\n\nREQUIRED_QUERY_TYPE_COLUMNS = {\n QueryType.HGVSP_SHORT: [HGVSP_SHORT_HEADER],\n QueryType.HGVSP: [HGVSP_HEADER],\n QueryType.HGVSG: [HGVSG_HEADER],\n QueryType.GENOMIC_CHANGE: GENOMIC_CHANGE_HEADERS\n}\n\nPOST_QUERIES_THRESHOLD = 1000\n\ndef getOncokbInfo():\n ret = ['Files annotated on ' + date.today().strftime('%m/%d/%Y') + \"\\nOncoKB API URL: \"+oncokbapiurl]\n try:\n info = requests.get(oncokbapiurl + \"/info\").json()\n ret.append('\\nOncoKB data version: ' + info['dataVersion']['version']+', released on ' + info['dataVersion']['date'])\n except:\n log.error(\"error when fetch OncoKB info\")\n return ''.join(ret)\n\n\ndef generateReadme(outfile):\n outf = open(outfile, 'w+', 1000)\n outf.write(getOncokbInfo())\n outf.close()\n\ndef gethotspots(url, type):\n hotspots = {}\n response = requests.get(url)\n if response.status_code == 200:\n hotspotsjson = response.json()\n\n for hs in hotspotsjson:\n gene = hs['hugoSymbol']\n start = hs['aminoAcidPosition']['start']\n end = hs['aminoAcidPosition']['end']\n if type is None or hs['type'] == type:\n if gene not in hotspots:\n hotspots[gene] = set()\n for i in range(start, end + 1):\n hotspots[gene].add(i)\n else:\n log.error(\"error when processing %s \\n\" % url +\n \"reason: %s\" % response.reason)\n return hotspots\n\n\ndef makeoncokbpostrequest(url, body):\n headers = {\n 'Content-Type': 'application/json',\n 'Authorization': 'Bearer %s' % oncokbapibearertoken\n }\n return requests.post(url, headers=headers, data=json.dumps(body, default=lambda o: o.__dict__))\n\n\ndef makeoncokbgetrequest(url):\n headers = {\n 'Content-Type': 'application/json',\n 'Authorization': 'Bearer %s' % oncokbapibearertoken\n }\n return requests.get(url, headers=headers)\n\n\n_3dhotspots = None\n\ndef init_3d_hotspots():\n global _3dhotspots\n _3dhotspots = gethotspots(_3dhotspotsbaseurl+\"/api/hotspots/3d\", None)\n\n\nconversiondict = {'Ala': 'A',\n 'Asx': 'B',\n 'Cys': 'C',\n 'Asp': 'D',\n 'Glu': 'E',\n 'Phe': 'F',\n 'Gly': 'G',\n 'His': 'H',\n 'Ile': 'I',\n 'Lys': 'K',\n 'Leu': 'L',\n 'Met': 'M',\n 'Asn': 'N',\n 'Pro': 'P',\n 'Gln': 'Q',\n 'Arg': 'R',\n 'Ser': 'S',\n 'Thr': 'T',\n 'Val': 'V',\n 'Trp': 'W',\n 'Tyr': 'Y',\n 'Glx': 'Z'\n }\nconversionlist = conversiondict.keys()\ndef conversion(hgvs):\n threecharactersearch = re.findall('[a-zA-Z]{3}\\d+', hgvs, flags=re.IGNORECASE)\n if threecharactersearch:\n if any(letters.lower() in hgvs.lower() for letters in conversionlist):\n return replace_all(hgvs)\n return hgvs\n\ndef replace_all(hgvs):\n # Author: Thomas Glaessle\n pattern = re.compile('|'.join(conversionlist), re.IGNORECASE)\n return pattern.sub(lambda m: conversiondict[m.group().capitalize()], hgvs)\n\n\ndef append_annotation_to_file(outf, ncols, rows, annotations):\n if len(rows) != len(annotations):\n log.error('The length of the rows and annotations do not match')\n\n for index, annotation in enumerate(annotations):\n row = rows[index]\n if annotation is not None:\n row = row + annotation\n\n row = padrow(row, ncols)\n rowstr = '\\t'.join(row)\n rowstr = rowstr.encode('ascii', 'ignore').decode('ascii')\n outf.write(rowstr + \"\\n\")\n\n\ndef get_tumor_type_from_row(row, row_index, defaultCancerType, icancertype, cancerTypeMap, sample):\n cancertype = defaultCancerType\n if icancertype >= 0:\n row_cancer_type = 
get_cell_content(row, icancertype)\n if row_cancer_type is not None:\n cancertype = row_cancer_type\n if sample in cancerTypeMap:\n cancertype = cancerTypeMap[sample]\n if cancertype == \"\":\n log.info(\"Cancer type for the sample should be defined for a more accurate result\\nline %s: %s\\n\" % (row_index, row))\n # continue\n return cancertype\n\ndef has_desired_headers(desired_headers, file_headers):\n has_required_headers = True\n for header in desired_headers:\n if header not in file_headers:\n has_required_headers = False\n break\n\n return has_required_headers\n\n\ndef resolve_query_type(user_input_query_type, headers):\n selected_query_type = None\n if isinstance(user_input_query_type, QueryType):\n selected_query_type = user_input_query_type\n\n if selected_query_type is None and HGVSP_SHORT_HEADER in headers:\n selected_query_type = QueryType.HGVSP_SHORT\n if selected_query_type is None and HGVSP_HEADER in headers:\n selected_query_type = QueryType.HGVSP\n if selected_query_type is None and HGVSG_HEADER in headers:\n selected_query_type = QueryType.HGVSG\n\n if selected_query_type is None and has_desired_headers(REQUIRED_QUERY_TYPE_COLUMNS[QueryType.GENOMIC_CHANGE], headers):\n selected_query_type = QueryType.GENOMIC_CHANGE\n\n # default to HGVSp_Short\n if selected_query_type is None:\n selected_query_type = QueryType.HGVSP_SHORT\n\n # check the file has required columns\n if has_desired_headers(REQUIRED_QUERY_TYPE_COLUMNS[selected_query_type], headers) == False:\n # when it is False, it will never be GENOMIC_CHANGE. For other types, we need to check whether ALTERATION column is available\n if ALTERATION_HEADER not in headers:\n raise Exception(\"The file does not have required columns \"\n + ', '.join(REQUIRED_QUERY_TYPE_COLUMNS[user_input_query_type])\n + \" for the query type: \" + user_input_query_type.value)\n\n return selected_query_type\n\n\ndef get_reference_genome_from_row(row_reference_genome, default_reference_genome):\n reference_genome = default_reference_genome\n if row_reference_genome is not None and row_reference_genome != '':\n try:\n reference_genome = ReferenceGenome[row_reference_genome.upper()]\n except KeyError:\n log.warning('Unexpected reference genome, only GRCh37 and GRCh38 are supported.' + (\n ' Use default.' 
if default_reference_genome is not None else ' Skipping.'))\n return reference_genome\n\n\ndef processalterationevents(eventfile, outfile, previousoutfile, defaultCancerType, cancerTypeMap,\n annotatehotspots, user_input_query_type, default_reference_genome):\n if annotatehotspots:\n init_3d_hotspots()\n if os.path.isfile(previousoutfile):\n cacheannotated(previousoutfile, defaultCancerType, cancerTypeMap)\n outf = open(outfile, 'w+', 1000)\n with open(eventfile, 'rU') as infile:\n reader = csv.reader(infile, delimiter='\\t')\n\n headers = readheaders(reader)\n\n ncols = headers[\"length\"]\n if ncols == 0:\n return\n newncols = 0\n\n outf.write(headers['^-$'])\n\n if annotatehotspots:\n outf.write(\"\\tIS-A-HOTSPOT\")\n outf.write(\"\\tIS-A-3D-HOTSPOT\")\n newncols += 2\n\n outf.write(\"\\t\" + GENE_IN_ONCOKB_HEADER)\n outf.write(\"\\t\" + VARIANT_IN_ONCOKB_HEADER)\n\n outf.write(\"\\tMUTATION_EFFECT\")\n outf.write(\"\\tONCOGENIC\")\n\n newncols += 4\n\n for l in levels:\n outf.write('\\t' + l)\n newncols += len(levels)\n\n outf.write(\"\\tHIGHEST_LEVEL\")\n outf.write(\"\\tCITATIONS\")\n newncols += 2\n\n for l in dxLevels:\n outf.write('\\t' + l)\n newncols += len(dxLevels)\n\n outf.write(\"\\tHIGHEST_DX_LEVEL\")\n newncols += 1\n\n for l in pxLevels:\n outf.write('\\t' + l)\n newncols += len(pxLevels)\n\n outf.write(\"\\tHIGHEST_PX_LEVEL\")\n newncols += 1\n\n outf.write(\"\\n\")\n\n query_type = resolve_query_type(user_input_query_type, headers)\n if (query_type == QueryType.HGVSP_SHORT):\n process_alteration(reader, outf, headers, [HGVSP_SHORT_HEADER, ALTERATION_HEADER], ncols, newncols,\n defaultCancerType,\n cancerTypeMap, annotatehotspots, default_reference_genome)\n\n if (query_type == QueryType.HGVSP):\n process_alteration(reader, outf, headers, [HGVSP_HEADER, ALTERATION_HEADER], ncols, newncols, defaultCancerType,\n cancerTypeMap, annotatehotspots, default_reference_genome)\n\n if (query_type == QueryType.HGVSG):\n process_hvsg(reader, outf, headers, [HGVSG_HEADER, ALTERATION_HEADER], ncols, newncols, defaultCancerType,\n cancerTypeMap, annotatehotspots, default_reference_genome)\n\n if (query_type == QueryType.GENOMIC_CHANGE):\n process_genomic_change(reader, outf, headers, ncols, newncols, defaultCancerType, cancerTypeMap, annotatehotspots, default_reference_genome)\n\n outf.close()\n\n\ndef get_cell_content(row, index, return_empty_string=False):\n if index >= 0 and row[index] != 'NULL' and row[index] != '':\n return row[index]\n elif return_empty_string:\n return ''\n else:\n return None\n\ndef process_alteration(maffilereader, outf, maf_headers, alteration_column_names, ncols, nannotationcols, defaultCancerType, cancerTypeMap,\n annotatehotspots, default_reference_genome):\n ihugo = geIndexOfHeader(maf_headers, HUGO_HEADERS)\n iconsequence = geIndexOfHeader(maf_headers, CONSEQUENCE_HEADERS)\n ihgvs = geIndexOfHeader(maf_headers, alteration_column_names)\n isample = geIndexOfHeader(maf_headers, SAMPLE_HEADERS)\n istart = geIndexOfHeader(maf_headers, PROTEIN_START_HEADERS)\n iend = geIndexOfHeader(maf_headers, PROTEIN_END_HEADERS)\n iproteinpos = geIndexOfHeader(maf_headers, PROTEIN_POSITION_HEADERS)\n icancertype = geIndexOfHeader(maf_headers, CANCER_TYPE_HEADERS)\n ireferencegenome= geIndexOfHeader(maf_headers, REFERENCE_GENOME_HEADERS)\n\n posp = re.compile('[0-9]+')\n\n i = 0\n queries = []\n rows = []\n for row in maffilereader:\n i = i + 1\n\n if i % POST_QUERIES_THRESHOLD == 0:\n log.info(i)\n\n row = padrow(row, ncols)\n\n sample = row[isample]\n if sampleidsfilter 
and sample not in sampleidsfilter:\n continue\n\n hugo = row[ihugo]\n\n consequence = get_cell_content(row, iconsequence)\n if consequence in mutationtypeconsequencemap:\n consequence = '%2B'.join(mutationtypeconsequencemap[consequence])\n\n hgvs = row[ihgvs]\n if hgvs.startswith('p.'):\n hgvs = hgvs[2:]\n\n cancertype = get_tumor_type_from_row(row, i, defaultCancerType, icancertype, cancerTypeMap, sample)\n reference_genome = get_reference_genome_from_row(get_cell_content(row, ireferencegenome), default_reference_genome)\n\n hgvs = conversion(hgvs)\n\n start = get_cell_content(row, istart)\n\n end = get_cell_content(row, iend)\n\n if start is None and iproteinpos >= 0 and row[iproteinpos] != \"\" and row[iproteinpos] != \".\" and row[iproteinpos] != \"-\":\n poss = row[iproteinpos].split('/')[0].split('-')\n try:\n if len(poss) > 0:\n start = int(poss[0])\n if len(poss) == 2:\n end = int(poss[1])\n except ValueError:\n log.info(\"position wrong at line %s: %s\" % (str(i), row[iproteinpos]))\n\n if start is None and consequence == \"missense_variant\":\n m = posp.search(hgvs)\n if m:\n start = m.group()\n\n if start is not None and end is None:\n end = start\n\n query = ProteinChangeQuery(hugo, hgvs, cancertype, reference_genome, consequence, start, end)\n queries.append(query)\n rows.append(row)\n\n if len(queries) == POST_QUERIES_THRESHOLD:\n annotations = pull_protein_change_info(queries,annotatehotspots)\n append_annotation_to_file(outf, ncols + nannotationcols, rows, annotations)\n queries = []\n rows = []\n\n if len(queries) > 0:\n annotations = pull_protein_change_info(queries,annotatehotspots)\n append_annotation_to_file(outf, ncols + nannotationcols, rows, annotations)\n\n# this method is from genome-nexus annotation-tools\n# https://github.com/genome-nexus/annotation-tools/blob/53ff7f7fe673e961282f871ebc78d2ecc0831919/standardize_mutation_data.py\ndef get_var_allele(ref_allele, tumor_seq_allele1, tumor_seq_allele2):\n # set the general tumor_seq_allele as the first non-ref allele encountered\n # this will be used to resolve the variant classification and variant type\n # if there are no tumor alleles that do not match the ref allele then use empty string\n # in the event that this happens then there might be something wrong with the data itself\n try:\n tumor_seq_allele = [allele for allele in [tumor_seq_allele1, tumor_seq_allele2] if allele != ref_allele][0]\n except:\n tumor_seq_allele = \"\"\n\n return tumor_seq_allele\n\ndef process_genomic_change(maffilereader, outf, maf_headers, ncols, nannotationcols, defaultCancerType, cancerTypeMap, annotatehotspots, default_reference_genome):\n ichromosome = geIndexOfHeader(maf_headers, [GC_CHROMOSOME_HEADER])\n istart = geIndexOfHeader(maf_headers, [GC_START_POSITION_HEADER])\n iend = geIndexOfHeader(maf_headers, [GC_END_POSITION_HEADER])\n irefallele = geIndexOfHeader(maf_headers, [GC_REF_ALLELE_HEADER])\n ivarallele1 = geIndexOfHeader(maf_headers, [GC_VAR_ALLELE_1_HEADER])\n ivarallele2 = geIndexOfHeader(maf_headers, [GC_VAR_ALLELE_2_HEADER])\n\n isample = geIndexOfHeader(maf_headers, SAMPLE_HEADERS)\n icancertype = geIndexOfHeader(maf_headers, CANCER_TYPE_HEADERS)\n ireferencegenome= geIndexOfHeader(maf_headers, REFERENCE_GENOME_HEADERS)\n\n posp = re.compile('[0-9]+')\n\n i = 0\n queries = []\n rows = []\n for row in maffilereader:\n i = i + 1\n\n if i % POST_QUERIES_THRESHOLD == 0:\n log.info(i)\n\n row = padrow(row, ncols)\n\n sample = row[isample]\n if sampleidsfilter and sample not in sampleidsfilter:\n continue\n\n cancertype 
= get_tumor_type_from_row(row, i, defaultCancerType, icancertype, cancerTypeMap, sample)\n reference_genome = get_reference_genome_from_row(get_cell_content(row, ireferencegenome), default_reference_genome)\n\n chromosome = get_cell_content(row, ichromosome, True)\n start = get_cell_content(row, istart, True)\n end = get_cell_content(row, iend, True)\n ref_allele = get_cell_content(row, irefallele, True)\n var_allele_1 = get_cell_content(row, ivarallele1, True)\n var_allele_2 = get_cell_content(row, ivarallele2, True)\n var_allele = get_var_allele(ref_allele, var_allele_1, var_allele_2)\n\n query = GenomicChangeQuery(chromosome, start, end, ref_allele, var_allele, cancertype, reference_genome)\n queries.append(query)\n rows.append(row)\n\n if len(queries) == POST_QUERIES_THRESHOLD:\n annotations = pull_genomic_change_info(queries,annotatehotspots)\n append_annotation_to_file(outf, ncols+nannotationcols, rows, annotations)\n queries = []\n rows = []\n\n if len(queries) > 0:\n annotations = pull_genomic_change_info(queries,annotatehotspots)\n append_annotation_to_file(outf, ncols+nannotationcols, rows, annotations)\n\ndef process_hvsg(maffilereader, outf, maf_headers, alteration_column_names, ncols, nannotationcols, defaultCancerType, cancerTypeMap, annotatehotspots, default_reference_genome):\n ihgvsg = geIndexOfHeader(maf_headers, alteration_column_names)\n isample = geIndexOfHeader(maf_headers, SAMPLE_HEADERS)\n icancertype = geIndexOfHeader(maf_headers, CANCER_TYPE_HEADERS)\n ireferencegenome= geIndexOfHeader(maf_headers, REFERENCE_GENOME_HEADERS)\n\n i = 0\n queries = []\n rows = []\n for row in maffilereader:\n i = i + 1\n\n if i % POST_QUERIES_THRESHOLD == 0:\n log.info(i)\n\n row = padrow(row, ncols)\n\n sample = row[isample]\n if sampleidsfilter and sample not in sampleidsfilter:\n continue\n\n hgvsg = get_cell_content(row, ihgvsg)\n\n cancertype = get_tumor_type_from_row(row, i, defaultCancerType, icancertype, cancerTypeMap, sample)\n reference_genome = get_reference_genome_from_row(get_cell_content(row, ireferencegenome), default_reference_genome)\n\n if hgvsg is None:\n if annotatehotspots:\n default_cols = [['', '', GENE_IN_ONCOKB_DEFAULT, VARIANT_IN_ONCOKB_DEFAULT]]\n else:\n default_cols = [[GENE_IN_ONCOKB_DEFAULT, VARIANT_IN_ONCOKB_DEFAULT]]\n append_annotation_to_file(outf, ncols + nannotationcols, [row],\n default_cols)\n else:\n query = HGVSgQuery(hgvsg, cancertype, reference_genome)\n queries.append(query)\n rows.append(row)\n\n if len(queries) == POST_QUERIES_THRESHOLD:\n annotations = pull_hgvsg_info(queries, annotatehotspots)\n append_annotation_to_file(outf, ncols+nannotationcols, rows, annotations)\n queries = []\n rows = []\n\n if len(queries) > 0:\n annotations = pull_hgvsg_info(queries,annotatehotspots)\n append_annotation_to_file(outf, ncols+nannotationcols, rows, annotations)\n\n\ndef getgenesfromfusion(fusion, nameregex=None):\n GENES_REGEX = \"([A-Za-z\\d]+-[A-Za-z\\d]+)\" if nameregex is None else nameregex\n searchresult = re.search(GENES_REGEX, fusion, flags=re.IGNORECASE)\n gene1=None\n gene2=None\n if searchresult:\n parts = searchresult.group(1).split(\"-\")\n gene1 = parts[0]\n gene2 = gene1\n if len(parts) > 1 and parts[1] != \"intragenic\":\n gene2 = parts[1]\n else:\n gene1=gene2=fusion\n return gene1, gene2\n\ndef processsv(svdata, outfile, previousoutfile, defaultCancerType, cancerTypeMap, nameregex):\n if os.path.isfile(previousoutfile):\n cacheannotated(previousoutfile, defaultCancerType, cancerTypeMap)\n outf = open(outfile, 'w+')\n with 
open(svdata, 'rU') as infile:\n reader = csv.reader(infile, delimiter='\\t')\n\n headers = readheaders(reader)\n\n ncols = headers[\"length\"]\n\n if ncols == 0:\n return\n\n outf.write(headers['^-$'])\n outf.write(\"\\t\" + GENE_IN_ONCOKB_HEADER)\n outf.write(\"\\t\" + VARIANT_IN_ONCOKB_HEADER)\n outf.write(\"\\tMUTATION_EFFECT\")\n outf.write(\"\\tONCOGENIC\")\n for l in levels:\n outf.write('\\t' + l)\n outf.write(\"\\tHIGHEST_LEVEL\")\n outf.write(\"\\tCITATIONS\")\n\n for l in dxLevels:\n outf.write('\\t' + l)\n outf.write(\"\\tHIGHEST_DX_LEVEL\")\n\n for l in pxLevels:\n outf.write('\\t' + l)\n outf.write(\"\\tHIGHEST_PX_LEVEL\\n\")\n\n newcols = ncols + 8 + len(levels) + len(dxLevels) + len(pxLevels)\n\n igene1 = geIndexOfHeader(headers, ['GENE1'])\n igene2 = geIndexOfHeader(headers, ['GENE2'])\n ifusion = geIndexOfHeader(headers, FUSION_HEADERS)\n isample = geIndexOfHeader(headers, SAMPLE_HEADERS)\n icancertype = geIndexOfHeader(headers, CANCER_TYPE_HEADERS)\n\n i = 0\n queries = []\n rows = []\n for row in reader:\n i = i + 1\n if i % POST_QUERIES_THRESHOLD == 0:\n log.info(i)\n\n row = padrow(row, ncols)\n\n sample = row[isample]\n\n if sampleidsfilter and sample not in sampleidsfilter:\n continue\n\n gene1 = None\n gene2 = None\n if igene1 >= 0:\n gene1 = row[igene1]\n if igene2 >= 0:\n gene2 = row[igene2]\n if igene1 < 0 and igene2 < 0 and ifusion >= 0:\n fusion = row[ifusion]\n gene1, gene2 = getgenesfromfusion(fusion, nameregex)\n\n cancertype = get_tumor_type_from_row(row, i, defaultCancerType, icancertype, cancerTypeMap, sample)\n\n\n queries.append(StructuralVariantQuery(gene1, gene2, 'FUSION', cancertype))\n rows.append(row)\n\n if len(queries) == POST_QUERIES_THRESHOLD:\n annotations = pull_structural_variant_info(queries)\n append_annotation_to_file(outf, newcols, rows, annotations)\n queries = []\n rows = []\n\n if len(queries) > 0:\n annotations = pull_structural_variant_info(queries)\n append_annotation_to_file(outf, newcols, rows, annotations)\n outf.close()\n\n\ndef processcnagisticdata(cnafile, outfile, previousoutfile, defaultCancerType, cancerTypeMap, annotate_gain_loss=False):\n CNA_AMPLIFICATION_TXT = 'Amplification'\n CNA_DELETION_TXT = 'Deletion'\n CNA_LOSS_TXT = 'Loss'\n CNA_GAIN_TXT = 'Gain'\n\n cnaEventMap = {\n \"-2\": CNA_DELETION_TXT,\n \"-1.5\": CNA_DELETION_TXT,\n \"2\": CNA_AMPLIFICATION_TXT\n }\n\n if annotate_gain_loss:\n cnaEventMap.update({\n \"-1\": CNA_LOSS_TXT,\n \"1\": CNA_GAIN_TXT\n })\n\n if os.path.isfile(previousoutfile):\n cacheannotated(previousoutfile, defaultCancerType, cancerTypeMap)\n outf = open(outfile, 'w+', 1000)\n with open(cnafile, 'rU') as infile:\n reader = csv.reader(infile, delimiter='\\t')\n headers = readheaders(reader)\n samples = []\n rawsamples = []\n if headers[\"length\"] != 0:\n startofsamples = getfirstcolumnofsampleingisticdata(headers['^-$'].split('\\t'))\n rawsamples = headers['^-$'].split('\\t')[startofsamples:]\n for rs in rawsamples:\n samples.append(rs)\n\n if defaultCancerType == '' and not set(cancerTypeMap.keys()).issuperset(set(samples)):\n log.info(\n \"Cancer type for all samples should be defined for a more accurate result\\nsamples in cna file: %s\\n\" % (\n samples))\n\n outf.write('SAMPLE_ID\\tCANCER_TYPE\\tHUGO_SYMBOL\\tALTERATION')\n outf.write(\"\\t\"+GENE_IN_ONCOKB_HEADER)\n outf.write(\"\\t\"+VARIANT_IN_ONCOKB_HEADER)\n outf.write(\"\\tMUTATION_EFFECT\")\n outf.write(\"\\tONCOGENIC\")\n for l in levels:\n outf.write('\\t' + l)\n outf.write(\"\\tHIGHEST_LEVEL\")\n 
outf.write(\"\\tCITATIONS\")\n\n for l in dxLevels:\n outf.write('\\t' + l)\n outf.write(\"\\tHIGHEST_DX_LEVEL\")\n\n for l in pxLevels:\n outf.write('\\t' + l)\n outf.write(\"\\tHIGHEST_PX_LEVEL\\n\")\n\n ncols = 12 + len(levels) + len(dxLevels) + len(pxLevels)\n\n i = 0\n rows = []\n queries = []\n for row in reader:\n i = i + 1\n if i % POST_QUERIES_THRESHOLD == 0:\n log.info(i)\n\n hugo = row[0]\n if len(row) == 1:\n log.warning(\"No CNA specified for gene \" + hugo)\n continue\n\n for rawsample in rawsamples:\n if rawsample in headers:\n if len(row) <= headers[rawsample]:\n log.warning('No CNA specified for ' + row[0] + ' ' + rawsample)\n continue\n cna = row[headers[rawsample]]\n if cna in cnaEventMap:\n cna_type = cnaEventMap[cna]\n if cna_type is not None:\n cancertype = defaultCancerType\n sample = rawsample\n\n if sampleidsfilter and sample not in sampleidsfilter:\n continue\n\n if sample in cancerTypeMap:\n cancertype = cancerTypeMap[sample]\n\n rows.append([sample, cancertype, hugo, cna_type])\n queries.append(CNAQuery(hugo, cna_type, cancertype))\n\n if len(queries) == POST_QUERIES_THRESHOLD:\n annotations = pull_cna_info(queries)\n append_annotation_to_file(outf, ncols, rows, annotations)\n rows = []\n queries = []\n\n if len(queries) > 0:\n annotations = pull_cna_info(queries)\n append_annotation_to_file(outf, ncols, rows, annotations)\n\n outf.close()\n\ndef getfirstcolumnofsampleingisticdata(headers):\n header0 = headers[0].lower()\n if header0 != \"hugo_symbol\" and header0 != \"gene symbol\":\n log.info(\"Gistic data should start with Hugo_Symbol\")\n quit()\n\n header1 = headers[1].lower()\n if header1 != \"entrez_gene_id\" and header1 != \"locus id\":\n return 1\n\n header2 = headers[2].lower()\n if header2 != \"cytoband\":\n return 2\n\n return 3\n\n\ndef file_len(fname):\n with open(fname) as f:\n for i, l in enumerate(f):\n pass\n return i + 1\n\n\ndef processclinicaldata(annotatedmutfiles, clinicalfile, outfile):\n samplelevels = {}\n sampledxlevels = {}\n samplepxlevels = {}\n sampleleveltreatments = {}\n sampledrivers = {}\n samplemutationswithdiagnosis = {}\n samplemutationswithprognosis = {}\n sampleactionablecount = {}\n samplealterationcount = {}\n for annotatedmutfile in annotatedmutfiles:\n with open(annotatedmutfile, 'rU') as mutfile:\n reader = csv.reader(mutfile, delimiter='\\t')\n headers = readheaders(reader)\n\n ncols = headers[\"length\"]\n\n if ncols == 0:\n return\n\n igene1 = geIndexOfHeader(headers, ['GENE1'] + HUGO_HEADERS) # fusion\n igene2 = geIndexOfHeader(headers, ['GENE2'] + HUGO_HEADERS) # fusion\n ifusion = geIndexOfHeader(headers, ['FUSION'])\n\n ihugo = geIndexOfHeader(headers, HUGO_HEADERS)\n iconsequence = geIndexOfHeader(headers, CONSEQUENCE_HEADERS)\n ihgvs = geIndexOfHeader(headers, HGVS_HEADERS)\n isample = geIndexOfHeader(headers, SAMPLE_HEADERS)\n istart = geIndexOfHeader(headers, PROTEIN_START_HEADERS)\n iend = geIndexOfHeader(headers, PROTEIN_END_HEADERS)\n icancertype = geIndexOfHeader(headers, CANCER_TYPE_HEADERS)\n # imutationeffect = headers['MUTATION_EFFECT']\n ioncogenic = headers['ONCOGENIC']\n\n isfusion = (igene1 != -1 & igene2 != -1) or ifusion != -1\n ismutorcna = ihugo != -1 & ihgvs != -1\n\n if not isfusion and not ismutorcna:\n log.error(\"missing proper header\")\n exit()\n\n for row in reader:\n\n row = padrow(row, ncols)\n\n sample = row[isample]\n\n oncogenic = \"\"\n if ioncogenic < len(row):\n oncogenic = row[ioncogenic].lower()\n if sample not in samplelevels:\n samplelevels[sample] = {}\n 
sampledxlevels[sample] = []\n samplepxlevels[sample] = []\n sampleleveltreatments[sample] = {}\n sampledrivers[sample] = []\n sampleactionablecount[sample] = {}\n\n if sample not in samplemutationswithdiagnosis:\n samplemutationswithdiagnosis[sample] = []\n\n if sample not in samplemutationswithprognosis:\n samplemutationswithprognosis[sample] = []\n\n if sample not in samplealterationcount:\n samplealterationcount[sample] = 1\n else:\n samplealterationcount[sample] += 1\n\n hugo = row[ihugo]\n alteration = row[ihgvs]\n gene1 = row[igene1]\n gene2 = row[igene2]\n\n variant = \"NA\"\n if ismutorcna:\n variant = hugo + \" \" + alteration\n elif isfusion:\n if ifusion != -1:\n variant = row[ifusion]\n else:\n if gene1 == gene2:\n variant = gene1 + \" intragenic deletion\"\n else:\n variant = gene1 + \"-\" + gene2 + \" fusion\"\n\n if oncogenic == \"oncogenic\" or oncogenic == \"likely oncogenic\" or oncogenic == \"predicted oncogenic\":\n sampledrivers[sample].append(variant)\n\n for l in levels:\n il = headers[l]\n if il < len(row) and row[il] != '':\n if l not in samplelevels[sample]:\n samplelevels[sample][l] = []\n sampleleveltreatments[sample][l] = []\n samplelevels[sample][l].append(row[il] + \"(\" + variant + \")\")\n sampleleveltreatments[sample][l].extend(row[il].split(\",\"))\n\n if not l.startswith('LEVEL_R'):\n sampleactionablecount[sample][variant] = True\n\n for l in dxLevels:\n il = headers[l]\n if il < len(row) and row[il] != '':\n if l not in samplelevels[sample]:\n samplelevels[sample][l] = []\n samplelevels[sample][l].append(row[il] + \"(\" + variant + \")\")\n\n for l in pxLevels:\n il = headers[l]\n if il < len(row) and row[il] != '':\n if l not in samplelevels[sample]:\n samplelevels[sample][l] = []\n samplelevels[sample][l].append(row[il] + \"(\" + variant + \")\")\n\n ihighestdxlevel = geIndexOfHeader(headers, ['HIGHEST_DX_LEVEL'])\n if ihighestdxlevel != -1:\n if row[ihighestdxlevel] != '':\n samplemutationswithdiagnosis[sample].append(variant)\n sampledxlevels[sample].append(row[ihighestdxlevel])\n\n ihighestpxlevel = geIndexOfHeader(headers, ['HIGHEST_PX_LEVEL'])\n if ihighestpxlevel != -1:\n if row[ihighestpxlevel] != '':\n samplemutationswithprognosis[sample].append(variant)\n samplepxlevels[sample].append(row[ihighestpxlevel])\n\n outf = open(outfile, 'w+')\n\n # export to anntoated file\n with open(clinicalfile, 'rU') as clinfile:\n reader = csv.reader(clinfile, delimiter='\\t')\n headers = readheaders(reader)\n outf.write(headers['^-$'])\n for l in levels:\n outf.write('\\t' + l)\n outf.write('\\tHIGHEST_LEVEL')\n for l in dxLevels:\n outf.write('\\t' + l)\n outf.write('\\tHIGHEST_DX_LEVEL')\n for l in pxLevels:\n outf.write('\\t' + l)\n outf.write('\\tHIGHEST_PX_LEVEL')\n outf.write('\\tONCOGENIC_MUTATIONS\\t#ONCOGENIC_MUTATIONS\\t#MUTATIONS_WITH_THERAPEUTIC_IMPLICATIONS\\t#MUTATIONS_WITH_DIAGNOSTIC_IMPLICATIONS\\t#MUTATIONS_WITH_PROGNOSTIC_IMPLICATIONS\\t#MUTATIONS\\n')\n isample = headers['SAMPLE_ID']\n\n for row in reader:\n sample = row[isample]\n\n if sampleidsfilter and sample not in sampleidsfilter:\n continue\n\n outf.write('\\t'.join(row))\n\n for l in levels:\n outf.write('\\t')\n if sample in samplelevels and l in samplelevels[sample]:\n outf.write(\";\".join(samplelevels[sample][l]))\n\n highestlevel = ''\n highestdxlevel = ''\n highestpxlevel = ''\n if sample in sampleleveltreatments:\n highestlevel = gethighestsensitivitylevel(sampleleveltreatments[sample])\n if sample in sampledxlevels:\n highestdxlevel = gethighestDxPxlevel(dxLevels, 
sampledxlevels[sample])\n if sample in samplepxlevels:\n highestpxlevel = gethighestDxPxlevel(pxLevels, samplepxlevels[sample])\n # if highestlevel == '':\n # if sample in sampledrivers and len(sampledrivers[sample])>0:\n # highestlevel = 'Oncogenic, no level'\n # else:\n # highestlevel = \"VUS\"\n outf.write('\\t' + highestlevel)\n\n for l in dxLevels:\n outf.write('\\t')\n if sample in samplelevels and l in samplelevels[sample]:\n outf.write(\";\".join(samplelevels[sample][l]))\n\n outf.write('\\t' + highestdxlevel)\n\n for l in pxLevels:\n outf.write('\\t')\n if sample in samplelevels and l in samplelevels[sample]:\n outf.write(\";\".join(samplelevels[sample][l]))\n outf.write('\\t' + highestpxlevel)\n\n\n actionablecount = 0\n if sample in sampleactionablecount:\n actionablecount = len(sampleactionablecount[sample].keys())\n\n alterationcount = 0\n if sample in samplealterationcount:\n alterationcount = samplealterationcount[sample]\n\n drivercount = 0\n diagnosiscount = 0\n prognosiscount = 0\n drivermutations = \"\"\n if sample in sampledrivers:\n drivercount = len(sampledrivers[sample])\n drivermutations = \";\".join(sampledrivers[sample])\n if sample in samplemutationswithdiagnosis:\n diagnosiscount = len(samplemutationswithdiagnosis[sample])\n if sample in samplemutationswithprognosis:\n prognosiscount = len(samplemutationswithprognosis[sample])\n\n outf.write('\\t' + drivermutations)\n outf.write('\\t' + str(drivercount))\n outf.write('\\t' + str(actionablecount))\n outf.write('\\t' + str(diagnosiscount))\n outf.write('\\t' + str(prognosiscount))\n outf.write('\\t' + str(alterationcount))\n\n outf.write('\\n')\n\n outf.close()\n\ndef plotclinicalactionability(ax, annotatedclinicalfile, outfile, parameters):\n if os.path.isfile(outfile):\n os.remove(outfile)\n\n extlevels = levels + [\"ONCOGENIC\", \"VUS\"]\n if \"levels\" in parameters:\n extlevels = parameters[\"levels\"]\n\n with open(annotatedclinicalfile, 'rU') as clinfile:\n reader = csv.reader(clinfile, delimiter='\\t')\n headers = readheaders(reader)\n isample = geIndexOfHeader(headers, SAMPLE_HEADERS)\n ilevel = headers['HIGHEST_LEVEL']\n ioncogenic = headers['ONCOGENIC_MUTATIONS']\n icat = headers[parameters[\"catogerycolumn\"].upper()] #e.g. 
\"CANCER_TYPE\"\n\n catsamplecount = {}\n catactionablesamplecount = {}\n oncogenicsamplecount = {}\n levelcatsamplecount = {}\n\n for row in reader:\n sample = row[isample]\n if sampleidsfilter and sample not in sampleidsfilter:\n continue\n\n cat = row[icat]\n if cat not in catsamplecount:\n catsamplecount[cat] = 0\n catsamplecount[cat] += 1\n\n if cat not in catactionablesamplecount:\n catactionablesamplecount[cat] = 0\n oncogenicsamplecount[cat] = 0\n\n level = row[ilevel]\n oncogenic = row[ioncogenic]\n\n exlevel = level\n\n if level in extlevels:\n catactionablesamplecount[cat] += 1\n oncogenicsamplecount[cat] += 1\n elif len(oncogenic.strip()) > 0:\n oncogenicsamplecount[cat] += 1\n exlevel = \"ONCOGENIC\"\n else:\n exlevel = \"VUS\"\n\n if exlevel not in levelcatsamplecount:\n levelcatsamplecount[exlevel] = {}\n if cat not in levelcatsamplecount[exlevel]:\n levelcatsamplecount[exlevel][cat] = 0\n levelcatsamplecount[exlevel][cat] += 1\n\n\n # plot\n catarray = [] # cancer types\n catactionabilityarray = [] # actionabiligy percentages per cancer type\n catoncogenicarray = [] # actionabiligy percentages per cancer type\n for cat in catsamplecount:\n if catsamplecount[cat] >= parameters[\"thresholdcat\"]:\n catarray.append(cat)\n catactionabilityarray.append(catactionablesamplecount[cat] * 100.0 / catsamplecount[cat])\n catoncogenicarray.append(oncogenicsamplecount[cat] * 100.0 / catsamplecount[cat])\n\n ncat = len(catarray)\n order = reversed(sorted(range(ncat),key=lambda x:(catactionabilityarray[x],catoncogenicarray[x])))\n drawplot(ax, 'OncoKB Actionability', extlevels, levelcatsamplecount, catarray, catsamplecount, order, parameters[\"thresholdcat\"])\n\ndef plotimplications(ax, header, title, levels, annotatedclinicalfile, outfile, parameters):\n if os.path.isfile(outfile):\n os.remove(outfile)\n\n extlevels = levels\n if \"levels\" in parameters:\n extlevels = parameters[\"levels\"]\n\n with open(annotatedclinicalfile, 'rU') as clinfile:\n reader = csv.reader(clinfile, delimiter='\\t')\n headers = readheaders(reader)\n isample = headers['SAMPLE_ID']\n ilevel = headers[header]\n icat = headers[parameters[\"catogerycolumn\"].upper()]\n\n catsamplecount = {}\n catactionablesamplecount = {}\n levelcatsamplecount = {}\n\n for row in reader:\n sample = row[isample]\n if sampleidsfilter and sample not in sampleidsfilter:\n continue\n\n cat = row[icat]\n if cat not in catsamplecount:\n catsamplecount[cat] = 0\n catsamplecount[cat] += 1\n\n if cat not in catactionablesamplecount:\n catactionablesamplecount[cat] = 0\n\n level = row[ilevel]\n\n exlevel = level\n\n if level in extlevels:\n catactionablesamplecount[cat] += 1\n else:\n exlevel = \"Other\"\n\n if exlevel not in levelcatsamplecount:\n levelcatsamplecount[exlevel] = {}\n if cat not in levelcatsamplecount[exlevel]:\n levelcatsamplecount[exlevel][cat] = 0\n levelcatsamplecount[exlevel][cat] += 1\n\n\n # plot\n catarray = [] # cancer types\n catactionabilityarray = [] # actionabiligy percentages per cancer type\n for cat in catsamplecount:\n if catsamplecount[cat] >= parameters[\"thresholdcat\"]:\n catarray.append(cat)\n catactionabilityarray.append(catactionablesamplecount[cat] * 100.0 / catsamplecount[cat])\n\n ncat = len(catarray)\n order = reversed(sorted(range(ncat),key=lambda x:(catactionabilityarray[x])))\n drawplot(ax, title, extlevels, levelcatsamplecount, catarray, catsamplecount, order, parameters[\"thresholdcat\"])\n\ndef drawplot(ax, title, extlevels, levelcatsamplecount, catarray, catsamplecount, order, 
thresholdcat):\n\n # level colors\n levelcolors = {\n 'LEVEL_1': '#33A02C',\n 'LEVEL_2': '#1F78B4',\n 'LEVEL_3A': '#984EA3',\n 'LEVEL_3B': '#BE98CE',\n 'LEVEL_4': '#a8a8a8',\n 'LEVEL_R1': '#EE3424',\n 'LEVEL_R2': '#F79A92',\n 'LEVEL_R3': '#FCD6D3',\n\n 'LEVEL_Dx1': '#33A02C',\n 'LEVEL_Dx2': '#1F78B4',\n 'LEVEL_Dx3': '#984EA3',\n\n 'LEVEL_Px1': '#33A02C',\n 'LEVEL_Px2': '#1F78B4',\n 'LEVEL_Px3': '#984EA3',\n\n 'ONCOGENIC': '#ffdab9',\n 'VUS': '#d1d1d1',\n 'Other': 'grey'\n }\n\n # level legend\n levellegend = {\n 'LEVEL_1': 'Level 1',\n 'LEVEL_2': 'Level 2',\n 'LEVEL_3A': 'Level 3A',\n 'LEVEL_3B': 'Level 3B',\n 'LEVEL_4': 'Level 4',\n 'LEVEL_R1': 'Level R1',\n 'LEVEL_R2': 'Level R2',\n 'LEVEL_R3': 'Level R3',\n\n 'LEVEL_Dx1': 'Level Dx1',\n 'LEVEL_Dx2': 'Level Dx2',\n 'LEVEL_Dx3': 'Level Dx3',\n\n 'LEVEL_Px1': 'Level Px1',\n 'LEVEL_Px2': 'Level Px2',\n 'LEVEL_Px3': 'Level Px3',\n\n 'ONCOGENIC': 'Oncogenic, no level',\n 'VUS': 'VUS',\n 'Other': 'Other'\n }\n\n ncat = len(catarray)\n if ncat > 0:\n catarray = [catarray[i] for i in order]\n\n ind = range(ncat)\n\n legends = []\n plts = []\n accumlevelcancerperc = [0] * ncat\n for level in extlevels:\n if level not in levelcatsamplecount:\n continue\n\n levelcancerperc = [0] * ncat\n for k in ind:\n cat = catarray[k]\n if catsamplecount[cat] < thresholdcat:\n continue\n if cat in levelcatsamplecount[level]:\n levelcancerperc[k] = levelcatsamplecount[level][cat] * 100.0 / catsamplecount[cat]\n\n width = 0.75\n plts = [ax.bar(ind, levelcancerperc, width, color=levelcolors[level], bottom=accumlevelcancerperc)] + plts\n legends = [levellegend[level]] + legends\n accumlevelcancerperc = list(map(sum, zip(accumlevelcancerperc,levelcancerperc)))\n\n ax = plt.gca()\n ax.set_axisbelow(True)\n ax.set_aspect(0.1)\n\n ax.tick_params(axis='y', which='major', labelsize=6)\n ax.set_ylabel('% of samples', fontsize=6)\n ax.set_title(title, fontsize=8)\n ax.set_xticks([i+0.5 for i in ind])\n ax.set_xticklabels(catarray, rotation=60, ha=\"right\", fontsize=4)\n # plt.yticks(np.arange(0, 81, 10))\n ax.legend(plts, legends, fontsize=6, bbox_to_anchor=(1.01, 1), loc=\"upper left\")\n\n\noncokbcache = {}\n\n\ndef cacheannotated(annotatedfile, defaultCancerType, cancerTypeMap):\n with open(annotatedfile, 'rU') as infile:\n try:\n reader = csv.reader(infile, delimiter='\\t')\n headers = readheaders(reader)\n\n ihugo = geIndexOfHeader(headers, HUGO_HEADERS)\n iconsequence = geIndexOfHeader(headers, CONSEQUENCE_HEADERS)\n ihgvs = geIndexOfHeader(headers, HGVS_HEADERS)\n isample = geIndexOfHeader(headers, SAMPLE_HEADERS)\n istart = geIndexOfHeader(headers, PROTEIN_START_HEADERS)\n iend = geIndexOfHeader(headers, PROTEIN_END_HEADERS)\n icancertype = geIndexOfHeader(headers, CANCER_TYPE_HEADERS)\n imutationeffect = headers['MUTATION_EFFECT']\n icitations = headers['CITATIONS']\n ioncogenic = headers['ONCOGENIC']\n igeneannotated = headers[GENE_IN_ONCOKB_HEADER]\n ivariantannotated = headers[VARIANT_IN_ONCOKB_HEADER]\n\n for row in reader:\n try:\n hugo = row[ihugo]\n\n hgvs = row[ihgvs]\n if hgvs.startswith('p.'):\n hgvs = hgvs[2:]\n\n sample = row[isample]\n cancertype = defaultCancerType\n if icancertype >= 0:\n cancertype = row[icancertype]\n if sample in cancerTypeMap:\n cancertype = cancerTypeMap[sample]\n key = '-'.join([hugo, hgvs, cancertype])\n # oncokb = row[ioncokb]\n\n oncokbcache[key] = {}\n oncokbcache[key][GENE_IN_ONCOKB_HEADER] = row[igeneannotated]\n oncokbcache[key][VARIANT_IN_ONCOKB_HEADER] = row[ivariantannotated]\n oncokbcache[key]['mutation_effect'] = 
row[imutationeffect]\n oncokbcache[key]['citations'] = row[icitations]\n oncokbcache[key]['oncogenic'] = row[ioncogenic]\n for l in levels:\n il = headers[l]\n if il < len(row):\n oncokbcache[key][l] = row[il].split(',')\n else:\n oncokbcache[key][l] = []\n except Exception:\n pass\n except Exception:\n pass\n\ndef geIndexOfHeader(headers, keywords):\n for k in keywords:\n if k in headers:\n return headers[k]\n return -1\n\n\ndef pull3dhotspots(hugo, consequence, start, end):\n try:\n if hugo in _3dhotspots and consequence == \"missense_variant\":\n for i in range(int(start), int(end) + 1):\n if i in _3dhotspots[hugo]:\n return \"Y\"\n except TypeError:\n log.error(\"%s: %s-%s\" % (hugo, str(start), str(end)))\n return \"\"\n\ndef appendoncokbcitations(citations, pmids, abstracts):\n if citations is None:\n citations = []\n\n if pmids is not None:\n for pmid in pmids:\n if pmid not in citations:\n citations.append(pmid)\n\n if abstracts is not None:\n for abstract in abstracts:\n abstractStr = abstract['abstract'] + '(' + abstract['link'] + ')'\n if abstractStr not in citations:\n citations.append(abstractStr)\n\n return citations\n\n\nclass Gene:\n def __init__(self, hugo):\n self.hugoSymbol = hugo\n\n\nclass ProteinChangeQuery:\n def __init__(self, hugo, hgvs, cancertype, reference_genome=None, consequence=None, start=None, end=None):\n self.gene = Gene(hugo)\n self.alteration = hgvs\n if consequence is not None:\n self.consequence = consequence\n if start is not None:\n self.proteinStart = start\n if end is not None:\n self.proteinEnd = end\n self.tumorType = cancertype\n if reference_genome is not None:\n self.referenceGenome = reference_genome.value\n\n\nclass HGVSgQuery:\n def __init__(self, hgvsg, cancertype, reference_genome=None):\n self.hgvsg = hgvsg\n self.tumorType = cancertype\n if reference_genome is not None:\n self.referenceGenome = reference_genome.value\n\n\ndef gettumortypename(tumortype):\n if 'code' in tumortype and tumortype['code'] is not None and tumortype['code'] != '':\n return tumortype['code']\n elif 'name' in tumortype and tumortype['name'] is not None and tumortype['name'] != '':\n return tumortype['name']\n else:\n return tumortype['mainType']['name']\n\n\ndef getimplications(oncokbdata, levels, implications):\n for implication in implications:\n level = implication['levelOfEvidence']\n\n if level is not None:\n if level not in levels:\n log.info(level + \" is ignored\")\n else:\n if 'tumorType' in implication:\n oncokbdata[level].append(gettumortypename(implication['tumorType']))\n\n\nclass GenomicChangeQuery:\n def __init__(self, chromosome, start, end, ref_allele, var_allele, cancertype, reference_genome=None):\n self.genomicLocation = ','.join([chromosome, start, end, ref_allele, var_allele])\n self.tumorType = cancertype\n if reference_genome is not None:\n self.referenceGenome = reference_genome.value\n\nclass CNAQuery:\n def __init__(self, hugo, cnatype, cancertype):\n self.gene = Gene(hugo)\n self.copyNameAlterationType = cnatype.upper()\n self.tumorType = cancertype\n\nclass StructuralVariantQuery:\n def __init__(self, hugoA, hugoB, structural_variant_type, cancertype):\n\n # Assume all structural variants in the file are functional fusions\n is_functional_fusion = True\n if hugoA == hugoB:\n is_functional_fusion = False\n structural_variant_type = 'DELETION'\n\n self.geneA = Gene(hugoA)\n self.geneB = Gene(hugoB)\n self.functionalFusion = is_functional_fusion\n self.structuralVariantType = structural_variant_type.upper()\n self.tumorType = 
cancertype\n\n\ndef pull_protein_change_info(queries, annotate_hotspot):\n url = oncokbapiurl + '/annotate/mutations/byProteinChange'\n response = makeoncokbpostrequest(url, queries)\n annotation = []\n if response.status_code == 200:\n annotation = response.json()\n else:\n for query in queries:\n geturl = url + '?'\n geturl += 'hugoSymbol=' + query.gene.hugoSymbol\n geturl += '&alteration=' + query.alteration\n geturl += '&tumorType=' + query.tumorType\n if query.consequence:\n geturl += '&consequence=' + query.consequence\n if query.proteinStart and query.proteinStart != '\\\\N' and query.proteinStart != 'NULL' and query.proteinStart != '':\n geturl += '&proteinStart=' + str(query.proteinStart)\n if query.proteinEnd and query.proteinEnd != '\\\\N' and query.proteinEnd != 'NULL' and query.proteinEnd != '':\n geturl += '&proteinEnd=' + str(query.proteinEnd)\n getresponse = makeoncokbgetrequest(geturl)\n if getresponse.status_code == 200:\n annotation.append(getresponse.json())\n else:\n # if the api call fails, we should still push a None into the list\n # to keep the same length of the queries\n annotation.append(None)\n\n processed_annotation = []\n for query_annotation in annotation:\n processed_annotation.append(process_oncokb_annotation(query_annotation, annotate_hotspot))\n return processed_annotation\n\n\ndef pull_hgvsg_info(queries, annotate_hotspot):\n url = oncokbapiurl + '/annotate/mutations/byHGVSg'\n response = makeoncokbpostrequest(url, queries)\n annotation = []\n if response.status_code == 200:\n annotation = response.json()\n else:\n for query in queries:\n geturl = url + '?'\n geturl += 'hgvsg=' + query.hgvsg\n geturl += '&tumorType=' + query.tumorType\n getresponse = makeoncokbgetrequest(geturl)\n if getresponse.status_code == 200:\n annotation.append(getresponse.json())\n else:\n # if the api call fails, we should still push a None into the list\n # to keep the same length of the queries\n annotation.append(None)\n\n processed_annotation = []\n for query_annotation in annotation:\n processed_annotation.append(process_oncokb_annotation(query_annotation, annotate_hotspot))\n return processed_annotation\n\ndef pull_genomic_change_info(queries, annotate_hotspot):\n url = oncokbapiurl + '/annotate/mutations/byGenomicChange'\n response = makeoncokbpostrequest(url, queries)\n annotation = []\n if response.status_code == 200:\n annotation = response.json()\n else:\n for query in queries:\n geturl = url + '?'\n geturl += 'genomicLocation=' + query.genomicLocation\n geturl += '&tumorType=' + query.tumorType\n getresponse = makeoncokbgetrequest(geturl)\n if getresponse.status_code == 200:\n annotation.append(getresponse.json())\n else:\n # if the api call fails, we should still push a None into the list\n # to keep the same length of the queries\n annotation.append(None)\n\n processed_annotation = []\n for query_annotation in annotation:\n processed_annotation.append(process_oncokb_annotation(query_annotation, annotate_hotspot))\n return processed_annotation\n\n\ndef pull_cna_info(queries):\n url = oncokbapiurl + '/annotate/copyNumberAlterations?'\n\n response = makeoncokbpostrequest(url, queries)\n annotation = []\n if response.status_code == 200:\n annotation = response.json()\n else:\n for query in queries:\n geturl = url + '?'\n geturl += 'hugoSymbol=' + query.gene.hugoSymbol\n geturl += '&copyNameAlterationType=' + query.copyNameAlterationType\n geturl += '&tumorType=' + query.tumorType\n getresponse = makeoncokbgetrequest(geturl)\n if getresponse.status_code == 200:\n 
annotation.append(getresponse.json())\n else:\n # if the api call fails, we should still push a None into the list\n # to keep the same length of the queries\n annotation.append(None)\n\n processed_annotation = []\n for query_annotation in annotation:\n processed_annotation.append(process_oncokb_annotation(query_annotation, annotate_hotspot=False))\n return processed_annotation\n\n\n\ndef pull_structural_variant_info(queries):\n url = oncokbapiurl + '/annotate/structuralVariants'\n\n response = makeoncokbpostrequest(url, queries)\n annotation = []\n if response.status_code == 200:\n annotation = response.json()\n else:\n for query in queries:\n geturl = url + '?'\n geturl += 'hugoSymbolA=' + query.geneA.hugoSymbol\n geturl += '&hugoSymbolB=' + query.geneB.hugoSymbol\n geturl += '&structuralVariantType=' + query.structuralVariantType\n geturl += '&isFunctionalFusion=' + str(query.functionalFusion).upper() if type(query.functionalFusion) is bool else query.functionalFusion\n geturl += '&tumorType=' + query.tumorType\n\n getresponse = makeoncokbgetrequest(geturl)\n if getresponse.status_code == 200:\n annotation.append(getresponse.json())\n else:\n # if the api call fails, we should still push a None into the list\n # to keep the same length of the queries\n annotation.append(None)\n\n processed_annotation = []\n for query_annotation in annotation:\n processed_annotation.append(process_oncokb_annotation(query_annotation, annotate_hotspot=False))\n return processed_annotation\n\n\n\ndef process_oncokb_annotation(annotation, annotate_hotspot):\n if annotation is None:\n return None\n\n oncokbdata = {}\n for l in levels:\n oncokbdata[l] = []\n for l in dxLevels:\n oncokbdata[l] = []\n for l in pxLevels:\n oncokbdata[l] = []\n\n oncokbdata[GENE_IN_ONCOKB_HEADER] = GENE_IN_ONCOKB_DEFAULT\n oncokbdata[VARIANT_IN_ONCOKB_HEADER] = VARIANT_IN_ONCOKB_DEFAULT\n oncokbdata['mutation_effect'] = \"\"\n oncokbdata['citations'] = []\n oncokbdata['oncogenic'] = \"\"\n\n try:\n # oncogenic\n oncokbdata[GENE_IN_ONCOKB_HEADER] = GENE_IN_ONCOKB_DEFAULT if annotation['geneExist'] is None else str(annotation['geneExist'])\n oncokbdata[VARIANT_IN_ONCOKB_HEADER] = VARIANT_IN_ONCOKB_DEFAULT if annotation['variantExist'] is None else str(annotation['variantExist'])\n\n # oncogenic\n oncokbdata['oncogenic'] = annotation['oncogenic']\n\n # if not evidences['geneExist'] or (not evidences['variantExist'] and not evidences['alleleExist']):\n # return ''\n\n # mutation effect\n if (annotation['mutationEffect'] is not None):\n oncokbdata['mutation_effect'] = annotation['mutationEffect']['knownEffect']\n oncokbdata['citations'] = appendoncokbcitations(oncokbdata['citations'],\n annotation['mutationEffect']['citations']['pmids'],\n annotation['mutationEffect']['citations']['abstracts'])\n\n # oncogenic\n oncokbdata['oncogenic'] = annotation['oncogenic']\n\n # get treatment\n for treatment in annotation['treatments']:\n level = treatment['level']\n\n if level not in levels:\n log.info(\"%s is ignored\" % level)\n # oncokbdata[level].append('')\n else:\n drugs = treatment['drugs']\n\n oncokbdata['citations'] = appendoncokbcitations(oncokbdata['citations'], treatment['pmids'],\n treatment['abstracts'])\n\n if len(drugs) == 0:\n oncokbdata[level].append('[NOT SPECIFIED]')\n else:\n drugnames = []\n for drug in drugs:\n drugnames.append(drug['drugName'])\n oncokbdata[level].append('+'.join(drugnames))\n if annotation['diagnosticImplications'] is not None:\n getimplications(oncokbdata, dxLevels, 
annotation['diagnosticImplications'])\n\n if annotation['prognosticImplications'] is not None:\n getimplications(oncokbdata, pxLevels, annotation['prognosticImplications'])\n\n oncokbdata['highestDiagnosticImplicationLevel'] = annotation['highestDiagnosticImplicationLevel']\n oncokbdata['highestPrognosticImplicationLevel'] = annotation['highestPrognosticImplicationLevel']\n except:\n log.error(\"error when processing %s \" % annotation)\n # sys.exit()\n\n\n ret = []\n if annotate_hotspot:\n if annotation['hotspot']:\n ret.append('Y')\n else:\n ret.append('')\n\n _3dhotspot = pull3dhotspots(annotation['query']['hugoSymbol'], annotation['query']['consequence'], annotation['query']['proteinStart'], annotation['query']['proteinEnd'])\n ret.append(_3dhotspot)\n\n ret.append(oncokbdata[GENE_IN_ONCOKB_HEADER])\n ret.append(oncokbdata[VARIANT_IN_ONCOKB_HEADER])\n ret.append(oncokbdata['mutation_effect'])\n ret.append(oncokbdata['oncogenic'])\n for l in levels:\n ret.append(','.join(oncokbdata[l]))\n ret.append(gethighestsensitivitylevel(oncokbdata))\n ret.append(';'.join(oncokbdata['citations']))\n for l in dxLevels:\n ret.append(','.join(oncokbdata[l]))\n ret.append(gethighestDxPxlevel(dxLevels, [oncokbdata['highestDiagnosticImplicationLevel']]))\n\n for l in pxLevels:\n ret.append(','.join(oncokbdata[l]))\n ret.append(gethighestDxPxlevel(pxLevels, [oncokbdata['highestPrognosticImplicationLevel']]))\n\n return ret\n\n\ndef gethighestsensitivitylevel(oncokbdata):\n r1 = set()\n if \"LEVEL_R1\" in oncokbdata:\n r1 = set(oncokbdata[\"LEVEL_R1\"])\n for l in levels:\n if l.startswith(\"LEVEL_R\") or l not in oncokbdata or oncokbdata[l] == '':\n continue\n if not r1.issuperset(set(oncokbdata[l])):\n return l\n return \"\"\n\ndef gethighestDxPxlevel(levels, oncokbdata):\n for l in levels:\n if l not in oncokbdata:\n continue\n return l\n return \"\"\n\ndef gettreatments(evidence):\n treatments = []\n for t in evidence['treatments']:\n drugs = []\n for d in t['drugs']:\n drugs.append(d['drugName'])\n treatments.append('+'.join(drugs))\n return treatments\n\n\ndef readCancerTypes(clinicalFile, data):\n with open(clinicalFile, 'rU') as infile:\n reader = csv.reader(infile, delimiter='\\t')\n headers = readheaders(reader)\n\n iSample = geIndexOfHeader(headers, ['SAMPLE_ID'])\n iCancerType = geIndexOfHeader(headers, ['ONCOTREE_CODE', 'CANCER_TYPE'])\n\n for row in reader:\n data[row[iSample]] = row[iCancerType]\n\n return data\n\n\ndef readheaders(reader):\n headers = {}\n headers[\"length\"] = 0\n for row in reader:\n if not row[0].startswith(\"#\"):\n headers[\"^-$\"] = '\\t'.join(row) # the whole line\n headers[\"length\"] = len(row)\n i = 0\n for h in row:\n headers[h.upper()] = i\n headers[h] = i\n i = i + 1\n break\n return headers\n\ndef padrow(row, n):\n nr = len(row)\n if nr == n:\n return row\n\n if nr < n:\n return row + [\"\"] * (n - len(row))\n\n else: # nr<n\n return row[0:n]\n" ]
[ [ "matplotlib.use" ], [ "matplotlib.use", "matplotlib.pyplot.gca" ] ]
JunweiLiang/Object_Detection_Tracking
[ "f86caaec97669a6da56f1b402cca4e179a85d2f0" ]
[ "tmot/matching.py" ]
[ "import numpy as np\r\nimport scipy\r\nfrom scipy.spatial.distance import cdist\r\nimport lap # 0.4.0\r\n\r\nfrom cython_bbox import bbox_overlaps as bbox_ious\r\nfrom . import kalman_filter\r\n\r\ndef merge_matches(m1, m2, shape):\r\n O,P,Q = shape\r\n m1 = np.asarray(m1)\r\n m2 = np.asarray(m2)\r\n\r\n M1 = scipy.sparse.coo_matrix((np.ones(len(m1)), (m1[:, 0], m1[:, 1])), shape=(O, P))\r\n M2 = scipy.sparse.coo_matrix((np.ones(len(m2)), (m2[:, 0], m2[:, 1])), shape=(P, Q))\r\n\r\n mask = M1*M2\r\n match = mask.nonzero()\r\n match = list(zip(match[0], match[1]))\r\n unmatched_O = tuple(set(range(O)) - set([i for i, j in match]))\r\n unmatched_Q = tuple(set(range(Q)) - set([j for i, j in match]))\r\n\r\n return match, unmatched_O, unmatched_Q\r\n\r\n\r\ndef linear_assignment(cost_matrix, thresh):\r\n if cost_matrix.size == 0:\r\n return np.empty((0, 2), dtype=int), tuple(range(cost_matrix.shape[0])), tuple(range(cost_matrix.shape[1]))\r\n matches, unmatched_a, unmatched_b = [], [], []\r\n cost, x, y = lap.lapjv(cost_matrix, extend_cost=True, cost_limit=thresh)\r\n for ix, mx in enumerate(x):\r\n if mx >= 0:\r\n matches.append([ix, mx])\r\n unmatched_a = np.where(x < 0)[0]\r\n unmatched_b = np.where(y < 0)[0]\r\n matches = np.asarray(matches)\r\n return matches, unmatched_a, unmatched_b\r\n\r\n\r\ndef ious(atlbrs, btlbrs):\r\n \"\"\"\r\n Compute cost based on IoU\r\n :type atlbrs: list[tlbr] | np.ndarray\r\n :type atlbrs: list[tlbr] | np.ndarray\r\n\r\n :rtype ious np.ndarray\r\n \"\"\"\r\n ious = np.zeros((len(atlbrs), len(btlbrs)), dtype=np.float)\r\n if ious.size == 0:\r\n return ious\r\n\r\n ious = bbox_ious(\r\n np.ascontiguousarray(atlbrs, dtype=np.float),\r\n np.ascontiguousarray(btlbrs, dtype=np.float)\r\n )\r\n\r\n return ious\r\n\r\n\r\ndef iou_distance(atracks, btracks):\r\n \"\"\"\r\n Compute cost based on IoU\r\n :type atracks: list[STrack]\r\n :type btracks: list[STrack]\r\n\r\n :rtype cost_matrix np.ndarray\r\n \"\"\"\r\n\r\n if (len(atracks)>0 and isinstance(atracks[0], np.ndarray)) or (len(btracks) > 0 and isinstance(btracks[0], np.ndarray)):\r\n atlbrs = atracks\r\n btlbrs = btracks\r\n else:\r\n atlbrs = [track.tlbr for track in atracks]\r\n btlbrs = [track.tlbr for track in btracks]\r\n _ious = ious(atlbrs, btlbrs)\r\n cost_matrix = 1 - _ious\r\n\r\n return cost_matrix\r\n\r\ndef embedding_distance(tracks, detections, metric='cosine'):\r\n \"\"\"\r\n :param tracks: list[STrack]\r\n :param detections: list[BaseTrack]\r\n :param metric:\r\n :return: cost_matrix np.ndarray\r\n \"\"\"\r\n\r\n cost_matrix = np.zeros((len(tracks), len(detections)), dtype=np.float)\r\n if cost_matrix.size == 0:\r\n return cost_matrix\r\n det_features = np.asarray([track.curr_feat for track in detections], dtype=np.float)\r\n track_features = np.asarray([track.smooth_feat for track in tracks], dtype=np.float)\r\n cost_matrix = np.maximum(0.0, cdist(track_features, det_features)) # Nomalized features\r\n\r\n return cost_matrix\r\n\r\n\r\ndef fuse_motion(kf, cost_matrix, tracks, detections, only_position=False, lambda_=0.98):\r\n if cost_matrix.size == 0:\r\n return cost_matrix\r\n gating_dim = 2 if only_position else 4\r\n gating_threshold = kalman_filter.chi2inv95[gating_dim]\r\n measurements = np.asarray([det.to_xyah() for det in detections])\r\n for row, track in enumerate(tracks):\r\n gating_distance = kf.gating_distance(\r\n track.mean, track.covariance, measurements, only_position, metric='maha')\r\n cost_matrix[row, gating_distance > gating_threshold] = np.inf\r\n cost_matrix[row] = 
lambda_ * cost_matrix[row] + (1-lambda_)* gating_distance\r\n return cost_matrix\r\n" ]
[ [ "scipy.spatial.distance.cdist", "numpy.empty", "numpy.ascontiguousarray", "numpy.asarray", "numpy.where" ] ]
cailab-tamu/scTenifoldXct
[ "d25ded8dfb7f2951217a30ab71eccd6b060178f6" ]
[ "tests/test_stat.py" ]
[ "import pytest\n\nimport itertools\n\nimport pandas as pd\nimport numpy as np\n\nfrom scTenifoldXct.core import null_test\n\n\ndef generate_fake_df_nn(n_ligand=3000, n_receptors=3000, n_cands=200):\n gene_names = [f\"GENE{i}\" for i in range(max(n_ligand, n_receptors))]\n iteration = itertools.product(gene_names, gene_names)\n inds, ligands, receptors = [], [], []\n for i, j in iteration:\n inds.append(f\"{i}_{j}\")\n ligands.append(i)\n receptors.append(j)\n df = pd.DataFrame({\"ligand\": ligands,\n \"receptor\": receptors,\n \"dist\": np.random.chisquare(1, (n_ligand * n_receptors,)),\n \"correspondence\": np.random.lognormal(0, 4, size=(n_ligand * n_receptors,))},\n index=inds)\n return df, np.random.choice(df.index, size=(n_cands,), replace=False)\n\n\[email protected](\"df_nn,candidates\", [\n generate_fake_df_nn(3000, 3000, 200),\n generate_fake_df_nn(1000, 1000, 200),\n])\[email protected](\"filter_zeros\", [True])\ndef test_null_test(df_nn, candidates, filter_zeros):\n null_test(df_nn=df_nn, candidates=candidates, filter_zeros=filter_zeros)\n\n\ndef test_chi2_test(xct_skin):\n xct_skin.train_nn(n_steps= 1000, lr = 0.001)\n xct_skin.chi2_test(dof=3, pval=0.05, cal_FDR=True, plot_result=True)" ]
[ [ "numpy.random.chisquare", "numpy.random.lognormal", "numpy.random.choice" ] ]
andreyyec/Texas_Tech_AI
[ "e4e8e41c65b41a1a684f1f65d21cf5427abdb046" ]
[ "practices/week6/assignment_exercise_3.py" ]
[ "import numpy as np\nimport pandas as pd\nimport pickle\nimport tensorflow as tf\nimport sklearn.metrics\nimport matplotlib.pyplot as plt\n\n# Load the training and test data from the Pickle file\nwith open(\"../datasets/credit_card_default_dataset.pickle\", \"rb\") as f:\n train_data, train_labels, test_data, test_labels = pickle.load(f)\n\n# Get some lengths\nn_inputs = train_data.shape[1]\nnsamples = train_data.shape[0]\n\n# Training constants\nn_nodes_l1 = 5\nbatch_size = 32\nlearning_rate = .001 # Initial rate for Adam\nn_epochs = 1000\neval_step = 5\n\nn_batches = int(np.ceil(nsamples / batch_size))\n\n# Print the configuration\nprint(\"Batch size: {} Num batches: {} Num epochs: {} Learning rate: {}\".format(batch_size, n_batches, n_epochs, learning_rate))\nprint(\"Num nodes in L1: {} Activation function: ELU\".format(n_nodes_l1))\n\n# TensorFlow constants\n\n# Input vector placeholders. Length is unspecified.\nX = tf.placeholder(tf.float32, shape=(None, n_inputs), name=\"X\")\nY = tf.placeholder(tf.float32, shape=(None, 1), name=\"Y\")\n\n# Hidden layer 1:\n# Inputs: n_inputs\n# Outputs: n_nodes_l1\n# Activation: ELU\nW_L1 = tf.Variable(tf.truncated_normal([n_inputs, n_nodes_l1], stddev=2/np.sqrt(n_inputs)))\nb_L1 = tf.Variable(tf.zeros(n_nodes_l1))\nY_L1 = tf.nn.elu(tf.add(tf.matmul(X, W_L1), b_L1))\n#Y_L1 = tf.nn.relu(tf.add(tf.matmul(X, W_L1), b_L1))\n\n# Output layer:\n# Inputs: n_nodes_l1\n# Outputs: 1\n# Activation: logistic\nW_L2 = tf.Variable(tf.truncated_normal([n_nodes_l1, 1], stddev=1/np.sqrt(n_nodes_l1)))\nb_L2 = tf.Variable(tf.zeros(1))\nY_L2_linear = tf.add(tf.matmul(Y_L1, W_L2), b_L2)\n\n# Cost function, plus the sigmoid part of the prediction\ncost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(\n logits = Y_L2_linear, labels = Y))\n\n# Optimize cost through gradient descent\n#optimizer = tf.train.GradientDescentOptimizer(learning_rate)\noptimizer = tf.train.AdamOptimizer(learning_rate)\nupdate_op = optimizer.minimize(cost)\n\n# Prediction probability values\nY_pred_proba_calc = tf.nn.sigmoid(Y_L2_linear)\n\n# Create TensorFlow session and initialize it\nsess = tf.Session()\ninit = tf.global_variables_initializer()\nsess.run(init)\n\n# Initialize lists to hold the history of metrics per epoch\ntrn_cost_hist = []\ntest_cost_hist = []\ntrn_auroc_hist = []\ntest_auroc_hist = []\n\nepoch = 0\nwhile epoch < n_epochs:\n batch = 0\n\n # Save a vector of cost values per batch\n cost_vals = np.zeros(n_batches)\n\n while batch < n_batches:\n\n # Select the data for the next batch\n dataidx = batch * batch_size\n X_batch = train_data[dataidx:(dataidx+batch_size)]\n Y_batch = train_labels[dataidx:(dataidx+batch_size)].values.reshape(-1,1)\n feed_dict = {X: X_batch, Y: Y_batch}\n\n # Run one iteration of the computation session to update coefficients\n _, cost_vals[batch] = sess.run([update_op, cost], feed_dict=feed_dict)\n batch += 1\n\n # Evaluate and print the results so far\n if (epoch % eval_step == 0):\n\n # Compute the average cost for all mini-batches in this epoch\n trn_cost_avg = np.mean(cost_vals)\n\n # Compute the ROC AUC against the full training data\n feed_dict = {X: train_data, Y: train_labels.values.reshape(-1,1)}\n Y_pred_proba_train = sess.run(Y_pred_proba_calc, feed_dict=feed_dict)\n train_auroc = sklearn.metrics.roc_auc_score(train_labels, Y_pred_proba_train)\n\n # Compute the cost and ROC AUC against the test data\n feed_dict = {X: test_data, Y: test_labels.values.reshape(-1,1)}\n Y_pred_proba_test = sess.run(Y_pred_proba_calc, 
feed_dict=feed_dict)\n test_cost = sess.run(cost, feed_dict=feed_dict)\n test_auroc = sklearn.metrics.roc_auc_score(test_labels, Y_pred_proba_test)\n\n print(\"Epoch: {:4d} trn_cost: {:.5f} test_cost: {:.5f} trn_auroc: {:.4f} test_auroc: {:.4f}\".\\\n format(epoch, trn_cost_avg, test_cost, train_auroc, test_auroc))\n\n # Save the metrics to the history\n trn_cost_hist.append(trn_cost_avg)\n test_cost_hist.append(test_cost)\n trn_auroc_hist.append(train_auroc)\n test_auroc_hist.append(test_auroc)\n\n epoch += 1\n\n# Print the best results (as if we had done early stopping)\nepoch_hist = [i for i in range(0, n_epochs, eval_step)]\n\nbest_idx = test_auroc_hist.index(max(test_auroc_hist))\nprint(\"Max test ROC AUC: {:.4f} at epoch: {}\".format(test_auroc_hist[best_idx], epoch_hist[best_idx]))\n\nbest_idx = trn_auroc_hist.index(max(trn_auroc_hist))\nprint(\"Max train ROC AUC: {:.4f} at epoch: {}\".format(trn_auroc_hist[best_idx], epoch_hist[best_idx]))\n\nbest_idx = test_cost_hist.index(min(test_cost_hist))\nprint(\"Min test cost: {:.5f} at epoch: {}\".format(test_cost_hist[best_idx], epoch_hist[best_idx]))\n\nbest_idx = trn_cost_hist.index(min(trn_cost_hist))\nprint(\"Min train cost: {:.5f} at epoch: {}\".format(trn_cost_hist[best_idx], epoch_hist[best_idx]))\n\n# Plot the metrics history\nplt.plot(epoch_hist, trn_cost_hist, \"b\")\nplt.plot(epoch_hist, test_cost_hist, \"r\")\nplt.xlabel(\"epoch\")\nplt.ylabel(\"cost\")\nplt.title(\"Cost vs. epoch\")\nplt.figure()\nplt.plot(epoch_hist, trn_auroc_hist, \"b\")\nplt.plot(epoch_hist, test_auroc_hist, \"r\")\nplt.xlabel(\"epoch\")\nplt.ylabel(\"ROC AUC\")\nplt.title(\"ROC AUC vs. epoch\")\nplt.show()\n" ]
[ [ "tensorflow.nn.sigmoid", "tensorflow.nn.sigmoid_cross_entropy_with_logits", "tensorflow.placeholder", "tensorflow.zeros", "numpy.ceil", "numpy.zeros", "tensorflow.global_variables_initializer", "matplotlib.pyplot.figure", "tensorflow.train.AdamOptimizer", "tensorflow.matmul", "matplotlib.pyplot.title", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "tensorflow.Session", "numpy.sqrt", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel", "numpy.mean" ] ]
pengfei99/openfood
[ "2b65af02ce34bf8193d357ef3661da749d2d9671" ]
[ "siamesenetwork/siamesePreTrainedEmbeddings.py" ]
[ "# !/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nDefine the siamese network for one-shot learning,\nfor french short labels\n02/06/2021\n@author: milena-git, from jeremylhour courtesy\n\"\"\"\nimport torch\nimport torch.nn as nn\n\n\ndef _createEmbeddingLayer(weights_matrix, non_trainable=False):\n \"\"\"\n _createEmbeddingLayer:\n create a layer from pre-trained embeddings\n\n @param weights_matrix (np.array):\n @param non_trainable (bool):\n \"\"\"\n weights_matrix = torch.tensor(weights_matrix)\n num_embeddings, embedding_dim = weights_matrix.size()\n emb_layer = nn.Embedding(num_embeddings, embedding_dim)\n emb_layer.load_state_dict({'weight': weights_matrix})\n if non_trainable:\n emb_layer.weight.requires_grad = False\n return emb_layer, num_embeddings, embedding_dim\n\n\nclass SiamesePreTrainedQuadruplet(nn.Module):\n\n def __init__(self, weights_matrix, length, dim=100):\n \"\"\"\n Initialize the siamese network with pre-trained embeddings\n\n @param weights_matrix (torch.tensor):\n @param length (int): longueur des inputs\n @param dim (int): dimension of the output embedding space\n \"\"\"\n super(SiamesePreTrainedQuadruplet, self).__init__()\n self.dim = dim\n self.length = length\n self.embedding = nn.Embedding.from_pretrained(weights_matrix, padding_idx=0)\n self.fc1 = nn.Sequential(\n nn.Linear(self.length * weights_matrix.size()[1], 1000),\n nn.ReLU(inplace=True),\n nn.Linear(1000, 800),\n nn.Dropout(0.2),\n nn.Linear(800, 500),\n nn.Dropout(0.2),\n nn.Linear(500, self.dim)\n )\n\n def forward_once(self, x):\n \"\"\"\n Run one of the network on a single image\n\n @param x (): img output from SiameseNetworkDataset\n \"\"\"\n embedded = self.embedding(x)\n embedded = torch.reshape(embedded, (embedded.size()[0], embedded.size()[1] * embedded.size()[2]))\n output = self.fc1(embedded)\n return output\n\n def forward(self, anchor, positive, negative1, negative2):\n \"\"\"\n Run the model forward, by applying forward_once to each inputs\n Main forward that is used during train, wraps forward_once().\n\n @param anchor, positive, negative1, negative2 (): output from SiameseNetworkDataset\n \"\"\"\n anchor_o, positive_o, negative1_o, negative2_o = self.forward_once(anchor), self.forward_once(\n positive), self.forward_once(negative1), self.forward_once(negative2)\n return anchor_o, positive_o, negative1_o, negative2_o\n\n\nif __name__ == '__main__':\n pass\n" ]
[ [ "torch.nn.Linear", "torch.tensor", "torch.nn.Embedding.from_pretrained", "torch.nn.Embedding", "torch.nn.ReLU", "torch.nn.Dropout" ] ]
DwijayDS/fastestimator
[ "6061a4fbbeb62a2194ef82ba8017f651710d0c65", "9b288cb2bd870f971ec4cee09d0b3205e1316a94", "9b288cb2bd870f971ec4cee09d0b3205e1316a94" ]
[ "test/PR_test/unit_test/op/numpyop/univariate/test_autocontrast.py", "fastestimator/trace/metric/mcc.py", "fastestimator/backend/_iwd.py" ]
[ "# Copyright 2021 The FastEstimator Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport unittest\n\nimport numpy as np\n\nfrom fastestimator.op.numpyop.univariate import AutoContrast\n\n\nclass TestAutoContrast(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls.single_input = [np.random.randint(0, 256, size=(28, 28, 3)).astype(np.uint8)]\n cls.single_output_shape = (28, 28, 3)\n cls.multi_input = [\n np.random.randint(0, 256, size=(28, 28, 3)).astype(np.uint8),\n np.random.randint(0, 256, size=(28, 28, 3)).astype(np.uint8)\n ]\n cls.multi_output_shape = (28, 28, 3)\n\n def test_single_input(self):\n autocontrast = AutoContrast(inputs='x', outputs='x')\n output = autocontrast.forward(data=self.single_input, state={})\n with self.subTest('Check output type'):\n self.assertEqual(type(output), list)\n with self.subTest('Check output image shape'):\n self.assertEqual(output[0].shape, self.single_output_shape)\n\n def test_multi_input(self):\n autocontrast = AutoContrast(inputs='x', outputs='x')\n output = autocontrast.forward(data=self.multi_input, state={})\n with self.subTest('Check output type'):\n self.assertEqual(type(output), list)\n with self.subTest('Check output list length'):\n self.assertEqual(len(output), 2)\n for img_output in output:\n with self.subTest('Check output image shape'):\n self.assertEqual(img_output.shape, self.multi_output_shape)\n", "# Copyright 2019 The FastEstimator Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nfrom typing import Union, Iterable\n\nimport numpy as np\nfrom sklearn.metrics import matthews_corrcoef\n\nfrom fastestimator.trace.meta._per_ds import per_ds\nfrom fastestimator.trace.trace import Trace\nfrom fastestimator.util.data import Any, Data, Dict\nfrom fastestimator.util.traceability_util import traceable\nfrom fastestimator.util.util import to_number\n\n\n@per_ds\n@traceable()\nclass MCC(Trace):\n \"\"\"A trace which computes the Matthews Correlation Coefficient for a given set of predictions.\n\n This is a preferable metric to accuracy or F1 score since it automatically corrects for class imbalances and does\n not depend on the choice of target class (https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6941312/). 
Ideal value is 1,\n a value of 0 means your predictions are completely uncorrelated with the true data. A value less than zero implies\n anti-correlation (you should invert your classifier predictions in order to do better).\n\n Args:\n true_key: Name of the key that corresponds to ground truth in the batch dictionary.\n pred_key: Name of the key that corresponds to predicted score in the batch dictionary.\n mode: What mode(s) to execute this Trace in. For example, \"train\", \"eval\", \"test\", or \"infer\". To execute\n regardless of mode, pass None. To execute in all modes except for a particular one, you can pass an argument\n like \"!infer\" or \"!train\".\n ds_id: What dataset id(s) to execute this Trace in. To execute regardless of ds_id, pass None. To execute in all\n ds_ids except for a particular one, you can pass an argument like \"!ds1\".\n output_name: What to call the output from this trace (for example in the logger output).\n per_ds: Whether to automatically compute this metric individually for every ds_id it runs on, in addition to\n computing an aggregate across all ds_ids on which it runs. This is automatically False if `output_name`\n contains a \"|\" character.\n **kwargs: Additional keyword arguments that pass to sklearn.metrics.matthews_corrcoef()\n\n Raises:\n ValueError: One of [\"y_true\", \"y_pred\"] argument exists in `kwargs`.\n \"\"\"\n def __init__(self,\n true_key: str,\n pred_key: str,\n mode: Union[None, str, Iterable[str]] = (\"eval\", \"test\"),\n ds_id: Union[None, str, Iterable[str]] = None,\n output_name: str = \"mcc\",\n per_ds: bool = True,\n **kwargs) -> None:\n MCC.check_kwargs(kwargs)\n super().__init__(inputs=(true_key, pred_key), mode=mode, outputs=output_name, ds_id=ds_id)\n self.kwargs = kwargs\n self.y_true = []\n self.y_pred = []\n self.per_ds = per_ds\n\n @property\n def true_key(self) -> str:\n return self.inputs[0]\n\n @property\n def pred_key(self) -> str:\n return self.inputs[1]\n\n def on_epoch_begin(self, data: Data) -> None:\n self.y_true = []\n self.y_pred = []\n\n def on_batch_end(self, data: Data) -> None:\n y_true, y_pred = to_number(data[self.true_key]), to_number(data[self.pred_key])\n if y_true.shape[-1] > 1 and y_true.ndim > 1:\n y_true = np.argmax(y_true, axis=-1)\n if y_pred.shape[-1] > 1 and y_pred.ndim > 1:\n y_pred = np.argmax(y_pred, axis=-1)\n else:\n y_pred = np.round(y_pred)\n assert y_pred.size == y_true.size\n self.y_true.extend(y_true)\n self.y_pred.extend(y_pred)\n\n def on_epoch_end(self, data: Data) -> None:\n data.write_with_log(self.outputs[0], matthews_corrcoef(y_true=self.y_true, y_pred=self.y_pred, **self.kwargs))\n\n @staticmethod\n def check_kwargs(kwargs: Dict[str, Any]) -> None:\n \"\"\"Check if `kwargs` has any blacklist argument and raise an error if it does.\n\n Args:\n kwargs: Keywork arguments to be examined.\n\n Raises:\n ValueError: One of [\"y_true\", \"y_pred\"] argument exists in `kwargs`.\n \"\"\"\n blacklist = [\"y_true\", \"y_pred\"]\n illegal_kwarg = [x for x in blacklist if x in kwargs]\n if illegal_kwarg:\n raise ValueError(\n f\"Arguments {illegal_kwarg} cannot exist in kwargs, since FastEstimator will later directly use them in\"\n \" sklearn.metrics.matthews_corrcoef()\")\n", "# Copyright 2019 The FastEstimator Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport math\nfrom typing import Optional, TypeVar\n\nimport numpy as np\nimport tensorflow as tf\nimport torch\n\nfrom fastestimator.backend._maximum import maximum\nfrom fastestimator.backend._reduce_sum import reduce_sum\nfrom fastestimator.backend._reshape import reshape\nfrom fastestimator.backend._tensor_pow import tensor_pow\nfrom fastestimator.backend._to_tensor import to_tensor\nfrom fastestimator.util.util import TENSOR_TO_NP_DTYPE\n\nTensor = TypeVar('Tensor', tf.Tensor, torch.Tensor, np.ndarray)\n\n\ndef iwd(tensor: Tensor,\n power: float = 1.0,\n max_prob: float = 0.95,\n pairwise_distance: float = 1.0,\n eps: Optional[Tensor] = None) -> Tensor:\n \"\"\"Compute the Inverse Weighted Distance from the given input.\n\n This can be used as an activation function for the final layer of a neural network instead of softmax. For example,\n instead of: model.add(layers.Dense(classes, activation='softmax')), you could use:\n model.add(layers.Dense(classes, activation=lambda x: iwd(tf.nn.sigmoid(x))))\n\n This method can be used with Numpy data:\n ```python\n n = np.array([[0.5]*5, [0]+[1]*4])\n b = fe.backend.iwd(n) # [[0.2, 0.2, 0.2, 0.2, 0.2], [0.95, 0.0125, 0.0125, 0.0125, 0.0125]]\n ```\n\n This method can be used with TensorFlow tensors:\n ```python\n t = tf.constant([[0.5]*5, [0]+[1]*4])\n b = fe.backend.iwd(n) # [[0.2, 0.2, 0.2, 0.2, 0.2], [0.95, 0.0125, 0.0125, 0.0125, 0.0125]]\n ```\n\n This method can be used with PyTorch tensors:\n ```python\n p = torch.tensor([[0.5]*5, [0]+[1]*4])\n b = fe.backend.iwd(n) # [[0.2, 0.2, 0.2, 0.2, 0.2], [0.95, 0.0125, 0.0125, 0.0125, 0.0125]]\n ```\n\n Args:\n tensor: The input value. Should be of shape (Batch, C) where every element in C corresponds to a (non-negative)\n distance to a target class.\n power: The power to raise the inverse distances to. 1.0 results in a fairly intuitive probability output. Larger\n powers can widen regions of certainty, whereas values between 0 and 1 can widen regions of uncertainty.\n max_prob: The maximum probability to assign to a class estimate when it is distance zero away from the target.\n For numerical stability this must be less than 1.0. We have found that using smaller values like 0.95 can\n lead to natural adversarial robustness.\n pairwise_distance: The distance to any other class when the distance to a target class is zero. For example, if\n you have a perfect match for class 'a', what distance should be reported to class 'b'. If you have a metric\n where this isn't constant, just use an approximate expected distance. In that case `max_prob` will only give\n you approximate control over the true maximum probability.\n eps: The numeric stability constant to be used when d approaches zero. If None then it will be computed using\n `max_prob` and `pairwise_distance`. 
If not None, then `max_prob` and `pairwise_distance` will be ignored.\n\n Returns:\n A probability distribution of shape (Batch, C) where smaller distances from `tensor` correspond to larger\n probabilities.\n \"\"\"\n if eps is None:\n eps = np.array(pairwise_distance * math.pow((1.0 - max_prob) / (max_prob * (tensor.shape[-1] - 1)), 1 / power),\n dtype=TENSOR_TO_NP_DTYPE[tensor.dtype])\n eps = to_tensor(\n eps, target_type='torch' if isinstance(tensor, torch.Tensor) else 'tf' if tf.is_tensor(tensor) else 'np')\n if isinstance(eps, torch.Tensor):\n eps = eps.to(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n tensor = maximum(tensor, eps)\n tensor = tensor_pow(1.0 / tensor, power)\n tensor = tensor / reshape(reduce_sum(tensor, axis=-1), shape=[-1, 1])\n return tensor\n" ]
[ [ "numpy.random.randint" ], [ "numpy.round", "numpy.argmax", "sklearn.metrics.matthews_corrcoef" ], [ "torch.cuda.is_available", "tensorflow.is_tensor" ] ]
yockgen/movidius
[ "cc32f1951a4d00d2250bb0d2b9000c5f2435b41a" ]
[ "ncappzoo/tensorflow/topcoder_andresduque/supporting/inferences.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n#~ The MIT License (MIT)\n#~ Copyright 2018 ©klo86min\n#~ Permission is hereby granted, free of charge, to any person obtaining a copy \n#~ of this software and associated documentation files (the \"Software\"), to deal \n#~ in the Software without restriction, including without limitation the rights \n#~ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell \n#~ copies of the Software, and to permit persons to whom the Software is \n#~ furnished to do so, subject to the following conditions:\n#~ The above copyright notice and this permission notice shall be included in \n#~ all copies or substantial portions of the Software.\n#~ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR \n#~ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, \n#~ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE \n#~ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER \n#~ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, \n#~ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE \n#~ SOFTWARE.\n\nimport argparse\nimport csv\nimport cv2\nimport mvnc.mvncapi as mvnc\nimport numpy as np\nimport os.path\n\n# image settings\nIMAGE_DIM = 299\n\n###############################################################################\n#\n# Modified code from https://github.com/ashwinvijayakumar/ncappzoo/apps/\n# rapid-image-classifier/rapid-image-classifier.py\n# also under the MIT License\n#\n###############################################################################\n\n# ---- Step 1: Open the enumerated device and get a handle to it -------------\n\ndef open_ncs_device(verbose=False):\n if verbose:\n mvnc.SetGlobalOption(mvnc.GlobalOption.LOG_LEVEL, 2)\n # Look for enumerated NCS device(s); quit program if none found.\n devices = mvnc.EnumerateDevices()\n if len( devices ) == 0:\n print( 'No devices found' )\n quit()\n\n # Get a handle to the first enumerated device and open it\n device = mvnc.Device( devices[0] )\n device.OpenDevice()\n\n return device\n\n# ---- Step 2: Load a graph file onto the NCS device -------------------------\n\ndef load_graph( device, graph_file):\n\n # Read the graph file into a buffer\n with open( graph_file, mode='rb' ) as f:\n blob = f.read()\n\n # Load the graph buffer into the NCS\n graph = device.AllocateGraph( blob )\n\n return graph\n\n# ---- Step 5: Unload the graph and close the device -------------------------\n\ndef close_ncs_device( device, graph ):\n graph.DeallocateGraph()\n device.CloseDevice()\n\n##################### End of ncappzoo code ################################\n\nclass MovidiusImage(object):\n \"\"\"Image metadata and loader for Movidius NCS\n \n Args:\n name (str): image reference name as used in CSV files\n path (str): image path\n class_index (int): 1-based class label index\n \n Attributes:\n top_k (list): list of predicted (class_index, proba)\n inference_time (float): computation time in ms \n \"\"\"\n \n def __init__(self, name, path, class_index = None):\n self.name = name\n self.path = path\n self.class_index = class_index\n self.top_k = None\n self.inference_time = None\n \n def load_BGR(self, dim, dtype=np.float16):\n \"\"\"Return image data in BGR order\n \n Args:\n dim (tuple): image dimensions\n dtype (numpy.dtype): new type for the BGR blob\n \n Returns:\n numpy.ndarray: the transformed BGR blob\n \"\"\"\n mean = 128\n std = 1/128\n\n img 
= cv2.imread(self.path).astype(np.float32)\n dx,dy,dz= img.shape\n delta=float(abs(dy-dx))\n if dx > dy: #crop the x dimension\n img=img[int(0.5*delta):dx-int(0.5*delta),0:dy]\n else:\n img=img[0:dx,int(0.5*delta):dy-int(0.5*delta)]\n img = cv2.resize(img, (dim, dim))\n\n img=cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\n\n for i in range(3):\n img[:,:,i] = (img[:,:,i] - mean) * std\n \n img = img.astype(dtype)\n return img\n \n def save_top_k(self, predictions, labels, k=5):\n \"\"\"Save the top_k predicted probabilities\n \n Args:\n predictions (numpy.ndarray): the probabilities for each class\n k (int): Number of top_k probas\n \"\"\"\n \n order_k = predictions.argsort()[::-1][:k]\n # class_index is 1-based\n self.top_k = [(labels[pos], np.float(predictions[pos])) \n for pos in order_k]\n\n \n def result_string(self):\n \"\"\" Return image results with the following fields:\n [name, top1, proba1, ... top5, proba5, time]\n \n Returns:\n str: formatted CSV string\n \"\"\"\n res = [ self.name, ]\n for k, prob in self.top_k:\n res += [k, prob]\n res += [self.inference_time]\n pattern = \"%s,\" + \"%d,%.9f,\" * len(self.top_k) + \"%.9f\"\n return pattern % tuple(res)\n \ndef init_images(data_dir, images_file):\n \"\"\"Parse image_file CSV and create one MovidiusImage per row.\n \n Args:\n data_dir (str): path of the folder containing images\n image_file (str): CSV file (one image path per row)\n \n Returns:\n list: list of MovidiusImage instances\n \"\"\"\n images_dir = {}\n images = []\n for file in sorted(os.listdir(data_dir)):\n if file.endswith(\".jpg\"):\n image = MovidiusImage(file, os.path.realpath(data_dir) + \"/\" + \"/\" + file, -1)\n images_dir[file] = image\n images.append(image)\n \n if os.path.isfile(images_file):\n images = []\n with open(images_file, 'r') as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n # skip header\n next(reader)\n for row_pos, row in enumerate(reader):\n name = row[0]\n truth = int(row[1])\n img = images_dir[name]\n img.class_index = truth\n images.append(img)\n return images\n \ndef write_inferences_csv(output_path, images):\n \"\"\" For each image, retrieve and write results.\n \n Args:\n output_path (str): path for the CSV output\n images (list): list of processed MovidiusImage instances\n \"\"\"\n with open(output_path, 'w') as output_file:\n for image in images:\n output_file.write(image.result_string() + '\\n')\n\ndef score_inferences(images, min_proba = 1e-15, mult = 100, n_classes=200, \n log_loss_max=15.0, time_limit=1000.0):\n \"\"\" Compute the logLoss and reference computation time\n \n Args:\n images (list): list of processed MovidiusImage instances\n min_proba (float): minimum probability to be used in logLoss\n mult (int): number of images used for the reference time\n n_classes (int): total number of classes\n log_loss_limit (float): minimum log_loss requirement\n time_limit (float): maximum time per image (in ms)\n \n Returns:\n tuple: LogLoss and reference_time float values\n \"\"\"\n min_proba = np.float(min_proba)\n max_proba = 1.0 - min_proba\n n_images = len(images)\n probas = np.zeros(n_images, dtype=np.float)\n image_time = 0.0\n top_1_accuracy = 0.0\n top_k_accuracy = 0.0\n for i, image in enumerate(images):\n class_probas = dict(image.top_k)\n if image.class_index == image.top_k[0][0]:\n top_1_accuracy += 1.0\n if image.class_index in class_probas:\n top_k_accuracy += 1.0\n probas[i] = class_probas[image.class_index]\n if probas[i] > 0:\n sum_probas = sum(class_probas.values())\n probas[i] /= sum_probas\n probas[i] = 
max(min_proba, min(max_proba, probas[i]))\n image_time += image.inference_time\n \n log_loss = np.mean(-np.log(probas))\n top_1_accuracy /= n_images\n top_k_accuracy /= n_images\n image_time /= n_images\n t = mult * image_time\n print(\"top_1_accuracy = %.9f\" % top_1_accuracy)\n print(\"top_k_accuracy = %.9f\" % top_k_accuracy )\n print(\"log_loss = %.9f\" % log_loss)\n print(\"image_time = %.9f\" % image_time)\n if image_time > time_limit or log_loss > log_loss_max:\n score = 0.0\n else:\n t_max = mult * time_limit\n score = 1e6 * (1.0 - log_loss * np.log(t) / (log_loss_max * np.log(t_max)))\n print(\"score = %.2f\" % score)\n return score\n \n\ndef main(args):\n parser = argparse.ArgumentParser(description='TopCoder Movidius MM')\n parser.add_argument(\n \"-images-dir\",\n dest=\"images_dir\",\n help=\"\"\"Folder containing images to classify\"\"\"\n )\n parser.add_argument(\n \"-output-file\",\n dest=\"output_file\",\n default=\"\",\n help=\"\"\"Output CSV file to save inference results\"\"\"\n )\n parser.add_argument(\n \"-graph-file\",\n dest=\"graph_file\",\n default=\"\",\n help=\"\"\"Movidius graph file path\"\"\"\n )\n parser.add_argument(\n \"-labels-map-file\",\n dest=\"labels_map_file\",\n default=\"\",\n help=\"\"\"Labels map file\"\"\"\n )\n parser.add_argument(\n \"-images-file\",\n dest=\"images_file\",\n default=\"\",\n help=\"\"\"CSV file containing list of images filenames to classify in images-dir folder, only filenames listed here will be processed\"\"\"\n )\n args = parser.parse_args()\n if not os.path.isdir(args.images_dir):\n print(\"data is not a directory: %s\" % args.images_dir)\n print(\"Please use the right path as argument, and/or change the Makefile MOVIDIUSDIR variable\")\n return 0\n \n print(\"IMAGE_DIM\", IMAGE_DIM)\n # start NCS\n device = open_ncs_device()\n graph = load_graph(device, args.graph_file)\n # prepare images\n images = init_images(args.images_dir, args.images_file)\n n_images = len(images)\n info_frequency = 100\n print(\"n_images = %d\" % n_images)\n \n # load labels map file\n labelsLines = [line.rstrip('\\n') for line in open(args.labels_map_file)]\n labels = {}\n for label in labelsLines:\n split = label.split(\":\")\n labels[int(split[0])] = int(split[1])\n \n # process images\n for i, image in enumerate(images):\n if (i+1) % info_frequency == 0:\n print(\"progess %d/%d ...\" % (i+1, n_images), flush=True)\n bgr_blob = image.load_BGR(IMAGE_DIM)\n graph.LoadTensor(bgr_blob, 'user object')\n output, userobj = graph.GetResult()\n #print(output)\n image.inference_time = np.sum(\n graph.GetGraphOption( mvnc.GraphOption.TIME_TAKEN ) )\n image.save_top_k(output, labels, 5)\n # stop NCS\n close_ncs_device(device, graph)\n # process results\n write_inferences_csv(args.output_file, images)\n if os.path.isfile(args.images_file):\n score_inferences(images)\n return 0\n\nif __name__ == '__main__':\n import sys\n sys.exit(main(sys.argv))\n" ]
[ [ "numpy.float", "numpy.log", "numpy.zeros" ] ]
agarwalrounak/qmt
[ "5e8a7001cc020979636e492448abcfd894396038" ]
[ "tests/py3/test_property_map.py" ]
[ "import numpy as np\n\nfrom qmt.geometry import PropertyMap, MaterialPropertyMap\nfrom qmt.materials import Materials\n\n\nclass DummyPartMap:\n def __init__(self, part_ids):\n assert len(part_ids) == 2\n self.partIds = part_ids\n\n def __call__(self, x):\n assert np.ndim(x) >= 1\n x = np.asanyarray(x)\n if np.ndim(x) == 1:\n return self.partIds[x[0] > 0]\n else:\n return np.where(x[..., 0] > 0, self.partIds[1], self.partIds[0])\n\n\ndef test_property_map():\n int_map = DummyPartMap([0, 1])\n str_map = DummyPartMap(['part1', 'part2'])\n\n prop_map1 = PropertyMap(int_map, np.vectorize(lambda p: 'yes' if p > 0 else 'no'))\n assert prop_map1.get_part((1., 2.)) == 1\n assert np.all(prop_map1.get_part(-np.ones((2, 3))) == 0)\n assert prop_map1((1., 2.)) == 'yes'\n assert np.all(prop_map1(-np.ones((2, 3))) == 'no')\n\n props = {'part1': 'yes', 'part2': 'no'}\n prop_map2 = PropertyMap(str_map, np.vectorize(lambda p: props[p]))\n assert prop_map2.get_part((1., 2.)) == 'part2'\n assert np.all(prop_map2.get_part(-np.ones((2, 3))) == 'part1')\n assert prop_map1((1., 2.)) == 'yes'\n assert np.all(prop_map1(-np.ones((2, 3))) == 'no')\n\n\ndef test_materials_property_map():\n int_map = DummyPartMap([0, 1])\n str_map = DummyPartMap(['part1', 'part2'])\n part_materials1 = {0: 'InAs', 1: 'GaSb'}\n part_materials2 = {'part1': 'InAs', 'part2': 'Al'}\n mat_lib = Materials(matDict={})\n mat_lib.add_material('InAs', 'semi', electronMass=0.026, directBandGap=417.,\n valenceBandOffset=-590.)\n mat_lib.add_material('GaSb', 'semi', electronMass=.039, directBandGap=812.,\n valenceBandOffset=-30.)\n mat_lib.add_material('Al', 'metal', workFunction=4280.)\n\n prop_map1 = MaterialPropertyMap(int_map, part_materials1, mat_lib, 'electronMass')\n assert prop_map1.get_part((1., 2.)) == 1\n assert np.all(prop_map1.get_part(-np.ones((2, 3))) == 0)\n assert prop_map1((1., 2.)) == mat_lib['GaSb']['electronMass']\n assert np.all(prop_map1(-np.ones((2, 3))) == mat_lib['InAs']['electronMass'])\n\n prop_map2 = MaterialPropertyMap(str_map, part_materials2, mat_lib, 'directBandGap', eunit='eV',\n fill_value=0.)\n assert prop_map2.get_part((1., 2.)) == 'part2'\n assert np.all(prop_map2.get_part(-np.ones((2, 3))) == 'part1')\n assert prop_map2((1., 2.)) == 0.\n assert np.all(prop_map2(-np.ones((2, 3))) == mat_lib.find('InAs', 'eV')['directBandGap'])\n" ]
[ [ "numpy.ones", "numpy.vectorize", "numpy.asanyarray", "numpy.ndim", "numpy.where" ] ]
abhaikollara/tensorflow
[ "4f96df3659696990cb34d0ad07dc67843c4225a9", "4f96df3659696990cb34d0ad07dc67843c4225a9", "4f96df3659696990cb34d0ad07dc67843c4225a9", "4f96df3659696990cb34d0ad07dc67843c4225a9", "4f96df3659696990cb34d0ad07dc67843c4225a9" ]
[ "tensorflow/python/kernel_tests/batch_scatter_ops_test.py", "tensorflow/python/keras/datasets/cifar10.py", "tensorflow/python/framework/kernels.py", "tensorflow/python/kernel_tests/batch_matmul_op_test.py", "tensorflow/python/keras/engine/input_spec_test.py" ]
[ "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for tensorflow.ops.tf.scatter.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\n\n\ndef _AsType(v, vtype):\n return v.astype(vtype) if isinstance(v, np.ndarray) else vtype(v)\n\n\ndef _NumpyUpdate(ref, indices, updates):\n for i, indx in np.ndenumerate(indices):\n indx = i[:-1] + (indx,)\n ref[indx] = updates[i]\n\n\n_TF_OPS_TO_NUMPY = {\n state_ops.batch_scatter_update: _NumpyUpdate,\n}\n\n\nclass ScatterTest(test.TestCase):\n\n def _VariableRankTest(self,\n tf_scatter,\n vtype,\n itype,\n repeat_indices=False,\n updates_are_scalar=False,\n method=False):\n np.random.seed(8)\n with self.cached_session(use_gpu=False):\n for indices_shape in (2,), (3, 7), (3, 4, 7):\n for extra_shape in (), (5,), (5, 9):\n # Generate random indices with no duplicates for easy numpy comparison\n sparse_dim = len(indices_shape) - 1\n indices = np.random.randint(\n indices_shape[sparse_dim], size=indices_shape, dtype=itype)\n updates = _AsType(\n np.random.randn(*(indices_shape + extra_shape)), vtype)\n\n old = _AsType(np.random.randn(*(indices_shape + extra_shape)), vtype)\n\n # Scatter via numpy\n new = old.copy()\n np_scatter = _TF_OPS_TO_NUMPY[tf_scatter]\n np_scatter(new, indices, updates)\n # Scatter via tensorflow\n ref = variables.Variable(old)\n ref.initializer.run()\n if method:\n ref.batch_scatter_update(ops.IndexedSlices(indices, updates))\n else:\n tf_scatter(ref, indices, updates).eval()\n self.assertAllClose(ref.eval(), new)\n\n @test_util.run_deprecated_v1\n def testVariableRankUpdate(self):\n vtypes = [np.float32, np.float64]\n for vtype in vtypes:\n for itype in (np.int32, np.int64):\n self._VariableRankTest(\n state_ops.batch_scatter_update, vtype, itype)\n\n @test_util.run_deprecated_v1\n def testBooleanScatterUpdate(self):\n with self.session(use_gpu=False) as session:\n var = variables.Variable([True, False])\n update0 = state_ops.batch_scatter_update(var, [1], [True])\n update1 = state_ops.batch_scatter_update(\n var, constant_op.constant(\n [0], dtype=dtypes.int64), [False])\n var.initializer.run()\n\n session.run([update0, update1])\n\n self.assertAllEqual([False, True], self.evaluate(var))\n\n @test_util.run_deprecated_v1\n def testScatterOutOfRange(self):\n params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)\n updates = np.array([-3, -4, -5]).astype(np.float32)\n with self.session(use_gpu=False):\n ref = variables.Variable(params)\n 
ref.initializer.run()\n\n # Indices all in range, no problem.\n indices = np.array([2, 0, 5])\n state_ops.batch_scatter_update(ref, indices, updates).eval()\n\n # Test some out of range errors.\n indices = np.array([-1, 0, 5])\n with self.assertRaisesOpError(\n r'indices\\[0\\] = \\[-1\\] does not index into shape \\[6\\]'):\n state_ops.batch_scatter_update(ref, indices, updates).eval()\n\n indices = np.array([2, 0, 6])\n with self.assertRaisesOpError(r'indices\\[2\\] = \\[6\\] does not index into '\n r'shape \\[6\\]'):\n state_ops.batch_scatter_update(ref, indices, updates).eval()\n\nif __name__ == '__main__':\n test.main()\n", "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"CIFAR10 small images classification dataset.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport numpy as np\n\nfrom tensorflow.python.keras import backend as K\nfrom tensorflow.python.keras.datasets.cifar import load_batch\nfrom tensorflow.python.keras.utils.data_utils import get_file\nfrom tensorflow.python.util.tf_export import keras_export\n\n\n@keras_export('keras.datasets.cifar10.load_data')\ndef load_data():\n \"\"\"Loads CIFAR10 dataset.\n\n Returns:\n Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.\n \"\"\"\n dirname = 'cifar-10-batches-py'\n origin = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'\n path = get_file(\n dirname,\n origin=origin,\n untar=True,\n file_hash=\n '6d958be074577803d12ecdefd02955f39262c83c16fe9348329d7fe0b5c001ce')\n\n num_train_samples = 50000\n\n x_train = np.empty((num_train_samples, 3, 32, 32), dtype='uint8')\n y_train = np.empty((num_train_samples,), dtype='uint8')\n\n for i in range(1, 6):\n fpath = os.path.join(path, 'data_batch_' + str(i))\n (x_train[(i - 1) * 10000:i * 10000, :, :, :],\n y_train[(i - 1) * 10000:i * 10000]) = load_batch(fpath)\n\n fpath = os.path.join(path, 'test_batch')\n x_test, y_test = load_batch(fpath)\n\n y_train = np.reshape(y_train, (len(y_train), 1))\n y_test = np.reshape(y_test, (len(y_test), 1))\n\n if K.image_data_format() == 'channels_last':\n x_train = x_train.transpose(0, 2, 3, 1)\n x_test = x_test.transpose(0, 2, 3, 1)\n\n x_test = x_test.astype(x_train.dtype)\n y_test = y_test.astype(y_train.dtype)\n\n return (x_train, y_train), (x_test, y_test)\n", "# Copyright 2018 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Functions for querying registered kernels.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.core.framework import kernel_def_pb2\nfrom tensorflow.python import pywrap_tensorflow as c_api\nfrom tensorflow.python.util import compat\n\n\ndef get_all_registered_kernels():\n \"\"\"Returns a KernelList proto of all registered kernels.\n \"\"\"\n buf = c_api.TF_GetAllRegisteredKernels()\n data = c_api.TF_GetBuffer(buf)\n kernel_list = kernel_def_pb2.KernelList()\n kernel_list.ParseFromString(compat.as_bytes(data))\n return kernel_list\n\n\ndef get_registered_kernels_for_op(name):\n \"\"\"Returns a KernelList proto of registered kernels for a given op.\n\n Args:\n name: A string representing the name of the op whose kernels to retrieve.\n \"\"\"\n buf = c_api.TF_GetRegisteredKernelsForOp(name)\n data = c_api.TF_GetBuffer(buf)\n kernel_list = kernel_def_pb2.KernelList()\n kernel_list.ParseFromString(compat.as_bytes(data))\n return kernel_list\n", "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for tensorflow.ops.tf.BatchMatMul.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python import tf2\nfrom tensorflow.python.client import session\nfrom tensorflow.python.compat import compat\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gradient_checker_v2\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import benchmark\nfrom tensorflow.python.platform import test\n\n\ndef GetRandomNormalInput(shape, dtype):\n # float16 has limited range so we reduce the variance of the scalars.\n scale = 10.0 if dtype != np.float16 else 0.1\n loc = -10.0 if dtype != np.float16 else 0.1\n vals = np.array(np.random.normal(loc, scale, np.prod(shape)), dtype=dtype)\n if dtype in (np.complex64, np.complex128):\n imag = np.array(np.random.normal(loc, scale, np.prod(shape)), dtype=dtype)\n vals += 1j * imag\n return 
vals.reshape(shape)\n\n\nclass BatchMatmulOpTest(test.TestCase):\n\n # Uses numpy to compute batch_matmul(x, y, adjoint_a, adjoint_b).\n def _npBatchMatmul(self, x, y, adjoint_a, adjoint_b):\n # output's shape depends on adj[0] and adj[1]\n if adjoint_a:\n x = np.conjugate(np.swapaxes(x, -1, -2))\n if adjoint_b:\n y = np.conjugate(np.swapaxes(y, -1, -2))\n return np.matmul(x, y)\n\n # Compares TensorFlow BatchMatmul with NumPy's matmul.\n def _compare(self, x_in, y_in, adjoint_a, adjoint_b, static_shape):\n x_t_shape = x_in.shape[:-2] + (x_in.shape[-1], x_in.shape[-2])\n y_t_shape = y_in.shape[:-2] + (y_in.shape[-1], y_in.shape[-2])\n x = x_in if not adjoint_a else x_in.reshape(x_t_shape)\n y = y_in if not adjoint_b else y_in.reshape(y_t_shape)\n is_floating = x.dtype != np.int32\n tol = 100 * np.finfo(x.dtype).eps if is_floating else 0\n with self.cached_session(use_gpu=is_floating) as sess:\n if static_shape:\n z0 = math_ops.matmul(x, y, adjoint_a=adjoint_a, adjoint_b=adjoint_b)\n z0_val = self.evaluate(z0)\n else:\n x_ph = array_ops.placeholder(x.dtype)\n y_ph = array_ops.placeholder(y.dtype)\n z0 = math_ops.matmul(\n x_ph, y_ph, adjoint_a=adjoint_a, adjoint_b=adjoint_b)\n z0_val = sess.run(z0, feed_dict={x_ph: x, y_ph: y})\n z1 = self._npBatchMatmul(x, y, adjoint_a, adjoint_b)\n self.assertAllClose(z0_val, z1, rtol=tol, atol=tol)\n\n def _testNonEmpty(self, dtype, adjoint_a, adjoint_b, use_static_shape):\n\n def CompareNonEmpty(self, a_shape, b_shape):\n self._compare(\n GetRandomNormalInput(a_shape, dtype),\n GetRandomNormalInput(b_shape, dtype),\n adjoint_a,\n adjoint_b,\n static_shape=use_static_shape)\n\n CompareNonEmpty(self, [1, 2, 3], [1, 3, 5])\n CompareNonEmpty(self, [1, 2, 3], [1, 3, 1])\n CompareNonEmpty(self, [1, 1, 3], [1, 3, 5])\n CompareNonEmpty(self, [1, 2, 3], [1, 3, 5])\n CompareNonEmpty(self, [7, 1, 3], [7, 3, 5])\n CompareNonEmpty(self, [7, 2, 3], [7, 3, 1])\n CompareNonEmpty(self, [7, 2, 3], [7, 3, 5])\n CompareNonEmpty(self, [10, 64, 75], [10, 75, 30])\n CompareNonEmpty(self, [5, 7, 2, 3], [5, 7, 3, 5])\n\n def _testBroadcasting(self, dtype, adjoint_a, adjoint_b, use_static_shape):\n\n def CompareNonEmpty(self, a_shape, b_shape):\n self._compare(\n GetRandomNormalInput(a_shape, dtype),\n GetRandomNormalInput(b_shape, dtype),\n adjoint_a,\n adjoint_b,\n static_shape=use_static_shape)\n\n CompareNonEmpty(self, [2, 3], [1, 3, 5])\n CompareNonEmpty(self, [1, 2, 3], [3, 5])\n CompareNonEmpty(self, [5, 1, 2, 3], [1, 7, 3, 5])\n CompareNonEmpty(self, [5, 2, 2, 3], [3, 5])\n CompareNonEmpty(self, [2, 3], [5, 2, 3, 5])\n CompareNonEmpty(self, [4, 5, 1, 2, 3], [1, 1, 3, 5])\n CompareNonEmpty(self, [1, 2, 1, 4, 2, 1, 3, 4], [3, 2, 1, 1, 1, 2, 4, 2])\n\n def _testEmpty(self, dtype, adjoint_a, adjoint_b, use_static_shape):\n\n def CompareEmpty(self, a_shape, b_shape):\n self._compare(\n np.zeros(a_shape).astype(dtype),\n np.zeros(b_shape).astype(dtype),\n adjoint_a,\n adjoint_b,\n static_shape=use_static_shape)\n\n CompareEmpty(self, [0, 3, 2], [0, 2, 4])\n CompareEmpty(self, [3, 0, 2], [3, 2, 5])\n CompareEmpty(self, [3, 3, 2], [3, 2, 0])\n\n\ndef _GetBatchMatmulOpTest(dtype, adjoint_a, adjoint_b, use_static_shape):\n\n def Test(self):\n np.random.seed(42)\n self._testNonEmpty(dtype, adjoint_a, adjoint_b, use_static_shape)\n self._testEmpty(dtype, adjoint_a, adjoint_b, use_static_shape)\n\n return Test\n\n\ndef _GetBatchMatmulOpBroadcastingTest(dtype, adjoint_a, adjoint_b,\n use_static_shape):\n\n def Test(self):\n with compat.forward_compatibility_horizon(2019, 4, 26):\n 
np.random.seed(42)\n self._testBroadcasting(dtype, adjoint_a, adjoint_b, use_static_shape)\n\n return Test\n\n\nclass BatchMatmulGradientTest(test.TestCase):\n\n # loss = sum(batch_matmul(x, y)). Verify dl/dx and dl/dy via the\n # gradient checker.\n def _checkGrad(self, x_in, y_in, adjoint_a, adjoint_b):\n x_t_shape = x_in.shape[:-2] + (x_in.shape[-1], x_in.shape[-2])\n y_t_shape = y_in.shape[:-2] + (y_in.shape[-1], y_in.shape[-2])\n x = x_in if not adjoint_a else x_in.reshape(x_t_shape)\n y = y_in if not adjoint_b else y_in.reshape(y_t_shape)\n epsilon = np.finfo(x.dtype).eps\n # Since our gradient is linear, a larger delta decreases the error.\n delta = 10 * epsilon**(1.0 / 3.0)\n\n def Loss(x, y):\n return math_ops.reduce_sum(math_ops.matmul(x, y, adjoint_a, adjoint_b))\n\n with self.cached_session(use_gpu=True):\n ((x_jacob_t, y_jacob_t),\n (x_jacob_n, y_jacob_n)) = gradient_checker_v2.compute_gradient(\n Loss, [x, y], delta=delta)\n tol = 10 * delta\n self.assertAllClose(x_jacob_t, x_jacob_n, rtol=tol, atol=tol)\n self.assertAllClose(y_jacob_t, y_jacob_n, rtol=tol, atol=tol)\n\n # Tests gradients of a batched matmul of x, and y\n def _compare(self, a_shape, b_shape, dtype, adjoint_a, adjoint_b):\n np.random.seed(42)\n x = GetRandomNormalInput(a_shape, dtype)\n y = GetRandomNormalInput(b_shape, dtype)\n self._checkGrad(x, y, adjoint_a, adjoint_b)\n\n\ndef _GetBatchMatmulGradientTest(dtype, adjoint_a, adjoint_b):\n\n def Test(self):\n def CheckGradients(self, a_shape, b_shape):\n self._compare(a_shape, b_shape, dtype, adjoint_a, adjoint_b)\n\n CheckGradients(self, [1, 2, 3], [1, 3, 5])\n CheckGradients(self, [3, 4, 7], [3, 7, 10])\n\n return Test\n\n\ndef _GetBatchMatmulGradientWithBroadcastingTest(dtype, adjoint_a, adjoint_b):\n\n def Test(self):\n def CheckGradients(self, a_shape, b_shape):\n self._compare(a_shape, b_shape, dtype, adjoint_a, adjoint_b)\n\n with compat.forward_compatibility_horizon(2019, 4, 26):\n CheckGradients(self, [1, 5, 2, 3], [7, 1, 3, 2])\n CheckGradients(self, [2, 3], [1, 3, 5])\n CheckGradients(self, [2, 3], [5, 3, 5])\n CheckGradients(self, [5, 2, 5], [5, 3])\n CheckGradients(self, [5, 2, 2, 3], [3, 5])\n CheckGradients(self, [4, 5, 1, 2, 3], [1, 1, 3, 5])\n CheckGradients(self, [1, 2, 1, 4, 2, 1, 3, 4], [3, 2, 1, 1, 1, 2, 4, 2])\n\n return Test\n\n\nclass BatchMatMulBenchmark(test.Benchmark):\n # Batch sizes are 512.\n shape_pairs = [\n # Typical fully connected layer.\n ((4, 8, 4, 2, 1, 1024), (1024, 1024)),\n ((4, 1, 4, 1, 1, 1024), (1, 8, 1, 2, 1024, 1024)),\n # Square matmul.\n ((4, 8, 4, 2, 512, 512), (512, 512)),\n ((4, 1, 4, 1, 512, 512), (1, 8, 1, 2, 512, 512)),\n # Matrix-vector multiplies.\n ((4, 8, 4, 2, 10000, 200), (200, 1)),\n ((4, 1, 4, 1, 10000, 200), (1, 8, 1, 2, 200, 1)),\n # Vector-matrix multiplies.\n ((4, 8, 4, 2, 1, 200), (200, 10000)),\n ((4, 1, 4, 1, 1, 200), (1, 8, 1, 2, 200, 10000)),\n ]\n\n def benchmarkBatchMatMulBroadcast(self):\n for (a_shape, b_shape) in self.shape_pairs:\n with compat.forward_compatibility_horizon(2019, 4, 26):\n with ops.Graph().as_default(), \\\n session.Session(config=benchmark.benchmark_config()) as sess, \\\n ops.device(\"/cpu:0\"):\n matrix_a = variables.Variable(\n GetRandomNormalInput(a_shape, np.float32))\n matrix_b = variables.Variable(\n GetRandomNormalInput(b_shape, np.float32))\n variables.global_variables_initializer().run()\n\n # Use batch matmul op's internal broadcasting.\n self.run_op_benchmark(\n sess,\n math_ops.matmul(matrix_a, matrix_b),\n min_iters=50,\n 
name=\"batch_matmul_cpu_{}_{}\".format(a_shape, b_shape))\n\n # Manually broadcast the input matrices using the broadcast_to op.\n broadcasted_batch_shape = array_ops.broadcast_static_shape(\n matrix_a.shape[:-2], matrix_b.shape[:-2])\n broadcasted_a_shape = broadcasted_batch_shape.concatenate(\n matrix_a.shape[-2:])\n broadcasted_b_shape = broadcasted_batch_shape.concatenate(\n matrix_b.shape[-2:])\n self.run_op_benchmark(\n sess,\n math_ops.matmul(\n array_ops.broadcast_to(matrix_a, broadcasted_a_shape),\n array_ops.broadcast_to(matrix_b, broadcasted_b_shape)),\n min_iters=50,\n name=\"batch_matmul_manual_broadcast_cpu_{}_{}\".format(\n a_shape, b_shape))\n\n\nif __name__ == \"__main__\":\n dtypes_to_test = [np.float16, np.float32, np.float64, np.int32]\n if not test.is_built_with_rocm():\n # ROCm does not support BLAS operations for complex types\n dtypes_to_test += [np.complex64, np.complex128]\n for dtype_ in dtypes_to_test:\n for adjoint_a_ in False, True:\n for adjoint_b_ in False, True:\n name = \"%s_%s_%s\" % (dtype_.__name__, adjoint_a_, adjoint_b_)\n # TF2 does not support placeholders under eager so we skip it.\n for use_static_shape_ in set([True, tf2.enabled()]):\n setattr(\n BatchMatmulOpTest,\n \"testBatchMatmulOp_\" + name + \"_{}\".format(use_static_shape_),\n test_util.xla_allow_fallback(\n \"TODO(b/134526360): XLA:CPU hasn't implemented int32 dot.\")(\n _GetBatchMatmulOpTest(dtype_, adjoint_a_, adjoint_b_,\n use_static_shape_)))\n # Broadcasting is supported only in v2.\n setattr(\n BatchMatmulOpTest, \"testBatchMatmulBroadcasting_\" + name +\n (\"_%s\" % use_static_shape_),\n test_util.xla_allow_fallback(\n \"TODO(b/134526360): XLA:CPU hasn't implemented int32 dot.\")(\n _GetBatchMatmulOpBroadcastingTest(dtype_, adjoint_a_,\n adjoint_b_,\n use_static_shape_)))\n if dtype_ == np.int32:\n continue\n setattr(BatchMatmulGradientTest, \"testBatchMatmulGradient_\" + name,\n _GetBatchMatmulGradientTest(dtype_, adjoint_a_, adjoint_b_))\n # Broadcasting is supported only in v2.\n setattr(\n BatchMatmulGradientTest,\n \"testBatchMatmulGradientWithBroadcasting_\" + name,\n _GetBatchMatmulGradientWithBroadcastingTest(dtype_, adjoint_a_,\n adjoint_b_))\n test.main()\n", "# Copyright 2019 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"InputSpec tests.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.keras.engine import input_spec\nfrom tensorflow.python.platform import test\n\n\nclass InputSpecTest(test.TestCase):\n\n def test_axes_initialization(self):\n input_spec.InputSpec(shape=[1, None, 2, 3], axes={3: 5, '2': 2})\n with self.assertRaisesRegexp(ValueError, 'Axis 4 is greater than'):\n input_spec.InputSpec(shape=[1, None, 2, 3], axes={4: 5})\n with self.assertRaisesRegexp(TypeError, 'keys in axes must be integers'):\n input_spec.InputSpec(shape=[1, None, 2, 3], axes={'string': 5})\n\n\nclass InputSpecToTensorShapeTest(test.TestCase):\n\n def test_defined_shape(self):\n spec = input_spec.InputSpec(shape=[1, None, 2, 3])\n self.assertAllEqual(\n [1, None, 2, 3], input_spec.to_tensor_shape(spec).as_list())\n\n def test_defined_ndims(self):\n spec = input_spec.InputSpec(ndim=5)\n self.assertAllEqual(\n [None] * 5, input_spec.to_tensor_shape(spec).as_list())\n\n spec = input_spec.InputSpec(ndim=0)\n self.assertAllEqual(\n [], input_spec.to_tensor_shape(spec).as_list())\n\n spec = input_spec.InputSpec(ndim=3, axes={1: 3, -1: 2})\n self.assertAllEqual(\n [None, 3, 2], input_spec.to_tensor_shape(spec).as_list())\n\n def test_undefined_shapes(self):\n spec = input_spec.InputSpec(max_ndim=5)\n with self.assertRaisesRegexp(ValueError, 'unknown TensorShape'):\n input_spec.to_tensor_shape(spec).as_list()\n\n spec = input_spec.InputSpec(min_ndim=5, max_ndim=5)\n with self.assertRaisesRegexp(ValueError, 'unknown TensorShape'):\n input_spec.to_tensor_shape(spec).as_list()\n\n\nif __name__ == '__main__':\n test.main()\n" ]
[ [ "tensorflow.python.framework.ops.IndexedSlices", "numpy.random.seed", "numpy.random.randn", "numpy.ndenumerate", "tensorflow.python.platform.test.main", "tensorflow.python.ops.state_ops.batch_scatter_update", "numpy.array", "numpy.random.randint", "tensorflow.python.ops.variables.Variable", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.python.keras.datasets.cifar.load_batch", "numpy.empty", "tensorflow.python.keras.backend.image_data_format", "tensorflow.python.keras.utils.data_utils.get_file", "tensorflow.python.util.tf_export.keras_export" ], [ "tensorflow.core.framework.kernel_def_pb2.KernelList", "tensorflow.python.pywrap_tensorflow.TF_GetRegisteredKernelsForOp", "tensorflow.python.util.compat.as_bytes", "tensorflow.python.pywrap_tensorflow.TF_GetAllRegisteredKernels", "tensorflow.python.pywrap_tensorflow.TF_GetBuffer" ], [ "tensorflow.python.ops.array_ops.broadcast_static_shape", "tensorflow.python.platform.benchmark.benchmark_config", "numpy.random.seed", "tensorflow.python.compat.compat.forward_compatibility_horizon", "tensorflow.python.ops.array_ops.broadcast_to", "tensorflow.python.framework.ops.Graph", "tensorflow.python.platform.test.is_built_with_rocm", "tensorflow.python.ops.array_ops.placeholder", "tensorflow.python.ops.math_ops.matmul", "tensorflow.python.ops.gradient_checker_v2.compute_gradient", "tensorflow.python.framework.ops.device", "numpy.zeros", "tensorflow.python.ops.variables.global_variables_initializer", "numpy.prod", "tensorflow.python.framework.test_util.xla_allow_fallback", "numpy.finfo", "tensorflow.python.tf2.enabled", "numpy.matmul", "numpy.swapaxes", "tensorflow.python.platform.test.main" ], [ "tensorflow.python.platform.test.main", "tensorflow.python.keras.engine.input_spec.to_tensor_shape", "tensorflow.python.keras.engine.input_spec.InputSpec" ] ]
wenlianglaw/Tetris-in-Python
[ "d4f0a22c4827e7eeb44c55def3f024e0c6932ebe" ]
[ "game_client.py" ]
[ "# This file defines the back end of the Tetris game\n#\n# GameState is the base class of GameClient.\n#\n# GameClient.Run() will start two threads:\n# - _ProcessActions: Process the action list every x seconds\n# - _AutoDrop: Auto drops the current piece.\n#\n# GameClient:\n# - current piece\n# - held piece\n# - piece list\n# - color_map: game board\n# - InputActions(...): Inputs a list of actions.\n# - ProcessActions(...): Lets the game client process a list of actions\n# directly\n# - ProcessAction(...): Lets the game client process one actions directly\n# - PutPiece(...): Puts the current piece if the position is valid.\n# - GetState(...): Gets game state, useful to AI\n# - CheckValidity(...): Checks if a move is valid\n# - SpawnPiece(...): Sets the current piece.\n# - Restart(...): Restarts the game.\n# - Rotate(...): Alternatively, callers can directly call Rotate to rotate\n# current_piece\n# - Move(...): Alternatively, callers can directly call Move to move the\n# current_piece\n#\nimport copy\nimport queue\nimport threading\nimport time\nfrom threading import Lock\nfrom typing import Tuple, List\n\nimport numpy as np\n\nimport actions\nimport shape\n\n# Some global settings\nDEFAULT_LENGTH = 20\nDEFAULT_WIDTH = 10\nMAP_PADDING_SIZE = 4\n# When there are less than threshold pieces, spawn a new bag.\nREFILL_THRESHOLD = 5\n\n# Disable the auto drop in next few seconds\nMAXIMUM_LOCK_TIME = 4\nINCREMENTAL_LOCK_TIME = 1\n\n# Scores\nSINGLE = 5\nDOUBLE = 10\nTSS = 20\nTRIPLE = 40\nQUAD = 50\nTSD = 60\nTST = 80\nPC = 120\n\n# ATTACKS\nATTACK_DOUBLE = 1\nATTACK_TSS = 2\nATTACK_TRIPLE = 2\nATTACK_QUAD = 4\nATTACK_TSD = 4\nATTACK_TST = 6\nATTACK_PC = 10\n\n\nclass InternalError(Exception):\n \"\"\"Any internal errors.\"\"\"\n\n\nclass GameState:\n def __init__(self):\n self.height = 0\n self.width = 0\n self.color_map = np.array([])\n self.current_piece = None\n self.held_piece = None\n self.score = 0\n self.piece_list = []\n self.is_gameover = False\n self.can_swap = True\n self.accumulated_lines_eliminated = 0\n self.piece_dropped = 0\n self.blevel_increase = False\n self.level = 0\n self.line_sent = 0\n self.line_received = 0\n\n def __deepcopy__(self, memodict=None):\n if memodict is None:\n memodict = dict()\n another = copy.copy(self)\n another.color_map = self.color_map.copy()\n if self.current_piece is not None:\n another.current_piece = self.current_piece.copy()\n if self.held_piece is not None:\n another.held_piece = self.held_piece.copy()\n another.piece_list = copy.deepcopy(self.piece_list.copy())\n return another\n\n def copy(self):\n return self.__deepcopy__()\n\n def __str__(self):\n ret = \"\"\n ret += f\"\"\"height: {self.height}\nwidth: {self.width}\ncolor_map: {self.color_map}\ncurrent_piece: {self.current_piece}\nheld_piece: {self.held_piece}\nscore: {self.score}\npiece_list: {self.piece_list}\nis_gameover: {self.is_gameover}\ncan_swap: {self.can_swap}\npiece_dropped: {self.piece_dropped}\nlevel: {self.level}\n \"\"\"\n\n\nclass GameClient(GameState):\n def __init__(self, height: int = DEFAULT_LENGTH, width: int = DEFAULT_WIDTH, map_height_padding=MAP_PADDING_SIZE,\n map_side_padding=MAP_PADDING_SIZE):\n super().__init__()\n\n self.height = height\n self.width = width\n self.map_height_padding = map_height_padding\n self.map_side_padding = map_side_padding\n\n self.dtype = np.uint8\n self.dtype_length = 8\n if self.width + 2 * map_side_padding > 8:\n self.dtype = np.uint16\n self.dtype_length = 16\n if self.width + 2 * map_side_padding > 16:\n self.dtype = np.uint32\n 
self.dtype_length = 32\n if self.width + 2 * map_side_padding > 32:\n self.dtype = np.uint64\n self.dtype_length = 64\n if self.width + 2 * map_side_padding > 64:\n self.dtype = np.uint128\n self.dtype_length = 128\n if self.width + 2 * map_side_padding > 128:\n raise InternalError(\n \"width too long to support bit map. Consider chaning it to a smaller value.\")\n\n # Lock time settings\n # When the lock is enabled, count the lock time.\n # When the accumulated lock time is greater than the current maximum lock time,\n # force to perform the auto drop. Otherwise autodop is disabled for this turn.\n # When current locktime is reached but an refresh lock time request is genertaed.\n # increase the current maximum lock time by incremental lock time.\n self.maximum_lock_time = MAXIMUM_LOCK_TIME\n self.current_maximum_lock_time = 0\n self.incremental_lock_time = INCREMENTAL_LOCK_TIME\n self.accumulate_lock_time = 0\n # Only when move or rotate at bottom locks the auto drop\n self._enable_lock_time = False\n\n # Color map marks the color for each cell.\n self.color_map = np.array([[]], dtype=self.dtype)\n\n # Bit map for a better performance in some calculation.\n self.bit_map = np.array([], dtype=self.dtype)\n\n # Lock for current_piece\n self.mutex_current_piece = Lock()\n self.last_put_piece = None\n # List of actions to process\n self.action_list = queue.Queue()\n self._init_spawn_interval = 500 # 500 ms at level 0\n self._current_spawn_interval = 500\n # actions.Action\n self.last_action = None\n self.disable_autodrop = False\n self.line_tobesent = 0\n\n # Used when calculate the auto drop interval decrease based on current level.\n # Generated from the sigmoid function\n # x = np.linspace(0, 40, 40)\n # interval_decrease = 110 / (1 + np.exp(0.16 * x))\n # interval_decrease = np.cumsum(interval_decrease)\n # print(repr(np.cumsum(interval_decrease)))\n self.interval_decrease = np.array(\n [55., 100.49727968, 150.55179446, 190.28030383,\n 230.85041422, 260.47244367, 290.38990828, 320.86947489,\n 345.19115272, 350.63934095, 380.49515164, 400.03022699,\n 410.5020957, 420.15098155, 430.19789113, 440.8437644,\n 450.26946046, 455.63636342, 461.08741849, 465.74844074,\n 469.72957119, 473.12678557, 476.02338748, 478.4914391,\n 480.59310001, 482.38185737, 483.90364044, 485.19781892,\n 486.29808909, 487.23325451, 488.02790975, 488.70303602,\n 489.27651798, 489.76359062, 490.17722443, 490.52845671,\n 490.82667585, 491.07986489, 491.2948099, 491.47727802])\n\n self._RefillPieces()\n self._TakePieceFromList()\n self.accumulated_lines_eliminated = 0\n\n # When soft-dropping, temporarily disable auto-drop\n self.soft_drop = False\n self.piece_dropped = 0\n\n # Must be put after the initializations above\n self._InitMap()\n\n def _InitMap(self):\n side_padding = (1 << self.map_side_padding) - 1\n init_row = (side_padding << (self.map_side_padding + self.width)) | side_padding\n bottom_padding = (1 << (self.width + 2 * self.map_side_padding)) - 1\n self.bit_map = np.concatenate((\n np.array((self.map_height_padding + self.height) * [init_row], dtype=self.dtype),\n np.array(self.map_height_padding * [bottom_padding], dtype=self.dtype)), dtype=self.dtype)\n\n self.color_map = np.array([[0 for i in range(self.width)] for x in range(self.height + self.map_height_padding)],\n dtype=self.dtype)\n\n def Restart(self):\n self._InitMap()\n self.piece_list = []\n self.held_piece = None\n self.current_piece = None\n # Lock of the game state\n self.mutex_current_piece = Lock()\n self.is_gameover = False\n 
self.last_put_piece = None\n # List of actions to process\n self.action_list = queue.Queue()\n self._init_spawn_interval = 500.0\n self._current_spawn_interval = 500.0\n # actions.Action\n self.last_action = []\n self.can_swap = True\n self.score = 0\n self.accumulate_lock_time = 0\n self.accumulated_lines_eliminated = 0\n self.soft_drop = False\n self.piece_dropped = 0\n self.line_sent = 0\n self.line_received = 0\n self.line_tobesent = 0\n\n self._enable_lock_time = False\n\n self._RefillPieces()\n self._TakePieceFromList()\n\n def Run(self):\n auto_drop_th = threading.Thread(target=self.AutoDrop, name=\"auto_drop\", daemon=True)\n process_input_th = threading.Thread(target=self._ProcessActionsThread, daemon=True)\n if not self.disable_autodrop:\n auto_drop_th.start()\n process_input_th.start()\n\n if not self.disable_autodrop:\n auto_drop_th.join()\n process_input_th.join()\n print(\"game ends\")\n\n def GetState(self) -> GameState:\n \"\"\"Gets game state.\n Returns the objects ref instead of copy For better performance.\n \"\"\"\n return copy.deepcopy(super())\n\n def GetCell(self, i: int, j: int) -> int:\n \"\"\"Gets cell at [i,j].\n Notes: This function doesn't check the index out of boundary error.\n \"\"\"\n return self.color_map[i, j]\n\n def GetMap(self):\n \"\"\"Gets whole color_map.\"\"\"\n return self.color_map\n\n def GetMapArea(self, corner: Tuple[int, int],\n size: Tuple[int, int]) -> np.array:\n \"\"\"Gets an area of\n :param top_left:\n :param bottom_right:\n :return: The area of the color_map.\n \"\"\"\n size = (np.min([size[0], self.color_map.shape[0] - corner[0]]),\n np.min([size[1], self.color_map.shape[1] - corner[1]]))\n\n return self.color_map[corner[0]: corner[0] + size[0],\n corner[1]: corner[1] + size[1]]\n\n def SetMap(self, pos: Tuple[int, int], v: int, map: np.array = None):\n \"\"\"Sets the cell at [i,j] to value v.\"\"\"\n (i, j) = pos\n bit_map = self.bit_map.copy()\n if map is None or map is self.color_map:\n map = self.color_map\n bit_map = self.bit_map\n map[i, j] = v\n\n # Set a bit to value: Clear to bit to 0 and then set to value\n bit_v = 0 if v == 0 else 1\n bit_j_pos = self.width + self.map_side_padding - 1 - j\n bit_map[i] = (bit_map[i] & ~(1 << bit_j_pos)) | (bit_v << bit_j_pos)\n\n def SetWholeMap(self, map: np.array):\n if map.shape != self.color_map.shape:\n raise InternalError(\n f\"Map shape {map.shape}\"\n f\" must match the color_map shape: {self.color_map.shape}\")\n\n self.color_map = map\n\n # Convert the map to Bollean map\n bit_color_map = map != 0\n\n # Revert the order and padding, then call the packbits(..., order=\"little\") fn\n bit_color_map = bit_color_map[:, ::-1]\n bit_color_map = np.pad(\n bit_color_map,\n ((0, 0), (self.map_side_padding, self.map_side_padding)),\n \"constant\", constant_values=(1,))\n\n padding0_len = self.dtype_length - bit_color_map.shape[1]\n bit_color_map = np.pad(bit_color_map, ((0, 0), (0, padding0_len)),\n \"constant\", constant_values=(0,))\n\n int_color_map = np.packbits(bit_color_map, bitorder=\"little\").view(self.dtype)\n self.bit_map[0:self.map_height_padding + self.height] = int_color_map\n print(int_color_map)\n print(self.bit_map)\n\n def copy(self):\n another = copy.copy(self)\n another.last_action = copy.copy(self.last_action)\n if self.last_put_piece is not None:\n another.last_put_piece = self.last_put_piece.copy()\n another.color_map = np.copy(self.color_map)\n another.bit_map = np.copy(self.bit_map)\n another.action_list = copy.copy(self.action_list)\n another.piece_list = 
self.piece_list.copy()\n another.current_piece = self.current_piece.copy()\n if self.held_piece is None:\n another.held_piece = None\n else:\n another.held_piece = self.held_piece.copy()\n return another\n\n def AutoDrop(self):\n while True:\n if self.soft_drop:\n # If it is soft dropping, we don't perform auto drop.\n self.soft_drop = False\n else:\n if self.CheckValidity(self.current_piece, offset=(1, 0)):\n self.Move(actions.Action(down=True, source_user_or_ai=False))\n else:\n if (not self._enable_lock_time or\n self.accumulate_lock_time >= self.current_maximum_lock_time):\n self.PutPiece()\n else:\n self.accumulate_lock_time += self._current_spawn_interval / 1000\n\n time.sleep(self._current_spawn_interval / 1000)\n\n def InputActions(self, acts: List[actions.Action]):\n if self.is_gameover:\n return\n\n if len(acts) > 30:\n print(\"len:\", len(acts))\n acts = acts[-30:]\n\n for act in acts:\n if self.action_list.qsize() > 50:\n break\n self.action_list.put(act)\n\n def ProcessActions(self, actions: List[actions.Action], post_processing=True):\n for a in actions:\n self.ProcessAction(a, post_processing=post_processing)\n\n def ProcessAction(self, action: actions.Action, post_processing=True):\n if self.is_gameover:\n return\n # print(f\"Processed action: {action.direction}, {action.rotation}, {action.swap}\")\n # self.test += 1\n # print(self.test)\n if action.swap:\n self.Swap()\n self.Rotate(action.rotation)\n self.Move(action, post_processing=post_processing)\n\n def _ProcessActionsThread(self):\n while True:\n while not self.action_list.empty():\n act = self.action_list.get()\n self.ProcessAction(act)\n self.action_list.task_done()\n time.sleep(0.001)\n\n def SetLevel(self, level: int = 0):\n \"\"\"Let the front end set!\"\"\"\n self.level = level\n\n i = min(len(self.interval_decrease), self.level)\n self._current_spawn_interval = max(\n 10, self._init_spawn_interval - self.interval_decrease[i])\n\n def IncreaseLevel(self, inc: int = 1):\n \"\"\"Let the front end decide!\"\"\"\n self.level += inc\n self.SetLevel(self.level)\n\n def Move(self, action: actions.Action, post_processing=True) -> bool:\n \"\"\"Moves the current piece.\n :param direction: Direction to move\n :param post_processing: if True, put the piece to color_map and\n apply line eliminate. 
Otherwise just update the current_piece's states.\n :return True if moved; False otherwise\n \"\"\"\n if (action.direction == actions.NONE and\n not action.down):\n return False\n\n moved = False\n if action.down:\n try:\n self.mutex_current_piece.acquire()\n if self.CheckValidity(self.current_piece, (1, 0)):\n self.current_piece.x += 1\n moved = True\n self.soft_drop = True\n finally:\n self.mutex_current_piece.release()\n\n if action.direction == actions.LEFT:\n try:\n self.mutex_current_piece.acquire()\n if self.CheckValidity(self.current_piece, (0, -1)):\n self.current_piece.y += -1\n moved = True\n finally:\n self.mutex_current_piece.release()\n\n if action.direction == actions.RIGHT:\n try:\n self.mutex_current_piece.acquire()\n if self.CheckValidity(self.current_piece, (0, 1)):\n self.current_piece.y += 1\n moved = True\n finally:\n self.mutex_current_piece.release()\n if action.direction == actions.HARD_DROP or action.direction == actions.SOFT_DROP:\n try:\n self.mutex_current_piece.acquire()\n while self.CheckValidity(self.current_piece, (1, 0)):\n self.current_piece.x += 1\n moved = True\n finally:\n self.mutex_current_piece.release()\n if post_processing and action.direction == actions.HARD_DROP:\n self.PutPiece()\n\n if moved:\n self.last_action = action\n\n at_bottom = not self.CheckValidity(self.current_piece, (1, 0))\n if (at_bottom and action.direction != actions.HARD_DROP and\n action.source_user):\n self._RefreshLockTime()\n\n return moved\n\n def _RefreshLockTime(self):\n self._enable_lock_time = True\n if self.accumulate_lock_time >= self.current_maximum_lock_time:\n self.current_maximum_lock_time = min(\n self.current_maximum_lock_time + self.incremental_lock_time,\n self.maximum_lock_time)\n\n def _ResetLockTime(self):\n self._enable_lock_time = False\n self.accumulate_lock_time = 0\n self.current_maximum_lock_time = 0\n\n def Swap(self):\n \"\"\"Swaps the held piece and the current if its swappable\"\"\"\n if not self.can_swap:\n return\n\n try:\n self.mutex_current_piece.acquire()\n t = self.held_piece\n self.held_piece = self.current_piece\n self.current_piece = t\n\n if not self.current_piece:\n self._TakePieceFromList()\n\n self.current_piece.Init()\n self.held_piece.Init()\n self.can_swap = False\n finally:\n self.mutex_current_piece.release()\n\n def CheckGameOver(self):\n self.is_gameover = np.any(\n self.GetMapArea((0, 0), (self.map_height_padding, self.width)) != 0)\n\n return self.is_gameover\n\n def _AnalyzeElimination(self, n_eliminate: int) -> int:\n ret = 0\n is_last_put_t = isinstance(self.last_put_piece, shape.T)\n if n_eliminate == 1:\n if (is_last_put_t and self.last_action and self.last_action.rotation != 0):\n print(\"TSS\")\n ret += TSS\n self.line_tobesent += ATTACK_TSS\n else:\n ret += SINGLE\n\n if n_eliminate == 2:\n # TSD\n if (is_last_put_t and self.last_action and self.last_action.rotation != 0):\n print(\"TSD\")\n ret += TSD\n self.line_tobesent += ATTACK_TSD\n # Normal Double\n else:\n ret += DOUBLE\n self.line_tobesent += ATTACK_DOUBLE\n if n_eliminate == 3:\n # TST\n if (is_last_put_t and self.last_action and self.last_action.rotation != 0):\n print(\"TST\")\n ret += TST\n self.line_tobesent += ATTACK_TST\n else:\n ret += TRIPLE\n self.line_tobesent += ATTACK_TRIPLE\n\n if n_eliminate == 4:\n ret += QUAD\n self.line_tobesent += ATTACK_QUAD\n\n # Checks for PC\n if np.all(self.color_map == 0):\n print(\"PC\")\n ret += PC\n self.line_tobesent += ATTACK_PC\n\n return ret * (self.level + 3)\n\n def _LineClear(self):\n elimated_lines = []\n 
elimated_cnt = 0\n # Checks the 4 lines... This is not adapt to shape with higher than 4 lines\n # but that's not a part of this game. I don't have plan to support custom\n # shapes.\n for row in range(4):\n if not (self.last_put_piece.x + row >= 0 and\n self.last_put_piece.x + row < self.height + self.map_height_padding):\n continue\n if np.all(self.color_map[self.last_put_piece.x + row, :] != 0):\n elimated_lines.append(row + self.last_put_piece.x)\n elimated_cnt += 1\n\n self.color_map = np.vstack((np.zeros((elimated_cnt, self.width),\n dtype=self.dtype),\n np.delete(self.color_map, elimated_lines, axis=0)))\n\n # Updates the bit_map\n side_padding = (1 << self.map_side_padding) - 1\n init_row = (side_padding << (self.map_side_padding + self.width)) | side_padding\n self.bit_map = np.concatenate((elimated_cnt * [init_row],\n np.delete(self.bit_map, elimated_lines))).astype(self.dtype)\n\n self.accumulated_lines_eliminated += elimated_cnt\n self.score += self._AnalyzeElimination(n_eliminate=elimated_cnt)\n\n def _SendAttack(self):\n \"\"\"Send attack to target.\"\"\"\n # This feature has not been implemented yet.\n self.line_sent += self.line_tobesent\n self.line_tobesent = 0\n\n def PutPiece(self, piece: shape.Shape = None):\n \"\"\" Puts a piece to color_map if it is a valid placement then execute the post processing.\n\n :param piece: The piece to put, if None, put the self.current_piece\n :param color_map: The color_map where the piece puts, if None, self.color_map will be used.\n :returns: True if the piece has been put. False otherwise.\n \"\"\"\n if self._PrePutPiece(piece):\n self._PostPutPiece(piece)\n return True\n else:\n return False\n\n def _PrePutPiece(self, piece: shape.Shape = None, map: np.array = None):\n \"\"\" Puts a piece to color_map if it is a valid placement.\n Post put processing such as self._LineClear will not be executed\n\n :param piece: The piece to put, if None, put the self.current_piece\n :param map: The color_map where the piece puts, if None, self.color_map will be used.\n :returns: True if the piece has been put. False otherwise.\n \"\"\"\n try:\n if not piece:\n self.mutex_current_piece.acquire()\n piece = self.current_piece\n\n if map is None:\n map = self.color_map\n\n if not self.CheckValidity(piece):\n return False\n\n for (i, j) in piece.GetShape():\n self.SetMap((piece.x + i, piece.y + j), piece.id, map)\n return True\n finally:\n if self.mutex_current_piece.locked():\n self.mutex_current_piece.release()\n\n def _PostPutPiece(self, piece: shape.Shape = None):\n if piece is not None:\n self.last_put_piece = piece\n else:\n self.last_put_piece = self.current_piece\n\n # LineClear should be called prior to SendAttack\n self._LineClear()\n if piece is None:\n self._TakePieceFromList()\n\n self.CheckGameOver()\n self._ResetLockTime()\n self._SendAttack()\n self.can_swap = True\n self.piece_dropped += 1\n\n def TextDraw(self):\n preview_map = self.color_map.copy()\n self._PrePutPiece(self.current_piece, preview_map)\n for i in preview_map:\n print(i)\n print()\n\n def SpawnPiece(self, piece: shape.Shape = None) -> bool:\n if not piece:\n self._TakePieceFromList()\n else:\n self.current_piece = piece.copy()\n\n return self.CheckValidity(self.current_piece)\n\n def _FindFittedPiece(self, piece: shape.Shape = None, num_90rotations: int = 0):\n \"\"\"Finds a location that fits this piece with n 90rotations.\n Ref: https://tetris.fandom.com/wiki/SRS\n :param piece: The piece to be put in the color_map. 
If none, it will be set to the current_piece\n :param num_90rotations: How many 90 rotations\n :return: piece - shape.Shape: the piece with rotations that fits the color_map.\n \"\"\"\n if not piece:\n piece = self.current_piece\n\n def _IsJLSTZ(piece: shape.Shape):\n jlstz = [shape.J, shape.L, shape.S, shape.T, shape.Z]\n for s in jlstz:\n if isinstance(piece, s):\n return True\n return False\n\n # The 180 rotation wall kick table is copied from\n # https://tetris.fandom.com/wiki/SRS#180.C2.B0_rotation\n # which is origined from\n # https://github.com/JoshuaWebb/nullpomino/blob/master/src/mu/nu/nullpo/game/subsystem/wallkick/StandardWallkick.java\n offset_map_jlstz = [\n # state 0\n ([(0, 0), (0, -1), (-1, -1), (2, 0), (2, -1)], # 0>>1\n # 0>>2, 180 rotation\n # [(0,0), (1, 0), (2, 0), (1, 1), (2, 1), (-1, 0), (-2, 0), (-1, 1), (-2, 1), (0, -1), (3, 0), (-3, 0)],\n [(0, 0)],\n [(0, 0), (0, 1), (-1, 1), (2, 0), (2, 1)]), # 0>>3\n\n # state 1\n ([(0, 0), (0, 1), (1, 1), (-2, 0), (-2, 1)], # 1>>2\n # l>>3, 180 rotation\n # [(0,0), (0, 1), (0, 2), (-1, 1), (-1, 2), (0, -1), (0, -2), (-1, -1), (-1, -2), (1, 0), (0, 3), (0, -3)],\n [(0, 0)],\n [(0, 0), (0, 1), (1, 1), (-2, 0), (-2, 1)]), # 1>>0\n\n # state 2\n ([(0, 0), (0, 1), (-1, 1), (2, 0), (2, 1)], # 2>>3\n # [(0,0), (-1, 0), (-2, 0), (-1, -1), (-2, -1), (1, 0), (2, 0), (1, -1), (2, -1), (0, 1), (-3, 0), (3, 0)], # 2>>0,\n [(0, 0)],\n [(0, 0), (0, -1), (-1, -1), (2, 0), (2, -1)]), # 2>>1\n\n # state 3\n ([(0, 0), (0, -1), (1, -1), (2, 0), (-2, -1)], # 3>>0\n # 3>>1, 180 rotation\n # [(0,0), (0, 1), (0, 2), (1, 1), (1, 2), (0, -1), (0, -2), (1, -1), (1, -2), (-1, 0), (0, 3), (0, -3)],\n [(0, 0)],\n [(0, 0), (0, -1), (1, -1), (2, 0), (-2, -1)]), # 3>>2\n ]\n\n offset_map_i = [\n # state 0\n [[(0, 0), (0, -2), (0, 1), (1, -2), (-2, 1), ], # 0>>1\n # [(0,0), (-1, 0), (-2, 0), (1, 0), (2, 0), (0, 1)], # 0>>2, 180 rotation\n [(0, 0)],\n [(0, 0), (0, -1), (0, 2), (-2, -1), (1, 2)]], # 0>>3\n\n # state 1\n [[(0, 0), (0, -1), (0, 2), (-2, -1), (1, 2)], # 1>>2\n # [(0,0), (0, 1), (0, 2), (0, -1), (0, -2), (-1, 0)], # 1>>3, 180 rotation,\n [(0, 0)],\n [(0, 0), (0, 2), (0, -1), (-1, 2), (2, -1)]], # 1>>0\n\n # state 2\n [[(0, 0), (0, 2), (0, -1), (-1, 2), (2, -1)], # 2>>3\n # [(0, 0), (1, 0), (2, 0), (-1, 0), (-2, 0), (0, -1)], # 2>>0, 180 rotation\n [(0, 0)],\n [(0, 0), (0, 1), (0, -2), (2, 1), (-1, -2)]], # 2>>1\n\n # state 3\n [[(0, 0), (0, 1), (0, -2), (2, 1), (-1, -2)], # 3>>0\n # [(0, 0), (0, 1), (0, 2), (0, -1), (0, -2), (1, 0)], # 3>>1, 180 rotation\n [(0, 0)],\n [(0, 0), (0, -2), (0, 1), (1, -2), (2, 1)]], # 3>>2\n ]\n\n state = piece.state\n num_90rotations %= 4\n offset_piece = piece.copy()\n ori_x = offset_piece.x\n ori_y = offset_piece.y\n\n for _ in range(num_90rotations):\n offset_piece.Rotate90()\n\n if num_90rotations == 0:\n if self.CheckValidity(offset_piece):\n return offset_piece\n num_90rotations -= 1\n\n if _IsJLSTZ(piece):\n for (offset_x, offset_y) in offset_map_jlstz[state][num_90rotations]:\n offset_piece.x = ori_x + offset_x\n offset_piece.y = ori_y + offset_y\n if (offset_piece.y >= self.width or\n offset_piece.x >= self.height + self.map_height_padding):\n continue\n if self.CheckValidity(offset_piece):\n return offset_piece\n else:\n for (offset_x, offset_y) in offset_map_i[state][num_90rotations]:\n offset_piece.x = ori_x + offset_x\n offset_piece.y = ori_y + offset_y\n if (offset_piece.y >= self.width or\n offset_piece.x >= self.height + self.map_height_padding):\n continue\n if self.CheckValidity(offset_piece):\n 
return offset_piece\n\n return None\n\n def Rotate(self, n: int) -> bool:\n \"\"\"Rotates the current piece.\n :param n: rotations, in range [0,4)\n :return: True if the current piece can be rotated. False otherwise.\n \"\"\"\n n %= 4\n if n == 0:\n return False\n\n fitted_piece = self._FindFittedPiece(num_90rotations=n)\n if fitted_piece:\n self.current_piece = fitted_piece\n self.last_action = actions.Action(dir=0, rotation=n)\n\n if not self.CheckValidity(self.current_piece, (1, 0)):\n self._RefreshLockTime()\n\n return fitted_piece is not None\n\n def CheckValidity(self, piece: shape.Shape, offset: Tuple[int, int] = (0, 0)):\n \"\"\"Checks if the piece with offset can be put in the color_map\n :param piece: The piece to be put.\n :param offset: The inital offset to the piece\n :return: True if the current state can fit into the color_map. False otherwise.\n \"\"\"\n (ox, oy, os) = (piece.x, piece.y, piece.state)\n piece.x += offset[0]\n piece.y += offset[1]\n\n a = self.bit_map[piece.x: piece.x + 4]\n b = self.width - piece.y\n c = piece.GetBitMap().astype(self.dtype)\n d = c << b\n e = a & d\n check_rst = e == 0\n (piece.x, piece.y, piece.state) = (ox, oy, os)\n return np.all(check_rst)\n\n def _GetNextBag(self):\n start_y = int((self.width - 3) / 2)\n assert start_y >= 0\n\n bag = [shape.I(start_y=start_y),\n shape.J(start_y=start_y),\n shape.L(start_y=start_y),\n shape.O(start_y=start_y),\n shape.S(start_y=start_y),\n shape.T(start_y=start_y),\n shape.Z(start_y=start_y)]\n np.random.shuffle(bag)\n return bag\n\n def _RefillPieces(self):\n \"\"\"\n When there are less than REFILL_THRESHOLD pieces in the list,\n refill it with a new bag.\n \"\"\"\n if len(self.piece_list) <= REFILL_THRESHOLD:\n self.piece_list.extend(self._GetNextBag())\n\n def _TakePieceFromList(self):\n self._RefillPieces()\n self.current_piece = self.piece_list[0].copy()\n self.piece_list = self.piece_list[1:]\n\n\ndef CreateGameFromState(state: GameState) -> GameClient:\n game = GameClient(height=state.height, width=state.width)\n game.color_map = np.copy(state.color_map)\n game.current_piece = state.current_piece.copy()\n if state.held_piece is not None:\n game.held_piece = state.held_piece.copy()\n else:\n game.held_piece = None\n game.score = state.score\n game.piece_list = state.piece_list.copy()\n game.can_swap = state.can_swap\n game.is_gameover = state.is_gameover\n game.accumulated_lines_eliminated = state.accumulated_lines_eliminated\n game.piece_dropped = state.piece_dropped\n game.line_sent = state.line_sent\n game.line_received = state.line_received\n return game\n" ]
[ [ "numpy.random.shuffle", "numpy.zeros", "numpy.packbits", "numpy.copy", "numpy.all", "numpy.min", "numpy.delete", "numpy.array", "numpy.pad" ] ]
ZackPashkin/pytorch
[ "5b1f5c8f17ec4067dc9f9df98bbcc6757ab24444" ]
[ "test/test_binary_ufuncs.py" ]
[ "import torch\nimport numpy as np\n\nimport itertools\nfrom itertools import product\nimport math\nimport random\nimport unittest\nimport warnings\nimport operator\nfrom functools import partial\n\nfrom torch._six import inf, nan\nfrom torch.testing._internal.common_utils import (\n TestCase, iter_indices, TEST_WITH_ASAN, run_tests,\n torch_to_numpy_dtype_dict, make_tensor, TEST_SCIPY, set_default_dtype)\nfrom torch.testing._internal.common_device_type import (\n instantiate_device_type_tests, onlyCUDA, onlyCPU, dtypes, dtypesIfCUDA,\n dtypesIfCPU, deviceCountAtLeast, precisionOverride, onlyOnCPUAndCUDA,\n skipCUDAIfRocm, skipIf)\nfrom torch.testing import all_types_and_complex_and\n\nif TEST_SCIPY:\n import scipy.special\n\n# TODO: remove this\ndef _generate_input(shape, dtype, device, with_extremal):\n if shape == ():\n x = torch.tensor((), dtype=dtype, device=device)\n else:\n if dtype.is_floating_point or dtype.is_complex:\n # work around torch.randn not being implemented for bfloat16\n if dtype == torch.bfloat16:\n x = torch.randn(*shape, device=device) * random.randint(30, 100)\n x = x.to(torch.bfloat16)\n else:\n x = torch.randn(*shape, dtype=dtype, device=device) * random.randint(30, 100)\n x[torch.randn(*shape) > 0.5] = 0\n if with_extremal and dtype.is_floating_point:\n # Use extremal values\n x[torch.randn(*shape) > 0.5] = float('nan')\n x[torch.randn(*shape) > 0.5] = float('inf')\n x[torch.randn(*shape) > 0.5] = float('-inf')\n elif with_extremal and dtype.is_complex:\n x[torch.randn(*shape) > 0.5] = complex('nan')\n x[torch.randn(*shape) > 0.5] = complex('inf')\n x[torch.randn(*shape) > 0.5] = complex('-inf')\n elif dtype == torch.bool:\n x = torch.zeros(shape, dtype=dtype, device=device)\n x[torch.randn(*shape) > 0.5] = True\n else:\n x = torch.randint(15, 100, shape, dtype=dtype, device=device)\n\n return x\n\n# TODO: refactor this out\n# Converts half/bfloat16 dtype to float when device is cpu\ndef _convert_t(dtype, device):\n if device == 'cpu' and dtype in {torch.half, torch.bfloat16}:\n return torch.float\n return dtype\n\n# TODO: revise the tests to use make_tensor in common_utils.py instead\n# Returns a tensor of the requested shape, dtype, and device\n# Requesting a half CPU tensor returns a float CPU tensor with\n# values representable by a half.\n# Initialization uses randint for non-float types and randn for float types.\ndef _make_tensor(shape, dtype, device, fill_ones=False) -> torch.Tensor:\n # Returns a tensor filled with ones\n if fill_ones:\n return torch.ones(*shape, dtype=_convert_t(dtype, device), device=device)\n\n # Returns a tensor with random integer values\n if not (dtype.is_floating_point or dtype.is_complex):\n t = torch.randint(0, 10, shape, device=device)\n if dtype != torch.uint8:\n t = t - 5 # generate negative values also\n return t.to(_convert_t(dtype, device))\n\n # Populates the CPU tensor with floats representable as half/bfloat16\n if dtype == torch.half and device == 'cpu':\n return torch.randn(*shape, dtype=torch.float, device=device).half().float()\n if dtype == torch.bfloat16 and device == 'cpu':\n return torch.randn(*shape, dtype=torch.float, device=device).bfloat16().float()\n\n # Default: returns a tensor with random float values\n return torch.randn(shape, dtype=dtype, device=device).to(dtype=dtype)\n\n# TODO: update to use opinfos consistently\nclass TestBinaryUfuncs(TestCase):\n\n def test_add_broadcast_empty(self, device):\n # empty + empty\n self.assertRaises(RuntimeError, lambda: torch.randn(5, 0, device=device) + 
torch.randn(0, 5, device=device))\n self.assertEqual(torch.randn(5, 0, device=device), torch.randn(0, device=device) + torch.randn(5, 0, device=device))\n self.assertEqual(torch.randn(5, 0, 0, device=device), torch.randn(0, device=device) + torch.randn(5, 0, 1, device=device))\n\n # scalar + empty\n self.assertEqual(torch.randn(5, 0, 6, device=device), torch.randn((), device=device) + torch.randn(5, 0, 6, device=device))\n\n # non-empty, empty\n self.assertEqual(torch.randn(0, device=device), torch.randn(0, device=device) + torch.randn(1, device=device))\n self.assertEqual(torch.randn(0, 7, 0, 6, 5, 0, 7, device=device),\n torch.randn(0, 7, 0, 6, 5, 0, 1, device=device) + torch.randn(1, 1, 5, 1, 7, device=device))\n self.assertRaises(RuntimeError, lambda: torch.randn(7, 0, device=device) + torch.randn(2, 1, device=device))\n\n def test_addcmul_scalars_as_floats(self, device):\n # zero-dim variables that don't require grad should bind to scalar arguments\n x = torch.tensor(2.)\n y = torch.tensor(3., device=device)\n # 3 + (3 * 3) * 2\n self.assertEqual(y.addcmul(y, y, value=x), 21)\n\n x = torch.tensor(2., requires_grad=True)\n self.assertRaises(Exception, lambda: y.addcmul(y, y, value=x))\n\n # TODO: update to work on CUDA, too\n @onlyCPU\n def test_comparison_ops(self, device):\n x = torch.randn(5, 5)\n y = torch.randn(5, 5)\n\n eq = x == y\n for idx in iter_indices(x):\n self.assertEqual(x[idx] == y[idx], eq[idx] == 1)\n\n ne = x != y\n for idx in iter_indices(x):\n self.assertEqual(x[idx] != y[idx], ne[idx] == 1)\n\n lt = x < y\n for idx in iter_indices(x):\n self.assertEqual(x[idx] < y[idx], lt[idx] == 1)\n\n le = x <= y\n for idx in iter_indices(x):\n self.assertEqual(x[idx] <= y[idx], le[idx] == 1)\n\n gt = x > y\n for idx in iter_indices(x):\n self.assertEqual(x[idx] > y[idx], gt[idx] == 1)\n\n ge = x >= y\n for idx in iter_indices(x):\n self.assertEqual(x[idx] >= y[idx], ge[idx] == 1)\n\n # TODO: update to work on CUDA, too\n @onlyCPU\n def test_comparison_ops_must_take_bool_output(self, device):\n for op in [torch.lt, torch.le, torch.gt, torch.ge, torch.eq, torch.ne,\n torch.logical_and, torch.logical_or, torch.logical_xor]:\n self.assertEqual(op(torch.tensor([True]), torch.tensor([False])).dtype, torch.bool)\n\n # TODO: update to work on CUDA, too\n @onlyCPU\n def test_inplace_comparison_ops_require_inputs_have_same_dtype(self, device):\n with self.assertRaisesRegex(RuntimeError, 'Expected object of scalar type'):\n for op in ['lt_', 'le_', 'gt_', 'ge_', 'eq_', 'ne_', 'logical_xor_', 'logical_and_', 'logical_or_']:\n x = torch.tensor([1], dtype=torch.int)\n y = torch.tensor([2], dtype=torch.long)\n in_place_method = getattr(x, op)\n in_place_method(y)\n\n # TODO: update to work on CUDA, too\n @onlyCPU\n def test_comparison_ops_check_for_scalar_overflow(self, device):\n s = 1 << 20\n t = torch.tensor([1 << 5], dtype=torch.uint8)\n with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):\n self.assertTrue(t < s)\n with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):\n self.assertTrue(s < t)\n with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):\n self.assertTrue(t <= s)\n with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):\n self.assertTrue(s <= t)\n with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):\n self.assertTrue(t > s)\n with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):\n self.assertTrue(s > t)\n with 
self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):\n self.assertTrue(t >= s)\n with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):\n self.assertTrue(s >= t)\n with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):\n self.assertTrue(t == s)\n with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):\n self.assertTrue(s == t)\n with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):\n self.assertTrue(t != s)\n with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):\n self.assertTrue(s != t)\n\n # TODO: update to work on CUDA, too\n @onlyCPU\n def test_comparison_ops_check_for_zerodim_tensor_overflow(self, device):\n t1 = torch.tensor([1 << 5], dtype=torch.uint8)\n t2 = torch.tensor([1 << 30], dtype=torch.int32)\n ts1 = torch.tensor(1 << 20, dtype=torch.int32)\n ts2 = torch.tensor(1 << 40, dtype=torch.int64)\n with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):\n self.assertTrue(t1 < ts1)\n with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):\n self.assertTrue(ts2 < t2)\n with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):\n self.assertTrue(t1 <= ts1)\n with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):\n self.assertTrue(ts2 <= t2)\n with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):\n self.assertTrue(t1 > ts1)\n with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):\n self.assertTrue(ts2 > t2)\n with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):\n self.assertTrue(t1 >= ts1)\n with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):\n self.assertTrue(ts2 >= t2)\n with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):\n self.assertTrue(t1 == ts1)\n with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):\n self.assertTrue(ts2 == t2)\n with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):\n self.assertTrue(t1 != ts1)\n with self.assertRaisesRegex(RuntimeError, 'value cannot be converted to type'):\n self.assertTrue(ts2 != t2)\n\n # TODO: update to work on CUDA, too\n @onlyCPU\n def test_bitwise_ops(self, device):\n x = torch.randn(5, 5).gt(0)\n y = torch.randn(5, 5).gt(0)\n\n and_result = x & y\n for idx in iter_indices(x):\n if and_result[idx]:\n self.assertTrue(x[idx] and y[idx])\n else:\n self.assertFalse(x[idx] and y[idx])\n\n or_result = x | y\n for idx in iter_indices(x):\n if or_result[idx]:\n self.assertTrue(x[idx] or y[idx])\n else:\n self.assertFalse(x[idx] or y[idx])\n\n xor_result = x ^ y\n for idx in iter_indices(x):\n if xor_result[idx]:\n self.assertTrue(x[idx] ^ y[idx])\n else:\n self.assertFalse(x[idx] ^ y[idx])\n\n x_clone = x.clone()\n x_clone &= y\n self.assertEqual(x_clone, and_result)\n\n x_clone = x.clone()\n x_clone |= y\n self.assertEqual(x_clone, or_result)\n\n x_clone = x.clone()\n x_clone ^= y\n self.assertEqual(x_clone, xor_result)\n\n def test_inplace_division(self, device):\n t = torch.rand(5, 5, device=device)\n id_before = id(t)\n t /= 2\n id_after = id(t)\n self.assertEqual(id_before, id_after)\n\n @dtypes(*torch.testing.get_all_dtypes(include_bool=False, include_complex=False))\n def test_div_rounding_modes(self, device, dtype):\n if dtype.is_floating_point:\n low, high = -10.0, 10.0\n else:\n info = torch.iinfo(dtype)\n low, 
high = info.min, info.max\n\n a = make_tensor((100,), device, dtype, low=low, high=high)\n b = make_tensor((100,), device, dtype, low=low, high=high)\n\n # Avoid division by zero so we can test (a / b) * b == a\n if dtype.is_floating_point:\n eps = 0.1\n b[(-eps < b) & (b < eps)] = eps\n else:\n b[b == 0] = 1\n\n if not dtype.is_floating_point:\n # floor(a / b) * b can be < a, so fixup slightly to avoid underflow\n a = torch.where(a < 0, a + b, a)\n\n d_true = torch.divide(a, b, rounding_mode=None)\n self.assertTrue(d_true.is_floating_point())\n self.assertEqual(d_true * b, a.to(d_true.dtype))\n\n d_floor = torch.divide(a, b, rounding_mode='floor')\n if dtype not in (torch.bfloat16, torch.half):\n self.assertEqual(d_floor * b + torch.remainder(a, b), a)\n else:\n self.assertEqual(d_floor * b + torch.remainder(a.float(), b.float()), a,\n exact_dtype=False)\n\n d_trunc = torch.divide(a, b, rounding_mode='trunc')\n rounding_unsupported = (\n dtype == torch.half and device != 'cuda' or\n dtype == torch.bfloat16 and device != 'cpu')\n d_ref = d_true.float() if rounding_unsupported else d_true\n self.assertEqual(d_trunc, d_ref.trunc().to(dtype))\n\n @dtypes(torch.bfloat16, torch.half, torch.float32, torch.float64)\n def test_div_rounding_nonfinite(self, device, dtype):\n\n # Compare division of special floating point values against NumPy\n num = torch.tensor([1.0, -1.0, 0, 0.1, -0.1, np.pi, -np.pi, np.inf, -np.inf, np.nan],\n dtype=dtype)\n # Divide by zero is tested seperately\n denom = num[num != 0]\n\n a, b = num[None, :].clone(), denom[:, None].clone()\n\n # Compare bfloat16 against NumPy float\n exact_dtype = dtype != torch.bfloat16\n if exact_dtype:\n an, bn = a.cpu().numpy(), b.cpu().numpy()\n else:\n an, bn = a.float().cpu().numpy(), b.float().cpu().numpy()\n\n for mode, np_ref in ((None, np.true_divide), (\"floor\", np.floor_divide)):\n with np.errstate(all='ignore'):\n expect = np_ref(an, bn)\n kwargs = dict(rounding_mode=mode) if mode is not None else {}\n with set_default_dtype(torch.double):\n actual = torch.divide(a, b, **kwargs)\n self.assertEqual(actual, torch.from_numpy(expect),\n exact_device=False, exact_dtype=exact_dtype)\n\n # Compare contiguous (likely vectorized) against non-contiguous (not vectorized)\n a_noncontig = torch.empty([2 * i for i in a.shape], dtype=dtype, device=device)[::2, ::2]\n a_noncontig[:] = a\n b_noncontig = torch.empty([2 * i for i in b.shape], dtype=dtype, device=device)[::2, ::2]\n b_noncontig[:] = b\n\n for rounding_mode in (None, \"trunc\", \"floor\"):\n expect = torch.divide(a_noncontig, b_noncontig, rounding_mode=rounding_mode)\n actual = torch.divide(a, b, rounding_mode=rounding_mode)\n self.assertEqual(actual, expect)\n\n @dtypes(torch.bfloat16, torch.half, torch.float32, torch.float64)\n def test_divide_by_zero_rounding(self, device, dtype):\n a = torch.tensor([1.0, -1.0, 0, 0.1, -0.1, np.pi, -np.pi, np.inf, -np.inf, np.nan],\n dtype=dtype)\n exact_dtype = (dtype != torch.bfloat16)\n if exact_dtype:\n an = a.cpu().numpy()\n else:\n an = a.float().cpu().numpy()\n\n zero = torch.zeros_like(a)\n\n # NOTE: NumPy's floor_divide rounding changed in 1.20.0 to be consistent with divide\n expect = np.divide(an, 0)\n for rounding_mode in (None, 'floor'):\n # CPU scalar\n actual = torch.divide(a, 0, rounding_mode=rounding_mode)\n self.assertEqual(actual, expect, exact_dtype=exact_dtype)\n # Device tensor\n actual = torch.divide(a, zero, rounding_mode=rounding_mode)\n self.assertEqual(actual, expect, exact_dtype=exact_dtype)\n\n 
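    # ------------------------------------------------------------------
    # Editor's sketch (illustrative only, not part of the original PyTorch
    # test file): the divide tests above exercise torch.divide's
    # rounding_mode argument. As a quick reference, using only the public
    # torch.divide API already imported in this file:
    #   rounding_mode=None    -> true division, result is floating point
    #   rounding_mode='trunc' -> divide, then round toward zero
    #   rounding_mode='floor' -> divide, then round toward negative infinity
    # The helper name below is hypothetical and is not collected as a test.
    def _sketch_div_rounding_modes(self):
        a = torch.tensor([7.0, -7.0])
        b = torch.tensor([2.0, 2.0])
        true_div = torch.divide(a, b)                           # tensor([ 3.5000, -3.5000])
        trunc_div = torch.divide(a, b, rounding_mode='trunc')   # tensor([ 3., -3.])
        floor_div = torch.divide(a, b, rounding_mode='floor')   # tensor([ 3., -4.])
        return true_div, trunc_div, floor_div
    # ------------------------------------------------------------------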
@dtypes(*torch.testing.get_all_dtypes(\n include_bool=False, include_complex=False, include_bfloat16=False))\n def test_div_rounding_numpy(self, device, dtype):\n info = (torch.finfo(dtype) if dtype.is_floating_point\n else torch.iinfo(dtype))\n low, high = info.min, info.max\n\n # Compare division of random values against NumPy\n a = make_tensor((4096,), device, dtype, low=low, high=high)\n b = make_tensor((4096,), device, dtype, low=low, high=high)\n\n # Avoid division by zero which raises for integers and, for floats,\n # NumPy 1.20 changed floor_divide to follow IEEE rules for inf/nan\n # after dividing by zero.\n b[b == 0] = 1\n\n # Compare bfloat16 against NumPy float\n exact_dtype = dtype != torch.bfloat16\n\n if exact_dtype:\n an, bn = a.cpu().numpy(), b.cpu().numpy()\n else:\n an, bn = a.float().cpu().numpy(), b.float().cpu().numpy()\n\n for mode, np_ref in (\n (None, np.true_divide),\n (\"floor\", np.floor_divide),\n (\"trunc\", lambda a, b: np.trunc(np.true_divide(a, b)).astype(a.dtype))\n ):\n with np.errstate(all='ignore'):\n expect = torch.from_numpy(np_ref(an, bn))\n\n kwargs = dict(rounding_mode=mode) if mode is not None else {}\n # Contiguous (likely vectorized)\n with set_default_dtype(torch.double):\n actual = torch.divide(a, b, **kwargs)\n self.assertEqual(actual, expect, exact_device=False, exact_dtype=exact_dtype)\n\n # Non-contiguous (not vectorized)\n expect = expect[::2]\n with set_default_dtype(torch.double):\n actual = torch.divide(a[::2], b[::2], **kwargs)\n\n self.assertEqual(actual, expect, exact_device=False, exact_dtype=exact_dtype)\n\n # Tests that trying to add, inplace, a CUDA tensor to a CPU tensor\n # throws the correct error message\n @onlyCUDA\n def test_cross_device_inplace_error_msg(self, device):\n a = torch.tensor(2.)\n b = torch.tensor(2., device=device)\n with self.assertRaisesRegex(RuntimeError,\n \"Expected all tensors to be on the same device\"):\n a += b\n\n # TODO: refactor this test into a more generic one, it's parked here currently\n @onlyOnCPUAndCUDA\n def test_out_resize_warning(self, device):\n a = torch.tensor((1, 2, 3), device=device, dtype=torch.float32)\n b = torch.tensor((4, 5, 6), device=device, dtype=torch.float32)\n\n unary_inputs = (a,)\n binary_inputs = (a, b)\n unary_ops = (torch.ceil, torch.exp)\n binary_ops = (torch.add, torch.sub)\n for op in (unary_ops + binary_ops):\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n inputs = unary_inputs if op in unary_ops else binary_inputs\n\n # No warnings\n op(*inputs, out=torch.empty(3, device=device))\n op(*inputs, out=torch.empty(0, device=device))\n self.assertEqual(len(w), 0)\n\n # Cases that throw warnings\n op(*inputs, out=torch.empty(2, device=device))\n self.assertEqual(len(w), 1)\n\n # Verifies that the inplace dunders (like idiv) actually are in place\n @onlyOnCPUAndCUDA\n def test_inplace_dunders(self, device):\n t = torch.randn((1,), device=device)\n expected = t.data_ptr()\n t += 1\n t -= 1\n t *= 1\n t /= 1\n with self.assertWarnsOnceRegex(UserWarning, 'floor_divide'):\n t //= 1\n t %= 1\n self.assertEqual(expected, t.data_ptr())\n\n def check_internal_mem_overlap(self, inplace_op, num_inputs,\n dtype, device,\n expected_failure=False):\n if isinstance(inplace_op, str):\n inplace_op = getattr(torch.Tensor, inplace_op)\n input = torch.randn(1, dtype=dtype, device=device).expand(3, 3)\n inputs = [input] + [torch.randn_like(input)\n for i in range(num_inputs - 1)]\n if not expected_failure:\n with self.assertRaisesRegex(RuntimeError, 
'single memory location'):\n inplace_op(*inputs)\n else:\n with self.assertRaises(AssertionError):\n with self.assertRaisesRegex(RuntimeError, 'single memory location'):\n inplace_op(*inputs)\n\n def unary_check_input_output_mem_overlap(self, data, sz, op,\n expected_failure=False):\n\n def _test(op, output, input):\n output_exp = torch.empty_like(output)\n op(input, out=output_exp)\n self.assertEqual(op(input, out=output), output_exp, msg=op.__name__)\n\n # output is identical to input:\n _test(op, output=data[0:sz], input=data[0:sz])\n # output and input are independent:\n _test(op, output=data[0:sz], input=data[sz:2 * sz])\n # output partially overlaps with input:\n if not expected_failure:\n with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):\n _test(op, data[0:sz], data[1:sz + 1])\n else:\n with self.assertRaises(AssertionError):\n with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):\n _test(op, data[0:sz], data[1:sz + 1])\n\n def binary_check_input_output_mem_overlap(self, op, device,\n expected_failure=False):\n sz = 3\n data = torch.randn(2 * sz, device=device)\n other = torch.randn(sz, device=device)\n\n self.unary_check_input_output_mem_overlap(\n data, sz, lambda input, out: op(other, input, out=out),\n expected_failure=expected_failure)\n\n self.unary_check_input_output_mem_overlap(\n data, sz, lambda input, out: op(input, other, out=out),\n expected_failure=expected_failure)\n\n @dtypes(torch.double)\n def test_binary_op_mem_overlap(self, device, dtype):\n ops = [\n (\"add\", True, True, 'cpu'),\n (\"add\", True, True, 'cuda'),\n (\"mul\", True, True, 'cpu'),\n (\"mul\", True, True, 'cuda'),\n (\"sub\", True, True, 'cpu'),\n (\"sub\", True, True, 'cuda'),\n (\"div\", True, True, 'cpu'),\n (\"div\", True, True, 'cuda'),\n (\"pow\", True, True, 'cpu'),\n (\"pow\", True, True, 'cuda'),\n (\"fmod\", True, True, 'cpu'),\n (\"fmod\", True, True, 'cuda'),\n (\"atan2\", True, True, 'cpu'),\n (\"atan2\", True, True, 'cuda'),\n (\"hypot\", True, True, 'cpu'),\n (\"hypot\", True, True, 'cuda'),\n (\"igamma\", True, True, 'cpu'),\n (\"igamma\", True, True, 'cuda'),\n (\"igammac\", True, True, 'cpu'),\n (\"igammac\", True, True, 'cuda'),\n (\"nextafter\", True, True, 'cpu'),\n (\"nextafter\", True, True, 'cuda'),\n (\"le\", True, True, 'cpu'),\n (\"le\", True, True, 'cuda'),\n (\"lt\", True, True, 'cpu'),\n (\"lt\", True, True, 'cuda'),\n (\"ge\", True, True, 'cpu'),\n (\"ge\", True, True, 'cuda'),\n (\"gt\", True, True, 'cpu'),\n (\"gt\", True, True, 'cuda'),\n (\"eq\", True, True, 'cpu'),\n (\"eq\", True, True, 'cuda'),\n (\"ne\", True, True, 'cpu'),\n (\"ne\", True, True, 'cuda'),\n (\"logical_and\", True, True, 'cpu'),\n (\"logical_and\", True, True, 'cuda'),\n (\"logical_or\", True, True, 'cpu'),\n (\"logical_or\", True, True, 'cuda'),\n (\"logical_xor\", True, True, 'cpu'),\n (\"logical_xor\", True, True, 'cuda'),\n ]\n\n for (fn, has_input_output_mem_overlap_check,\n has_internal_mem_overlap_check, dev) in ops:\n if dev != device:\n continue\n out_op = getattr(torch, fn)\n inplace_op = getattr(torch.Tensor, fn + '_')\n self.check_internal_mem_overlap(\n inplace_op, 2, dtype, device,\n expected_failure=not has_internal_mem_overlap_check)\n\n self.binary_check_input_output_mem_overlap(out_op, device,\n expected_failure=not has_input_output_mem_overlap_check)\n\n def _do_pow_for_exponents(self, m1, exponents, pow_fn, atol):\n for num in exponents:\n if isinstance(num, int) and num < 0 and not m1.is_floating_point() and not m1.is_complex():\n with 
self.assertRaisesRegex(RuntimeError,\n r'Integers to negative integer powers are not allowed\\.'):\n torch.pow(m1[4], num)\n else:\n # base - tensor, exponent - number\n # contiguous\n res1 = torch.pow(m1[4], num)\n res2 = res1.clone().zero_()\n # `math.pow` has issues with complex exponentiation so we need to resort to normal `pow`.\n for i in range(res2.size(0)):\n res2[i] = pow_fn(m1[4][i], num)\n rtol = 0 if atol is not None else None\n self.assertEqual(res1, res2, atol=atol, rtol=rtol)\n\n # non-contiguous\n res1 = torch.pow(m1[:, 4], num)\n res2 = res1.clone().zero_()\n for i in range(res2.size(0)):\n res2[i] = pow_fn(m1[i, 4], num)\n self.assertEqual(res1, res2, atol=atol, rtol=rtol)\n\n # scalar ** tensor to enforce correct handling of dtypes for __rpow__().\n expected_dtype = torch.result_type(num, m1)\n res1 = num ** m1[4]\n res2 = torch.tensor(num, dtype=expected_dtype, device=m1.device) ** m1[4]\n self.assertEqual(res1, res2)\n self.assertEqual(res1.dtype, expected_dtype)\n\n @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16))\n def test_pow(self, device, dtype):\n m1 = torch.empty(0, dtype=dtype, device=device)\n if m1.is_floating_point() or m1.is_complex():\n m1 = make_tensor((100, 100), low=0, high=1, dtype=dtype, device=device) + 0.5\n else:\n # math.pow will overflow and throw exceptions for large integers\n range_high = 4 if dtype in (torch.int8, torch.uint8) else 10\n m1 = make_tensor((100, 100), low=1, high=range_high, dtype=dtype, device=device)\n\n exponents = [-2.8, -2, -1, -0.5, 0, 0.5, 1, 2, 3, 4, 3.3]\n complex_exponents = [-2.5j, -1.0j, 0j, 1.0j, 2.5j, 1.0 + 1.0j, -1.0 - 1.5j, 3.3j]\n if m1.is_complex():\n self._do_pow_for_exponents(m1, exponents + complex_exponents, pow, 10e-4)\n else:\n self._do_pow_for_exponents(m1, exponents, math.pow, None)\n self._do_pow_for_exponents(m1, complex_exponents, pow, 10e-4)\n\n # base - number, exponent - tensor\n # contiguous\n res1 = torch.pow(3, m1[4])\n res2 = res1.clone().zero_()\n for i in range(res2.size(0)):\n res2[i] = pow(3, m1[4, i])\n self.assertEqual(res1, res2)\n\n # non-contiguous\n res1 = torch.pow(3, m1[:, 4])\n res2 = res1.clone().zero_()\n for i in range(res2.size(0)):\n res2[i] = pow(3, m1[i][4])\n self.assertEqual(res1, res2)\n\n # TODO: refactor all these tests using opinfos properly\n def _test_pow(self, base, exponent, np_exponent=None):\n if np_exponent is None:\n np_exponent = exponent\n\n def to_np(value):\n if isinstance(value, torch.Tensor):\n return value.cpu().numpy()\n return value\n\n try:\n np_res = np.power(to_np(base), to_np(np_exponent))\n expected = torch.from_numpy(np_res) if isinstance(np_res, np.ndarray) else torch.tensor(np_res, dtype=base.dtype)\n except ValueError as e:\n err_msg = \"Integers to negative integer powers are not allowed.\"\n self.assertEqual(str(e), err_msg)\n out = torch.empty_like(base)\n test_cases = [\n lambda: base.pow(exponent),\n lambda: base.pow_(exponent),\n lambda: torch.pow(base, exponent),\n lambda: torch.pow(base, exponent, out=out)\n ]\n for test_case in test_cases:\n self.assertRaisesRegex(RuntimeError, err_msg, test_case)\n else:\n if isinstance(base, torch.Tensor):\n actual = base.pow(exponent)\n self.assertEqual(actual, expected.to(actual))\n actual = base.clone()\n # When base is a 0-dim cpu tensor and exp is a cuda tensor, we exp `pow` to work but `pow_` to fail, since\n # `pow` will try to create the output tensor on a cuda device, but `pow_` needs to use the cpu tensor as the output\n if (isinstance(exponent, torch.Tensor) and base.dim() 
== 0 and base.device.type == 'cpu' and\n exponent.device.type == 'cuda'):\n regex = 'Expected all tensors to be on the same device, but found at least two devices, cuda.* and cpu!'\n self.assertRaisesRegex(RuntimeError, regex, base.pow_, exponent)\n elif torch.can_cast(torch.result_type(base, exponent), base.dtype):\n actual2 = actual.pow_(exponent)\n self.assertEqual(actual, expected)\n self.assertEqual(actual2, expected)\n else:\n self.assertRaisesRegex(RuntimeError, \"Found dtype \\\\w+ but expected \\\\w+\", lambda: actual.pow_(exponent))\n\n actual = torch.pow(base, exponent)\n self.assertEqual(actual, expected.to(actual))\n\n actual2 = torch.pow(base, exponent, out=actual)\n self.assertEqual(actual, expected.to(actual))\n self.assertEqual(actual2, expected.to(actual))\n\n # Tests pow() for integral, floating-type tensors, with integral, floating-type\n # exponents (tensor or scalar), respectively. noncontiguous tensors are also tested.\n def test_int_and_float_pow(self, device):\n\n def _test_int_and_float_pow(dt, low, high, dev):\n test_cases = (\n ((4, 4), 0, (4, 1)),\n ((3, 1), 4, (3, 1)),\n ((2,), 4, (1,)),\n ((1,), 2, ()),\n ((513, 513), 4, (513,)),\n ((5, 5, 5), 5, (5,)),\n ((), 2, ()),\n )\n for base_shape, exp_scalar, exp_shape in test_cases:\n base_tensor = make_tensor(base_shape, dtype=dt, device=dev, low=low, high=high)\n # int tensors don't take negative exponents\n if dt in [torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64]:\n exp_tensor = make_tensor(exp_shape, dtype=dt, device=dev, low=0, high=high)\n else:\n exp_tensor = make_tensor(exp_shape, dtype=dt, device=dev, low=low, high=high)\n self._test_pow(base_tensor, exp_scalar)\n self._test_pow(base_tensor, exp_tensor)\n # test non-contiguous tensors as well\n base_tensor = make_tensor(base_shape, dtype=dt, device=dev, low=low, high=high,\n noncontiguous=True)\n if dt in [torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64]:\n exp_tensor = make_tensor(exp_shape, dtype=dt, device=dev, low=0, high=high,\n noncontiguous=True)\n else:\n exp_tensor = make_tensor(exp_shape, dtype=dt, device=dev, low=low, high=high,\n noncontiguous=True)\n self._test_pow(base_tensor, exp_scalar)\n self._test_pow(base_tensor, exp_tensor)\n\n _test_int_and_float_pow(torch.int8, -2, 2, device)\n _test_int_and_float_pow(torch.uint8, 0, 3, device)\n _test_int_and_float_pow(torch.int16, -5, 5, device)\n _test_int_and_float_pow(torch.int64, -10, 10, device)\n _test_int_and_float_pow(torch.int32, -10, 10, device)\n _test_int_and_float_pow(torch.float16, 0., 5., device)\n _test_int_and_float_pow(torch.float32, 0., 10., device)\n _test_int_and_float_pow(torch.float64, 0., 10., device)\n # pow's output would have some NaNs as well\n _test_int_and_float_pow(torch.float32, -10., 10., device)\n _test_int_and_float_pow(torch.float64, -10., 10., device)\n\n # Tests that a Runtime error occurs when a base tensor cannot be resized\n # by pow's inplace variant due to PyTorch's broadcasting semantics.\n def test_pow_inplace_resizing_exception(self, device):\n test_cases = (\n ((), (3,)),\n ((2,), (2, 1)),\n ((2, 1), (2, 2)),\n ((2, 2), (2, 1, 1)),\n )\n test_inputs = list((make_tensor(base_size, dtype=torch.float64, device=device,\n high=10., low=0.),\n make_tensor(exp_size, dtype=torch.float64, device=device,\n high=10., low=0.))\n for base_size, exp_size in test_cases)\n for base, exponent in test_inputs:\n regex = \"doesn't match the broadcast shape\"\n self.assertRaisesRegex(RuntimeError, regex, base.pow_, exponent)\n\n def 
test_int_tensor_pow_neg_ints(self, device):\n ints = [torch.iinfo(torch.int32).min,\n -3, -2, -1, 0, 1, 2, 3,\n torch.iinfo(torch.int32).max]\n neg_ints = [torch.iinfo(torch.int32).min, -3, -2, -1]\n tensor = torch.tensor(ints, dtype=torch.int32, device=device)\n for pow in neg_ints:\n self._test_pow(tensor, pow)\n\n def test_long_tensor_pow_floats(self, device):\n ints = [0, 1, 23, 4567]\n floats = [0.0, 1 / 3, 1 / 2, 1.0, 3 / 2, 2.0]\n tensor = torch.tensor(ints, dtype=torch.int64, device=device)\n for pow in floats:\n self._test_pow(tensor, pow)\n\n @dtypes(*[torch.float32, torch.float64])\n def test_float_scalar_pow_float_tensor(self, device, dtype):\n floats = [2.0, -3 / 2, -1.0, -1 / 2, -1 / 3, 0.0,\n 1 / 3, 1 / 2, 1.0, 3 / 2, 2.0]\n exponent_shapes = (\n (1,),\n (2, 2),\n (2, 1),\n (2, 2, 2),\n )\n tensors = list(make_tensor(shape, dtype=dtype, device=device, low=0)\n for shape in exponent_shapes)\n floats_tensor = torch.tensor(floats, dtype=dtype, device=device)\n for base in floats:\n self._test_pow(base, floats_tensor)\n for tensor in tensors:\n self._test_pow(base, tensor)\n\n @onlyCUDA\n def test_cuda_tensor_pow_scalar_tensor(self, device):\n cuda_tensors = [torch.randn((3, 3), device=device), torch.tensor(3.0, device=device)]\n scalar_tensors = [torch.tensor(5.0, device='cpu'), torch.tensor(-3), torch.tensor(1)]\n for base, exp in product(cuda_tensors, scalar_tensors):\n self._test_pow(base, exp)\n\n @onlyCUDA\n def test_cpu_tensor_pow_cuda_scalar_tensor(self, device):\n cuda_tensors = [torch.tensor(5.0, device='cuda'), torch.tensor(-3, device='cuda')]\n for exp in cuda_tensors:\n base = torch.randn((3, 3), device='cpu')\n regex = 'Expected all tensors to be on the same device, but found at least two devices, cuda.* and cpu!'\n self.assertRaisesRegex(RuntimeError, regex, torch.pow, base, exp)\n for exp in cuda_tensors:\n # Binary ops with a cpu + cuda tensor are allowed if the cpu tensor has 0 dimension\n base = torch.tensor(3.0, device='cpu')\n self._test_pow(base, exp)\n\n @onlyCUDA\n @dtypes(torch.complex64, torch.complex128)\n def test_pow_cuda_complex_extremal_failing(self, device, dtype):\n t = torch.tensor(complex(-1., float('inf')), dtype=dtype, device=device)\n with self.assertRaises(AssertionError):\n cuda_out = t.pow(2)\n cpu_out = t.cpu().pow(2)\n self.assertEqual(cpu_out, cuda_out)\n\n @onlyOnCPUAndCUDA\n @dtypes(*(torch.testing.get_all_dtypes(include_bool=False, include_bfloat16=False)))\n def test_complex_scalar_pow_tensor(self, device, dtype):\n complexes = [0.5j, 1. 
+ 1.j, -1.5j, 2.2 - 1.6j, 1 + 0j]\n first_exp = make_tensor((100,), device, dtype, low=-2, high=2)\n second_exp = make_tensor((100,), device, dtype, low=-2, high=2, noncontiguous=True)\n first_exp[0] = first_exp[10] = first_exp[20] = 0\n second_exp[0] = second_exp[10] = second_exp[20] = 0\n for base in complexes:\n self._test_pow(base, first_exp)\n self._test_pow(base, second_exp)\n\n @onlyOnCPUAndCUDA\n def test_pow_scalar_type_promotion(self, device):\n # Test against a scalar and non-scalar input\n inputs = [17, [17]]\n for input in inputs:\n # We expect the computation to be performed in uint8 (overflowing to 0), and then cast to int64\n input_tensor_uint8 = torch.tensor(input, dtype=torch.uint8, device=device)\n out_uint8_computation = torch.pow(2, input_tensor_uint8, out=torch.tensor(0, dtype=torch.int64, device=device))\n\n # Computation should run in int64, and not overflow\n input_tensor_int64 = torch.tensor(input, dtype=torch.int64, device=device)\n out_int64_computation = torch.pow(2, input_tensor_int64, out=torch.tensor(0, dtype=torch.int64, device=device))\n\n self.assertNotEqual(out_uint8_computation, out_int64_computation)\n self.assertEqual(out_uint8_computation.to(dtype=torch.uint8), out_int64_computation.to(dtype=torch.uint8))\n\n def test_tensor_pow_tensor(self, dev):\n def rotate(l, n):\n return l[-n:] + l[:-n]\n\n def test_tensor_pow_tensor(values, torch_type, numpy_type):\n vals_tensor = torch.tensor(values, dtype=torch_type, device=dev)\n for i in range(len(values)):\n pows = rotate(values, i)\n pows_tensor = torch.tensor(pows, dtype=torch_type, device=dev)\n self._test_pow(vals_tensor, pows_tensor)\n\n ints = [0, 1, 2, 3]\n test_tensor_pow_tensor(ints, torch.uint8, np.uint8)\n test_tensor_pow_tensor(ints, torch.int8, np.int8)\n test_tensor_pow_tensor(ints, torch.int16, np.int16)\n test_tensor_pow_tensor(ints, torch.int32, np.int32)\n test_tensor_pow_tensor(ints, torch.int64, np.int64)\n\n floats = [-3.0, -2.0, -1.0, -1 / 2, -1 / 3,\n 0.0, 1 / 3, 1 / 2, 1.0, 2.0, 3.0]\n test_tensor_pow_tensor(floats, torch.float16, np.float16)\n test_tensor_pow_tensor(floats, torch.float32, np.float32)\n test_tensor_pow_tensor(floats, torch.float64, np.float64)\n\n\n def test_logical_xor_with_nontrivial_alignment(self, device):\n # test tensor that is not aligned to multiple of 16 bytes\n size = 128\n a = (torch.randn(size, device=device) > 0)\n b = (torch.randn(size, device=device) > 0)\n c = (torch.randn(size, device=device) > 0)\n non_trivial_alignment = [1, 2, 4, 8, 15]\n for i in non_trivial_alignment:\n for j in non_trivial_alignment:\n for k in non_trivial_alignment:\n a_ = a[i: 100 + i]\n b_ = b[j: 100 + j]\n c_ = c[k: 100 + k]\n torch.logical_xor(a_, b_, out=c_)\n for x, y, z in zip(a_.tolist(), b_.tolist(), c_.tolist()):\n self.assertEqual(x ^ y, z)\n\n @dtypes(torch.float)\n def test_add_with_tail(self, device, dtype):\n # test tensor where there is a tail which is not a multiple\n # of GPU warp size\n for tail_size in [1, 63, 67, 130]:\n size = 4096 + tail_size\n a = torch.randn(size, device=device, dtype=dtype)\n b = torch.randn(size, device=device, dtype=dtype)\n c = a + b\n for x, y, z in zip(a.tolist(), b.tolist(), c.tolist()):\n self.assertEqual(x + y, z)\n\n # Tests that CUDA tensors on different devices cannot be used in the same\n # binary operation, and that CUDA \"scalars\" cannot be used in the same\n # binary operation as non-scalar CPU tensors.\n @deviceCountAtLeast(2)\n @onlyCUDA\n def test_cross_device_binary_ops(self, devices):\n vals = (1., (2.,))\n 
cpu_tensor = torch.randn(2, 2)\n\n def do_test(op, a, b):\n with self.assertRaisesRegex(RuntimeError, \"Expected all tensors.+\"):\n op(a, b)\n with self.assertRaisesRegex(RuntimeError, \"Expected all tensors.+\"):\n op(b, a)\n with self.assertRaisesRegex(RuntimeError, \"Expected all tensors.+\"):\n op(a, cpu_tensor)\n with self.assertRaisesRegex(RuntimeError, \"Expected all tensors.+\"):\n op(cpu_tensor, a)\n\n for op in (operator.add, torch.add,\n operator.sub, torch.sub,\n operator.mul, torch.mul,\n operator.truediv, torch.true_divide,\n operator.floordiv, torch.floor_divide):\n for a, b in product(vals, vals):\n a = torch.tensor(a, device=devices[0])\n b = torch.tensor(b, device=devices[1])\n\n do_test(op, a, b)\n\n # This test ensures that a scalar Tensor can be safely used\n # in a binary operation in conjunction with a Tensor on all\n # available CUDA devices\n @deviceCountAtLeast(2)\n @onlyCUDA\n def test_binary_op_scalar_device_unspecified(self, devices):\n scalar_val = torch.tensor(1.)\n for default_device in devices:\n with torch.cuda.device(default_device):\n for device in devices:\n device_obj = torch.device(device)\n x = torch.rand(3, device=device)\n y0 = x * scalar_val\n self.assertEqual(y0.device, device_obj)\n y1 = scalar_val * x\n self.assertEqual(y1.device, device_obj)\n self.assertEqual(y0, y1)\n\n def test_div_and_floordiv_vs_python(self, device):\n # Tests torch division ops which can handle both arguments being\n # scalars.\n # NOTE: torch.floor_divide currently truncates instead of flooring.\n # the quotient. See https://github.com/pytorch/pytorch/issues/43874.\n def _scalar_helper(python_op, torch_op):\n for a, b in product(range(-10, 10), range(-10, 10)):\n for op in (lambda x: x * .5, lambda x: math.floor(x)):\n a = op(a)\n b = op(b)\n\n # Skips zero divisors\n if b == 0:\n continue\n\n expected = python_op(a, b)\n\n for op in (operator.truediv, torch.true_divide):\n actual_scalar = torch_op(a, b)\n\n a_t = torch.tensor(a, device=device)\n b_t = torch.tensor(b, device=device)\n\n actual_tensor = torch_op(a_t, b_t)\n actual_first_tensor = torch_op(a_t, b)\n actual_second_tensor = torch_op(a, b_t)\n\n self.assertEqual(actual_scalar, expected_div)\n self.assertEqual(actual_tensor.item(), expected_div)\n self.assertEqual(actual_first_tensor, actual_tensor)\n self.assertEqual(actual_second_tensor, actual_tensor)\n\n _scalar_helper(operator.truediv, operator.truediv)\n _scalar_helper(operator.truediv, torch.true_divide)\n with self.assertWarnsOnceRegex(UserWarning, 'floor_divide'):\n _scalar_helper(lambda a, b: math.trunc(a / b), operator.floordiv)\n _scalar_helper(lambda a, b: math.trunc(a / b), torch.floor_divide)\n\n # NOTE: torch.floor_divide currently truncates instead of flooring.\n # See https://github.com/pytorch/pytorch/issues/43874.\n @onlyOnCPUAndCUDA\n def test_div_and_floordiv_script_vs_python(self, device):\n # Creates jitted functions of two tensors\n def _wrapped_div(a, b):\n return a / b\n\n def _wrapped_floordiv(a, b):\n return a // b\n\n scripted_div = torch.jit.script(_wrapped_div)\n scripted_floordiv = torch.jit.script(_wrapped_floordiv)\n for a, b in product(range(-10, 10), range(-10, 10)):\n for op in (lambda x: x * .5, lambda x: math.floor(x)):\n a = op(a)\n b = op(b)\n\n # Skips zero divisors\n if b == 0:\n continue\n\n expected_div = a / b\n expected_truncdiv = math.trunc(a / b)\n a_t = torch.tensor(a, device=device)\n b_t = torch.tensor(b, device=device)\n\n self.assertEqual(scripted_div(a_t, b_t), expected_div)\n with 
self.assertWarnsOnceRegex(UserWarning, 'floor_divide'):\n self.assertEqual(scripted_floordiv(a_t, b_t), expected_truncdiv)\n\n # Creates jitted functions of one tensor\n def _wrapped_div_scalar(a):\n return a / 5\n\n # NOTE: the JIT implements division as torch.reciprocal(a) * 5\n def _wrapped_rdiv_scalar(a):\n return 5 / a\n\n def _wrapped_floordiv_scalar(a):\n return a // 5\n\n # NOTE: this fails if the input is not an integer tensor\n # See https://github.com/pytorch/pytorch/issues/45199\n def _wrapped_rfloordiv_scalar(a):\n return 5 // a\n\n scripted_div_scalar = torch.jit.script(_wrapped_div_scalar)\n scripted_rdiv_scalar = torch.jit.script(_wrapped_rdiv_scalar)\n scripted_floordiv_scalar = torch.jit.script(_wrapped_floordiv_scalar)\n scripted_rfloordiv_scalar = torch.jit.script(_wrapped_rfloordiv_scalar)\n\n for a in range(-10, 10):\n for op in (lambda x: x * .5, lambda x: math.floor(x)):\n a = op(a)\n\n a_t = torch.tensor(a, device=device)\n\n self.assertEqual(a / 5, scripted_div_scalar(a_t))\n with self.assertWarnsOnceRegex(UserWarning, 'floor_divide'):\n self.assertEqual(math.trunc(a / 5), scripted_floordiv_scalar(a_t))\n\n # Skips zero divisors\n if a == 0:\n continue\n\n self.assertEqual(5 / a, scripted_rdiv_scalar(a_t))\n\n # Handles Issue 45199 (see comment above)\n if a_t.is_floating_point():\n with self.assertRaises(RuntimeError):\n scripted_rfloordiv_scalar(a_t)\n else:\n # This should emit a UserWarning, why doesn't it?\n # See issue gh-52387\n self.assertEqual(5 // a, scripted_rfloordiv_scalar(a_t))\n\n # NOTE: torch.floor_divide currently truncates instead of flooring\n # the quotient. See https://github.com/pytorch/pytorch/issues/43874.\n @onlyOnCPUAndCUDA\n def test_idiv_and_ifloordiv_vs_python(self, device):\n def _wrapped_idiv_tensor(a, b):\n a /= b\n return a\n\n def _wrapped_idiv_scalar(a):\n a /= 5\n return a\n\n def _wrapped_true_divide__tensor(a, b):\n a.true_divide_(b)\n return a\n\n def _wrapped_true_divide__scalar(a):\n a.true_divide_(5)\n return a\n\n def _wrapped_floor_divide__tensor(a, b):\n a.floor_divide_(b)\n return a\n\n def _wrapped_floor_divide__scalar(a):\n a.floor_divide_(5)\n return a\n\n # The following functions are unsupported by the JIT\n def _wrapped_ifloordiv_tensor(a, b):\n a //= b\n return a\n\n def _wrapped_ifloordiv_scalar(a):\n a //= 5\n return a\n\n with self.assertRaises(torch.jit.frontend.NotSupportedError):\n scripted_ifloordiv_tensor = torch.jit.script(_wrapped_ifloordiv_tensor)\n\n with self.assertRaises(torch.jit.frontend.NotSupportedError):\n scripted_ifloordiv_scalar = torch.jit.script(_wrapped_ifloordiv_scalar)\n\n scripted_idiv_tensor = torch.jit.script(_wrapped_idiv_tensor)\n scripted_idiv_scalar = torch.jit.script(_wrapped_idiv_scalar)\n scripted_true_divide__tensor = torch.jit.script(_wrapped_true_divide__tensor)\n scripted_true_divide__scalar = torch.jit.script(_wrapped_true_divide__scalar)\n scripted_floor_divide__tensor = torch.jit.script(_wrapped_floor_divide__tensor)\n scripted_floor_divide__scalar = torch.jit.script(_wrapped_floor_divide__scalar)\n\n for a, b in product(range(-10, 10), range(-10, 10)):\n for op in (lambda x: x * .5, lambda x: math.floor(x)):\n a = op(a)\n b = op(b)\n\n # Skips zero divisors\n if b == 0:\n continue\n\n expected_idiv = a / b\n expected_ifloordiv = a // b\n expected_itruncdiv = math.trunc(a / b)\n\n a_t = torch.tensor(a, device=device)\n b_t = torch.tensor(b, device=device)\n\n if a_t.is_floating_point():\n tmp0 = a_t.clone()\n tmp0 /= b\n\n tmp1 = a_t.clone()\n tmp1 /= b_t\n\n 
self.assertEqual(tmp0.item(), expected_idiv)\n self.assertEqual(tmp1.item(), expected_idiv)\n self.assertEqual(scripted_true_divide__tensor(a_t.clone(), b_t).item(), expected_idiv)\n self.assertEqual(scripted_true_divide__scalar(a_t.clone()).item(), a / 5)\n else:\n tmp = a_t.clone()\n with self.assertRaises(RuntimeError):\n tmp /= b\n with self.assertRaises(RuntimeError):\n tmp /= b_t\n with self.assertRaises(RuntimeError):\n scripted_true_divide__tensor(tmp, b_t)\n with self.assertRaises(RuntimeError):\n scripted_true_divide__scalar(tmp)\n\n\n if not a_t.is_floating_point() and b_t.is_floating_point():\n # Inplace modification fails because a float tensor is required\n # if the divisor is a float tensor\n with self.assertRaises(RuntimeError), self.assertWarnsOnceRegex(UserWarning, \"floor_divide\"):\n a_t.clone().floor_divide_(b_t)\n with self.assertRaises(RuntimeError), self.assertWarnsOnceRegex(UserWarning, \"floor_divide\"):\n scripted_floor_divide_tensor(a_t.clone(), b_t)\n tmp = a_t.clone()\n with self.assertRaises(RuntimeError), self.assertWarnsOnceRegex(UserWarning, \"floor_divide\"):\n tmp //= b_t\n else:\n # Inplace modification is OK when both or neither tensor is\n # a float tensor\n with self.assertWarnsOnceRegex(UserWarning, \"floor_divide\"):\n self.assertEqual(a_t.clone().floor_divide_(b_t).item(), expected_itruncdiv)\n self.assertEqual(scripted_floor_divide__tensor(a_t.clone(), b_t).item(), expected_itruncdiv)\n tmp = a_t.clone()\n with self.assertWarnsOnceRegex(UserWarning, \"floor_divide\"):\n tmp //= b_t\n self.assertEqual(tmp.item(), expected_itruncdiv)\n\n with self.assertWarnsOnceRegex(UserWarning, \"floor_divide\"):\n self.assertEqual(scripted_floor_divide__scalar(a_t), math.trunc(a / 5))\n\n # Tests binary op equivalence with Python builtin ops\n # Also tests that reverse operations are equivalent to forward ops\n # NOTE: division ops are tested separately above\n def test_binary_ops_with_scalars(self, device):\n for ops in ((operator.add, torch.add),\n (operator.sub, torch.sub),\n (operator.mul, torch.mul),\n (operator.truediv, torch.div)):\n python_op, torch_op = ops\n\n for a, b in product(range(-10, 10), range(-10, 10)):\n for op in (lambda x: x * .5, lambda x: math.floor(x)):\n a = op(a)\n b = op(b)\n\n # Skips zero divisors\n if b == 0 or a == 0:\n continue\n\n a_tensor = torch.tensor(a, device=device)\n b_tensor = torch.tensor(b, device=device)\n a_tensor_cpu = a_tensor.cpu()\n b_tensor_cpu = b_tensor.cpu()\n vals = (a, b, a_tensor, b_tensor, a_tensor_cpu, b_tensor_cpu)\n\n for args in product(vals, vals):\n first, second = args\n\n first_scalar = first if not isinstance(first, torch.Tensor) else first.item()\n second_scalar = second if not isinstance(second, torch.Tensor) else second.item()\n expected = python_op(first_scalar, second_scalar)\n\n self.assertEqual(expected, python_op(first, second))\n self.assertEqual(expected, torch_op(first, second))\n\n @dtypes(*product(torch.testing.get_all_dtypes(include_complex=False), torch.testing.get_all_dtypes(include_complex=False)))\n def test_maximum_minimum_type_promotion(self, device, dtypes):\n a = torch.tensor((0, 1), device=device, dtype=dtypes[0])\n b = torch.tensor((1, 0), device=device, dtype=dtypes[1])\n for op in (torch.maximum, torch.max, torch.fmax, torch.minimum, torch.min, torch.fmin):\n result = op(a, b)\n self.assertEqual(result.dtype, torch.result_type(a, b))\n\n @dtypes(*(torch.testing.get_all_int_dtypes() + [torch.bool]))\n def test_maximum_minimum_int_and_bool(self, device, dtype):\n ops = 
((torch.maximum, torch.max, np.maximum), (torch.minimum, torch.min, np.minimum),\n (torch.fmax, None, np.fmax), (torch.fmin, None, np.fmin))\n rng = np.random.default_rng()\n a_np = np.array(rng.integers(-100, 100, size=10), dtype=torch_to_numpy_dtype_dict[dtype])\n b_np = np.array(rng.integers(-100, 100, size=10), dtype=torch_to_numpy_dtype_dict[dtype])\n\n for torch_op, alias, numpy_op in ops:\n a_tensor = torch.from_numpy(a_np).to(device=device, dtype=dtype)\n b_tensor = torch.from_numpy(b_np).to(device=device, dtype=dtype)\n tensor_result = torch_op(a_tensor, b_tensor)\n\n out = torch.empty_like(a_tensor)\n torch_op(a_tensor, b_tensor, out=out)\n\n numpy_result = numpy_op(a_np, b_np)\n\n if alias is not None:\n alias_result = alias(a_tensor, b_tensor)\n self.assertEqual(alias_result, tensor_result)\n\n self.assertEqual(tensor_result, numpy_result)\n self.assertEqual(out, numpy_result)\n\n @precisionOverride({torch.bfloat16: 1e-2})\n @dtypes(*(torch.testing.get_all_fp_dtypes()))\n def test_maximum_minimum_float(self, device, dtype):\n ops = ((torch.maximum, torch.max, np.maximum), (torch.minimum, torch.min, np.minimum),\n (torch.fmax, None, np.fmax), (torch.fmin, None, np.fmin))\n\n if dtype == torch.bfloat16:\n a_np = np.random.randn(10).astype(np.float64)\n b_np = np.random.randn(10).astype(np.float64)\n else:\n a_np = np.random.randn(10).astype(torch_to_numpy_dtype_dict[dtype])\n b_np = np.random.randn(10).astype(torch_to_numpy_dtype_dict[dtype])\n\n for torch_op, alias, numpy_op in ops:\n numpy_result = numpy_op(a_np, b_np)\n\n a_tensor = torch.from_numpy(a_np).to(device=device, dtype=dtype)\n b_tensor = torch.from_numpy(b_np).to(device=device, dtype=dtype)\n tensor_result = torch_op(a_tensor, b_tensor)\n out = torch.empty_like(a_tensor)\n torch_op(a_tensor, b_tensor, out=out)\n\n if alias is not None:\n alias_result = alias(a_tensor, b_tensor)\n self.assertEqual(alias_result, tensor_result, exact_dtype=False)\n\n self.assertEqual(tensor_result, numpy_result, exact_dtype=False)\n self.assertEqual(out, numpy_result, exact_dtype=False)\n\n @dtypes(*(torch.testing.get_all_fp_dtypes()))\n def test_maximum_minimum_float_nan_and_inf(self, device, dtype):\n # np.maximum and np.minimum functions compare input arrays element-wisely.\n # if one of the elements being compared is a NaN, then that element is returned.\n ops = ((torch.maximum, torch.max, np.maximum), (torch.minimum, torch.min, np.minimum),\n (torch.fmax, None, np.fmax), (torch.fmin, None, np.fmin))\n a_vals = (float('inf'), -float('inf'), float('nan'), float('inf'), float('nan'), float('nan'), 1, float('nan'))\n b_vals = (-float('inf'), float('inf'), float('inf'), float('nan'), float('nan'), 0, float('nan'), -5)\n if dtype == torch.bfloat16:\n a_np = np.array(a_vals, dtype=np.float64)\n b_np = np.array(b_vals, dtype=np.float64)\n else:\n a_np = np.array(a_vals, dtype=torch_to_numpy_dtype_dict[dtype])\n b_np = np.array(b_vals, dtype=torch_to_numpy_dtype_dict[dtype])\n\n for torch_op, alias, numpy_op in ops:\n numpy_result = numpy_op(a_np, b_np)\n\n a_tensor = torch.from_numpy(a_np).to(device=device, dtype=dtype)\n b_tensor = torch.from_numpy(b_np).to(device=device, dtype=dtype)\n tensor_result = torch_op(a_tensor, b_tensor)\n\n out = torch.empty_like(a_tensor)\n torch_op(a_tensor, b_tensor, out=out)\n\n if alias is not None:\n alias_result = alias(a_tensor, b_tensor)\n self.assertEqual(alias_result, tensor_result)\n\n if dtype == torch.bfloat16:\n self.assertEqual(tensor_result, numpy_result, exact_dtype=False)\n 
self.assertEqual(out, numpy_result, exact_dtype=False)\n else:\n self.assertEqual(tensor_result, numpy_result)\n self.assertEqual(out, numpy_result)\n\n @dtypes(*product(torch.testing.get_all_complex_dtypes(), torch.testing.get_all_dtypes()))\n def test_maximum_minimum_complex(self, device, dtypes):\n for torch_op in (torch.maximum, torch.minimum, torch.max, torch.min, torch.fmax, torch.fmin):\n with self.assertRaisesRegex(RuntimeError, '.+not implemented for.+'):\n torch_op(torch.ones(1, device=device, dtype=dtypes[0]),\n torch.ones(1, device=device, dtype=dtypes[1]))\n\n with self.assertRaisesRegex(RuntimeError, '.+not implemented for.+'):\n torch_op(torch.ones(1, device=device, dtype=dtypes[1]),\n torch.ones(1, device=device, dtype=dtypes[0]))\n\n @onlyCUDA\n def test_maximum_minimum_cross_device(self, device):\n a = torch.tensor((1, 2, -1))\n b = torch.tensor((3, 0, 4), device=device)\n ops = (torch.maximum, torch.minimum)\n\n for torch_op in ops:\n with self.assertRaisesRegex(RuntimeError,\n \"Expected all tensors to be on the same device\"):\n torch_op(a, b)\n\n with self.assertRaisesRegex(RuntimeError,\n \"Expected all tensors to be on the same device\"):\n torch_op(b, a)\n\n # test cuda tensor and cpu scalar\n ops = ((torch.maximum, np.maximum), (torch.minimum, np.minimum))\n a_np = np.array(1)\n b_np = np.array([3, 0, 4])\n\n for torch_op, numpy_op in ops:\n a_tensor = torch.from_numpy(a_np)\n b_tensor = torch.from_numpy(b_np).to(device=device)\n tensor_result_1 = torch_op(a_tensor, b_tensor)\n numpy_result_1 = numpy_op(a_np, b_np)\n tensor_result_2 = torch_op(b_tensor, a_tensor)\n numpy_result_2 = numpy_op(b_np, a_np)\n\n self.assertEqual(tensor_result_1, numpy_result_1)\n self.assertEqual(tensor_result_2, numpy_result_2)\n\n # TODO: tests like this should be generic\n @dtypesIfCUDA(torch.half, torch.float, torch.double)\n @dtypes(torch.float, torch.double)\n def test_mul_intertype_scalar(self, device, dtype):\n x = torch.tensor(1.5, dtype=dtype, device=device)\n y = torch.tensor(3, dtype=torch.int32, device=device)\n\n self.assertEqual(x * y, 4.5)\n self.assertEqual(y * x, 4.5)\n\n with self.assertRaisesRegex(RuntimeError, \"can't be cast to the desired output type\"):\n y *= x\n x *= y\n self.assertEqual(x, 4.5)\n\n @onlyCPU\n @dtypes(*torch.testing.get_all_dtypes())\n def test_sub(self, device, dtype):\n m1 = torch.tensor([2.34, 4.44], dtype=dtype, device=device)\n m2 = torch.tensor([1.23, 2.33], dtype=dtype, device=device)\n\n if dtype == torch.bool:\n self.assertRaises(RuntimeError, lambda: m1 - m2)\n elif (dtype == torch.bfloat16 or dtype == torch.half):\n # bfloat16 has a lower precision so we have to have a separate check for it\n self.assertEqual(m1 - m2, torch.tensor([1.11, 2.11], dtype=dtype), atol=0.01, rtol=0)\n else:\n self.assertEqual(m1 - m2, torch.tensor([1.11, 2.11], dtype=dtype))\n\n # TODO: what is this test testing?\n @onlyCPU\n @dtypes(torch.float)\n def test_csub(self, device, dtype):\n # with a tensor\n a = torch.randn(100, 90, dtype=dtype, device=device)\n b = a.clone().normal_()\n\n res_add = torch.add(a, b, alpha=-1)\n res_csub = a.clone()\n res_csub.sub_(b)\n self.assertEqual(res_add, res_csub)\n\n # with a scalar\n a = torch.randn(100, 100, dtype=dtype, device=device)\n\n scalar = 123.5\n res_add = torch.add(a, -scalar)\n res_csub = a.clone()\n res_csub.sub_(scalar)\n self.assertEqual(res_add, res_csub)\n\n # TODO: reconcile with minimum/maximum tests\n @dtypesIfCUDA(torch.half, torch.float, torch.double)\n @dtypes(torch.float, torch.double)\n def 
test_min_max_binary_op_nan(self, device, dtype):\n a = torch.rand(1000, dtype=dtype, device=device)\n b = torch.rand(1000, dtype=dtype, device=device)\n\n # 0:250: a -- nan, b -- not nan\n a[:250] = float('nan')\n # 250:500: a -- not nan, b -- nan\n b[250:500] = float('nan')\n # 500:750: a and b both nan\n a[500:750] = float('nan')\n b[500:750] = float('nan')\n # 750:1000: neither nan\n\n ma = torch.max(a, b)\n mi = torch.min(a, b)\n\n for i in range(750):\n self.assertTrue(torch.isnan(ma[i]), \"max(a, b): {}, a: {}, b: {}\".format(ma[i], a[i], b[i]))\n self.assertTrue(torch.isnan(mi[i]), \"min(a, b): {}, a: {}, b: {}\".format(mi[i], a[i], b[i]))\n\n for i in range(750, 1000):\n self.assertFalse(torch.isnan(ma[i]), \"max(a, b): {}, a: {}, b: {}\".format(ma[i], a[i], b[i]))\n self.assertFalse(torch.isnan(mi[i]), \"min(a, b): {}, a: {}, b: {}\".format(mi[i], a[i], b[i]))\n\n @dtypes(*product(torch.testing.get_all_dtypes(include_complex=False),\n torch.testing.get_all_dtypes(include_complex=False)))\n def test_copysign(self, device, dtypes):\n def _test_copysign_numpy(a, b):\n torch_result = torch.copysign(a, b)\n\n if a.dtype == torch.bfloat16:\n np_a = a.to(torch.float).cpu().numpy()\n else:\n np_a = a.cpu().numpy()\n\n if b.dtype == torch.bfloat16:\n np_b = b.to(torch.float).cpu().numpy()\n else:\n np_b = b.cpu().numpy()\n expected = torch.from_numpy(np.copysign(np_a, np_b))\n # To handle inconsistencies of type promotion between PyTorch and Numpy\n # Applied for both arguments having integral precision and bfloat16\n types = [torch.bool, torch.bfloat16] + torch.testing.get_all_int_dtypes()\n if a.dtype in types or b.dtype in types:\n promoted_type = torch.promote_types(torch_result.dtype, expected.dtype)\n torch_result = torch_result.to(promoted_type)\n expected = expected.to(promoted_type)\n\n # Verify Value\n self.assertEqual(torch_result, expected)\n # Verify Sign\n # Use double copysign to verify the correctnes of 0.0 and -0.0, since\n # it always True for self.assertEqual(0.0 == -0.0). 
So, we use 1 as the\n # magnitude to verify the sign between torch and numpy results, elementwise.\n # Special case: NaN conversions between FP32 and FP16 is not bitwise\n # equivalent to pass this assertion.\n if a.dtype != torch.float16 and b.dtype != torch.float16:\n self.assertEqual(torch.copysign(torch.tensor(1.0), torch_result),\n torch.copysign(torch.tensor(1.0), expected))\n\n # Compare Result with NumPy\n # Type promotion\n a = make_tensor((10, 10), device=device, dtype=dtypes[0], low=-9, high=9)\n b = make_tensor((10, 10), device=device, dtype=dtypes[1], low=-9, high=9)\n _test_copysign_numpy(a, b)\n\n # Broadcast\n a = make_tensor((10, 1, 10), device=device, dtype=dtypes[0], low=-9, high=9)\n b = make_tensor((10, 10), device=device, dtype=dtypes[1], low=-9, high=9)\n _test_copysign_numpy(a, b)\n\n a = make_tensor((10, 10), device=device, dtype=dtypes[0], low=-9, high=9)\n b = make_tensor((10, 1, 10), device=device, dtype=dtypes[1], low=-9, high=9)\n _test_copysign_numpy(a, b)\n\n # 0.0/-0.0/inf/-inf/nan\n cases = [0.0, -0.0, float('inf'), float('-inf'), float('nan')]\n # torch.bfloat16 can not hold '-nan'\n # torch.half can not hold '-nan' on CUDA\n types = [torch.float32, torch.float64]\n if device == 'cpu':\n types.append(torch.float16)\n if dtypes[0] in types:\n b = make_tensor((10, 10), device=device, dtype=dtypes[1], low=-9, high=9)\n for case in cases:\n _test_copysign_numpy(torch.tensor([case], device=device, dtype=dtypes[0]), b)\n\n if dtypes[1] in torch.testing.get_all_fp_dtypes():\n a = make_tensor((10, 10), device=device, dtype=dtypes[0], low=-9, high=9)\n for case in cases:\n _test_copysign_numpy(a, torch.tensor([case], device=device, dtype=dtypes[1]))\n\n @dtypes(torch.bfloat16, torch.float)\n def test_div(self, device, dtype):\n for op, method, inplace in ((torch.div, torch.Tensor.div, torch.Tensor.div_),\n (torch.true_divide, torch.Tensor.true_divide,\n torch.Tensor.true_divide_)):\n m1 = torch.randn(10, 10, dtype=torch.float, device=device).to(dtype=dtype)\n res1 = m1.clone()\n inplace(res1[:, 3], 2)\n res2 = m1.clone()\n for i in range(m1.size(0)):\n res2[i, 3] = res2[i, 3] / 2\n self.assertEqual(res1, res2)\n\n if dtype == torch.bfloat16:\n a1 = torch.tensor([4.2, 6.2], dtype=dtype, device=device)\n a2 = torch.tensor([2., 2.], dtype=dtype, device=device)\n self.assertEqual(op(a1, a2),\n torch.tensor([2.1, 3.1], dtype=dtype, device=device),\n atol=0.01, rtol=0)\n self.assertEqual(method(a1, a2), op(a1, a2))\n\n @dtypes(torch.bfloat16, torch.float)\n def test_true_divide_out(self, device, dtype):\n a1 = torch.tensor([4.2, 6.2], dtype=dtype, device=device)\n a2 = torch.tensor([2., 2.], dtype=dtype, device=device)\n res = torch.empty_like(a1)\n self.assertEqual(torch.true_divide(a1, a2, out=res),\n torch.tensor([2.1, 3.1], dtype=dtype, device=device),\n atol=0.01, rtol=0)\n\n @onlyCUDA\n @dtypes(torch.half)\n def test_divmul_scalar(self, device, dtype):\n x = torch.tensor(100., device=device, dtype=dtype)\n x_ref = x.float()\n scale = 1e5\n res = x.div(scale)\n expected = x_ref.div(scale)\n self.assertEqual(res, expected.to(dtype), atol=0., rtol=0.)\n x = torch.tensor(1e-5, device=device, dtype=dtype)\n x_ref = x.float()\n res = x.mul(scale)\n expected = x_ref.mul(scale)\n self.assertEqual(res, expected.to(dtype), atol=0., rtol=0.)\n res = scale * x\n self.assertEqual(res, expected.to(dtype), atol=0., rtol=0.)\n\n @dtypesIfCUDA(*set(torch.testing.get_all_math_dtypes('cuda')) - {torch.complex64, torch.complex128})\n 
@dtypes(*set(torch.testing.get_all_math_dtypes('cpu')) - {torch.complex64, torch.complex128})\n def test_floor_divide_tensor(self, device, dtype):\n x = torch.randn(10, device=device).mul(30).to(dtype)\n y = torch.arange(1, 11, dtype=dtype, device=device)\n\n with self.assertWarnsOnceRegex(UserWarning, \"floor_divide\"):\n z = x // y\n z_alt = torch.trunc(x.double() / y.double()).to(dtype)\n\n self.assertEqual(z.dtype, x.dtype)\n self.assertEqual(z, z_alt)\n\n @dtypesIfCUDA(*set(torch.testing.get_all_math_dtypes('cuda')) - {torch.complex64, torch.complex128})\n @dtypes(*set(torch.testing.get_all_math_dtypes('cpu')) - {torch.complex64, torch.complex128})\n def test_floor_divide_scalar(self, device, dtype):\n x = torch.randn(100, device=device).mul(10).to(dtype)\n\n with self.assertWarnsOnceRegex(UserWarning, \"floor_divide\"):\n z = x // 3\n z_alt = torch.tensor([math.trunc(v.item() / 3.) for v in x], dtype=x.dtype, device=device)\n\n self.assertEqual(z.dtype, x.dtype)\n self.assertEqual(z, z_alt)\n\n # Note: this tests fails on XLA\n @onlyOnCPUAndCUDA\n @dtypes(torch.float, torch.long)\n def test_floor_divide_out(self, device, dtype):\n x = torch.randn(10, device=device).mul(10).to(dtype)\n y = torch.arange(1, 11, dtype=dtype, device=device)\n o = torch.empty(10, dtype=dtype, device=device)\n\n with self.assertWarnsOnceRegex(UserWarning, \"floor_divide\"):\n torch.floor_divide(x, y, out=o)\n self.assertEqual(o, x // y)\n\n # Tests scalar with out\n torch.floor_divide(x, 2, out=o)\n self.assertEqual(o, x // 2)\n\n if dtype == torch.int:\n o = torch.empty(10, dtype=torch.float, device=device)\n torch.floor_divide(x, y, out=o)\n self.assertEqual(o, torch.floor_divide(x.float(), y.float()))\n\n @onlyCPU\n @dtypes(*torch.testing.get_all_math_dtypes('cpu'))\n def test_rdiv(self, device, dtype):\n if dtype is torch.float16:\n return\n elif dtype.is_complex:\n x = torch.rand(100, dtype=dtype, device=device).add(1).mul(4)\n else:\n x = torch.rand(100, device=device).add(1).mul(4).to(dtype)\n y = 30 / x\n z = torch.tensor([30 / v.item() for v in x], device=device)\n self.assertEqual(y, z, exact_dtype=False)\n\n @dtypes(*torch.testing.get_all_fp_dtypes(include_bfloat16=False))\n def test_fmod_remainder_by_zero_float(self, device, dtype):\n fn_list = (torch.fmod, torch.remainder)\n for fn in fn_list:\n # check floating-point tensor fmod/remainder to zero is nan on both CPU and GPU\n x = make_tensor((10, 10), device=device, dtype=dtype, low=-9, high=9)\n zero = torch.zeros_like(x)\n self.assertTrue(torch.all(fn(x, 0.0).isnan()))\n self.assertTrue(torch.all(fn(x, zero).isnan()))\n\n @onlyOnCPUAndCUDA # Check Issue https://github.com/pytorch/pytorch/issues/48130\n @skipCUDAIfRocm # Error happens on both ROCM and XLA\n @dtypes(*torch.testing.get_all_int_dtypes())\n def test_fmod_remainder_by_zero_integral(self, device, dtype):\n fn_list = (torch.fmod, torch.remainder)\n for fn in fn_list:\n # check integral tensor fmod/remainder to zero\n x = make_tensor((10, 10), device=device, dtype=dtype, low=-9, high=9)\n zero = torch.zeros_like(x)\n # RuntimeError on CPU\n if self.device_type == 'cpu':\n with self.assertRaisesRegex(RuntimeError, \"ZeroDivisionError\"):\n fn(x, zero)\n # Different value for different dtype on CUDA:\n # Due to it's an undefined behavior, CUDA returns a pattern of all 1s\n # for integral dividend (other than int64) divided by zero. 
For int64,\n # CUDA returns all 1s for negative dividend, half 1s for positive dividend.\n # uint8: 0xff -> 255\n # int32: 0xffffffff -> -1\n else:\n if dtype == torch.int64:\n self.assertEqual(fn(x, zero) == 4294967295, x >= 0)\n self.assertEqual(fn(x, zero) == -1, x < 0)\n else:\n value = 255 if dtype == torch.uint8 else -1\n self.assertTrue(torch.all(fn(x, zero) == value))\n\n @dtypes(*torch.testing.get_all_dtypes(include_bfloat16=False, include_bool=False, include_complex=False))\n def test_fmod_remainder(self, device, dtype):\n # Use numpy as reference\n def _helper(x, mod, fns_list):\n for fn, inplace_fn, ref_fn in fns_list:\n np_x = x.cpu().numpy() if torch.is_tensor(x) else x\n np_mod = mod.cpu().numpy() if torch.is_tensor(mod) else mod\n exp = ref_fn(np_x, np_mod)\n exp = torch.from_numpy(exp)\n res = fn(x, mod)\n\n self.assertEqual(res, exp, exact_dtype=False)\n\n if torch.is_tensor(x):\n # out\n out = torch.empty(0, device=device, dtype=res.dtype)\n fn(x, mod, out=out)\n self.assertEqual(out, exp, exact_dtype=False)\n self.assertEqual(out.size(), torch.Size([10, 10]))\n # in-place (Type cast runtime error)\n try:\n inplace_fn(x, mod)\n self.assertEqual(x, exp, exact_dtype=False)\n except RuntimeError as e:\n self.assertRegex(str(e), \"result type (Half|Float|Double) \"\n \"can't be cast to the desired output \"\n \"type (Byte|Char|Short|Int|Long)\")\n\n x = make_tensor((10, 10), device=device, dtype=dtype, low=-9, high=9)\n # mod with same dtype as x\n mod = make_tensor((10, 10), device=device, dtype=dtype, low=-9, high=9)\n # Exclude 0\n mod[mod == 0] = 1\n\n # Mods: Integer, Float, Tensor, Non-contiguous Tensor\n mods = [3, 2.3, mod, mod.t()]\n # mod with floating-point dtype\n if dtype in torch.testing.get_all_int_dtypes():\n mod_float = make_tensor((10, 10), device=device, dtype=torch.float, low=-9, high=9)\n mod[mod == 0] = 1\n mods.append(mod_float)\n\n for dividend, mod in product([x, x.t()], mods):\n _helper(dividend, mod,\n ((torch.fmod, torch.Tensor.fmod_, np.fmod),\n (torch.remainder, torch.Tensor.remainder_, np.remainder),))\n\n # Tests for torch.remainder(scalar, tensor)\n for dividend, mod in product([5, 3.14], mods):\n if torch.is_tensor(mod):\n _helper(dividend, mod,\n ((torch.remainder, torch.Tensor.remainder_, np.remainder),))\n\n @dtypes(torch.float, torch.double)\n def test_remainder_fmod_large_dividend(self, device, dtype):\n alarge = 1e9\n pi = 3.14159265358979\n for avalue in [alarge, -alarge]:\n for bvalue in [pi, -pi]:\n a = torch.tensor([avalue], dtype=dtype, device=device)\n b = torch.tensor([bvalue], dtype=dtype, device=device)\n c = torch.remainder(a, b)\n d = torch.fmod(a, b)\n self.assertTrue((b[0] > 0) == (c[0] > 0)) # remainder has same sign as divisor\n self.assertTrue((a[0] > 0) == (d[0] > 0)) # fmod has same sign as dividend\n self.assertTrue(abs(c[0]) < abs(b[0])) # remainder is within range of divisor\n self.assertTrue(abs(d[0]) < abs(b[0])) # fmod is within range of divisor\n if ((a[0] > 0) == (b[0] > 0)):\n self.assertTrue(c[0] == d[0]) # remainder is same as fmod\n else:\n self.assertTrue(abs(c[0] - d[0]) == abs(b[0])) # differ by one divisor\n\n @dtypesIfCPU(torch.bfloat16, torch.float32, torch.float64)\n @dtypes(torch.float32, torch.float64)\n def test_hypot(self, device, dtype):\n inputs = [\n (torch.randn(10, device=device).to(dtype), torch.randn(10, device=device).to(dtype)),\n (torch.randn((3, 3, 3), device=device).to(dtype), torch.randn((3, 3, 3), device=device).to(dtype)),\n (torch.randn((10, 1), device=device).to(dtype), 
torch.randn((10, 1), device=device).to(dtype).transpose(0, 1)),\n (torch.randint(100, (10, ), device=device, dtype=torch.long), torch.randn(10, device=device).to(dtype))\n ]\n for input in inputs:\n actual = torch.hypot(input[0], input[1])\n if dtype == torch.bfloat16:\n expected = torch.sqrt(input[0] * input[0] + input[1] * input[1])\n else:\n expected = np.hypot(input[0].cpu().numpy(), input[1].cpu().numpy())\n self.assertEqual(actual, expected, exact_dtype=False)\n\n @onlyOnCPUAndCUDA\n @dtypes(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64)\n def test_gcd(self, device, dtype):\n # Tests gcd(0, 0), gcd(0, a) cases\n t1 = torch.tensor([0, 10, 0], dtype=dtype, device=device)\n t2 = torch.tensor([0, 0, 10], dtype=dtype, device=device)\n actual = torch.gcd(t1, t2)\n expected = np.gcd([0, 10, 0], [0, 0, 10])\n self.assertEqual(actual, expected, exact_dtype=False)\n\n if dtype == torch.uint8:\n # Test unsigned integers with potential sign issues (i.e., uint8 with value >= 128)\n a = torch.tensor([190, 210], device=device, dtype=dtype)\n b = torch.tensor([190, 220], device=device, dtype=dtype)\n actual = torch.gcd(a, b)\n expected = torch.tensor([190, 10], device=device, dtype=dtype)\n self.assertEqual(actual, expected)\n else:\n # Compares with NumPy\n a = torch.randint(-20, 20, (1024,), device=device, dtype=dtype)\n b = torch.randint(-20, 20, (1024,), device=device, dtype=dtype)\n actual = torch.gcd(a, b)\n expected = np.gcd(a.cpu().numpy(), b.cpu().numpy())\n self.assertEqual(actual, expected)\n\n @onlyOnCPUAndCUDA\n @dtypes(torch.int16, torch.int32, torch.int64)\n def test_lcm(self, device, dtype):\n # Tests lcm(0, 0), lcm(0, a) cases\n t1 = torch.tensor([0, 10, 0], dtype=dtype, device=device)\n t2 = torch.tensor([0, 0, 10], dtype=dtype, device=device)\n actual = torch.lcm(t1, t2)\n expected = np.lcm([0, 10, 0], [0, 0, 10])\n self.assertEqual(actual, expected, exact_dtype=False)\n\n # Compares with NumPy\n a = torch.randint(-20, 20, (1024,), device=device, dtype=dtype)\n b = torch.randint(-20, 20, (1024,), device=device, dtype=dtype)\n actual = torch.lcm(a, b)\n expected = np.lcm(a.cpu().numpy(), b.cpu().numpy())\n self.assertEqual(actual, expected, exact_dtype=False)\n\n @onlyOnCPUAndCUDA\n @dtypes(torch.float32, torch.float64)\n def test_nextafter(self, device, dtype):\n # Test special cases\n t1 = torch.tensor([0, 0, 10], device=device, dtype=dtype)\n t2 = torch.tensor([inf, -inf, 10], device=device, dtype=dtype)\n actual = torch.nextafter(t1, t2)\n expected = np.nextafter(t1.cpu().numpy(), t2.cpu().numpy())\n self.assertEqual(actual, expected, atol=0, rtol=0)\n\n actual = torch.nextafter(t2, t1)\n expected = np.nextafter(t2.cpu().numpy(), t1.cpu().numpy())\n self.assertEqual(actual, expected, atol=0, rtol=0)\n\n t1 = torch.tensor([0, nan], device=device, dtype=dtype)\n t2 = torch.tensor([nan, 0], device=device, dtype=dtype)\n self.assertTrue(torch.nextafter(t1, t2).isnan().all())\n\n a = torch.randn(100, device=device, dtype=dtype)\n b = torch.randn(100, device=device, dtype=dtype)\n actual = torch.nextafter(a, b)\n expected = np.nextafter(a.cpu().numpy(), b.cpu().numpy())\n self.assertEqual(actual, expected, atol=0, rtol=0)\n\n def _test_cop(self, torchfn, mathfn, dtype, device):\n def reference_implementation(res2):\n for i, j in iter_indices(sm1):\n idx1d = i * sm1.size(0) + j\n res2[i, j] = mathfn(sm1[i, j], sm2[idx1d])\n return res2\n\n # contiguous\n m1 = torch.randn(10, 10, 10, dtype=dtype, device=device)\n m2 = torch.randn(10, 10 * 10, dtype=dtype, 
device=device)\n sm1 = m1[4]\n sm2 = m2[4]\n\n res1 = torchfn(sm1, sm2.view(10, 10))\n res2 = reference_implementation(res1.clone())\n self.assertEqual(res1, res2)\n\n # non-contiguous\n m1 = torch.randn(10, 10, 10, dtype=dtype, device=device)\n m2 = torch.randn(10 * 10, 10 * 10, dtype=dtype, device=device)\n sm1 = m1[:, 4]\n sm2 = m2[:, 4]\n # view as sm1.size()\n sm2.set_(sm2.storage(), sm2.storage_offset(), sm1.size(), (sm2.stride()[0] * 10, sm2.stride()[0]))\n res1 = torchfn(sm1, sm2)\n # reference_implementation assumes 1-d sm2\n sm2.set_(sm2.storage(), sm2.storage_offset(), m2[:, 4].size(), m2[:, 4].stride())\n res2 = reference_implementation(res1.clone())\n self.assertEqual(res1, res2)\n\n @onlyCPU\n @dtypes(torch.float)\n def test_cdiv(self, device, dtype):\n self._test_cop(torch.div, lambda x, y: x / y, dtype, device)\n\n @onlyCPU\n @dtypes(torch.float)\n def test_cremainder(self, device, dtype):\n self._test_cop(torch.remainder, lambda x, y: x % y, dtype, device)\n\n @onlyCPU\n @dtypes(torch.float)\n def test_cmul(self, device, dtype):\n self._test_cop(torch.mul, lambda x, y: x * y, dtype, device)\n\n @onlyCPU\n @dtypes(torch.float)\n def test_cpow(self, device, dtype):\n self._test_cop(torch.pow, lambda x, y: nan if x < 0 else math.pow(x, y), dtype, device)\n\n @onlyCPU\n @dtypes(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64)\n def test_floor_divide_zero(self, device, dtype):\n a = torch.tensor([0, 1], dtype=dtype, device=device)\n b = torch.tensor([0, 1], dtype=dtype, device=device)\n with self.assertRaisesRegex(RuntimeError, 'ZeroDivisionError'):\n with self.assertWarnsOnceRegex(UserWarning, \"floor_divide\"):\n a // b\n\n @unittest.skipIf(TEST_WITH_ASAN, \"Integer overflows are not allowed under ASAN\")\n @dtypes(*torch.testing.get_all_dtypes())\n def test_muldiv_scalar(self, device, dtype):\n x = make_tensor((10, 3), device, dtype, low=None, high=None)\n s = make_tensor((1,), 'cpu', dtype, low=None, high=None).item()\n y = torch.full_like(x, s)\n self.assertEqual(x * s, x * y)\n self.assertEqual(s * x, y * x)\n self.assertEqual(x / s, x / y)\n self.assertEqual(s / x, y / x)\n\n @dtypes(*tuple(itertools.combinations_with_replacement(torch.testing.get_all_dtypes(), 2)))\n def test_comparison_ops_type_promotion_and_broadcasting(self, device, dtypes):\n # issue #42660\n # testing all combinations of broadcasting and type promotion\n # with a range of dtypes and input shapes, and with extremal values\n def compare_with_numpy_bin_op(torch_fn, np_fn, x, y, out=None):\n # working around the fact that numpy doesn't support bfloat16\n # by letting numpy treat them as float32's\n x_np = x if x.dtype != torch.bfloat16 else x.to(torch.float32)\n y_np = y.cpu().numpy() if y.dtype != torch.bfloat16 else y.to(torch.float32).cpu().numpy()\n self.compare_with_numpy(lambda inp: torch_fn(inp, y, out=out) if out else torch_fn(inp, y),\n lambda inp: np_fn(inp, y_np, out=out) if out else np_fn(inp, y_np),\n x_np)\n\n complex_op_denylist = [torch.lt, torch.le, torch.gt, torch.ge] # complex not supported\n input_sizes = [\n (1,),\n (10,),\n (10, 1),\n (1, 10),\n (4, 10),\n (64, 10),\n (12, 3)]\n op_pairs = [(torch.lt, np.less),\n (torch.le, np.less_equal),\n (torch.gt, np.greater),\n (torch.ge, np.greater_equal),\n (torch.eq, np.equal),\n (torch.ne, np.not_equal),\n (torch.logical_and, np.logical_and),\n (torch.logical_or, np.logical_or),\n (torch.logical_xor, np.logical_xor)]\n\n for size1 in input_sizes:\n size2 = (2,) + size1 # perform broadcasting\n for with_extremal in [False, 
True]:\n a = _generate_input(size1, dtypes[0], device, with_extremal)\n b = _generate_input(size2, dtypes[1], device, with_extremal)\n for torch_op, numpy_op in op_pairs:\n if (dtypes[0].is_complex or dtypes[1].is_complex) and torch_op in complex_op_denylist:\n continue\n # functional version of op\n compare_with_numpy_bin_op(torch_op, numpy_op, a, b)\n\n # functional comparison ops always return bool tensors\n self.assertEqual(torch_op(a, b).dtype, torch.bool)\n\n # out version of op\n out = torch.zeros(1, dtype=torch.complex128) # all casts to complex128 are safe\n compare_with_numpy_bin_op(torch_op, numpy_op, a, b, out=out)\n\n @onlyOnCPUAndCUDA\n @dtypes(torch.int8, torch.int16, torch.int32, torch.int64)\n def test_signed_shift(self, device, dtype):\n \"Ensure that signed integer bit shifting works as expected.\"\n a = torch.tensor([-10, 10], device=device, dtype=dtype) # [11...1110110, 1010]\n expected_l = torch.tensor([-40, 40], device=device, dtype=dtype) # [11...11011000, 101000]\n self.assertEqual(a << 2, expected_l)\n self.compare_with_numpy(lambda x: x << 2, lambda x: np.left_shift(x, 2), a)\n expected_r = torch.tensor([-5, 5], device=device, dtype=dtype) # [1111...111011, 101]\n self.assertEqual(a >> 1, expected_r)\n self.compare_with_numpy(lambda x: x >> 1, lambda x: np.right_shift(x, 1), a)\n\n def test_bitwise_and(self, device):\n for dtype in (torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64):\n a = torch.tensor([1, -2, 3], dtype=dtype, device=device)\n b = torch.tensor([2, 1, 3], dtype=dtype, device=device)\n expected_res = torch.tensor([0, 0, 3], dtype=dtype, device=device)\n b_scalar = 2\n expected_res_scalar = torch.tensor([0, 2, 2], dtype=dtype, device=device)\n\n # standard version\n self.assertEqual(torch.bitwise_and(a, b), expected_res)\n self.assertEqual(torch.bitwise_and(a, b_scalar), expected_res_scalar)\n\n # out\n c = torch.empty(0, dtype=dtype, device=device)\n torch.bitwise_and(a, b, out=c)\n self.assertEqual(c, expected_res)\n torch.bitwise_and(a, b_scalar, out=c)\n self.assertEqual(c, expected_res_scalar)\n\n # in-place\n a1 = a.clone()\n a1.bitwise_and_(b)\n self.assertEqual(a1, expected_res)\n a.bitwise_and_(b_scalar)\n self.assertEqual(a, expected_res_scalar)\n\n self.assertEqual(torch.tensor([False, True, False], device=device),\n torch.bitwise_and(torch.tensor([True, True, False], device=device),\n torch.tensor([False, True, False], device=device)))\n\n def test_bitwise_or(self, device):\n for dtype in (torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64):\n a = torch.tensor([1, -2, 3], dtype=dtype, device=device)\n b = torch.tensor([2, 1, 3], dtype=dtype, device=device)\n expected_res = torch.tensor([3, -1, 3], dtype=dtype, device=device)\n b_scalar = 2\n expected_res_scalar = torch.tensor([3, -2, 3], dtype=dtype, device=device)\n\n # standard version\n self.assertEqual(torch.bitwise_or(a, b), expected_res)\n self.assertEqual(torch.bitwise_or(a, b_scalar), expected_res_scalar)\n\n # out\n c = torch.empty(0, dtype=dtype, device=device)\n torch.bitwise_or(a, b, out=c)\n self.assertEqual(c, expected_res)\n torch.bitwise_or(a, b_scalar, out=c)\n self.assertEqual(c, expected_res_scalar)\n\n # in-place\n a1 = a.clone()\n a1.bitwise_or_(b)\n self.assertEqual(a1, expected_res)\n a.bitwise_or_(b_scalar)\n self.assertEqual(a, expected_res_scalar)\n\n self.assertEqual(torch.tensor([True, True, False], device=device),\n torch.bitwise_or(torch.tensor([True, True, False], device=device),\n torch.tensor([False, True, False], 
device=device)))\n\n def test_bitwise_xor(self, device):\n for dtype in (torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64):\n a = torch.tensor([1, -2, 3], dtype=dtype, device=device)\n b = torch.tensor([2, 1, 3], dtype=dtype, device=device)\n expected_res = torch.tensor([3, -1, 0], dtype=dtype, device=device)\n b_scalar = 2\n expected_res_scalar = torch.tensor([3, -4, 1], dtype=dtype, device=device)\n\n # standard version\n self.assertEqual(torch.bitwise_xor(a, b), expected_res)\n self.assertEqual(torch.bitwise_xor(a, b_scalar), expected_res_scalar)\n\n # out\n c = torch.empty(0, dtype=dtype, device=device)\n torch.bitwise_xor(a, b, out=c)\n self.assertEqual(c, expected_res)\n torch.bitwise_xor(a, b_scalar, out=c)\n self.assertEqual(c, expected_res_scalar)\n\n # in-place\n a1 = a.clone()\n a1.bitwise_xor_(b)\n self.assertEqual(a1, expected_res)\n a.bitwise_xor_(b_scalar)\n self.assertEqual(a, expected_res_scalar)\n\n self.assertEqual(torch.tensor([True, False, False], device=device),\n torch.bitwise_xor(torch.tensor([True, True, False], device=device),\n torch.tensor([False, True, False], device=device)))\n\n @dtypes(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64)\n def test_bitwise_shift(self, device, dtype):\n ops = [\n (torch.bitwise_left_shift, np.left_shift),\n (operator.lshift, operator.lshift),\n (torch.bitwise_right_shift, np.right_shift),\n (operator.rshift, operator.rshift),\n ]\n for torch_op, numpy_op in ops:\n a = torch.tensor([19, -20, -21, 22], dtype=dtype, device=device)\n b = torch.tensor([2, 1, 3, 1], dtype=dtype, device=device)\n a_np = a.cpu().numpy()\n b_np = b.cpu().numpy()\n\n # Tensor x Tensor\n self.assertEqual(torch_op(a, b), torch.tensor(numpy_op(a_np, b_np), device=device))\n # Tensor x int scalar\n self.assertEqual(torch_op(a, 2), torch.tensor(numpy_op(a_np, 2), device=device))\n\n def test_bitwise_shift_float(self, device):\n ops = [\n (torch.bitwise_left_shift, lambda x, y: x * 2. ** y),\n (operator.lshift, lambda x, y: x * 2. ** y),\n (torch.bitwise_right_shift, lambda x, y: x / 2. ** y),\n (operator.rshift, lambda x, y: x / 2. 
** y),\n ]\n for torch_op, expected_op in ops:\n # int tensor x float\n a = torch.tensor([19, -20, -21, 22], dtype=torch.int64, device=device)\n self.assertEqual(torch_op(a, 1.8), torch.floor(expected_op(a, 1)).to(a.dtype))\n # float tensor x int scalar\n a = torch.tensor([19.1, -20.2, -21.3, 22.4], dtype=torch.float32, device=device)\n self.assertEqual(torch_op(a, 2), expected_op(a, 2))\n # float tensor x float scalar\n a = torch.tensor([19.1, -20.2, -21.3, 22.4], dtype=torch.float32, device=device)\n self.assertEqual(torch_op(a, 2.2), expected_op(a, 2.2))\n\n @onlyOnCPUAndCUDA\n @dtypes(*list(product(torch.testing.get_all_dtypes(include_complex=False),\n torch.testing.get_all_dtypes(include_complex=False))))\n def test_heaviside(self, device, dtypes):\n input_dtype = dtypes[0]\n values_dtype = dtypes[1]\n\n rng = np.random.default_rng()\n input = np.array(rng.integers(-10, 10, size=10),\n dtype=torch_to_numpy_dtype_dict[input_dtype if (input_dtype != torch.bfloat16) else torch.float64])\n input[0] = input[3] = input[7] = 0\n values = np.array(rng.integers(-10, 10, size=10),\n dtype=torch_to_numpy_dtype_dict[values_dtype if (values_dtype != torch.bfloat16) else torch.float64])\n np_result = torch.from_numpy(np.heaviside(input, values)).to(device=device, dtype=input_dtype)\n\n input = torch.from_numpy(input).to(device=device, dtype=input_dtype)\n values = torch.from_numpy(values).to(device=device, dtype=values_dtype)\n out = torch.empty_like(input)\n\n if input_dtype == values_dtype:\n torch_result = torch.heaviside(input, values)\n self.assertEqual(np_result, torch_result)\n\n torch_result = input.heaviside(values)\n self.assertEqual(np_result, torch_result)\n\n torch.heaviside(input, values, out=out)\n self.assertEqual(np_result, out)\n\n input.heaviside_(values)\n self.assertEqual(np_result, input)\n else:\n with self.assertRaisesRegex(RuntimeError, 'heaviside is not yet implemented for tensors with different dtypes.'):\n torch.heaviside(input, values)\n with self.assertRaisesRegex(RuntimeError, 'heaviside is not yet implemented for tensors with different dtypes.'):\n input.heaviside(values)\n with self.assertRaisesRegex(RuntimeError, 'heaviside is not yet implemented for tensors with different dtypes.'):\n torch.heaviside(input, values, out=out)\n with self.assertRaisesRegex(RuntimeError, 'heaviside is not yet implemented for tensors with different dtypes.'):\n input.heaviside_(values)\n\n @onlyCUDA\n def test_heaviside_cross_device(self, device):\n x = torch.tensor([-9, 5, 0, 6, -2, 2], device=device)\n y = torch.tensor(0)\n result = torch.heaviside(x, y)\n expect = torch.tensor([0, 1, 0, 1, 0, 1], device=device)\n self.assertEqual(result, expect)\n\n result = torch.heaviside(y, x)\n expect = torch.tensor([-9, 5, 0, 6, -2, 2], device=device)\n self.assertEqual(result, expect)\n\n x = torch.tensor([-9, 5, 0, 6, -2, 2])\n y = torch.tensor(0, device=device)\n with self.assertRaisesRegex(RuntimeError, 'Expected all tensors to be on the same device'):\n torch.heaviside(x, y)\n\n with self.assertRaisesRegex(RuntimeError, 'Expected all tensors to be on the same device'):\n torch.heaviside(y, x)\n\n @dtypes(*list(product(torch.testing.get_all_complex_dtypes(),\n torch.testing.get_all_complex_dtypes())))\n def test_heaviside_complex(self, device, dtypes):\n input_dtype = dtypes[0]\n values_dtype = dtypes[1]\n\n data = (complex(0, -6), complex(-1, 3), complex(1, 1))\n input = torch.tensor(data, device=device, dtype=input_dtype)\n values = torch.tensor(data, device=device, dtype=values_dtype)\n 
out = torch.empty_like(input)\n real = input.real\n\n with self.assertRaisesRegex(RuntimeError, 'heaviside is not yet implemented for complex tensors.'):\n torch.heaviside(input, real)\n with self.assertRaisesRegex(RuntimeError, 'heaviside is not yet implemented for complex tensors.'):\n real.heaviside(values)\n with self.assertRaisesRegex(RuntimeError, 'heaviside is not yet implemented for complex tensors.'):\n input.heaviside_(values)\n with self.assertRaisesRegex(RuntimeError, 'heaviside is not yet implemented for complex tensors.'):\n torch.heaviside(real, real, out=out)\n\n def _test_logical(self, device, dtypes, op, a_, b_, expected_res_):\n expected_res = torch.tensor(expected_res_, dtype=dtypes[0], device=device)\n a = torch.tensor(a_, dtype=dtypes[0], device=device)\n b = torch.tensor(b_, dtype=dtypes[1], device=device)\n\n # new tensor\n self.assertEqual(expected_res.bool(), getattr(a, op)(b))\n # out\n c = torch.empty(0, dtype=torch.bool, device=device)\n getattr(torch, op)(a, b, out=c)\n self.assertEqual(expected_res.bool(), c)\n\n # in-place\n # TODO: remove when different dtypes as operands are supported\n if dtypes[0] != dtypes[1]:\n with self.assertRaises(RuntimeError):\n getattr(a, op + '_')(b)\n return\n\n getattr(a, op + '_')(b)\n self.assertEqual(expected_res, a)\n\n @dtypes(*product(torch.testing.get_all_dtypes(), torch.testing.get_all_dtypes()))\n def test_logical_xor(self, device, dtypes):\n self._test_logical(device, dtypes, 'logical_xor', [10, 0, 1, 0], [1, 0, 0, 10], [0, 0, 1, 1])\n\n @dtypes(*product(torch.testing.get_all_dtypes(), torch.testing.get_all_dtypes()))\n def test_logical_and(self, device, dtypes):\n self._test_logical(device, dtypes, 'logical_and', [10, 0, 1, 0], [1, 0, 0, 10], [1, 0, 0, 0])\n\n @dtypes(*product(torch.testing.get_all_dtypes(), torch.testing.get_all_dtypes()))\n def test_logical_or(self, device, dtypes):\n self._test_logical(device, dtypes, 'logical_or', [10, 0, 1, 0], [1, 0, 0, 10], [1, 0, 1, 1])\n\n def test_remainder_overflow(self, device):\n # Check Integer Overflows\n x = torch.tensor(23500, dtype=torch.int64, device=device)\n q = 392486996410368\n self.assertEqual(x % q, x)\n self.assertEqual(-x % q, q - x)\n self.assertEqual(x % -q, x - q)\n self.assertEqual(-x % -q, -x)\n\n def test_rpow(self, device):\n m = torch.randn(10, 10, device=device)\n self.assertEqual(torch.pow(2, m), 2**m)\n\n # test with scalar\n m = torch.randn(1, device=device).squeeze()\n assert m.dim() == 0, \"m is intentionally a scalar\"\n self.assertEqual(torch.pow(2, m), 2**m)\n\n @onlyCPU\n def test_ldexp(self, device):\n # random values\n mantissas = torch.randn(64, device=device)\n exponents = torch.randint(-31, 31, (64,), device=device, dtype=torch.int32)\n\n # basic test\n np_outcome = np.ldexp(mantissas.numpy(), exponents.numpy())\n pt_outcome_1 = torch.ldexp(mantissas, exponents)\n pt_outcome_2 = mantissas.ldexp(exponents)\n self.assertEqual(np_outcome, pt_outcome_1)\n self.assertEqual(np_outcome, pt_outcome_2)\n mantissas.ldexp_(exponents)\n self.assertEqual(np_outcome, mantissas)\n\n # test bounds\n mantissas = torch.tensor([float('inf'), float('-inf'), float('inf'), float('nan')], device=device)\n exponents = torch.randint(0, 31, (4,), device=device, dtype=torch.int32)\n np_outcome = np.ldexp(mantissas.numpy(), exponents.numpy())\n pt_outcome = torch.ldexp(mantissas, exponents)\n self.assertEqual(np_outcome, pt_outcome)\n\n @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)\n def test_lerp(self, device, dtype):\n 
start_end_weight_shapes = [(), (5,), (5, 5)]\n for shapes in product(start_end_weight_shapes, start_end_weight_shapes, start_end_weight_shapes):\n start = torch.randn(shapes[0], device=device, dtype=dtype)\n end = torch.randn(shapes[1], device=device, dtype=dtype)\n\n # Tensor weights\n weights = [torch.randn(shapes[2], device=device, dtype=dtype), random.random()]\n if dtype.is_complex:\n weights += [complex(0, 1), complex(0.4, 1.2)]\n\n for weight in weights:\n actual = torch.lerp(start, end, weight)\n actual_method = start.lerp(end, weight)\n self.assertEqual(actual, actual_method)\n actual_out = torch.tensor(1., dtype=dtype, device=device)\n torch.lerp(start, end, weight, out=actual_out)\n self.assertEqual(actual, actual_out)\n expected = start + weight * (end - start)\n self.assertEqual(expected, actual)\n\n def _test_logaddexp(self, device, dtype, base2):\n if base2:\n ref_func = np.logaddexp2\n our_func = torch.logaddexp2\n else:\n ref_func = np.logaddexp\n our_func = torch.logaddexp\n\n def _test_helper(a, b):\n ref = ref_func(a.cpu().numpy(), b.cpu().numpy())\n v = our_func(a, b)\n self.assertEqual(ref, v)\n\n # simple test\n a = torch.randn(64, 2, dtype=dtype, device=device) - 0.5\n b = torch.randn(64, 2, dtype=dtype, device=device) - 0.5\n _test_helper(a, b)\n _test_helper(a[:3], b[:3])\n\n # large value test for numerical stability\n a *= 10000\n b *= 10000\n _test_helper(a, b)\n _test_helper(a[:3], b[:3])\n\n a = torch.tensor([float('inf'), float('-inf'), float('inf'), float(\"nan\")], dtype=dtype, device=device)\n b = torch.tensor([float('inf'), float('-inf'), float('-inf'), float(\"nan\")], dtype=dtype, device=device)\n _test_helper(a, b)\n\n @dtypes(torch.float32, torch.float64)\n def test_logaddexp(self, device, dtype):\n self._test_logaddexp(device, dtype, base2=False)\n\n @dtypes(torch.float32, torch.float64)\n def test_logaddexp2(self, device, dtype):\n self._test_logaddexp(device, dtype, base2=True)\n\n def test_add(self, device):\n dtypes = [torch.float, torch.double] + torch.testing.get_all_complex_dtypes()\n for dtype in dtypes:\n # [res] torch.add([res,] tensor1, tensor2)\n m1 = torch.randn(100, 100, dtype=dtype, device=device)\n v1 = torch.randn(100, dtype=dtype, device=device)\n\n # contiguous\n res1 = torch.add(m1[4], v1)\n res2 = res1.clone().zero_()\n for i in range(m1.size(1)):\n res2[i] = m1[4, i] + v1[i]\n self.assertEqual(res1, res2)\n\n m1 = torch.randn(100, 100, device=device)\n v1 = torch.randn(100, device=device)\n\n # non-contiguous\n res1 = torch.add(m1[:, 4], v1)\n res2 = res1.clone().zero_()\n for i in range(m1.size(0)):\n res2[i] = m1[i, 4] + v1[i]\n self.assertEqual(res1, res2)\n\n # [res] torch.add([res,] tensor, value)\n m1 = torch.randn(10, 10, device=device)\n\n # contiguous\n res1 = m1.clone()\n res1[3].add_(2)\n res2 = m1.clone()\n for i in range(m1.size(1)):\n res2[3, i] = res2[3, i] + 2\n self.assertEqual(res1, res2)\n\n # non-contiguous\n m1 = torch.randn(10, 10, device=device)\n res1 = m1.clone()\n res1[:, 3].add_(2)\n res2 = m1.clone()\n for i in range(m1.size(0)):\n res2[i, 3] = res2[i, 3] + 2\n self.assertEqual(res1, res2)\n\n # inter-type\n m1 = torch.randn(10, 10, dtype=dtype, device=device)\n self.assertEqual(m1 + 3, m1 + torch.tensor(3))\n self.assertEqual(3 + m1, torch.tensor(3) + m1)\n\n # contiguous + non-contiguous\n m1 = torch.randn(10, 10, dtype=dtype, device=device)\n m2 = torch.randn(10, 10, dtype=dtype, device=device).t()\n res = m1 + m2\n self.assertTrue(res.is_contiguous())\n self.assertEqual(res, m1 + 
m2.contiguous())\n\n # 1d + empty\n m1 = torch.tensor([1.0], dtype=dtype, device=device)\n m2 = torch.tensor([], dtype=dtype, device=device)\n self.assertEqual(m1 + m2, [])\n\n # inter-type unint8\n one = torch.tensor(1, dtype=torch.uint8, device=device)\n self.assertEqual(torch.add(one, 1), 2)\n self.assertEqual(torch.add(one, 1).dtype, torch.uint8)\n\n # bool\n m1 = torch.tensor([True, False, False, True, False, False], dtype=torch.bool, device=device)\n m2 = torch.tensor([True, True, False, False, False, True], dtype=torch.bool, device=device)\n expected = torch.tensor([True, True, False, True, False, True], dtype=torch.bool, device=device)\n self.assertEqual(m1 + m2, expected)\n\n # fused multiply add\n a = torch.zeros(2, 3, dtype=torch.bool, device=device)\n res = torch.add(a, a, alpha=0)\n expected = torch.zeros(2, 3, device=device).bool()\n self.assertEqual(res, expected)\n\n # bfloat16\n m1 = torch.tensor([1., 2.], dtype=torch.bfloat16)\n m2 = torch.tensor([3., 4.], dtype=torch.bfloat16)\n self.assertEqual(m1 + m2, torch.tensor([4., 6.], dtype=torch.bfloat16))\n\n # different alpha types\n m1 = torch.tensor([2 + 3j, 4 + 5j], dtype=torch.complex64, device=device)\n m2 = torch.tensor([4 + 5j, 2 + 3j], dtype=torch.complex64, device=device)\n # add complex numbers with float alpha\n res = torch.add(m1, m2, alpha=0.1)\n expected = torch.tensor([2.4000 + 3.5000j, 4.2000 + 5.3000j], dtype=torch.complex64, device=device)\n self.assertEqual(res, expected)\n\n # add complex numbers with complex alpha\n res = torch.add(m1, m2, alpha=complex(0.1, 0.2))\n expected = torch.tensor([1.4000 + 4.3000j, 3.6000 + 5.7000j], dtype=torch.complex64, device=device)\n self.assertEqual(res, expected)\n\n # add complex numbers with integer alpha\n res = torch.add(m1, m2, alpha=2)\n expected = torch.tensor([10. + 13.j, 8. 
+ 11.j], dtype=torch.complex64, device=device)\n self.assertEqual(res, expected)\n\n # mismatched alpha\n m1 = torch.tensor([1], dtype=torch.int8, device=device)\n m2 = torch.tensor([2], dtype=torch.int8, device=device)\n self.assertRaisesRegex(RuntimeError,\n r\"Boolean alpha only supported for Boolean results\\.\",\n lambda: torch.add(m1, m2, alpha=True))\n self.assertRaisesRegex(RuntimeError,\n r\"For integral input tensors, argument alpha must not be a floating point number\\.\",\n lambda: torch.add(m1, m2, alpha=1.0))\n\n # mismatched alpha, float / double tensor and complex alpha\n msg = r\"For non-complex input tensors, argument alpha must not be a complex number\\.\"\n m1 = torch.tensor([3., 4.], device=device)\n m2 = torch.tensor([4., 3.], device=device)\n self.assertRaisesRegex(RuntimeError, msg,\n lambda: torch.add(m1, m2, alpha=complex(0.1, 0.2)))\n\n m1 = torch.tensor([3., 4.], dtype=torch.double, device=device)\n m2 = torch.tensor([4., 3.], dtype=torch.double, device=device)\n self.assertRaisesRegex(RuntimeError, msg,\n lambda: torch.add(m1, m2, alpha=complex(0.1, 0.2)))\n\n # complex\n m1 = torch.tensor((4.0000 + 4.0000j), dtype=torch.complex64)\n m2 = torch.tensor(4., dtype=torch.float64)\n self.assertRaisesRegex(RuntimeError, r\"result type ComplexFloat can't be cast to the desired output type Double\",\n lambda: torch.add(m1, m1, out=m2))\n\n @onlyCUDA\n def test_addsub_half_tensor(self, device):\n x = torch.tensor([60000.0], dtype=torch.half, device=device)\n for op, y, alpha in (\n (torch.add, torch.tensor([-60000.0], dtype=torch.half, device=device), 2),\n (torch.sub, torch.tensor([60000.0], dtype=torch.half, device=device), 2),\n (torch.add, -70000.0, 1),\n (torch.sub, 70000.0, 1),\n ):\n actual = op(x, y, alpha=alpha)\n self.assertTrue(not (actual.isnan() or actual.isinf()))\n\n def test_sub_typing(self, device):\n m1 = torch.tensor([True, False, False, True, False, False], dtype=torch.bool, device=device)\n m2 = torch.tensor([True, True, False, False, False, True], dtype=torch.bool, device=device)\n self.assertRaisesRegex(RuntimeError,\n r\"Subtraction, the `\\-` operator, with two bool tensors is not supported. \"\n r\"Use the `\\^` or `logical_xor\\(\\)` operator instead.\",\n lambda: m1 - m2)\n self.assertRaisesRegex(RuntimeError,\n r\"Subtraction, the `\\-` operator, with a bool tensor is not supported. \"\n r\"If you are trying to invert a mask, use the `\\~` or `logical_not\\(\\)` operator instead.\",\n lambda: 1 - m1)\n self.assertRaisesRegex(RuntimeError,\n r\"Subtraction, the `\\-` operator, with a bool tensor is not supported. 
\"\n r\"If you are trying to invert a mask, use the `\\~` or `logical_not\\(\\)` operator instead.\",\n lambda: m2 - 1)\n\n # mismatched alpha\n m1 = torch.tensor([1], dtype=torch.int8, device=device)\n m2 = torch.tensor([2], dtype=torch.int8, device=device)\n self.assertRaisesRegex(RuntimeError,\n r\"Boolean alpha only supported for Boolean results\\.\",\n lambda: torch.sub(m1, m2, alpha=True))\n self.assertRaisesRegex(RuntimeError,\n r\"For integral input tensors, argument alpha must not be a floating point number\\.\",\n lambda: torch.sub(m1, m2, alpha=1.0))\n\n def test_mul(self, device):\n m1 = torch.randn(10, 10, device=device)\n res1 = m1.clone()\n res1[:, 3].mul_(2)\n res2 = m1.clone()\n for i in range(res1.size(0)):\n res2[i, 3] = res2[i, 3] * 2\n self.assertEqual(res1, res2)\n\n a1 = torch.tensor([True, False, False, True], dtype=torch.bool, device=device)\n a2 = torch.tensor([True, False, True, False], dtype=torch.bool, device=device)\n self.assertEqual(a1 * a2, torch.tensor([True, False, False, False], dtype=torch.bool, device=device))\n\n if device == 'cpu':\n a1 = torch.tensor([0.1, 0.1], dtype=torch.bfloat16, device=device)\n a2 = torch.tensor([1.1, 0.1], dtype=torch.bfloat16, device=device)\n self.assertEqual(a1 * a2, torch.tensor([0.11, 0.01], dtype=torch.bfloat16, device=device), atol=0.01, rtol=0)\n self.assertEqual(a1.mul(a2), a1 * a2)\n\n def test_bool_tensor_comparison_ops(self, device):\n a = torch.tensor([True, False, True, False, True, False], dtype=torch.bool, device=device)\n b = torch.tensor([True, False, True, True, True, True], dtype=torch.bool, device=device)\n self.assertEqual(a == b, torch.tensor([1, 1, 1, 0, 1, 0], dtype=torch.bool, device=device))\n self.assertEqual(a != b, torch.tensor([0, 0, 0, 1, 0, 1], dtype=torch.bool, device=device))\n self.assertEqual(a < b, torch.tensor([0, 0, 0, 1, 0, 1], dtype=torch.bool, device=device))\n self.assertEqual(a > b, torch.tensor([0, 0, 0, 0, 0, 0], dtype=torch.bool, device=device))\n self.assertEqual(a >= b, torch.tensor([1, 1, 1, 0, 1, 0], dtype=torch.bool, device=device))\n self.assertEqual(a <= b, torch.tensor([1, 1, 1, 1, 1, 1], dtype=torch.bool, device=device))\n self.assertEqual(a > False, torch.tensor([1, 0, 1, 0, 1, 0], dtype=torch.bool, device=device))\n self.assertEqual(a == torch.tensor(True, dtype=torch.bool, device=device),\n torch.tensor([1, 0, 1, 0, 1, 0], dtype=torch.bool, device=device))\n self.assertEqual(a == torch.tensor(0, dtype=torch.bool, device=device),\n torch.tensor([0, 1, 0, 1, 0, 1], dtype=torch.bool, device=device))\n self.assertFalse(a.equal(b))\n\n @dtypes(*torch.testing.get_all_dtypes(include_complex=False))\n def test_logical(self, device, dtype):\n if dtype != torch.bool:\n x = torch.tensor([1, 2, 3, 4], device=device, dtype=dtype)\n b = torch.tensor([2], device=device, dtype=dtype)\n self.assertEqual(x.lt(2), torch.tensor([True, False, False, False]))\n self.assertEqual(x.le(2), torch.tensor([True, True, False, False]))\n self.assertEqual(x.ge(2), torch.tensor([False, True, True, True]))\n self.assertEqual(x.gt(2), torch.tensor([False, False, True, True]))\n self.assertEqual(x.eq(2), torch.tensor([False, True, False, False]))\n self.assertEqual(x.ne(2), torch.tensor([True, False, True, True]))\n\n self.assertEqual(x.lt(b), torch.tensor([True, False, False, False]))\n self.assertEqual(x.le(b), torch.tensor([True, True, False, False]))\n self.assertEqual(x.ge(b), torch.tensor([False, True, True, True]))\n self.assertEqual(x.gt(b), torch.tensor([False, False, True, True]))\n 
self.assertEqual(x.eq(b), torch.tensor([False, True, False, False]))\n self.assertEqual(x.ne(b), torch.tensor([True, False, True, True]))\n else:\n x = torch.tensor([True, False, True, False], device=device)\n self.assertEqual(x.lt(True), torch.tensor([False, True, False, True]))\n self.assertEqual(x.le(True), torch.tensor([True, True, True, True]))\n self.assertEqual(x.ge(True), torch.tensor([True, False, True, False]))\n self.assertEqual(x.gt(True), torch.tensor([False, False, False, False]))\n self.assertEqual(x.eq(True), torch.tensor([True, False, True, False]))\n self.assertEqual(x.ne(True), torch.tensor([False, True, False, True]))\n\n def test_atan2(self, device):\n def _test_atan2_with_size(size, device):\n a = torch.rand(size=size, device=device, dtype=torch.double)\n b = torch.rand(size=size, device=device, dtype=torch.double)\n actual = a.atan2(b)\n x = a.view(-1)\n y = b.view(-1)\n expected = torch.tensor([math.atan2(x[i].item(), y[i].item()) for i in range(x.numel())],\n device=device, dtype=torch.double)\n self.assertEqual(expected, actual.view(-1), rtol=0, atol=0.02)\n\n _test_atan2_with_size((2, 2), device)\n _test_atan2_with_size((3, 3), device)\n _test_atan2_with_size((5, 5), device)\n\n def test_atan2_edgecases(self, device):\n def _test_atan2(x, y, expected, device, dtype):\n expected_tensor = torch.tensor([expected], dtype=dtype, device=device)\n x_tensor = torch.tensor([x], dtype=dtype, device=device)\n y_tensor = torch.tensor([y], dtype=dtype, device=device)\n actual = torch.atan2(y_tensor, x_tensor)\n self.assertEqual(expected_tensor, actual, rtol=0, atol=0.02)\n\n for dtype in [torch.float, torch.double]:\n _test_atan2(0, 0, 0, device, dtype)\n _test_atan2(0, 1, math.pi / 2, device, dtype)\n _test_atan2(0, -1, math.pi / -2, device, dtype)\n _test_atan2(-1, 0, math.pi, device, dtype)\n _test_atan2(1, 0, 0, device, dtype)\n _test_atan2(-1, -1, math.pi * -3 / 4 , device, dtype)\n _test_atan2(1, 1, math.pi / 4 , device, dtype)\n _test_atan2(1, -1, math.pi / -4 , device, dtype)\n _test_atan2(-1, 1, math.pi * 3 / 4 , device, dtype)\n\n def test_trapz(self, device):\n def test_dx(sizes, dim, dx, device):\n t = torch.randn(sizes, device=device)\n actual = torch.trapz(t, dx=dx, dim=dim)\n expected = np.trapz(t.cpu().numpy(), dx=dx, axis=dim)\n self.assertEqual(expected.shape, actual.shape)\n self.assertEqual(expected, actual, exact_dtype=False)\n\n def test_x(sizes, dim, x, device):\n t = torch.randn(sizes, device=device)\n actual = torch.trapz(t, x=torch.tensor(x, device=device), dim=dim)\n expected = np.trapz(t.cpu().numpy(), x=x, axis=dim)\n self.assertEqual(expected.shape, actual.shape)\n self.assertEqual(expected, actual.cpu(), exact_dtype=False)\n\n test_dx((2, 3, 4), 1, 1, device)\n test_dx((10, 2), 0, 0.1, device)\n test_dx((1, 10), 0, 2.3, device)\n test_dx((0, 2), 0, 1.0, device)\n test_dx((0, 2), 1, 1.0, device)\n test_x((2, 3, 4), 1, [1.0, 2.0, 3.0], device)\n test_x((10, 2), 0, [2.0, 3.0, 4.0, 7.0, 11.0, 14.0, 22.0, 26.0, 26.1, 30.3], device)\n test_x((1, 10), 0, [1.0], device)\n test_x((0, 2), 0, [], device)\n test_x((0, 2), 1, [1.0, 2.0], device)\n with self.assertRaisesRegex(\n IndexError,\n 'Dimension out of range'):\n test_x((2, 3), 2, [], device)\n test_dx((2, 3), 2, 1.0, device)\n with self.assertRaisesRegex(\n RuntimeError,\n 'There must be one `x` value for each sample point'):\n test_x((2, 3), 1, [1.0, 2.0], device)\n test_x((2, 3), 1, [1.0, 2.0, 3.0, 4.0], device)\n\n @dtypes(torch.double)\n def test_pow_scalar_overloads_mem_overlap(self, device, 
dtype):\n sz = 3\n doubles = torch.randn(2 * sz, dtype=dtype, device=device)\n self.check_internal_mem_overlap(\n lambda t: t.pow_(42), 1, dtype, device)\n self.unary_check_input_output_mem_overlap(\n doubles, sz, lambda input, out: torch.pow(input, 42, out=out))\n self.unary_check_input_output_mem_overlap(\n doubles, sz, lambda input, out: torch.pow(42, input, out=out))\n\n @dtypes(*list(product(torch.testing.get_all_dtypes(include_bool=False),\n torch.testing.get_all_dtypes(include_bool=False))))\n def test_float_power(self, device, dtypes):\n def to_np(value):\n if isinstance(value, torch.Tensor) and value.dtype == torch.bfloat16:\n return value.to(torch.float).cpu().numpy()\n return value.cpu().numpy() if isinstance(value, torch.Tensor) else value\n\n base_dtype = dtypes[0]\n exp_dtype = dtypes[1]\n out_dtype = torch.complex128 if base_dtype.is_complex or exp_dtype.is_complex else torch.float64\n\n base = make_tensor((30,), device, base_dtype, low=1, high=100)\n # Complex and real results do not agree between PyTorch and NumPy when computing negative and zero power of 0\n # Related: https://github.com/pytorch/pytorch/issues/48000\n # base[0] = base[3] = base[7] = 0\n exp = make_tensor((30,), device, exp_dtype, low=-2, high=2)\n exp[0] = exp[4] = exp[6] = 0\n\n expected = torch.from_numpy(np.float_power(to_np(base), to_np(exp)))\n\n exponents = [-2.8, -2, -1, -0.5, 0.5, 1, 2]\n complex_exponents = exponents + [-2.5j, -1.0j, 1.0j, 2.5j, 1.0 + 1.0j, -1.0 - 1.5j, 3.3j]\n\n for op in (torch.float_power, torch.Tensor.float_power, torch.Tensor.float_power_):\n\n # Case of Tensor x Tensor\n if op is torch.Tensor.float_power_ and base_dtype != out_dtype:\n with self.assertRaisesRegex(RuntimeError, \"operation's result requires dtype\"):\n op(base.clone(), exp)\n else:\n result = op(base.clone(), exp)\n self.assertEqual(expected, result)\n\n if op is torch.float_power:\n out = torch.empty_like(base).to(device=device, dtype=out_dtype)\n op(base, exp, out=out)\n self.assertEqual(expected, out)\n\n # Case of Tensor x Scalar\n for i in complex_exponents if exp_dtype.is_complex else exponents:\n out_dtype_scalar_exp = torch.complex128 if base_dtype.is_complex or type(i) == complex else torch.float64\n expected_scalar_exp = torch.from_numpy(np.float_power(to_np(base), i))\n\n if op is torch.Tensor.float_power_ and base_dtype != out_dtype_scalar_exp:\n with self.assertRaisesRegex(RuntimeError, \"operation's result requires dtype\"):\n op(base.clone(), i)\n else:\n result = op(base.clone(), i)\n self.assertEqual(expected_scalar_exp, result)\n\n if op is torch.float_power:\n out = torch.empty_like(base).to(device=device, dtype=out_dtype_scalar_exp)\n op(base, i, out=out)\n self.assertEqual(expected_scalar_exp, out)\n\n # Case of Scalar x Tensor\n for i in complex_exponents if base_dtype.is_complex else exponents:\n out_dtype_scalar_base = torch.complex128 if exp_dtype.is_complex or type(i) == complex else torch.float64\n expected_scalar_base = torch.from_numpy(np.float_power(i, to_np(exp)))\n\n result = torch.float_power(i, exp)\n self.assertEqual(expected_scalar_base, result)\n\n out = torch.empty_like(exp).to(device=device, dtype=out_dtype_scalar_base)\n torch.float_power(i, exp, out=out)\n self.assertEqual(expected_scalar_base, out)\n\n def test_float_power_exceptions(self, device):\n def _promo_helper(x, y):\n for i in (x, y):\n if type(i) == complex:\n return torch.complex128\n elif type(i) == torch.Tensor and i.is_complex():\n return torch.complex128\n return torch.double\n\n test_cases = 
((torch.tensor([-2, -1, 0, 1, 2], device=device), -.25),\n (torch.tensor([-1.0j, 0j, 1.0j, 1.0 + 1.0j, -1.0 - 1.5j], device=device), 2.))\n for base, exp in test_cases:\n for out_dtype in (torch.long, torch.float, torch.double, torch.cdouble):\n out = torch.empty(1, device=device, dtype=out_dtype)\n required_dtype = _promo_helper(base, exp)\n\n if out.dtype == required_dtype:\n torch.float_power(base, exp, out=out)\n else:\n with self.assertRaisesRegex(RuntimeError, \"operation's result requires dtype\"):\n torch.float_power(base, exp, out=out)\n\n if base.dtype == required_dtype:\n torch.Tensor.float_power_(base.clone(), exp)\n else:\n with self.assertRaisesRegex(RuntimeError, \"operation's result requires dtype\"):\n torch.Tensor.float_power_(base.clone(), exp)\n\n @skipIf(not TEST_SCIPY, \"Scipy required for the test.\")\n @dtypes(*product(torch.testing.get_all_dtypes(include_complex=False, include_bfloat16=False),\n torch.testing.get_all_dtypes(include_complex=False, include_bfloat16=False)))\n def test_xlogy_xlog1py(self, device, dtypes):\n x_dtype, y_dtype = dtypes\n\n def out_variant_helper(torch_fn, x, y):\n expected = torch_fn(x, y)\n out = torch.empty_like(expected)\n torch_fn(x, y, out=out)\n self.assertEqual(expected, out)\n\n def xlogy_inplace_variant_helper(x, y):\n if x.dtype in torch.testing.get_all_int_dtypes() + [torch.bool]:\n with self.assertRaisesRegex(RuntimeError,\n \"can't be cast to the desired output type\"):\n x.clone().xlogy_(y)\n else:\n expected = torch.empty_like(x)\n torch.xlogy(x, y, out=expected)\n inplace_out = x.clone().xlogy_(y)\n self.assertEqual(expected, inplace_out)\n\n def test_helper(torch_fn, reference_fn, inputs, scalar=None):\n x, y, z = inputs\n torch_fn_partial = partial(torch_fn, x)\n reference_fn_partial = partial(reference_fn, x.cpu().numpy())\n self.compare_with_numpy(torch_fn_partial, reference_fn_partial, x, exact_dtype=False)\n self.compare_with_numpy(torch_fn_partial, reference_fn_partial, y, exact_dtype=False)\n self.compare_with_numpy(torch_fn_partial, reference_fn_partial, z, exact_dtype=False)\n\n val = scalar if scalar is not None else x\n out_variant_helper(torch_fn, val, x)\n out_variant_helper(torch_fn, val, y)\n out_variant_helper(torch_fn, val, z)\n\n # Tensor-Tensor Test (tensor of same and different shape)\n x = make_tensor((3, 2, 4, 5), device, x_dtype, low=0.5, high=1000)\n y = make_tensor((3, 2, 4, 5), device, y_dtype, low=0.5, high=1000)\n z = make_tensor((4, 5), device, y_dtype, low=0.5, high=1000)\n\n x_1p = make_tensor((3, 2, 4, 5), device, x_dtype, low=-0.5, high=1000)\n y_1p = make_tensor((3, 2, 4, 5), device, y_dtype, low=-0.5, high=1000)\n z_1p = make_tensor((4, 5), device, y_dtype, low=-0.5, high=1000)\n\n xlogy_fns = torch.xlogy, scipy.special.xlogy\n xlog1py_fns = torch.special.xlog1py, scipy.special.xlog1py\n\n test_helper(*xlogy_fns, (x, y, z))\n xlogy_inplace_variant_helper(x, x)\n xlogy_inplace_variant_helper(x, y)\n xlogy_inplace_variant_helper(x, z)\n test_helper(*xlog1py_fns, (x_1p, y_1p, z_1p))\n\n # Scalar-Tensor Test\n test_helper(*xlogy_fns, (x, y, z), 3.14)\n test_helper(*xlog1py_fns, (x_1p, y_1p, z_1p), 3.14)\n\n # Special Values Tensor-Tensor\n t = torch.tensor([-1., 0., 1., 2., float('inf'), -float('inf'), float('nan')], device=device)\n zeros = torch.zeros(7, dtype=y_dtype, device=device)\n\n def test_zeros_special_helper(torch_fn, reference_fn, scalar=False):\n zeros_t = 0 if scalar else zeros\n zeros_np = 0 if scalar else zeros.cpu().numpy()\n torch_fn_partial = partial(torch_fn, zeros_t)\n 
reference_fn_partial = partial(reference_fn, zeros_np)\n self.compare_with_numpy(torch_fn_partial, reference_fn_partial, t, exact_dtype=False)\n out_variant_helper(torch_fn, zeros_t, t)\n\n test_zeros_special_helper(*xlogy_fns)\n xlogy_inplace_variant_helper(zeros, t)\n test_zeros_special_helper(*xlog1py_fns)\n\n # Special Values Scalar-Tensor\n test_zeros_special_helper(*xlogy_fns, scalar=True)\n test_zeros_special_helper(*xlog1py_fns, scalar=True)\n\n def test_xlogy_xlog1py_scalar_type_promotion(self, device):\n # Test that python numbers don't participate in type promotion at the same\n # priority level as 0-dim tensors\n t = torch.randn((), dtype=torch.float32, device=device)\n\n self.assertEqual(t.dtype, torch.xlogy(t, 5).dtype)\n self.assertEqual(t.dtype, torch.xlogy(t, 5.).dtype)\n self.assertEqual(t.dtype, torch.special.xlog1py(t, 5).dtype)\n self.assertEqual(t.dtype, torch.special.xlog1py(t, 5.).dtype)\n\n self.assertEqual(t.dtype, torch.xlogy(5, t).dtype)\n self.assertEqual(t.dtype, torch.xlogy(5., t).dtype)\n self.assertEqual(t.dtype, torch.special.xlog1py(5, t).dtype)\n self.assertEqual(t.dtype, torch.special.xlog1py(5., t).dtype)\n\n @skipIf(not TEST_SCIPY, \"Scipy required for the test.\")\n def test_xlogy_xlog1py_bfloat16(self, device):\n def _compare_helper(x, y, torch_fn, reference_fn):\n x_np = x if isinstance(x, float) else x.cpu().to(torch.float).numpy()\n y_np = y if isinstance(y, float) else y.cpu().to(torch.float).numpy()\n expected = torch.from_numpy(reference_fn(x_np, y_np))\n actual = torch_fn(x, y)\n self.assertEqual(expected, actual, exact_dtype=False)\n\n x_dtype, y_dtype = torch.bfloat16, torch.bfloat16\n\n # Tensor-Tensor Test (tensor of same and different shape)\n x = make_tensor((3, 2, 4, 5), device, x_dtype, low=0.5, high=1000)\n y = make_tensor((3, 2, 4, 5), device, y_dtype, low=0.5, high=1000)\n z = make_tensor((4, 5), device, y_dtype, low=0.5, high=1000)\n\n x_1p = make_tensor((3, 2, 4, 5), device, x_dtype, low=-0.8, high=1000)\n y_1p = make_tensor((3, 2, 4, 5), device, y_dtype, low=-0.8, high=1000)\n z_1p = make_tensor((4, 5), device, y_dtype, low=-0.8, high=1000)\n\n xlogy_fns = torch.xlogy, scipy.special.xlogy\n xlog1py_fns = torch.special.xlog1py, scipy.special.xlog1py\n\n _compare_helper(x, x, *xlogy_fns)\n _compare_helper(x, y, *xlogy_fns)\n _compare_helper(x, z, *xlogy_fns)\n _compare_helper(x, 3.14, *xlogy_fns)\n _compare_helper(y, 3.14, *xlogy_fns)\n _compare_helper(z, 3.14, *xlogy_fns)\n\n _compare_helper(x_1p, x_1p, *xlog1py_fns)\n _compare_helper(x_1p, y_1p, *xlog1py_fns)\n _compare_helper(x_1p, z_1p, *xlog1py_fns)\n _compare_helper(x_1p, 3.14, *xlog1py_fns)\n _compare_helper(y_1p, 3.14, *xlog1py_fns)\n _compare_helper(z_1p, 3.14, *xlog1py_fns)\n\n # Special Values Tensor-Tensor\n t = torch.tensor([-1., 0., 1., 2., float('inf'), -float('inf'), float('nan')], device=device)\n zeros = torch.tensor(7, dtype=y_dtype, device=device)\n\n _compare_helper(t, zeros, *xlogy_fns)\n _compare_helper(t, 0., *xlogy_fns)\n\n _compare_helper(t, zeros, *xlog1py_fns)\n _compare_helper(t, 0., *xlog1py_fns)\n\n @dtypes(*product(torch.testing.get_all_dtypes(include_complex=False,\n include_half=False, include_bfloat16=False),\n torch.testing.get_all_dtypes(include_complex=False,\n include_half=False, include_bfloat16=False)))\n @skipIf(not TEST_SCIPY, \"Scipy required for the test.\")\n def test_zeta(self, device, dtypes):\n x_dtype, q_dtype = dtypes\n\n def test_helper(x, q):\n x_np = x if isinstance(x, float) else x.cpu().numpy()\n q_np = q if isinstance(q, 
float) else q.cpu().numpy()\n expected = torch.from_numpy(scipy.special.zeta(x_np, q_np))\n actual = torch.special.zeta(x, q)\n\n rtol, atol = None, None\n if self.device_type == 'cpu':\n rtol, atol = 1e-6, 1e-6\n self.assertEqual(expected, actual, rtol=rtol, atol=atol, exact_dtype=False)\n\n # x tensor - q tensor same size\n x = make_tensor((2, 3, 4), device, x_dtype)\n q = make_tensor((2, 3, 4), device, q_dtype)\n test_helper(x, q)\n\n # x tensor - q tensor broadcast lhs\n x = make_tensor((2, 1, 4), device, x_dtype)\n q = make_tensor((2, 3, 4), device, q_dtype)\n test_helper(x, q)\n\n # x tensor - q tensor broadcast rhs\n x = make_tensor((2, 3, 4), device, x_dtype)\n q = make_tensor((2, 1, 4), device, q_dtype)\n test_helper(x, q)\n\n # x tensor - q tensor broadcast all\n x = make_tensor((2, 3, 1), device, x_dtype)\n q = make_tensor((2, 1, 4), device, q_dtype)\n test_helper(x, q)\n\n # x scalar - q tensor\n for x in np.linspace(-5, 5, num=10).tolist():\n if not q_dtype.is_floating_point:\n q_dtype = torch.get_default_dtype()\n q = make_tensor((2, 3, 4), device, q_dtype)\n test_helper(x, q)\n\n # x tensor - q scalar\n for q in np.linspace(-5, 5, num=10).tolist():\n if not x_dtype.is_floating_point:\n x_dtype = torch.get_default_dtype()\n x = make_tensor((2, 3, 4), device, x_dtype)\n test_helper(x, q)\n\n\ntensor_binary_ops = [\n '__lt__', '__le__',\n '__gt__', '__ge__',\n '__eq__', '__ne__',\n\n '__add__', '__radd__', '__iadd__',\n '__sub__', '__rsub__', '__isub__',\n '__mul__', '__rmul__', '__imul__',\n '__matmul__', '__rmatmul__',\n '__truediv__', '__rtruediv__', '__itruediv__',\n '__floordiv__', '__rfloordiv__', '__ifloordiv__',\n '__mod__', '__rmod__', '__imod__',\n '__pow__', '__rpow__', '__ipow__',\n '__lshift__', '__rlshift__', '__ilshift__',\n '__rshift__', '__rrshift__', '__irshift__',\n '__and__', '__iand__',\n '__xor__', '__ixor__',\n '__or__', '__ior__',\n\n # Unsupported operators\n # '__imatmul__',\n # '__divmod__', '__rdivmod__', '__idivmod__',\n # '__rand__', '__ror__', '__rxor__',\n]\n\n# Test that binary math operations return NotImplemented for unknown types.\ndef generate_not_implemented_tests(cls):\n class UnknownType:\n pass\n\n # TODO: refactor to inline these\n _types = [\n torch.half, torch.float, torch.double,\n torch.int8, torch.short, torch.int, torch.long,\n torch.uint8\n ]\n\n # TODO: refactor to use make_tensor\n def _small_2d(dtype, device, has_zeros=True, fill_ones=False, oneish=False):\n t = _make_tensor((5, 5), dtype, device, fill_ones=fill_ones)\n if oneish:\n return t.clamp(min=_number(.99, 1, dtype), max=1.01)\n if not has_zeros:\n return t.clamp(min=(_number(_div_min, 1, dtype)))\n return t\n\n def create_test_func(op):\n @dtypes(*_types)\n def test(self, device, dtype):\n # Generate the inputs\n tensor = _small_2d(dtype, device)\n\n # Runs the tensor op on the device\n result = getattr(tensor, op)(UnknownType())\n self.assertEqual(result, NotImplemented)\n return test\n\n for op in tensor_binary_ops:\n test_name = \"test_{}_not_implemented\".format(op)\n assert not hasattr(cls, test_name), \"{0} already in {1}\".format(\n test_name, cls.__name__)\n\n setattr(cls, test_name, create_test_func(op))\n\n\ngenerate_not_implemented_tests(TestBinaryUfuncs)\ninstantiate_device_type_tests(TestBinaryUfuncs, globals())\n\nif __name__ == '__main__':\n run_tests()\n" ]
[ [ "torch.result_type", "torch.testing._internal.common_utils.run_tests", "torch.rand", "torch.testing.get_all_math_dtypes", "torch.testing._internal.common_utils.set_default_dtype", "torch.randn", "torch.iinfo", "numpy.right_shift", "torch.device", "torch.lerp", "torch.special.zeta", "torch.testing.get_all_int_dtypes", "torch.cuda.device", "torch.testing.get_all_dtypes", "numpy.divide", "torch.empty_like", "torch.zeros_like", "torch.testing._internal.common_device_type.dtypesIfCUDA", "numpy.errstate", "torch.finfo", "torch.zeros", "numpy.array", "torch.promote_types", "torch.empty", "torch.jit.script", "torch.testing._internal.common_utils.make_tensor", "torch.max", "torch.logical_xor", "torch.special.xlog1py", "torch.atan2", "numpy.copysign", "torch.true_divide", "torch.floor_divide", "torch.bitwise_and", "torch.copysign", "numpy.heaviside", "torch.testing._internal.common_device_type.deviceCountAtLeast", "numpy.gcd", "torch.trapz", "torch.testing.all_types_and_complex_and", "torch.xlogy", "torch.testing._internal.common_device_type.precisionOverride", "torch.Size", "numpy.random.default_rng", "torch.remainder", "torch.testing._internal.common_device_type.dtypesIfCPU", "torch.get_default_dtype", "torch.gcd", "torch.add", "torch.testing._internal.common_device_type.dtypes", "torch.divide", "torch.float_power", "torch.randn_like", "numpy.true_divide", "numpy.left_shift", "numpy.random.randn", "torch.testing._internal.common_utils.iter_indices", "torch.nextafter", "torch.where", "torch.full_like", "torch.lcm", "torch.min", "torch.testing.get_all_complex_dtypes", "torch.randint", "torch.sub", "torch.bitwise_or", "torch.sqrt", "numpy.lcm", "torch.bitwise_xor", "torch.fmod", "torch.testing.get_all_fp_dtypes", "torch.arange", "torch.from_numpy", "torch.ldexp", "numpy.linspace", "torch.ones", "torch.tensor", "torch.testing._internal.common_device_type.skipIf", "torch.isnan", "torch.pow", "torch.hypot", "torch.is_tensor", "torch.heaviside" ] ]
rkeulemans/exercise_public
[ "5f8020198b8b234169eea4d5e08c98344438de5d" ]
[ "helpers.py" ]
[ "from sympy import Rational, Symbol, latex, UnevaluatedExpr\nimport sympy as sp\nimport numpy as np\n\nu = lambda x : UnevaluatedExpr(x)\n\n# Helper functions\ndef explain_add(a, b):\n assert(np.shape(a) == np.shape(b))\n rows, columns = np.shape(a)\n return sp.Matrix([[Symbol(f\"({latex(u(a[i,j]))} + {latex(u(b[i,j]))})\") for j in range(columns)] for i in range(rows)])\n\ndef symbolic_matrix(character, rows, columns):\n # row or column vector\n if rows == 1:\n return sp.Matrix([[Symbol(f\"{{{character}}}_{{{j+1}}}\") for j in range(columns)] for i in range(rows)]) \n if columns == 1:\n return sp.Matrix([[Symbol(f\"{{{character}}}_{{{i+1}}}\") for j in range(columns)] for i in range(rows)]) \n return sp.Matrix([[Symbol(f\"{{{character}}}_{{{i+1}, {j+1}}}\") for j in range(columns)] for i in range(rows)])\n\ndef explain_multiply(a, b):\n # #rows in b == #columns in a\n assert(np.shape(a)[1] == np.shape(b)[0])\n rows = np.shape(a)[0]\n columns = np.shape(b)[1]\n result = np.empty(shape=(rows, columns), dtype=object)\n for i in range(rows):\n row = a[i,:]\n for j in range(columns):\n column = b[:,j]\n zipped = zip(row, column)\n mapped = list(map(lambda t: f\"{latex(u(t[0]))} \\cdot {latex(u(t[1]))}\", zipped))\n s = Symbol(\"\") \n result[i, j] = Symbol(\" + \".join(mapped), evaluate=False)\n \n return sp.Matrix(result)" ]
[ [ "numpy.shape", "numpy.empty" ] ]
galactics/space-command
[ "496b054883c6464bcd8d73d72c8145ae80606336" ]
[ "space/station.py" ]
[ "import logging\nfrom numpy import degrees, pi, radians\n\nfrom beyond.frames import get_frame, create_station\nfrom beyond.errors import UnknownFrameError\n\nfrom .wspace import ws\nfrom .utils import dms2deg, deg2dms\n\n\nlog = logging.getLogger(__name__)\n\n\nclass StationDb:\n def __new__(cls):\n\n if not hasattr(cls, \"_instance\"):\n # Singleton\n cls._instance = super().__new__(cls)\n\n return cls._instance\n\n @classmethod\n def list(cls):\n\n self = cls()\n\n if not hasattr(self, \"_stations\"):\n\n self._stations = {}\n for abbr, charact in ws.config[\"stations\"].items():\n\n charact[\"parent_frame\"] = get_frame(charact[\"parent_frame\"])\n full_name = charact.pop(\"name\")\n mask = charact.get(\"mask\")\n if mask:\n # reverse direction of the mask to put it in counterclockwise\n # to comply with the mathematical definition\n charact[\"mask\"] = (\n (2 * pi - radians(mask[\"azims\"][::-1])),\n radians(mask[\"elevs\"][::-1]),\n )\n\n # Deletion of all unknown characteristics from the charact dict\n # and conversion to object attributes (they may be used by addons)\n extra_charact = {}\n for key in list(charact.keys()):\n if key not in (\"parent_frame\", \"latlonalt\", \"mask\"):\n extra_charact[key] = charact.pop(key)\n\n self._stations[abbr] = create_station(abbr, **charact)\n self._stations[abbr].abbr = abbr\n self._stations[abbr].full_name = full_name\n\n for key, value in extra_charact.items():\n setattr(self._stations[abbr], key, value)\n\n return self._stations\n\n @classmethod\n def get(cls, name):\n\n self = cls()\n\n try:\n return get_frame(name)\n except UnknownFrameError:\n if name not in self.list().keys():\n raise\n return self.list()[name]\n\n @classmethod\n def save(cls, station):\n self = cls()\n\n ws.config[\"stations\"].update(station)\n ws.config.save()\n\n if hasattr(self, \"_stations\"):\n del self._stations\n\n\ndef wshook(cmd, *args, **kwargs):\n\n if cmd in (\"init\", \"full-init\"):\n name = \"TLS\"\n\n ws.config.setdefault(\"stations\", {})\n\n try:\n StationDb.get(name)\n except UnknownFrameError:\n StationDb.save(\n {\n name: {\n \"latlonalt\": [43.604482, 1.443962, 172.0],\n \"name\": \"Toulouse\",\n \"parent_frame\": \"WGS84\",\n }\n }\n )\n log.info(\"Station {} created\".format(name))\n else:\n log.warning(\"Station {} already exists\".format(name))\n\n\ndef space_station(*argv):\n \"\"\"Stations management\n\n Usage:\n space-station list [--map] [<abbr>]\n space-station create <abbr> <name> <lat> <lon> <alt>\n\n Options\n list List available stations\n create Interactively create a station\n <abbr> Abbreviation\n <name> Name of the station\n <lat> Latitude in degrees\n <lon> Longitude in degrees\n <alt> Altitude in meters\n -m, --map Display the station on a map\n\n Latitude and longitude both accept degrees as float or as\n degrees, minutes and seconds of arc (e.g. 
43°25\"12')\n \"\"\"\n\n from pathlib import Path\n import matplotlib.pyplot as plt\n\n from .utils import docopt\n from .map.background import set_background\n\n args = docopt(space_station.__doc__)\n\n station = StationDb()\n\n if args[\"create\"]:\n abbr = args[\"<abbr>\"]\n name = args[\"<name>\"]\n latitude = args[\"<lat>\"]\n longitude = args[\"<lon>\"]\n altitude = args[\"<alt>\"]\n\n if \"°\" in latitude:\n latitude = dms2deg(latitude)\n else:\n latitude = float(latitude)\n\n if \"°\" in longitude:\n longitude = dms2deg(longitude)\n else:\n longitude = float(longitude)\n\n altitude = float(altitude)\n\n log.info(\"Creation of station '{}' ({})\".format(name, abbr))\n log.debug(\n \"{} {}, altitude : {} m\".format(\n deg2dms(latitude, \"lat\"), deg2dms(longitude, \"lon\"), altitude\n )\n )\n StationDb.save(\n {\n abbr: {\n \"name\": name,\n \"latlonalt\": (latitude, longitude, altitude),\n \"parent_frame\": \"WGS84\",\n }\n }\n )\n else:\n\n stations = []\n\n for station in sorted(station.list().values(), key=lambda x: x.abbr):\n\n if args[\"<abbr>\"] and station.abbr != args[\"<abbr>\"]:\n continue\n\n print(station.name)\n print(\"-\" * len(station.name))\n lat, lon, alt = station.latlonalt\n lat, lon = degrees([lat, lon])\n print(\"name: {}\".format(station.full_name))\n print(\n \"altitude: {} m\\nposition: {}, {}\".format(\n alt, deg2dms(lat, \"lat\"), deg2dms(lon, \"lon\")\n )\n )\n print()\n\n stations.append((station.name, lat, lon))\n\n if args[\"--map\"]:\n plt.figure(figsize=(15.2, 8.2))\n set_background()\n plt.subplots_adjust(left=0.02, right=0.98, top=0.98, bottom=0.02)\n plt.show()\n" ]
[ [ "numpy.degrees", "matplotlib.pyplot.figure", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.show", "numpy.radians" ] ]
boringlee24/keras_old
[ "1e1176c45c4952ba1b9b9e58e9cc4df027ab111d", "1e1176c45c4952ba1b9b9e58e9cc4df027ab111d", "1e1176c45c4952ba1b9b9e58e9cc4df027ab111d", "1e1176c45c4952ba1b9b9e58e9cc4df027ab111d", "1e1176c45c4952ba1b9b9e58e9cc4df027ab111d", "1e1176c45c4952ba1b9b9e58e9cc4df027ab111d", "1e1176c45c4952ba1b9b9e58e9cc4df027ab111d", "1e1176c45c4952ba1b9b9e58e9cc4df027ab111d" ]
[ "examples/pwr_run/checkpointing/final/final4_new2/job51.py", "examples/pwr_run/checkpointing/final_trace/top50/job48.py", "examples/pwr_run/checkpointing/debug/ovhd_profile/job6.py", "examples/pwr_run/ml_regression/new_speedup_def/knn_k80.py", "examples/pwr_run/checkpointing/nonpc_short/final1/job20.py", "examples/pwr_run/checkpointing/throughput/final4_new2/job48.py", "examples/pwr_run/gpu_pwr.py", "examples/pwr_run/checkpointing/final/no_threshold/job63.py" ]
[ "\"\"\"\n#Trains a ResNet on the CIFAR10 dataset.\n\n\"\"\"\n\nfrom __future__ import print_function\nimport keras\nfrom keras.layers import Dense, Conv2D, BatchNormalization, Activation\nfrom keras.layers import AveragePooling2D, Input, Flatten\nfrom keras.optimizers import Adam\nfrom keras.callbacks import ModelCheckpoint, LearningRateScheduler\nfrom keras.callbacks import ReduceLROnPlateau, TensorBoard\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.regularizers import l2\nfrom keras import backend as K\nfrom keras.models import Model\nfrom keras.datasets import cifar10\nfrom keras.applications.vgg16 import VGG16\nfrom keras.applications.vgg19 import VGG19\nfrom keras import models, layers, optimizers\nfrom datetime import datetime\nimport tensorflow as tf\nimport numpy as np\nimport os\nimport pdb\nimport sys\nimport argparse\nimport time\nimport signal\nimport glob\nimport json\nimport send_signal\n\nparser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')\nparser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')\nparser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')\nparser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')\nparser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')\nparser.set_defaults(resume=False)\nargs = parser.parse_args()\n\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=args.gpu_num\n\n# Training parameters\nbatch_size = 256\nargs_lr = 0.001\nargs_model = 'vgg16'\nepoch_begin_time = 0\n\njob_name = sys.argv[0].split('.')[0]\nsave_files = '/scratch/li.baol/checkpoint_final4_new2/' + job_name + '*'\n\ntotal_epochs = 6\nstarting_epoch = 0\n\n# first step is to update the PID\npid = os.getpid()\nmessage = job_name + ' pid ' + str(pid) # 'job50 pid 3333'\nsend_signal.send(args.node, 10002, message)\n\nif args.resume:\n save_file = glob.glob(save_files)[0]\n# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])\n starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])\n\ndata_augmentation = True\nnum_classes = 10\n\n# Subtracting pixel mean improves accuracy\nsubtract_pixel_mean = True\n\nn = 3\n\n# Model name, depth and version\nmodel_type = args.tc #'P100_resnet50_he_256_1'\n\n# Load the CIFAR10 data.\n(x_train, y_train), (x_test, y_test) = cifar10.load_data()\n\n# Normalize data.\nx_train = x_train.astype('float32') / 255\nx_test = x_test.astype('float32') / 255\n\n# If subtract pixel mean is enabled\nif subtract_pixel_mean:\n x_train_mean = np.mean(x_train, axis=0)\n x_train -= x_train_mean\n x_test -= x_train_mean\n\nprint('x_train shape:', x_train.shape)\nprint(x_train.shape[0], 'train samples')\nprint(x_test.shape[0], 'test samples')\nprint('y_train shape:', y_train.shape)\n\n# Convert class vectors to binary class matrices.\ny_train = keras.utils.to_categorical(y_train, num_classes)\ny_test = keras.utils.to_categorical(y_test, num_classes)\n\nif args.resume:\n print('resume from checkpoint')\n message = job_name + ' b_end'\n send_signal.send(args.node, 10002, message)\n model = keras.models.load_model(save_file)\n message = job_name + ' c_end'\n send_signal.send(args.node, 10002, message)\nelse:\n print('train from start')\n model = models.Sequential()\n \n if '16' in args_model:\n base_model = VGG16(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)\n 
elif '19' in args_model:\n base_model = VGG19(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)\n \n #base_model.summary()\n \n #pdb.set_trace()\n \n model.add(base_model)\n model.add(layers.Flatten())\n model.add(layers.BatchNormalization())\n model.add(layers.Dense(128, activation='relu'))#, kernel_initializer='he_uniform'))\n #model.add(layers.Dropout(0.2))\n model.add(layers.BatchNormalization())\n model.add(layers.Dense(64, activation='relu'))#, kernel_initializer='he_uniform'))\n #model.add(layers.Dropout(0.2))\n model.add(layers.BatchNormalization())\n model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))\n \n model.compile(loss='categorical_crossentropy',\n optimizer=Adam(lr=args_lr),\n metrics=['accuracy'])\n \n #model.summary()\n print(model_type)\n\n#pdb.set_trace()\n\ncurrent_epoch = 0\n\n################### connects interrupt signal to the process #####################\n\ndef terminateProcess(signalNumber, frame):\n # first record the wasted epoch time\n global epoch_begin_time\n if epoch_begin_time == 0:\n epoch_waste_time = 0\n else:\n epoch_waste_time = int(time.time() - epoch_begin_time)\n\n message = job_name + ' waste ' + str(epoch_waste_time) # 'job50 waste 100'\n if epoch_waste_time > 0:\n send_signal.send(args.node, 10002, message)\n\n print('checkpointing the model triggered by kill -15 signal')\n # delete whatever checkpoint that already exists\n for f in glob.glob(save_files):\n os.remove(f)\n model.save('/scratch/li.baol/checkpoint_final4_new2/' + job_name + '_' + str(current_epoch) + '.h5')\n print ('(SIGTERM) terminating the process')\n\n message = job_name + ' checkpoint'\n send_signal.send(args.node, 10002, message)\n\n sys.exit()\n\nsignal.signal(signal.SIGTERM, terminateProcess)\n\n#################################################################################\n\nlogdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name\n\ntensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')\n\nfirst_epoch_start = 0\n\nclass PrintEpoch(keras.callbacks.Callback):\n def on_epoch_begin(self, epoch, logs=None):\n global current_epoch, first_epoch_start\n #remaining_epochs = epochs - epoch\n current_epoch = epoch\n print('current epoch ' + str(current_epoch))\n global epoch_begin_time\n epoch_begin_time = time.time()\n if epoch == starting_epoch and args.resume:\n first_epoch_start = time.time()\n message = job_name + ' d_end'\n send_signal.send(args.node, 10002, message)\n elif epoch == starting_epoch:\n first_epoch_start = time.time() \n if epoch == starting_epoch:\n # send signal to indicate checkpoint is qualified\n message = job_name + ' ckpt_qual'\n send_signal.send(args.node, 10002, message)\n\n\n def on_epoch_end(self, epoch, logs=None):\n if epoch == starting_epoch:\n first_epoch_time = int(time.time() - first_epoch_start)\n message = job_name + ' 1st_epoch ' + str(first_epoch_time)\n send_signal.send(args.node, 10002, message)\n progress = round((epoch+1) / round(total_epochs/2), 2)\n message = job_name + ' completion ' + str(progress)\n send_signal.send(args.node, 10002, message)\n\nmy_callback = PrintEpoch()\n\ncallbacks = [tensorboard_callback, my_callback]\n #[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]\n\n# Run training\n\nmodel.fit(x_train, y_train,\n batch_size=batch_size,\n epochs=round(total_epochs/2),\n validation_data=(x_test, y_test),\n shuffle=True,\n callbacks=callbacks,\n initial_epoch=starting_epoch,\n verbose=1\n )\n\n# Score trained 
model.\nscores = model.evaluate(x_test, y_test, verbose=1)\nprint('Test loss:', scores[0])\nprint('Test accuracy:', scores[1])\n\n# send signal to indicate job has finished\nmessage = job_name + ' finish'\nsend_signal.send(args.node, 10002, message)\n\n", "\"\"\"\n#Trains a ResNet on the CIFAR10 dataset.\n\n\"\"\"\n\nfrom __future__ import print_function\nimport keras\nfrom keras.layers import Dense, Conv2D, BatchNormalization, Activation\nfrom keras.layers import AveragePooling2D, Input, Flatten\nfrom keras.optimizers import Adam\nfrom keras.callbacks import ModelCheckpoint, LearningRateScheduler\nfrom keras.callbacks import ReduceLROnPlateau, TensorBoard\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.regularizers import l2\nfrom keras import backend as K\nfrom keras.models import Model\nfrom keras.datasets import cifar10\nfrom keras.applications.resnet import ResNet50, ResNet101, ResNet152\nfrom keras import models, layers, optimizers\nfrom datetime import datetime\nimport tensorflow as tf\nimport numpy as np\nimport os\nimport pdb\nimport sys\nimport argparse\nimport time\nimport signal\nimport glob\nimport json\nimport send_signal\n\nparser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')\nparser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')\nparser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')\nparser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')\nparser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')\nparser.set_defaults(resume=False)\nargs = parser.parse_args()\n\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=args.gpu_num\n\n# Training parameters\nbatch_size = 32\nargs_lr = 0.0014\nargs_model = 'resnet101'\n\nepoch_begin_time = 0\n\njob_name = sys.argv[0].split('.')[0]\nsave_files = '/scratch/li.baol/checkpoint_final4/' + job_name + '*'\n\ntotal_epochs = 36\nstarting_epoch = 0\n\n# first step is to update the PID\npid = os.getpid()\nmessage = job_name + ' pid ' + str(pid) # 'job50 pid 3333'\nsend_signal.send(args.node, 10002, message)\n\nif args.resume:\n save_file = glob.glob(save_files)[0]\n# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])\n starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])\n\ndata_augmentation = True\nnum_classes = 10\n\n# Subtracting pixel mean improves accuracy\nsubtract_pixel_mean = True\n\nn = 3\n\n# Model name, depth and version\nmodel_type = args.tc #'P100_resnet50_he_256_1'\n\n# Load the CIFAR10 data.\n(x_train, y_train), (x_test, y_test) = cifar10.load_data()\n\n# Normalize data.\nx_train = x_train.astype('float32') / 255\nx_test = x_test.astype('float32') / 255\n\n# If subtract pixel mean is enabled\nif subtract_pixel_mean:\n x_train_mean = np.mean(x_train, axis=0)\n x_train -= x_train_mean\n x_test -= x_train_mean\n\nprint('x_train shape:', x_train.shape)\nprint(x_train.shape[0], 'train samples')\nprint(x_test.shape[0], 'test samples')\nprint('y_train shape:', y_train.shape)\n\n# Convert class vectors to binary class matrices.\ny_train = keras.utils.to_categorical(y_train, num_classes)\ny_test = keras.utils.to_categorical(y_test, num_classes)\n\nif args.resume:\n print('resume from checkpoint')\n message = job_name + ' b_end'\n send_signal.send(args.node, 10002, message)\n model = keras.models.load_model(save_file)\n message = job_name + ' 
c_end'\n send_signal.send(args.node, 10002, message)\nelse:\n print('train from start')\n model = models.Sequential()\n \n if '50' in args_model:\n base_model = ResNet50(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)\n elif '101' in args_model:\n base_model = ResNet101(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)\n elif '152' in args_model:\n base_model = ResNet152(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)\n \n #base_model.summary()\n \n #pdb.set_trace()\n \n #model.add(layers.UpSampling2D((2,2)))\n #model.add(layers.UpSampling2D((2,2)))\n #model.add(layers.UpSampling2D((2,2)))\n model.add(base_model)\n model.add(layers.Flatten())\n #model.add(layers.BatchNormalization())\n #model.add(layers.Dense(128, activation='relu'))\n #model.add(layers.Dropout(0.5))\n #model.add(layers.BatchNormalization())\n #model.add(layers.Dense(64, activation='relu'))\n #model.add(layers.Dropout(0.5))\n #model.add(layers.BatchNormalization())\n model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))\n \n model.compile(loss='categorical_crossentropy',\n optimizer=Adam(lr=args_lr),\n metrics=['accuracy'])\n \n #model.summary()\n print(model_type)\n\n#pdb.set_trace()\n\ncurrent_epoch = 0\n\n################### connects interrupt signal to the process #####################\n\ndef terminateProcess(signalNumber, frame):\n # first record the wasted epoch time\n global epoch_begin_time\n if epoch_begin_time == 0:\n epoch_waste_time = 0\n else:\n epoch_waste_time = int(time.time() - epoch_begin_time)\n\n message = job_name + ' waste ' + str(epoch_waste_time) # 'job50 waste 100'\n if epoch_waste_time > 0:\n send_signal.send(args.node, 10002, message)\n\n print('checkpointing the model triggered by kill -15 signal')\n # delete whatever checkpoint that already exists\n for f in glob.glob(save_files):\n os.remove(f)\n model.save('/scratch/li.baol/checkpoint_final4/' + job_name + '_' + str(current_epoch) + '.h5')\n print ('(SIGTERM) terminating the process')\n\n message = job_name + ' checkpoint'\n send_signal.send(args.node, 10002, message)\n\n sys.exit()\n\nsignal.signal(signal.SIGTERM, terminateProcess)\n\n#################################################################################\n\nlogdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name\n\ntensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')\n\nfirst_epoch_start = 0\n\nclass PrintEpoch(keras.callbacks.Callback):\n def on_epoch_begin(self, epoch, logs=None):\n global current_epoch, first_epoch_start\n #remaining_epochs = epochs - epoch\n current_epoch = epoch\n print('current epoch ' + str(current_epoch))\n global epoch_begin_time\n epoch_begin_time = time.time()\n if epoch == starting_epoch and args.resume:\n first_epoch_start = time.time()\n message = job_name + ' d_end'\n send_signal.send(args.node, 10002, message)\n elif epoch == starting_epoch:\n first_epoch_start = time.time() \n if epoch == starting_epoch:\n # send signal to indicate checkpoint is qualified\n message = job_name + ' ckpt_qual'\n send_signal.send(args.node, 10002, message)\n\n\n def on_epoch_end(self, epoch, logs=None):\n if epoch == starting_epoch:\n first_epoch_time = int(time.time() - first_epoch_start)\n message = job_name + ' 1st_epoch ' + str(first_epoch_time)\n send_signal.send(args.node, 10002, message)\n progress = round((epoch+1) / round(total_epochs/2), 2)\n message = job_name + ' completion ' + str(progress)\n 
send_signal.send(args.node, 10002, message)\n\nmy_callback = PrintEpoch()\n\ncallbacks = [tensorboard_callback, my_callback]\n #[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]\n\n# Run training\n\nmodel.fit(x_train, y_train,\n batch_size=batch_size,\n epochs=round(total_epochs/2),\n validation_data=(x_test, y_test),\n shuffle=True,\n callbacks=callbacks,\n initial_epoch=starting_epoch,\n verbose=1\n )\n\n# Score trained model.\nscores = model.evaluate(x_test, y_test, verbose=1)\nprint('Test loss:', scores[0])\nprint('Test accuracy:', scores[1])\n\n# send signal to indicate job has finished\nmessage = job_name + ' finish'\nsend_signal.send(args.node, 10002, message)\n", "\"\"\"\n#Trains a ResNet on the CIFAR10 dataset.\n\n\"\"\"\n\nfrom __future__ import print_function\nimport keras\nfrom keras.layers import Dense, Conv2D, BatchNormalization, Activation\nfrom keras.layers import AveragePooling2D, Input, Flatten\nfrom keras.optimizers import Adam\nfrom keras.callbacks import ModelCheckpoint, LearningRateScheduler\nfrom keras.callbacks import ReduceLROnPlateau, TensorBoard\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.regularizers import l2\nfrom keras import backend as K\nfrom keras.models import Model\nfrom keras.datasets import cifar10\nfrom keras.applications.vgg16 import VGG16\nfrom keras.applications.vgg19 import VGG19\nfrom keras import models, layers, optimizers\nfrom datetime import datetime\nimport tensorflow as tf\nimport numpy as np\nimport os\nimport pdb\nimport sys\nimport argparse\nimport time\nimport signal\nimport glob\nimport json\nimport send_signal\n\nload_start = time.time()\n\nparser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')\nparser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')\nparser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')\nparser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')\nparser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')\nparser.set_defaults(resume=False)\nargs = parser.parse_args()\n\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=args.gpu_num\n\n# Training parameters\nbatch_size = 256\nargs_lr = 0.005\nargs_model = 'vgg16'\n\nepoch_begin_time = 0\n\njob_name = sys.argv[0].split('.')[0]\nsave_files = '/scratch/li.baol/checkpoint_test/' + job_name + '*'\n\ntotal_epochs = 9\nstarting_epoch = 0\n\nif args.resume:\n save_file = glob.glob(save_files)[0]\n# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])\n starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])\n\ndata_augmentation = True\nnum_classes = 10\n\n# Subtracting pixel mean improves accuracy\nsubtract_pixel_mean = True\n\nn = 3\n\n# Model name, depth and version\nmodel_type = args.tc #'P100_resnet50_he_256_1'\n\n# Load the CIFAR10 data.\n(x_train, y_train), (x_test, y_test) = cifar10.load_data()\n\n# Normalize data.\nx_train = x_train.astype('float32') / 255\nx_test = x_test.astype('float32') / 255\n\n# If subtract pixel mean is enabled\nif subtract_pixel_mean:\n x_train_mean = np.mean(x_train, axis=0)\n x_train -= x_train_mean\n x_test -= x_train_mean\n\nprint('x_train shape:', x_train.shape)\nprint(x_train.shape[0], 'train samples')\nprint(x_test.shape[0], 'test samples')\nprint('y_train shape:', y_train.shape)\n\n# Convert class vectors to binary class matrices.\ny_train 
= keras.utils.to_categorical(y_train, num_classes)\ny_test = keras.utils.to_categorical(y_test, num_classes)\n\nif args.resume:\n print('resume from checkpoint')\n model = keras.models.load_model(save_file)\nelse:\n print('train from start')\n model = models.Sequential()\n \n if '16' in args_model:\n base_model = VGG16(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)\n elif '19' in args_model:\n base_model = VGG19(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)\n \n #base_model.summary()\n \n #pdb.set_trace()\n \n model.add(base_model)\n model.add(layers.Flatten())\n model.add(layers.BatchNormalization())\n model.add(layers.Dense(128, activation='relu'))#, kernel_initializer='he_uniform'))\n #model.add(layers.Dropout(0.2))\n model.add(layers.BatchNormalization())\n model.add(layers.Dense(64, activation='relu'))#, kernel_initializer='he_uniform'))\n #model.add(layers.Dropout(0.2))\n model.add(layers.BatchNormalization())\n model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))\n \n model.compile(loss='categorical_crossentropy',\n optimizer=Adam(lr=args_lr),\n metrics=['accuracy'])\n \n #model.summary()\n print(model_type)\n\n#pdb.set_trace()\n\ncurrent_epoch = 0\n\n################### connects interrupt signal to the process #####################\n\ndef terminateProcess():\n save_start = time.time()\n # first record the wasted epoch time\n global epoch_begin_time\n if epoch_begin_time == 0:\n epoch_waste_time = 0\n else:\n epoch_waste_time = int(time.time() - epoch_begin_time)\n\n print('checkpointing the model triggered by kill -15 signal')\n # delete whatever checkpoint that already exists\n for f in glob.glob(save_files):\n os.remove(f)\n model.save('/scratch/li.baol/checkpoint_test/' + job_name + '_' + str(current_epoch) + '.h5')\n print ('(SIGTERM) terminating the process')\n\n save_time = int(time.time() - save_start)\n message = job_name + ' save ' + str(save_time)\n send_signal.send(args.node, 10002, message)\n\n sys.exit()\n\nsignal.signal(signal.SIGTERM, terminateProcess)\n\n#################################################################################\n\nlogdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name\n\ntensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')\n\nclass PrintEpoch(keras.callbacks.Callback):\n def on_epoch_begin(self, epoch, logs=None):\n global current_epoch \n #remaining_epochs = epochs - epoch\n current_epoch = epoch\n print('current epoch ' + str(current_epoch))\n global epoch_begin_time\n epoch_begin_time = time.time()\n\nmy_callback = PrintEpoch()\n\ncallbacks = [tensorboard_callback, my_callback]\n\nload_time = int(time.time() - load_start)\nif args.resume:\n message = job_name + ' load ' + str(load_time)\n send_signal.send(args.node, 10002, message)\n # Score trained model.\n scores = model.evaluate(x_test, y_test, verbose=1)\n print('Test loss:', scores[0])\n print('Test accuracy:', scores[1])\n # send signal to indicate job has finished\n message = job_name + ' finish'\n send_signal.send(args.node, 10002, message)\n sys.exit()\n\n\nmodel.fit(x_train, y_train,\n batch_size=batch_size,\n epochs=1,\n validation_data=(x_test, y_test),\n shuffle=True,\n callbacks=callbacks,\n initial_epoch=starting_epoch,\n verbose=1\n )\nif not args.resume:\n terminateProcess()\n", "import pandas\nimport pdb\nfrom datetime import datetime\nimport matplotlib\nimport numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as 
plt\nimport glob\nimport sys\nfrom matplotlib.ticker import MultipleLocator\nfrom scipy.stats import pearsonr, spearmanr\nfrom sklearn import neighbors\nfrom sklearn.metrics import mean_squared_error\nfrom math import sqrt\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression\nimport json\n\nlog_dir = '/scratch/li.baol/GPU_pwr_meas/tensorflow/round1/regression/pwr/*'\ndirs = glob.glob(log_dir)\ndirs.sort()\n# store everything in a dict\nall_pwr = {} # {densenet121_32:{K80:a, K100:b}...}\n\nfor tc in dirs:\n test = tc.split('/')[6+1+1].split('.')[0]\n gpu = test.split('_')[0]\n model = test.replace(gpu + '_', '')\n\n # read tc.csv into a list\n data = pandas.read_csv(tc)\n pwr = np.asarray(data[data.columns[0]].tolist())\n \n if model in all_pwr:\n all_pwr[model][gpu] = pwr\n else:\n all_pwr[model] = {gpu: pwr}\n\nlog_dir = '/scratch/li.baol/GPU_pwr_meas/tensorflow/round1/regression/util/*'\ndirs = glob.glob(log_dir)\ndirs.sort()\n# store everything in a dict\nall_util = {} # {densenet121_32:{K80:a, K100:b}...}\n\nfor tc in dirs:\n test = tc.split('/')[6+1+1].split('.')[0]\n gpu = test.split('_')[0]\n model = test.replace(gpu + '_', '')\n\n # read tc.csv into a list\n data = pandas.read_csv(tc)\n util = np.asarray(data[data.columns[0]].tolist())\n \n if model in all_util:\n all_util[model][gpu] = util\n else:\n all_util[model] = {gpu: util}\n\nlog_dir = '/scratch/li.baol/GPU_pwr_meas/tensorflow/round1/regression/mem_util/*'\ndirs = glob.glob(log_dir)\ndirs.sort()\n# store everything in a dict\nall_mem_util = {} # {densenet121_32:{K80:a, K100:b}...}\n\nfor tc in dirs:\n test = tc.split('/')[6+1+1].split('.')[0]\n gpu = test.split('_')[0]\n model = test.replace(gpu + '_', '')\n\n # read tc.csv into a list\n data = pandas.read_csv(tc)\n mem_util = np.asarray(data[data.columns[0]].tolist())\n \n if model in all_mem_util:\n all_mem_util[model][gpu] = mem_util\n else:\n all_mem_util[model] = {gpu: mem_util}\n\nlog_dir = '/scratch/li.baol/GPU_time_meas/tensorflow/round1/csv/*'\ndirs = glob.glob(log_dir)\ndirs.sort()\n# store everything in a dict\nall_time = {} # {densenet121_32:{K80:a, K100:b}...}\n\nfor tc in dirs:\n test = tc.split('/')[6+1].split('.')[0]\n gpu = test.split('_')[0]\n model = test.replace(gpu + '_', '')\n\n # read tc.csv into a list\n data = pandas.read_csv(tc)\n time = np.asarray(data[data.columns[0]].tolist())\n \n if model in all_time:\n all_time[model][gpu] = time\n else:\n all_time[model] = {gpu: time}\n\n# Now plot V100 power save ratio (%) vs K80 power(W)\n\nx1_data = [] # power\nx2_data = [] # speed\nx3_data = [] # utilization\nx4_data = [] # mem util\ny_data = []\n\nfor key in all_pwr:\n# if ('mnasnet' not in key and 'mobilenet' not in key):\n for i in all_pwr[key]['K80'].tolist(): # power\n x1_data.append(i)\n for i in (1 / all_time[key]['K80']).tolist(): # speed\n x2_data.append(i)\n for i in (all_util[key]['K80']).tolist(): # utilization\n x3_data.append(i)\n for i in (all_mem_util[key]['K80']).tolist(): # mem util\n x4_data.append(i)\n for i in (all_time[key]['K80'] / all_time[key]['V100']).tolist(): # speed up \n y_data.append(i)\n\nx1_norm = [(i - min(x1_data)) / (max(x1_data) - min(x1_data)) for i in x1_data]\nx2_norm = [(i - min(x2_data)) / (max(x2_data) - min(x2_data)) for i in x2_data]\nx3_norm = [(i - min(x3_data)) / (max(x3_data) - min(x3_data)) for i in x3_data]\nx4_norm = [(i - min(x4_data)) / (max(x4_data) - min(x4_data)) for i in x4_data]\n\n# create training data\nx_data = []\nfor i in 
range(len(x1_norm)):\n x_data.append([x1_norm[i], x2_norm[i], x3_norm[i], x4_norm[i]])\n\nx_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size=0.3)\n\nwith open('x1_data.json', 'w') as outfile:\n json.dump(x1_data, outfile)\nwith open('x2_data.json', 'w') as outfile:\n json.dump(x2_data, outfile)\nwith open('x3_data.json', 'w') as outfile:\n json.dump(x3_data, outfile)\nwith open('x4_data.json', 'w') as outfile:\n json.dump(x4_data, outfile)\n\nwith open('y_data.json', 'w') as outfile:\n json.dump(y_data, outfile)\n#with open('x_data.json') as f:\n# x_data = json.load(f)\n#with open('y_data.json') as f:\n# y_data = json.load(f)\n#x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size=0.3)\n\nrmse_val = [] #to store rmse values for different k\nfor K in range(20):\n K = K+1\n model = neighbors.KNeighborsRegressor(n_neighbors = K, weights='distance')\n\n model.fit(x_train, y_train) #fit the model\n pred = model.predict(x_test) #make prediction on test set\n# model.predict(np.array(x_test[0]).reshape((1, -1)))\n err = sqrt(mean_squared_error(y_test, pred)) #calculate rmse\n rmse_val.append(err) #store rmse values\n err_pct = abs(y_test-pred) / y_test * 100\n print('RMSE value for k= ' , K , 'is:', err)\n print('error (%) is', np.mean(err_pct))\n\nxx_data = []\nfor i in range(len(x1_norm)):\n xx_data.append([x1_norm[i]])\n\n# now compare with liear regression\nx_train, x_test, y_train, y_test = train_test_split(xx_data, y_data, test_size=0.3)\nmodel2 = LinearRegression().fit(x_train, y_train)\npred = model2.predict(x_test) #make prediction on test set\nerr = sqrt(mean_squared_error(y_test,pred)) #calculate rmse\nprint('RMSE value for linear regression is ', err)\n\n\n\n", "\"\"\"\n#Trains a ResNet on the CIFAR10 dataset.\n\n\"\"\"\n\nfrom __future__ import print_function\nimport keras\nfrom keras.layers import Dense, Conv2D, BatchNormalization, Activation\nfrom keras.layers import AveragePooling2D, Input, Flatten\nfrom keras.optimizers import Adam\nfrom keras.callbacks import ModelCheckpoint, LearningRateScheduler\nfrom keras.callbacks import ReduceLROnPlateau, TensorBoard\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.regularizers import l2\nfrom keras import backend as K\nfrom keras.models import Model\nfrom keras.datasets import cifar10\nfrom keras.applications.mobilenet_v2 import MobileNetV2\nfrom keras import models, layers, optimizers\nfrom datetime import datetime\nimport tensorflow as tf\nimport numpy as np\nimport os\nimport pdb\nimport sys\nimport argparse\nimport time\nimport signal\nimport glob\nimport json\nimport send_signal\n\nparser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')\nparser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')\nparser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')\nparser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')\nparser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')\nparser.set_defaults(resume=False)\nargs = parser.parse_args()\n\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=args.gpu_num\n\n# Training parameters\nbatch_size = 256\nargs_lr = 0.0005\n\nepoch_begin_time = 0\n\njob_name = sys.argv[0].split('.')[0]\nsave_files = '/scratch/li.baol/checkpoint_final1/' + job_name + '*'\n\ntotal_epochs = 44\nstarting_epoch = 0\n\n# first step 
is to update the PID\npid_dict = {}\nwith open('pid_lock.json', 'r') as fp:\n pid_dict = json.load(fp)\npid_dict[job_name] = os.getpid()\njson_file = json.dumps(pid_dict)\nwith open('pid_lock.json', 'w') as fp:\n fp.write(json_file) \nos.rename('pid_lock.json', 'pid.json')\n\nif args.resume:\n save_file = glob.glob(save_files)[0]\n# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])\n starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])\n\ndata_augmentation = True\nnum_classes = 10\n\n# Subtracting pixel mean improves accuracy\nsubtract_pixel_mean = True\n\nn = 3\n\n# Model name, depth and version\nmodel_type = args.tc #'P100_resnet50_he_256_1'\n\n# Load the CIFAR10 data.\n(x_train, y_train), (x_test, y_test) = cifar10.load_data()\n\n# Normalize data.\nx_train = x_train.astype('float32') / 255\nx_test = x_test.astype('float32') / 255\n\n# If subtract pixel mean is enabled\nif subtract_pixel_mean:\n x_train_mean = np.mean(x_train, axis=0)\n x_train -= x_train_mean\n x_test -= x_train_mean\n\nprint('x_train shape:', x_train.shape)\nprint(x_train.shape[0], 'train samples')\nprint(x_test.shape[0], 'test samples')\nprint('y_train shape:', y_train.shape)\n\n# Convert class vectors to binary class matrices.\ny_train = keras.utils.to_categorical(y_train, num_classes)\ny_test = keras.utils.to_categorical(y_test, num_classes)\n\nif args.resume:\n print('resume from checkpoint')\n model = keras.models.load_model(save_file)\nelse:\n print('train from start')\n model = models.Sequential()\n \n base_model = MobileNetV2(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)\n \n #base_model.summary()\n \n #pdb.set_trace()\n \n model.add(base_model)\n model.add(layers.Flatten())\n #model.add(layers.BatchNormalization())\n #model.add(layers.Dense(128, activation='relu'))\n #model.add(layers.Dropout(0.5))\n #model.add(layers.BatchNormalization())\n #model.add(layers.Dense(64, activation='relu'))\n #model.add(layers.Dropout(0.5))\n #model.add(layers.BatchNormalization())\n model.add(layers.Dense(10, activation='softmax'))\n \n model.compile(loss='categorical_crossentropy',\n optimizer=Adam(lr=args_lr),\n metrics=['accuracy'])\n \n #model.summary()\n print(model_type)\n\n#pdb.set_trace()\n\ncurrent_epoch = 0\n\n################### connects interrupt signal to the process #####################\n\ndef terminateProcess(signalNumber, frame):\n # first record the wasted epoch time\n global epoch_begin_time\n if epoch_begin_time == 0:\n epoch_waste_time = 0\n else:\n epoch_waste_time = int(time.time() - epoch_begin_time)\n\n epoch_waste_dict = {}\n with open('epoch_waste.json', 'r') as fp:\n epoch_waste_dict = json.load(fp)\n epoch_waste_dict[job_name] += epoch_waste_time\n json_file3 = json.dumps(epoch_waste_dict)\n with open('epoch_waste.json', 'w') as fp:\n fp.write(json_file3)\n\n print('checkpointing the model triggered by kill -15 signal')\n # delete whatever checkpoint that already exists\n for f in glob.glob(save_files):\n os.remove(f)\n model.save('/scratch/li.baol/checkpoint_final1/' + job_name + '_' + str(current_epoch) + '.h5')\n print ('(SIGTERM) terminating the process')\n\n checkpoint_dict = {}\n with open('checkpoint.json', 'r') as fp:\n checkpoint_dict = json.load(fp)\n checkpoint_dict[job_name] = 1\n json_file3 = json.dumps(checkpoint_dict)\n with open('checkpoint.json', 'w') as fp:\n fp.write(json_file3)\n\n sys.exit()\n\nsignal.signal(signal.SIGTERM, 
terminateProcess)\n\n#################################################################################\n\nlogdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name\n\ntensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')\n\nclass PrintEpoch(keras.callbacks.Callback):\n def on_epoch_begin(self, epoch, logs=None):\n global current_epoch \n #remaining_epochs = epochs - epoch\n current_epoch = epoch\n print('current epoch ' + str(current_epoch))\n global epoch_begin_time\n epoch_begin_time = time.time()\n\nmy_callback = PrintEpoch()\n\ncallbacks = [tensorboard_callback, my_callback]\n #[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]\n\n# Run training\n\n# send signal to indicate checkpoint is qualified\nmessage = job_name + ' ckpt_qual'\nsend_signal.send(args.node, 10002, message)\n\nmodel.fit(x_train, y_train,\n batch_size=batch_size,\n epochs=round(total_epochs/2),\n validation_data=(x_test, y_test),\n shuffle=True,\n callbacks=callbacks,\n initial_epoch=starting_epoch,\n verbose=1\n )\n\n# Score trained model.\nscores = model.evaluate(x_test, y_test, verbose=1)\nprint('Test loss:', scores[0])\nprint('Test accuracy:', scores[1])\n\n# send signal to indicate job has finished\nmessage = job_name + ' finish'\nsend_signal.send(args.node, 10002, message)\n", "\"\"\"\n#Trains a ResNet on the CIFAR10 dataset.\n\n\"\"\"\n\nfrom __future__ import print_function\nimport keras\nfrom keras.layers import Dense, Conv2D, BatchNormalization, Activation\nfrom keras.layers import AveragePooling2D, Input, Flatten\nfrom keras.optimizers import Adam\nfrom keras.callbacks import ModelCheckpoint, LearningRateScheduler\nfrom keras.callbacks import ReduceLROnPlateau, TensorBoard\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.regularizers import l2\nfrom keras import backend as K\nfrom keras.models import Model\nfrom keras.datasets import cifar10\nfrom keras.applications.resnet import ResNet50, ResNet101, ResNet152\nfrom keras import models, layers, optimizers\nfrom datetime import datetime\nimport tensorflow as tf\nimport numpy as np\nimport os\nimport pdb\nimport sys\nimport argparse\nimport time\nimport signal\nimport glob\nimport json\nimport send_signal\n\nparser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')\nparser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')\nparser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')\nparser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')\nparser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')\nparser.set_defaults(resume=False)\nargs = parser.parse_args()\n\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=args.gpu_num\n\n# Training parameters\nbatch_size = 32\nargs_lr = 0.0014\nargs_model = 'resnet101'\n\nepoch_begin_time = 0\n\njob_name = sys.argv[0].split('.')[0]\nsave_files = '/scratch/li.baol/checkpoint_final4/' + job_name + '*'\n\ntotal_epochs = 134\nstarting_epoch = 0\n\n# first step is to update the PID\npid = os.getpid()\nmessage = job_name + ' pid ' + str(pid) # 'job50 pid 3333'\nsend_signal.send(args.node, 10002, message)\n\nif args.resume:\n save_file = glob.glob(save_files)[0]\n# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])\n starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])\n\ndata_augmentation = 
True\nnum_classes = 10\n\n# Subtracting pixel mean improves accuracy\nsubtract_pixel_mean = True\n\nn = 3\n\n# Model name, depth and version\nmodel_type = args.tc #'P100_resnet50_he_256_1'\n\n# Load the CIFAR10 data.\n(x_train, y_train), (x_test, y_test) = cifar10.load_data()\n\n# Normalize data.\nx_train = x_train.astype('float32') / 255\nx_test = x_test.astype('float32') / 255\n\n# If subtract pixel mean is enabled\nif subtract_pixel_mean:\n x_train_mean = np.mean(x_train, axis=0)\n x_train -= x_train_mean\n x_test -= x_train_mean\n\nprint('x_train shape:', x_train.shape)\nprint(x_train.shape[0], 'train samples')\nprint(x_test.shape[0], 'test samples')\nprint('y_train shape:', y_train.shape)\n\n# Convert class vectors to binary class matrices.\ny_train = keras.utils.to_categorical(y_train, num_classes)\ny_test = keras.utils.to_categorical(y_test, num_classes)\n\nif args.resume:\n print('resume from checkpoint')\n message = job_name + ' b_end'\n send_signal.send(args.node, 10002, message)\n model = keras.models.load_model(save_file)\n message = job_name + ' c_end'\n send_signal.send(args.node, 10002, message)\nelse:\n print('train from start')\n model = models.Sequential()\n \n if '50' in args_model:\n base_model = ResNet50(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)\n elif '101' in args_model:\n base_model = ResNet101(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)\n elif '152' in args_model:\n base_model = ResNet152(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)\n \n #base_model.summary()\n \n #pdb.set_trace()\n \n #model.add(layers.UpSampling2D((2,2)))\n #model.add(layers.UpSampling2D((2,2)))\n #model.add(layers.UpSampling2D((2,2)))\n model.add(base_model)\n model.add(layers.Flatten())\n #model.add(layers.BatchNormalization())\n #model.add(layers.Dense(128, activation='relu'))\n #model.add(layers.Dropout(0.5))\n #model.add(layers.BatchNormalization())\n #model.add(layers.Dense(64, activation='relu'))\n #model.add(layers.Dropout(0.5))\n #model.add(layers.BatchNormalization())\n model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))\n \n model.compile(loss='categorical_crossentropy',\n optimizer=Adam(lr=args_lr),\n metrics=['accuracy'])\n \n #model.summary()\n print(model_type)\n\n#pdb.set_trace()\n\ncurrent_epoch = 0\n\n################### connects interrupt signal to the process #####################\n\ndef terminateProcess(signalNumber, frame):\n # first record the wasted epoch time\n global epoch_begin_time\n if epoch_begin_time == 0:\n epoch_waste_time = 0\n else:\n epoch_waste_time = int(time.time() - epoch_begin_time)\n\n message = job_name + ' waste ' + str(epoch_waste_time) # 'job50 waste 100'\n if epoch_waste_time > 0:\n send_signal.send(args.node, 10002, message)\n\n print('checkpointing the model triggered by kill -15 signal')\n # delete whatever checkpoint that already exists\n for f in glob.glob(save_files):\n os.remove(f)\n model.save('/scratch/li.baol/checkpoint_final4/' + job_name + '_' + str(current_epoch) + '.h5')\n print ('(SIGTERM) terminating the process')\n\n message = job_name + ' checkpoint'\n send_signal.send(args.node, 10002, message)\n\n sys.exit()\n\nsignal.signal(signal.SIGTERM, terminateProcess)\n\n#################################################################################\n\nlogdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name\n\ntensorboard_callback = TensorBoard(log_dir=logdir)#, 
update_freq='batch')\n\nfirst_epoch_start = 0\n\nclass PrintEpoch(keras.callbacks.Callback):\n def on_epoch_begin(self, epoch, logs=None):\n global current_epoch, first_epoch_start\n #remaining_epochs = epochs - epoch\n current_epoch = epoch\n print('current epoch ' + str(current_epoch))\n global epoch_begin_time\n epoch_begin_time = time.time()\n if epoch == starting_epoch and args.resume:\n first_epoch_start = time.time()\n message = job_name + ' d_end'\n send_signal.send(args.node, 10002, message)\n elif epoch == starting_epoch:\n first_epoch_start = time.time() \n if epoch == starting_epoch:\n # send signal to indicate checkpoint is qualified\n message = job_name + ' ckpt_qual'\n send_signal.send(args.node, 10002, message)\n\n\n def on_epoch_end(self, epoch, logs=None):\n if epoch == starting_epoch:\n first_epoch_time = int(time.time() - first_epoch_start)\n message = job_name + ' 1st_epoch ' + str(first_epoch_time)\n send_signal.send(args.node, 10002, message)\n progress = round((epoch+1) / round(total_epochs/2), 2)\n message = job_name + ' completion ' + str(progress)\n send_signal.send(args.node, 10002, message)\n\nmy_callback = PrintEpoch()\n\ncallbacks = [tensorboard_callback, my_callback]\n #[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]\n\n# Run training\n\nmodel.fit(x_train, y_train,\n batch_size=batch_size,\n epochs=round(total_epochs/2),\n validation_data=(x_test, y_test),\n shuffle=True,\n callbacks=callbacks,\n initial_epoch=starting_epoch,\n verbose=1\n )\n\n# Score trained model.\nscores = model.evaluate(x_test, y_test, verbose=1)\nprint('Test loss:', scores[0])\nprint('Test accuracy:', scores[1])\n\n# send signal to indicate job has finished\nmessage = job_name + ' finish'\nsend_signal.send(args.node, 10002, message)\n", "import pandas\nimport pdb\nfrom datetime import datetime\nimport matplotlib\nimport numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport glob\nimport sys\nfrom matplotlib.ticker import MultipleLocator\n\ntestcase = sys.argv[1] # K80_vgg19_32\nprint(testcase)\nbase_dir = '/scratch/li.baol/GPU_pwr_meas/tensorflow/round1/'\nlog_dir = base_dir + testcase + '_*/' # /scratch/li.baol/GPU_pwr_meas/pytorch/K80_vgg19_32_*/\ndirs = glob.glob(log_dir)\ndirs.sort()\npwr_all = []\navg_all = []\n\nfor tc in dirs:\n model = tc.split('/')[5+1]\n files = glob.glob(tc + \"sample*.csv\")\n files.sort()\n avg_pwr = [0] * (len(files) + 1)\n \n for fil in files:\n file_path = fil\n minute = int(fil.split('/')[6+1].split('_')[1].split('.')[0])\n try: # in case the file is empty\n data = pandas.read_csv(file_path)\n pwr = data[data.columns[2]].tolist()\n \n pwr_array = np.asarray(pwr)\n if (len(pwr) == 0):\n avg_pwr[minute] = 0\n else:\n avg_pwr[minute] = np.average(pwr_array)\n except pandas.errors.EmptyDataError:\n avg_pwr[minute] = 0\n pass\n pwr_all.append(avg_pwr)\n avg_pwr_filter = [i for i in avg_pwr if i > 10] # remove power measurements below 10W\n avg_all.append(sum(avg_pwr_filter) / len(avg_pwr_filter))\n\n\n#------------- plot ---------------#\n\nwidth = 0.1\n\nfig, axs = plt.subplots(1, 1, gridspec_kw={'hspace': 0, 'wspace': 0}, figsize=(12,5))\nfig.suptitle(testcase + \" GPU power (W) during training epochs\")\nfor i in range(len(pwr_all)): \n x = np.arange(len(pwr_all[i]))\n axs.scatter(x, pwr_all[i], label = str(i))\n\naxs.set_xlabel('# of sample with 10s 
interval')\naxs.set_ylabel('power(W)')\n#axs.set_yticks(minor=True)\naxs.get_yaxis().set_minor_locator(MultipleLocator(5))\naxs.legend()\n\naxs.grid(which='both', axis='y', linestyle=':', color='black')\npwr = int(sum(avg_all) / len(avg_all))\nplt.savefig(base_dir + \"png/\" + testcase + '_pwr' + str(pwr) + \".png\")\n\ndf = pandas.DataFrame(avg_all, columns=[\"power(W)\"])\ndf.to_csv(base_dir + 'csv/' + testcase + '.csv', index=False)\n", "\"\"\"\n#Trains a ResNet on the CIFAR10 dataset.\n\n\"\"\"\n\nfrom __future__ import print_function\nimport keras\nfrom keras.layers import Dense, Conv2D, BatchNormalization, Activation\nfrom keras.layers import AveragePooling2D, Input, Flatten\nfrom keras.optimizers import Adam\nfrom keras.callbacks import ModelCheckpoint, LearningRateScheduler\nfrom keras.callbacks import ReduceLROnPlateau, TensorBoard\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.regularizers import l2\nfrom keras import backend as K\nfrom keras.models import Model\nfrom keras.datasets import cifar10\nfrom keras.applications.resnet import ResNet50, ResNet101, ResNet152\nfrom keras import models, layers, optimizers\nfrom datetime import datetime\nimport tensorflow as tf\nimport numpy as np\nimport os\nimport pdb\nimport sys\nimport argparse\nimport time\nimport signal\nimport glob\nimport json\nimport send_signal\n\nparser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')\nparser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')\nparser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')\nparser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')\nparser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')\nparser.set_defaults(resume=False)\nargs = parser.parse_args()\n\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=args.gpu_num\n\n# Training parameters\nbatch_size = 256\nargs_lr = 0.01\nargs_model = 'resnet50'\n\nepoch_begin_time = 0\n\njob_name = sys.argv[0].split('.')[0]\nsave_files = '/scratch/li.baol/checkpoint_no_threshold/' + job_name + '*'\n\ntotal_epochs = 11\nstarting_epoch = 0\n\n# first step is to update the PID\npid = os.getpid()\nmessage = job_name + ' pid ' + str(pid) # 'job50 pid 3333'\nsend_signal.send(args.node, 10002, message)\n\nif args.resume:\n save_file = glob.glob(save_files)[0]\n# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])\n starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])\n\ndata_augmentation = True\nnum_classes = 10\n\n# Subtracting pixel mean improves accuracy\nsubtract_pixel_mean = True\n\nn = 3\n\n# Model name, depth and version\nmodel_type = args.tc #'P100_resnet50_he_256_1'\n\n# Load the CIFAR10 data.\n(x_train, y_train), (x_test, y_test) = cifar10.load_data()\n\n# Normalize data.\nx_train = x_train.astype('float32') / 255\nx_test = x_test.astype('float32') / 255\n\n# If subtract pixel mean is enabled\nif subtract_pixel_mean:\n x_train_mean = np.mean(x_train, axis=0)\n x_train -= x_train_mean\n x_test -= x_train_mean\n\nprint('x_train shape:', x_train.shape)\nprint(x_train.shape[0], 'train samples')\nprint(x_test.shape[0], 'test samples')\nprint('y_train shape:', y_train.shape)\n\n# Convert class vectors to binary class matrices.\ny_train = keras.utils.to_categorical(y_train, num_classes)\ny_test = keras.utils.to_categorical(y_test, num_classes)\n\nif 
args.resume:\n print('resume from checkpoint')\n message = job_name + ' b_end'\n send_signal.send(args.node, 10002, message)\n model = keras.models.load_model(save_file)\n message = job_name + ' c_end'\n send_signal.send(args.node, 10002, message)\nelse:\n print('train from start')\n model = models.Sequential()\n \n if '50' in args_model:\n base_model = ResNet50(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)\n elif '101' in args_model:\n base_model = ResNet101(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)\n elif '152' in args_model:\n base_model = ResNet152(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)\n \n #base_model.summary()\n \n #pdb.set_trace()\n \n #model.add(layers.UpSampling2D((2,2)))\n #model.add(layers.UpSampling2D((2,2)))\n #model.add(layers.UpSampling2D((2,2)))\n model.add(base_model)\n model.add(layers.Flatten())\n #model.add(layers.BatchNormalization())\n #model.add(layers.Dense(128, activation='relu'))\n #model.add(layers.Dropout(0.5))\n #model.add(layers.BatchNormalization())\n #model.add(layers.Dense(64, activation='relu'))\n #model.add(layers.Dropout(0.5))\n #model.add(layers.BatchNormalization())\n model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))\n \n model.compile(loss='categorical_crossentropy',\n optimizer=Adam(lr=args_lr),\n metrics=['accuracy'])\n \n #model.summary()\n print(model_type)\n\n#pdb.set_trace()\n\ncurrent_epoch = 0\n\n################### connects interrupt signal to the process #####################\n\ndef terminateProcess(signalNumber, frame):\n # first record the wasted epoch time\n global epoch_begin_time\n if epoch_begin_time == 0:\n epoch_waste_time = 0\n else:\n epoch_waste_time = int(time.time() - epoch_begin_time)\n\n message = job_name + ' waste ' + str(epoch_waste_time) # 'job50 waste 100'\n if epoch_waste_time > 0:\n send_signal.send(args.node, 10002, message)\n\n print('checkpointing the model triggered by kill -15 signal')\n # delete whatever checkpoint that already exists\n for f in glob.glob(save_files):\n os.remove(f)\n model.save('/scratch/li.baol/checkpoint_no_threshold/' + job_name + '_' + str(current_epoch) + '.h5')\n print ('(SIGTERM) terminating the process')\n\n message = job_name + ' checkpoint'\n send_signal.send(args.node, 10002, message)\n\n sys.exit()\n\nsignal.signal(signal.SIGTERM, terminateProcess)\n\n#################################################################################\n\nlogdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name\n\ntensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')\n\nfirst_epoch_start = 0\n\nclass PrintEpoch(keras.callbacks.Callback):\n def on_epoch_begin(self, epoch, logs=None):\n global current_epoch, first_epoch_start\n #remaining_epochs = epochs - epoch\n current_epoch = epoch\n print('current epoch ' + str(current_epoch))\n global epoch_begin_time\n epoch_begin_time = time.time()\n if epoch == starting_epoch and args.resume:\n first_epoch_start = time.time()\n message = job_name + ' d_end'\n send_signal.send(args.node, 10002, message)\n elif epoch == starting_epoch:\n first_epoch_start = time.time() \n if epoch == starting_epoch:\n # send signal to indicate checkpoint is qualified\n message = job_name + ' ckpt_qual'\n send_signal.send(args.node, 10002, message)\n\n\n def on_epoch_end(self, epoch, logs=None):\n if epoch == starting_epoch:\n first_epoch_time = int(time.time() - first_epoch_start)\n message = job_name + ' 1st_epoch ' + 
str(first_epoch_time)\n send_signal.send(args.node, 10002, message)\n progress = round((epoch+1) / round(total_epochs/2), 2)\n message = job_name + ' completion ' + str(progress)\n send_signal.send(args.node, 10002, message)\n\nmy_callback = PrintEpoch()\n\ncallbacks = [tensorboard_callback, my_callback]\n #[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]\n\n# Run training\n\nmodel.fit(x_train, y_train,\n batch_size=batch_size,\n epochs=round(total_epochs/2),\n validation_data=(x_test, y_test),\n shuffle=True,\n callbacks=callbacks,\n initial_epoch=starting_epoch,\n verbose=1\n )\n\n# Score trained model.\nscores = model.evaluate(x_test, y_test, verbose=1)\nprint('Test loss:', scores[0])\nprint('Test accuracy:', scores[1])\n\n# send signal to indicate job has finished\nmessage = job_name + ' finish'\nsend_signal.send(args.node, 10002, message)\n" ]
[ [ "numpy.mean" ], [ "numpy.mean" ], [ "numpy.mean" ], [ "sklearn.metrics.mean_squared_error", "sklearn.neighbors.KNeighborsRegressor", "pandas.read_csv", "sklearn.linear_model.LinearRegression", "matplotlib.use", "sklearn.model_selection.train_test_split", "numpy.mean" ], [ "numpy.mean" ], [ "numpy.mean" ], [ "pandas.read_csv", "pandas.DataFrame", "numpy.asarray", "matplotlib.pyplot.subplots", "matplotlib.use", "matplotlib.ticker.MultipleLocator", "numpy.average" ], [ "numpy.mean" ] ]
woanderer/neuroformer
[ "df3462d55977b6c9adcb6753e7c474b8b76e8021", "df3462d55977b6c9adcb6753e7c474b8b76e8021" ]
[ ".history/neuroformer/model_perceiver_20220116213408.py", ".history/neuroformer/model_perceiver_20220121144506.py" ]
[ "# from code.transformer_vid.utils import convert_weights\n# import rotary_embedding_torch\nfrom torch.nn.modules.activation import GELU, ReLU\n# from data.OneCombo3.trainer import TrainerConfig\nimport math\nimport numpy as np\nimport itertools\nimport logging\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn import functional as F\nfrom torch.autograd import Variable\nfrom torchvision.models.video import r3d_18\n# from ResNet3D import r3d_18\n\nfrom scipy.optimize import linear_sum_assignment\n# from rotary_embedding_torch import apply_rotary_emb, RotaryEmbedding\n\nfrom einops.layers.torch import Rearrange\n\nlogger = logging.getLogger(__name__)\n\n\ndef convert_weights(model: nn.Module):\n \"\"\"Convert applicable model parameters to fp16\"\"\"\n\n def _convert_weights_to_fp16(l):\n if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)): # nn.Conv3d,\n l.weight.data = l.weight.data.half()\n if l.bias is not None:\n l.bias.data = l.bias.data.half()\n\n if isinstance(l, nn.MultiheadAttention):\n for attr in [*[f\"{s}_proj_weight\" for s in [\"in\", \"q\", \"k\", \"v\"]], \"in_proj_bias\", \"bias_k\", \"bias_v\"]:\n tensor = getattr(l, attr)\n if tensor is not None:\n tensor.data = tensor.data.half()\n\n for name in [\"text_projection\", \"proj\"]:\n if hasattr(l, name):\n attr = getattr(l, name)\n if attr is not None:\n attr.data = attr.data.half()\n\n model.apply(_convert_weights_to_fp16)\n\nclass GPTConfig:\n \"\"\" base GPT config, params common to all GPT versions \"\"\"\n embd_pdrop = 0.2\n resid_pdrop = 0.2\n attn_pdrop = 0.2\n pos_pdrop = 0.2\n temp_pdrop = 0.2\n pos_emb = True\n temp_emb = True\n start_prune = 30\n epoch = 0\n\n def __init__(self, vocab_size, block_size, **kwargs):\n self.vocab_size = vocab_size\n self.block_size = block_size\n for k, v in kwargs.items():\n setattr(self, k, v)\n\nclass neuralGPTConfig:\n \"\"\" base GPT config, params common to all GPT versions \"\"\"\n n = 0.4\n im_drop = 0.2\n id_drop = n\n embd_pdrop = n\n resid_pdrop = n\n attn_pdrop = n\n pos_pdrop = n\n temp_pdrop = n\n pos_emb = True\n temp_emb = True\n\n def __init__(self, vocab_size, block_size, **kwargs):\n self.vocab_size = vocab_size\n self.block_size = block_size\n for k, v in kwargs.items():\n setattr(self, k, v)\n\n\nclass GPT1Config(GPTConfig):\n \"\"\" GPT-1 like network roughly 125M params \"\"\"\n n_layer = 12\n n_head = 12\n n_embd = 768\n\n\nclass VideoFeaturesExtractor(nn.Module):\n \"\"\" \n R3D: (3 x T x H x W)\n H, W = 112\n \"\"\"\n \n def __init__(self):\n super().__init__()\n self.backbone = torch.nn.Sequential(*(list(r3d_18(pretrained=True).children())[:-2]))\n convert_weights(self.backbone)\n # # freeze backbone\n # for k, v in self.backbone.named_parameters():\n # v.requires_grad = False\n\n def forward(self, x):\n # B = Batch, T, C, Fm, H, W\n features = self.backbone(x) # (B, C, T, H, W)\n B, C, T, H, W = features.shape\n features = features.permute(0, 2, 3, 4, 1)\n features = features.view(B, -1, C)\n return features\n\nclass VideoEncoder(nn.Module):\n def __init__(self):\n super().__init__()\n self.to_patch_embedding = nn.Sequential(\n Rearrange('b c t (h p1) (w p2) -> b (t h w) (p1 p2 c)', p1=16, p2=16)\n )\n \n def forward(self, x):\n return self.to_patch_embedding(x)\n\n\nclass CausalSelfAttention(nn.Module):\n \"\"\"\n A vanilla multi-head masked self-attention layer with a projection at the end.\n \n \"\"\"\n\n def __init__(self, config):\n super().__init__()\n assert config.n_embd % config.n_head == 0\n self.config = config\n # key, query, value 
projections for all heads\n self.key = nn.Linear(config.n_embd, config.n_embd)\n self.query = nn.Linear(config.n_embd, config.n_embd)\n self.value = nn.Linear(config.n_embd, config.n_embd)\n # regularization\n self.attn_drop = nn.Dropout(config.attn_pdrop)\n self.resid_drop = nn.Dropout(config.resid_pdrop)\n # output projection\n self.proj = nn.Linear(config.n_embd, config.n_embd)\n\n self.register_buffer(\"mask\", self.build_mask(config.block_size)) \n self.n_head = config.n_head\n\n self.att = None\n self.T = config.block_size\n\n # self.rotary_embedding = RotarySpatioTemporalEmbedding(config)\n \n def build_mask(self, block_size):\n mask = torch.tril(torch.ones((block_size, block_size)),\n ).view(1, 1, block_size, block_size)\n return mask\n \n def generate_sparse_mask(self, att, p, config):\n \"\"\"\n Generate a sparse mask according to p.\n \"\"\"\n assert p >= 0 and p <= 1, \"p should be in [0, 1]\"\n T = config.block_size\n mask = torch.rand((1, T)) < p\n mask = mask.repeat(T, 1)\n \n mask[0, 0] = False # don't mask 1st step\n # check if any step is fully masked and umask it\n idx_all_true = (True == torch.all(mask, dim=0)).nonzero()\n for step in idx_all_true:\n sampler = torch.distributions.Uniform(low=0, high=step.item()+1)\n idx_false = sampler.sample((1,1)).long()\n mask[step, idx_false] = False\n\n # mask = mask.repeat(T, 1)\n mask = mask.view(1, 1, T, T).cuda() if att.is_cuda else mask.view(1, 1, T, T)\n att = att.masked_fill(mask, float('-inf'))\n return att\n\n def forward(self, x, pad=None, dtx=None):\n # B = Batch, T = Sequence, C = n_embed\n B, T, C = x.size()\n\n # calculate query, key, values for all head in batch and move head forward to the batch dim\n k = self.key(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)\n q = self.query(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)\n v = self.value(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)\n\n # # apply rotary embeddings\n # if dtx is not None:\n # q, k = self.rotary_embedding(q, k, dtx)\n\n # causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T)\n att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))\n att = att.masked_fill(self.mask[:,:,:T,:T] == 0, float('-inf'))\n if self.training:\n att = self.generate_sparse_mask(att, 0.25, self.config)\n if pad is not None:\n for idx, i in enumerate(pad):\n att[idx, :, :, self.T - i:] = float('-inf') # only able to see first padding token\n \n att = F.softmax(att, dim=-1)\n att = self.attn_drop(att)\n self.att = att\n y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)\n y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side\n\n # output projection\n y = self.resid_drop(self.proj(y))\n return y\n\n\nclass PositionalEmbedding(nn.Module):\n \"\"\" Implement the PE function. 
\"\"\"\n def __init__(self, n_embd, p_drop, max_len=1500):\n super().__init__()\n self.dropout = nn.Dropout(p=p_drop)\n \n # Compute the positional encodings once in log space.\n pe = torch.zeros(max_len, n_embd)\n position = torch.arange(0, max_len).unsqueeze(1)\n div_term = torch.exp(torch.arange(0, n_embd, 2) *\n -(math.log(10000.0) / n_embd))\n pe[:, 0::2] = torch.sin(position * div_term)\n pe[:, 1::2] = torch.cos(position * div_term)\n pe = pe.unsqueeze(0)\n self.register_buffer('pe', pe)\n \n def forward(self, x):\n x = Variable(self.pe[:, :x.size(1)], \n requires_grad=False)\n return self.dropout(x)\n\n\n# class RotarySpatioTemporalEmbedding(nn.Module):\n# \"\"\" Rotary temporal embeddings - block_size = id_blk_sz \"\"\"\n# def __init__(self, config):\n# super().__init__()\n# self.frame_block_size = config.frame_block_size\n# self.id_block_size = config.id_block_size\n# self.emb = RotaryEmbedding(dim=32)\n\n# def forward(self, q, k, t):\n# b = t.shape[0]\n# tf = self.frame_block_size\n# queries = []\n# keys = []\n# for B in range(b):\n# im_temp_emb = torch.tensor([-0.5] * (tf//2) + [0.5] * (tf//2))\n# im_pos_emb = torch.arange(self.frame_block_size)\n# im_emb = torch.stack([im_temp_emb, im_pos_emb], dim=0)\n# id_temp_emb = self.temp_emb(t[B], cache_key=self.block_size)\n# freqs = self.emb(torch.cat(im_emb, id_temp_emb))\n# queries.append(apply_rotary_emb(freqs, q[B][None, ...]))\n# keys.append(apply_rotary_emb(freqs, k[B][None, ...]))\n# q, k = torch.cat(queries), torch.cat(keys)\n# return q, k\n\n\nclass TemporalEmbedding(nn.Module):\n \"\"\" encoding temporal information using fourrier signals \"\"\"\n def __init__(self, n_embd, p_drop, max_len=1500):\n super().__init__()\n self.dropout = nn.Dropout(p=p_drop)\n \n # Compute the positional encodings once in log space.\n pe = torch.zeros(max_len, n_embd)\n position = torch.arange(0, max_len).unsqueeze(1)\n div_term = torch.exp(torch.arange(0, n_embd, 2) *\n -(math.log(10000.0) / n_embd))\n pe[:, 0::2] = torch.sin(position * div_term)\n pe[:, 1::2] = torch.cos(position * div_term)\n pe = pe.unsqueeze(0)\n self.register_buffer('pe', pe)\n \n def forward(self, x):\n x = Variable(self.pe[:, :x.size(1)], \n requires_grad=False)\n return self.dropout(x)\n\n\nclass LearntTemporalEmbedding(nn.Module):\n \"\"\"\n Project B x T x 1 time sequence to\n B x T x C\n \"\"\"\n def __init__(self, block_sz, n_embd, p_drop=0.2):\n super().__init__()\n self.temp_emb = nn.Sequential(\n nn.Linear(1, n_embd // 2),\n nn.GELU(),\n nn.Linear(n_embd // 2, n_embd),\n nn.Dropout(p_drop)\n )\n \n def forward(self, x):\n return self.temp_emb(x.unsqueeze(-1))\n\n\nclass Decoder(nn.Module):\n\n def __init__(self, config):\n super().__init__()\n # decoder_layer = nn.TransformerDecoderLayer(config.n_embd, config.n_head, \n # activation='gelu', dropout=0.2, batch_first=True)\n # self.decoder = nn.TransformerDecoder(decoder_layer, config.n_layer)\n self.decoder = nn.Transformer(d_model=config.n_embd, nhead=config.n_head, \n num_encoder_layers=3, num_decoder_layers=config.n_layer,\n activation=\"gelu\", dropout=0.4, batch_first=True)\n self.register_buffer(\"tgt_mask\", self.generate_square_subsequent_mask(config.id_block_size))\n # self.register_buffer(\"tgt_pad_mask\", self.generate_padding_mask(config.ids_block_size))\n self.T = config.id_block_size\n\n def generate_square_subsequent_mask(self, sz: int, pad=None):\n r\"\"\"Generate a square mask for the sequence. 
The masked positions are filled with float('-inf').\n Unmasked positions are filled with float(0.0).\n \"\"\"\n mask = (torch.triu(torch.ones(sz, sz), diagonal=0) == 1).transpose(0, 1)\n mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))\n return mask\n \n def generate_padding_mask(self, sz: int, pad=None):\n r\"\"\"Build a (B x T) mask that resides on the GPU and can be \n manipulated by build_padding_mask according to padded sequence\n \"\"\"\n mask = torch.zeros(1, sz, dtype=torch.bool)\n return mask\n\n def generate_sparse_mask(self, sz: int, pad=None):\n r\"\"\" Build a square mask that employs \n teacher forcing according to P\n \"\"\"\n rand_mat = torch.rand(1, sz)\n k = round(0.75 * sz)\n k_th_quant = torch.topk(rand_mat, k, largest = False)[0][:,-1:]\n bool_tensor = rand_mat <= k_th_quant\n mask = torch.where(bool_tensor, torch.tensor(1), torch.tensor(0)).repeat(sz, 1)\n mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))\n return mask.cuda(self.tgt_mask.get_device()) if self.tgt_mask.is_cuda else mask\n \n def build_padding_mask(self, tgt, pad):\n # mask = self.tgt_pad_mask.repeat(tgt.shape[0], 1)\n mask = torch.zeros(tgt.shape[0], self.T, dtype=torch.bool)\n for B, P in enumerate(pad):\n mask[B, self.T - P:] = True\n return mask # .to(torch.cuda.current_device())\n\n def forward(self, tgt, memory, pad):\n # padding_mask = self.build_padding_mask(tgt, pad)\n # tgt_mask = self.generate_sparse_mask(self.T) if self.training else self.tgt_mask\n return self.decoder(src=memory, tgt=tgt, tgt_mask=self.tgt_mask, \n tgt_key_padding_mask=None)\n\n\nclass ProjectNorm(nn.Module):\n\n def __init__(self, feat_size, target_size):\n super().__init__()\n self.ln = nn.LayerNorm(feat_size)\n self.mlp = nn.Sequential(\n nn.Linear(feat_size, math.floor(2 * feat_size), bias=False),\n nn.GELU(),\n nn.Linear(math.floor(2 * feat_size), target_size, bias=False),\n )\n\n def forward(self, x):\n return self.mlp(self.ln(x))\n\n\nclass TimeProjection(nn.Module):\n \n def __init__(self, seq_size, id_seq_size, feat_size, target_size):\n super().__init__()\n self.mlp_seq = nn.Sequential(\n nn.Linear(seq_size, id_seq_size),\n nn.ReLU(),\n nn.Dropout(p=0.3),\n nn.Linear(id_seq_size, id_seq_size)\n )\n self.mlp_t = nn.Sequential(\n nn.Linear(feat_size, feat_size // 2),\n nn.ReLU(),\n nn.Dropout(p=0.3),\n nn.Linear(feat_size // 2, target_size)\n )\n \n def forward(self, x):\n x = x.permute(0, 2, 1) # B, T, C -> B, C, T\n x = self.mlp_seq(x) # B, C, T / 2\n x = x.permute(0, 2, 1) # B, T / 2, C\n return self.mlp_t(x) # B, T / 2, 1\n\n\nclass PSTHProjection(nn.Module):\n \"\"\"Takes Last Output of Block -> (B, C) \n Builds PSTH table \n \"\"\"\n def __init__(self, config):\n super().__init__()\n self.mlp = nn.Sequential(\n nn.Linear(config.n_embd, 4 * config.n_embd, bias=False),\n nn.Dropout(p=0.2),\n nn.GELU(),\n nn.Linear(config.n_embd * 4, config.id_vocab_size, bias=False)\n )\n \n def forward(self, x):\n return self.mlp(x)\n\n\n# class PSTHProjection(nn.Module):\n \n# def __init__(self, config):\n# super().__init__()\n# self.mlp_seq = nn.Sequential(\n# nn.Linear(config.id_block_size, config.id_block_size // 2, bias=False),\n# nn.GELU(),\n# nn.Dropout(p=0.2),\n# nn.Linear(config.id_block_size // 2, 1, bias=False)\n# )\n# self.mlp_t = nn.Sequential(\n# nn.Linear(config.n_embd, config.n_embd * 4, bias=False),\n# nn.GELU(),\n# nn.Dropout(p=0.2),\n# nn.Linear(config.n_embd * 4, config.id_vocab_size, bias=False)\n# )\n \n# def 
forward(self, x):\n# x = x.transpose(-1, -2) # B, T, C -> B, C, T\n# x = self.mlp_seq(x) # B, C, 1\n# x = x.transpose(-2, -1) # B, 1, Vocab_id\n# return self.mlp_t(x)\n\n\nclass TimeRNN(nn.Module):\n def __init__(self, feat_size, target_size):\n super().__init__()\n\n\nclass Block(nn.Module):\n \"\"\" an unassuming Transformer block \"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.ln1 = nn.LayerNorm(config.n_embd)\n self.ln2 = nn.LayerNorm(config.n_embd)\n self.attn = CausalSelfAttention(config)\n self.mlp = nn.Sequential(\n nn.Linear(config.n_embd, 4 * config.n_embd),\n nn.GELU(),\n nn.Linear(4 * config.n_embd, config.n_embd),\n nn.Dropout(config.resid_pdrop),\n )\n\n def forward(self, x, pad=None, dtx=None):\n x = x + self.attn(self.ln1(x), pad)\n x = x + self.mlp(self.ln2(x))\n return x\n\n\nclass BlockSequential(nn.Sequential):\n def forward(self, x, pad=None, dtx=None):\n for module in self._modules.values():\n x = module(x, pad, dtx)\n return x\n\n\nclass DiceLossPSTH(nn.Module):\n def __init__(self, size_average=True, smooth=1):\n super().__init__()\n \n def cross_entropy(self, input, target):\n return torch.mean(-torch.sum(target * torch.log(input), 1))\n \n def forward(self, logits, targets, smooth=1, class_weights=None):\n total_logits = F.layer_norm(torch.sum(logits, dim=-2), [logits.size()[-1]])\n # probs = F.log_softmax(logits, dim=-1)\n probs = F.softmax(total_logits, dim=-1)\n # logits = F.gelu(logits)\n # probs = logits / (logits.max(dim=-1).values.unsqueeze(-1))\n # flatten label and prediction tensors\n outputs = probs.contiguous().view(-1)\n targets = targets.contiguous().view(-1)\n labels = torch.zeros_like(outputs)\n labels[targets] = 1 / len(targets)\n # intersection = (outputs * labels).sum()\n # dice = (2. * intersection + smooth) / (outputs.sum() + labels.sum() + smooth)\n return self.cross_entropy(outputs[None, ...], labels[None, ...])\n\n\nclass SetLoss(nn.Module):\n def __init__(self):\n super().__init__()\n \n def cross_entropy(self, input, target):\n return torch.mean(-torch.sum(target * torch.log(input), 1))\n \n def forward(self, logits, targets):\n targets = targets.contiguous().view(-1)\n loss = 0\n for n_step, n_logits in enumerate(logits):\n n_logits = F.softmax(n_logits, dim=-1)\n n_target = targets[n_step:]\n n_target_dist = torch.zeros_like(n_logits)\n if len(n_target) != 0:\n n_target_dist[n_target] = 1 / len(n_target)\n loss += self.cross_entropy(n_logits[None,...], n_target_dist[None, ...])\n return loss / len(logits)\n\n\nclass TruncatedLoss(nn.Module):\n\n def __init__(self, q=0.8, k=0.2, trainset_size=50000):\n super(TruncatedLoss, self).__init__()\n self.q = q\n self.k = k\n self.weight = torch.nn.Parameter(data=torch.ones(trainset_size, 1), requires_grad=False)\n \n def forward(self, logits, targets, indexes):\n p = F.softmax(logits, dim=-1)\n Yg = torch.gather(p, 2, targets.unsqueeze(2))\n\n loss = ((1-(Yg**self.q))/self.q)*self.weight[indexes] - ((1-(self.k**self.q))/self.q)*self.weight[indexes]\n loss = torch.mean(loss)\n\n return loss\n\n def update_weight(self, logits, targets, indexes):\n p = F.softmax(logits, dim=-1)\n Yg = torch.gather(p, 2, targets.unsqueeze(2))\n Lq = ((1-(Yg**self.q))/self.q)\n Lqk = np.repeat(((1-(self.k**self.q))/self.q), targets.size(0))\n Lqk = torch.from_numpy(Lqk).type(torch.cuda.FloatTensor)\n Lqk = torch.unsqueeze(Lqk, 1)\n \n condition = torch.gt(Lqk, Lq)\n self.weight[indexes] = condition.type(torch.cuda.FloatTensor)\n\n\n# class PSTHLOSS(nn.Module):\n# def __init__(self):\n# 
super().__init__()\n\n# def forward(self, logits, targets):\n# total_logits = torch.sum(logits, dim=-2) # sum over sequence dimension\n# probs = F.softmax(total_logits, dim=-1)\n# outptu\n\n\nclass HungarianMatcher(nn.Module):\n def __init__(self):\n super().__init__()\n \n @torch.no_grad()\n def forward(self, logits, targets):\n T, C = logits.size()\n probs = F.softmax(logits, dim=-1)\n cost_id = (1 - probs[:, targets]).cpu().view(T, -1).unsqueeze(0)\n indices = [linear_sum_assignment(c[i]) for i, c in enumerate(cost_id.split(len(targets), -1))]\n return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]\n\nclass KLDivLoss(nn.Module):\n def __init__(self):\n super().__init__()\n self.log_softmax = nn.LogSoftmax(dim=-1)\n self.KLdiv = nn.KLDivLoss()\n def forward(self, logits, targets):\n log_probs = self.log_softmax(logits)\n return self.KLdiv(log_probs.long(), targets)\n\n\nclass PoissonCrossEntropyLoss(nn.Module):\n def __init__(self):\n super().__init__()\n self.log_softmax = nn.LogSoftmax(dim=-1)\n # self.softmax = nn.Softmax(dim=-1)\n self.nll_poisson = nn.PoissonNLLLoss()\n # self.nll_poisson = nn.NLLLoss()\n\n def forward(self, logits, targets):\n log_probs = self.log_softmax(logits)\n return self.nll_poisson(log_probs, targets)\n\n\nclass GPT(nn.Module):\n \"\"\" the full GPT language model, with a context size of block_size \"\"\"\n\n def __init__(self, config):\n super().__init__()\n\n self.device = 'cpu'\n if torch.cuda.is_available():\n self.device = torch.cuda.current_device()\n\n self.config = config\n # input embedding stem\n self.n_embd = config.n_embd\n self.tok_emb = nn.Embedding(config.id_vocab_size, config.n_embd)\n self.pos_emb = PositionalEmbedding(config.n_embd, p_drop=0.2)\n # self.pos_emb_id = nn.Parameter(torch.zeros(1, config.id_block_size, config.n_embd))\n self.pos_emb_frames = nn.Parameter(torch.zeros(1, config.frame_block_size, config.n_embd))\n # self.temp_emb = TemporalEmbedding(config.n_embd, p_drop=0.2)\n # self.temp_emb = RotaryTemporalEmbedding(config.id_block_size)\n self.temp_emb = LearntTemporalEmbedding(config.id_block_size, config.n_embd)\n self.frame_temp_emb = LearntTemporalEmbedding(config.frame_block_size, config.n_embd)\n self.id_drop = nn.Dropout(config.id_drop)\n self.im_drop = nn.Dropout(config.im_drop)\n self.drop = nn.Dropout(config.embd_pdrop)\n\n # -- Visual Backbone -- #\n # self.visual_backbone = VideoFeaturesExtractor()\n self.video_encoder = VideoEncoder()\n frame_temp_emb = torch.tensor(list(itertools.chain(*[[n * 0.05] * (config.frame_block_size//20) for n in range(20)]))).unsqueeze(0)\n self.register_buffer(\"frame_temp_emb_seq\", frame_temp_emb)\n\n # -- Contrastive Loss -- ##\n # self.proj_id = ProjectNorm(config.n_embd, config.n_embd)\n # self.proj_vid = VidProjectNorm(config.n_embd, config.n_embd) # im_shape\n \n ## -- IM_Decoder -- ##\n # self.blocks_id = BlockSequential(*[Block(config) for _ in range(2)])\n # self.blocks_im = BlockSequential(*[Block(config) for _ in range(2)])\n # self.ln_f_id = nn.LayerNorm(config.n_embd)\n # self.ln_f_im = nn.LayerNorm(config.n_embd)\n\n ## -- Decoder -- ##\n # self.ln_f = nn.LayerNorm(config.n_embd)\n ## GPT\n # self.blocks = BlockSequential(*[Block(config) for _ in range(config.n_layer)])\n # self.ln_f = nn.LayerNorm(config.n_embd)\n ## enc_dec\n self.state_decoder = Decoder(config)\n self.ln_f_state_dec = nn.LayerNorm(config.n_embd)\n self.stimulus_decoder = Decoder(config)\n self.ln_f_stimulus_dec = nn.LayerNorm(config.n_embd)\n 
self.head = nn.Linear(config.n_embd, config.vocab_size, bias=False)\n \n ## -- Time -- ##\n # self.proj_time = TimeProjection(config.block_size, config.id_block_size, config.n_embd, config.n_dt)\n # self.proj_time = ProjectNorm(config.n_embd, config.n_dt)\n # self.proj_time = ProjectNorm(config.n_embd, 1)\n \n ## -- PSTH -- ##\n # self.proj_psth = PSTHProjection(config)\n\n # Loss\n # self.dice_loss = DiceLossPSTH()\n # self.poisson_loss = PoissonCrossEntropyLoss()\n # self.hungarian_matcher = HungarianMatcher()\n # self.kldiv_loss = KLDivLoss()\n # self.truncated_loss = TruncatedLoss(trainset_size=config.data_size)\n # self.set_loss = SetLoss()\n # self.a = torch.tensor(0.5, requires_grad=True)\n\n self.block_size = config.block_size\n self.apply(self._init_weights)\n \n if config.class_weights is not None:\n self.register_buffer(\"class_weights\", config.class_weights) \n \n logger.info(\"number of parameters: %e\", sum(p.numel() for p in self.parameters()))\n\n def get_block_size(self):\n return self.block_size\n\n def _init_weights(self, module):\n if isinstance(module, (nn.Linear, nn.Embedding)):\n module.weight.data.normal_(mean=0.0, std=0.02)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n def configure_optimizers(self, train_config):\n \"\"\"\n Separates parameters into those who will experience weight decay and those that will not\n \"\"\"\n if train_config.decay_weights:\n decay = set()\n no_decay = set()\n whitelist_weight_modules = (torch.nn.Linear, )\n blacklist_weight_modules = (torch.nn.LayerNorm, torch.nn.Embedding)\n for mn, m in self.named_modules():\n for pn, p in m.named_parameters():\n fpn = '%s.%s' % (mn, pn) if mn else pn # full param name\n if pn.endswith('bias'):\n # all biases will not be decayed\n no_decay.add(fpn)\n elif pn.endswith('weight') and isinstance(m, whitelist_weight_modules):\n # weights of whitelist modules will be weight decayed\n decay.add(fpn)\n elif pn.endswith('weight') and isinstance(m, blacklist_weight_modules):\n # weights of blacklist modules will NOT be weight decayed\n no_decay.add(fpn)\n else: no_decay.add(fpn)\n\n # special case the position embedding parameter in the root GPT module as not decayed\n black_list_mods = ['pos_emb', 'temp_emb']\n for mods in black_list_mods:\n for name, param in self.named_parameters():\n if mods in name:\n no_decay.add(name) # also pos_emb\n \n # validate that we considered every parameter\n param_dict = {pn: p for pn, p in self.named_parameters()}\n no_decay -= decay & no_decay\n inter_params = decay & no_decay\n union_params = decay | no_decay\n\n assert len(inter_params) == 0, \"parameters %s made it into both decay/no_decay sets!\" % (str(inter_params), )\n assert len(param_dict.keys() - union_params) == 0, \"parameters %s were not separated into either decay/no_decay set!\" \\\n % (str(param_dict.keys() - union_params), )\n\n \n # create the pytorch optimizer object\n optim_groups = [\n {\"params\": [param_dict[pn] for pn in sorted(list(decay))], \"weight_decay\": train_config.weight_decay},\n {\"params\": [param_dict[pn] for pn in sorted(list(no_decay))], \"weight_decay\": 0.0},\n ]\n optimizer = torch.optim.AdamW(optim_groups, lr=train_config.learning_rate, betas=train_config.betas)\n \n else:\n parameters = self.parameters()\n optimizer = torch.optim.Adam(parameters, lr=train_config.learning_rate)\n \n return optimizer\n \n def process_features(self, 
x):\n # batch, block_size, feature\n p_idx = x['id_prev']\n idx = x['id']\n dtx = x['dt']\n dtx_prev = x['dt_prev']\n frames = self.video_encoder(x['frames'])\n pad = x['pad']\n\n b, t = idx.size()\n # b_p, t_p = p_idx.size()\n bf, tf = frames.size()[0:2]\n\n # forward the GPT model\n ''' \n positional and temporal embeddings implemented in multiple ways, learnt, \n fourrier decomposition and in the case of time, just passed as is. \n '''\n # # Embeddings\n prev_id_position_embeddings = 0 # self.pos_emb(p_idx)\n prev_id_temporal_embeddings = self.temp_emb(dtx_prev.float())\n id_position_embeddings = 0 # self.pos_emb(idx) \n im_position_embeddings = self.pos_emb_frames\n temporal_embeddings = self.temp_emb(dtx.float())\n \n # Extract ID features\n prev_token_embeddings = self.id_drop(self.tok_emb(p_idx) + prev_id_temporal_embeddings + prev_id_position_embeddings)\n token_embeddings = self.tok_emb(idx) # each index maps to a (learnable) vector\n token_embeddings = token_embeddings + temporal_embeddings + id_position_embeddings\n token_embeddings = self.id_drop(token_embeddings)\n\n # Extract image features and add time embeddings\n im_temporal_embeddings = self.frame_temp_emb(self.frame_temp_emb_seq)\n im_embeddings = frames # self.tok_emb(frames)\n im_embeddings = im_embeddings + im_position_embeddings + im_temporal_embeddings\n im_embeddings = self.im_drop(im_embeddings) # separate pos emb?\n \n # Tidy up\n features = dict()\n features['id_prev'] = prev_token_embeddings\n features['id'] = token_embeddings\n features['frames'] = im_embeddings\n \n return features, pad\n\n def perceiver(self, features, pad):\n x = self.state_decoder(tgt=features['id'], memory=features['id_prev'], pad=pad)\n x = self.ln_f_state_dec(x)\n x = self.stimulus_decoder(tgt=features['id'], memory=features['frames'], pad=pad)\n x = self.ln_f_stimulus_dec(x)\n logits = self.head(x)\n\n return logits, x\n\n def enc_dec(self, features, pad):\n x = self.stimulus_decoder(tgt=features['id'], memory=features['frames'], pad=pad)\n x = self.ln_f_stimulus_dec(x)\n logits = self.head(x)\n\n return logits, x\n\n def GPTdecoder(self, features, pad, dtx=None):\n # image + neural features\n x = torch.cat((features['frames'], features['id']), dim=1)\n\n # Decoder\n x = self.blocks(x, pad, dtx) # (B, T, C)\n x = self.ln_f(x)\n logits = self.head(x)\n\n # print(logits.shape) # (B, T, Vocab)\n # logits_psth = x[:, -1] # (B, C)\n\n return logits, x\n\n def forward(self, x, targets=None):\n idx = x['id']\n dtx = x['dt']\n frames = x['frames']\n pad = x['pad']\n\n b, t = idx.size()\n # b, t = x['id'].shape[0], x['id'].shape[1] + x['id_prev'].shape[1]\n bf, tf = frames.size()[0:2]\n tf = self.config.frame_block_size\n # assert t + tf == self.config.block_size, f\"{tf} {t}\"\n # assert t <= self.block_size, \"Cannot forward, model block size is exhausted\"\n \n features, pad = self.process_features(x)\n logits, x = self.perceiver(features, pad)\n # logits, x = self.enc_dec(features, pad)\n # logits, x = self.GPTdecoder(features, pad)\n # time = self.proj_time(x) # (B, T_id, 1)\n\n # print(x[:, 0].shape)\n # psth = self.proj_psth(x) # (B, Vocab_id)\n\n # if targets, calculate loss\n # calculate loss on logits up to padding token for each batch\n loss = None\n loss_frames = 0\n loss_id = []\n loss_time = []\n loss_dice = []\n loss_psth = []\n loss_hungarian = []\n if targets is not None:\n # loss_psth = self.dice_loss(psth, targets['modes'][:, tf:]) \n for B, P in enumerate(pad):\n tf = 0\n # im_logits = logits[B, :tf]\n # im_targets = 
targets['frames'][B, :tf]\n # loss_frames += F.cross_entropy(im_logits.view(-1, im_logits.size(-1)), im_targets.view(-1))\n id_logits = logits[B, tf:tf + t - P]\n id_targets = targets['id'][B, :t - P]\n\n loss_id_ = F.cross_entropy(id_logits.view(-1, id_logits.size(-1)), id_targets.view(-1))\n # if self.config.epoch >= 15:\n # self.truncated_loss.update_weight(id_logits[None, ...], id_targets[None, ...], id_indexes[None, ...])\n # loss_id_ = self.truncated_loss(id_logits[None, ...], id_targets[None, ...], id_indexes[None, ...])\n # time_preds = time[B, :t - P]\n # time_targets = targets['dt'][B, :t - P]\n # loss_time_ = F.cross_entropy(time_preds.view(-1, time_preds.size(-1)), time_targets.view(-1))\n # loss_time_ = F.mse_loss(time_preds.squeeze(-1), time_targets)\n # loss_id_ = self.poisson_loss(id_logits.view(-1, id_logits.size(-1)), F.one_hot(id_targets, self.config.vocab_size))\n # if len(id_targets) > 0:\n # indices = self.hungarian_matcher(id_logits, id_targets)\n # probs_matching, targets_matching = id_logits[indices[0][0]], id_targets[indices[0][1]]\n # loss_hungarian_ = F.cross_entropy(probs_matching, targets_matching, weight=self.class_weights).to(self.device)\n # loss_hungarian.append(loss_hungarian_)\n # # psth = self.proj_psth(x[B, -1]) # from the EOS position\n \n # loss_psth.append(torch.nan_to_num(self.set_loss(id_logits, id_targets)))\n # loss_psth_ = self.dice_loss(id_logits, id_targets)\n # loss_psth.append(torch.nan_to_num(loss_psth_))\n \n # loss_time.append(torch.nan_to_num(loss_time_))\n loss_id.append(torch.nan_to_num(loss_id_))\n \n loss = dict()\n # loss['frames'] = loss_frames / (b / 3)\n loss['id'] = sum(loss_id) / (b) # sum(loss_id) / (b * 2) # / len(loss_id)\n # loss['time'] = sum(loss_time) / (b * 2)\n # loss['dice'] = sum(loss_dice) / len(loss_dice)\n # loss['dt'] = loss_time / (b * 50)\n # loss['hungarian'] = sum(loss_hungarian) / (b * 2)\n # loss['psth'] = sum(loss_psth) / (b * 2)\n\n for key in list(loss):\n if isinstance(loss[key], float):\n del loss[key]\n \n preds = dict()\n preds['logits'] = logits # [:, tf:] # only id logits\n # preds['dt'] = time\n\n return preds, features, loss", "# from code.transformer_vid.utils import convert_weights\n# import rotary_embedding_torch\nfrom torch.nn.modules.activation import GELU, ReLU\n# from data.OneCombo3.trainer import TrainerConfig\nimport math\nimport numpy as np\nimport itertools\nimport logging\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn import functional as F\nfrom torch.autograd import Variable\nfrom torchvision.models.video import r3d_18\n# from ResNet3D import r3d_18\n\nfrom scipy.optimize import linear_sum_assignment\n# from rotary_embedding_torch import apply_rotary_emb, RotaryEmbedding\n\nfrom einops.layers.torch import Rearrange\n\nlogger = logging.getLogger(__name__)\n\n\ndef convert_weights(model: nn.Module):\n \"\"\"Convert applicable model parameters to fp16\"\"\"\n\n def _convert_weights_to_fp16(l):\n if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)): # nn.Conv3d,\n l.weight.data = l.weight.data.half()\n if l.bias is not None:\n l.bias.data = l.bias.data.half()\n\n if isinstance(l, nn.MultiheadAttention):\n for attr in [*[f\"{s}_proj_weight\" for s in [\"in\", \"q\", \"k\", \"v\"]], \"in_proj_bias\", \"bias_k\", \"bias_v\"]:\n tensor = getattr(l, attr)\n if tensor is not None:\n tensor.data = tensor.data.half()\n\n for name in [\"text_projection\", \"proj\"]:\n if hasattr(l, name):\n attr = getattr(l, name)\n if attr is not None:\n attr.data = attr.data.half()\n\n 
model.apply(_convert_weights_to_fp16)\n\nclass GPTConfig:\n \"\"\" base GPT config, params common to all GPT versions \"\"\"\n embd_pdrop = 0.2\n resid_pdrop = 0.2\n attn_pdrop = 0.2\n pos_pdrop = 0.2\n temp_pdrop = 0.2\n pos_emb = True\n temp_emb = True\n start_prune = 30\n epoch = 0\n\n def __init__(self, vocab_size, block_size, **kwargs):\n self.vocab_size = vocab_size\n self.block_size = block_size\n for k, v in kwargs.items():\n setattr(self, k, v)\n\nclass neuralGPTConfig:\n \"\"\" base GPT config, params common to all GPT versions \"\"\"\n n = 0.4\n im_drop = 0.2\n id_drop = n\n embd_pdrop = n\n resid_pdrop = n\n attn_pdrop = n\n pos_pdrop = n\n temp_pdrop = n\n pos_emb = True\n temp_emb = True\n\n def __init__(self, vocab_size, block_size, **kwargs):\n self.vocab_size = vocab_size\n self.block_size = block_size\n for k, v in kwargs.items():\n setattr(self, k, v)\n\n\nclass GPT1Config(GPTConfig):\n \"\"\" GPT-1 like network roughly 125M params \"\"\"\n n_layer = 12\n n_head = 12\n n_embd = 768\n\n\nclass VideoFeaturesExtractor(nn.Module):\n \"\"\" \n R3D: (3 x T x H x W)\n H, W = 112\n \"\"\"\n \n def __init__(self):\n super().__init__()\n self.backbone = torch.nn.Sequential(*(list(r3d_18(pretrained=True).children())[:-2]))\n convert_weights(self.backbone)\n # # freeze backbone\n # for k, v in self.backbone.named_parameters():\n # v.requires_grad = False\n\n def forward(self, x):\n # B = Batch, T, C, Fm, H, W\n features = self.backbone(x) # (B, C, T, H, W)\n B, C, T, H, W = features.shape\n features = features.permute(0, 2, 3, 4, 1)\n features = features.view(B, -1, C)\n return features\n\nclass VideoEncoder(nn.Module):\n def __init__(self, n_embd):\n super().__init__()\n p1, p2 = 16\n \n assert n_embd % (p1 * p2) == 0, \"n_embd must be divisible by p1 * p2\"\n \n c = n_embd // (p1 * p2) \n self.to_patch_embedding = nn.Sequential(\n Rearrange(f'b {c} t (h {p1}) (w {p2}) -> b (t h w) (p1 p2 {c})', p1=16, p2=16)\n )\n \n def forward(self, x):\n return self.to_patch_embedding(x)\n\n\nclass CausalSelfAttention(nn.Module):\n \"\"\"\n A vanilla multi-head masked self-attention layer with a projection at the end.\n \n \"\"\"\n\n def __init__(self, config):\n super().__init__()\n assert config.n_embd % config.n_head == 0\n self.config = config\n # key, query, value projections for all heads\n self.key = nn.Linear(config.n_embd, config.n_embd)\n self.query = nn.Linear(config.n_embd, config.n_embd)\n self.value = nn.Linear(config.n_embd, config.n_embd)\n # regularization\n self.attn_drop = nn.Dropout(config.attn_pdrop)\n self.resid_drop = nn.Dropout(config.resid_pdrop)\n # output projection\n self.proj = nn.Linear(config.n_embd, config.n_embd)\n\n self.register_buffer(\"mask\", self.build_mask(config.block_size)) \n self.n_head = config.n_head\n\n self.att = None\n self.T = config.block_size\n\n # self.rotary_embedding = RotarySpatioTemporalEmbedding(config)\n \n def build_mask(self, block_size):\n mask = torch.tril(torch.ones((block_size, block_size)),\n ).view(1, 1, block_size, block_size)\n return mask\n \n def generate_sparse_mask(self, att, p, config):\n \"\"\"\n Generate a sparse mask according to p.\n \"\"\"\n assert p >= 0 and p <= 1, \"p should be in [0, 1]\"\n T = config.block_size\n mask = torch.rand((1, T)) < p\n mask = mask.repeat(T, 1)\n \n mask[0, 0] = False # don't mask 1st step\n # check if any step is fully masked and umask it\n idx_all_true = (True == torch.all(mask, dim=0)).nonzero()\n for step in idx_all_true:\n sampler = torch.distributions.Uniform(low=0, 
high=step.item()+1)\n idx_false = sampler.sample((1,1)).long()\n mask[step, idx_false] = False\n\n # mask = mask.repeat(T, 1)\n mask = mask.view(1, 1, T, T).cuda() if att.is_cuda else mask.view(1, 1, T, T)\n att = att.masked_fill(mask, float('-inf'))\n return att\n\n def forward(self, x, pad=None, dtx=None):\n # B = Batch, T = Sequence, C = n_embed\n B, T, C = x.size()\n\n # calculate query, key, values for all head in batch and move head forward to the batch dim\n k = self.key(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)\n q = self.query(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)\n v = self.value(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)\n\n # # apply rotary embeddings\n # if dtx is not None:\n # q, k = self.rotary_embedding(q, k, dtx)\n\n # causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T)\n att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))\n att = att.masked_fill(self.mask[:,:,:T,:T] == 0, float('-inf'))\n if self.training:\n att = self.generate_sparse_mask(att, 0.25, self.config)\n if pad is not None:\n for idx, i in enumerate(pad):\n att[idx, :, :, self.T - i:] = float('-inf') # only able to see first padding token\n \n att = F.softmax(att, dim=-1)\n att = self.attn_drop(att)\n self.att = att\n y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)\n y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side\n\n # output projection\n y = self.resid_drop(self.proj(y))\n return y\n\n\nclass PositionalEmbedding(nn.Module):\n \"\"\" Implement the PE function. \"\"\"\n def __init__(self, n_embd, p_drop, max_len=1500):\n super().__init__()\n self.dropout = nn.Dropout(p=p_drop)\n \n # Compute the positional encodings once in log space.\n pe = torch.zeros(max_len, n_embd)\n position = torch.arange(0, max_len).unsqueeze(1)\n div_term = torch.exp(torch.arange(0, n_embd, 2) *\n -(math.log(10000.0) / n_embd))\n pe[:, 0::2] = torch.sin(position * div_term)\n pe[:, 1::2] = torch.cos(position * div_term)\n pe = pe.unsqueeze(0)\n self.register_buffer('pe', pe)\n \n def forward(self, x):\n x = Variable(self.pe[:, :x.size(1)], \n requires_grad=False)\n return self.dropout(x)\n\n\n# class RotarySpatioTemporalEmbedding(nn.Module):\n# \"\"\" Rotary temporal embeddings - block_size = id_blk_sz \"\"\"\n# def __init__(self, config):\n# super().__init__()\n# self.frame_block_size = config.frame_block_size\n# self.id_block_size = config.id_block_size\n# self.emb = RotaryEmbedding(dim=32)\n\n# def forward(self, q, k, t):\n# b = t.shape[0]\n# tf = self.frame_block_size\n# queries = []\n# keys = []\n# for B in range(b):\n# im_temp_emb = torch.tensor([-0.5] * (tf//2) + [0.5] * (tf//2))\n# im_pos_emb = torch.arange(self.frame_block_size)\n# im_emb = torch.stack([im_temp_emb, im_pos_emb], dim=0)\n# id_temp_emb = self.temp_emb(t[B], cache_key=self.block_size)\n# freqs = self.emb(torch.cat(im_emb, id_temp_emb))\n# queries.append(apply_rotary_emb(freqs, q[B][None, ...]))\n# keys.append(apply_rotary_emb(freqs, k[B][None, ...]))\n# q, k = torch.cat(queries), torch.cat(keys)\n# return q, k\n\n\nclass TemporalEmbedding(nn.Module):\n \"\"\" encoding temporal information using fourrier signals \"\"\"\n def __init__(self, n_embd, p_drop, max_len=1500):\n super().__init__()\n self.dropout = nn.Dropout(p=p_drop)\n \n # Compute the positional encodings once in log space.\n pe = torch.zeros(max_len, n_embd)\n position = torch.arange(0, 
max_len).unsqueeze(1)\n div_term = torch.exp(torch.arange(0, n_embd, 2) *\n -(math.log(10000.0) / n_embd))\n pe[:, 0::2] = torch.sin(position * div_term)\n pe[:, 1::2] = torch.cos(position * div_term)\n pe = pe.unsqueeze(0)\n self.register_buffer('pe', pe)\n \n def forward(self, x):\n x = Variable(self.pe[:, :x.size(1)], \n requires_grad=False)\n return self.dropout(x)\n\n\nclass LearntTemporalEmbedding(nn.Module):\n \"\"\"\n Project B x T x 1 time sequence to\n B x T x C\n \"\"\"\n def __init__(self, block_sz, n_embd, p_drop=0.2):\n super().__init__()\n self.temp_emb = nn.Sequential(\n nn.Linear(1, n_embd // 2),\n nn.GELU(),\n nn.Linear(n_embd // 2, n_embd),\n nn.Dropout(p_drop)\n )\n \n def forward(self, x):\n return self.temp_emb(x.unsqueeze(-1))\n\n\nclass Decoder(nn.Module):\n\n def __init__(self, config):\n super().__init__()\n # decoder_layer = nn.TransformerDecoderLayer(config.n_embd, config.n_head, \n # activation='gelu', dropout=0.2, batch_first=True)\n # self.decoder = nn.TransformerDecoder(decoder_layer, config.n_layer)\n self.decoder = nn.Transformer(d_model=config.n_embd, nhead=config.n_head, \n num_encoder_layers=3, num_decoder_layers=config.n_layer,\n activation=\"gelu\", dropout=0.4, batch_first=True)\n self.register_buffer(\"tgt_mask\", self.generate_square_subsequent_mask(config.id_block_size))\n # self.register_buffer(\"tgt_pad_mask\", self.generate_padding_mask(config.ids_block_size))\n self.T = config.id_block_size\n\n def generate_square_subsequent_mask(self, sz: int, pad=None):\n r\"\"\"Generate a square mask for the sequence. The masked positions are filled with float('-inf').\n Unmasked positions are filled with float(0.0).\n \"\"\"\n mask = (torch.triu(torch.ones(sz, sz), diagonal=0) == 1).transpose(0, 1)\n mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))\n return mask\n \n def generate_padding_mask(self, sz: int, pad=None):\n r\"\"\"Build a (B x T) mask that resides on the GPU and can be \n manipulated by build_padding_mask according to padded sequence\n \"\"\"\n mask = torch.zeros(1, sz, dtype=torch.bool)\n return mask\n\n def generate_sparse_mask(self, sz: int, pad=None):\n r\"\"\" Build a square mask that employs \n teacher forcing according to P\n \"\"\"\n rand_mat = torch.rand(1, sz)\n k = round(0.75 * sz)\n k_th_quant = torch.topk(rand_mat, k, largest = False)[0][:,-1:]\n bool_tensor = rand_mat <= k_th_quant\n mask = torch.where(bool_tensor, torch.tensor(1), torch.tensor(0)).repeat(sz, 1)\n mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))\n return mask.cuda(self.tgt_mask.get_device()) if self.tgt_mask.is_cuda else mask\n \n def build_padding_mask(self, tgt, pad):\n # mask = self.tgt_pad_mask.repeat(tgt.shape[0], 1)\n mask = torch.zeros(tgt.shape[0], self.T, dtype=torch.bool)\n for B, P in enumerate(pad):\n mask[B, self.T - P:] = True\n return mask # .to(torch.cuda.current_device())\n\n def forward(self, tgt, memory, pad):\n # padding_mask = self.build_padding_mask(tgt, pad)\n # tgt_mask = self.generate_sparse_mask(self.T) if self.training else self.tgt_mask\n return self.decoder(src=memory, tgt=tgt, tgt_mask=self.tgt_mask, \n tgt_key_padding_mask=None)\n\n\nclass ProjectNorm(nn.Module):\n\n def __init__(self, feat_size, target_size):\n super().__init__()\n self.ln = nn.LayerNorm(feat_size)\n self.mlp = nn.Sequential(\n nn.Linear(feat_size, math.floor(2 * feat_size), bias=False),\n nn.GELU(),\n nn.Linear(math.floor(2 * feat_size), target_size, bias=False),\n )\n\n def 
forward(self, x):\n return self.mlp(self.ln(x))\n\n\nclass TimeProjection(nn.Module):\n \n def __init__(self, seq_size, id_seq_size, feat_size, target_size):\n super().__init__()\n self.mlp_seq = nn.Sequential(\n nn.Linear(seq_size, id_seq_size),\n nn.ReLU(),\n nn.Dropout(p=0.3),\n nn.Linear(id_seq_size, id_seq_size)\n )\n self.mlp_t = nn.Sequential(\n nn.Linear(feat_size, feat_size // 2),\n nn.ReLU(),\n nn.Dropout(p=0.3),\n nn.Linear(feat_size // 2, target_size)\n )\n \n def forward(self, x):\n x = x.permute(0, 2, 1) # B, T, C -> B, C, T\n x = self.mlp_seq(x) # B, C, T / 2\n x = x.permute(0, 2, 1) # B, T / 2, C\n return self.mlp_t(x) # B, T / 2, 1\n\n\nclass PSTHProjection(nn.Module):\n \"\"\"Takes Last Output of Block -> (B, C) \n Builds PSTH table \n \"\"\"\n def __init__(self, config):\n super().__init__()\n self.mlp = nn.Sequential(\n nn.Linear(config.n_embd, 4 * config.n_embd, bias=False),\n nn.Dropout(p=0.2),\n nn.GELU(),\n nn.Linear(config.n_embd * 4, config.id_vocab_size, bias=False)\n )\n \n def forward(self, x):\n return self.mlp(x)\n\n\n# class PSTHProjection(nn.Module):\n \n# def __init__(self, config):\n# super().__init__()\n# self.mlp_seq = nn.Sequential(\n# nn.Linear(config.id_block_size, config.id_block_size // 2, bias=False),\n# nn.GELU(),\n# nn.Dropout(p=0.2),\n# nn.Linear(config.id_block_size // 2, 1, bias=False)\n# )\n# self.mlp_t = nn.Sequential(\n# nn.Linear(config.n_embd, config.n_embd * 4, bias=False),\n# nn.GELU(),\n# nn.Dropout(p=0.2),\n# nn.Linear(config.n_embd * 4, config.id_vocab_size, bias=False)\n# )\n \n# def forward(self, x):\n# x = x.transpose(-1, -2) # B, T, C -> B, C, T\n# x = self.mlp_seq(x) # B, C, 1\n# x = x.transpose(-2, -1) # B, 1, Vocab_id\n# return self.mlp_t(x)\n\n\nclass TimeRNN(nn.Module):\n def __init__(self, feat_size, target_size):\n super().__init__()\n\n\nclass Block(nn.Module):\n \"\"\" an unassuming Transformer block \"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.ln1 = nn.LayerNorm(config.n_embd)\n self.ln2 = nn.LayerNorm(config.n_embd)\n self.attn = CausalSelfAttention(config)\n self.mlp = nn.Sequential(\n nn.Linear(config.n_embd, 4 * config.n_embd),\n nn.GELU(),\n nn.Linear(4 * config.n_embd, config.n_embd),\n nn.Dropout(config.resid_pdrop),\n )\n\n def forward(self, x, pad=None, dtx=None):\n x = x + self.attn(self.ln1(x), pad)\n x = x + self.mlp(self.ln2(x))\n return x\n\n\nclass BlockSequential(nn.Sequential):\n def forward(self, x, pad=None, dtx=None):\n for module in self._modules.values():\n x = module(x, pad, dtx)\n return x\n\n\nclass DiceLossPSTH(nn.Module):\n def __init__(self, size_average=True, smooth=1):\n super().__init__()\n \n def cross_entropy(self, input, target):\n return torch.mean(-torch.sum(target * torch.log(input), 1))\n \n def forward(self, logits, targets, smooth=1, class_weights=None):\n total_logits = F.layer_norm(torch.sum(logits, dim=-2), [logits.size()[-1]])\n # probs = F.log_softmax(logits, dim=-1)\n probs = F.softmax(total_logits, dim=-1)\n # logits = F.gelu(logits)\n # probs = logits / (logits.max(dim=-1).values.unsqueeze(-1))\n # flatten label and prediction tensors\n outputs = probs.contiguous().view(-1)\n targets = targets.contiguous().view(-1)\n labels = torch.zeros_like(outputs)\n labels[targets] = 1 / len(targets)\n # intersection = (outputs * labels).sum()\n # dice = (2. 
* intersection + smooth) / (outputs.sum() + labels.sum() + smooth)\n return self.cross_entropy(outputs[None, ...], labels[None, ...])\n\n\nclass SetLoss(nn.Module):\n def __init__(self):\n super().__init__()\n \n def cross_entropy(self, input, target):\n return torch.mean(-torch.sum(target * torch.log(input), 1))\n \n def forward(self, logits, targets):\n targets = targets.contiguous().view(-1)\n loss = 0\n for n_step, n_logits in enumerate(logits):\n n_logits = F.softmax(n_logits, dim=-1)\n n_target = targets[n_step:]\n n_target_dist = torch.zeros_like(n_logits)\n if len(n_target) != 0:\n n_target_dist[n_target] = 1 / len(n_target)\n loss += self.cross_entropy(n_logits[None,...], n_target_dist[None, ...])\n return loss / len(logits)\n\n\nclass TruncatedLoss(nn.Module):\n\n def __init__(self, q=0.8, k=0.2, trainset_size=50000):\n super(TruncatedLoss, self).__init__()\n self.q = q\n self.k = k\n self.weight = torch.nn.Parameter(data=torch.ones(trainset_size, 1), requires_grad=False)\n \n def forward(self, logits, targets, indexes):\n p = F.softmax(logits, dim=-1)\n Yg = torch.gather(p, 2, targets.unsqueeze(2))\n\n loss = ((1-(Yg**self.q))/self.q)*self.weight[indexes] - ((1-(self.k**self.q))/self.q)*self.weight[indexes]\n loss = torch.mean(loss)\n\n return loss\n\n def update_weight(self, logits, targets, indexes):\n p = F.softmax(logits, dim=-1)\n Yg = torch.gather(p, 2, targets.unsqueeze(2))\n Lq = ((1-(Yg**self.q))/self.q)\n Lqk = np.repeat(((1-(self.k**self.q))/self.q), targets.size(0))\n Lqk = torch.from_numpy(Lqk).type(torch.cuda.FloatTensor)\n Lqk = torch.unsqueeze(Lqk, 1)\n \n condition = torch.gt(Lqk, Lq)\n self.weight[indexes] = condition.type(torch.cuda.FloatTensor)\n\n\n# class PSTHLOSS(nn.Module):\n# def __init__(self):\n# super().__init__()\n\n# def forward(self, logits, targets):\n# total_logits = torch.sum(logits, dim=-2) # sum over sequence dimension\n# probs = F.softmax(total_logits, dim=-1)\n# outptu\n\n\nclass HungarianMatcher(nn.Module):\n def __init__(self):\n super().__init__()\n \n @torch.no_grad()\n def forward(self, logits, targets):\n T, C = logits.size()\n probs = F.softmax(logits, dim=-1)\n cost_id = (1 - probs[:, targets]).cpu().view(T, -1).unsqueeze(0)\n indices = [linear_sum_assignment(c[i]) for i, c in enumerate(cost_id.split(len(targets), -1))]\n return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]\n\nclass KLDivLoss(nn.Module):\n def __init__(self):\n super().__init__()\n self.log_softmax = nn.LogSoftmax(dim=-1)\n self.KLdiv = nn.KLDivLoss()\n def forward(self, logits, targets):\n log_probs = self.log_softmax(logits)\n return self.KLdiv(log_probs.long(), targets)\n\n\nclass PoissonCrossEntropyLoss(nn.Module):\n def __init__(self):\n super().__init__()\n self.log_softmax = nn.LogSoftmax(dim=-1)\n # self.softmax = nn.Softmax(dim=-1)\n self.nll_poisson = nn.PoissonNLLLoss()\n # self.nll_poisson = nn.NLLLoss()\n\n def forward(self, logits, targets):\n log_probs = self.log_softmax(logits)\n return self.nll_poisson(log_probs, targets)\n\n\nclass GPT(nn.Module):\n \"\"\" the full GPT language model, with a context size of block_size \"\"\"\n\n def __init__(self, config):\n super().__init__()\n\n self.device = 'cpu'\n if torch.cuda.is_available():\n self.device = torch.cuda.current_device()\n\n self.config = config\n # input embedding stem\n self.n_embd = config.n_embd\n self.tok_emb = nn.Embedding(config.id_vocab_size, config.n_embd)\n self.pos_emb = PositionalEmbedding(config.n_embd, p_drop=0.2)\n # 
self.pos_emb_id = nn.Parameter(torch.zeros(1, config.id_block_size, config.n_embd))\n self.pos_emb_frames = nn.Parameter(torch.zeros(1, config.frame_block_size, config.n_embd))\n # self.temp_emb = TemporalEmbedding(config.n_embd, p_drop=0.2)\n # self.temp_emb = RotaryTemporalEmbedding(config.id_block_size)\n self.temp_emb = LearntTemporalEmbedding(config.id_block_size, config.n_embd)\n self.frame_temp_emb = LearntTemporalEmbedding(config.frame_block_size, config.n_embd)\n self.id_drop = nn.Dropout(config.id_drop)\n self.im_drop = nn.Dropout(config.im_drop)\n self.drop = nn.Dropout(config.embd_pdrop)\n\n # -- Visual Backbone -- #\n # self.visual_backbone = VideoFeaturesExtractor()\n self.video_encoder = VideoEncoder()\n frame_temp_emb = torch.tensor(list(itertools.chain(*[[n * 0.05] * (config.frame_block_size//20) for n in range(20)]))).unsqueeze(0)\n self.register_buffer(\"frame_temp_emb_seq\", frame_temp_emb)\n\n # -- Contrastive Loss -- ##\n # self.proj_id = ProjectNorm(config.n_embd, config.n_embd)\n # self.proj_vid = VidProjectNorm(config.n_embd, config.n_embd) # im_shape\n \n ## -- IM_Decoder -- ##\n # self.blocks_id = BlockSequential(*[Block(config) for _ in range(2)])\n # self.blocks_im = BlockSequential(*[Block(config) for _ in range(2)])\n # self.ln_f_id = nn.LayerNorm(config.n_embd)\n # self.ln_f_im = nn.LayerNorm(config.n_embd)\n\n ## -- Decoder -- ##\n # self.ln_f = nn.LayerNorm(config.n_embd)\n ## GPT\n # self.blocks = BlockSequential(*[Block(config) for _ in range(config.n_layer)])\n # self.ln_f = nn.LayerNorm(config.n_embd)\n ## enc_dec\n self.state_decoder = Decoder(config)\n self.ln_f_state_dec = nn.LayerNorm(config.n_embd)\n self.stimulus_decoder = Decoder(config)\n self.ln_f_stimulus_dec = nn.LayerNorm(config.n_embd)\n self.head = nn.Linear(config.n_embd, config.vocab_size, bias=False)\n \n ## -- Time -- ##\n # self.proj_time = TimeProjection(config.block_size, config.id_block_size, config.n_embd, config.n_dt)\n self.proj_time = ProjectNorm(config.n_embd, config.n_dt)\n # self.proj_time = ProjectNorm(config.n_embd, 1)\n \n ## -- PSTH -- ##\n # self.proj_psth = PSTHProjection(config)\n\n # Loss\n # self.dice_loss = DiceLossPSTH()\n # self.poisson_loss = PoissonCrossEntropyLoss()\n # self.hungarian_matcher = HungarianMatcher()\n # self.kldiv_loss = KLDivLoss()\n # self.truncated_loss = TruncatedLoss(trainset_size=config.data_size)\n # self.set_loss = SetLoss()\n # self.a = torch.tensor(0.5, requires_grad=True)\n\n self.block_size = config.block_size\n self.apply(self._init_weights)\n \n if config.class_weights is not None:\n for key in config.class_weights.keys():\n self.register_buffer(f\"class_weights_{key}\", config.class_weights[key]) \n \n logger.info(\"number of parameters: %e\", sum(p.numel() for p in self.parameters()))\n\n def get_block_size(self):\n return self.block_size\n\n def _init_weights(self, module):\n if isinstance(module, (nn.Linear, nn.Embedding)):\n module.weight.data.normal_(mean=0.0, std=0.02)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n def configure_optimizers(self, train_config):\n \"\"\"\n Separates parameters into those who will experience weight decay and those that will not\n \"\"\"\n if train_config.decay_weights:\n decay = set()\n no_decay = set()\n whitelist_weight_modules = (torch.nn.Linear, )\n blacklist_weight_modules = (torch.nn.LayerNorm, torch.nn.Embedding)\n for mn, m in 
self.named_modules():\n for pn, p in m.named_parameters():\n fpn = '%s.%s' % (mn, pn) if mn else pn # full param name\n if pn.endswith('bias'):\n # all biases will not be decayed\n no_decay.add(fpn)\n elif pn.endswith('weight') and isinstance(m, whitelist_weight_modules):\n # weights of whitelist modules will be weight decayed\n decay.add(fpn)\n elif pn.endswith('weight') and isinstance(m, blacklist_weight_modules):\n # weights of blacklist modules will NOT be weight decayed\n no_decay.add(fpn)\n else: no_decay.add(fpn)\n\n # special case the position embedding parameter in the root GPT module as not decayed\n black_list_mods = ['pos_emb', 'temp_emb']\n for mods in black_list_mods:\n for name, param in self.named_parameters():\n if mods in name:\n no_decay.add(name) # also pos_emb\n \n # validate that we considered every parameter\n param_dict = {pn: p for pn, p in self.named_parameters()}\n no_decay -= decay & no_decay\n inter_params = decay & no_decay\n union_params = decay | no_decay\n\n assert len(inter_params) == 0, \"parameters %s made it into both decay/no_decay sets!\" % (str(inter_params), )\n assert len(param_dict.keys() - union_params) == 0, \"parameters %s were not separated into either decay/no_decay set!\" \\\n % (str(param_dict.keys() - union_params), )\n\n \n # create the pytorch optimizer object\n optim_groups = [\n {\"params\": [param_dict[pn] for pn in sorted(list(decay))], \"weight_decay\": train_config.weight_decay},\n {\"params\": [param_dict[pn] for pn in sorted(list(no_decay))], \"weight_decay\": 0.0},\n ]\n optimizer = torch.optim.AdamW(optim_groups, lr=train_config.learning_rate, betas=train_config.betas)\n \n else:\n parameters = self.parameters()\n optimizer = torch.optim.Adam(parameters, lr=train_config.learning_rate)\n \n return optimizer\n \n def process_features(self, x):\n # batch, block_size, feature\n p_idx = x['id_prev']\n idx = x['id']\n dtx = x['dt']\n dtx_prev = x['dt_prev']\n frames = self.video_encoder(x['frames'])\n pad = x['pad']\n\n b, t = idx.size()\n # b_p, t_p = p_idx.size()\n bf, tf = frames.size()[0:2]\n\n # forward the GPT model\n ''' \n positional and temporal embeddings implemented in multiple ways, learnt, \n fourrier decomposition and in the case of time, just passed as is. 
\n '''\n # # Embeddings\n prev_id_position_embeddings = self.pos_emb(p_idx)\n prev_id_temporal_embeddings = self.temp_emb(dtx_prev.float())\n id_position_embeddings = self.pos_emb(idx) \n im_position_embeddings = self.pos_emb_frames\n temporal_embeddings = self.temp_emb(dtx.float())\n \n # Extract ID features\n prev_token_embeddings = self.id_drop(self.tok_emb(p_idx) + prev_id_temporal_embeddings + prev_id_position_embeddings)\n token_embeddings = self.tok_emb(idx) # each index maps to a (learnable) vector\n token_embeddings = token_embeddings + temporal_embeddings + id_position_embeddings\n token_embeddings = self.id_drop(token_embeddings)\n\n # Extract image features and add time embeddings\n im_temporal_embeddings = self.frame_temp_emb(self.frame_temp_emb_seq)\n im_embeddings = frames # self.tok_emb(frames)\n im_embeddings = im_embeddings + im_position_embeddings + im_temporal_embeddings\n im_embeddings = self.im_drop(im_embeddings) # separate pos emb?\n \n # Tidy up\n features = dict()\n features['id_prev'] = prev_token_embeddings\n features['id'] = token_embeddings\n features['frames'] = im_embeddings\n \n return features, pad\n\n def perceiver(self, features, pad):\n x = self.state_decoder(tgt=features['id'], memory=features['id_prev'], pad=pad)\n x = self.ln_f_state_dec(x)\n x = self.stimulus_decoder(tgt=features['id'], memory=features['frames'], pad=pad)\n x = self.ln_f_stimulus_dec(x)\n logits = self.head(x)\n\n return logits, x\n\n def enc_dec(self, features, pad):\n x = self.stimulus_decoder(tgt=features['id'], memory=features['frames'], pad=pad)\n x = self.ln_f_stimulus_dec(x)\n logits = self.head(x)\n\n return logits, x\n\n def GPTdecoder(self, features, pad, dtx=None):\n # image + neural features\n x = torch.cat((features['frames'], features['id']), dim=1)\n\n # Decoder\n x = self.blocks(x, pad, dtx) # (B, T, C)\n x = self.ln_f(x)\n logits = self.head(x)\n\n # print(logits.shape) # (B, T, Vocab)\n # logits_psth = x[:, -1] # (B, C)\n\n return logits, x\n\n def forward(self, x, targets=None):\n idx = x['id']\n dtx = x['dt']\n frames = x['frames']\n pad = x['pad']\n\n b, t = idx.size()\n # b, t = x['id'].shape[0], x['id'].shape[1] + x['id_prev'].shape[1]\n bf, tf = frames.size()[0:2]\n tf = self.config.frame_block_size\n # assert t + tf == self.config.block_size, f\"{tf} {t}\"\n # assert t <= self.block_size, \"Cannot forward, model block size is exhausted\"\n \n features, pad = self.process_features(x)\n logits, x = self.perceiver(features, pad)\n # logits, x = self.enc_dec(features, pad)\n # logits, x = self.GPTdecoder(features, pad)\n time = self.proj_time(x) # (B, T_id, 1)\n\n # print(x[:, 0].shape)\n # psth = self.proj_psth(x) # (B, Vocab_id)\n\n # if targets, calculate loss\n # calculate loss on logits up to padding token for each batch\n loss = None\n loss_frames = 0\n loss_id = []\n loss_time = []\n loss_dice = []\n loss_psth = []\n loss_hungarian = []\n if targets is not None:\n # loss_psth = self.dice_loss(psth, targets['modes'][:, tf:]) \n for B, P in enumerate(pad):\n tf = 0\n # im_logits = logits[B, :tf]\n # im_targets = targets['frames'][B, :tf]\n # loss_frames += F.cross_entropy(im_logits.view(-1, im_logits.size(-1)), im_targets.view(-1))\n id_logits = logits[B, tf:tf + t - P]\n id_targets = targets['id'][B, :t - P]\n\n loss_id_ = F.cross_entropy(id_logits.view(-1, id_logits.size(-1)), id_targets.view(-1), weight=self.class_weights_id)\n # if self.config.epoch >= 15:\n # self.truncated_loss.update_weight(id_logits[None, ...], id_targets[None, ...], 
id_indexes[None, ...])\n # loss_id_ = self.truncated_loss(id_logits[None, ...], id_targets[None, ...], id_indexes[None, ...])\n time_preds = time[B, :t - P]\n time_targets = targets['dt'][B, :t - P]\n loss_time_ = F.cross_entropy(time_preds.view(-1, time_preds.size(-1)), time_targets.view(-1), weight=self.class_weights_dt)\n # loss_time_ = F.mse_loss(time_preds.squeeze(-1), time_targets)\n # loss_id_ = self.poisson_loss(id_logits.view(-1, id_logits.size(-1)), F.one_hot(id_targets, self.config.vocab_size))\n # if len(id_targets) > 0:\n # indices = self.hungarian_matcher(id_logits, id_targets)\n # probs_matching, targets_matching = id_logits[indices[0][0]], id_targets[indices[0][1]]\n # loss_hungarian_ = F.cross_entropy(probs_matching, targets_matching, weight=self.class_weights).to(self.device)\n # loss_hungarian.append(loss_hungarian_)\n # # psth = self.proj_psth(x[B, -1]) # from the EOS position\n \n # loss_psth.append(torch.nan_to_num(self.set_loss(id_logits, id_targets)))\n # loss_psth_ = self.dice_loss(id_logits, id_targets)\n # loss_psth.append(torch.nan_to_num(loss_psth_))\n \n loss_time.append(torch.nan_to_num(loss_time_))\n loss_id.append(torch.nan_to_num(loss_id_))\n \n loss = dict()\n # loss['frames'] = loss_frames / (b / 3)\n loss['id'] = sum(loss_id) / (b * 2) # sum(loss_id) / (b * 2) # / len(loss_id)\n loss['time'] = sum(loss_time) / (b * 2)\n # loss['dice'] = sum(loss_dice) / len(loss_dice)\n # loss['dt'] = loss_time / (b * 50)\n # loss['hungarian'] = sum(loss_hungarian) / (b * 2)\n # loss['psth'] = sum(loss_psth) / (b * 2)\n\n for key in list(loss):\n if isinstance(loss[key], float):\n del loss[key]\n \n preds = dict()\n preds['id'] = logits # [:, tf:] # only id logits\n preds['dt'] = time\n\n return preds, features, loss" ]
[ [ "torch.as_tensor", "torch.nn.functional.softmax", "torch.rand", "torch.no_grad", "torch.nn.KLDivLoss", "scipy.optimize.linear_sum_assignment", "torch.cuda.is_available", "torch.log", "torch.cat", "torch.nn.Dropout", "torch.cos", "torch.optim.Adam", "torch.sin", "torch.nn.LayerNorm", "torch.arange", "torch.from_numpy", "torch.all", "torch.mean", "torch.unsqueeze", "torch.ones", "torch.tensor", "torch.nn.LogSoftmax", "torch.cuda.current_device", "torch.optim.AdamW", "torch.nn.PoissonNLLLoss", "torch.nan_to_num", "torch.sum", "torch.nn.Linear", "torch.zeros_like", "torch.nn.Embedding", "torch.nn.GELU", "torch.nn.Transformer", "torch.topk", "torch.zeros", "torch.nn.ReLU", "torch.gt" ], [ "torch.as_tensor", "torch.nn.functional.softmax", "torch.rand", "torch.no_grad", "torch.nn.KLDivLoss", "scipy.optimize.linear_sum_assignment", "torch.cuda.is_available", "torch.log", "torch.cat", "torch.nn.Dropout", "torch.cos", "torch.optim.Adam", "torch.sin", "torch.nn.LayerNorm", "torch.arange", "torch.from_numpy", "torch.all", "torch.mean", "torch.unsqueeze", "torch.ones", "torch.tensor", "torch.nn.LogSoftmax", "torch.cuda.current_device", "torch.optim.AdamW", "torch.nn.PoissonNLLLoss", "torch.nan_to_num", "torch.sum", "torch.nn.Linear", "torch.zeros_like", "torch.nn.Embedding", "torch.nn.GELU", "torch.nn.Transformer", "torch.topk", "torch.zeros", "torch.nn.ReLU", "torch.gt" ] ]
agramfort/pymultifracs
[ "3a8896f3f26180b05ccecb4a905b05a3ebc0308b" ]
[ "pymultifracs/simul/mrw.py" ]
[ "# Synthesis of multifractal random walk and derived processes.\n#\n# Roberto Fabio Leonarduzzi\n# January, 2019\n\nimport numpy as np\nfrom .fbm import fgn\nfrom .pzutils import gaussian_cme, gaussian_chol\nfrom numpy.fft import fft, ifft\n# import math\n# import matplotlib.pyplot as plt\n\n\ndef mrw(shape, H, lam, L, sigma=1, method='cme', z0=(None, None)):\n '''\n Create a realization of fractional Brownian motion using circulant\n matrix embedding.\n\n Parameters\n ----------\n shape : int | tuple(int)\n If scalar, it is the number of samples. If tuple it is (N, R),\n the number of samples and realizations, respectively.\n H : float\n Hurst exponent\n lam : float\n Lambda, intermittency parameter\n L : float\n Integral scale\n sigma : float\n Variance of process\n\n Returns\n -------\n mrw : ndarray\n Synthesized mrw realizations. If `shape` is scalar,\n fbm is ofshape (N,). Otherwise, it is of shape (N, R).\n\n References\n ----------\n .. [1] Bacry, Delour, Muzy, \"Multifractal Random Walk\", Physical Review E,\n 2001\n '''\n\n try:\n N, R = shape\n do_squeeze = False\n except TypeError: # shape is scalar\n N, R = shape, 1\n do_squeeze = True\n\n # Is 0.5 or 0 the lower bound ? Search biblio\n if not 0 <= H <= 1:\n raise ValueError('H must satisfy 0 <= H <= 1')\n\n if L > N:\n raise ValueError('Integral scale L is larger than data length N')\n\n # 1) Gaussian process w\n w = gaussian_w(N, R, L, lam, 1, method, z0[1])\n\n # Adjust mean to ensure convergence of variance\n r = 1/2 # see Bacry, Delour & Muzy, Phys Rev E, 2001, page 4\n w = w - np.mean(w, axis=0) - r * lam**2 * np.log(L)\n\n # 2) fGn e\n e = fgn((N, R), H, sigma, method=method, z0=z0[0])\n\n # 3) mrw\n mrw = np.cumsum(e * np.exp(w), axis=0)\n\n return mrw.squeeze() if do_squeeze else mrw\n\n\ndef mrw_cumul(shape, c1, c2, L, **kwargs):\n '''\n Wrapper for mrw generation from cumulants.\n\n Parameters\n ----------\n shape : int | tuple(int)\n If scalar, it is the number of samples. If tuple it is (N, R),\n the number of samples and realizations, respectively.\n c1 : float\n First order cumulant\n c2 : float\n Second order cumulant\n L : float\n Integral scale\n kwargs : dict\n Optional parameters passed to :obj:`mrw`\n\n Returns\n -------\n mrw : ndarray\n Synthesized mrw realizations. If `shape` is scalar,\n fbm is ofshape (N,). Otherwise, it is of shape (N, R).\n\n References\n ----------\n .. [1] Bacry, Delour, Muzy, \"Multifractal Random Walk\", Physical Review E,\n 2001\n '''\n\n H = c1 + c2\n lam = np.sqrt(-c2)\n\n return mrw(shape, H, lam, L, **kwargs)\n\n\ndef skewed_mrw(shape, H, lam, L, K0=1, alpha=1, sigma=1, dt=1, beta=1,\n do_mirror=False):\n '''\n Create skewed mrw as in Pochart & Bouchaud\n Assumes :math:`\\\\Delta_t=1`, so no parameter beta is needed.\n '''\n\n try:\n N, R = shape\n do_squeeze = False\n except TypeError: # shape is scalar\n N, R = shape, 1\n do_squeeze = True\n\n # Is 0.5 or 0 the lower bound ? 
Search biblio\n if not 0 <= H <= 1:\n raise ValueError('H must satisfy 0 <= H <= 1')\n\n if L / dt > N:\n raise ValueError('Integral scale L/dt is larger than data length N')\n\n # 1) Gaussian process w\n w = gaussian_w(N, R, L, lam, dt)\n\n # Adjust mean to ensure convergence of variance\n r = 1 # see Bacry, Delour & Muzy, Phys Rev E, 2001, page 4\n w = w - np.mean(w, axis=0) - r * lam**2 * np.log(L / dt)\n\n # 2) fGn e\n e = fgn((2*N + 1, R), H, sigma, dt)\n\n # 3) Correlate components\n past = skewness_convolution(e, K0, alpha, beta, dt)\n wtilde = w - past\n\n # 4) skewed mrw\n smrw = np.cumsum(e[N:] * np.exp(wtilde), axis=0)\n\n if do_squeeze:\n smrw = smrw.squeeze()\n\n if do_mirror:\n past_mirror = skewness_convolution(-e, K0, alpha, beta, dt)\n wtilde_mirror = w - past_mirror\n smrw_mirror = np.cumsum(-e[N:] * np.exp(wtilde_mirror), axis=0)\n if do_squeeze:\n smrw_mirror = smrw_mirror.squeeze()\n return smrw, smrw_mirror\n else:\n return smrw\n\n\ndef gaussian_w(N, R, L, lam, dt=1, method='cme', z0=None):\n '''\n Auxiliar function to create gaussian process w\n '''\n kmax = int(L / dt)\n k = np.arange(kmax)\n rho = np.ones((N))\n rho[:kmax] = L / (k + 1) / dt\n cov = (lam ** 2) * np.log(rho)\n if method == 'cme':\n w = gaussian_cme(cov, N, R, z0)\n elif method == 'chol':\n w = gaussian_chol(cov, N, R, z0)\n\n return w\n\n\ndef skewness_convolution(e, K0, alpha, beta=1, dt=1):\n '''\n Noise e should be of length 2*N, with \"N false past variables\" at the\n beginning to avoid spurious correlations due to cutoffs in convolution.\n '''\n N, _ = e.shape\n N = N // 2\n\n tau = np.arange(1, N+1)\n Kbar = np.zeros((2*N))\n Kbar[1:N+1] = K0 / (tau**alpha) / (dt**beta)\n skew_conv = np.real(ifft(fft(Kbar[:, None], axis=0) *\n fft(e, axis=0), axis=0))\n return skew_conv[N:]\n\n\ndef skewness_convolution_dumb(e, K0, alpha, beta=1, dt=1):\n '''\n Direct and inefficient calculation for testing purposes.\n Receives \"true\" input noise of size N.\n '''\n N, R = e.shape\n\n def K(i, j):\n return K0 / (j-i)**alpha / dt**beta\n\n scorr = np.zeros((N, R))\n for k in range(N):\n for i in range(k):\n scorr[k, :] += K(i, k) * e[i, :]\n return scorr\n\n\ndef mrw2D(shape, H, lam, L, sigma=1):\n '''\n Create a realization of fractional Brownian motion using circulant\n matrix embedding.\n\n Parameters\n ----------\n shape : int | tuple(int)\n If scalar, it is the number of samples. If tuple it is (N, R),\n the number of samples and realizations, respectively.\n H : float\n Hurst exponent\n lambda : float\n Intermittency parameter\n L : float\n Integral scale\n sigma : float\n Variance of process\n\n Returns\n -------\n mrw : ndarray\n Synthesized mrw realizations. If 'shape' is scalar,\n fbm is of shape (N,). Otherwise, it is of shape (N, N, R).\n\n References\n ----------\n .. 
[1] Bacry, Delour, Muzy, \"Multifractal Random Walk\", Physical Review E,\n 2001\n '''\n\n try:\n N, R = shape\n # do_squeeze = False\n except TypeError: # shape is scalar\n N, R = shape, 1\n # do_squeeze = True\n\n N = int(2 * np.ceil(N / 2))\n\n # dim = 2\n\n n = np.arange(-N // 2, N // 2)\n d = np.sqrt(n[:, None]**2 + n[None, :]**2)\n\n corr = lam**2 * np.log(np.maximum(L / (1 + d), 1))\n\n L = np.fft.fft2(corr)\n\n z1 = np.random.randn(N, N, R) + 1j * np.random.randn(N, N, R)\n w = np.exp(np.real(np.fft.ifft2(z1 * np.sqrt(L[..., None]), axes=(0, 1))))\n\n # Increment process:\n X = np.random.randn(N, N, R) * w\n\n # Fractional integration to produce motion:\n BX = fract_int_2d(X, H + 1)\n\n return BX, X\n\n\ndef fract_int_2d(x, alpha):\n '''\n Assumes size of x divisible by two\n '''\n N = x.shape[0]\n\n # Create Fourier filter\n k = np.arange(-N/2, N/2)\n\n d = np.sqrt(k[:, None]**2 + k[None, :]**2)\n mini = np.min(d[d != 0])\n d[d == 0] = mini\n filt = 1 / (d ** alpha)\n\n yhat = np.fft.fftshift(np.fft.fft2(x, axes=(0, 1)), axes=(0, 1))\n yhat *= filt[..., None]\n y = np.real(np.fft.ifft2(np.fft.ifftshift(yhat, axes=(0, 1)), axes=(0, 1)))\n return y\n" ]
[ [ "numpy.ones", "numpy.fft.fft", "numpy.ceil", "numpy.fft.fft2", "numpy.zeros", "numpy.maximum", "numpy.random.randn", "numpy.exp", "numpy.fft.ifftshift", "numpy.arange", "numpy.log", "numpy.min", "numpy.sqrt", "numpy.mean" ] ]
CatTiger/vnpy
[ "7901a0fb80a5b44d6fc752bd4b2b64ec62c8f84b" ]
[ "venv/lib/python3.7/site-packages/rqdatac/services/stock_status.py" ]
[ "# -*- coding: utf-8 -*-\nimport datetime\nimport warnings\n\nimport pandas as pd\nimport numpy as np\n\nfrom rqdatac.utils import to_datetime, to_date\nfrom rqdatac.validators import (\n ensure_date_range,\n ensure_date_or_today_int,\n ensure_list_of_string,\n check_items_in_container,\n ensure_order,\n ensure_order_book_id,\n ensure_order_book_ids,\n ensure_dates_base_on_listed_date,\n ensure_string,\n ensure_date_int\n)\nfrom rqdatac.services.basic import instruments\nfrom rqdatac.services.calendar import (\n get_trading_dates,\n get_previous_trading_date,\n get_trading_dates_in_type,\n)\nfrom rqdatac.client import get_client\nfrom rqdatac.decorators import export_as_api, compatible_with_parm\n\n\n@export_as_api\ndef is_st_stock(order_book_ids, start_date=None, end_date=None, market=\"cn\"):\n \"\"\"判断股票在给定的时间段是否是ST股, 返回值为一个DataFrame\n\n :param order_book_ids: 股票 id\n :param start_date: (Default value = None)\n :param end_date: (Default value = None)\n :param market: (Default value = \"cn\")\n\n \"\"\"\n order_book_ids = ensure_order_book_ids(order_book_ids, type=\"CS\", market=market)\n\n if len(order_book_ids) == 1:\n instrument = instruments(order_book_ids[0], market=market)\n start_date, end_date = ensure_dates_base_on_listed_date(instrument, start_date, end_date, market)\n if start_date is None:\n return\n\n start_date, end_date = ensure_date_range(start_date, end_date)\n\n trading_dates = pd.to_datetime(get_trading_dates(start_date, end_date, market=market))\n data = get_client().execute(\n \"get_st_days\", order_book_ids, start_date=start_date, end_date=end_date\n )\n df = pd.DataFrame(data=False, columns=order_book_ids, index=trading_dates)\n for idx, dates in data.items():\n for date in dates:\n date = to_datetime(date)\n df.at[date, idx] = True\n return df\n\n\n@export_as_api\ndef _is_st_stock(order_book_id, date=None, market=\"cn\"):\n \"\"\"判断股票在给定日期是否是ST股\n :param order_book_id: 股票id\n :param date: (Default value = None)\n :param market: (Default value = \"cn\")\n :returns: True or False\n \"\"\"\n order_book_id = ensure_order_book_id(order_book_id, type=\"CS\", market=market)\n date = ensure_date_or_today_int(date)\n df = is_st_stock(order_book_id, start_date=date, end_date=date, market=market)\n if df is None or df.empty:\n return False\n else:\n return df[order_book_id][0]\n\n\n@export_as_api\n@compatible_with_parm(name=\"country\", value=\"cn\", replace=\"market\")\ndef is_suspended(order_book_ids, start_date=None, end_date=None, market=\"cn\"):\n \"\"\"获取股票停牌信息\n\n :param order_book_ids: 股票名称\n :param start_date: 开始日期, 如'2013-01-04' (Default value = None)\n :param end_date: 结束日期,如'2014-01-04' (Default value = None)\n :param market: 地区代码, 如 'cn' (Default value = \"cn\")\n :returns: DataFrame\n\n \"\"\"\n order_book_ids = ensure_order_book_ids(order_book_ids, type=\"CS\", market=market)\n\n if len(order_book_ids) == 1:\n instrument = instruments(order_book_ids[0], market=market)\n start_date, end_date = ensure_dates_base_on_listed_date(instrument, start_date, end_date, market)\n if start_date is None:\n return\n\n start_date, end_date = ensure_date_range(start_date, end_date)\n\n trading_dates = pd.to_datetime(get_trading_dates(start_date, end_date, market=market))\n df = pd.DataFrame(data=False, columns=order_book_ids, index=trading_dates)\n data = get_client().execute(\"get_suspended_days\", order_book_ids, start_date, end_date, market=market)\n for idx, dates in data.items():\n for date in dates:\n date = to_datetime(int(date))\n df.at[date, idx] = True\n return 
df\n\n\nstock_fields = {\"shares_holding\": \"shares_holding\", \"holding_ratio\": \"holding_ratio\"}\nspecial_symbols = [\"all_connect\", \"shanghai_connect\", \"shenzhen_connect\"]\nsymbols_map = {\"shanghai_connect\": \"SH\", \"shenzhen_connect\": \"SZ\"}\n\n\n@export_as_api\ndef get_stock_connect(order_book_ids, start_date=None, end_date=None, fields=None, expect_df=False):\n \"\"\"获取\"陆股通\"的持股、持股比例\n\n :param order_book_ids: 股票列表\n :param start_date: 开始日期: 如'2017-03-17' (Default value = None)\n :param end_date: 结束日期: 如'2018-03-16' (Default value = None)\n :param fields: 默认为所有字段,可输入shares_holding或者holding_ratio (Default value = None)\n :param expect_df: 返回 MultiIndex DataFrame (Default value = False)\n :returns: 返回pandas.DataFrame or pandas.Panel\n\n \"\"\"\n if order_book_ids not in (\"shanghai_connect\", \"shenzhen_connect\", \"all_connect\"):\n order_book_ids = ensure_order_book_ids(order_book_ids, type=\"CS\")\n start_date, end_date = ensure_date_range(start_date, end_date)\n if fields is not None:\n fields = ensure_list_of_string(fields)\n for f in fields:\n if f not in (\"shares_holding\", \"holding_ratio\"):\n raise ValueError(\"invalid field: {}\".format(f))\n else:\n fields = [\"shares_holding\", \"holding_ratio\"]\n\n data = get_client().execute(\"get_stock_connect\", order_book_ids, start_date, end_date, fields)\n if not data:\n return None\n df = pd.DataFrame(data, columns=[\"trading_date\", \"order_book_id\"] + fields)\n\n if expect_df:\n df.sort_values([\"order_book_id\", \"trading_date\"], inplace=True)\n df.set_index([\"order_book_id\", \"trading_date\"], inplace=True)\n return df\n\n df = df.set_index([\"trading_date\", \"order_book_id\"])\n df = df.to_panel()\n df.major_axis.name = None\n df.minor_axis.name = None\n if len(order_book_ids) == 1:\n df = df.minor_xs(order_book_ids[0])\n if len(fields) == 1:\n df = df[fields[0]]\n if len(order_book_ids) != 1 and len(fields) != 1:\n warnings.warn(\"Panel is removed after pandas version 0.25.0.\"\n \" the default value of 'expect_df' will change to True in the future.\")\n return df\n\n\nMARGIN_FIELDS = (\n \"margin_balance\",\n \"buy_on_margin_value\",\n \"short_sell_quantity\",\n \"margin_repayment\",\n \"short_balance_quantity\",\n \"short_repayment_quantity\",\n \"short_balance\",\n \"total_balance\",\n)\n\nMARGIN_SUMMARY_MAP = {\"SH\": \"XSHG\", \"XSHG\": \"XSHG\", \"SZ\": \"XSHE\", \"XSHE\": \"XSHE\"}\n\n\n@export_as_api\ndef get_securities_margin(\n order_book_ids, start_date=None, end_date=None, fields=None, expect_df=False, market=\"cn\"\n):\n \"\"\"获取股票融资融券数据\n\n :param order_book_ids: 股票代码或代码列表\n :param start_date: 开始时间,支持 str, date, datetime, pandasTimestamp\n 默认为 end_date 之前一个月 (Default value = None)\n :param end_date: 结束时间 默认为当前日期前一天 (Default value = None)\n :param fields: str 或 list 类型. 
默认为 None, 返回所有字段。可选字段包括:\n today, week, month, three_month, six_month, year, current_year, total\n (Default value = None)\n :param expect_df: 返回 MultiIndex DataFrame (Default value = False)\n :param market: 地区代码, 如: 'cn' (Default value = \"cn\")\n :returns: 如果传入多个股票代码,且 fields 为多个或者 None,返回 pandas.Panel\n 如果传入一只股票或者 fields 为单个字段,则返回 pandas.DataFrame\n 如果传入的股票代码和字段数都是1,则返回 pandas.Series\n\n \"\"\"\n\n order_book_ids = ensure_list_of_string(order_book_ids, \"order_book_ids\")\n all_list = []\n for order_book_id in order_book_ids:\n if order_book_id.upper() in MARGIN_SUMMARY_MAP:\n all_list.append(MARGIN_SUMMARY_MAP[order_book_id.upper()])\n else:\n inst = instruments(order_book_id, market)\n\n if inst.type in [\"CS\", \"ETF\", \"LOF\"]:\n all_list.append(inst.order_book_id)\n else:\n warnings.warn(\"{} is not stock, ETF, or LOF.\".format(order_book_id))\n order_book_ids = all_list\n if not order_book_ids:\n raise ValueError(\"no valid securities in {}\".format(order_book_ids))\n\n if fields is None:\n fields = list(MARGIN_FIELDS)\n else:\n fields = ensure_list_of_string(fields, \"fields\")\n check_items_in_container(fields, MARGIN_FIELDS, \"fields\")\n fields = ensure_order(fields, MARGIN_FIELDS)\n start_date, end_date = ensure_date_range(start_date, end_date)\n if end_date > ensure_date_or_today_int(None):\n end_date = ensure_date_or_today_int(get_previous_trading_date(datetime.date.today()))\n trading_dates = pd.to_datetime(get_trading_dates(start_date, end_date, market=market))\n\n data = get_client().execute(\n \"get_securities_margin\", order_book_ids, start_date, end_date, market=market\n )\n if not data:\n return\n\n if expect_df:\n df = pd.DataFrame(data)\n df.sort_values([\"order_book_id\", \"date\"], inplace=True)\n df.set_index([\"order_book_id\", \"date\"], inplace=True)\n df = df.reindex(columns=fields)\n return df\n\n pl = pd.Panel(items=fields, major_axis=trading_dates, minor_axis=order_book_ids)\n for r in data:\n for field in fields:\n value = r.get(field)\n pl.at[field, r[\"date\"], r[\"order_book_id\"]] = value\n\n if len(order_book_ids) == 1:\n pl = pl.minor_xs(order_book_ids[0])\n if len(fields) == 1:\n pl = pl[fields[0]]\n if len(order_book_ids) != 1 and len(fields) != 1:\n warnings.warn(\"Panel is removed after pandas version 0.25.0.\"\n \" the default value of 'expect_df' will change to True in the future.\")\n return pl\n\n\nMARGIN_TYPE = (\"stock\", \"cash\")\nEXCHANGE_TYPE = {\"SZ\": \"XSHE\", \"sz\": \"XSHE\", \"xshe\": \"XSHE\", \"SH\": \"XSHG\", \"sh\": \"XSHG\", \"xshg\": \"XSHG\"}\nEXCHANGE_CONTENT = [\"XSHE\", \"XSHG\"]\n\n\n@export_as_api\ndef get_margin_stocks(date=None, exchange=None, margin_type='stock', market=\"cn\"):\n \"\"\"获取融资融券信息\n\n :param date: 查询日期,默认返回今天上一交易日,支持 str, timestamp, datetime 类型\n :param exchange: 交易所信息,默认不填写则返回全部。\n str类型,默认为 None,返回所有字段。可选字段包括:\n 'XSHE', 'sz' 代表深交所;'XSHG', 'sh' 代表上交所,不区分大小写\n (Default value = None)\n :param margin_type: 'stock' 代表融券卖出,'cash',代表融资买入,默认为'stock'\n\n \"\"\"\n if date:\n date = ensure_date_int(date)\n else:\n date = get_previous_trading_date(datetime.date.today())\n date = date.year * 10000 + date.month * 100 + date.day\n\n if exchange is None:\n exchange = EXCHANGE_CONTENT\n else:\n exchange = ensure_string(exchange, \"exchange\")\n if exchange in EXCHANGE_TYPE:\n exchange = EXCHANGE_TYPE[exchange]\n check_items_in_container(exchange, EXCHANGE_CONTENT, \"exchange\")\n exchange = [exchange]\n\n margin_type = ensure_string(margin_type, \"margin_type\")\n check_items_in_container(margin_type, MARGIN_TYPE, 
\"margin_type\")\n\n data = get_client().execute(\n \"get_margin_stocks\", date, exchange, margin_type, market=market\n )\n\n if not data:\n return []\n else:\n return sorted(data)\n\n\nshare_fields = {\n \"total\": \"total_shares\",\n \"circulation_a\": \"a_cir_shares\",\n \"non_circulation_a\": \"a_non_cir_shares\",\n \"total_a\": \"a_total_shares\",\n}\n\nanti_fields = {v: k for k, v in share_fields.items()}\n\n\n@export_as_api\n@compatible_with_parm(name=\"country\", value=\"cn\", replace=\"market\")\ndef get_shares(order_book_ids, start_date=None, end_date=None, fields=None, expect_df=False, market=\"cn\"):\n \"\"\"获取流通股本信息\n\n :param order_book_ids: 股票名称\n :param start_date: 开始日期, 如'2013-01-04' (Default value = None)\n :param end_date: 结束日期,如'2014-01-04' (Default value = None)\n :param fields: 如'total', 'circulation_a' (Default value = None)\n :param expect_df: 返回 MultiIndex DataFrame (Default value = False)\n :param market: 地区代码,如'cn' (Default value = \"cn\")\n :returns: 返回一个DataFrame\n\n \"\"\"\n order_book_ids = ensure_order_book_ids(order_book_ids, market=market)\n start_date, end_date = ensure_date_range(start_date, end_date)\n\n if fields:\n fields = ensure_list_of_string(fields, \"fields\")\n if 'management_circulation' in fields:\n fields.remove('management_circulation')\n if fields:\n warnings.warn(\"management_circulation is removed\")\n else:\n raise ValueError(\"management_circulation is removed\")\n check_items_in_container(fields, set(share_fields), \"fields\")\n fields = [share_fields[i] for i in fields]\n else:\n fields = list(share_fields.values())\n \n all_shares = get_client().execute(\"get_shares\", order_book_ids, fields, market=market)\n if not all_shares:\n return\n dates = get_trading_dates_in_type(start_date, end_date, expect_type=\"datetime\", market=market)\n df = pd.DataFrame(all_shares)\n unique = set(df.order_book_id)\n for order_book_id in order_book_ids:\n if order_book_id not in unique:\n df = df.append(\n {\"order_book_id\": order_book_id, \"date\": df.date.iloc[-1]}, ignore_index=True\n )\n df.set_index([\"date\", \"order_book_id\"], inplace=True)\n df.sort_index(inplace=True)\n df = df.unstack(level=1)\n index = df.index.union(dates)\n df = df.reindex(index)\n df = df.fillna(method=\"ffill\")\n df = df.loc[list(dates)]\n df = df.dropna(how=\"all\")\n df = df[fields]\n if expect_df:\n df = df.stack(1)\n df.index.set_names([\"date\", \"order_book_id\"], inplace=True)\n df = df.reorder_levels([\"order_book_id\", \"date\"]).sort_index()\n df = df.rename(columns=anti_fields)\n return df\n\n pl = df.stack(1).to_panel()\n pl.items = [anti_fields[i] for i in pl.items]\n if len(order_book_ids) == 1:\n pl = pl.minor_xs(order_book_ids[0])\n if len(fields) == 1:\n pl = pl[anti_fields[fields[0]]]\n if len(order_book_ids) != 1 and len(fields) != 1:\n warnings.warn(\"Panel is removed after pandas version 0.25.0.\"\n \" the default value of 'expect_df' will change to True in the future.\")\n return pl\n" ]
[ [ "pandas.DataFrame", "pandas.Panel" ] ]
pazamelin/openvino
[ "b7e8ef910d7ed8e52326d14dc6fd53b71d16ed48", "b7e8ef910d7ed8e52326d14dc6fd53b71d16ed48", "031e998a15ec738c64cc2379d7f30fb73087c272" ]
[ "src/bindings/python/tests/test_ngraph/test_ops_binary.py", "tools/mo/openvino/tools/mo/middle/passes/convert_data_type.py", "src/bindings/python/tests_compatibility/test_ngraph/test_ops_multioutput.py" ]
[ "# Copyright (C) 2018-2021 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n\nimport operator\n\nimport numpy as np\nimport pytest\n\nimport openvino.runtime.opset8 as ov\nfrom tests.runtime import get_runtime\nfrom tests.test_ngraph.util import run_op_node\n\n\[email protected](\n \"ng_api_helper,numpy_function\",\n [\n (ov.add, np.add),\n (ov.divide, np.divide),\n (ov.multiply, np.multiply),\n (ov.subtract, np.subtract),\n (ov.minimum, np.minimum),\n (ov.maximum, np.maximum),\n (ov.mod, np.mod),\n (ov.equal, np.equal),\n (ov.not_equal, np.not_equal),\n (ov.greater, np.greater),\n (ov.greater_equal, np.greater_equal),\n (ov.less, np.less),\n (ov.less_equal, np.less_equal),\n ],\n)\ndef test_binary_op(ng_api_helper, numpy_function):\n runtime = get_runtime()\n\n shape = [2, 2]\n parameter_a = ov.parameter(shape, name=\"A\", dtype=np.float32)\n parameter_b = ov.parameter(shape, name=\"B\", dtype=np.float32)\n\n model = ng_api_helper(parameter_a, parameter_b)\n computation = runtime.computation(model, parameter_a, parameter_b)\n\n value_a = np.array([[1, 2], [3, 4]], dtype=np.float32)\n value_b = np.array([[5, 6], [7, 8]], dtype=np.float32)\n\n result = computation(value_a, value_b)\n expected = numpy_function(value_a, value_b)\n assert np.allclose(result, expected)\n\n\[email protected](\n \"ng_api_helper,numpy_function\",\n [\n (ov.add, np.add),\n (ov.divide, np.divide),\n (ov.multiply, np.multiply),\n (ov.subtract, np.subtract),\n (ov.minimum, np.minimum),\n (ov.maximum, np.maximum),\n (ov.mod, np.mod),\n (ov.equal, np.equal),\n (ov.not_equal, np.not_equal),\n (ov.greater, np.greater),\n (ov.greater_equal, np.greater_equal),\n (ov.less, np.less),\n (ov.less_equal, np.less_equal),\n ],\n)\ndef test_binary_op_with_scalar(ng_api_helper, numpy_function):\n runtime = get_runtime()\n\n value_a = np.array([[1, 2], [3, 4]], dtype=np.float32)\n value_b = np.array([[5, 6], [7, 8]], dtype=np.float32)\n\n shape = [2, 2]\n parameter_a = ov.parameter(shape, name=\"A\", dtype=np.float32)\n\n model = ng_api_helper(parameter_a, value_b)\n computation = runtime.computation(model, parameter_a)\n\n result = computation(value_a)\n expected = numpy_function(value_a, value_b)\n assert np.allclose(result, expected)\n\n\[email protected](\n \"ng_api_helper,numpy_function\",\n [(ov.logical_and, np.logical_and), (ov.logical_or, np.logical_or), (ov.logical_xor, np.logical_xor)],\n)\ndef test_binary_logical_op(ng_api_helper, numpy_function):\n runtime = get_runtime()\n\n shape = [2, 2]\n parameter_a = ov.parameter(shape, name=\"A\", dtype=np.bool)\n parameter_b = ov.parameter(shape, name=\"B\", dtype=np.bool)\n\n model = ng_api_helper(parameter_a, parameter_b)\n computation = runtime.computation(model, parameter_a, parameter_b)\n\n value_a = np.array([[True, False], [False, True]], dtype=np.bool)\n value_b = np.array([[False, True], [False, True]], dtype=np.bool)\n\n result = computation(value_a, value_b)\n expected = numpy_function(value_a, value_b)\n assert np.allclose(result, expected)\n\n\[email protected](\n \"ng_api_helper,numpy_function\",\n [(ov.logical_and, np.logical_and), (ov.logical_or, np.logical_or), (ov.logical_xor, np.logical_xor)],\n)\ndef test_binary_logical_op_with_scalar(ng_api_helper, numpy_function):\n runtime = get_runtime()\n\n value_a = np.array([[True, False], [False, True]], dtype=np.bool)\n value_b = np.array([[False, True], [False, True]], dtype=np.bool)\n\n shape = [2, 2]\n parameter_a = ov.parameter(shape, name=\"A\", dtype=np.bool)\n\n model = ng_api_helper(parameter_a, 
value_b)\n computation = runtime.computation(model, parameter_a)\n\n result = computation(value_a)\n expected = numpy_function(value_a, value_b)\n assert np.allclose(result, expected)\n\n\[email protected](\n \"operator,numpy_function\",\n [\n (operator.add, np.add),\n (operator.sub, np.subtract),\n (operator.mul, np.multiply),\n (operator.truediv, np.divide),\n (operator.eq, np.equal),\n (operator.ne, np.not_equal),\n (operator.gt, np.greater),\n (operator.ge, np.greater_equal),\n (operator.lt, np.less),\n (operator.le, np.less_equal),\n ],\n)\ndef test_binary_operators(operator, numpy_function):\n runtime = get_runtime()\n\n value_a = np.array([[1, 2], [3, 4]], dtype=np.float32)\n value_b = np.array([[4, 5], [1, 7]], dtype=np.float32)\n\n shape = [2, 2]\n parameter_a = ov.parameter(shape, name=\"A\", dtype=np.float32)\n\n model = operator(parameter_a, value_b)\n computation = runtime.computation(model, parameter_a)\n\n result = computation(value_a)\n expected = numpy_function(value_a, value_b)\n assert np.allclose(result, expected)\n\n\[email protected](\n \"operator,numpy_function\",\n [\n (operator.add, np.add),\n (operator.sub, np.subtract),\n (operator.mul, np.multiply),\n (operator.truediv, np.divide),\n (operator.eq, np.equal),\n (operator.ne, np.not_equal),\n (operator.gt, np.greater),\n (operator.ge, np.greater_equal),\n (operator.lt, np.less),\n (operator.le, np.less_equal),\n ],\n)\ndef test_binary_operators_with_scalar(operator, numpy_function):\n runtime = get_runtime()\n\n value_a = np.array([[1, 2], [3, 4]], dtype=np.float32)\n value_b = np.array([[5, 6], [7, 8]], dtype=np.float32)\n\n shape = [2, 2]\n parameter_a = ov.parameter(shape, name=\"A\", dtype=np.float32)\n\n model = operator(parameter_a, value_b)\n computation = runtime.computation(model, parameter_a)\n\n result = computation(value_a)\n expected = numpy_function(value_a, value_b)\n assert np.allclose(result, expected)\n\n\ndef test_multiply():\n A = np.arange(48, dtype=np.int32).reshape((8, 1, 6, 1))\n B = np.arange(35, dtype=np.int32).reshape((7, 1, 5))\n\n expected = np.multiply(A, B)\n result = run_op_node([A, B], ov.multiply)\n\n assert np.allclose(result, expected)\n\n\ndef test_power_v1():\n A = np.arange(48, dtype=np.float32).reshape((8, 1, 6, 1))\n B = np.arange(20, dtype=np.float32).reshape((4, 1, 5))\n\n expected = np.power(A, B)\n result = run_op_node([A, B], ov.power)\n\n assert np.allclose(result, expected)\n", "# Copyright (C) 2018-2021 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n\nimport logging as log\n\nimport numpy as np\n\nfrom openvino.tools.mo.front.extractor import get_new_placeholder_name\nfrom openvino.tools.mo.graph.graph import Node, Graph\nfrom openvino.tools.mo.utils.error import Error\nfrom openvino.tools.mo.utils.utils import refer_to_faq_msg\n\n\"\"\"\nPacked data of custom types are stored in numpy uint8 data type.\nTo distinguish true uint8 and custom data we introduce this class not to store,\nbut to have unique data type in SUPPORTED_DATA_TYPES map\n\"\"\"\n\n\nclass packed_U1(np.generic):\n pass\n\n\nclass packed_U4(np.generic):\n pass\n\n\nclass packed_I4(np.generic):\n pass\n\n\nSUPPORTED_DATA_TYPES = {\n 'float': (np.float32, 'FP32', 'f32'),\n 'half': (np.float16, 'FP16', 'f16'),\n 'FP32': (np.float32, 'FP32', 'f32'),\n 'FP64': (np.float64, 'FP64', 'f64'),\n 'FP16': (np.float16, 'FP16', 'f16'),\n 'I32': (np.int32, 'I32', 'i32'),\n 'I64': (np.int64, 'I64', 'i64'),\n 'int8': (np.int8, 'I8', 'i8'),\n 'int32': (np.int32, 'I32', 'i32'),\n 'int64': (np.int64, 'I64', 
'i64'),\n 'bool': (np.bool, 'BOOL', 'boolean'),\n 'uint8': (np.uint8, 'U8', 'u8'),\n 'uint32': (np.uint32, 'U32', 'u32'),\n 'uint64': (np.uint64, 'U64', 'u64'),\n\n # custom types\n 'U1': (packed_U1, 'U1', 'u1'),\n 'int4': (packed_I4, 'I4', 'i4'),\n 'uint4': (packed_U4, 'U4', 'u4'),\n 'I4': (packed_I4, 'I4', 'i4'),\n 'U4': (packed_U4, 'U4', 'u4'),\n}\n\n\ndef data_type_str_to_np(data_type_str: str):\n return SUPPORTED_DATA_TYPES[data_type_str][0] if data_type_str in SUPPORTED_DATA_TYPES else None\n\n\ndef data_type_str_to_precision(data_type_str: str):\n return SUPPORTED_DATA_TYPES[data_type_str][1] if data_type_str in SUPPORTED_DATA_TYPES else None\n\n\ndef data_type_str_to_destination_type(data_type_str: str):\n return SUPPORTED_DATA_TYPES[data_type_str][2] if data_type_str in SUPPORTED_DATA_TYPES else None\n\n\ndef np_data_type_to_precision(np_data_type):\n for np_t, precision, _ in SUPPORTED_DATA_TYPES.values():\n if np_t == np_data_type:\n return precision\n raise Error('Data type \"{}\" is not supported'.format(np_data_type))\n\n\ndef np_data_type_to_destination_type(np_data_type):\n for np_t, _, destination_type in SUPPORTED_DATA_TYPES.values():\n if np_t == np_data_type:\n return destination_type\n raise Error('Data type \"{}\" is not supported'.format(np_data_type))\n\n\ndef destination_type_to_np_data_type(dst_type):\n for np_t, _, destination_type in SUPPORTED_DATA_TYPES.values():\n if destination_type == dst_type:\n return np_t\n raise Error('Destination type \"{}\" is not supported'.format(dst_type))\n\n\ndef precision_to_destination_type(data_type_str):\n for _, precision, destination_type in SUPPORTED_DATA_TYPES.values():\n if precision == data_type_str:\n return destination_type\n raise Error('Data type \"{}\" is not supported'.format(data_type_str))\n\n\ndef convert_blob(blob: np.ndarray, dst_type: type):\n if blob.dtype == dst_type:\n return blob, None, None\n\n converted_blob = blob.astype(dtype=dst_type, casting=\"unsafe\")\n if dst_type in (np.int32, np.int64, np.uint8, np.int8) and not np.array_equal(blob, converted_blob):\n raise Error('The conversion of blob with value \"{}\" to dst_type \"{}\" results in rounding'.format(\n blob, dst_type))\n\n finite_match = (np.isfinite(blob) != np.isfinite(converted_blob))\n zero_match = ((blob == 0) != (converted_blob == 0))\n finite_match_count = np.count_nonzero(finite_match)\n zero_match_count = np.count_nonzero(zero_match)\n\n return converted_blob, finite_match_count, zero_match_count\n\n\ndef convert_node_blobs(graph: Graph, node: Node, data_type: type):\n out_edges = graph.out_edges(node.node, data=True)\n\n # if the data.value is used as binary weights\n if any('bin' in d for _, __, d in out_edges):\n blob = node.value\n if blob.dtype != data_type:\n new_blob, finite_match_count, zero_match_count = convert_blob(blob, data_type)\n consumers = [x.name if x.has_valid('name') else '<NO NAME>' for x in node.out_nodes()]\n log.debug(\n 'Blob was converted to {} while dumping to the bin file. This blob is an input for {} nodes.'.format(\n data_type, consumers))\n if finite_match_count:\n log.error(\n (\"{} elements of {} were clipped to infinity while converting a blob for node [{}] to {}. \" +\n refer_to_faq_msg(76)).format(finite_match_count, blob.size, consumers, data_type))\n if zero_match_count:\n log.warning(\n (\"{} elements of {} were clipped to zero while converting a blob for node [{}] to {}. 
\" +\n refer_to_faq_msg(77)).format(zero_match_count, blob.size, consumers, data_type))\n\n node.value = new_blob\n # for the constant node need to propagate the converted value to the node output because there is a fake\n # input data for the 'Const' nodes being generated in the CreateConstNodesReplacement\n if len(node.out_nodes()) == 1 and node.out_node(0).op == 'Const':\n const_node = node.out_node(0)\n const_node.value = new_blob\n const_node.infer(const_node)\n const_node.type_infer(const_node)\n\n\ndef convert_parameters_data_type(graph: Graph, data_type_str: str):\n inputs = graph.get_op_nodes(op='Parameter')\n data_type = data_type_str_to_np(data_type_str)\n user_defined_data_types = graph.graph['user_shapes'] if 'user_shapes' in graph.graph else None\n for input in inputs:\n user_defined_type = None\n name = input.soft_get('initial_node_name', input.id)\n\n # override data type for Parameter specified by the user. This is a workaround for the issue in the\n # extensions.middle.ChangePlaceholderTypes transformation which has an incorrect condition and always overrides\n # Parameter data type to np.float32. When the transformation is fixed the code below must be updated\n if user_defined_data_types is not None and name in user_defined_data_types:\n for desc in user_defined_data_types[name]:\n if 'port' in desc and desc['port'] is None: # neither input nor output port specified\n user_defined_type = desc.get('data_type', None)\n else: # need to check the particular port the Parameter was created for\n p_name = get_new_placeholder_name(name, 'out' in desc, desc['out'] if 'out' in desc else desc['in'])\n if p_name == input.soft_get('name'):\n user_defined_type = desc.get('data_type', None)\n if user_defined_type is not None:\n log.info('Overriding Parameter node {} data type to {}'.format(name, user_defined_type))\n input['data_type'] = user_defined_type\n input.out_port(0).set_data_type(user_defined_type, True)\n elif not input.has_valid('data_type') or input.data_type == np.float32:\n input['data_type'] = data_type\n input.out_port(0).set_data_type(data_type, True)\n else:\n log.info('Do not change data type for node {}'.format(input.soft_get('name')))\n\n\ndef convert_blobs(graph: Graph, data_type_str: str):\n for node in graph.get_data_nodes():\n if node.value is not None:\n try:\n if node.value.dtype in [np.float32, np.float64, np.float16] and not node.has_and_set('correct_data_type'):\n convert_node_blobs(graph, node, data_type_str_to_np(data_type_str))\n except Exception as e:\n raise Error('Coudn\\'t convert blob {}, details: {}', node.soft_get('name'), e) from e\n", "# Copyright (C) 2018-2021 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n\nimport numpy as np\n\nimport ngraph as ng\nfrom tests_compatibility.runtime import get_runtime\n\n\ndef test_split():\n runtime = get_runtime()\n input_tensor = ng.constant(np.array([0, 1, 2, 3, 4, 5], dtype=np.int32))\n axis = ng.constant(0, dtype=np.int64)\n splits = 3\n\n split_node = ng.split(input_tensor, axis, splits)\n computation = runtime.computation(split_node)\n split_results = computation()\n expected_results = np.array([[0, 1], [2, 3], [4, 5]], dtype=np.int32)\n assert np.allclose(split_results, expected_results)\n\n\ndef test_variadic_split():\n runtime = get_runtime()\n input_tensor = ng.constant(np.array([[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]], dtype=np.int32))\n axis = ng.constant(1, dtype=np.int64)\n splits = ng.constant(np.array([2, 4], dtype=np.int64))\n\n v_split_node = ng.variadic_split(input_tensor, 
axis, splits)\n computation = runtime.computation(v_split_node)\n results = computation()\n split0 = np.array([[0, 1], [6, 7]], dtype=np.int32)\n split1 = np.array([[2, 3, 4, 5], [8, 9, 10, 11]], dtype=np.int32)\n\n assert np.allclose(results[0], split0)\n assert np.allclose(results[1], split1)\n" ]
[ [ "numpy.allclose", "numpy.multiply", "numpy.arange", "numpy.power", "numpy.array" ], [ "numpy.array_equal", "numpy.isfinite", "numpy.count_nonzero" ], [ "numpy.array", "numpy.allclose" ] ]
ihmeuw/cascade-at
[ "a5b1b5da1698163fd3bbafc6288968dd9c398096" ]
[ "tests/model/test_priors.py" ]
[ "import pytest\nimport numpy as np\nfrom numpy import isclose\nfrom numpy.random import RandomState\n\nfrom cascade_at.model.priors import (\n Constant,\n Gaussian,\n Uniform,\n Laplace,\n StudentsT,\n LogGaussian,\n LogLaplace,\n LogStudentsT,\n PriorError,\n)\n\n\ndef test_happy_construction():\n Uniform(-1, 1, 0, name=\"test\")\n Uniform(-1, 1, 0, 0.5, name=\"test\")\n Gaussian(0, 1, -10, 10, name=\"test2\")\n Gaussian(0, 1, -10, 10, 0.5, name=\"test2\")\n Laplace(0, 1, -10, 10, name=\"test3\")\n Laplace(0, 1, -10, 10, 0.5, name=\"test3\")\n StudentsT(0, 1, 2.5, -10, 10, name=\"test4\")\n LogGaussian(0, 1, 0.5, -10, 10, name=\"test5\")\n LogLaplace(0, 1, 0.5, -10, 10, name=\"test6\")\n LogStudentsT(0, 1, 2.5, 0.5, -10, 10, name=\"test7\")\n\n\ndef test_prior_equality():\n a = Gaussian(0, 1)\n b = Gaussian(0, 1)\n assert a == b\n\n a = Gaussian(0, 1, -1, 1)\n b = Gaussian(0, 1, -1, 1)\n assert a == b\n\n a = Uniform(0, 10)\n b = Uniform(0, 10)\n assert a == b\n\n a = Uniform(0, 10, name=\"test_prior\")\n b = Uniform(0, 10, name=\"test_prior\")\n assert a == b\n\n\ndef test_prior_nonequality():\n a = Gaussian(0, 1)\n b = Gaussian(1, 1)\n assert a != b\n\n a = Uniform(0, 1)\n b = Uniform(-1, 0)\n assert a != b\n\n a = Gaussian(0, 1, name=\"test_prior\")\n b = Gaussian(0, 1, name=\"other_test_prior\")\n assert a != b\n\n a = Gaussian(0, 1)\n b = Uniform(0, 1)\n assert a != b\n\n\ndef test_prior_sort():\n priors = [\n Uniform(lower=1e-10, upper=1, mean=5e-5, name=\"iota\"),\n Gaussian(0, 1, name=\"other_test_prior\"),\n Uniform(0, 1),\n ]\n\n # NOTE: This is a weak test of actual sorting behavior however all I\n # actually care about is that the sort is stable, I don't really care\n # what the order is\n assert sorted(priors) == sorted(reversed(priors))\n\n\ndef test_prior_hashing():\n s = {Gaussian(0, 1), Uniform(0, 1), Gaussian(0, 1), Uniform(0, 2), Uniform(0, 1)}\n\n assert len(s) == 3\n assert Gaussian(0, 1) in s\n assert Uniform(0, 10) not in s\n\n\ndef test_prior_hashing__near_miss():\n assert hash(Gaussian(0, 1.0000000000000001)) == hash(Gaussian(0, 1))\n assert hash(Gaussian(0, 1.000000000000001)) != hash(Gaussian(0, 1))\n\n\ndef test_bounds_check():\n with pytest.raises(PriorError) as excinfo:\n Uniform(0, -1, 1)\n assert \"Bounds are inconsistent\" in str(excinfo.value)\n\n\ndef test_validate_standard_deviation():\n with pytest.raises(PriorError) as excinfo:\n Gaussian(0, -1)\n assert \"must be positive\" in str(excinfo.value)\n\n\[email protected](\"bad_nu\", [-1, -3, 0, 2, 1.99])\ndef test_validate_nu(bad_nu):\n with pytest.raises(PriorError) as excinfo:\n StudentsT(0, 1, bad_nu)\n assert \"must be greater\" in str(excinfo.value)\n\n\[email protected]\ndef rng():\n return RandomState(34257234)\n\n\ndef test_const_fit():\n \"\"\"A constant distribution is unchanged.\"\"\"\n dist = Constant(0.023)\n assert isclose(dist.rvs(), 0.023)\n assert isclose(dist.mle([6, 24, 327]).mean, 0.023)\n\n\ndef test_uniform_fit(rng):\n dist = Uniform(-0.4, 0.6, 0.5)\n draws = dist.rvs(size=10000, random_state=rng)\n new_dist = dist.mle(draws)\n assert isclose(new_dist.mean, 0.1, atol=0.01)\n\n\[email protected](\"cls,params\", [\n (Gaussian, (0.1, 1, -10, 10)),\n (Gaussian, (0.1, 1, 0, 0.2)),\n (Laplace, (0, 1, -10, 10)),\n (StudentsT, (0, 1, 2.7, -10, 10)),\n])\ndef test_mle(cls, params, rng):\n dist = cls(*params)\n draw_dist = dist\n if hasattr(dist, \"mean\"):\n draw_dist = draw_dist.assign(mean=0.1)\n if hasattr(dist, \"standard_deviation\"):\n draw_dist = 
draw_dist.assign(standard_deviation=0.04)\n\n draws = draw_dist.rvs(size=10000, random_state=rng)\n assert np.all((dist.lower <= draws) & (draws <= dist.upper))\n new_dist = dist.mle(draws)\n\n if hasattr(dist, \"mean\"):\n assert isclose(new_dist.mean, 0.1, rtol=0.2)\n\n if hasattr(dist, \"standard_deviation\"):\n assert isclose(new_dist.standard_deviation, 0.04, rtol=0.2)\n" ]
[ [ "numpy.random.RandomState", "numpy.all", "numpy.isclose" ] ]
jiangbestone/DetectRccn
[ "fb30491201f8c64d5ca75298d52aa1a20c4bc6e3" ]
[ "models/rcnn.py" ]
[ "\nfrom torch.autograd import Variable\nfrom models.proposal_target_layer_cascade import *\nimport torchvision.models as models\nfrom models.proposal import *\n#bocknet\nclass ResNet(nn.Module):\n def __init__(self, block, layers, num_classes=1000,dropout_prob=0.2):\n self.inplanes = 64\n super(ResNet, self).__init__()\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\n bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n self.dropout = nn.Dropout(p=dropout_prob)\n self.avgpool = nn.AvgPool2d(7)\n self.fc = nn.Linear(512 * block.expansion, num_classes)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.dropout(x)\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n\n return x\n\nclass _fasterRCNN(nn.Module):\n \"\"\" faster RCNN \"\"\"\n def __init__(self, classes, class_agnostic):\n super(_fasterRCNN, self).__init__()\n self.classes = classes\n self.n_classes = len(classes)\n self.class_agnostic = class_agnostic\n # loss\n self.RCNN_loss_cls = 0\n self.RCNN_loss_bbox = 0\n\n\n\n def forward(self, im_data, im_info, gt_boxes, num_boxes):\n batch_size = im_data.size(0)\n\n im_info = im_info.data\n gt_boxes = gt_boxes.data\n num_boxes = num_boxes.data\n\n # feed image cfgs to base model to obtain base feature map\n base_feat = self.RCNN_base(im_data)\n\n # feed base feature map to RPN to obtain rois\n rois, rpn_loss_cls, rpn_loss_bbox = self.RCNN_rpn(base_feat, im_info, gt_boxes, num_boxes)\n\n # if it is training phase, then use ground truth bboxes for refining\n if self.training:\n roi_data = self.RCNN_proposal_target(rois, gt_boxes, num_boxes)\n rois, rois_label, rois_target, rois_inside_ws, rois_outside_ws = roi_data\n\n rois_label = Variable(rois_label.view(-1).long())\n else:\n rois_label = None\n rpn_loss_cls = 0\n rpn_loss_bbox = 0\n\n rois = Variable(rois)\n # do roi pooling based on predicted rois\n\n pooled_feat = self.RCNN_roi_pool(base_feat, rois.view(-1,5))\n\n # feed pooled features to top model\n pooled_feat = self._head_to_tail(pooled_feat)\n\n # compute bbox offset\n bbox_pred = self.RCNN_bbox_pred(pooled_feat)\n if self.training and not self.class_agnostic:\n # select the corresponding columns according to roi labels\n bbox_pred_view = 
bbox_pred.view(bbox_pred.size(0), int(bbox_pred.size(1) / 4), 4)\n bbox_pred_select = torch.gather(bbox_pred_view, 1, rois_label.view(rois_label.size(0), 1, 1).expand(rois_label.size(0), 1, 4))\n bbox_pred = bbox_pred_select.squeeze(1)\n\n # compute object classification probability\n cls_score = self.RCNN_cls_score(pooled_feat)\n cls_prob = F.softmax(cls_score, 1)\n\n RCNN_loss_cls = 0\n RCNN_loss_bbox = 0\n\n\n cls_prob = cls_prob.view(batch_size, rois.size(1), -1)\n bbox_pred = bbox_pred.view(batch_size, rois.size(1), -1)\n\n return rois, cls_prob, bbox_pred, rpn_loss_cls, rpn_loss_bbox, RCNN_loss_cls, RCNN_loss_bbox, rois_label\n\n def _init_weights(self):\n def normal_init(m, mean, stddev, truncated=False):\n \"\"\"\n weight initalizer: truncated normal and random normal.\n \"\"\"\n # x is a parameter\n if truncated:\n m.weight.data.normal_().fmod_(2).mul_(stddev).add_(mean) # not a perfect approximation\n else:\n m.weight.data.normal_(mean, stddev)\n m.bias.data.zero_()\n def create_architecture(self):\n self._init_modules()\n self._init_weights()\n\n\n\n#\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.bn1 = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.bn2 = nn.BatchNorm2d(planes)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\nclass Model(nn.Module):\n def __init__(self, model_cfg='datanet.yaml', ch=3, nc=None):\n super(Model, self).__init__()\n if type(model_cfg) is dict:\n self.md = model_cfg\n else:\n import yaml\n with open(model_cfg) as f:\n self.md = yaml.load(f, Loader=yaml.FullLoader)\n\n if nc and nc != self.md['nc']:\n print('Overriding %s nc=%g with nc=%g' % (model_cfg, self.md['nc'], nc))\n self.md['nc'] = nc\n self.model, self.save = BasicBlock(self.md, ch=[ch])\n\n m = self.model[-1]\n if isinstance(m, Detect):\n s = 128\n m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))])\n m.anchors /= m.stride.view(-1, 1, 1)\n check_anchor_order(m)\n self.stride = m.stride\n self._initialize_biases()\n\n torch_utils.initialize_weights(self)\n self._initialize_biases()\n torch_utils.model_info(self)\n print('')\n\n def forward(self, x, augment=False, profile=False):\n if augment:\n img_size = x.shape[-2:]\n s = [0.83, 0.67]\n y = []\n for i, xi in enumerate((x,\n torch_utils.scale_img(x.flip(3), s[0]),\n torch_utils.scale_img(x, s[1]),\n )):\n y.append(self.forward_once(xi)[0])\n\n y[1][..., :4] /= s[0] # scale\n y[1][..., 0] = img_size[1] - y[1][..., 0] # flip lr\n y[2][..., :4] /= s[1] # scale\n return torch.cat(y, 1), None\n else:\n return self.forward_once(x, profile)\n\n def forward_once(self, x, profile=False):\n y, dt = [], [] # outputs\n for m in self.model:\n if m.f != -1: # if not from previous layer\n x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers\n\n if profile:\n try:\n import thop\n o = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # FLOPS\n except:\n o = 0\n t = torch_utils.time_synchronized()\n for _ in range(10):\n _ = m(x)\n dt.append((torch_utils.time_synchronized() - t) * 100)\n print('%10.1f%10.0f%10.1fms %-40s' % (o, m.np, dt[-1], 
m.type))\n\n x = m(x) # run\n y.append(x if m.i in self.save else None) # save output\n\n if profile:\n print('%.1fms total' % sum(dt))\n return x\n\n def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency\n m = self.model[-1] # Detect() module\n for f, s in zip(m.f, m.stride): #  from\n mi = self.model[f % m.i]\n b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85)\n b[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image)\n b[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls\n mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)\n\n def _print_biases(self):\n m = self.model[-1] # Detect() module\n for f in sorted([x % m.i for x in m.f]): #  from\n b = self.model[f].bias.detach().view(m.na, -1).T # conv.bias(255) to (3,85)\n print(('%g Conv2d.bias:' + '%10.3g' * 6) % (f, *b[:5].mean(1).tolist(), b[5:].mean()))\n\n\n def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers\n print('Fusing layers... ', end='')\n for m in self.model.modules():\n if type(m) is Conv:\n m.conv = torch_utils.fuse_conv_and_bn(m.conv, m.bn) # update conv\n m.bn = None # remove batchnorm\n m.forward = m.fuseforward # update forward\n torch_utils.model_info(self)\n return self\n\ndef BasicBlock(runwget, ch):\n anchors, nc, gd, gw = runwget['anchors'], runwget['nc'], runwget['depth_multiple'], runwget['width_multiple']\n na = (len(anchors[0]) // 2) # number of anchors\n no = na * (nc + 5) # number of outputs = anchors * (classes + 5)\n\n layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out\n for i, (f, n, m, args) in enumerate(runwget['backbone'] + runwget['head']): # from, number, module, args\n m = eval(m) if isinstance(m, str) else m # eval strings\n for j, a in enumerate(args):\n try:\n args[j] = eval(a) if isinstance(a, str) else a # eval strings\n except:\n pass\n\n n = max(round(n * gd), 1) if n > 1 else n # depth gain\n if m in [nn.Conv2d, Conv, Bottleneck, SPP, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP, C3]:\n c1, c2 = ch[f], args[0]\n c2 = make_divisible(c2 * gw, 8) if c2 != no else c2\n args = [c1, c2, *args[1:]]\n if m in [BottleneckCSP, C3]:\n args.insert(2, n)\n n = 1\n elif m is nn.BatchNorm2d:\n args = [ch[f]]\n elif m is Concat:\n c2 = sum([ch[-1 if x == -1 else x + 1] for x in f])\n elif m is Detect:\n f = f or list(reversed([(-1 if j == i else j - 1) for j, x in enumerate(ch) if x == no]))\n else:\n c2 = ch[f]\n m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args) # module\n t = str(m)[8:-2].replace('__main__.', '') # module type\n np = sum([x.numel() for x in m_.parameters()]) # number params\n m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params\n save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist\n layers.append(m_)\n ch.append(c2)\n return nn.Sequential(*layers), sorted(save)\n\nclass vgg16(_fasterRCNN):\n def __init__(self, classes, pretrained=False, class_agnostic=False):\n self.model_path = 'cfgs/pretrained_model/vgg16_caffe.pth'\n self.dout_base_model = 512\n self.pretrained = pretrained\n self.class_agnostic = class_agnostic\n\n _fasterRCNN.__init__(self, classes, class_agnostic)\n\n def _init_modules(self):\n vgg = models.vgg16()\n if self.pretrained:\n print(\"Loading pretrained weights from %s\" % (self.model_path))\n state_dict = torch.load(self.model_path)\n vgg.load_state_dict({k: v for k, v in state_dict.items() if k in vgg.state_dict()})\n\n vgg.classifier = 
nn.Sequential(*list(vgg.classifier._modules.values())[:-1])\n\n self.RCNN_base = nn.Sequential(*list(vgg.features._modules.values())[:-1])\n\n for layer in range(10):\n for p in self.RCNN_base[layer].parameters(): p.requires_grad = False\n\n self.RCNN_top = vgg.classifier\n\n self.RCNN_cls_score = nn.Linear(4096, self.n_classes)\n\n if self.class_agnostic:\n self.RCNN_bbox_pred = nn.Linear(4096, 4)\n else:\n self.RCNN_bbox_pred = nn.Linear(4096, 4 * self.n_classes)\n\n def _head_to_tail(self, pool5):\n\n pool5_flat = pool5.view(pool5.size(0), -1)\n fc7 = self.RCNN_top(pool5_flat)\n\n return fc7\n\n\n" ]
[ [ "torch.autograd.Variable" ] ]
httpsgithu/mindspore
[ "c29d6bb764e233b427319cb89ba79e420f1e2c64" ]
[ "tests/ut/python/dataset/test_rgb_hsv.py" ]
[ "# Copyright 2019 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"\nTesting RgbToHsv and HsvToRgb op in DE\n\"\"\"\n\nimport colorsys\nimport numpy as np\nfrom numpy.testing import assert_allclose\n\nimport mindspore.dataset as ds\nimport mindspore.dataset.transforms.transforms\nimport mindspore.dataset.vision.transforms as vision\nimport mindspore.dataset.vision.py_transforms_util as util\n\nDATA_DIR = [\"../data/dataset/test_tf_file_3_images/train-0000-of-0001.data\"]\nSCHEMA_DIR = \"../data/dataset/test_tf_file_3_images/datasetSchema.json\"\n\n\ndef generate_numpy_random_rgb(shape):\n # Only generate floating points that are fractions like n / 256, since they\n # are RGB pixels. Some low-precision floating point types in this test can't\n # handle arbitrary precision floating points well.\n return np.random.randint(0, 256, shape) / 255.\n\n\ndef test_rgb_hsv_hwc():\n rgb_flat = generate_numpy_random_rgb((64, 3)).astype(np.float32)\n rgb_np = rgb_flat.reshape((8, 8, 3))\n hsv_base = np.array([\n colorsys.rgb_to_hsv(\n r.astype(np.float64), g.astype(np.float64), b.astype(np.float64))\n for r, g, b in rgb_flat\n ])\n hsv_base = hsv_base.reshape((8, 8, 3))\n hsv_de = util.rgb_to_hsvs(rgb_np, True)\n assert hsv_base.shape == hsv_de.shape\n assert_allclose(hsv_base.flatten(), hsv_de.flatten(), rtol=1e-5, atol=0)\n\n hsv_flat = hsv_base.reshape(64, 3)\n rgb_base = np.array([\n colorsys.hsv_to_rgb(\n h.astype(np.float64), s.astype(np.float64), v.astype(np.float64))\n for h, s, v in hsv_flat\n ])\n rgb_base = rgb_base.reshape((8, 8, 3))\n rgb_de = util.hsv_to_rgbs(hsv_base, True)\n assert rgb_base.shape == rgb_de.shape\n assert_allclose(rgb_base.flatten(), rgb_de.flatten(), rtol=1e-5, atol=0)\n\n\ndef test_rgb_hsv_batch_hwc():\n rgb_flat = generate_numpy_random_rgb((64, 3)).astype(np.float32)\n rgb_np = rgb_flat.reshape((4, 2, 8, 3))\n hsv_base = np.array([\n colorsys.rgb_to_hsv(\n r.astype(np.float64), g.astype(np.float64), b.astype(np.float64))\n for r, g, b in rgb_flat\n ])\n hsv_base = hsv_base.reshape((4, 2, 8, 3))\n hsv_de = util.rgb_to_hsvs(rgb_np, True)\n assert hsv_base.shape == hsv_de.shape\n assert_allclose(hsv_base.flatten(), hsv_de.flatten(), rtol=1e-5, atol=0)\n\n hsv_flat = hsv_base.reshape((64, 3))\n rgb_base = np.array([\n colorsys.hsv_to_rgb(\n h.astype(np.float64), s.astype(np.float64), v.astype(np.float64))\n for h, s, v in hsv_flat\n ])\n rgb_base = rgb_base.reshape((4, 2, 8, 3))\n rgb_de = util.hsv_to_rgbs(hsv_base, True)\n assert rgb_de.shape == rgb_base.shape\n assert_allclose(rgb_base.flatten(), rgb_de.flatten(), rtol=1e-5, atol=0)\n\n\ndef test_rgb_hsv_chw():\n rgb_flat = generate_numpy_random_rgb((64, 3)).astype(np.float32)\n rgb_np = rgb_flat.reshape((3, 8, 8))\n hsv_base = np.array([\n np.vectorize(colorsys.rgb_to_hsv)(\n rgb_np[0, :, :].astype(np.float64), rgb_np[1, :, :].astype(np.float64), rgb_np[2, :, :].astype(np.float64))\n ])\n hsv_base = 
hsv_base.reshape((3, 8, 8))\n hsv_de = util.rgb_to_hsvs(rgb_np, False)\n assert hsv_base.shape == hsv_de.shape\n assert_allclose(hsv_base.flatten(), hsv_de.flatten(), rtol=1e-5, atol=0)\n\n rgb_base = np.array([\n np.vectorize(colorsys.hsv_to_rgb)(\n hsv_base[0, :, :].astype(np.float64), hsv_base[1, :, :].astype(np.float64),\n hsv_base[2, :, :].astype(np.float64))\n ])\n rgb_base = rgb_base.reshape((3, 8, 8))\n rgb_de = util.hsv_to_rgbs(hsv_base, False)\n assert rgb_de.shape == rgb_base.shape\n assert_allclose(rgb_base.flatten(), rgb_de.flatten(), rtol=1e-5, atol=0)\n\n\ndef test_rgb_hsv_batch_chw():\n rgb_flat = generate_numpy_random_rgb((64, 3)).astype(np.float32)\n rgb_imgs = rgb_flat.reshape((4, 3, 2, 8))\n hsv_base_imgs = np.array([\n np.vectorize(colorsys.rgb_to_hsv)(\n img[0, :, :].astype(np.float64), img[1, :, :].astype(np.float64), img[2, :, :].astype(np.float64))\n for img in rgb_imgs\n ])\n hsv_de = util.rgb_to_hsvs(rgb_imgs, False)\n assert hsv_base_imgs.shape == hsv_de.shape\n assert_allclose(hsv_base_imgs.flatten(), hsv_de.flatten(), rtol=1e-5, atol=0)\n\n rgb_base = np.array([\n np.vectorize(colorsys.hsv_to_rgb)(\n img[0, :, :].astype(np.float64), img[1, :, :].astype(np.float64), img[2, :, :].astype(np.float64))\n for img in hsv_base_imgs\n ])\n rgb_de = util.hsv_to_rgbs(hsv_base_imgs, False)\n assert rgb_base.shape == rgb_de.shape\n assert_allclose(rgb_base.flatten(), rgb_de.flatten(), rtol=1e-5, atol=0)\n\n\ndef test_rgb_hsv_pipeline():\n # First dataset\n transforms1 = [\n vision.Decode(True),\n vision.Resize([64, 64]),\n vision.ToTensor()\n ]\n transforms1 = mindspore.dataset.transforms.transforms.Compose(transforms1)\n ds1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=[\"image\"], shuffle=False)\n ds1 = ds1.map(operations=transforms1, input_columns=[\"image\"])\n\n # Second dataset\n transforms2 = [\n vision.Decode(True),\n vision.Resize([64, 64]),\n vision.ToTensor(),\n vision.RgbToHsv(),\n vision.HsvToRgb()\n ]\n transform2 = mindspore.dataset.transforms.transforms.Compose(transforms2)\n ds2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=[\"image\"], shuffle=False)\n ds2 = ds2.map(operations=transform2, input_columns=[\"image\"])\n\n num_iter = 0\n for data1, data2 in zip(ds1.create_dict_iterator(num_epochs=1), ds2.create_dict_iterator(num_epochs=1)):\n num_iter += 1\n ori_img = data1[\"image\"].asnumpy()\n cvt_img = data2[\"image\"].asnumpy()\n assert_allclose(ori_img.flatten(), cvt_img.flatten(), rtol=1e-5, atol=0)\n assert ori_img.shape == cvt_img.shape\n\n\nif __name__ == \"__main__\":\n test_rgb_hsv_hwc()\n test_rgb_hsv_batch_hwc()\n test_rgb_hsv_chw()\n test_rgb_hsv_batch_chw()\n test_rgb_hsv_pipeline()\n" ]
[ [ "numpy.random.randint", "numpy.vectorize" ] ]
rtloftin/strategically_efficient_rl
[ "85a702b9361211d345a58cc60696e4e851d48ec4" ]
[ "algorithms/agents/intrinsic.py" ]
[ "import numpy as np\nimport scipy.signal\nfrom ray.rllib.policy.sample_batch import SampleBatch\nfrom ray.rllib.evaluation.postprocessing import Postprocessing\n\nfrom algorithms.curiosity import INTRINSIC_REWARD\n\nINTRINSIC_VALUE_TARGETS = \"intrinsic_value_targets\"\nINTRINSIC_VF_PREDS = \"intrinsic_vf_preds\"\n\n\ndef discount(x, gamma):\n return scipy.signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1]\n\n\ndef compute_advantages_intrinsic(rollout,\n last_r,\n last_intrinsic_r,\n gamma=0.9,\n intrinsic_gamma=0.9,\n lambda_=1.0,\n intrinsic_lambda_=1.0):\n \"\"\"\n Given a rollout, compute its value targets and the advantage. Assumes we are using separate\n value function heads for the extrinsic and intrinsic rewards\n Args:\n rollout (SampleBatch): SampleBatch of a single trajectory\n last_r (float): Value estimation for last observation\n gamma (float): Discount factor\n intrinsic_gamma (float): Discount factor\n lambda_ (float): Parameter for GAE\n intrinsic_lambda_ (float): Parameter for intrinsic GAE\n Returns:\n SampleBatch (SampleBatch): Object with experience from rollout and\n processed rewards.\n \"\"\"\n\n traj = {}\n trajsize = len(rollout[SampleBatch.ACTIONS])\n for key in rollout:\n traj[key] = np.stack(rollout[key])\n\n # Extrinsic value predictions and targets\n vpred_t = np.concatenate([rollout[SampleBatch.VF_PREDS], np.array([last_r])])\n delta_t = (traj[SampleBatch.REWARDS] + gamma * vpred_t[1:] - vpred_t[:-1])\n advantages = discount(delta_t, gamma * lambda_)\n\n traj[Postprocessing.VALUE_TARGETS] = (\n advantages + traj[SampleBatch.VF_PREDS]).copy().astype(np.float32)\n\n # Intrinsic value predictions\n intrinsic_vpred_t = np.concatenate([rollout[INTRINSIC_VF_PREDS], np.array([last_intrinsic_r])])\n intrinsic_delta_t = (traj[INTRINSIC_REWARD] + intrinsic_gamma * intrinsic_vpred_t[1:] - intrinsic_vpred_t[:-1])\n intrinsic_advantages = discount(intrinsic_delta_t, intrinsic_gamma * intrinsic_lambda_)\n\n traj[INTRINSIC_VALUE_TARGETS] = (\n intrinsic_advantages + traj[INTRINSIC_VF_PREDS]).copy().astype(np.float32)\n\n traj[Postprocessing.ADVANTAGES] = (advantages + intrinsic_advantages).copy().astype(np.float32)\n\n assert all(val.shape[0] == trajsize for val in traj.values()), \\\n \"Rollout stacked incorrectly!\"\n\n return SampleBatch(traj)\n" ]
[ [ "numpy.stack", "numpy.array" ] ]
KaihuaTang/scene-graph-benchmark.pytorch
[ "45cd54f7465b81d3154e94fcab2b554a09637f6f" ]
[ "maskrcnn_benchmark/modeling/roi_heads/relation_head/model_transformer.py" ]
[ "\"\"\"\nBased on the implementation of https://github.com/jadore801120/attention-is-all-you-need-pytorch\n\"\"\"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nfrom maskrcnn_benchmark.modeling.utils import cat\nfrom .utils_motifs import obj_edge_vectors, to_onehot, nms_overlaps, encode_box_info\n\nclass ScaledDotProductAttention(nn.Module):\n ''' Scaled Dot-Product Attention '''\n def __init__(self, temperature, attn_dropout=0.1):\n super().__init__()\n self.temperature = temperature\n self.dropout = nn.Dropout(attn_dropout)\n self.softmax = nn.Softmax(dim=2)\n\n def forward(self, q, k, v, mask=None):\n \"\"\"\n Args:\n q (bsz, len_q, dim_q)\n k (bsz, len_k, dim_k)\n v (bsz, len_v, dim_v)\n Note: len_k==len_v, and dim_q==dim_k\n Returns:\n output (bsz, len_q, dim_v)\n attn (bsz, len_q, len_k)\n \"\"\"\n attn = torch.bmm(q, k.transpose(1, 2))\n attn = attn / self.temperature\n\n if mask is not None:\n attn = attn.masked_fill(mask, -np.inf)\n\n attn = self.softmax(attn)\n attn = self.dropout(attn)\n output = torch.bmm(attn, v)\n\n return output, attn\n\n\nclass MultiHeadAttention(nn.Module):\n ''' Multi-Head Attention module '''\n def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):\n super().__init__()\n self.n_head = n_head\n self.d_k = d_k\n self.d_v = d_v\n\n self.w_qs = nn.Linear(d_model, n_head * d_k)\n self.w_ks = nn.Linear(d_model, n_head * d_k)\n self.w_vs = nn.Linear(d_model, n_head * d_v)\n nn.init.normal_(self.w_qs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))\n nn.init.normal_(self.w_ks.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))\n nn.init.normal_(self.w_vs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_v)))\n\n self.attention = ScaledDotProductAttention(temperature=np.power(d_k, 0.5))\n self.layer_norm = nn.LayerNorm(d_model)\n\n self.fc = nn.Linear(n_head * d_v, d_model)\n nn.init.xavier_normal_(self.fc.weight)\n\n self.dropout = nn.Dropout(dropout)\n\n\n def forward(self, q, k, v, mask=None):\n \"\"\"\n Args:\n q (bsz, len_q, dim_q)\n k (bsz, len_k, dim_k)\n v (bsz, len_v, dim_v)\n Note: len_k==len_v, and dim_q==dim_k\n Returns:\n output (bsz, len_q, d_model)\n attn (bsz, len_q, len_k)\n \"\"\"\n d_k, d_v, n_head = self.d_k, self.d_v, self.n_head\n\n sz_b, len_q, _ = q.size()\n sz_b, len_k, _ = k.size()\n sz_b, len_v, _ = v.size() # len_k==len_v\n\n residual = q\n\n q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)\n k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)\n v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)\n\n q = q.permute(2, 0, 1, 3).contiguous().view(-1, len_q, d_k) # (n*b) x lq x dk\n k = k.permute(2, 0, 1, 3).contiguous().view(-1, len_k, d_k) # (n*b) x lk x dk\n v = v.permute(2, 0, 1, 3).contiguous().view(-1, len_v, d_v) # (n*b) x lv x dv\n\n mask = mask.repeat(n_head, 1, 1) # (n*b) x .. x ..\n output, attn = self.attention(q, k, v, mask=mask)\n\n output = output.view(n_head, sz_b, len_q, d_v)\n output = output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1) # b x lq x (n*dv)\n\n output = self.dropout(self.fc(output))\n output = self.layer_norm(output + residual)\n\n return output, attn\n\n\nclass PositionwiseFeedForward(nn.Module):\n ''' A two-feed-forward-layer module '''\n def __init__(self, d_in, d_hid, dropout=0.1):\n super().__init__()\n self.w_1 = nn.Conv1d(d_in, d_hid, 1) # position-wise\n self.w_2 = nn.Conv1d(d_hid, d_in, 1) # position-wise\n self.layer_norm = nn.LayerNorm(d_in)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, x):\n \"\"\"\n Merge adjacent information. 
Equal to linear layer if kernel size is 1\n Args:\n x (bsz, len, dim)\n Returns:\n output (bsz, len, dim)\n \"\"\"\n residual = x\n output = x.transpose(1, 2)\n output = self.w_2(F.relu(self.w_1(output)))\n output = output.transpose(1, 2)\n output = self.dropout(output)\n output = self.layer_norm(output + residual)\n return output\n\n\nclass EncoderLayer(nn.Module):\n ''' Compose with two layers '''\n def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1):\n super(EncoderLayer, self).__init__()\n self.slf_attn = MultiHeadAttention(\n n_head, d_model, d_k, d_v, dropout=dropout)\n self.pos_ffn = PositionwiseFeedForward(d_model, d_inner, dropout=dropout)\n\n def forward(self, enc_input, non_pad_mask=None, slf_attn_mask=None):\n enc_output, enc_slf_attn = self.slf_attn(\n enc_input, enc_input, enc_input, mask=slf_attn_mask)\n enc_output *= non_pad_mask.float()\n\n enc_output = self.pos_ffn(enc_output)\n enc_output *= non_pad_mask.float()\n\n return enc_output, enc_slf_attn\n\n\nclass TransformerEncoder(nn.Module):\n \"\"\"\n A encoder model with self attention mechanism.\n \"\"\"\n def __init__(self, n_layers, n_head, d_k, d_v, d_model, d_inner, dropout=0.1):\n super().__init__()\n self.layer_stack = nn.ModuleList([\n EncoderLayer(d_model, d_inner, n_head, d_k, d_v, dropout=dropout)\n for _ in range(n_layers)])\n\n def forward(self, input_feats, num_objs):\n \"\"\"\n Args:\n input_feats [Tensor] (#total_box, d_model) : bounding box features of a batch\n num_objs [list of int] (bsz, ) : number of bounding box of each image\n Returns:\n enc_output [Tensor] (#total_box, d_model)\n \"\"\"\n original_input_feats = input_feats\n input_feats = input_feats.split(num_objs, dim=0)\n input_feats = nn.utils.rnn.pad_sequence(input_feats, batch_first=True)\n\n # -- Prepare masks\n bsz = len(num_objs)\n device = input_feats.device\n pad_len = max(num_objs)\n num_objs_ = torch.LongTensor(num_objs).to(device).unsqueeze(1).expand(-1, pad_len)\n slf_attn_mask = torch.arange(pad_len, device=device).view(1, -1).expand(bsz, -1).ge(num_objs_).unsqueeze(1).expand(-1, pad_len, -1) # (bsz, pad_len, pad_len)\n non_pad_mask = torch.arange(pad_len, device=device).to(device).view(1, -1).expand(bsz, -1).lt(num_objs_).unsqueeze(-1) # (bsz, pad_len, 1)\n\n # -- Forward\n enc_output = input_feats\n for enc_layer in self.layer_stack:\n enc_output, enc_slf_attn = enc_layer(\n enc_output,\n non_pad_mask=non_pad_mask,\n slf_attn_mask=slf_attn_mask)\n\n enc_output = enc_output[non_pad_mask.squeeze(-1)]\n return enc_output\n\n\nclass TransformerContext(nn.Module):\n def __init__(self, config, obj_classes, rel_classes, in_channels):\n super().__init__()\n self.cfg = config\n # setting parameters\n if self.cfg.MODEL.ROI_RELATION_HEAD.USE_GT_BOX:\n self.mode = 'predcls' if self.cfg.MODEL.ROI_RELATION_HEAD.USE_GT_OBJECT_LABEL else 'sgcls'\n else:\n self.mode = 'sgdet'\n self.obj_classes = obj_classes\n self.rel_classes = rel_classes\n self.num_obj_cls = len(obj_classes)\n self.num_rel_cls = len(rel_classes)\n self.in_channels = in_channels\n self.obj_dim = in_channels\n self.embed_dim = self.cfg.MODEL.ROI_RELATION_HEAD.EMBED_DIM\n self.hidden_dim = self.cfg.MODEL.ROI_RELATION_HEAD.CONTEXT_HIDDEN_DIM\n self.nms_thresh = self.cfg.TEST.RELATION.LATER_NMS_PREDICTION_THRES\n\n self.dropout_rate = self.cfg.MODEL.ROI_RELATION_HEAD.TRANSFORMER.DROPOUT_RATE \n self.obj_layer = self.cfg.MODEL.ROI_RELATION_HEAD.TRANSFORMER.OBJ_LAYER \n self.edge_layer = self.cfg.MODEL.ROI_RELATION_HEAD.TRANSFORMER.REL_LAYER \n self.num_head = 
self.cfg.MODEL.ROI_RELATION_HEAD.TRANSFORMER.NUM_HEAD \n self.inner_dim = self.cfg.MODEL.ROI_RELATION_HEAD.TRANSFORMER.INNER_DIM \n self.k_dim = self.cfg.MODEL.ROI_RELATION_HEAD.TRANSFORMER.KEY_DIM \n self.v_dim = self.cfg.MODEL.ROI_RELATION_HEAD.TRANSFORMER.VAL_DIM \n\n\n # the following word embedding layer should be initalize by glove.6B before using\n embed_vecs = obj_edge_vectors(self.obj_classes, wv_dir=self.cfg.GLOVE_DIR, wv_dim=self.embed_dim)\n self.obj_embed1 = nn.Embedding(self.num_obj_cls, self.embed_dim)\n self.obj_embed2 = nn.Embedding(self.num_obj_cls, self.embed_dim)\n with torch.no_grad():\n self.obj_embed1.weight.copy_(embed_vecs, non_blocking=True)\n self.obj_embed2.weight.copy_(embed_vecs, non_blocking=True)\n\n # position embedding\n self.bbox_embed = nn.Sequential(*[\n nn.Linear(9, 32), nn.ReLU(inplace=True), nn.Dropout(0.1),\n nn.Linear(32, 128), nn.ReLU(inplace=True), nn.Dropout(0.1),\n ])\n self.lin_obj = nn.Linear(self.in_channels + self.embed_dim + 128, self.hidden_dim)\n self.lin_edge = nn.Linear(self.embed_dim + self.hidden_dim + self.in_channels, self.hidden_dim)\n self.out_obj = nn.Linear(self.hidden_dim, self.num_obj_cls)\n self.context_obj = TransformerEncoder(self.obj_layer, self.num_head, self.k_dim, \n self.v_dim, self.hidden_dim, self.inner_dim, self.dropout_rate)\n self.context_edge = TransformerEncoder(self.edge_layer, self.num_head, self.k_dim, \n self.v_dim, self.hidden_dim, self.inner_dim, self.dropout_rate)\n\n \n def forward(self, roi_features, proposals, logger=None):\n # labels will be used in DecoderRNN during training\n use_gt_label = self.training or self.cfg.MODEL.ROI_RELATION_HEAD.USE_GT_OBJECT_LABEL\n obj_labels = cat([proposal.get_field(\"labels\") for proposal in proposals], dim=0) if use_gt_label else None\n\n # label/logits embedding will be used as input\n if self.cfg.MODEL.ROI_RELATION_HEAD.USE_GT_OBJECT_LABEL:\n obj_embed = self.obj_embed1(obj_labels)\n else:\n obj_logits = cat([proposal.get_field(\"predict_logits\") for proposal in proposals], dim=0).detach()\n obj_embed = F.softmax(obj_logits, dim=1) @ self.obj_embed1.weight\n \n # bbox embedding will be used as input\n assert proposals[0].mode == 'xyxy'\n pos_embed = self.bbox_embed(encode_box_info(proposals))\n\n # encode objects with transformer\n obj_pre_rep = cat((roi_features, obj_embed, pos_embed), -1)\n num_objs = [len(p) for p in proposals]\n obj_pre_rep = self.lin_obj(obj_pre_rep)\n obj_feats = self.context_obj(obj_pre_rep, num_objs)\n\n # predict obj_dists and obj_preds\n if self.mode == 'predcls':\n obj_preds = obj_labels\n obj_dists = to_onehot(obj_preds, self.num_obj_cls)\n edge_pre_rep = cat((roi_features, obj_feats, self.obj_embed2(obj_labels)), dim=-1)\n else:\n obj_dists = self.out_obj(obj_feats)\n use_decoder_nms = self.mode == 'sgdet' and not self.training\n if use_decoder_nms:\n boxes_per_cls = [proposal.get_field('boxes_per_cls') for proposal in proposals]\n obj_preds = self.nms_per_cls(obj_dists, boxes_per_cls, num_objs)\n else:\n obj_preds = obj_dists[:, 1:].max(1)[1] + 1\n edge_pre_rep = cat((roi_features, obj_feats, self.obj_embed2(obj_preds)), dim=-1)\n\n # edge context\n edge_pre_rep = self.lin_edge(edge_pre_rep)\n edge_ctx = self.context_edge(edge_pre_rep, num_objs)\n\n return obj_dists, obj_preds, edge_ctx\n\n def nms_per_cls(self, obj_dists, boxes_per_cls, num_objs):\n obj_dists = obj_dists.split(num_objs, dim=0)\n obj_preds = []\n for i in range(len(num_objs)):\n is_overlap = nms_overlaps(boxes_per_cls[i]).cpu().numpy() >= self.nms_thresh # (#box, 
#box, #class)\n\n out_dists_sampled = F.softmax(obj_dists[i], -1).cpu().numpy()\n out_dists_sampled[:, 0] = -1\n\n out_label = obj_dists[i].new(num_objs[i]).fill_(0)\n\n for i in range(num_objs[i]):\n box_ind, cls_ind = np.unravel_index(out_dists_sampled.argmax(), out_dists_sampled.shape)\n out_label[int(box_ind)] = int(cls_ind)\n out_dists_sampled[is_overlap[box_ind,:,cls_ind], cls_ind] = 0.0\n out_dists_sampled[box_ind] = -1.0 # This way we won't re-sample\n\n obj_preds.append(out_label.long())\n obj_preds = torch.cat(obj_preds, dim=0)\n return obj_preds\n" ]
[ [ "torch.nn.utils.rnn.pad_sequence", "torch.nn.Linear", "torch.nn.init.xavier_normal_", "torch.nn.functional.softmax", "torch.nn.Softmax", "torch.no_grad", "torch.nn.Embedding", "torch.nn.Conv1d", "torch.nn.ReLU", "torch.nn.LayerNorm", "numpy.power", "torch.arange", "torch.cat", "numpy.sqrt", "torch.LongTensor", "torch.bmm", "torch.nn.Dropout" ] ]
lenna-project/birds-plugin
[ "c548790dcb0593b80ea6da4605e7aa32e3f141ae" ]
[ "scripts/train.py" ]
[ "import logging\nimport numpy as np\nimport os\nimport PIL\nimport PIL.Image\nimport tensorflow as tf\n\nfrom tensorflow.keras.layers import Layer, Conv2D, MaxPool2D, Dense, Flatten, Dropout, GlobalAveragePooling2D\nfrom tensorflow.keras.applications import MobileNetV2\nfrom tensorflow.keras import layers\nfrom tensorflow.keras import Model\n\nimg_height = 224\nimg_width = 224\nbatch_size = 64\n\ndata_dir = './100-bird-species/'\ndata_dir_train = os.path.join(data_dir, 'train')\ndata_dir_valid = os.path.join(data_dir, 'valid')\ndata_dir_test = os.path.join(data_dir, 'test')\n\ntrain_ds = tf.keras.utils.image_dataset_from_directory(\n data_dir_train,\n label_mode='categorical',\n seed=123,\n image_size=(img_height, img_width),\n batch_size=batch_size)\n\nvalid_ds = tf.keras.utils.image_dataset_from_directory(\n data_dir_valid,\n label_mode='categorical',\n seed=123,\n image_size=(img_height, img_width),\n batch_size=batch_size)\n\ntest_ds = tf.keras.utils.image_dataset_from_directory(\n data_dir_test,\n label_mode='categorical',\n seed=123,\n image_size=(img_height, img_width),\n batch_size=batch_size)\n\n\ndef normalize(img, label):\n return img / 255.0, label\n\n\ndata_augmentation = tf.keras.Sequential([\n tf.keras.layers.RandomFlip(\"horizontal\"),\n tf.keras.layers.RandomRotation(0.2),\n tf.keras.layers.RandomZoom(0.2)\n])\n\ntrain_dataset = (train_ds\n .map(normalize)\n .map(lambda x, y: (data_augmentation(x), y))\n .prefetch(tf.data.AUTOTUNE))\n\nvalid_dataset = valid_ds.map(normalize)\ntest_dataset = test_ds.map(normalize)\n\n\ndef get_birds_mobilenet():\n pre_trained_model = MobileNetV2(\n include_top=False,\n input_shape=(img_height, img_width, 3),\n classifier_activation='softmax'\n )\n\n for layer in pre_trained_model.layers:\n layer.trainable = False\n\n last_layer = pre_trained_model.output\n last_layer.trainable = True\n\n x = GlobalAveragePooling2D()(last_layer)\n x = Dense(1024, activation='relu')(x)\n x = layers.Dense(325, activation='softmax')(x)\n\n model = Model(pre_trained_model.input, x)\n return model\n\n\nmodel = get_birds_mobilenet()\nmodel.summary()\n\nmodel.compile(loss='categorical_crossentropy',\n optimizer='adam', metrics=['accuracy'])\ncheckpoint_path = \"./checkpoints/birds_mobilenet/\"\n\nmodel.load_weights(checkpoint_path)\n\nmodel_history = model.fit(\n train_dataset,\n validation_data=valid_dataset,\n epochs=200,\n callbacks=[\n #tf.keras.callbacks.EarlyStopping(patience=5),\n tf.keras.callbacks.ModelCheckpoint(\n filepath=checkpoint_path, verbose=0, save_freq=\"epoch\")\n ])\n" ]
[ [ "tensorflow.keras.applications.MobileNetV2", "tensorflow.keras.layers.RandomRotation", "tensorflow.keras.layers.RandomZoom", "tensorflow.keras.Model", "tensorflow.keras.layers.RandomFlip", "tensorflow.keras.layers.Dense", "tensorflow.keras.callbacks.ModelCheckpoint", "tensorflow.keras.utils.image_dataset_from_directory", "tensorflow.keras.layers.GlobalAveragePooling2D" ] ]
tpimentelms/meaning2form
[ "624b3947b3ac2a7a521cf35c762fb56508236f74" ]
[ "learn_pipe/model/lstm.py" ]
[ "import torch.nn as nn\n\nfrom .base import BaseLM\n\n\nclass IpaLM(BaseLM):\n name = 'lstm'\n\n def __init__(self, vocab_size, hidden_size, nlayers=1, dropout=0.1, embedding_size=None, **kwargs):\n super().__init__(\n vocab_size, hidden_size, nlayers=nlayers, dropout=dropout, embedding_size=embedding_size, **kwargs)\n\n self.embedding = nn.Embedding(vocab_size, self.embedding_size)\n self.lstm = nn.LSTM(\n self.embedding_size, hidden_size, nlayers, dropout=(dropout if nlayers > 1 else 0), batch_first=True)\n self.dropout = nn.Dropout(dropout)\n self.out = nn.Linear(hidden_size, vocab_size)\n\n def forward(self, x, idx):\n h_old = self.context(idx)\n x_emb = self.dropout(self.get_embedding(x))\n\n c_t, h_t = self.lstm(x_emb, h_old)\n c_t = self.dropout(c_t).contiguous()\n\n logits = self.out(c_t)\n return logits, h_t\n\n def get_embedding(self, x):\n return self.embedding(x)\n\n def initHidden(self, bsz=1):\n weight = next(self.parameters()).data\n return weight.new(self.nlayers, bsz, self.hidden_size).zero_(), \\\n weight.new(self.nlayers, bsz, self.hidden_size).zero_()\n" ]
[ [ "torch.nn.LSTM", "torch.nn.Linear", "torch.nn.Embedding", "torch.nn.Dropout" ] ]
j-towns/jax
[ "49f3f991d4faae22fcd9d8248f3d36575b5004f6" ]
[ "tests/lax_numpy_einsum_test.py" ]
[ "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom collections import defaultdict\nimport itertools\n\nimport numpy as onp\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\n\nimport jax.numpy as np\nimport jax.test_util as jtu\n\nfrom jax.config import config\nconfig.parse_flags_with_absl()\n\n\ndef rng():\n return onp.random.RandomState(0)\n\n\nclass EinsumTest(jtu.JaxTestCase):\n\n def _check(self, s, *ops):\n a = onp.einsum(s, *ops)\n b = np.einsum(s, *ops)\n self.assertAllClose(a, b, atol=1e-4, rtol=1e-4, check_dtypes=True)\n\n def test_three_operands_1(self):\n r = rng()\n x = r.randn(3)\n y = r.randn(4)\n z = r.randn(5)\n s = 'i,j,k->ijk'\n self._check(s, x, y, z)\n\n def test_three_operands_2(self):\n r = rng()\n x = r.randn(3)\n y = r.randn(4)\n z = r.randn(5)\n s = 'i,j,k->ijk'\n self._check(s, x, y, z)\n\n def test_two_operands_1(self):\n r = rng()\n x = r.randn(3, 4)\n y = r.randn(4)\n s = 'ij,j->i'\n self._check(s, x, y)\n\n def test_two_operands_2(self):\n r = rng()\n x = r.randn(3, 4, 5)\n y = r.randn(4)\n s = 'ijk,j->i'\n self._check(s, x, y)\n\n def test_two_operands_3(self):\n r = rng()\n x = r.randn(3, 4, 3)\n y = r.randn(3)\n s = 'iji,i->j'\n self._check(s, x, y)\n\n def test_two_operands_4(self):\n r = rng()\n x = r.randn(3, 4)\n y = r.randn(3, 4)\n s = 'ij,ij->'\n self._check(s, x, y)\n\n def test_two_operands_5(self):\n r = rng()\n x = r.randn(10, 2, 3)\n y = r.randn(3, 4)\n s = 'nij,jk->nik'\n self._check(s, x, y)\n\n def test_two_operands_6(self):\n # based on https://github.com/google/jax/issues/37#issuecomment-448572187\n r = rng()\n x = r.randn(2, 1)\n y = r.randn(2, 3, 4)\n s = 'sa,shb->shab'\n self._check(s, x, y)\n\n def test_one_operand_1(self):\n r = rng()\n x = r.randn(3, 4, 5)\n s = 'ijk->j'\n self._check(s, x)\n\n def test_one_operand_2(self):\n r = rng()\n x = r.randn(3, 4, 5)\n s = 'ijk->kij'\n self._check(s, x)\n\n def test_one_operand_3(self):\n r = rng()\n x = r.randn(3, 4, 5)\n s = 'ijk->ki'\n self._check(s, x)\n\n def test_one_operand_4(self):\n r = rng()\n x = r.randn(3, 4, 5)\n s = 'ijk->ki'\n self._check(s, x)\n\n def test_one_operand_5(self):\n r = rng()\n x = r.randn(2, 3, 4, 5)\n s = '...ijk->...ki'\n self._check(s, x)\n\n def test_one_operand_6(self):\n r = rng()\n x = r.randn(3, 4, 5)\n s = '...ijk->ki'\n self._check(s, x)\n\n def test_one_operand_7(self):\n r = rng()\n x = r.randn(3, 3)\n s = 'ii->'\n self._check(s, x)\n\n def test_one_operand_8(self):\n r = rng()\n x = r.randn(3, 3)\n s = 'ij->'\n self._check(s, x)\n\n def test_one_operand_9(self):\n r = rng()\n x = r.randn(3, 3, 3)\n s = 'iii->'\n self._check(s, x)\n\n def test_one_operand_10(self):\n r = rng()\n x = r.randn(3, 3)\n s = 'ii->i'\n self._check(s, x)\n\n def test_one_operand_11(self):\n r = rng()\n x = r.randn(3, 3, 4)\n s = 'iij->i'\n self._check(s, x)\n\n def test_one_operand_12(self):\n r = 
rng()\n x = r.randn(3, 3, 3)\n s = 'iii->i'\n self._check(s, x)\n\n def test_one_operand_13(self):\n r = rng()\n x = r.randn(3, 3, 5, 4, 4)\n s = 'iijkk->i'\n self._check(s, x)\n\n def test_one_operand_14(self):\n r = rng()\n x = r.randn(3, 3, 5, 4, 4)\n s = 'iijkk->ik'\n self._check(s, x)\n\n def test_one_operand_15(self):\n r = rng()\n x = r.randn(3, 3, 5, 4, 4)\n s = 'iijkl->il'\n self._check(s, x)\n\n def test_one_operand_16(self):\n r = rng()\n x = r.randn(3, 3)\n s = 'ij->ij'\n self._check(s, x)\n\n def test_tf_unsupported_1(self):\n # from https://www.tensorflow.org/api_docs/python/tf/einsum\n r = rng()\n x = r.randn(2, 3, 5, 1)\n y = r.randn(3, 4, 5, 1)\n s = 'ij...,jk...->ik...'\n self._check(s, x, y)\n\n def test_tf_unsupported_2(self):\n # from https://www.tensorflow.org/api_docs/python/tf/einsum\n r = rng()\n x = r.randn(2, 3, 3)\n y = r.randn(4)\n s = 'ijj,k->ik'\n self._check(s, x, y)\n\n def test_tf_unsupported_3(self):\n # from https://www.tensorflow.org/api_docs/python/tf/einsum\n r = rng()\n x = r.randn(2, 3)\n y = r.randn(2, 3)\n z = r.randn(3, 4)\n s = 'ij,ij,jk->ik'\n self._check(s, x, y, z)\n\n # these tests are based on https://github.com/dask/dask/pull/3412/files\n @parameterized.named_parameters(\n {\"testcase_name\": \"_{}\".format(einstr), \"einstr\": einstr}\n for einstr in [\n 'abc,bad->abcd',\n 'abcdef,bcdfg->abcdeg',\n 'ea,fb,abcd,gc,hd->efgh',\n 'ab,b',\n 'aa',\n 'a,a->',\n 'a,a->a',\n 'a,a',\n 'a,b',\n 'a,b,c',\n 'a',\n 'ba,b',\n 'ba,b->',\n 'defab,fedbc->defac',\n 'ab...,bc...->ac...',\n 'a...a',\n 'abc...->cba...',\n '...ab->...a',\n 'a...a->a...',\n # Following 2 from # https://stackoverflow.com/a/19203475/1611416\n '...abc,...abcd->...d',\n 'ab...,b->ab...',\n # https://github.com/dask/dask/pull/3412#discussion_r182413444\n 'aa->a',\n 'ab,ab,c->c',\n 'aab,bc->ac',\n 'aab,bcc->ac',\n 'fdf,cdd,ccd,afe->ae',\n 'fff,fae,bef,def->abd',\n ])\n def test_from_dask(self, einstr):\n r = rng()\n if '->' in einstr:\n input_str, result_names = einstr.split('->')\n else:\n input_str = einstr\n input_names = input_str.split(',')\n\n dims = itertools.cycle([2, 3, 4])\n shapes = defaultdict(lambda: next(dims))\n input_shapes = [tuple(shapes[c] for c in names.replace('...', '01'))\n for names in input_names]\n operands = [r.randn(*shape) for shape in input_shapes]\n\n self._check(einstr, *operands)\n\n def test_ordered_front_batch_dim_case(self):\n x = onp.ones((1,8,20,4))\n y = onp.ones((1,8,20,4))\n s = 'ijkl,ijml->ijkm'\n self._check(s, x, y)\n\n\nif __name__ == '__main__':\n absltest.main()\n" ]
[ [ "numpy.random.RandomState", "numpy.ones", "numpy.einsum" ] ]
tanzhenyu/image_augmentation
[ "d1f8cc35cf25438556e7934e8e6c78827819ea9d" ]
[ "image_augmentation/callbacks/extra_eval.py" ]
[ "import tensorflow as tf\nfrom tensorflow.keras.callbacks import Callback\n\n\nclass ExtraValidation(Callback):\n \"\"\"Log evaluation metrics of an extra validation set. This callback\n is useful for model training scenarios where multiple validation sets\n are used for evaluation (as Keras by default, provides functionality for\n evaluating on a single validation set only).\n\n The evaluation metrics are also logged to TensorBoard.\n\n Args:\n validation_data: A tf.data.Dataset pipeline used to evaluate the\n model, essentially an extra validation dataset.\n tensorboard_path: Path to the TensorBoard logging directory.\n validation_freq: Number of epochs to wait before performing\n subsequent evaluations.\n \"\"\"\n def __init__(self, validation_data, tensorboard_path, validation_freq=1):\n super(ExtraValidation, self).__init__()\n\n self.validation_data = validation_data\n self.tensorboard_path = tensorboard_path\n\n self.tensorboard_writer = tf.summary.create_file_writer(self.tensorboard_path)\n\n self.validation_freq = validation_freq\n\n def on_epoch_end(self, epoch, logs=None):\n # evaluate at an interval of `validation_freq` epochs\n if (epoch + 1) % self.validation_freq == 0:\n # gather metric names form model\n metric_names = ['{}_{}'.format('epoch', metric.name)\n for metric in self.model.metrics]\n # TODO: fix `model.evaluate` memory leak on TPU\n # gather the evaluation metrics\n scores = self.model.evaluate(self.validation_data, verbose=2)\n\n # gather evaluation metrics to TensorBoard\n with self.tensorboard_writer.as_default():\n for metric_name, score in zip(metric_names, scores):\n tf.summary.scalar(metric_name, score, step=epoch)\n" ]
[ [ "tensorflow.summary.scalar", "tensorflow.summary.create_file_writer" ] ]
cristianMeli/ubatch
[ "fb3c6dccf0a9e25e25f5956e2e91ed70e9ea01ee" ]
[ "examples/flask_app.py" ]
[ "import random\nimport numpy as np\n\nfrom typing import Dict, List\n\nfrom flask import Flask\nfrom flask_restx import Resource, Api\n\n# from numpy import genfromtxt\n\nfrom ubatch import ubatch_decorator\n\n# from keras.models import load_model\nfrom sklearn.datasets import fetch_20newsgroups\nfrom sklearn.model_selection import train_test_split\n\n\nfrom joblib import load\n\nngd = fetch_20newsgroups(subset=\"all\")\n\nX = ngd.data\ny = ngd.target\n_, X_test, _, _ = train_test_split(X, y, test_size=0.33)\n\n\nmodel = load(\"xgbregressor.joblib\")\n# X_test = genfromtxt(\"xgbregressor_inputs.csv\", delimiter=\",\")\n\napp = Flask(__name__)\napi = Api(app)\n\n\n@ubatch_decorator(max_size=100, timeout=0.01)\ndef predict(data: List[np.array]) -> List[np.float32]:\n return model.predict(np.array(data)) # type: ignore\n\n\[email protected](\"/predict_ubatch\")\nclass BatchPredict(Resource):\n def post(self) -> Dict[str, float]:\n output = predict.ubatch(random.choice(X_test))\n return {\"prediction\": float(output)}\n\n\[email protected](\"/predict\")\nclass Predict(Resource):\n def post(self) -> Dict[str, float]:\n output = predict([random.choice(X_test)])[0]\n return {\"prediction\": float(output)}\n" ]
[ [ "numpy.array", "sklearn.model_selection.train_test_split", "sklearn.datasets.fetch_20newsgroups" ] ]
simonharris/pykmeans
[ "4d47eb12a2bbaf1b05d7ccfd0cfc9ccf78ddf86d" ]
[ "tests/initialisations/test_bradleyfayyad1998.py" ]
[ "\"\"\"\nTest for Bradley & Fayyad 1998 initialisation algorithm\n\"\"\"\n\nimport unittest\n\nimport numpy as np\n\nfrom datasets import testloader\nfrom initialisations import bradley as bfinit\nimport kmeans\n\n# pylint: disable=R0201,W0212\n\n\nclass BfTestSuite(unittest.TestCase):\n \"\"\"Test suite for B&F\"\"\"\n\n def test_code_runs(self):\n \"\"\"At least prove it runs\"\"\"\n\n dataset = testloader.load_iris()\n centroids = bfinit.generate(dataset.data, 3)\n self.assertEqual((3, 4), centroids.shape)\n\n def test_with_hartigan(self):\n \"\"\"A tiny dataset which can't possibly work here\"\"\"\n\n dataset = testloader.load_hartigan()\n\n with self.assertRaises(ValueError):\n bfinit.generate(dataset.data, 3)\n\n def test_find_furthest(self):\n \"\"\"Find the data point furthest from its cluster center\"\"\"\n\n distances = np.array([\n [1, 2, 3], # 1\n [7, 5, 16], # 5\n [7, 26, 4], # 4\n [19, 20, 21], # 19\n [6, 18, 8] # 6\n ])\n\n np.testing.assert_equal(bfinit._find_furthest(distances), [3])\n np.testing.assert_equal(np.sort(bfinit._find_furthest(distances, 2)),\n [3, 4])\n np.testing.assert_equal(np.sort(bfinit._find_furthest(distances, 3)),\n [1, 3, 4])\n\n def test_with_1_empty(self):\n \"\"\"Seeds and data known to leave one empty cluster after k_means(),\n and thus trigger k_means_mod() to reassign a centroid\"\"\"\n\n seeds = np.array([\n [5.4, 3.0, 4.5, 1.5],\n [6.7, 3.0, 5.0, 1.7],\n [5.1, 3.8, 1.5, 0.3], # Doesn't get any data points assigned\n ])\n\n data = np.array([\n # Assigned to 0 but is furthest, so becomes the new 2\n [6.4, 2.9, 4.3, 1.3],\n [6.3, 3.4, 5.6, 2.4],\n [6.8, 3.0, 5.5, 2.1],\n [5.0, 2.0, 3.5, 1.0],\n [5.8, 2.7, 5.1, 1.9],\n ])\n\n expected_labels = [2, 1, 1, 0, 0]\n\n expected_centroids = [\n [5.4, 2.35, 4.3, 1.45],\n [6.55, 3.2, 5.55, 2.25],\n [6.4, 2.9, 4.3, 1.3], # The new 2\n ]\n\n centroids = bfinit._k_means_mod(seeds, data, len(seeds))\n labels = kmeans.distance_table(data, centroids).argmin(1)\n\n np.testing.assert_array_equal(labels, expected_labels)\n np.testing.assert_array_equal(centroids, expected_centroids)\n\n def _test_with_n_empty(self):\n \"\"\"Seeds and data known to leave more than one empty cluster\n\n This is left as TODO for now, since no way can I force sklearn to\n give me more than one empty cluster.\n \"\"\"\n" ]
[ [ "numpy.array", "numpy.testing.assert_array_equal" ] ]
swharden/SWHLab
[ "a86c3c65323cec809a4bd4f81919644927094bf5" ]
[ "doc/oldcode/swhlab/core/memtest.py" ]
[ "\"\"\"\nMembrane test routines for voltage clamp experiments.\ncreates abf.MTs[sweep]={} #with keys like Ih, Ra, Rm, etc\n\nExample usage:\n abf=swhlab.ABF('../abfs/group/16701010.abf')\n swhlab.memtest.memtest(abf) #performs memtest on all sweeps\n swhlab.memtest.checkSweep(abf) #lets you eyeball check how it did\n pylab.show()\n\"\"\"\n\nimport os\nimport sys\nimport pylab\nimport numpy as np\nimport time\n\nimport swhlab\nimport swhlab.core.common as cm\nexampleABF=swhlab.ABF()\n\ndef memtestSweepVC(abf=exampleABF):\n \"\"\"\n perform memtest on current sweep in VC mode. Return Ih, Ra, Rm, etc.\n All variable names are explained in /swhlab/docs/memtest.ppt\n \"\"\"\n if abf.protoSeqY[1]>abf.protoSeqY[0] or len(abf.protoSeqY)<3:\n return \"protocol doesn't step down and back up\"\n TA,TB=int(abf.protoSeqX[1]),int(abf.protoSeqX[2])\n dT=int(TB-TA)\n T1A=int(TA+.5*dT)\n T1B=int(TA+.9*dT)\n T2A=T1A+dT\n T2B=T1B+dT\n P1=np.average(abf.dataY[T1A:T1B])\n P2=np.average(abf.dataY[T2A:T2B])\n dI=P2-P1\n dV=abf.protoSeqY[2]-abf.protoSeqY[1]\n PP=np.max(abf.dataY[TB:TB+100])# peak found within first 100 points\n TP=np.where(abf.dataY[TB:TB+150]==PP)[0][0]+TB\n dP=PP-P1\n dTC=PP-P2\n PCA=P2+.9*dTC # upper fraction for Cm detection\n PCB=P2+.1*dTC # upper fraction for Cm detection\n PCtau=P2+.37*dTC # crossing point of theoretical tau\n TCA=np.where(abf.dataY[TP:T2A]<PCA)[0][0]+TP\n TCB=np.where(abf.dataY[TP:T2A]<PCB)[0][0]+TP\n dTCT=TCB-TCA #number of points available for fitting\n Ih=P2\n Ra=(dV*10**3)/(PP-P2) #MOhm=uV/pA\n Rm=(dV*10**3)/(P2-P1) #MOhm=uV/pA\n fitM,fitT,fitB,fitTau=cm.fit_exp(abf.dataY[TCA:TCB]) #same units as given\n fitTau=fitTau*1000/abf.rate #time constant convert to ms units\n Tv=fitTau #time constant of extrinsic voltage clamp\n Cm=Tv/Ra*1000 #us/MOhm is pF\n Tm=Rm*Cm/1000 #time constant of cell membrane (intrinsic voltage clamp)\n del abf\n return locals()\n\ndef memtestIC(abf=exampleABF):\n \"\"\"\n IC memtest is different. 
Make an average sweep, then curve fit it.\n This only RETURNS the memtest, it does not assign it.\n \"\"\"\n if abf.protoSeqY[1]>abf.protoSeqY[0] or len(abf.protoSeqY)<3:\n return \"protocol doesn't step down and back up\"\n abf.baseline=[abf.protoSeqX[1]/abf.rate*.75,abf.protoSeqX[1]/abf.rate]\n T1A,T1B=np.array(abf.baseline)*abf.rate\n Xs,Ys,Er=abf.average_sweep()\n T2A=abf.protoSeqX[2]-abf.protoSeqX[1]\n T2B=abf.protoSeqX[2]\n M2=np.average(Ys[T2A:T2B])\n MCA=.1*M2 # set 90% here\n MCB=.9*M2 # set 10% here\n TCA=np.where(Ys<MCA)[0][0]\n TCB=np.where(Ys<MCB)[0][0]\n m,t,b,tc=cm.fit_exp(Ys[TCA:TCB]) #do the fit!\n dI=abs(abf.protoSeqY[2]-abf.protoSeqY[1]) #pA\n dV=abs(M2) #mV\n Rm=dV/dI*1000 #uV/pA = MOhm\n Cm=tc/Rm #ms/MOhm\n del abf,Ys,Xs,Er\n return locals() #convert to structured array\n\ndef memtest(abf=exampleABF,firstSweepOnly=False,plotToo=False,saveToo=True):\n \"\"\"perform memtest on all sweeps.\"\"\"\n timeStart=time.clock()\n if abf.units==\"mV\":\n abf.MTs = memtestIC(abf)\n else:\n abf.MTs=[None]*abf.sweeps\n for sweep in range(abf.sweeps):\n abf.setSweep(sweep)\n result=memtestSweepVC(abf)\n if type(result) is dict:\n abf.MTs[abf.currentSweep]=result\n else:\n print(\"MEMTEST FAILED - sweep %d -\"%sweep,result)\n if firstSweepOnly:\n return\n abf.MTs = cm.matrixfromDicts(abf.MTs) #convert to structured array\n took=time.clock()-timeStart\n print(\" -- memtest performed on %d sweeps in %.02f ms\"%(abf.sweeps,took*1000))\n if saveToo:\n abf.saveThing(abf.MTs,\"MTs\")\n\ndef plot_standard4(abf=exampleABF):\n \"\"\"make a standard memtest plot showing Ih, Ra, etc. with time.\"\"\"\n if abf.sweeps<2:\n return\n swhlab.plot.new(abf)\n Xs=np.arange(abf.sweeps)*abf.sweepInterval/60\n subplots=[221,222,223,224]\n features=['Ih','Ra','Rm','Cm']\n units=['pA','MOhm','MOhm','pF']\n for subplot,feature,unit in zip(subplots,features,units):\n pylab.subplot(subplot)\n pylab.grid(alpha=.5)\n #pylab.title(feature)\n pylab.plot(Xs,cm.dictVals(abf.MTs,feature),'.-',alpha=.5)\n pylab.xlabel(None)\n pylab.ylabel(\"%s (%s)\"%(feature,unit))\n swhlab.plot.comments(abf,True)\n pylab.margins(0,.1)\n\ndef checkSweepIC(abf=exampleABF,sweep=0):\n \"\"\"Produce an eyeball-ready indication how the MT was calculated in IC.\"\"\"\n _keys = abf.MTs.dtype.names\n for key in _keys:\n globals()[key]=abf.MTs[key] # only global for this module, that's fine\n fitted=cm.algo_exp(np.arange(TCB-TCA),m,t,b)\n swhlab.plot.new(abf,forceNewFigure=True)\n Xs,Ys,Er=abf.average_sweep()\n for subplot in [121,122]:\n pylab.subplot(subplot)\n pylab.axhline(0,color='b',lw=2,alpha=.5,ls=\"--\")\n pylab.axhline(M2,color='b',lw=2,alpha=.5,ls=\"--\")\n swhlab.plot.sweep(abf,'all',rainbow=False,color='#CCCCCC',alpha=.5)\n pylab.plot(Xs,Ys,color='k',alpha=.5)\n pylab.plot(Xs[T1A:T1B],Ys[T1A:T1B],color='b',lw=2)\n pylab.plot(Xs[T2A:T2B],Ys[T2A:T2B],color='b',lw=2)\n pylab.plot(abf.dataX[TCA:TCB],fitted,color='r',lw=2,ls='--')\n pylab.axis([(TCA-100)/abf.rate,(TCB+100)/abf.rate,None,None])\n pylab.tight_layout()\n msg=\"tau: %.02f ms\\n\"%(tc/abf.rate*1000)\n msg+=\"Rm: %.02f MOhm\\n\"%(Rm)\n msg+=\"Cm: %.02f pF\"%(Cm)\n pylab.annotate(msg,(.75,.95),ha='left',va='top',weight='bold',family='monospace',\n xycoords='figure fraction',size=12,color='g')\n swhlab.plot.annotate(abf)\n return\n\ndef checkSweep(abf=exampleABF,sweep=0):\n \"\"\"Produce an eyeball-ready indication how the MT was calculated in VC.\"\"\"\n if abf.units==\"mV\":\n return checkSweepIC(abf,sweep)\n if abf.MTs[sweep] is None:\n return False #no memtest data even 
found\n _keys = abf.MTs[sweep].dtype.names\n for key in _keys:\n globals()[key]=abf.MTs[sweep][key] # only global for this module, that's fine.\n _msg2=\"Average (n=%d)\\n\"%abf.sweeps\n _msg=\"\"\n for i in range(len(_keys)):\n _msg+=\"%s=%s\\n\"%(_keys[i],abf.MTs[sweep][i])\n if _keys[i] in ['Ih','Ra','Rm','Cm','Tv','Tm']:\n _msg2+=\"%s=%.02f\\n\"%(_keys[i],abf.MTs[sweep][i])\n fitted=cm.algo_exp(np.arange(TCB-TCA),fitM,fitT,fitB)\n pylab.figure(figsize=(8,8))\n for subplot in [211,212]:\n pylab.subplot(subplot)\n #pylab.plot(abf.dataX,abf.dataY,alpha=.2,color='k',lw=2)\n pylab.plot(abf.dataX[:TCA],abf.dataY[:TCA],alpha=.2,color='k',lw=2)\n pylab.plot(abf.dataX[TCB:],abf.dataY[TCB:],alpha=.2,color='k',lw=2)\n pylab.plot(abf.dataX[TCA:TCB],abf.dataY[TCA:TCB],'o',alpha=.5,lw=4,mfc='none',mec='r')\n pylab.plot(abf.dataX[T1A:T1B],abf.dataY[T1A:T1B],alpha=.4,color='b')\n pylab.plot(abf.dataX[T2A:T2B],abf.dataY[T2A:T2B],alpha=.4,color='b')\n pylab.plot(abf.dataX[TCA:TCB],fitted,color='k',lw=2,ls=\"--\")\n for i in [TA, TB]:\n pylab.axvline(i/abf.rate,color='k',ls='--',alpha=.4)\n for i in [P1,P2]:\n pylab.axhline(i,color='b',ls=\"--\",alpha=.5)\n for i in [PCA,PCB,PP]:\n pylab.axhline(i,color='g',ls=\"--\",alpha=.5)\n pylab.tight_layout()\n pylab.subplots_adjust(right=0.75)\n pylab.annotate(_msg,(.8,.75),ha='left',va='top',alpha=.5,\n xycoords='figure fraction',family='monospace',size=10)\n pylab.annotate(_msg2,(.8,.95),ha='left',va='top',weight='bold',family='monospace',\n xycoords='figure fraction',size=12,color='g')\n pylab.subplot(211)\n pylab.axis([None,abf.dataX[T2B]+.05,None,None])\n pylab.subplot(212)\n pylab.axis([(TB-20)/abf.rate,(TCB+20)/abf.rate,P1-20,PP+20])\n swhlab.plot.annotate(abf)\n for key in _keys:\n del key #be clean about screwing with globals()\n return\n\ndef test():\n \"\"\"voltage clamp MT.\"\"\"\n abf=swhlab.ABF(r'C:\\Apps\\pythonModules\\abfs\\16701010.abf')\n swhlab.memtest.memtest(abf) #performs memtest on all sweeps\n swhlab.memtest.checkSweep(abf) #lets you eyeball check how it did\n pylab.show()\n\ndef test2():\n \"\"\"current clamp MT.\"\"\"\n abf=swhlab.ABF(r'C:\\Apps\\pythonModules\\abfs\\16701006.abf')\n swhlab.memtest.memtest(abf) #performs memtest on all sweeps\n swhlab.memtest.checkSweep(abf) #lets you eyeball check how it did\n pylab.show()\n\nif __name__==\"__main__\":\n #test()\n #test2()\n test3()\n print(\"DONE\")" ]
[ [ "numpy.arange", "numpy.max", "numpy.array", "numpy.where", "numpy.average" ] ]
JelleAalbers/lenstronomy
[ "6db785667ff099fa8338e972b66253b2901b2827" ]
[ "lenstronomy/LensModel/MultiPlane/multi_plane_base.py" ]
[ "import numpy as np\nfrom lenstronomy.Cosmo.background import Background\nfrom lenstronomy.LensModel.profile_list_base import ProfileListBase\nimport lenstronomy.Util.constants as const\n\n__all__ = ['MultiPlaneBase']\n\n\nclass MultiPlaneBase(ProfileListBase):\n\n \"\"\"\n Multi-plane lensing class\n\n The lens model deflection angles are in units of reduced deflections from the specified redshift of the lens to the\n source redshift of the class instance.\n \"\"\"\n\n def __init__(self, lens_model_list, lens_redshift_list, z_source_convention, cosmo=None,\n numerical_alpha_class=None, cosmo_interp=False, z_interp_stop=None, num_z_interp=100):\n \"\"\"\n A description of the recursive multi-plane formalism can be found e.g. here: https://arxiv.org/abs/1312.1536\n\n :param lens_model_list: list of lens model strings\n :param lens_redshift_list: list of floats with redshifts of the lens models indicated in lens_model_list\n :param z_source_convention: float, redshift of a source to define the reduced deflection angles of the lens\n models. If None, 'z_source' is used.\n :param cosmo: instance of astropy.cosmology\n :param numerical_alpha_class: an instance of a custom class for use in NumericalAlpha() lens model\n (see documentation in Profiles/numerical_alpha)\n\n \"\"\"\n if z_interp_stop is None:\n z_interp_stop = z_source_convention\n self._cosmo_bkg = Background(cosmo, interp=cosmo_interp, z_stop=z_interp_stop, num_interp=num_z_interp)\n self._z_source_convention = z_source_convention\n if len(lens_redshift_list) > 0:\n z_lens_max = np.max(lens_redshift_list)\n if z_lens_max >= z_source_convention:\n raise ValueError('deflector redshifts higher or equal the source redshift convention (%s >= %s for the reduced lens'\n ' model quantities not allowed (leads to negative reduced deflection angles!'\n % (z_lens_max, z_source_convention))\n if not len(lens_model_list) == len(lens_redshift_list):\n raise ValueError(\"The length of lens_model_list does not correspond to redshift_list\")\n\n self._lens_redshift_list = lens_redshift_list\n super(MultiPlaneBase, self).__init__(lens_model_list, numerical_alpha_class=numerical_alpha_class,\n lens_redshift_list=lens_redshift_list,\n z_source_convention=z_source_convention)\n\n if len(lens_model_list) < 1:\n self._sorted_redshift_index = []\n else:\n self._sorted_redshift_index = self._index_ordering(lens_redshift_list)\n z_before = 0\n T_z = 0\n self._T_ij_list = []\n self._T_z_list = []\n # Sort redshift for vectorized reduced2physical factor calculation\n if len(lens_model_list)<1:\n self._reduced2physical_factor = []\n else:\n z_sort = np.array(self._lens_redshift_list)[self._sorted_redshift_index]\n z_source_array = np.ones(z_sort.shape)*z_source_convention\n self._reduced2physical_factor = self._cosmo_bkg.d_xy(0, z_source_convention) / self._cosmo_bkg.d_xy(z_sort, z_source_array)\n for idex in self._sorted_redshift_index:\n z_lens = self._lens_redshift_list[idex]\n if z_before == z_lens:\n delta_T = 0\n else:\n T_z = self._cosmo_bkg.T_xy(0, z_lens)\n delta_T = self._cosmo_bkg.T_xy(z_before, z_lens)\n self._T_ij_list.append(delta_T)\n self._T_z_list.append(T_z)\n z_before = z_lens\n\n def ray_shooting_partial(self, x, y, alpha_x, alpha_y, z_start, z_stop, kwargs_lens,\n include_z_start=False, T_ij_start=None, T_ij_end=None):\n \"\"\"\n ray-tracing through parts of the coin, starting with (x,y) co-moving distances and angles (alpha_x, alpha_y)\n at redshift z_start and then backwards to redshift z_stop\n\n :param x: co-moving position [Mpc]\n 
:param y: co-moving position [Mpc]\n :param alpha_x: ray angle at z_start [arcsec]\n :param alpha_y: ray angle at z_start [arcsec]\n :param z_start: redshift of start of computation\n :param z_stop: redshift where output is computed\n :param kwargs_lens: lens model keyword argument list\n :param include_z_start: bool, if True, includes the computation of the deflection angle at the same redshift as\n the start of the ray-tracing. ATTENTION: deflection angles at the same redshift as z_stop will be computed always!\n This can lead to duplications in the computation of deflection angles.\n :param T_ij_start: transverse angular distance between the starting redshift to the first lens plane to follow.\n If not set, will compute the distance each time this function gets executed.\n :param T_ij_end: transverse angular distance between the last lens plane being computed and z_end.\n If not set, will compute the distance each time this function gets executed.\n :return: co-moving position and angles at redshift z_stop\n \"\"\"\n x = np.array(x, dtype=float)\n y = np.array(y, dtype=float)\n alpha_x = np.array(alpha_x)\n alpha_y = np.array(alpha_y)\n z_lens_last = z_start\n first_deflector = True\n\n for i, idex in enumerate(self._sorted_redshift_index):\n z_lens = self._lens_redshift_list[idex]\n\n if self._start_condition(include_z_start, z_lens, z_start) and z_lens <= z_stop:\n if first_deflector is True:\n if T_ij_start is None:\n if z_start == 0:\n delta_T = self._T_ij_list[0]\n else:\n delta_T = self._cosmo_bkg.T_xy(z_start, z_lens)\n else:\n delta_T = T_ij_start\n first_deflector = False\n else:\n delta_T = self._T_ij_list[i]\n x, y = self._ray_step_add(x, y, alpha_x, alpha_y, delta_T)\n alpha_x, alpha_y = self._add_deflection(x, y, alpha_x, alpha_y, kwargs_lens, i)\n\n z_lens_last = z_lens\n if T_ij_end is None:\n if z_lens_last == z_stop:\n delta_T = 0\n else:\n delta_T = self._cosmo_bkg.T_xy(z_lens_last, z_stop)\n else:\n delta_T = T_ij_end\n x, y = self._ray_step_add(x, y, alpha_x, alpha_y, delta_T)\n return x, y, alpha_x, alpha_y\n\n def transverse_distance_start_stop(self, z_start, z_stop, include_z_start=False):\n \"\"\"\n computes the transverse distance (T_ij) that is required by the ray-tracing between the starting redshift and\n the first deflector afterwards and the last deflector before the end of the ray-tracing.\n\n :param z_start: redshift of the start of the ray-tracing\n :param z_stop: stop of ray-tracing\n :param include_z_start: boolean, if True includes the computation of the starting position if the first\n deflector is at z_start\n :return: T_ij_start, T_ij_end\n \"\"\"\n z_lens_last = z_start\n first_deflector = True\n T_ij_start = None\n for i, idex in enumerate(self._sorted_redshift_index):\n z_lens = self._lens_redshift_list[idex]\n if self._start_condition(include_z_start, z_lens, z_start) and z_lens <= z_stop:\n if first_deflector is True:\n T_ij_start = self._cosmo_bkg.T_xy(z_start, z_lens)\n first_deflector = False\n z_lens_last = z_lens\n T_ij_end = self._cosmo_bkg.T_xy(z_lens_last, z_stop)\n return T_ij_start, T_ij_end\n\n def geo_shapiro_delay(self, theta_x, theta_y, kwargs_lens, z_stop, T_z_stop=None, T_ij_end=None):\n \"\"\"\n geometric and Shapiro (gravitational) light travel time relative to a straight path through the coordinate (0,0)\n Negative sign means earlier arrival time\n\n :param theta_x: angle in x-direction on the image\n :param theta_y: angle in y-direction on the image\n :param kwargs_lens: lens model keyword argument list\n :param z_stop: 
redshift of the source to stop the backwards ray-tracing\n :param T_z_stop: optional, transversal angular distance from z=0 to z_stop\n :param T_ij_end: optional, transversal angular distance between the last lensing plane and the source plane\n :return: dt_geo, dt_shapiro, [days]\n \"\"\"\n dt_grav = np.zeros_like(theta_x, dtype=float)\n dt_geo = np.zeros_like(theta_x, dtype=float)\n x = np.zeros_like(theta_x, dtype=float)\n y = np.zeros_like(theta_y, dtype=float)\n alpha_x = np.array(theta_x, dtype=float)\n alpha_y = np.array(theta_y, dtype=float)\n i = 0\n z_lens_last = 0\n for i, index in enumerate(self._sorted_redshift_index):\n z_lens = self._lens_redshift_list[index]\n if z_lens <= z_stop:\n T_ij = self._T_ij_list[i]\n x_new, y_new = self._ray_step(x, y, alpha_x, alpha_y, T_ij)\n if i == 0:\n pass\n elif T_ij > 0:\n T_j = self._T_z_list[i]\n T_i = self._T_z_list[i - 1]\n beta_i_x, beta_i_y = x / T_i, y / T_i\n beta_j_x, beta_j_y = x_new / T_j, y_new / T_j\n dt_geo_new = self._geometrical_delay(beta_i_x, beta_i_y, beta_j_x, beta_j_y, T_i, T_j, T_ij)\n dt_geo += dt_geo_new\n x, y = x_new, y_new\n dt_grav_new = self._gravitational_delay(x, y, kwargs_lens, i, z_lens)\n alpha_x, alpha_y = self._add_deflection(x, y, alpha_x, alpha_y, kwargs_lens, i)\n\n dt_grav += dt_grav_new\n z_lens_last = z_lens\n if T_ij_end is None:\n T_ij_end = self._cosmo_bkg.T_xy(z_lens_last, z_stop)\n T_ij = T_ij_end\n x_new, y_new = self._ray_step(x, y, alpha_x, alpha_y, T_ij)\n if T_z_stop is None:\n T_z_stop = self._cosmo_bkg.T_xy(0, z_stop)\n T_j = T_z_stop\n T_i = self._T_z_list[i]\n beta_i_x, beta_i_y = x / T_i, y / T_i\n beta_j_x, beta_j_y = x_new / T_j, y_new / T_j\n dt_geo_new = self._geometrical_delay(beta_i_x, beta_i_y, beta_j_x, beta_j_y, T_i, T_j, T_ij)\n dt_geo += dt_geo_new\n return dt_geo, dt_grav\n\n @staticmethod\n def _index_ordering(redshift_list):\n \"\"\"\n\n :param redshift_list: list of redshifts\n :return: indexes in ascending order to be evaluated (from z=0 to z=z_source)\n \"\"\"\n redshift_list = np.array(redshift_list)\n #sort_index = np.argsort(redshift_list[redshift_list < z_source])\n sort_index = np.argsort(redshift_list)\n #if len(sort_index) < 1:\n # Warning(\"There is no lens object between observer at z=0 and source at z=%s\" % z_source)\n return sort_index\n\n def _reduced2physical_deflection(self, alpha_reduced, index_lens):\n \"\"\"\n alpha_reduced = D_ds/Ds alpha_physical\n\n :param alpha_reduced: reduced deflection angle\n :param index_lens: integer, index of the deflector plane\n :return: physical deflection angle\n \"\"\"\n factor = self._reduced2physical_factor[index_lens]\n return alpha_reduced * factor\n\n def _gravitational_delay(self, x, y, kwargs_lens, index, z_lens):\n \"\"\"\n\n :param x: co-moving coordinate at the lens plane\n :param y: co-moving coordinate at the lens plane\n :param kwargs_lens: lens model keyword arguments\n :param z_lens: redshift of the deflector\n :param index: index of the lens model in sorted redshfit convention\n :return: gravitational delay in units of days as seen at z=0\n \"\"\"\n theta_x, theta_y = self._co_moving2angle(x, y, index)\n k = self._sorted_redshift_index[index]\n potential = self.func_list[k].function(theta_x, theta_y, **kwargs_lens[k])\n delay_days = self._lensing_potential2time_delay(potential, z_lens, z_source=self._z_source_convention)\n return -delay_days\n\n @staticmethod\n def _geometrical_delay(beta_i_x, beta_i_y, beta_j_x, beta_j_y, T_i, T_j, T_ij):\n \"\"\"\n\n :param beta_i_x: angle on the sky at plane i\n 
:param beta_i_y: angle on the sky at plane i\n :param beta_j_x: angle on the sky at plane j\n :param beta_j_y: angle on the sky at plane j\n :param T_i: transverse diameter distance to z_i\n :param T_j: transverse diameter distance to z_j\n :param T_ij: transverse diameter distance from z_i to z_j\n :return: excess delay relative to a straight line\n \"\"\"\n d_beta_x = beta_j_x - beta_i_x\n d_beta_y = beta_j_y - beta_i_y\n tau_ij = T_i * T_j / T_ij * const.Mpc / const.c / const.day_s * const.arcsec**2\n return tau_ij * (d_beta_x ** 2 + d_beta_y ** 2) / 2\n\n def _lensing_potential2time_delay(self, potential, z_lens, z_source):\n \"\"\"\n transforms the lensing potential (in units arcsec^2) to a gravitational time-delay as measured at z=0\n\n :param potential: lensing potential\n :param z_lens: redshift of the deflector\n :param z_source: redshift of source for the definition of the lensing quantities\n :return: gravitational time-delay in units of days\n \"\"\"\n D_dt = self._cosmo_bkg.ddt(z_lens, z_source)\n delay_days = const.delay_arcsec2days(potential, D_dt)\n return delay_days\n\n def _co_moving2angle(self, x, y, index):\n \"\"\"\n transforms co-moving distances Mpc into angles on the sky (radian)\n\n :param x: co-moving distance\n :param y: co-moving distance\n :param index: index of plane\n :return: angles on the sky\n \"\"\"\n T_z = self._T_z_list[index]\n theta_x = x / T_z\n theta_y = y / T_z\n return theta_x, theta_y\n\n @staticmethod\n def _ray_step(x, y, alpha_x, alpha_y, delta_T):\n \"\"\"\n ray propagation with small angle approximation\n\n :param x: co-moving x-position\n :param y: co-moving y-position\n :param alpha_x: deflection angle in x-direction at (x, y)\n :param alpha_y: deflection angle in y-direction at (x, y)\n :param delta_T: transverse angular diameter distance to the next step\n :return: co-moving position at the next step (backwards)\n \"\"\"\n x_ = x + alpha_x * delta_T\n y_ = y + alpha_y * delta_T\n return x_, y_\n\n @staticmethod\n def _ray_step_add(x, y, alpha_x, alpha_y, delta_T):\n \"\"\"\n ray propagation with small angle approximation\n\n :param x: co-moving x-position\n :param y: co-moving y-position\n :param alpha_x: deflection angle in x-direction at (x, y)\n :param alpha_y: deflection angle in y-direction at (x, y)\n :param delta_T: transverse angular diameter distance to the next step\n :return: co-moving position at the next step (backwards)\n \"\"\"\n x += alpha_x * delta_T\n y += alpha_y * delta_T\n return x, y\n\n def _add_deflection(self, x, y, alpha_x, alpha_y, kwargs_lens, index):\n \"\"\"\n adds the physical deflection angle of a single lens plane to the deflection field\n\n :param x: co-moving distance at the deflector plane\n :param y: co-moving distance at the deflector plane\n :param alpha_x: physical angle (radian) before the deflector plane\n :param alpha_y: physical angle (radian) before the deflector plane\n :param kwargs_lens: lens model parameter kwargs\n :param index: index of the lens model to be added in sorted redshift list convention\n :param idex_lens: redshift of the deflector plane\n :return: updated physical deflection after deflector plane (in a backwards ray-tracing perspective)\n \"\"\"\n theta_x, theta_y = self._co_moving2angle(x, y, index)\n k = self._sorted_redshift_index[index]\n alpha_x_red, alpha_y_red = self.func_list[k].derivatives(theta_x, theta_y, **kwargs_lens[k])\n alpha_x_phys = self._reduced2physical_deflection(alpha_x_red, index)\n alpha_y_phys = self._reduced2physical_deflection(alpha_y_red, index)\n 
return alpha_x - alpha_x_phys, alpha_y - alpha_y_phys\n\n @staticmethod\n def _start_condition(inclusive, z_lens, z_start):\n \"\"\"\n\n :param inclusive: boolean, if True selects z_lens including z_start, else only selects z_lens > z_start\n :param z_lens: deflector redshift\n :param z_start: starting redshift (lowest redshift)\n :return: boolean of condition\n \"\"\"\n\n if inclusive:\n return z_lens >= z_start\n else:\n return z_lens > z_start\n" ]
[ [ "numpy.zeros_like", "numpy.ones", "numpy.argsort", "numpy.max", "numpy.array" ] ]
benvcutilli/CountingPlusFriendly
[ "1947e2a765e20c87e080da22b4ecc4da1f272b02" ]
[ "Friendly/LaTeX/figures/cosinecomparison.py" ]
[ "# The Plotly^^^plotly^^^ package\nimport plotly\n\n# Importing ^^^numpy^^^\nimport numpy\n\n\n\n\ndef sigmoid(x):\n return (1 + numpy.exp(-x)) ** -1\n\n\n\n\n\n\n\n\n\nsamplesPerDimension = 500\n# Using numpy.linspace to create x and y values is from somewhere on ^^^plotly^^^'s website, most\n# likely. It is a convenient way to do this, so that's why.\nevaluationRange = numpy.linspace([-5, -5], [5, 5], samplesPerDimension, axis=1)\n\n\n# Using the technique that I used from networkcomponents.py (PairwiseDifference) where one dimension\n# is on the first axis and the other is on the second axis so that they can broadcast to create all\n# permutations between the array of x values and the array of y values. Before broadcasting, we need\n# to add a dimension to both the x vector and y vector, but at the beginning and end of them,\n# respectively, which is also what happens in PairwiseDifference. However, this code doesn't\n# actually broadcast, but it mimics broadcasting with the .repeat(...) calls.\n####################################################################################################\n# #\n\nx = numpy.expand_dims(evaluationRange[0], 0).repeat(samplesPerDimension, 0)\ny = numpy.expand_dims(evaluationRange[1], 1).repeat(samplesPerDimension, 1)\nevaluationPairs = numpy.stack([x, y], 2)\n\n# #\n####################################################################################################\n\nweights = numpy.array([1, 1])\nconstant = 1.0\n\n# Calculating every combination for the three functions\ndotProduct = numpy.dot(evaluationPairs, weights)\ncosine = dotProduct \\\n / \\\n ( numpy.linalg.norm(weights) * numpy.linalg.norm(evaluationPairs, axis=2) )\nsoftenedCosine = dotProduct \\\n / \\\n ( numpy.linalg.norm(weights) * numpy.linalg.norm(evaluationPairs, axis=2) + constant)\n\n\n\ndotProductSurface = plotly.graph_objects.Surface(\n x=evaluationRange[0],\n y=evaluationRange[1], z=sigmoid(dotProduct)\n )\n\ncosineSurface = plotly.graph_objects.Surface(\n x=evaluationRange[0],\n y=evaluationRange[1], z=cosine\n )\n\nsoftenedCosineSurface = plotly.graph_objects.Surface(\n x=evaluationRange[0],\n y=evaluationRange[1], z=softenedCosine\n )\n\n\nfigure = plotly.graph_objects.Figure(\n softenedCosineSurface,\n layout={ \"scene\": { \"aspectmode\": \"data\" } }\n )\n\n# \"validate\" left as True partially because I trust the default value listed in\n# ^^^plotlyfigureshow^^^\nfigure.show(renderer=\"firefox\")\n\n#figure.write_image(\"graph.png\", \"png\", 1200, 900, 1.0, True, \"kaleido\")" ]
[ [ "numpy.linalg.norm", "numpy.array", "numpy.exp", "numpy.expand_dims", "numpy.stack", "numpy.dot", "numpy.linspace" ] ]
530824679/YOLOv2
[ "eff9ddbab58da970e7fb449cd1974fb810fd6023" ]
[ "model/ops.py" ]
[ "# -*- coding: utf-8 -*-\n# --------------------------------------\n# @Time : 2020/11/01\n# @Author : Oscar Chen\n# @Email : [email protected]\n# @File : ops.py\n# Description :base operators.\n# --------------------------------------\n\nimport tensorflow as tf\n\ndef leaky_relu(x):\n return tf.nn.leaky_relu(x, alpha=0.1, name='leaky_relu')\n\ndef conv2d(inputs, filters_num, filters_size, pad_size=0, stride=1, batch_normalize=True, activation=leaky_relu, use_bias=False, is_train=True, name='conv2d'):\n if pad_size > 0:\n inputs = tf.pad(inputs, [[0,0], [pad_size, pad_size], [pad_size, pad_size],[0,0]])\n\n out = tf.layers.conv2d(inputs, filters=filters_num, kernel_size=filters_size, strides=stride, padding='VALID', activation=None, use_bias=use_bias, name=name)\n\n if batch_normalize:\n out = tf.layers.batch_normalization(out, axis=-1, momentum=0.9, training=is_train, name=name+'_bn')\n\n if activation:\n out = activation(out)\n\n return out\n\ndef maxpool(inputs, size=2, stride=2, name='maxpool'):\n with tf.name_scope(name):\n out = tf.layers.max_pooling2d(inputs, pool_size=size, strides=stride, padding='SAME')\n return out\n\ndef reorg(inputs, stride):\n return tf.space_to_depth(inputs, block_size=stride)" ]
[ [ "tensorflow.pad", "tensorflow.layers.conv2d", "tensorflow.layers.batch_normalization", "tensorflow.space_to_depth", "tensorflow.nn.leaky_relu", "tensorflow.name_scope", "tensorflow.layers.max_pooling2d" ] ]
thaipduong/safe-control-gym
[ "69f8f627d232d50813a7fff6113dd6d5caccf930" ]
[ "safe_control_gym/controllers/mpc/gp_mpc_hexa.py" ]
[ "\"\"\"Model Predictive Control with a Gaussian Process model.\n\nBased on:\n * L. Hewing, J. Kabzan and M. N. Zeilinger, \"Cautious Model Predictive Control Using Gaussian Process Regression,\"\n in IEEE Transactions on Control Systems Technology, vol. 28, no. 6, pp. 2736-2743, Nov. 2020, doi: 10.1109/TCST.2019.2949757.\n\nImplementation details:\n 1. The previous time step MPC solution is used to compute the set constraints and GP dynamics rollout.\n Here, the dynamics are rolled out using the Mean Equivelence method, the fastest, but least accurate.\n 2. The GP is approximated using the Fully Independent Training Conditional (FITC) outlined in\n * J. Quinonero-Candela, C. E. Rasmussen, and R. Herbrich, “A unifying view of sparse approximate Gaussian process regression,”\n Journal of Machine Learning Research, vol. 6, pp. 1935–1959, 2005.\n https://www.jmlr.org/papers/volume6/quinonero-candela05a/quinonero-candela05a.pdf\n * E. Snelson and Z. Ghahramani, “Sparse gaussian processes using pseudo-inputs,” in Advances in Neural Information Processing\n Systems, Y. Weiss, B. Scholkopf, and J. C. Platt, Eds., 2006, pp. 1257–1264.\n and the inducing points are the previous MPC solution.\n 3. Each dimension of the learned error dynamics is an independent Zero Mean SE Kernel GP.\n\n\"\"\"\nimport scipy\nimport numpy as np\nimport casadi as cs\nimport time\nimport torch\nimport gpytorch\n\nfrom copy import deepcopy\nfrom skopt.sampler import Lhs\nfrom functools import partial\nfrom sklearn.model_selection import train_test_split\n\nfrom safe_control_gym.controllers.mpc.linear_mpc import LinearMPC, MPC\nfrom safe_control_gym.controllers.mpc.mpc_utils import discretize_linear_system\nfrom safe_control_gym.controllers.mpc.gp_utils import GaussianProcessCollection, ZeroMeanIndependentGPModel, covSEard\nfrom safe_control_gym.envs.benchmark_env import Task\n\n\nclass GPMPC(MPC):\n \"\"\"MPC with Gaussian Process as dynamics residual. \n\n \"\"\"\n\n def __init__(\n self,\n env_func,\n seed: int = 1337,\n horizon: int = 5,\n q_mpc: list = [1],\n r_mpc: list = [1],\n additional_constraints: list = None,\n use_prev_start: bool = True,\n train_iterations: int = 800,\n validation_iterations: int = 200,\n optimization_iterations: list = None,\n learning_rate: list = None,\n normalize_training_data: bool = False,\n use_gpu: bool = False,\n gp_model_path: str = None,\n prob: float = 0.955,\n initial_rollout_std: float = 0.005,\n input_mask: list = None,\n target_mask: list = None,\n gp_approx: str = 'mean_eq',\n sparse_gp: bool = False,\n online_learning: bool = False,\n inertial_prop: list = [1.0],\n prior_param_coeff: float = 1.0,\n output_dir: str = \"results/temp\",\n **kwargs\n ):\n \"\"\"Initialize GP-MPC.\n\n Args:\n env_func (gym.Env): functionalized initialization of the environment.\n seed (int): random seed.\n horizon (int): MPC planning horizon.\n Q, R (np.array): cost weight matrix.\n use_prev_start (bool): Warmstart mpc with the previous solution.\n train_iterations (int): the number of training examples to use for each dimension of the GP.\n validation_iterations (int): the number of points to use use for the test set during training.\n optimization_iterations (list): the number of optimization iterations for each dimension of the GP.\n learning_rate (list): the learning rate for training each dimension of the GP.\n normalize_training_data (bool): Normalize the training data.\n use_gpu (bool): use GPU while training the gp.\n gp_model_path (str): path to a pretrained GP model. 
If None, will train a new one.\n output_dir (str): directory to store model and results.\n prob (float): desired probabilistic safety level.\n initial_rollout_std (float): the initial std (across all states) for the mean_eq rollout.\n inertial_prop (list): to initialize the inertial properties of the prior model.\n prior_param_coeff (float): constant multiplying factor to adjust the prior model intertial properties.\n input_mask (list): list of which input dimensions to use in GP model. If None, all are used.\n target_mask (list): list of which output dimensions to use in the GP model. If None, all are used.\n gp_approx (str): 'mean_eq' used mean equivalence rollout for the GP dynamics. Only one that works currently.\n online_learning (bool): if true, GP kernel values will be updated using past trajectory values.\n additional_constraints (list): list of Constraint objects defining additional constraints to be used.\n\n \"\"\"\n print(\"############################################### GP-MPC hexa ###########################################\")\n self.prior_env_func = partial(env_func,\n inertial_prop=np.array(inertial_prop)*prior_param_coeff)\n self.prior_param_coeff = prior_param_coeff\n # Initialize the method using linear MPC.\n self.prior_ctrl = LinearMPC(\n self.prior_env_func,\n horizon=horizon,\n q_mpc=q_mpc,\n r_mpc=r_mpc,\n use_prev_start=use_prev_start,\n output_dir=output_dir,\n additional_constraints=additional_constraints,\n )\n self.prior_ctrl.reset()\n super().__init__(\n self.prior_env_func,\n horizon=horizon,\n q_mpc=q_mpc,\n r_mpc=r_mpc,\n use_prev_start=use_prev_start,\n output_dir=output_dir,\n additional_constraints=additional_constraints,\n **kwargs)\n # Setup environments.\n self.env_func = env_func\n self.env = env_func(randomized_init=False)\n self.env_training = env_func(randomized_init=True)\n # No training data accumulated yet so keep the dynamics function as linear prior.\n self.train_data = None\n self.prior_dynamics_func = self.prior_ctrl.linear_dynamics_func\n # GP and training parameters.\n self.gaussian_process = None\n self.train_iterations = train_iterations\n self.validation_iterations = validation_iterations\n self.optimization_iterations = optimization_iterations\n self.learning_rate = learning_rate\n self.gp_model_path = gp_model_path\n self.normalize_training_data = normalize_training_data\n self.use_gpu = use_gpu\n self.seed = seed\n self.prob = prob\n self.sparse_gp = sparse_gp\n if input_mask is None:\n self.input_mask = np.arange(self.model.nx + self.model.nu).tolist()\n else:\n self.input_mask = input_mask\n if target_mask is None:\n self.target_mask = np.arange(self.model.nx).tolist()\n else:\n self.target_mask = target_mask\n Bd = np.eye(self.model.nx)\n self.Bd = Bd[:, self.target_mask]\n self.gp_approx = gp_approx\n self.online_learning = online_learning\n self.last_obs = None\n self.last_action = None\n self.initial_rollout_std = initial_rollout_std\n\n def setup_prior_dynamics(self):\n \"\"\"Computes the LQR gain used for propograting GP uncertainty from the prior model dynamics.\n\n \"\"\"\n # Determine the LQR gain K to propogate the input uncertainty (doing this at each timestep will increase complexity).\n A, B = discretize_linear_system(self.prior_ctrl.dfdx, self.prior_ctrl.dfdu, self.dt)\n Q_lqr = self.Q\n R_lqr = self.R\n P = scipy.linalg.solve_discrete_are(A, B, Q_lqr, R_lqr)\n btp = np.dot(B.T, P)\n self.lqr_gain = -np.dot(np.linalg.inv(self.R + np.dot(btp, B)), np.dot(btp, A))\n self.discrete_dfdx = A\n self.discrete_dfdu = B\n\n 
def set_gp_dynamics_func(self):\n \"\"\"Updates symbolic dynamics.\n\n With actual control frequency, initialize GP model and add to the combined dynamics.\n\n \"\"\"\n self.setup_prior_dynamics()\n # Compute the probabilistic constraint inverse CDF according to section III.D.b in Hewing 2019.\n self.inverse_cdf = scipy.stats.norm.ppf(1 - (1/self.model.nx - (self.prob + 1)/(2*self.model.nx)))\n self.create_sparse_GP_machinery()\n\n def create_sparse_GP_machinery(self):\n \"\"\"This setups the gaussian process approximations for FITC formulation.\n\n \"\"\"\n lengthscales, signal_var, noise_var, gp_K_plus_noise = self.gaussian_process.get_hyperparameters(as_numpy=True)\n self.length_scales = lengthscales.squeeze()\n self.signal_var = signal_var.squeeze()\n self.noise_var = noise_var.squeeze()\n self.gp_K_plus_noise = gp_K_plus_noise\n Nx = len(self.input_mask)\n Ny = len(self.target_mask)\n N = self.gaussian_process.n_training_samples\n # Create CasADI function for computing the kernel K_z_zind with parameters for z, z_ind, length scales and signal variance.\n # We need the CasADI version of this so that it can by symbolically differentiated in in the MPC optimization.\n z1 = cs.SX.sym('z1', Nx)\n z2 = cs.SX.sym('z2', Nx)\n ell_s = cs.SX.sym('ell', Nx)\n sf2_s = cs.SX.sym('sf2')\n z_ind = cs.SX.sym('z_ind', self.T, Nx)\n covSE = cs.Function('covSE', [z1, z2, ell_s, sf2_s],\n [covSEard(z1, z2, ell_s, sf2_s)])\n ks = cs.SX.zeros(1, self.T)\n for i in range(self.T):\n ks[i] = covSE(z1, z_ind[i, :], ell_s, sf2_s)\n ks_func = cs.Function('K_s', [z1, z_ind, ell_s, sf2_s], [ks])\n K_z_zind = cs.SX.zeros(Ny, self.T)\n for i in range(Ny):\n K_z_zind[i,:] = ks_func(z1, z_ind, self.length_scales[i,:], self.signal_var[i])\n # This will be mulitplied by the mean_post_factor computed at every time step to compute the approximate mean.\n self.K_z_zind_func = cs.Function('K_z_zind', [z1, z_ind],[K_z_zind],['z1', 'z2'],['K'])\n\n def preprocess_training_data(self,\n x_seq,\n u_seq,\n x_next_seq\n ):\n \"\"\"Converts trajectory data for GP trianing.\n \n Args:\n x_seq (list): state sequence of np.array (nx,). \n u_seq (list): action sequence of np.array (nu,). \n x_next_seq (list): next state sequence of np.array (nx,). \n \n Returns:\n np.array: inputs for GP training, (N, nx+nu).\n np.array: targets for GP training, (N, nx).\n\n \"\"\"\n # Get the predicted dynamics. 
This is a linear prior, thus we need to account for the fact that\n # it is linearized about an eq using self.X_GOAL and self.U_GOAL.\n x_pred_seq = self.prior_dynamics_func(x0=x_seq.T - self.prior_ctrl.X_LIN[:, None],\n p=u_seq.T - self.prior_ctrl.U_LIN[:,None])['xf'].toarray()\n targets = (x_next_seq.T - (x_pred_seq+self.prior_ctrl.X_LIN[:,None])).transpose() # (N, nx).\n inputs = np.hstack([x_seq, u_seq]) # (N, nx+nu).\n return inputs, targets\n\n def precompute_probabilistic_limits(self,\n print_sets=True\n ):\n \"\"\"This updates the constraint value limits to account for the uncertainty in the dynamics rollout.\n\n Args:\n print_sets (bool): True to print out the sets for debugging purposes.\n\n \"\"\"\n nx, nu = self.model.nx, self.model.nu\n T = self.T\n state_covariances = np.zeros((self.T+1, nx, nx))\n input_covariances = np.zeros((self.T, nu, nu))\n # Initilize lists for the tightening of each constraint.\n state_constraint_set = []\n for state_constraint in self.constraints.state_constraints:\n state_constraint_set.append(np.zeros((state_constraint.num_constraints, T+1)))\n input_constraint_set = []\n for input_constraint in self.constraints.input_constraints:\n input_constraint_set.append(np.zeros((input_constraint.num_constraints, T)))\n if self.x_prev is not None and self.u_prev is not None:\n cov_x = np.diag([self.initial_rollout_std**2]*nx)\n for i in range(T):\n state_covariances[i] = cov_x\n cov_u = self.lqr_gain @ cov_x @ self.lqr_gain.T\n input_covariances[i] = cov_u\n cov_xu = cov_x @ self.lqr_gain.T\n z = np.hstack((self.x_prev[:,i], self.u_prev[:,i]))\n if self.gp_approx == 'taylor':\n raise NotImplementedError(\"Taylor GP approximation is currently not working.\")\n elif self.gp_approx == 'mean_eq':\n _, cov_d_tensor = self.gaussian_process.predict(z[None,:], return_pred=False)\n cov_d = cov_d_tensor.detach().numpy()\n else:\n raise NotImplementedError('gp_approx method is incorrect or not implemented')\n # Loop through input constraints and tighten by the required ammount.\n for ui, input_constraint in enumerate(self.constraints.input_constraints):\n input_constraint_set[ui][:, i] = -1*self.inverse_cdf * \\\n np.absolute(input_constraint.A) @ np.sqrt(np.diag(cov_u))\n for si, state_constraint in enumerate(self.constraints.state_constraints):\n state_constraint_set[si][:, i] = -1*self.inverse_cdf * \\\n np.absolute(state_constraint.A) @ np.sqrt(np.diag(cov_x))\n if self.gp_approx == 'taylor':\n raise NotImplementedError(\"Taylor GP rollout not implemented.\")\n elif self.gp_approx == 'mean_eq':\n # Compute the next step propogated state covariance using mean equivilence.\n cov_x = self.discrete_dfdx @ cov_x @ self.discrete_dfdx.T + \\\n self.discrete_dfdx @ cov_xu @ self.discrete_dfdu.T + \\\n self.discrete_dfdu @ cov_xu.T @ self.discrete_dfdx.T + \\\n self.discrete_dfdu @ cov_u @ self.discrete_dfdu.T + \\\n self.Bd @ cov_d @ self.Bd.T\n else:\n raise NotImplementedError('gp_approx method is incorrect or not implemented')\n # Udate Final covariance.\n for si, state_constraint in enumerate(self.constraints.state_constraints):\n state_constraint_set[si][:,-1] = -1 * self.inverse_cdf * \\\n np.absolute(state_constraint.A) @ np.sqrt(np.diag(cov_x))\n state_covariances[-1] = cov_x\n if print_sets:\n print(\"Probabilistic State Constraint values along Horizon:\")\n print(state_constraint_set)\n print(\"Probabilistic Input Constraint values along Horizon:\")\n print(input_constraint_set)\n self.results_dict['input_constraint_set'].append(input_constraint_set)\n 
self.results_dict['state_constraint_set'].append(state_constraint_set)\n self.results_dict['state_horizon_cov'].append(state_covariances)\n self.results_dict['input_horizon_cov'].append(input_covariances)\n return state_constraint_set, input_constraint_set\n\n def precompute_sparse_gp_values(self):\n \"\"\"Uses the last MPC solution to precomupte values associated with the FITC GP approximation.\n\n \"\"\"\n n_data_points = self.gaussian_process.n_training_samples\n dim_gp_inputs = len(self.input_mask)\n dim_gp_outputs = len(self.target_mask)\n inputs = self.train_data['train_inputs']\n targets = self.train_data['train_targets']\n # Get the inducing points.\n if self.x_prev is not None and self.u_prev is not None:\n # Use the previous MPC solution as in Hewing 2019.\n z_ind = np.hstack((self.x_prev[:,:-1].T, self.u_prev.T))\n z_ind = z_ind[:,self.input_mask]\n else:\n # If there is no previous solution. Choose T random training set points.\n inds = self.env.np_random.choice(range(n_data_points), size=self.T)\n #z_ind = self.data_inputs[inds][:, self.input_mask]\n z_ind = inputs[inds][:, self.input_mask]\n K_zind_zind = self.gaussian_process.kernel(torch.Tensor(z_ind).double())\n K_zind_zind_inv = self.gaussian_process.kernel_inv(torch.Tensor(z_ind).double())\n K_x_zind = self.gaussian_process.kernel(torch.from_numpy(inputs[:, self.input_mask]).double(),\n torch.Tensor(z_ind).double())\n Q_X_X = K_x_zind @ K_zind_zind_inv @ K_x_zind.transpose(1,2)\n Gamma = torch.diagonal(self.gaussian_process.K_plus_noise + Q_X_X, 0, 1, 2)\n Gamma_inv = torch.diag_embed(1/Gamma)\n Sigma = torch.pinverse(K_zind_zind + K_x_zind.transpose(1,2) @ Gamma_inv @ K_x_zind)\n mean_post_factor = torch.zeros((dim_gp_outputs, self.T))\n for i in range(dim_gp_outputs):\n mean_post_factor[i] = Sigma[i] @ K_x_zind[i].T @ Gamma_inv[i] @ \\\n torch.from_numpy(targets[:,self.target_mask[i]]).double()\n return mean_post_factor.detach().numpy(), Sigma.detach().numpy(), K_zind_zind_inv.detach().numpy(), z_ind\n\n def setup_gp_optimizer(self):\n \"\"\"Sets up nonlinear optimization problem including cost objective, variable bounds and dynamics constraints.\n\n \"\"\"\n nx, nu = self.model.nx, self.model.nu\n T = self.T\n # Define optimizer and variables.\n opti = cs.Opti()\n # States.\n x_var = opti.variable(nx, T + 1)\n # Inputs.\n u_var = opti.variable(nu, T)\n # Initial state.\n x_init = opti.parameter(nx, 1)\n # Reference (equilibrium point or trajectory, last step for terminal cost).\n x_ref = opti.parameter(nx, T + 1)\n # Chance constraint limits.\n state_constraint_set = []\n for state_constraint in self.constraints.state_constraints:\n state_constraint_set.append(opti.parameter(state_constraint.num_constraints, T+1))\n input_constraint_set = []\n for input_constraint in self.constraints.input_constraints:\n input_constraint_set.append(opti.parameter(input_constraint.num_constraints, T))\n # Sparse GP mean postfactor matrix.\n mean_post_factor = opti.parameter(len(self.target_mask), T)\n # Sparse GP inducing points.\n z_ind = opti.parameter(T, len(self.input_mask))\n # Cost (cumulative).\n cost = 0\n cost_func = self.model.loss\n for i in range(T):\n cost += cost_func(x=x_var[:, i],\n u=u_var[:, i],\n Xr=x_ref[:, i],\n Ur=np.zeros((nu, 1)),\n Q=self.Q,\n R=self.R)[\"l\"]\n # Terminal cost.\n cost += cost_func(x=x_var[:, -1],\n u=np.zeros((nu, 1)),\n Xr=x_ref[:, -1],\n Ur=np.zeros((nu, 1)),\n Q=self.Q,\n R=self.R)[\"l\"]\n opti.minimize(cost)\n z = cs.vertcat(x_var[:,:-1], u_var)\n z = z[self.input_mask,:]\n for i in 
range(self.T):\n # Dynamics constraints using the dynamics of the prior and the mean of the GP.\n # This follows the tractable dynamics formulation in Section III.B in Hewing 2019.\n # Note that for the GP approximation, we are purposely using elementwise multiplication *.\n if self.sparse_gp:\n next_state = self.prior_dynamics_func(x0=x_var[:, i]-self.prior_ctrl.X_LIN[:,None],\n p=u_var[:, i]-self.prior_ctrl.U_LIN[:,None])['xf'] + \\\n self.prior_ctrl.X_LIN[:,None]+ self.Bd @ cs.sum2(self.K_z_zind_func(z1=z[:,i].T, z2=z_ind)['K'] * mean_post_factor)\n else:\n # Sparse GP approximation doesn't always work well, thus, use Exact GP regression. This is much slower,\n # but for unstable systems, make performance much better.\n next_state = self.prior_dynamics_func(x0=x_var[:, i]-self.prior_ctrl.X_LIN[:,None],\n p=u_var[:, i]-self.prior_ctrl.U_LIN[:,None])['xf'] + \\\n self.prior_ctrl.X_LIN[:,None]+ self.Bd @ self.gaussian_process.casadi_predict(z=z[:,i])['mean']\n opti.subject_to(x_var[:, i + 1] == next_state)\n # Probabilistic state and input constraints according to Hewing 2019 constraint tightening.\n for s_i, state_constraint in enumerate(self.state_constraints_sym):\n opti.subject_to(state_constraint(x_var[:, i]) <= state_constraint_set[s_i][:,i])\n for u_i, input_constraint in enumerate(self.input_constraints_sym):\n opti.subject_to(input_constraint(u_var[:, i]) <= input_constraint_set[u_i][:,i])\n # Final state constraints.\n for s_i, state_constraint in enumerate(self.state_constraints_sym):\n opti.subject_to(state_constraint(x_var[:, -1]) <= state_constraint_set[s_i][:,-1])\n # Initial condition constraints.\n opti.subject_to(x_var[:, 0] == x_init)\n # Create solver (IPOPT solver in this version).\n opts = {\"ipopt.print_level\": 4,\n \"ipopt.sb\": \"yes\",\n \"ipopt.max_iter\": 100, #100,\n \"print_time\": 1}\n opti.solver('ipopt', opts)\n self.opti_dict = {\n \"opti\": opti,\n \"x_var\": x_var,\n \"u_var\": u_var,\n \"x_init\": x_init,\n \"x_ref\": x_ref,\n \"state_constraint_set\": state_constraint_set,\n \"input_constraint_set\": input_constraint_set,\n \"mean_post_factor\": mean_post_factor,\n \"z_ind\": z_ind,\n \"cost\": cost\n }\n\n def select_action_with_gp(self,\n obs\n ):\n \"\"\"Solves nonlinear MPC problem to get next action.\n\n Args:\n obs (np.array): current state/observation.\n\n Returns:\n np.array: input/action to the task/env.\n\n \"\"\"\n opti_dict = self.opti_dict\n opti = opti_dict[\"opti\"]\n x_var = opti_dict[\"x_var\"]\n u_var = opti_dict[\"u_var\"]\n x_init = opti_dict[\"x_init\"]\n x_ref = opti_dict[\"x_ref\"]\n state_constraint_set = opti_dict[\"state_constraint_set\"]\n input_constraint_set = opti_dict[\"input_constraint_set\"]\n mean_post_factor = opti_dict[\"mean_post_factor\"]\n z_ind = opti_dict[\"z_ind\"]\n cost = opti_dict[\"cost\"]\n # Assign the initial state.\n opti.set_value(x_init, obs)\n # Assign reference trajectory within horizon.\n goal_states = self.get_references()\n opti.set_value(x_ref, goal_states)\n if self.mode == \"tracking\":\n self.traj_step += 1\n # Set the probabilistic state and input constraint set limits.\n state_constraint_set_prev, input_constraint_set_prev = self.precompute_probabilistic_limits()\n for si in range(len(self.constraints.state_constraints)):\n opti.set_value(state_constraint_set[si], state_constraint_set_prev[si])\n for ui in range(len(self.constraints.input_constraints)):\n opti.set_value(input_constraint_set[ui], input_constraint_set_prev[ui])\n mean_post_factor_val, Sigma, K_zind_zind_inv, z_ind_val = 
self.precompute_sparse_gp_values()\n opti.set_value(mean_post_factor, mean_post_factor_val)\n opti.set_value(z_ind, z_ind_val)\n # Initial guess for the optimization problem.\n if self.warmstart and self.x_prev is not None and self.u_prev is not None:\n # shift previous solutions by 1 step\n x_guess = deepcopy(self.x_prev)\n u_guess = deepcopy(self.u_prev)\n x_guess[:, :-1] = x_guess[:, 1:]\n u_guess[:-1] = u_guess[1:]\n opti.set_initial(x_var, x_guess)\n opti.set_initial(u_var, u_guess)\n # Solve the optimization problem.\n try:\n sol = opti.solve()\n x_val, u_val = sol.value(x_var), sol.value(u_var)\n except RuntimeError:\n x_val, u_val = opti.debug.value(x_var), opti.debug.value(u_var)\n u_val = np.atleast_2d(u_val)\n self.x_prev = x_val\n self.u_prev = u_val\n self.results_dict['horizon_states'].append(deepcopy(self.x_prev))\n self.results_dict['horizon_inputs'].append(deepcopy(self.u_prev))\n zi = np.hstack((x_val[:,0], u_val[:,0]))\n zi = zi[self.input_mask]\n gp_contribution = np.sum(self.K_z_zind_func(z1=zi, z2=z_ind_val)['K'].toarray() * mean_post_factor_val,axis=1)\n print(\"GP Mean eq Contribution: %s\" % gp_contribution)\n zi = np.hstack((x_val[:,0], u_val[:,0]))\n pred, _, _ = self.gaussian_process.predict(zi[None,:])\n print(\"True GP value: %s\" % pred.numpy())\n lin_pred = self.prior_dynamics_func(x0=x_val[:,0]-self.prior_ctrl.X_LIN,\n p=u_val[:, 0]-self.prior_ctrl.U_LIN)['xf'].toarray() + \\\n self.prior_ctrl.X_LIN[:,None]\n self.results_dict['linear_pred'].append(lin_pred)\n self.results_dict['gp_mean_eq_pred'].append(gp_contribution)\n self.results_dict['gp_pred'].append(pred.numpy())\n # Take the first one from solved action sequence.\n if u_val.ndim > 1:\n action = u_val[:, 0]\n else:\n action = np.array([u_val[0]])\n self.prev_action = action,\n return action\n\n def learn(self,\n input_data=None,\n target_data=None,\n gp_model=None,\n plot=False\n ):\n \"\"\"Performs GP training.\n\n Args:\n input_data, target_data (optiona, np.array): data to use for training\n gp_model (str): if not None, this is the path to pretrained models to use instead of training new ones.\n plot (bool): to plot validation trajectories or not.\n\n Returns:\n training_results (dict): Dictionary of the training results.\n\n \"\"\"\n if gp_model is None:\n gp_model = self.gp_model_path\n self.prior_ctrl.remove_constraints(self.prior_ctrl.additional_constraints)\n self.reset()\n if self.online_learning:\n input_data = np.zeros((self.train_iterations, len(self.input_mask)))\n target_data = np.zeros((self.train_iterations, len(self.target_mask)))\n if input_data is None and target_data is None:\n train_inputs = []\n train_targets = []\n train_info = []\n\n ############\n # Use Latin Hypercube Sampling to generate states withing environment bounds.\n lhs_sampler = Lhs(lhs_type='classic', criterion='maximin')\n limits = [(self.env.INIT_STATE_RAND_INFO[key].low, self.env.INIT_STATE_RAND_INFO[key].high) for key in\n self.env.INIT_STATE_RAND_INFO]\n # todo: parameterize this if we actually want it.\n num_eq_samples = 0\n samples = lhs_sampler.generate(limits,\n self.train_iterations + self.validation_iterations - num_eq_samples,\n random_state=self.seed)\n # todo: choose if we want eq samples or not.\n delta = 0.01\n eq_limits = [(self.prior_ctrl.X_LIN[eq]-delta, self.prior_ctrl.X_LIN[eq]+delta) for eq in range(self.model.nx)]\n if num_eq_samples > 0:\n eq_samples = lhs_sampler.generate(eq_limits, num_eq_samples, random_state=self.seed)\n #samples = samples.append(eq_samples)\n init_state_samples = 
np.array(samples + eq_samples)\n else:\n init_state_samples = np.array(samples)\n input_limits = np.vstack((self.constraints.input_constraints[0].lower_bounds,\n self.constraints.input_constraints[0].upper_bounds)).T\n input_samples = lhs_sampler.generate(input_limits,\n self.train_iterations + self.validation_iterations,\n random_state=self.seed)\n input_samples = np.array(input_samples) # not being used currently\n seeds = self.env.np_random.randint(0,99999, size=self.train_iterations + self.validation_iterations)\n\n load_from_file = False\n if load_from_file:\n gpmpc_data = np.load(\"/home/erl/repos/journal_zhichao/safe-control-gym/experiments/annual_reviews/figure6/data/small_drone/statecontroldata_rand_good1.npz\")\n x_seq_all = gpmpc_data[\"x_seq_all\"]\n x_next_seq_all = gpmpc_data[\"x_next_seq_all\"]\n u_seq_all = gpmpc_data[\"u_seq_all\"]\n else:\n x_seq_all = []\n u_seq_all = []\n x_next_seq_all = []\n for i in range(self.train_iterations + self.validation_iterations):\n if load_from_file:\n x_seq = x_seq_all[i]\n x_next_seq = x_next_seq_all[i]\n u_seq = u_seq_all[i]\n else:\n # For random initial state training.\n init_state = init_state_samples[i,:]\n # Collect data with prior controller.\n run_env = self.env_func(init_state=init_state, randomized_init=False, seed=int(seeds[i]))\n episode_results = self.prior_ctrl.run(env=run_env, max_steps=1, gp_training = True)\n run_env.close()\n x_obs = episode_results['obs'][-3:,:]\n u_seq = episode_results['action'][-1:,:]\n run_env.close()\n x_seq = x_obs[:-1,:]\n x_next_seq = x_obs[1:,:]\n x_seq_all.append(x_seq)\n x_next_seq_all.append(x_next_seq)\n u_seq_all.append(u_seq)\n train_inputs_i, train_targets_i = self.preprocess_training_data(x_seq, u_seq, x_next_seq)\n train_inputs.append(train_inputs_i)\n train_targets.append(train_targets_i)\n np.savez(\"/home/erl/repos/journal_zhichao/safe-control-gym/experiments/annual_reviews/figure6/data/small_drone/statecontroldata_rand.npz\", x_seq_all = x_seq_all, x_next_seq_all = x_next_seq_all, u_seq_all = u_seq_all)\n ###########\n else:\n train_inputs = input_data\n train_targets = target_data\n # assign all data\n train_inputs = np.vstack(train_inputs)\n train_targets = np.vstack(train_targets)\n self.data_inputs = train_inputs\n self.data_targets = train_targets\n train_idx, test_idx = train_test_split(\n #list(range(self.train_iterations + self.validation_iterations)),\n list(range(train_inputs.shape[0])),\n test_size=self.validation_iterations/(self.train_iterations+self.validation_iterations),\n random_state=self.seed\n )\n train_inputs = self.data_inputs[train_idx, :]\n train_targets = self.data_targets[train_idx, :]\n self.train_data = {'train_inputs': train_inputs, 'train_targets': train_targets}\n test_inputs = self.data_inputs[test_idx, :]\n test_targets = self.data_targets[test_idx, :]\n self.test_data = {'test_inputs': test_inputs, 'test_targets': test_targets}\n\n\n train_inputs_tensor = torch.Tensor(train_inputs).double()\n train_targets_tensor = torch.Tensor(train_targets).double()\n test_inputs_tensor = torch.Tensor(test_inputs).double()\n test_targets_tensor = torch.Tensor(test_targets).double()\n\n if plot:\n init_state = np.array([-1.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n valid_env = self.env_func(init_state=init_state,\n randomized_init=False)\n validation_results = self.prior_ctrl.run(env=valid_env,\n max_steps=40)\n valid_env.close()\n x_obs = validation_results['obs']\n u_seq = validation_results['action']\n x_seq = x_obs[:-1, :]\n x_next_seq = x_obs[1:, :]\n # Define 
likelihood.\n likelihood = gpytorch.likelihoods.GaussianLikelihood(\n noise_constraint=gpytorch.constraints.GreaterThan(1e-6),\n ).double()\n self.gaussian_process = GaussianProcessCollection(ZeroMeanIndependentGPModel,\n likelihood,\n len(self.target_mask),\n input_mask=self.input_mask,\n target_mask=self.target_mask,\n normalize=self.normalize_training_data\n )\n if gp_model:\n self.gaussian_process.init_with_hyperparam(train_inputs_tensor,\n train_targets_tensor,\n gp_model)\n else:\n # Train the GP.\n self.gaussian_process.train(train_inputs_tensor,\n train_targets_tensor,\n test_inputs_tensor,\n test_targets_tensor,\n n_train=self.optimization_iterations,\n learning_rate=self.learning_rate,\n gpu=self.use_gpu,\n dir=self.output_dir)\n # Plot validation.\n if plot:\n validation_inputs, validation_targets = self.preprocess_training_data(x_seq, u_seq, x_next_seq)\n fig_count = 0\n fig_count = self.gaussian_process.plot_trained_gp(torch.Tensor(validation_inputs).double(),\n torch.Tensor(validation_targets).double(),\n fig_count=fig_count)\n self.set_gp_dynamics_func()\n self.setup_gp_optimizer()\n self.prior_ctrl.add_constraints(self.prior_ctrl.additional_constraints)\n self.prior_ctrl.reset()\n # Collect training results.\n training_results = {}\n training_results['train_targets'] = train_targets\n training_results['train_inputs'] = train_inputs\n try:\n training_results['info'] = train_info\n except UnboundLocalError:\n training_results['info'] = None\n return training_results\n\n def select_action(self,\n obs\n ):\n \"\"\"Select the action based on the given observation.\n\n Args:\n obs (np.array): current observed state.\n\n Returns:\n action (np.array): desired policy action.\n\n \"\"\"\n if self.gaussian_process is None:\n action = self.prior_ctrl.select_action(obs)\n else:\n if(self.last_obs is not None and self.last_action is not None and self.online_learning):\n print(\"[ERROR]: Not yet supported.\")\n exit()\n t1 = time.perf_counter()\n action = self.select_action_with_gp(obs)\n t2 = time.perf_counter()\n print(\"GP SELECT ACTION TIME: %s\" %(t2 - t1))\n self.last_obs = obs\n self.last_action = action\n return action\n\n def close(self):\n \"\"\"Clean up.\n\n \"\"\"\n self.env_training.close()\n self.env.close()\n\n def reset_results_dict(self):\n \"\"\"\n\n \"\"\"\n \"Result the results_dict before running.\"\n super().reset_results_dict()\n self.results_dict['input_constraint_set'] = []\n self.results_dict['state_constraint_set'] = []\n self.results_dict['state_horizon_cov'] = []\n self.results_dict['input_horizon_cov'] = []\n self.results_dict['gp_mean_eq_pred'] = []\n self.results_dict['gp_pred'] = []\n self.results_dict['linear_pred'] = []\n\n def reset(self):\n \"\"\"Reset the controller before running.\n\n \"\"\"\n # Setup reference input.\n if self.env.TASK == Task.STABILIZATION:\n self.mode = \"stabilization\"\n self.x_goal = self.env.X_GOAL\n elif self.env.TASK == Task.TRAJ_TRACKING:\n self.mode = \"tracking\"\n self.traj = self.env.X_GOAL.T\n self.traj_step = 0\n # Dynamics model.\n if self.gaussian_process is not None:\n self.set_gp_dynamics_func()\n # CasADi optimizer.\n self.setup_gp_optimizer()\n self.prior_ctrl.reset()\n # Previously solved states & inputs, useful for warm start.\n self.x_prev = None\n self.u_prev = None\n" ]
[ [ "numpy.diag", "scipy.stats.norm.ppf", "scipy.linalg.solve_discrete_are", "numpy.vstack", "torch.diagonal", "numpy.savez", "torch.from_numpy", "numpy.absolute", "torch.Tensor", "numpy.eye", "numpy.load", "numpy.atleast_2d", "numpy.zeros", "numpy.arange", "numpy.hstack", "numpy.array", "torch.diag_embed", "torch.zeros", "numpy.dot" ] ]
PouyaREZ/Wastewater_Energy_Optimization
[ "ead604b715337dc8c76871910d38965d1b8b1856" ]
[ "Plotters/Results/Plots_Paper_One.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 4 2020\n\n\n@Author: PouyaRZ\n\n____________________________________________________\nPlots to produce:\n1. LCC of equipment for each scenario for all the individuals\n2, SCC of equipment for each scenario for all the individuals\n\n3. SCC vs LCC scatter plot.\n\n4. SCC vs chiller type\n5. SCC vs CHP type,\n6. LCC vs chiller type\n7. SCC vs CHP type\n\n8. Traces of building types across all the runs\n____________________________________________________\n\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\ndef DF_Filter(filename):\n \n file = np.loadtxt(filename, dtype='float')\n \n inputDF = pd.DataFrame(file)\n \n error_tol = 1.15\n \n# print('GFA stats:')\n# print(inputDF.iloc[:,38].describe())\n print('+++++ processing %s +++++\\n'%(filename))\n \n print('Count duplicates:')\n condition1 = inputDF.duplicated()==True\n print(inputDF[condition1][38].count())\n \n \n print('Count under the min GFA:') # Count non-trivial neighborhoods\n condition2 = inputDF[38] <= 1/error_tol#<=647497/10\n print(inputDF[condition2][38].count())\n \n \n print('Count over the max GFA:')\n condition3 = inputDF[38]>=647497*5*error_tol\n print(inputDF[condition3][38].count())\n \n \n print('Count over the max Site GFA:')\n condition4 = inputDF[38]/inputDF[36]>=647497*error_tol\n print(inputDF[condition4][38].count())\n \n \n print('Count valid answers:')\n print(len(inputDF) - inputDF[condition1 | condition2 | condition3 | condition4][38].count())\n \n# print('------------------')\n # Filtering the inadmissible results\n Filtered = ~(condition1 | condition2 | condition3 | condition4)\n inputDF = inputDF[Filtered]\n inputDF.reset_index(inplace=True, drop=True)\n \n# print('Annual energy demand stats (MWh):')\n inputDF[26] /= inputDF[38] # Normalizing LCC ($/m2)\n inputDF[27] /= inputDF[38] # Normalizing SCC ($/m2)\n inputDF[39] /= inputDF[38] # Normalizing CO2 (Tonnes/m2)\n inputDF[40] /= (10**3*inputDF[38]) # Normalizing total energy demand (MWh/m2)\n inputDF[41] /= inputDF[38] # Normalizing total wwater treatment demand (L/m2)\n for i in range(29,36): # Converting percent areas to integer %\n inputDF[i] = inputDF[i] * 100\n# print(inputDF[40].describe())\n \n return inputDF\n \n\n\n### MAIN FUNCTION\nprint('loading data')\nfilenames = ['../RQ1_W_CWWTP_ModConsts_Feb17/SDO_LHS_TestRuns288_Constraint_SF_Test.txt',\n '../RQ1_WO_CWWTP_ModConsts_Feb17/SDO_LHS_TestRuns288_Constraint_SF_Test.txt']\nDFNames = ['CCHP|CWWTP','CCHP+WWT']\nDFs = {}\nfor i in range(2):\n DFs[DFNames[i]] = DF_Filter(filenames[i])\n\n\n\n\nplt.style.use('ggplot')\ncolors_rb = {DFNames[0]:'r', DFNames[1]:'b'}\n\n\n\n\n\n\n\n\n# =============================================================================\n## CHP/Chiller/Solar Types used in the individual neighborhood\nCHP_Types = {}\nCHP_Types[1] = 'Gas_1'\nCHP_Types[2] = 'Gas_2'\nCHP_Types[3] = 'Gas_3'\nCHP_Types[4] = 'Gas_4'\nCHP_Types[5] = 'Gas_5'\nCHP_Types[6] = 'Micro_1'\nCHP_Types[7] = 'Micro_2'\nCHP_Types[8] = 'Micro_3'\nCHP_Types[9] = 'Recipro_1'\nCHP_Types[10] = 'Recipro_2'\nCHP_Types[11] = 'Recipro_3'\nCHP_Types[12] = 'Recipro_4'\nCHP_Types[13] = 'Recipro_5'\nCHP_Types[14] = 'Steam_1'\nCHP_Types[15] = 'Steam_2'\nCHP_Types[16] = 'Steam_3'\nCHP_Types[17] = 'Fuel_Cell_1'\nCHP_Types[18] = 'Fuel_Cell_2'\nCHP_Types[19] = 'Fuel_Cell_3'\nCHP_Types[20] = 'Fuel_Cell_4'\nCHP_Types[21] = 'Fuel_Cell_5'\nCHP_Types[22] = 'Fuel_Cell_6'\nCHP_Types[23] = 'Bio_1'\nCHP_Types[24] = 'Bio_2'\nCHP_Types[25] = 
'Bio_3'\nCHP_Types[26] = 'Bio_4'\nCHP_Types[27] = 'Bio_5'\nCHP_Types[28] = 'Bio_6'\nCHP_Types[29] = 'Bio_7'\nCHP_Types[30] = 'Bio_8'\nCHP_Types[31] = 'Bio_9'\nCHP_Types[32] = 'Bio_10'\n\n\nChiller_Types = {}\nChiller_Types[1] = 'Electric_1'\nChiller_Types[2] = 'Electric_2'\nChiller_Types[3] = 'Electric_3'\nChiller_Types[4] = 'Electric_4'\nChiller_Types[5] = 'Electric_5'\nChiller_Types[6] = 'Electric_6'\nChiller_Types[7] = 'Electric_7'\nChiller_Types[8] = 'Electric_8'\nChiller_Types[9] = 'Electric_9'\nChiller_Types[10] = 'Absorp_1'\nChiller_Types[11] = 'Absorp_2'\nChiller_Types[12] = 'Absorp_3'\nChiller_Types[13] = 'Absorp_4'\nChiller_Types[14] = 'Absorp_5'\nChiller_Types[15] = 'Absorp_6'\nChiller_Types[16] = 'Absorp_7'\nChiller_Types[17] = 'Absorp_8'\n\n\nWWT_Types = {}\nWWT_Types[1] = \"FO_MD\"\nWWT_Types[2] = \"FO_RO\"\nWWT_Types[3] = \"CWWTP\"\n\n\n\n## CHP, Chiller and WWT name assignments\n# CHP = {}\n# Chiller = {}\n# WWT = {}\nfor DFName in DFNames:\n # CHP[DFName] = np.array([CHP_Types[int(i)] for i in DFs[DFName][21]]) # Making strings of CHP names instead of integers\n DFs[DFName][21] = np.array([CHP_Types[int(i)] for i in DFs[DFName][21]]) # Making strings of CHP names instead of integers\n # Chiller[DFName] = np.array([Chiller_Types[int(i)] for i in DFs[DFName][22]]) # Making strings of Chiller names instead of integers\n DFs[DFName][22] = np.array([Chiller_Types[int(i)] for i in DFs[DFName][22]]) # Making strings of Chiller names instead of integers\n # WWT[DFName] = np.array([WWT_Types[int(i)] for i in DFs[DFName][24]]) # Making strings of WWT module names instead of integers\n DFs[DFName][24] = np.array([WWT_Types[int(i)] for i in DFs[DFName][24]]) # Making strings of WWT module names instead of integers\n\n\n# =============================================================================\n\n\n\n\n\n######################## PLOTS ##########################\n\n#############################################\nprint('plotting overall LCC and SCC graphs')\n# LCC\nplt.figure(figsize=(10,5))\nfor DFName in DFNames:\n sortedDF = DFs[DFName].sort_values(by=26, ascending=True).reset_index(drop=True)\n plt.scatter(x=sortedDF.index,y=(sortedDF[26]/10**3),label=DFName, s=2, alpha=0.5, c=colors_rb[DFName])\n# (DFs[DFName][0][26]/10**6).plot(label=DFName)\nplt.xlabel('Rank')\nplt.ylabel(r'LCC (k\\$/$m^2$)')\n# plt.title('LCC')\nplt.legend()\nplt.savefig('LCC_Ascending.png', dpi=400, bbox_inches='tight')\n\n\n\n# SCC\nplt.figure(figsize=(10,5))\nfor DFName in DFNames:\n sortedDF = DFs[DFName].sort_values(by=27, ascending=True).reset_index(drop=True)\n plt.scatter(x=sortedDF.index,y=(sortedDF[27]/10**3),label=DFName, s=2, alpha=0.5, c=colors_rb[DFName])\n# (DFs[DFName][0][26]/10**6).plot(label=DFName)\nplt.xlabel('Rank')\nplt.ylabel(r'SCC (k\\$/$m^2$)')\n# plt.title('SCC')\nplt.legend()\nplt.savefig('SCC_Ascending.png', dpi=400, bbox_inches='tight')\n\nplt.close('all')\n\n\n\n#############################################\nprint('plotting LCC and SCC box plots')\n\nprint('\\n#############################################')\nprint('Stats of LCC ($/m2) for Disintegrated Case:\\n',(DFs[DFNames[0]][26]).describe())\nprint('Stats of LCC ($/m2) for Integrated Case:\\n',(DFs[DFNames[1]][26]).describe())\nprint('Stats of SCC ($/m2) for Disintegrated Case:\\n',(DFs[DFNames[0]][27]).describe())\nprint('Stats of SCC ($/m2) for Integrated Case:\\n',(DFs[DFNames[1]][27]).describe())\nprint('#############################################\\n')\n\n# 
=============================================================================\n# # LCC\n# plt.figure(figsize=(10,5))\n# # for DFName in DFNames:\n# plt.boxplot(x=[(DFs[DFNames[0]][26]/10**3), (DFs[DFNames[1]][26]/10**3)])\n# # (DFs[DFName][0][26]/10**6).plot(label=DFName)\n# # plt.xlabel('Rank')\n# plt.ylabel(r'LCC (k\\$/$m^2$)')\n# plt.xticks([1,2],[DFNames[0],DFNames[1]])\n# # plt.title('LCC')\n# plt.savefig('LCC_Boxplot.png', dpi=400, bbox_inches='tight')\n# \n# \n# \n# # SCC\n# plt.figure(figsize=(10,5))\n# # for DFName in DFNames:\n# plt.boxplot(x=[(DFs[DFNames[0]][27]/10**3), (DFs[DFNames[1]][27]/10**3)])\n# # (DFs[DFName][0][26]/10**6).plot(label=DFName)\n# # plt.xlabel('Rank')\n# plt.ylabel(r'SCC (k\\$/$m^2$)')\n# plt.xticks([1,2],[DFNames[0],DFNames[1]])\n# # plt.title('LCC')\n# plt.savefig('SCC_Boxplot.png', dpi=400, bbox_inches='tight')\n# \n# plt.close('all')\n# =============================================================================\n\n\n'''\n#############################################\nprint('plotting LCC/SCC vs total neighborhood energy and ww graphs')\n\nprint('\\n#############################################')\nprint('Stats of Total Energy Demand (MWh/m2) for Disintegrated Case:\\n',(DFs[DFNames[0]][40]).describe())\nprint('Stats of Total Energy Demand (MWh/m2) for Integrated Case:\\n',(DFs[DFNames[1]][40]).describe())\nprint('Stats of Total Wastewater Treatment Demand (m3/m2) for Disintegrated Case:\\n',(DFs[DFNames[0]][41]/10**3).describe())\nprint('Stats of Total Wastewater Treatment Demand (m3/m2) for Integrated Case:\\n',(DFs[DFNames[1]][41]/10**3).describe())\nprint('#############################################\\n')\n\n# LCC vs Neighborhood's Total Energy Use\nplt.figure(figsize=(10,5))\nfor DFName in DFNames:\n sortedDF = DFs[DFName].sort_values(by=40, ascending=True).reset_index(drop=True)\n plt.scatter(x=(sortedDF[40]),y=(sortedDF[26]/10**3),label=DFName, s=2, alpha=0.5, c=colors_rb[DFName])\n# (DFs[DFName][0][26]/10**6).plot(label=DFName)\nplt.xlabel(r'Total Energy Demand (MWh/$m^2$)')\nplt.ylabel(r'LCC (k\\$/$m^2$)')\n# plt.title('LCC')\nplt.legend()\nplt.savefig('LCC_vs_Energy_Demand.png', dpi=400, bbox_inches='tight')\n\n\n# LCC vs Neighborhood's Total WWater Demand\nplt.figure(figsize=(10,5))\nfor DFName in DFNames:\n sortedDF = DFs[DFName].sort_values(by=41, ascending=True).reset_index(drop=True)\n plt.scatter(x=(sortedDF[41]/10**3),y=(sortedDF[26]/10**3),label=DFName, s=2, alpha=0.5, c=colors_rb[DFName])\n# (DFs[DFName][0][26]/10**6).plot(label=DFName)\nplt.xlabel(r'Total Wastewater Treatment Demand ($m^3$/$m^2$)')\nplt.ylabel(r'LCC (k\\$/$m^2$)')\n# plt.title('LCC')\nplt.legend()\nplt.savefig('LCC_vs_WWater_Demand.png', dpi=400, bbox_inches='tight')\n\n\n\n# SCC vs Neighborhood's Total Energy Use\nplt.figure(figsize=(10,5))\nfor DFName in DFNames:\n sortedDF = DFs[DFName].sort_values(by=40, ascending=True).reset_index(drop=True)\n plt.scatter(x=(sortedDF[40]),y=(sortedDF[27]/10**3),label=DFName, s=2, alpha=0.5, c=colors_rb[DFName])\n# (DFs[DFName][0][26]/10**6).plot(label=DFName)\nplt.xlabel(r'Total Energy Demand (MWh/$m^2$)')\nplt.ylabel(r'SCC (k\\$/$m^2$)')\n# plt.title('LCC')\nplt.legend()\nplt.savefig('SCC_vs_Energy_Demand.png', dpi=400, bbox_inches='tight')\n\n\n# SCC vs Neighborhood's Total WWater Demand\nplt.figure(figsize=(10,5))\nfor DFName in DFNames:\n sortedDF = DFs[DFName].sort_values(by=41, ascending=True).reset_index(drop=True)\n plt.scatter(x=(sortedDF[41]/10**3),y=(sortedDF[27]/10**3),label=DFName, s=2, alpha=0.5, 
c=colors_rb[DFName])\n# (DFs[DFName][0][26]/10**6).plot(label=DFName)\nplt.xlabel(r'Total Wastewater Treatment Demand ($m^3$/$m^2$)')\nplt.ylabel(r'SCC (k\\$/$m^2$)')\n# plt.title('LCC')\nplt.legend()\nplt.savefig('SCC_vs_WWater_Demand.png', dpi=400, bbox_inches='tight')\n\nplt.close('all')\n\n#############################################\n\nprint('plotting building mix vs neighborhood energy and ww graphs')\n\n# Building Mix vs Neighborhood's Total WWater Demand (integrated)\nDFName = 'CCHP+WWT'\nbldg_types = ['Res','Off','Com','Ind','Hos','Med','Edu']\ncolors = ['m','b','c','g','y','orange','r']\ncolumns = list(range(29,36))\nplt.figure(figsize=(10,5))\nsortedDF = DFs[DFName].sort_values(by=41, ascending=True).reset_index(drop=True)\nfor i in range(len(bldg_types)):\n plt.scatter(x=(sortedDF[41]/10**3),y=DFs[DFName].iloc[:,columns[i]],\n s=0.5, label=bldg_types[i], c=colors[i], alpha=0.5)\n# (DFs[DFName][0][26]/10**6).plot(label=DFName)\nplt.xlabel(r'Total Wastewater Treatment Demand ($m^3$/$m^2$)')\nplt.ylabel('Percent of Total GFA (%)')\nplt.ylim(0, 100)\nplt.xlim(0,11)\n# plt.title('LCC')\nplt.legend()\nplt.savefig('Bldg_Mix_vs_WWater_Demand_Integ.png', dpi=400, bbox_inches='tight')\n\n\n\n# Building Mix vs Neighborhood's Total WWater Demand (Disintegrated)\nDFName = 'CCHP|CWWTP'\nbldg_types = ['Res','Off','Com','Ind','Hos','Med','Edu']\ncolors = ['m','b','c','g','y','orange','r']\ncolumns = list(range(29,36))\nplt.figure(figsize=(10,5))\nsortedDF = DFs[DFName].sort_values(by=41, ascending=True).reset_index(drop=True)\nfor i in range(len(bldg_types)):\n plt.scatter(x=(sortedDF[41]/10**3),y=DFs[DFName].iloc[:,columns[i]],\n s=0.5, label=bldg_types[i], c=colors[i], alpha=0.5)\n# (DFs[DFName][0][26]/10**6).plot(label=DFName)\nplt.xlabel(r'Total Wastewater Treatment Demand ($m^3$/$m^2$)')\nplt.ylabel('Percent of Total GFA (%)')\n# plt.title('LCC')\nplt.ylim(0, 100)\nplt.xlim(0,11)\nplt.legend()\nplt.savefig('Bldg_Mix_vs_WWater_Demand_Disinteg.png', dpi=400, bbox_inches='tight')\n\n\n\n\n# Building Mix vs Neighborhood's Total Energy Demand (integrated)\nDFName = 'CCHP+WWT'\nbldg_types = ['Res','Off','Com','Ind','Hos','Med','Edu']\ncolors = ['m','b','c','g','y','orange','r']\ncolumns = list(range(29,36))\nplt.figure(figsize=(10,5))\nsortedDF = DFs[DFName].sort_values(by=40, ascending=True).reset_index(drop=True)\nfor i in range(len(bldg_types)):\n plt.scatter(x=(sortedDF[40]),y=DFs[DFName].iloc[:,columns[i]],\n s=0.5, label=bldg_types[i], c=colors[i], alpha=0.5)\n# (DFs[DFName][0][26]/10**6).plot(label=DFName)\nplt.xlabel(r'Total Energy Demand (MWh/$m^2$)')\nplt.ylabel('Percent of Total GFA (%)')\n# plt.title('LCC')\nplt.ylim(0, 100)\nplt.xlim(0,1)\nplt.legend()\nplt.savefig('Bldg_Mix_vs_Energy_Demand_Integ.png', dpi=400, bbox_inches='tight')\n\n\n\n# Building Mix vs Neighborhood's Total Energy Demand (Disintegrated)\nDFName = 'CCHP|CWWTP'\nbldg_types = ['Res','Off','Com','Ind','Hos','Med','Edu']\ncolors = ['m','b','c','g','y','orange','r']\ncolumns = list(range(29,36))\nplt.figure(figsize=(10,5))\nsortedDF = DFs[DFName].sort_values(by=40, ascending=True).reset_index(drop=True)\nfor i in range(len(bldg_types)):\n plt.scatter(x=(sortedDF[40]),y=DFs[DFName].iloc[:,columns[i]],\n s=0.5, label=bldg_types[i], c=colors[i], alpha=0.5)\n# (DFs[DFName][0][26]/10**6).plot(label=DFName)\nplt.xlabel(r'Total Energy Demand (MWh/$m^2$)')\nplt.ylabel('Percent of Total GFA (%)')\n# plt.title('LCC')\nplt.ylim(0, 100)\nplt.xlim(0,1)\nplt.legend()\nplt.savefig('Bldg_Mix_vs_Energy_Demand_Disinteg.png', dpi=400, 
bbox_inches='tight')\n\nplt.close('all')\n\n#############################################\nprint('plotting Supply type vs total neighborhood energy and ww graphs')\n\n# Total Energy Demand vs CHP\nplt.figure(figsize=(10,5))\nfor DFName in DFNames:\n sortedDF = DFs[DFName].sort_values(by=40, ascending=True).reset_index(drop=True)\n plt.scatter(x=DFs[DFName][21],y=(sortedDF[40]),label=DFName, s=2, alpha=0.5, c=colors_rb[DFName])\nplt.xlabel(r'CHP Type')\nplt.ylabel(r'Total Energy Demand (MWh/$m^2$)')\nplt.legend()\nplt.savefig('Total_Energy_vs_CHP.png', dpi=400, bbox_inches='tight')\n\n\n# Total WWater Demand vs CHP\nplt.figure(figsize=(10,5))\nfor DFName in DFNames:\n sortedDF = DFs[DFName].sort_values(by=41, ascending=True).reset_index(drop=True)\n plt.scatter(x=DFs[DFName][21],y=(sortedDF[41]/10**3),label=DFName, s=2, alpha=0.5, c=colors_rb[DFName])\nplt.xlabel(r'CHP Type')\nplt.ylabel(r'Total Wastewater Treatment Demand ($m^3$/$m^2$)')\nplt.legend()\nplt.savefig('Total_WWater_vs_CHP.png', dpi=400, bbox_inches='tight')\n\n\n# Total Energy Demand vs Chiller\nplt.figure(figsize=(10,5))\nfor DFName in DFNames:\n sortedDF = DFs[DFName].sort_values(by=40, ascending=True).reset_index(drop=True)\n plt.scatter(x=DFs[DFName][22],y=(sortedDF[40]),label=DFName, s=2, alpha=0.5, c=colors_rb[DFName])\nplt.xlabel(r'Chiller Type')\nplt.ylabel(r'Total Energy Demand (MWh/$m^2$)')\nplt.legend()\nplt.savefig('Total_Energy_vs_Chiller.png', dpi=400, bbox_inches='tight')\n\n\n# Total WWater Demand vs Chiller\nplt.figure(figsize=(10,5))\nfor DFName in DFNames:\n sortedDF = DFs[DFName].sort_values(by=41, ascending=True).reset_index(drop=True)\n plt.scatter(x=DFs[DFName][22],y=(sortedDF[41]/10**3),label=DFName, s=2, alpha=0.5, c=colors_rb[DFName])\nplt.xlabel(r'Chiller Type')\nplt.ylabel(r'Total Wastewater Treatment Demand ($m^3$/$m^2$)')\nplt.legend()\nplt.savefig('Total_WWater_vs_Chiller.png', dpi=400, bbox_inches='tight')\n\n\n# Total Energy Demand vs WWT (integrated)\nplt.figure(figsize=(10,5))\nDFName = 'CCHP+WWT'\nsortedDF = DFs[DFName].sort_values(by=40, ascending=True).reset_index(drop=True)\nplt.scatter(x=DFs[DFName][24],y=(sortedDF[40]),s=2, c=colors_rb[DFName])\nplt.xlabel(r'WWT Type')\nplt.ylabel(r'Total Energy Demand (MWh/$m^2$)')\nplt.legend()\nplt.savefig('Total_Energy_vs_WWT_Integ.png', dpi=400, bbox_inches='tight')\n\n\n# Total WWater Demand vs WWT (integrated)\nplt.figure(figsize=(10,5))\nDFName = 'CCHP+WWT'\nsortedDF = DFs[DFName].sort_values(by=41, ascending=True).reset_index(drop=True)\nplt.scatter(x=DFs[DFName][24],y=(sortedDF[41]/10**3), s=2, c=colors_rb[DFName])\nplt.xlabel(r'WWT Type')\nplt.ylabel(r'Total Wastewater Treatment Demand ($m^3$/$m^2$)')\nplt.savefig('Total_Wwater_vs_WWT_Integ.png', dpi=400, bbox_inches='tight')\n'''\nplt.close('all')\n\n#############################################\nprint('plotting pareto fronts')\n\n# LCC vs CO2\nplt.figure(figsize=(10,5))\nfor DFName in DFNames:\n plt.scatter(x=DFs[DFName][26]/10**3,y=DFs[DFName][39],label=DFName, s=2, alpha=0.5, c=colors_rb[DFName])\nplt.xlabel(r'LCC (k\\$/$m^2$)')\nplt.ylabel(r'Lifecycle $CO_{2e}$ (T/$m^2$)')\nplt.legend()\nplt.savefig('CO2_vs_LCC.png', dpi=400, bbox_inches='tight')\n\n\n\n\n#############################################\n\n# LCC vs SCC\nplt.figure(figsize=(10,5))\nfor DFName in DFNames:\n plt.scatter(x=DFs[DFName][26]/10**3,y=DFs[DFName][27]/10**3,label=DFName, s=2, alpha=0.5, c=colors_rb[DFName])\nplt.xlabel(r'LCC (k\\$/$m^2$)')\nplt.ylabel(r'SCC (k\\$/$m^2$)')\nplt.legend()\nplt.savefig('SCC_vs_LCC.png', 
dpi=400, bbox_inches='tight')\n\n\n# LCC vs SCC w Generation-based transparency\nplt.figure(figsize=(10,5))\nfor DFName in DFNames:\n alphas = np.linspace(0.1, 1, len(DFs[DFName]))\n rgba_colors = np.zeros((len(DFs[DFName]),4))\n if DFName == DFNames[0]:\n rgba_colors[:,0] = 1.0 # red\n else:\n rgba_colors[:,2] = 1.0 # blue\n rgba_colors[:,3] = alphas\n plt.scatter(x=DFs[DFName][26]/10**3,y=DFs[DFName][27]/10**3,label=DFName, s=1, c=rgba_colors)\nplt.xlabel(r'LCC (k\\$/$m^2$)')\nplt.ylabel(r'SCC (k\\$/$m^2$)')\nplt.legend()\nplt.savefig('SCC_vs_LCC_Gen_Colorcoded.png', dpi=400, bbox_inches='tight')\n\n\n# LCC vs SCC w Generation-based transparency and elite-filtered\nplt.figure(figsize=(10,5))\nfor DFName in DFNames:\n DF = DFs[DFName][DFs[DFName][26]/10**3 <= 500]\n DF = DF[DFs[DFName][27]/10**3 <= 0.1]\n alphas = np.linspace(0.1, 1, len(DF))\n rgba_colors = np.zeros((len(DF),4))\n if DFName == DFNames[0]:\n rgba_colors[:,0] = 1.0 # red\n else:\n rgba_colors[:,2] = 1.0 # blue\n rgba_colors[:,3] = alphas\n plt.scatter(x=DF[26]/10**3,y=DF[27]/10**3,label=DFName, s=1, c=rgba_colors)\nplt.xlabel(r'LCC (k\\$/$m^2$)')\nplt.ylabel(r'SCC (k\\$/$m^2$)')\nplt.legend()\nplt.savefig('SCC_vs_LCC_Gen_Colorcoded_Filtered.png', dpi=400, bbox_inches='tight')\n\n\n# =============================================================================\n# # LCC vs SCC (integrated)\n# plt.figure(figsize=(10,5))\n# DFName = 'CCHP+WWT'\n# plt.scatter(x=DFs[DFName][26]/10**3,y=DFs[DFName][27]/10**3, s=2)\n# plt.xlabel(r'LCC (k\\$/$m^2$)')\n# plt.ylabel(r'SCC (k\\$/$m^2$)')\n# plt.savefig('SCC_vs_LCC_Integ.png', dpi=400, bbox_inches='tight')\n# \n# \n# # LCC vs SCC (disintegrated)\n# plt.figure(figsize=(10,5))\n# DFName = 'CCHP|CWWTP'\n# plt.scatter(x=DFs[DFName][26]/10**3,y=DFs[DFName][27]/10**3, s=2)\n# # (DFs[DFName][0][26]/10**6).plot(label=DFName)\n# plt.xlabel(r'LCC (k\\$/$m^2$)')\n# plt.ylabel(r'SCC (k\\$/$m^2$)')\n# # plt.title('LCC')\n# plt.savefig('SCC_vs_LCC_Disinteg.png', dpi=400, bbox_inches='tight')\n# \n# =============================================================================\n\n#############################################\nprint('plotting Supply type vs opt objectives')\n\n\nprint('\\n#############################################')\nDisinteg_Grpd_by_CHP_meanLCC = DFs[DFNames[0]].groupby(21)[26].mean()\nDisnteg_Grpd_by_CHP_medLCC = DFs[DFNames[0]].groupby(21)[26].median()\nDisnteg_Grpd_by_CHP_meanSCC = DFs[DFNames[0]].groupby(21)[27].mean()\nDisnteg_Grpd_by_CHP_medSCC = DFs[DFNames[0]].groupby(21)[27].median()\nInteg_Grpd_by_CHP_meanLCC = DFs[DFNames[1]].groupby(21)[26].mean()\nInteg_Grpd_by_CHP_medLCC = DFs[DFNames[1]].groupby(21)[26].median()\nInteg_Grpd_by_CHP_meanSCC = DFs[DFNames[1]].groupby(21)[27].mean()\nInteg_Grpd_by_CHP_medSCC = DFs[DFNames[1]].groupby(21)[27].median()\nitems = [Disinteg_Grpd_by_CHP_meanLCC, Disnteg_Grpd_by_CHP_medLCC, Disnteg_Grpd_by_CHP_meanSCC,\n Disnteg_Grpd_by_CHP_medSCC, Integ_Grpd_by_CHP_meanLCC, Integ_Grpd_by_CHP_medLCC,\n Integ_Grpd_by_CHP_meanSCC, Integ_Grpd_by_CHP_medSCC]\nitems_names = ['Disinteg_Grpd_by_CHP_meanLCC', 'Disnteg_Grpd_by_CHP_medLCC', 'Disnteg_Grpd_by_CHP_meanSCC',\n 'Disnteg_Grpd_by_CHP_medSCC', 'Integ_Grpd_by_CHP_meanLCC', 'Integ_Grpd_by_CHP_medLCC',\n 'Integ_Grpd_by_CHP_meanSCC', 'Integ_Grpd_by_CHP_medSCC']\nfor i in range(len(items)):\n print(items_names[i], items[i])\nprint('#############################################\\n')\n\n\n\n# shapes = {DFNames[0]: '+', DFNames[1]: 'x'}\n\n\n# LCC vs CHP\nfor DFName in DFNames:\n 
plt.figure(figsize=(10,5))\n DF = DFs[DFName].sort_values(by=21)\n plt.scatter(x=DF[21], y=DF[26]/10**3,label=DFName, s=2, alpha=0.5)#, c=colors_rb[DFName])#, marker=shapes[DFName])\n plt.xlabel(r'CHP Type')\n plt.xticks(rotation=75)\n plt.ylabel(r'LCC (k\\$/$m^2$)')\n plt.ylim(-5, 500)\n # plt.legend()\n if DFName == 'CCHP|CWWTP':\n plt.savefig('LCC_vs_CHP_disinteg.png', dpi=400, bbox_inches='tight')\n else:\n plt.savefig('LCC_vs_CHP_integ.png', dpi=400, bbox_inches='tight')\n\n\n# SCC vs CHP\nfor DFName in DFNames:\n plt.figure(figsize=(10,5))\n DF = DFs[DFName].sort_values(by=21)\n plt.scatter(x=DF[21], y=DF[27]/10**3,label=DFName, s=2, alpha=0.5)#, c=colors_rb[DFName])\n plt.xlabel(r'CHP Type')\n plt.xticks(rotation=75)\n plt.ylabel(r'SCC (k\\$/$m^2$)')\n plt.ylim(-0.01, 0.1)\n # plt.legend()\n if DFName == 'CCHP|CWWTP':\n plt.savefig('SCC_vs_CHP_disinteg.png', dpi=400, bbox_inches='tight')\n else:\n plt.savefig('SCC_vs_CHP_integ.png', dpi=400, bbox_inches='tight')\n\n\n# SCC vs CHP with LCC-oriented transparency\nfor DFName in DFNames:\n plt.figure(figsize=(10,5))\n DF = DFs[DFName].sort_values(by=21)\n DF = DF[(DF[26]<=100) & (DF[27]<=100)]\n print('number of indivs plotted: ', len(DF))\n alphas = 1.2 - DF[26]/DF[26].max() # Normalized LCCs (lowest LCC: 1; highest LCC: 0)\n # alphas = np.linspace(0.1, 1, len(DFs[DFName]))\n rgba_colors = np.zeros((len(DF),4))\n rgba_colors[:,3] = alphas\n plt.scatter(x=DF[21],y=DF[27]/10**3,label=DFName, s=1, c=rgba_colors)\n plt.xlabel(r'CHP Type')\n plt.xticks(rotation=75)\n plt.ylabel(r'SCC (k\\$/$m^2$)')\n plt.ylim(-0.01, 0.1)\n # plt.legend()\n if DFName == 'CCHP|CWWTP':\n plt.savefig('SCC_vs_CHP_disinteg_colorCoded.png', dpi=400, bbox_inches='tight')\n else:\n plt.savefig('SCC_vs_CHP_integ_colorCoded.png', dpi=400, bbox_inches='tight')\n \n \n\n# =============================================================================\n# # LCC vs CHP (integrated)\n# plt.figure(figsize=(10,5))\n# DFName = 'CCHP+WWT'\n# plt.scatter(x=DFs[DFName][21], y=DFs[DFName][26]/10**3, s=2)\n# plt.xlabel(r'CHP Type')\n# plt.ylabel(r'LCC (k\\$/$m^2$)')\n# plt.savefig('LCC_vs_CHP_Integ.png', dpi=400, bbox_inches='tight')\n# \n# \n# # LCC vs CHP (disintegrated)\n# plt.figure(figsize=(10,5))\n# DFName = 'CCHP|CWWTP'\n# plt.scatter(x=DFs[DFName][21], y=DFs[DFName][26]/10**3, s=2)\n# plt.xlabel(r'CHP Type')\n# plt.ylabel(r'LCC (k\\$/$m^2$)')\n# plt.savefig('LCC_vs_CHP_Disinteg.png', dpi=400, bbox_inches='tight')\n# =============================================================================\n\n\n\n# LCC vs Chiller\nfor DFName in DFNames:\n plt.figure(figsize=(10,5))\n DF = DFs[DFName].sort_values(by=22)\n plt.scatter(x=DF[22], y=DF[26]/10**3,label=DFName, s=2, alpha=0.5)#, c=colors_rb[DFName])\n plt.xlabel(r'Chiller Type')\n plt.xticks(rotation=75)\n plt.ylabel(r'LCC (k\\$/$m^2$)')\n plt.ylim(-5, 500)\n # plt.legend()\n if DFName == 'CCHP|CWWTP':\n plt.savefig('LCC_vs_Chiller_disinteg.png', dpi=400, bbox_inches='tight')\n else:\n plt.savefig('LCC_vs_Chiller_integ.png', dpi=400, bbox_inches='tight')\n\n\n# SCC vs Chiller\nfor DFName in DFNames:\n plt.figure(figsize=(10,5))\n DF = DFs[DFName].sort_values(by=22)\n plt.scatter(x=DF[22], y=DF[27]/10**3,label=DFName, s=2, alpha=0.5)#, c=colors_rb[DFName])\n plt.xlabel(r'Chiller Type')\n plt.xticks(rotation=75)\n plt.ylabel(r'SCC (k\\$/$m^2$)')\n plt.ylim(-0.01, 0.1)\n # plt.legend()\n if DFName == 'CCHP|CWWTP':\n plt.savefig('SCC_vs_Chiller_disinteg.png', dpi=400, bbox_inches='tight')\n else:\n 
plt.savefig('SCC_vs_Chiller_integ.png', dpi=400, bbox_inches='tight')\n \n \n# SCC vs Chiller with LCC-oriented transparency\nfor DFName in DFNames:\n plt.figure(figsize=(10,5))\n DF = DFs[DFName].sort_values(by=22)\n DF = DF[(DF[26]<=100) & (DF[27]<=0.5)]\n print('number of indivs plotted: ', len(DF))\n alphas = 1 - DF[26]/DF[26].max() # Normalized LCCs (lowest LCC: 1; highest LCC: 0)\n # alphas = np.linspace(0.1, 1, len(DFs[DFName]))\n rgba_colors = np.zeros((len(DF),4))\n rgba_colors[:,3] = alphas\n plt.scatter(x=DF[22],y=DF[27]/10**3,label=DFName, s=1, c=rgba_colors)\n plt.xlabel(r'Chiller Type')\n plt.xticks(rotation=75)\n plt.ylabel(r'SCC (k\\$/$m^2$)')\n plt.ylim(-0.01, 0.1)\n # plt.legend()\n if DFName == 'CCHP|CWWTP':\n plt.savefig('SCC_vs_Chiller_disinteg_colorCoded.png', dpi=400, bbox_inches='tight')\n else:\n plt.savefig('SCC_vs_Chiller_integ_colorCoded.png', dpi=400, bbox_inches='tight')\n\n\n\n# =============================================================================\n# # LCC vs Chiller (integrated)\n# plt.figure(figsize=(10,5))\n# DFName = 'CCHP+WWT'\n# plt.scatter(x=DFs[DFName][22], y=DFs[DFName][26]/10**3, s=2)\n# plt.xlabel(r'Chiller Type')\n# plt.ylabel(r'LCC (k\\$/$m^2$)')\n# plt.savefig('LCC_vs_Chiller_Integ.png', dpi=400, bbox_inches='tight')\n# \n# \n# # LCC vs Chiller (disintegrated)\n# plt.figure(figsize=(10,5))\n# DFName = 'CCHP|CWWTP'\n# plt.scatter(x=DFs[DFName][22], y=DFs[DFName][26]/10**3, s=2)\n# plt.xlabel(r'Chiller Type')\n# plt.ylabel(r'LCC (k\\$/$m^2$)')\n# plt.savefig('LCC_vs_Chiller_Disinteg.png', dpi=400, bbox_inches='tight')\n# =============================================================================\n\n\n\n# LCC vs WWT (integrated)\nplt.figure(figsize=(10,5))\nDFName = 'CCHP+WWT'\nDF = DFs[DFName].sort_values(by=24)\nplt.scatter(x=DF[24], y=DF[26]/10**3, s=2)#, c=colors_rb[DFName])\nplt.xlabel(r'WWT Type')\nplt.xticks(rotation=75)\nplt.ylabel(r'LCC (k\\$/$m^2$)')\nplt.ylim(-5, 500)\nplt.savefig('LCC_vs_WWT_Integ.png', dpi=400, bbox_inches='tight')\n\n\n\n# SCC vs WWT (integrated)\nplt.figure(figsize=(10,5))\nDFName = 'CCHP+WWT'\nDF = DFs[DFName].sort_values(by=24)\nplt.scatter(x=DF[24], y=DF[27]/10**3, s=2)#, c=colors_rb[DFName])\nplt.xlabel(r'WWT Type')\nplt.xticks(rotation=75)\nplt.ylabel(r'SCC (k\\$/$m^2$)')\nplt.ylim(-0.01, 0.1)\nplt.savefig('SCC_vs_WWT_Integ.png', dpi=400, bbox_inches='tight')\n\n\n\n# SCC vs WWT with LCC-oriented transparency (integrated)\nplt.figure(figsize=(10,5))\nDFName = 'CCHP+WWT'\nDF = DFs[DFName].sort_values(by=24)\nDF = DF[(DF[26]<=100) & (DF[27]<=0.5)]\nprint('number of indivs plotted: ', len(DF))\nalphas = 1 - DF[26]/DF[26].max() # Normalized LCCs (lowest LCC: 1; highest LCC: 0)\n# alphas = np.linspace(0.1, 1, len(DFs[DFName]))\nrgba_colors = np.zeros((len(DF),4))\nrgba_colors[:,3] = alphas\nplt.scatter(x=DF[24],y=DF[27]/10**3,s=1, c=rgba_colors)\nplt.xlabel(r'WWT Type')\nplt.xticks(rotation=75)\nplt.ylabel(r'SCC (k\\$/$m^2$)')\nplt.ylim(-0.01, 0.1)\nplt.savefig('SCC_vs_WWT_Integ_colorCoded.png', dpi=400, bbox_inches='tight')\n\nplt.close('all')\n\n#############################################\n'''\nprint('plotting building mix traces')\n\n# Building Mix trace plots\nDFName = 'CCHP+WWT'\nplt.figure(figsize=(10,5))\nfig = plt.figure(figsize=(10,5))\nax = fig.add_subplot(111)\nNum_Individuals = len(DFs[DFName])\ncm = plt.get_cmap('rainbow')\nax.set_prop_cycle(color=[cm(1.*i/Num_Individuals) for i in range(Num_Individuals)])#ax.set_color_cycle([cm(1.*i/Num_Individuals) for i in range(Num_Individuals)])\nfor i 
in range(Num_Individuals):\n ax.plot(['Res','Off','Com','Ind','Hos','Med','Edu'],\n DFs[DFName].iloc[i,29:36],linewidth=0.2, alpha=0.5)\nax.set_xlabel('Building-Use')\nax.set_ylabel('Percent of Total GFA (%)')\nplt.ylim(0, 100)\nfig.savefig('Uses_Integ.png', dpi=400, bbox_inches='tight')\n\n\n\n\nDFName = 'CCHP|CWWTP'\nfig = plt.figure(figsize=(10,5))\nax = fig.add_subplot(111)\nNum_Individuals = len(DFs[DFName])\ncm = plt.get_cmap('rainbow')\nax.set_prop_cycle(color=[cm(1.*i/Num_Individuals) for i in range(Num_Individuals)])#ax.set_color_cycle([cm(1.*i/Num_Individuals) for i in range(Num_Individuals)])\ny_array = np.array(DFs[DFName].iloc[:,29:36])\nfor i in range(Num_Individuals):\n ax.plot(['Res','Off','Com','Ind','Hos','Med','Edu'],\n DFs[DFName].iloc[i,29:36],linewidth=0.2, alpha=0.5)\nax.set_xlabel('Building-Use')\nax.set_ylabel('Percent of Total GFA (%)')\nplt.ylim(0, 100)\nfig.savefig('Uses_Disinteg.png', dpi=400, bbox_inches='tight')\nplt.close('all')\n'''\n" ]
[ [ "matplotlib.pyplot.style.use", "matplotlib.pyplot.legend", "matplotlib.pyplot.xticks", "matplotlib.pyplot.figure", "matplotlib.pyplot.savefig", "pandas.DataFrame", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.close", "matplotlib.pyplot.ylim", "numpy.loadtxt", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.scatter" ] ]
Felihong/wikidata-sequence-analysis
[ "1d86ad9812c90864eb2c9ab72e5e61474d439f1e" ]
[ "src/sequence_generator.py" ]
[ "import pandas as pd\nfrom itertools import groupby\nfrom operator import itemgetter\n\nclass SequenceGenerator:\n\n def __init__(self, csvfile, jsThreshold):\n self.datafile = csvfile\n self.jsThreshold = jsThreshold\n\n \"\"\"\n Convert the input csv file into dataframe\n \"\"\"\n def _csv2df(self):\n return pd.read_csv(self.datafile, dtype={'item_id':int, 'user_id':str})\n\n \"\"\"\n Generate database by selecting the non-null sequences satisfying the js-distance threshold\n \"\"\"\n def generate_db(self):\n db = self._csv2df()[['item_id', 'user_id', 'edit_type', 'rev_timestamp', 'js_distance']].sort_values(by=['item_id','rev_timestamp'])\n filter = db.loc[db['js_distance'] >= self.jsThreshold][['item_id', 'user_id', 'edit_type']]\n return filter[filter.user_id.notnull()]\n\n def generate_dev_db(self, dev):\n db = self._csv2df()[['item_id', 'user_id', 'edit_type', 'rev_timestamp', 'prediction', 'js_distance']].sort_values(by=['item_id', 'rev_timestamp'])\n filter = db.loc[(db['js_distance']>=self.jsThreshold) & (db['prediction']==dev)][['item_id', 'user_id', 'edit_type']]\n return filter[filter.user_id.notnull()]\n\n \"\"\"\n Generate the sequence database by integrating all edits conducted upon one article in a list, where\n the serial edits from the same editor are collapsed into one sub-list\n Args: \n csv file of scheme: article_id : int\n editor_id : int \n edit_type : string \n Return:\n A list of list [[a], [b]], where a and b are collapsed edit types \n \"\"\"\n def generate_sequence(self):\n db = self.generate_db()\n df = db.groupby(['item_id', 'user_id']).agg({'edit_type': list})\n result = df.groupby(['item_id']).agg({'edit_type': list})\n tmp = []\n for ls in result.values.tolist():\n tmp.append(ls[0])\n return tmp\n\n def generate_dev_sequence(self, dev):\n db = self.generate_dev_db(dev=dev)\n df = db.groupby(['item_id', 'user_id']).agg({'edit_type': list})\n return df.values.tolist()" ]
[ [ "pandas.read_csv" ] ]
cvmxn1/OpenFermion
[ "cf53c063d0f124a02ff8776bb7f8afb110d4bde6" ]
[ "src/openfermion/resource_estimates/molecule/pyscf_utils.py" ]
[ "#coverage:ignore\n\"\"\" Drivers for various PySCF electronic structure routines \"\"\"\nfrom typing import Tuple, Optional\nimport sys\nimport h5py\nimport numpy as np\nfrom pyscf import gto, scf, ao2mo, mcscf, lo, tools, cc\nfrom pyscf.mcscf import avas\n\n\ndef stability(pyscf_mf):\n \"\"\"\n Test wave function stability and re-optimize SCF.\n\n Args:\n pyscf_mf: PySCF mean field object (e.g. `scf.RHF()`)\n\n Returns:\n pyscf_mf: Updated PySCF mean field object\n \"\"\"\n new_orbitals = pyscf_mf.stability()[0]\n new_1rdm = pyscf_mf.make_rdm1(new_orbitals, pyscf_mf.mo_occ)\n pyscf_mf = pyscf_mf.run(new_1rdm)\n\n return pyscf_mf\n\n\ndef localize(pyscf_mf, loc_type='pm', verbose=0):\n \"\"\" Localize orbitals given a PySCF mean-field object\n\n Args:\n pyscf_mf: PySCF mean field object\n loc_type (str): localization type;\n Pipek-Mezey ('pm') or Edmiston-Rudenberg ('er')\n verbose (int): print level during localization\n\n Returns:\n pyscf_mf: Updated PySCF mean field object with localized orbitals\n \"\"\"\n # Note: After loading with `load_casfile_to_pyscf()` you can quiet message\n # by resetting mf.mol, i.e., mf.mol = gto.M(...)\n # but this assumes you have the *exact* molecular specification on hand.\n # I've gotten acceptable results by restoring mf.mol this way (usually\n # followed by calling mf.kernel()). But consistent localization is not a\n # given (not unique) despite restoring data this way, hence the message.\n if len(pyscf_mf.mol.atom) == 0:\n sys.exit(\"`localize()` requires atom loc. and atomic basis to be\" + \\\n \" defined.\\n \" + \\\n \"It also can be sensitive to the initial guess and MO\" + \\\n \" coefficients.\\n \" + \\\n \"Best to try re-creating the PySCF molecule and doing the\" + \\\n \" SCF, rather than\\n \" + \\\n \"try to load the mean-field object with\" + \\\n \" `load_casfile_to_pyscf()`. You can \\n \" + \\\n \"try to provide the missing information, but consistency\" + \\\n \" cannot be guaranteed!\")\n\n # Split-localize (localize DOCC, SOCC, and virtual separately)\n docc_idx = np.where(np.isclose(pyscf_mf.mo_occ, 2.))[0]\n socc_idx = np.where(np.isclose(pyscf_mf.mo_occ, 1.))[0]\n virt_idx = np.where(np.isclose(pyscf_mf.mo_occ, 0.))[0]\n\n # Pipek-Mezey\n if loc_type.lower() == 'pm':\n print(\"Localizing doubly occupied ... \", end=\"\")\n loc_docc_mo = lo.PM(\n pyscf_mf.mol,\n pyscf_mf.mo_coeff[:, docc_idx]).kernel(verbose=verbose)\n print(\"singly occupied ... \", end=\"\")\n loc_socc_mo = lo.PM(\n pyscf_mf.mol,\n pyscf_mf.mo_coeff[:, socc_idx]).kernel(verbose=verbose)\n print(\"virtual ... \", end=\"\")\n loc_virt_mo = lo.PM(\n pyscf_mf.mol,\n pyscf_mf.mo_coeff[:, virt_idx]).kernel(verbose=verbose)\n print(\"DONE\")\n\n # Edmiston-Rudenberg\n elif loc_type.lower() == 'er':\n print(\"Localizing doubly occupied ... \", end=\"\")\n loc_docc_mo = lo.ER(\n pyscf_mf.mol,\n pyscf_mf.mo_coeff[:, docc_idx]).kernel(verbose=verbose)\n print(\"singly occupied ... \", end=\"\")\n loc_socc_mo = lo.ER(\n pyscf_mf.mol,\n pyscf_mf.mo_coeff[:, socc_idx]).kernel(verbose=verbose)\n print(\"virtual ... 
\", end=\"\")\n loc_virt_mo = lo.ER(\n pyscf_mf.mol,\n pyscf_mf.mo_coeff[:, virt_idx]).kernel(verbose=verbose)\n print(\"DONE\")\n\n # overwrite orbitals with localized orbitals\n pyscf_mf.mo_coeff[:, docc_idx] = loc_docc_mo.copy()\n pyscf_mf.mo_coeff[:, socc_idx] = loc_socc_mo.copy()\n pyscf_mf.mo_coeff[:, virt_idx] = loc_virt_mo.copy()\n\n return pyscf_mf\n\n\ndef avas_active_space(pyscf_mf,\n ao_list=None,\n molden_fname='avas_localized_orbitals',\n **kwargs):\n \"\"\" Return AVAS active space as PySCF molecule and mean-field object\n\n Args:\n pyscf_mf: PySCF mean field object\n\n Kwargs:\n ao_list: list of strings of AOs (print mol.ao_labels() to see options)\n Example: ao_list = ['H 1s', 'O 2p', 'O 2s'] for water\n verbose (bool): do additional print\n molden_fname (str): MOLDEN filename to save AVAS active space orbitals.\n Default is to save\n to 'avas_localized_orbitals.molden'\n **kwargs: other keyworded arguments to pass into avas.avas()\n\n Returns:\n pyscf_active_space_mol: Updated PySCF molecule object from\n AVAS-selected active space\n pyscf_active_space_mf: Updated PySCF mean field object from\n AVAS-selected active space\n \"\"\"\n\n # Note: requires openshell_option = 3 for this to work, which keeps all\n # singly occupied in CAS\n # we also require canonicalize = False so that we don't destroy local orbs\n avas_output = avas.avas(pyscf_mf,\n ao_list,\n canonicalize=False,\n openshell_option=3,\n **kwargs)\n active_norb, active_ne, reordered_orbitals = avas_output\n\n active_alpha, _ = get_num_active_alpha_beta(pyscf_mf, active_ne)\n\n if molden_fname is not None:\n # save set of localized orbitals for active space\n if isinstance(pyscf_mf, scf.rohf.ROHF):\n frozen_alpha = pyscf_mf.nelec[0] - active_alpha\n assert frozen_alpha >= 0\n else:\n frozen_alpha = pyscf_mf.mol.nelectron // 2 - active_alpha\n assert frozen_alpha >= 0\n\n active_space_idx = slice(frozen_alpha, frozen_alpha + active_norb)\n active_mos = reordered_orbitals[:, active_space_idx]\n tools.molden.from_mo(pyscf_mf.mol,\n molden_fname + '.molden',\n mo_coeff=active_mos)\n\n # Choosing an active space changes the molecule (\"freezing\" electrons,\n # for example), so we\n # form the active space tensors first, then re-form the PySCF objects to\n # ensure consistency\n pyscf_active_space_mol, pyscf_active_space_mf = cas_to_pyscf(\n *pyscf_to_cas(pyscf_mf,\n cas_orbitals=active_norb,\n cas_electrons=active_ne,\n avas_orbs=reordered_orbitals))\n\n return pyscf_active_space_mol, pyscf_active_space_mf\n\n\ndef cas_to_pyscf(h1, eri, ecore, num_alpha, num_beta):\n \"\"\" Return a PySCF molecule and mean-field object from pre-computed CAS Ham\n\n Args:\n h1 (ndarray) - 2D matrix containing one-body terms (MO basis)\n eri (ndarray) - 4D tensor containing two-body terms (MO basis)\n ecore (float) - frozen core electronic energy + nuclear repulsion energy\n num_alpha (int) - number of spin up electrons in CAS space\n num_beta (int) - number of spin down electrons in CAS space\n\n Returns:\n pyscf_mol: PySCF molecule object\n pyscf_mf: PySCF mean field object\n \"\"\"\n\n n_orb = len(h1) # number orbitals\n assert [n_orb] * 4 == [*eri.shape] # check dims are consistent\n\n pyscf_mol = gto.M()\n pyscf_mol.nelectron = num_alpha + num_beta\n n_orb = h1.shape[0]\n alpha_diag = [1] * num_alpha + [0] * (n_orb - num_alpha)\n beta_diag = [1] * num_beta + [0] * (n_orb - num_beta)\n\n # Assumes Hamiltonian is either RHF or ROHF ... 
should be OK since UHF will\n # have two h1s, etc.\n if num_alpha == num_beta:\n pyscf_mf = scf.RHF(pyscf_mol)\n scf_energy = ecore + \\\n 2*np.einsum('ii', h1[:num_alpha,:num_alpha]) + \\\n 2*np.einsum('iijj',\n eri[:num_alpha,:num_alpha,:num_alpha,:num_alpha]) - \\\n np.einsum('ijji',\n eri[:num_alpha,:num_alpha,:num_alpha,:num_alpha])\n\n else:\n pyscf_mf = scf.ROHF(pyscf_mol)\n pyscf_mf.nelec = (num_alpha, num_beta)\n # grab singly and doubly occupied orbitals (assume high-spin open shell)\n docc = slice(None, min(num_alpha, num_beta))\n socc = slice(min(num_alpha, num_beta), max(num_alpha, num_beta))\n scf_energy = ecore + \\\n 2.0*np.einsum('ii',h1[docc, docc]) + \\\n np.einsum('ii',h1[socc, socc]) + \\\n 2.0*np.einsum('iijj',eri[docc, docc, docc, docc]) - \\\n np.einsum('ijji',eri[docc, docc, docc, docc]) + \\\n np.einsum('iijj',eri[socc, socc, docc, docc]) - \\\n 0.5*np.einsum('ijji',eri[socc, docc, docc, socc]) + \\\n np.einsum('iijj',eri[docc, docc, socc, socc]) - \\\n 0.5*np.einsum('ijji',eri[docc, socc, socc, docc]) + \\\n 0.5*np.einsum('iijj',eri[socc, socc, socc, socc]) - \\\n 0.5*np.einsum('ijji',eri[socc, socc, socc, socc])\n\n pyscf_mf.get_hcore = lambda *args: np.asarray(h1)\n pyscf_mf.get_ovlp = lambda *args: np.eye(h1.shape[0])\n pyscf_mf.energy_nuc = lambda *args: ecore\n pyscf_mf._eri = eri # ao2mo.restore('8', np.zeros((8, 8, 8, 8)), 8)\n pyscf_mf.e_tot = scf_energy\n\n pyscf_mf.init_guess = '1e'\n pyscf_mf.mo_coeff = np.eye(n_orb)\n pyscf_mf.mo_occ = np.array(alpha_diag) + np.array(beta_diag)\n pyscf_mf.mo_energy, _ = np.linalg.eigh(pyscf_mf.get_fock())\n\n return pyscf_mol, pyscf_mf\n\n\ndef pyscf_to_cas(pyscf_mf,\n cas_orbitals: Optional[int] = None,\n cas_electrons: Optional[int] = None,\n avas_orbs=None):\n \"\"\" Return CAS Hamiltonian tensors from a PySCF mean-field object\n\n Args:\n pyscf_mf: PySCF mean field object\n cas_orbitals (int, optional): number of orbitals in CAS space,\n default all orbitals\n cas_electrons (int, optional): number of electrons in CAS space,\n default all electrons\n avas_orbs (ndarray, optional): orbitals selected by AVAS in PySCF\n\n Returns:\n h1 (ndarray) - 2D matrix containing one-body terms (MO basis)\n eri (ndarray) - 4D tensor containing two-body terms (MO basis)\n ecore (float) - frozen core electronic energy + nuclear repulsion energy\n num_alpha (int) - number of spin up electrons in CAS space\n num_beta (int) - number of spin down electrons in CAS space\n \"\"\"\n\n # Only RHF or ROHF possible with mcscf.CASCI\n assert isinstance(pyscf_mf, scf.rhf.RHF) # ROHF is child of RHF class\n\n if cas_orbitals is None:\n cas_orbitals = len(pyscf_mf.mo_coeff)\n if cas_electrons is None:\n cas_electrons = pyscf_mf.mol.nelectron\n\n cas = mcscf.CASCI(pyscf_mf, ncas=cas_orbitals, nelecas=cas_electrons)\n h1, ecore = cas.get_h1eff(mo_coeff=avas_orbs)\n eri = cas.get_h2cas(mo_coeff=avas_orbs)\n eri = ao2mo.restore('s1', eri, h1.shape[0]) # chemist convention (11|22)\n ecore = float(ecore)\n\n num_alpha, num_beta = get_num_active_alpha_beta(pyscf_mf, cas_electrons)\n\n return h1, eri, ecore, num_alpha, num_beta\n\n\ndef get_num_active_alpha_beta(pyscf_mf, cas_electrons):\n \"\"\" Return number of alpha and beta electrons in the active space given\n number of CAS electrons\n This assumes that all the unpaired electrons are in the active space\n\n Args:\n pyscf_mf: PySCF mean field object\n cas_orbitals (int): number of electrons in CAS space,\n\n Returns:\n num_alpha (int): number of alpha (spin-up) electrons in active space\n num_beta 
(int): number of beta (spin-down) electrons in active space\n \"\"\"\n # Sanity checks and active space info\n total_electrons = pyscf_mf.mol.nelectron\n frozen_electrons = total_electrons - cas_electrons\n assert frozen_electrons % 2 == 0\n\n # ROHF == RHF but RHF != ROHF, and we only do either RHF or ROHF\n if isinstance(pyscf_mf, scf.rohf.ROHF):\n frozen_alpha = frozen_electrons // 2\n frozen_beta = frozen_electrons // 2\n num_alpha = pyscf_mf.nelec[0] - frozen_alpha\n num_beta = pyscf_mf.nelec[1] - frozen_beta\n assert np.isclose(num_beta + num_alpha, cas_electrons)\n\n else:\n assert cas_electrons % 2 == 0\n num_alpha = cas_electrons // 2\n num_beta = cas_electrons // 2\n\n return num_alpha, num_beta\n\n\ndef load_casfile_to_pyscf(fname,\n num_alpha: Optional[int] = None,\n num_beta: Optional[int] = None):\n \"\"\" Load CAS Hamiltonian from pre-computed HD5 file into a PySCF molecule\n and mean-field object\n\n Args:\n fname (str): path to hd5 file to be created containing CAS one and two\n body terms\n num_alpha (int, optional): number of spin up electrons in CAS space\n num_beta (int, optional): number of spin down electrons in CAS space\n\n Returns:\n pyscf_mol: PySCF molecule object\n pyscf_mf: PySCF mean field object\n \"\"\"\n\n with h5py.File(fname, \"r\") as f:\n eri = np.asarray(f['eri'][()])\n # h1 one body elements are sometimes called different things. Try a few.\n try:\n h1 = np.asarray(f['h0'][()])\n except KeyError:\n try:\n h1 = np.asarray(f['hcore'][()])\n except KeyError:\n try:\n h1 = np.asarray(f['h1'][()])\n except KeyError:\n raise KeyError(\"Could not find 1-electron Hamiltonian\")\n # ecore sometimes exists, and sometimes as enuc (no frozen electrons)\n try:\n ecore = float(f['ecore'][()])\n except KeyError:\n try:\n ecore = float(f['enuc'][()])\n except KeyError:\n ecore = 0.0\n # read the number of spin up and spin down electrons if not input\n if (num_alpha is None) or (num_beta is None):\n try:\n num_alpha = int(f['active_nalpha'][()])\n except KeyError:\n sys.exit(\"In `load_casfile_to_pyscf()`: \\n\" + \\\n \" No values found on file for num_alpha \" + \\\n \"(key: 'active_nalpha' in h5). \" + \\\n \" Try passing in a value for num_alpha, or\" + \\\n \" re-check integral file.\")\n try:\n num_beta = int(f['active_nbeta'][()])\n except KeyError:\n sys.exit(\"In `load_casfile_to_pyscf()`: \\n\" + \\\n \" No values found on file for num_beta \" + \\\n \"(key: 'active_nbeta' in h5). 
\" + \\\n \" Try passing in a value for num_beta, or\" + \\\n \" re-check integral file.\")\n\n pyscf_mol, pyscf_mf = cas_to_pyscf(h1, eri, ecore, num_alpha, num_beta)\n\n return pyscf_mol, pyscf_mf\n\n\ndef save_pyscf_to_casfile(fname,\n pyscf_mf,\n cas_orbitals: Optional[int] = None,\n cas_electrons: Optional[int] = None,\n avas_orbs=None):\n \"\"\" Save CAS Hamiltonian from a PySCF mean-field object to an HD5 file\n\n Args:\n fname (str): path to hd5 file to be created containing CAS terms\n pyscf_mf: PySCF mean field object\n cas_orbitals (int, optional): number of orb in CAS space, default all\n cas_electrons (int, optional): number of elec in CAS, default all elec\n avas_orbs (ndarray, optional): orbitals selected by AVAS in PySCF\n \"\"\"\n h1, eri, ecore, num_alpha, num_beta = \\\n pyscf_to_cas(pyscf_mf, cas_orbitals, cas_electrons, avas_orbs)\n\n with h5py.File(fname, 'w') as fid:\n fid.create_dataset('ecore', data=float(ecore), dtype=float)\n fid.create_dataset(\n 'h0',\n data=h1) # note the name change to be consistent with THC paper\n fid.create_dataset('eri', data=eri)\n fid.create_dataset('active_nalpha', data=int(num_alpha), dtype=int)\n fid.create_dataset('active_nbeta', data=int(num_beta), dtype=int)\n\n\ndef factorized_ccsd_t(pyscf_mf, eri_rr = None, use_kernel = True,\\\n no_triples=False) -> Tuple[float, float, float]:\n \"\"\" Compute CCSD(T) energy using rank-reduced ERIs\n\n Args:\n pyscf_mf - PySCF mean field object\n eri_rr (ndarray) - rank-reduced ERIs, or use full ERIs from pyscf_mf\n use_kernel (bool) - re-do SCF, using canonical orbitals for one-body?\n no_triples (bool) - skip the perturbative triples correction? (CCSD)\n\n Returns:\n e_scf (float) - SCF energy\n e_cor (float) - Correlation energy from CCSD(T)\n e_tot (float) - Total energy; i.e. SCF + Corr energy from CCSD(T)\n \"\"\"\n h1, eri_full, ecore, num_alpha, num_beta = pyscf_to_cas(pyscf_mf)\n\n # If no rank-reduced ERIs, use the full (possibly local) ERIs from pyscf_mf\n if eri_rr is None:\n eri_rr = eri_full\n\n e_scf, e_cor, e_tot = ccsd_t(h1, eri_rr, ecore, num_alpha, num_beta,\\\n eri_full, use_kernel, no_triples)\n\n return e_scf, e_cor, e_tot\n\n\ndef ccsd_t(h1, eri, ecore, num_alpha: int, num_beta: int, eri_full = None,\\\n use_kernel=True, no_triples=False) -> Tuple[float, float, float]:\n \"\"\" Helper function to do CCSD(T) on set of one- and two-body Hamil elems\n\n Args:\n h1 (ndarray) - 2D matrix containing one-body terms (MO basis)\n eri (ndarray) - 4D tensor containing two-body terms (MO basis)\n may be from integral factorization (e.g. SF/DF/THC)\n ecore (float) - frozen core electronic energy + nuclear repulsion energy\n num_alpha (int) - number of spin alpha electrons in Hamiltonian\n num_beta (int) - number of spin beta electrons in Hamiltonian\n eri_full (ndarray) - optional 4D tensor containing full two-body\n terms (MO basis) for the SCF procedure only\n use_kernel (bool) - re-run SCF prior to doing CCSD(T)?\n no_triples (bool) - skip the perturbative triples correction? (CCSD)\n\n Returns:\n e_scf (float) - SCF energy\n e_cor (float) - Correlation energy from CCSD(T)\n e_tot (float) - Total energy; i.e. SCF + Corr energy from CCSD(T)\n \"\"\"\n\n mol = gto.M()\n mol.nelectron = num_alpha + num_beta\n n_orb = h1.shape[0]\n alpha_diag = [1] * num_alpha + [0] * (n_orb - num_alpha)\n beta_diag = [1] * num_beta + [0] * (n_orb - num_beta)\n\n # If eri_full not provided, use (possibly rank-reduced) ERIs for check\n if eri_full is None:\n eri_full = eri\n\n # either RHF or ROHF ... 
should be OK since UHF will have two h1s, etc.\n if num_alpha == num_beta:\n mf = scf.RHF(mol)\n scf_energy = ecore + \\\n 2*np.einsum('ii',h1[:num_alpha,:num_alpha]) + \\\n 2*np.einsum('iijj',eri_full[:num_alpha,\\\n :num_alpha,\\\n :num_alpha,\\\n :num_alpha]) - \\\n np.einsum('ijji',eri_full[:num_alpha,\\\n :num_alpha,\\\n :num_alpha,\\\n :num_alpha])\n\n else:\n mf = scf.ROHF(mol)\n mf.nelec = (num_alpha, num_beta)\n # grab singly and doubly occupied orbitals (assume high-spin open shell)\n docc = slice(None, min(num_alpha, num_beta))\n socc = slice(min(num_alpha, num_beta), max(num_alpha, num_beta))\n scf_energy = ecore + \\\n 2.0*np.einsum('ii',h1[docc, docc]) + \\\n np.einsum('ii',h1[socc, socc]) + \\\n 2.0*np.einsum('iijj',eri_full[docc, docc, docc, docc]) - \\\n np.einsum('ijji',eri_full[docc, docc, docc, docc]) + \\\n np.einsum('iijj',eri_full[socc, socc, docc, docc]) - \\\n 0.5*np.einsum('ijji',eri_full[socc, docc, docc, socc]) + \\\n np.einsum('iijj',eri_full[docc, docc, socc, socc]) - \\\n 0.5*np.einsum('ijji',eri_full[docc, socc, socc, docc]) + \\\n 0.5*np.einsum('iijj',eri_full[socc, socc, socc, socc]) - \\\n 0.5*np.einsum('ijji',eri_full[socc, socc, socc, socc])\n\n mf.get_hcore = lambda *args: np.asarray(h1)\n mf.get_ovlp = lambda *args: np.eye(h1.shape[0])\n mf.energy_nuc = lambda *args: ecore\n mf._eri = eri_full # ao2mo.restore('8', np.zeros((8, 8, 8, 8)), 8)\n\n mf.init_guess = '1e'\n mf.mo_coeff = np.eye(n_orb)\n mf.mo_occ = np.array(alpha_diag) + np.array(beta_diag)\n w, _ = np.linalg.eigh(mf.get_fock())\n mf.mo_energy = w\n\n # Rotate the interaction tensors into the canonical basis.\n # Reiher and Li tensors, for example, are read-in in the local MO basis,\n # which is not optimal for the CCSD(T) calculation (canonical gives better\n # energy estimate whereas QPE is invariant to choice of basis)\n if use_kernel:\n mf.conv_tol = 1e-7\n mf.init_guess = '1e'\n mf.verbose = 4\n mf.diis_space = 24\n mf.level_shift = 0.5\n mf.conv_check = False\n mf.max_cycle = 800\n mf.kernel(mf.make_rdm1(mf.mo_coeff,\n mf.mo_occ)) # use MO info to generate guess\n mf = stability(mf)\n mf = stability(mf)\n mf = stability(mf)\n\n # Check if SCF has changed by doing restart, and print warning if so\n try:\n assert np.isclose(scf_energy, mf.e_tot, rtol=1e-14)\n except AssertionError:\n print(\n \"WARNING: E(SCF) from input integrals does not match E(SCF)\" + \\\n \" from mf.kernel()\")\n print(\" Will use E(SCF) = {:12.6f} from mf.kernel going forward.\".\n format(mf.e_tot))\n print(\"E(SCF, ints) = {:12.6f} whereas E(SCF) = {:12.6f}\".format(\n scf_energy, mf.e_tot))\n\n # New SCF energy and orbitals for CCSD(T)\n scf_energy = mf.e_tot\n\n # Now re-set the eri's to the (possibly rank-reduced) ERIs\n mf._eri = eri\n mf.mol.incore_anyway = True\n\n mycc = cc.CCSD(mf)\n mycc.max_cycle = 800\n mycc.conv_tol = 1E-8\n mycc.conv_tol_normt = 1E-4\n mycc.diis_space = 24\n mycc.verbose = 4\n mycc.kernel()\n\n if no_triples:\n et = 0.0\n else:\n et = mycc.ccsd_t()\n\n e_scf = scf_energy # may be read-in value or 'fresh' SCF value\n e_cor = mycc.e_corr + et\n e_tot = e_scf + e_cor\n\n print(\"E(SCF): \", e_scf)\n print(\"E(cor): \", e_cor)\n print(\"Total energy: \", e_tot)\n return e_scf, e_cor, e_tot\n\n\ndef open_shell_t1_d1(t1a, t1b, mo_occ, nalpha, nbeta):\n \"\"\"\n T1-diagnostic for open-shell is defined w.r.t Sx eigenfunction of T1\n where reference is ROHF.\n\n given i double occ, c unoccupied, x is single occuplied The T1 amps\n (high spin) in Sz basis are:\n T1 = t_{ia}^{ca}(ca^ ia) + 
t_{ib}^{cb}(cb^ ib)\n + t_{xa}^{ca}(ca^ xa) + t_{ib}^{xb}(xb^ ib)\n T1 in the Sx basis are\n T1 = f_{i}^{c}E_{ci} + v_{i}^{c}A_{ci}\n + sqrt(2)f_{x}^{c}(ca^ xa) + sqrt(2)f_{i}^{x}(xb^ ib)\n\n where E_{ci} = ca^ ia + cb^ ib and A_{ci} = ca^ ia - cb^ ib.\n\n See: The Journal of Chemical Physics 98, 9734 (1993);\n doi: 10.1063/1.464352\n Chemical Physics Letters 372 (2003) 362–367;\n doi:10.1016/S0009-2614(03)00435-4\n\n based on these and two papers from Lee the T1-openshell diagnostic is\n\n sqrt(sum_{ia}(f_{ia})^2 + 2sum_{xa}(t_{xa}^{ca})^2\n + 2 sum_{ix}(t_{ib}^{xb})^2) / 2 sqrt{N}\n\n To get this relate eqs 3-7 from Chemical Physics Letters 372 (2003) 362–367\n to Eqs. 45, 46, and 51 from Journal of Chemical Physics 98, 9734 (1993);\n doi: 10.1063/1.464352.\n \"\"\"\n # compute t1-diagnostic\n docc_idx = np.where(np.isclose(mo_occ, 2.))[0]\n socc_idx = np.where(np.isclose(mo_occ, 1.))[0]\n virt_idx = np.where(np.isclose(mo_occ, 0.))[0]\n t1a_docc = t1a[docc_idx, :] # double occ-> virtual\n t1b_docc = t1b[docc_idx, :][:, -len(virt_idx):] # double occ-> virtual\n if len(socc_idx) > 0:\n t1_xa = t1a[socc_idx, :] # single occ -> virtual\n t1_ix = t1b[docc_idx, :][:, :len(socc_idx)] # double occ -> single occ\n else:\n t1_xa = np.array(())\n t1_ix = np.array(())\n\n if nalpha - nbeta + len(virt_idx) != t1b.shape[1]:\n raise ValueError(\n \"Inconsistent shapes na {}, nb {}, t1b.shape {},{}\".format(\n nalpha, nbeta, t1b.shape[0], t1b.shape[1]))\n\n if t1a_docc.shape != (len(docc_idx), len(virt_idx)):\n raise ValueError(\"T1a_ia does not have the right shape\")\n if t1b_docc.shape != (len(docc_idx), len(virt_idx)):\n raise ValueError(\"T1b_ia does not have the right shape\")\n if len(socc_idx) > 0:\n if t1_ix.shape != (len(docc_idx), len(socc_idx)):\n raise ValueError(\"T1_ix does not have the right shape\")\n if t1_xa.shape != (len(socc_idx), len(virt_idx)):\n raise ValueError(\"T1_xa does not have the right shape\")\n\n t1_diagnostic = np.sqrt(\n np.sum((t1a_docc + t1b_docc)**2) + 2 * np.sum(t1_xa**2) +\n 2 * np.sum(t1_ix**2)) / (2 * np.sqrt(nalpha + nbeta))\n # compute D1-diagnostic\n f_ia = 0.5 * (t1a_docc + t1b_docc)\n s_f_ia_2, _ = np.linalg.eigh(f_ia @ f_ia.T)\n s_f_ia_2_norm = np.sqrt(np.max(s_f_ia_2, initial=0))\n\n if len(socc_idx) > 0:\n f_xa = np.sqrt(1 / 2) * t1_xa\n f_ix = np.sqrt(1 / 2) * t1_ix\n s_f_xa_2, _ = np.linalg.eigh(f_xa @ f_xa.T)\n s_f_ix_2, _ = np.linalg.eigh(f_ix @ f_ix.T)\n else:\n s_f_xa_2 = np.array(())\n s_f_ix_2 = np.array(())\n s_f_xa_2_norm = np.sqrt(np.max(s_f_xa_2, initial=0))\n s_f_ix_2_norm = np.sqrt(np.max(s_f_ix_2, initial=0))\n\n d1_diagnostic = np.max(\n np.array([s_f_ia_2_norm, s_f_xa_2_norm, s_f_ix_2_norm]))\n\n return t1_diagnostic, d1_diagnostic\n" ]
[ [ "numpy.sqrt", "numpy.eye", "numpy.sum", "numpy.einsum", "numpy.linalg.eigh", "numpy.isclose", "numpy.asarray", "numpy.max", "numpy.array" ] ]
wenhaopeter/read_pytorch_code
[ "491f989cd918cf08874dd4f671fb7f0142a0bc4f", "491f989cd918cf08874dd4f671fb7f0142a0bc4f", "491f989cd918cf08874dd4f671fb7f0142a0bc4f", "491f989cd918cf08874dd4f671fb7f0142a0bc4f", "491f989cd918cf08874dd4f671fb7f0142a0bc4f" ]
[ "torch/testing/_internal/common_utils.py", "test/distributed/test_ddp_under_dist_autograd.py", "caffe2/python/data_parallel_model_test.py", "test/test_show_pickle.py", "caffe2/python/operator_test/bbox_transform_test.py" ]
[ "r\"\"\"Importing this file must **not** initialize CUDA context. test_distributed\nrelies on this assumption to properly run. This means that when this is imported\nno CUDA calls shall be made, including torch.cuda.device_count(), etc.\n\ntorch.testing._internal.common_cuda.py can freely initialize CUDA context when imported.\n\"\"\"\n\nimport sys\nimport os\nimport platform\nimport re\nimport gc\nimport types\nfrom functools import partial\nimport inspect\nimport io\nimport argparse\nimport unittest\nimport warnings\nimport random\nimport contextlib\nimport socket\nimport subprocess\nimport time\nfrom collections import OrderedDict\nfrom contextlib import contextmanager\nfrom functools import wraps\nfrom itertools import product\nfrom copy import deepcopy\nfrom numbers import Number\nimport tempfile\nimport json\nfrom urllib.request import urlopen\nimport __main__\nimport errno\nfrom typing import cast, Any, Iterable, Optional\n\nfrom torch.testing._internal import expecttest\nfrom torch.testing import _compare_tensors_internal, _compare_scalars_internal, _compare_return_type\n\nimport torch\nimport torch.cuda\nfrom torch._utils_internal import get_writable_path\nfrom torch._six import string_classes\nimport torch.backends.cudnn\nimport torch.backends.mkl\nfrom enum import Enum\nfrom torch.autograd import gradcheck\nfrom torch.autograd.gradcheck import gradgradcheck\n\ntorch.backends.disable_global_flags()\n\nIS_SANDCASTLE = os.getenv('SANDCASTLE') == '1' or os.getenv('TW_JOB_USER') == 'sandcastle'\n\nclass ProfilingMode(Enum):\n LEGACY = 1\n SIMPLE = 2\n PROFILING = 3\n\ndef cppProfilingFlagsToProfilingMode():\n old_prof_exec_state = torch._C._jit_set_profiling_executor(True)\n old_prof_mode_state = torch._C._jit_set_profiling_mode(True)\n torch._C._jit_set_profiling_executor(old_prof_exec_state)\n torch._C._jit_set_profiling_mode(old_prof_mode_state)\n\n if old_prof_exec_state:\n if old_prof_mode_state:\n return ProfilingMode.PROFILING\n else:\n return ProfilingMode.SIMPLE\n else:\n return ProfilingMode.LEGACY\n\n@contextmanager\ndef enable_profiling_mode_for_profiling_tests():\n if GRAPH_EXECUTOR == ProfilingMode.PROFILING:\n old_prof_exec_state = torch._C._jit_set_profiling_executor(True)\n old_prof_mode_state = torch._C._jit_set_profiling_mode(True)\n try:\n yield\n finally:\n if GRAPH_EXECUTOR == ProfilingMode.PROFILING:\n torch._C._jit_set_profiling_executor(old_prof_exec_state)\n torch._C._jit_set_profiling_mode(old_prof_mode_state)\n\n@contextmanager\ndef enable_profiling_mode():\n old_prof_exec_state = torch._C._jit_set_profiling_executor(True)\n old_prof_mode_state = torch._C._jit_set_profiling_mode(True)\n try:\n yield\n finally:\n torch._C._jit_set_profiling_executor(old_prof_exec_state)\n torch._C._jit_set_profiling_mode(old_prof_mode_state)\n\n@contextmanager\ndef num_profiled_runs(num_runs):\n old_num_runs = torch._C._jit_set_num_profiled_runs(num_runs)\n try:\n yield\n finally:\n torch._C._jit_set_num_profiled_runs(old_num_runs)\n\nfunc_call = torch._C.ScriptFunction.__call__\nmeth_call = torch._C.ScriptMethod.__call__\n\ndef prof_callable(callable, *args, **kwargs):\n if 'profile_and_replay' in kwargs:\n del kwargs['profile_and_replay']\n if GRAPH_EXECUTOR == ProfilingMode.PROFILING:\n with enable_profiling_mode_for_profiling_tests():\n callable(*args, **kwargs)\n return callable(*args, **kwargs)\n\n return callable(*args, **kwargs)\n\ndef prof_func_call(*args, **kwargs):\n return prof_callable(func_call, *args, **kwargs)\n\ndef prof_meth_call(*args, **kwargs):\n 
return prof_callable(meth_call, *args, **kwargs)\n\ntorch._C.ScriptFunction.__call__ = prof_func_call\ntorch._C.ScriptMethod.__call__ = prof_meth_call\n\ndef _get_test_report_path():\n # allow users to override the test file location. We need this\n # because the distributed tests run the same test file multiple\n # times with different configurations.\n override = os.environ.get('TEST_REPORT_SOURCE_OVERRIDE')\n test_source = override if override is not None else 'python-unittest'\n return os.path.join('test-reports', test_source)\n\n\nparser = argparse.ArgumentParser(add_help=False)\nparser.add_argument('--subprocess', action='store_true',\n help='whether to run each test in a subprocess')\nparser.add_argument('--seed', type=int, default=1234)\nparser.add_argument('--accept', action='store_true')\nparser.add_argument('--ge_config', type=str)\nparser.add_argument('--repeat', type=int, default=1)\nparser.add_argument('--test_bailouts', action='store_true')\nparser.add_argument('--save-xml', nargs='?', type=str,\n const=_get_test_report_path(),\n default=_get_test_report_path() if bool(os.environ.get('IN_CIRCLECI')) else None)\nparser.add_argument('--discover-tests', action='store_true')\nparser.add_argument('--log-suffix', type=str, default=\"\")\nparser.add_argument('--run-parallel', type=int, default=1)\n\nargs, remaining = parser.parse_known_args()\nif args.ge_config == 'legacy':\n GRAPH_EXECUTOR = ProfilingMode.LEGACY\nelif args.ge_config == 'profiling':\n GRAPH_EXECUTOR = ProfilingMode.PROFILING\nelif args.ge_config == 'simple':\n GRAPH_EXECUTOR = ProfilingMode.SIMPLE\nelse:\n # infer flags based on the default settings\n GRAPH_EXECUTOR = cppProfilingFlagsToProfilingMode()\n\n\nLOG_SUFFIX = args.log_suffix\nRUN_PARALLEL = args.run_parallel\nTEST_BAILOUTS = args.test_bailouts\nTEST_DISCOVER = args.discover_tests\nTEST_IN_SUBPROCESS = args.subprocess\nTEST_SAVE_XML = args.save_xml\nREPEAT_COUNT = args.repeat\nSEED = args.seed\nif not expecttest.ACCEPT:\n expecttest.ACCEPT = args.accept\nUNITTEST_ARGS = [sys.argv[0]] + remaining\ntorch.manual_seed(SEED)\n\ndef wait_for_process(p):\n try:\n return p.wait()\n except KeyboardInterrupt:\n # Give `p` a chance to handle KeyboardInterrupt. Without this,\n # `pytest` can't print errors it collected so far upon KeyboardInterrupt.\n exit_status = p.wait(timeout=5)\n if exit_status is not None:\n return exit_status\n else:\n p.kill()\n raise\n except: # noqa E722, copied from python core library\n p.kill()\n raise\n finally:\n # Always call p.wait() to ensure exit\n p.wait()\n\ndef shell(command, cwd=None, env=None):\n sys.stdout.flush()\n sys.stderr.flush()\n # The following cool snippet is copied from Py3 core library subprocess.call\n # only the with\n # 1. `except KeyboardInterrupt` block added for SIGINT handling.\n # 2. 
In Py2, subprocess.Popen doesn't return a context manager, so we do\n # `p.wait()` in a `final` block for the code to be portable.\n #\n # https://github.com/python/cpython/blob/71b6c1af727fbe13525fb734568057d78cea33f3/Lib/subprocess.py#L309-L323\n assert not isinstance(command, torch._six.string_classes), \"Command to shell should be a list or tuple of tokens\"\n p = subprocess.Popen(command, universal_newlines=True, cwd=cwd, env=env)\n return wait_for_process(p)\n\n\n# Used to run the same test with different tensor types\ndef repeat_test_for_types(dtypes):\n def repeat_helper(f):\n @wraps(f)\n def call_helper(self, *args):\n for dtype in dtypes:\n with TestCase.subTest(self, dtype=dtype):\n f(self, *args, dtype=dtype)\n\n return call_helper\n return repeat_helper\n\n# Environment variable `IS_PYTORCH_CI` is set in `.jenkins/common.sh`.\nIS_PYTORCH_CI = bool(os.environ.get('IS_PYTORCH_CI'))\n\n\ndef discover_test_cases_recursively(suite_or_case):\n if isinstance(suite_or_case, unittest.TestCase):\n return [suite_or_case]\n rc = []\n for element in suite_or_case:\n rc.extend(discover_test_cases_recursively(element))\n return rc\n\ndef get_test_names(test_cases):\n return ['.'.join(case.id().split('.')[-2:]) for case in test_cases]\n\ndef chunk_list(lst, nchunks):\n return [lst[i::nchunks] for i in range(nchunks)]\n\n\ndef run_tests(argv=UNITTEST_ARGS):\n if TEST_DISCOVER:\n suite = unittest.TestLoader().loadTestsFromModule(__main__)\n test_cases = discover_test_cases_recursively(suite)\n for name in get_test_names(test_cases):\n print(name)\n elif TEST_IN_SUBPROCESS:\n suite = unittest.TestLoader().loadTestsFromModule(__main__)\n test_cases = discover_test_cases_recursively(suite)\n failed_tests = []\n for case in test_cases:\n test_case_full_name = case.id().split('.', 1)[1]\n exitcode = shell([sys.executable] + argv + [test_case_full_name])\n if exitcode != 0:\n failed_tests.append(test_case_full_name)\n\n assert len(failed_tests) == 0, \"{} unit test(s) failed:\\n\\t{}\".format(\n len(failed_tests), '\\n\\t'.join(failed_tests))\n elif RUN_PARALLEL > 1:\n suite = unittest.TestLoader().loadTestsFromModule(__main__)\n test_cases = discover_test_cases_recursively(suite)\n test_batches = chunk_list(get_test_names(test_cases), RUN_PARALLEL)\n processes = []\n for i in range(RUN_PARALLEL):\n command = [sys.executable] + argv + ['--log-suffix=-shard-{}'.format(i + 1)] + test_batches[i]\n processes.append(subprocess.Popen(command, universal_newlines=True))\n failed = False\n for p in processes:\n failed |= wait_for_process(p) != 0\n assert not failed, \"Some test shards have failed\"\n elif TEST_SAVE_XML is not None:\n # import here so that non-CI doesn't need xmlrunner installed\n import xmlrunner\n test_report_path = TEST_SAVE_XML + LOG_SUFFIX\n os.makedirs(test_report_path, exist_ok=True)\n verbose = '--verbose' in argv or '-v' in argv\n if verbose:\n print('Test results will be stored in {}'.format(test_report_path))\n unittest.main(argv=argv, testRunner=xmlrunner.XMLTestRunner(output=test_report_path, verbosity=2 if verbose else 1))\n elif REPEAT_COUNT > 1:\n for _ in range(REPEAT_COUNT):\n if not unittest.main(exit=False, argv=argv).result.wasSuccessful():\n sys.exit(-1)\n else:\n unittest.main(argv=argv)\n\nIS_WINDOWS = sys.platform == \"win32\"\nIS_MACOS = sys.platform == \"darwin\"\nIS_PPC = platform.machine() == \"ppc64le\"\n\nif IS_WINDOWS:\n @contextmanager\n def TemporaryFileName():\n # Ideally we would like to not have to manually delete the file, but NamedTemporaryFile\n # opens 
the file, and it cannot be opened multiple times in Windows. To support Windows,\n # close the file after creation and try to remove it manually\n f = tempfile.NamedTemporaryFile(delete=False)\n try:\n f.close()\n yield f.name\n finally:\n os.unlink(f.name)\nelse:\n @contextmanager # noqa: T484\n def TemporaryFileName():\n with tempfile.NamedTemporaryFile() as f:\n yield f.name\n\n\ndef _check_module_exists(name):\n r\"\"\"Returns if a top-level module with :attr:`name` exists *without**\n importing it. This is generally safer than try-catch block around a\n `import X`. It avoids third party libraries breaking assumptions of some of\n our tests, e.g., setting multiprocessing start method when imported\n (see librosa/#747, torchvision/#544).\n \"\"\"\n import importlib\n import importlib.util\n spec = importlib.util.find_spec(name)\n return spec is not None\n\nTEST_NUMPY = _check_module_exists('numpy')\nTEST_SCIPY = _check_module_exists('scipy')\nTEST_MKL = torch.backends.mkl.is_available()\nTEST_NUMBA = _check_module_exists('numba')\n\nTEST_DILL = _check_module_exists('dill')\n\nTEST_LIBROSA = _check_module_exists('librosa')\n\n# Python 2.7 doesn't have spawn\nNO_MULTIPROCESSING_SPAWN = os.environ.get('NO_MULTIPROCESSING_SPAWN', '0') == '1'\nTEST_WITH_ASAN = os.getenv('PYTORCH_TEST_WITH_ASAN', '0') == '1'\nTEST_WITH_TSAN = os.getenv('PYTORCH_TEST_WITH_TSAN', '0') == '1'\nTEST_WITH_UBSAN = os.getenv('PYTORCH_TEST_WITH_UBSAN', '0') == '1'\nTEST_WITH_ROCM = os.getenv('PYTORCH_TEST_WITH_ROCM', '0') == '1'\n# Enables tests that are slow to run (disabled by default)\nTEST_WITH_SLOW = os.getenv('PYTORCH_TEST_WITH_SLOW', '0') == '1'\n\n# Disables non-slow tests (these tests enabled by default)\n# This is usually used in conjunction with TEST_WITH_SLOW to\n# run *only* slow tests. 
(I could have done an enum, but\n# it felt a little awkward.\nTEST_SKIP_FAST = os.getenv('PYTORCH_TEST_SKIP_FAST', '0') == '1'\n\nif TEST_NUMPY:\n import numpy as np\n\n # Dict of NumPy dtype -> torch dtype (when the correspondence exists)\n numpy_to_torch_dtype_dict = {\n np.bool : torch.bool,\n np.uint8 : torch.uint8,\n np.int8 : torch.int8,\n np.int16 : torch.int16,\n np.int32 : torch.int32,\n np.int64 : torch.int64,\n np.float16 : torch.float16,\n np.float32 : torch.float32,\n np.float64 : torch.float64,\n np.complex64 : torch.complex64,\n np.complex128 : torch.complex128\n }\n\n # Dict of torch dtype -> NumPy dtype\n torch_to_numpy_dtype_dict = {value : key for (key, value) in numpy_to_torch_dtype_dict.items()}\n\nALL_TENSORTYPES = [torch.float,\n torch.double,\n torch.half]\n\n# bfloat16 bringup is currently only available on ROCm\n# ALL_TENSORTYPES2 will eventually be unified with ALL_TENSORTYPES\n# when bfloat16 bringup is complete on all platforms\nif TEST_WITH_ROCM:\n ALL_TENSORTYPES2 = [torch.float,\n torch.double,\n torch.half,\n torch.bfloat16]\nelse:\n ALL_TENSORTYPES2 = ALL_TENSORTYPES\n\ndef skipIfRocm(fn):\n @wraps(fn)\n def wrapper(*args, **kwargs):\n if TEST_WITH_ROCM:\n raise unittest.SkipTest(\"test doesn't currently work on the ROCm stack\")\n else:\n fn(*args, **kwargs)\n return wrapper\n\n\ndef skipIfCompiledWithoutNumpy(fn):\n # Even if the numpy module is present, if `USE_NUMPY=0` is used during the\n # build, numpy tests will fail\n numpy_support = TEST_NUMPY\n if numpy_support:\n try:\n # The numpy module is present, verify that PyTorch is compiled with\n # numpy support\n torch.from_numpy(np.array([2, 2]))\n except RuntimeError:\n numpy_support = False\n\n @wraps(fn)\n def wrapper(*args, **kwargs):\n if not numpy_support:\n raise unittest.SkipTest(\"PyTorch was compiled without numpy support\")\n else:\n fn(*args, **kwargs)\n return wrapper\n\ndef _test_function(fn, device):\n def run_test_function(self):\n return fn(self, device)\n return run_test_function\n\n\ndef skipIfNoLapack(fn):\n @wraps(fn)\n def wrapper(*args, **kwargs):\n if not torch._C.has_lapack:\n raise unittest.SkipTest('PyTorch compiled without Lapack')\n else:\n fn(*args, **kwargs)\n return wrapper\n\n\ndef skipIfNotRegistered(op_name, message):\n \"\"\"Wraps the decorator to hide the import of the `core`.\n\n Args:\n op_name: Check if this op is registered in `core._REGISTERED_OPERATORS`.\n message: message to fail with.\n\n Usage:\n @skipIfNotRegistered('MyOp', 'MyOp is not linked!')\n This will check if 'MyOp' is in the caffe2.python.core\n \"\"\"\n try:\n from caffe2.python import core\n skipper = unittest.skipIf(op_name not in core._REGISTERED_OPERATORS,\n message)\n except ImportError:\n skipper = unittest.skip(\"Cannot import `caffe2.python.core`\")\n return skipper\n\n\ndef skipIfNoSciPy(fn):\n @wraps(fn)\n def wrapper(*args, **kwargs):\n if not TEST_SCIPY:\n raise unittest.SkipTest(\"test require SciPy, but SciPy not found\")\n else:\n fn(*args, **kwargs)\n return wrapper\n\n\ndef slowTest(fn):\n @wraps(fn)\n def wrapper(*args, **kwargs):\n if not TEST_WITH_SLOW:\n raise unittest.SkipTest(\"test is slow; run with PYTORCH_TEST_WITH_SLOW to enable test\")\n else:\n fn(*args, **kwargs)\n wrapper.__dict__['slow_test'] = True\n return wrapper\n\n\ndef skipCUDAMemoryLeakCheckIf(condition):\n def dec(fn):\n if getattr(fn, '_do_cuda_memory_leak_check', True): # if current True\n fn._do_cuda_memory_leak_check = not condition\n return fn\n return dec\n\ndef 
skipCUDANonDefaultStreamIf(condition):\n def dec(fn):\n if getattr(fn, '_do_cuda_non_default_stream', True): # if current True\n fn._do_cuda_non_default_stream = not condition\n return fn\n return dec\n\ndef suppress_warnings(fn):\n @wraps(fn)\n def wrapper(*args, **kwargs):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n fn(*args, **kwargs)\n return wrapper\n\n\ndef get_cpu_type(type_name):\n module, name = type_name.rsplit('.', 1)\n assert module == 'torch.cuda'\n return getattr(torch, name)\n\n\ndef get_gpu_type(type_name):\n if isinstance(type_name, type):\n type_name = '{}.{}'.format(type_name.__module__, type_name.__name__)\n module, name = type_name.rsplit('.', 1)\n assert module == 'torch'\n return getattr(torch.cuda, name)\n\n\ndef to_gpu(obj, type_map=None):\n if type_map is None:\n type_map = {}\n if isinstance(obj, torch.Tensor):\n assert obj.is_leaf\n t = type_map.get(obj.type(), get_gpu_type(obj.type()))\n with torch.no_grad():\n res = obj.clone().type(t)\n res.requires_grad = obj.requires_grad\n return res\n elif torch.is_storage(obj):\n return obj.new().resize_(obj.size()).copy_(obj)\n elif isinstance(obj, list):\n return [to_gpu(o, type_map) for o in obj]\n elif isinstance(obj, tuple):\n return tuple(to_gpu(o, type_map) for o in obj)\n else:\n return deepcopy(obj)\n\n\ndef get_function_arglist(func):\n return inspect.getfullargspec(func).args\n\n\ndef set_rng_seed(seed):\n torch.manual_seed(seed)\n random.seed(seed)\n if TEST_NUMPY:\n np.random.seed(seed)\n\n\[email protected]\ndef freeze_rng_state():\n rng_state = torch.get_rng_state()\n if torch.cuda.is_available():\n cuda_rng_state = torch.cuda.get_rng_state()\n yield\n if torch.cuda.is_available():\n torch.cuda.set_rng_state(cuda_rng_state)\n torch.set_rng_state(rng_state)\n\[email protected]\ndef set_default_dtype(dtype):\n saved_dtype = torch.get_default_dtype()\n torch.set_default_dtype(dtype)\n yield\n torch.set_default_dtype(saved_dtype)\n\ndef iter_indices(tensor):\n if tensor.dim() == 0:\n return range(0)\n if tensor.dim() == 1:\n return range(tensor.size(0))\n return product(*(range(s) for s in tensor.size()))\n\n\ndef is_iterable(obj):\n try:\n iter(obj)\n return True\n except TypeError:\n return False\n\nclass CudaNonDefaultStream():\n def __enter__(self):\n # Before starting CUDA test save currently active streams on all\n # CUDA devices and set new non default streams to all CUDA devices\n # to ensure CUDA tests do not use default stream by mistake.\n beforeDevice = torch.cuda.current_device()\n self.beforeStreams = []\n for d in range(torch.cuda.device_count()):\n self.beforeStreams.append(torch.cuda.current_stream(d))\n deviceStream = torch.cuda.Stream(device=d)\n torch._C._cuda_setStream(deviceStream._cdata)\n torch._C._cuda_setDevice(beforeDevice)\n\n def __exit__(self, exec_type, exec_value, traceback):\n # After completing CUDA test load previously active streams on all\n # CUDA devices.\n beforeDevice = torch.cuda.current_device()\n for d in range(torch.cuda.device_count()):\n torch._C._cuda_setStream(self.beforeStreams[d]._cdata)\n torch._C._cuda_setDevice(beforeDevice)\n\nclass CudaMemoryLeakCheck():\n def __init__(self, testcase, name=None):\n self.name = testcase.id() if name is None else name\n self.testcase = testcase\n\n # initialize context & RNG to prevent false positive detections\n # when the test is the first to initialize those\n from torch.testing._internal.common_cuda import initialize_cuda_context_rng\n initialize_cuda_context_rng()\n\n @staticmethod\n def 
get_cuda_memory_usage():\n # we don't need CUDA synchronize because the statistics are not tracked at\n # actual freeing, but at when marking the block as free.\n num_devices = torch.cuda.device_count()\n gc.collect()\n return tuple(torch.cuda.memory_allocated(i) for i in range(num_devices))\n\n def __enter__(self):\n self.befores = self.get_cuda_memory_usage()\n\n def __exit__(self, exec_type, exec_value, traceback):\n # Don't check for leaks if an exception was thrown\n if exec_type is not None:\n return\n\n afters = self.get_cuda_memory_usage()\n\n for i, (before, after) in enumerate(zip(self.befores, afters)):\n self.testcase.assertEqual(\n before, after, msg='{} leaked {} bytes CUDA memory on device {}'.format(\n self.name, after - before, i))\n\n# \"min_satisfying_examples\" setting has been deprecated in hypythesis\n# 3.56.0 and removed in hypothesis 4.x\ntry:\n import hypothesis\n\n def settings(*args, **kwargs):\n if 'min_satisfying_examples' in kwargs and hypothesis.version.__version_info__ >= (3, 56, 0):\n kwargs.pop('min_satisfying_examples')\n return hypothesis.settings(*args, **kwargs)\n\n\n hypothesis.settings.register_profile(\n \"pytorch_ci\",\n settings(\n derandomize=True,\n suppress_health_check=[hypothesis.HealthCheck.too_slow],\n database=None,\n max_examples=100,\n verbosity=hypothesis.Verbosity.normal))\n hypothesis.settings.register_profile(\n \"dev\",\n settings(\n suppress_health_check=[hypothesis.HealthCheck.too_slow],\n database=None,\n max_examples=10,\n verbosity=hypothesis.Verbosity.normal))\n hypothesis.settings.register_profile(\n \"debug\",\n settings(\n suppress_health_check=[hypothesis.HealthCheck.too_slow],\n database=None,\n max_examples=1000,\n verbosity=hypothesis.Verbosity.verbose))\n\n hypothesis.settings.load_profile(\n \"pytorch_ci\" if IS_PYTORCH_CI else os.getenv('PYTORCH_HYPOTHESIS_PROFILE',\n 'dev')\n )\nexcept ImportError:\n print('Fail to import hypothesis in common_utils, tests are not derandomized')\n\ndisabled_test_from_issues = None\ndef check_disabled(test_name):\n global disabled_test_from_issues\n if disabled_test_from_issues is None:\n disabled_test_from_issues = {}\n\n def read_and_process():\n url = 'https://raw.githubusercontent.com/zdevito/pytorch_disabled_tests/master/result.json'\n contents = urlopen(url, timeout=1).read().decode('utf-8')\n the_response = json.loads(contents)\n for item in the_response['items']:\n title = item['title']\n key = 'DISABLED '\n if title.startswith(key):\n test_name = title[len(key):].strip()\n disabled_test_from_issues[test_name] = item['html_url']\n\n if not IS_SANDCASTLE and os.getenv(\"PYTORCH_RUN_DISABLED_TESTS\", \"0\") != \"1\":\n try:\n read_and_process()\n except Exception:\n print(\"Couldn't download test skip set, leaving all tests enabled...\")\n\n if test_name in disabled_test_from_issues:\n raise unittest.SkipTest(\n \"Test is disabled because an issue exists disabling it: {}\".format(disabled_test_from_issues[test_name]) +\n \" To enable set the environment variable PYTORCH_RUN_DISABLED_TESTS=1\")\n\n# Acquires the comparison dtype, required since isclose\n# requires both inputs have the same dtype, and isclose is not supported\n# for some device x dtype combinations.\n# NOTE: Remaps bfloat16 to float32 since neither the CPU or CUDA device types\n# support needed bfloat16 comparison methods.\n# NOTE: Remaps float16 to float32 on CPU since the CPU device type doesn't\n# support needed float16 comparison methods.\n# TODO: Update this once bfloat16 and float16 are better 
supported.\ndef get_comparison_dtype(a, b):\n # TODO: update this when promote_types supports bfloat16 and/or\n # isclose supports bfloat16.\n a_dtype = torch.float32 if a.dtype is torch.bfloat16 else a.dtype\n b_dtype = torch.float32 if b.dtype is torch.bfloat16 else b.dtype\n\n compare_dtype = torch.promote_types(a_dtype, b_dtype)\n\n # non-CUDA (CPU, for example) float16 -> float32\n # TODO: update this when isclose is implemented for CPU float16\n if (compare_dtype is torch.float16 and\n (a.device != b.device or a.device.type != 'cuda' or\n b.device.type != 'cuda')):\n compare_dtype = torch.float32\n\n return compare_dtype\n\nclass TestCase(expecttest.TestCase):\n # NOTE: \"precision\" lets classes and generated tests set minimum\n # atol values when comparing tensors. Used by @precisionOverride, for\n # example.\n # TODO: provide a better mechanism for generated tests to set rtol/atol.\n _precision: float = 0\n\n @property\n def precision(self) -> float:\n return self._precision\n\n @precision.setter\n def precision(self, prec: float) -> None:\n self._precision = prec\n\n _do_cuda_memory_leak_check = False\n _do_cuda_non_default_stream = False\n\n def __init__(self, method_name='runTest'):\n super().__init__(method_name)\n\n test_method = getattr(self, method_name, None)\n if test_method is not None:\n # Wraps the tested method if we should do CUDA memory check.\n self._do_cuda_memory_leak_check &= getattr(test_method, '_do_cuda_memory_leak_check', True)\n # FIXME: figure out the flaky -1024 anti-leaks on windows. See #8044\n if self._do_cuda_memory_leak_check and not IS_WINDOWS:\n self.wrap_with_cuda_policy(method_name, self.assertLeaksNoCudaTensors)\n\n # Wraps the tested method if we should enforce non default CUDA stream.\n self._do_cuda_non_default_stream &= getattr(test_method, '_do_cuda_non_default_stream', True)\n if self._do_cuda_non_default_stream and not IS_WINDOWS and not TEST_WITH_ROCM:\n self.wrap_with_cuda_policy(method_name, self.enforceNonDefaultStream)\n\n def assertLeaksNoCudaTensors(self, name=None):\n name = self.id() if name is None else name\n return CudaMemoryLeakCheck(self, name)\n\n def enforceNonDefaultStream(self):\n return CudaNonDefaultStream()\n\n def wrap_with_cuda_policy(self, method_name, policy):\n test_method = getattr(self, method_name)\n # the import below may initialize CUDA context, so we do it only if\n # self._do_cuda_memory_leak_check or self._do_cuda_non_default_stream\n # is True.\n from torch.testing._internal.common_cuda import TEST_CUDA\n fullname = self.id().lower() # class_name.method_name\n if TEST_CUDA and ('gpu' in fullname or 'cuda' in fullname):\n setattr(self, method_name, self.wrap_method_with_cuda_policy(test_method, policy))\n\n def wrap_method_with_cuda_policy(self, method, policy):\n # Assumes that `method` is the tested function in `self`.\n # NOTE: Python Exceptions (e.g., unittest.Skip) keeps objects in scope\n # alive, so this cannot be done in setUp and tearDown because\n # tearDown is run unconditionally no matter whether the test\n # passes or not. 
For the same reason, we can't wrap the `method`\n # call in try-finally and always do the check.\n @wraps(method)\n def wrapper(self, *args, **kwargs):\n with policy():\n method(*args, **kwargs)\n return types.MethodType(wrapper, self)\n\n def wrap_with_cuda_memory_check(self, method):\n return self.wrap_method_with_cuda_policy(method, self.assertLeaksNoCudaTensors)\n\n\n def setUp(self):\n\n\n if TEST_SKIP_FAST:\n if not getattr(self, self._testMethodName).__dict__.get('slow_test', False):\n raise unittest.SkipTest(\"test is fast; we disabled it with PYTORCH_TEST_SKIP_FAST\")\n check_disabled(str(self))\n\n set_rng_seed(SEED)\n\n def genSparseTensor(self, size, sparse_dim, nnz, is_uncoalesced, device='cpu'):\n # Assert not given impossible combination, where the sparse dims have\n # empty numel, but nnz > 0 makes the indices containing values.\n assert all(size[d] > 0 for d in range(sparse_dim)) or nnz == 0, 'invalid arguments'\n\n v_size = [nnz] + list(size[sparse_dim:])\n v = torch.randn(*v_size, device=device)\n i = torch.rand(sparse_dim, nnz, device=device)\n i.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(i))\n i = i.to(torch.long)\n if is_uncoalesced:\n v = torch.cat([v, torch.randn_like(v)], 0)\n i = torch.cat([i, i], 1)\n\n x = torch.sparse_coo_tensor(i, v, torch.Size(size))\n\n if not is_uncoalesced:\n x = x.coalesce()\n else:\n # FIXME: `x` is a sparse view of `v`. Currently rebase_history for\n # sparse views is not implemented, so this workaround is\n # needed for inplace operations done on `x`, e.g., copy_().\n # Remove after implementing something equivalent to CopySlice\n # for sparse views.\n # NOTE: We do clone() after detach() here because we need to be able to change size/storage of x afterwards\n x = x.detach().clone()\n return x, x._indices().clone(), x._values().clone()\n\n def safeToDense(self, t):\n r = self.safeCoalesce(t)\n return r.to_dense()\n\n def safeCoalesce(self, t):\n tc = t.coalesce()\n self.assertEqual(tc.to_dense(), t.to_dense())\n self.assertTrue(tc.is_coalesced())\n\n # Our code below doesn't work when nnz is 0, because\n # then it's a 0D tensor, not a 2D tensor.\n if t._nnz() == 0:\n self.assertEqual(t._indices(), tc._indices())\n self.assertEqual(t._values(), tc._values())\n return tc\n\n value_map = {}\n for idx, val in zip(t._indices().t(), t._values()):\n idx_tup = tuple(idx.tolist())\n if idx_tup in value_map:\n value_map[idx_tup] += val\n else:\n value_map[idx_tup] = val.clone() if isinstance(val, torch.Tensor) else val\n\n new_indices = sorted(list(value_map.keys()))\n new_values = [value_map[idx] for idx in new_indices]\n if t._values().ndimension() < 2:\n new_values = t._values().new(new_values)\n else:\n new_values = torch.stack(new_values)\n\n new_indices = t._indices().new(new_indices).t()\n tg = t.new(new_indices, new_values, t.size())\n\n self.assertEqual(tc._indices(), tg._indices())\n self.assertEqual(tc._values(), tg._values())\n\n if t.is_coalesced():\n self.assertEqual(tc._indices(), t._indices())\n self.assertEqual(tc._values(), t._values())\n\n return tg\n\n # Compares the given Torch and NumPy functions on the given tensor-like object.\n # NOTE: both torch_fn and np_fn should be functions that take a single\n # tensor (array). If the torch and/or NumPy function require additional\n # arguments then wrap the function in a lambda or pass a partial function.\n # TODO: support bfloat16 comparisons\n # TODO: add args/kwargs for passing to assertEqual (e.g. 
rtol, atol)\n def compare_with_numpy(self, torch_fn, np_fn, tensor_like, device=None, dtype=None):\n assert TEST_NUMPY\n assert dtype is not torch.bfloat16\n\n if isinstance(tensor_like, torch.Tensor):\n assert device is None\n assert dtype is None\n a = tensor_like.detach().cpu().numpy()\n t = tensor_like\n else:\n a = np.array(tensor_like, dtype=torch_to_numpy_dtype_dict[dtype])\n t = torch.tensor(tensor_like, device=device, dtype=dtype)\n\n np_result = np_fn(a)\n torch_result = torch_fn(t).cpu()\n\n # Converts arrays to tensors\n if isinstance(np_result, np.ndarray):\n try:\n np_result = torch.from_numpy(np_result)\n except Exception:\n # NOTE: copying an array before conversion is necessary when,\n # for example, the array has negative strides.\n np_result = torch.from_numpy(np_result.copy())\n\n self.assertEqual(np_result, torch_result)\n\n # Some analysis of tolerance by logging tests from test_torch.py can be found\n # in https://github.com/pytorch/pytorch/pull/32538.\n # dtype name : (rtol, atol)\n dtype_precisions = {\n torch.float16 : (0.001, 1e-5),\n torch.bfloat16 : (0.016, 1e-5),\n torch.float32 : (1.3e-6, 1e-5),\n torch.float64 : (1e-7, 1e-7),\n torch.complex32 : (0.001, 1e-5),\n torch.complex64 : (1.3e-6, 1e-5),\n torch.complex128 : (1e-7, 1e-7),\n }\n\n # Returns the \"default\" rtol and atol for comparing scalars or\n # tensors of the given dtypes.\n def _getDefaultRtolAndAtol(self, dtype0, dtype1):\n rtol = max(self.dtype_precisions.get(dtype0, (0, 0))[0],\n self.dtype_precisions.get(dtype1, (0, 0))[0])\n atol = max(self.dtype_precisions.get(dtype0, (0, 0))[1],\n self.dtype_precisions.get(dtype1, (0, 0))[1])\n\n return rtol, atol\n\n # Checks if two dense tensors are equal(-ish), returning (True, None)\n # when they are and (False, debug_msg) when they are not.\n # If exact_dtype is true both tensors must have the same dtype.\n # If exact_device is true both tensors must be on the same device.\n # See the \"Test Framework Tensor 'Equality'\" note for more details.\n # NOTE: tensors on different devices are moved to the CPU to be compared when\n # exact_device is False.\n # NOTE: this function checks the tensors' devices, sizes, and dtypes\n # and acquires the appropriate device, dtype, rtol and atol to compare\n # them with. It then calls _compare_tensors_internal.\n def _compareTensors(self, a, b, *, rtol: Optional[float] = None, atol=None, equal_nan=True,\n exact_dtype=True, exact_device=False) -> _compare_return_type:\n assert (atol is None) == (rtol is None)\n if not isinstance(a, torch.Tensor):\n return (False, \"argument a, {0}, to _compareTensors is not a tensor!\".format(a))\n if not isinstance(b, torch.Tensor):\n return (False, \"argument b, {0}, to _compareTensors is not a tensor!\".format(b))\n\n # Validates tensors are on the same device\n if exact_device and a.device != b.device:\n return (False, (\"Attempted to compare equality of tensors on \"\n \"different devices! Got devices {0} and \"\n \"{1}.\".format(a.device, b.device)))\n\n # Compares tensors of different devices on the CPU\n if a.device != b.device:\n a = a.cpu()\n b = b.cpu()\n\n # Checks size matches\n if a.size() != b.size():\n return (False, (\"Attempted to compare equality of tensors with \"\n \"different sizes. Got sizes {0} and {1}.\").format(a.size(), b.size()))\n\n # Checks dtype (if exact_dtype)\n if exact_dtype and a.dtype is not b.dtype:\n return (False, (\"Attempted to compare equality of tensors with \"\n \"different dtypes. 
Got dtypes {0} and {1}.\").format(a.dtype, b.dtype))\n\n # Acquires rtol and atol\n if rtol is None:\n rtol, atol = self._getDefaultRtolAndAtol(a.dtype, b.dtype)\n\n atol = max(atol, self.precision)\n\n # Converts to comparison dtype\n dtype = get_comparison_dtype(a, b)\n a = a.to(dtype)\n b = b.to(dtype)\n\n return _compare_tensors_internal(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)\n\n # Checks if two scalars are equal(-ish), returning (True, None)\n # when they are and (False, debug_msg) when they are not.\n # NOTE: this function just acquires rtol and atol\n # before calling _compare_scalars_internal.\n def _compareScalars(self, a, b, *,\n rtol: Optional[float] = None, atol: Optional[float] = None, equal_nan=True) -> _compare_return_type:\n # Acquires rtol and atol\n assert (atol is None) == (rtol is None)\n if rtol is None:\n if isinstance(a, complex) or isinstance(b, complex):\n rtol, atol = self._getDefaultRtolAndAtol(torch.complex64, torch.complex64)\n elif isinstance(a, float) or isinstance(b, float):\n rtol, atol = self._getDefaultRtolAndAtol(torch.float32, torch.float32)\n else:\n rtol, atol = 0, 0\n atol = max(atol, self.precision)\n\n return _compare_scalars_internal(a, b, rtol=cast(float, rtol), atol=cast(float, atol), equal_nan=equal_nan)\n\n def assertEqualIgnoreType(self, *args, **kwargs) -> None:\n # If you are seeing this function used, that means test is written wrongly\n # and deserves detailed investigation\n return self.assertEqual(*args, exact_dtype=False, **kwargs)\n\n # Compares x and y\n # TODO: default exact_device to True\n def assertEqual(self, x, y, msg: Optional[str] = None, *,\n atol: Optional[float] = None, rtol: Optional[float] = None,\n equal_nan=True, exact_dtype=True, exact_device=False) -> None:\n assert (atol is None) == (rtol is None), \"If one of atol or rtol is specified the other must be, too\"\n\n # Tensor x Number and Number x Tensor comparisons\n if isinstance(x, torch.Tensor) and isinstance(y, Number):\n self.assertEqual(x.item(), y, atol=atol, rtol=rtol, msg=msg,\n exact_dtype=exact_dtype, exact_device=exact_device)\n elif isinstance(y, torch.Tensor) and isinstance(x, Number):\n self.assertEqual(x, y.item(), atol=atol, rtol=rtol, msg=msg,\n exact_dtype=exact_dtype, exact_device=exact_device)\n # Tensor x np.bool\n elif isinstance(x, torch.Tensor) and isinstance(y, np.bool_):\n self.assertEqual(x.item(), y, atol=atol, rtol=rtol, msg=msg,\n exact_dtype=exact_dtype, exact_device=exact_device)\n elif isinstance(y, torch.Tensor) and isinstance(x, np.bool_):\n self.assertEqual(x, y.item(), atol=atol, rtol=rtol, msg=msg,\n exact_dtype=exact_dtype, exact_device=exact_device)\n # Tensor x Tensor\n elif isinstance(x, torch.Tensor) and isinstance(y, torch.Tensor):\n super().assertEqual(x.is_sparse, y.is_sparse, msg=msg)\n super().assertEqual(x.is_quantized, y.is_quantized, msg=msg)\n if x.is_sparse:\n x = self.safeCoalesce(x)\n y = self.safeCoalesce(y)\n indices_result, debug_msg = self._compareTensors(x._indices(), y._indices(),\n rtol=rtol, atol=atol,\n equal_nan=equal_nan, exact_dtype=exact_dtype,\n exact_device=exact_device)\n\n if not indices_result and msg is None:\n assert debug_msg is not None\n msg = \"Sparse tensor indices failed to compare as equal! 
\" + debug_msg\n self.assertTrue(indices_result, msg=msg)\n\n values_result, debug_msg = self._compareTensors(x._values(), y._values(),\n rtol=rtol, atol=atol,\n equal_nan=equal_nan, exact_dtype=exact_dtype,\n exact_device=exact_device)\n\n if not values_result and msg is None:\n assert debug_msg is not None\n msg = \"Sparse tensor values failed to compare as equal! \" + debug_msg\n self.assertTrue(values_result, msg=msg)\n elif x.is_quantized and y.is_quantized:\n self.assertEqual(x.qscheme(), y.qscheme(), atol=atol, rtol=rtol,\n msg=msg, exact_dtype=exact_dtype,\n exact_device=exact_device)\n\n if x.qscheme() == torch.per_tensor_affine:\n self.assertEqual(x.q_scale(), y.q_scale(), atol=atol, rtol=rtol,\n msg=msg, exact_dtype=exact_dtype,\n exact_device=exact_device)\n self.assertEqual(x.q_zero_point(), y.q_zero_point(),\n atol=atol, rtol=rtol, msg=msg,\n exact_dtype=exact_dtype, exact_device=exact_device)\n elif x.qscheme() == torch.per_channel_affine:\n self.assertEqual(x.q_per_channel_scales(), y.q_per_channel_scales(), atol=atol, rtol=rtol,\n msg=msg, exact_dtype=exact_dtype,\n exact_device=exact_device)\n self.assertEqual(x.q_per_channel_zero_points(), y.q_per_channel_zero_points(),\n atol=atol, rtol=rtol, msg=msg,\n exact_dtype=exact_dtype, exact_device=exact_device)\n self.assertEqual(x.q_per_channel_axis(), y.q_per_channel_axis(),\n atol=atol, rtol=rtol, msg=msg,\n exact_dtype=exact_dtype, exact_device=exact_device)\n\n result, debug_msg = self._compareTensors(x.int_repr().to(torch.int32),\n y.int_repr().to(torch.int32),\n atol=atol, rtol=rtol,\n exact_dtype=exact_dtype,\n exact_device=exact_device)\n\n if not result and msg is None:\n assert debug_msg is not None\n msg = \"Quantized representations failed to compare as equal! \" + debug_msg\n self.assertTrue(result, msg=msg)\n else:\n result, debug_msg = self._compareTensors(x, y, rtol=rtol, atol=atol,\n equal_nan=equal_nan, exact_dtype=exact_dtype,\n exact_device=exact_device)\n\n if not result and msg is None:\n assert debug_msg is not None\n msg = \"Tensors failed to compare as equal! 
\" + debug_msg\n self.assertTrue(result, msg=msg)\n elif isinstance(x, string_classes) and isinstance(y, string_classes):\n super().assertEqual(x, y, msg=msg)\n elif type(x) == set and type(y) == set:\n super().assertEqual(x, y, msg=msg)\n elif isinstance(x, dict) and isinstance(y, dict):\n if isinstance(x, OrderedDict) and isinstance(y, OrderedDict):\n self.assertEqual(x.items(), y.items(), atol=atol, rtol=rtol,\n msg=msg, exact_dtype=exact_dtype,\n exact_device=exact_device)\n else:\n self.assertEqual(set(x.keys()), set(y.keys()), atol=atol, rtol=rtol,\n msg=msg, exact_dtype=exact_dtype,\n exact_device=exact_device)\n key_list = list(x.keys())\n self.assertEqual([x[k] for k in key_list],\n [y[k] for k in key_list],\n atol=atol, rtol=rtol, msg=msg,\n exact_dtype=exact_dtype, exact_device=exact_device)\n elif isinstance(x, type) and isinstance(y, type):\n # See TestTorch.test_assert_equal_generic_meta\n super().assertEqual(x, y, msg=msg)\n elif is_iterable(x) and is_iterable(y):\n super().assertEqual(len(x), len(y), msg=msg)\n for x_, y_ in zip(x, y):\n self.assertEqual(x_, y_, atol=atol, rtol=rtol, msg=msg,\n exact_dtype=exact_dtype, exact_device=exact_device)\n elif isinstance(x, bool) and isinstance(y, bool):\n self.assertTrue(x == y, msg=msg)\n\n # Scalar x Scalar\n elif isinstance(x, Number) and isinstance(y, Number):\n result, debug_msg = self._compareScalars(x, y, rtol=rtol, atol=atol,\n equal_nan=equal_nan)\n if not result and msg is None:\n assert debug_msg is not None\n msg = \"Scalars failed to compare as equal! \" + debug_msg\n self.assertTrue(result, msg=msg)\n else:\n super().assertEqual(x, y, msg=msg)\n\n def assertAlmostEqual(self, x, y, *, places=None, msg=None, delta=None):\n prec = delta\n if places:\n prec = 10**(-places)\n rtol = None if prec is None else 0\n self.assertEqual(x, y, msg=msg, atol=prec, rtol=rtol)\n\n def assertNotEqual(self, x, y, msg: Optional[str] = None, *,\n atol: Optional[float] = None, rtol: Optional[float] = None, **kwargs) -> None:\n with self.assertRaises(AssertionError, msg=msg):\n self.assertEqual(x, y, msg, atol=atol, rtol=rtol, **kwargs)\n\n def assertEqualTypeString(self, x, y) -> None:\n # This API is used simulate deprecated x.type() == y.type()\n self.assertEqual(x.device, y.device)\n self.assertEqual(x.dtype, y.dtype)\n self.assertEqual(x.is_sparse, y.is_sparse)\n\n def assertObjectIn(self, obj: Any, iterable: Iterable[Any]) -> None:\n for elem in iterable:\n if id(obj) == id(elem):\n return\n raise AssertionError(\"object not found in iterable\")\n\n # TODO: Support context manager interface\n # NB: The kwargs forwarding to callable robs the 'subname' parameter.\n # If you need it, manually apply your callable in a lambda instead.\n def assertExpectedRaises(self, exc_type, callable, *args, **kwargs):\n subname = None\n if 'subname' in kwargs:\n subname = kwargs['subname']\n del kwargs['subname']\n try:\n callable(*args, **kwargs)\n except exc_type as e:\n self.assertExpected(str(e), subname)\n return\n # Don't put this in the try block; the AssertionError will catch it\n self.fail(msg=\"Did not raise when expected to\")\n\n def assertNotWarn(self, callable, msg=''):\n r\"\"\"\n Test if :attr:`callable` does not raise a warning.\n \"\"\"\n with warnings.catch_warnings(record=True) as ws:\n warnings.simplefilter(\"always\") # allow any warning to be raised\n callable()\n self.assertTrue(len(ws) == 0, msg)\n\n @contextmanager\n def maybeWarnsRegex(self, category, regex=''):\n \"\"\"Context manager for code that *may* warn, e.g. 
``TORCH_WARN_ONCE``.\n\n This filters expected warnings from the test log and fails the test if\n any unexpected warnings are caught.\n \"\"\"\n with warnings.catch_warnings(record=True) as ws:\n warnings.simplefilter(\"always\") # allow any warning to be raised\n # Ignore expected warnings\n warnings.filterwarnings(\"ignore\", message=regex, category=category)\n try:\n yield\n finally:\n if len(ws) != 0:\n msg = 'Caught unexpected warnings:\\n'\n for w in ws:\n msg += warnings.formatwarning(\n w.message, w.category, w.filename, w.lineno, w.line)\n msg += '\\n'\n self.fail(msg)\n\n def assertExpected(self, s, subname=None):\n r\"\"\"\n Test that a string matches the recorded contents of a file\n derived from the name of this test and subname. This file\n is placed in the 'expect' directory in the same directory\n as the test script. You can automatically update the recorded test\n output using --accept.\n\n If you call this multiple times in a single function, you must\n give a unique subname each time.\n \"\"\"\n if not isinstance(s, str):\n raise TypeError(\"assertExpected is strings only\")\n\n def remove_prefix(text, prefix):\n if text.startswith(prefix):\n return text[len(prefix):]\n return text\n # NB: we take __file__ from the module that defined the test\n # class, so we place the expect directory where the test script\n # lives, NOT where test/common_utils.py lives. This doesn't matter in\n # PyTorch where all test scripts are in the same directory as\n # test/common_utils.py, but it matters in onnx-pytorch\n module_id = self.__class__.__module__\n munged_id = remove_prefix(self.id(), module_id + \".\")\n test_file = os.path.realpath(sys.modules[module_id].__file__)\n expected_file = os.path.join(os.path.dirname(test_file),\n \"expect\",\n munged_id)\n\n subname_output = \"\"\n if subname:\n expected_file += \"-\" + subname\n subname_output = \" ({})\".format(subname)\n expected_file += \".expect\"\n expected = None\n\n def accept_output(update_type):\n print(\"Accepting {} for {}{}:\\n\\n{}\".format(update_type, munged_id, subname_output, s))\n with open(expected_file, 'w') as f:\n f.write(s)\n\n try:\n with open(expected_file) as f:\n expected = f.read()\n except IOError as e:\n if e.errno != errno.ENOENT:\n raise\n elif expecttest.ACCEPT:\n return accept_output(\"output\")\n else:\n raise RuntimeError(\n (\"I got this output for {}{}:\\n\\n{}\\n\\n\"\n \"No expect file exists; to accept the current output, run:\\n\"\n \"python {} {} --accept\").format(munged_id, subname_output, s, __main__.__file__, munged_id))\n\n # a hack for JIT tests\n if IS_WINDOWS:\n expected = re.sub(r'CppOp\\[(.+?)\\]', 'CppOp[]', expected)\n s = re.sub(r'CppOp\\[(.+?)\\]', 'CppOp[]', s)\n\n # Adjust for producer_version\n expected = expected.replace(\n 'producer_version: \"XXX\"',\n 'producer_version: \"{}\"'.format(torch.onnx.producer_version)\n )\n if expecttest.ACCEPT:\n if expected != s:\n return accept_output(\"updated output\")\n else:\n if hasattr(self, \"assertMultiLineEqual\"):\n # Python 2.7 only\n # NB: Python considers lhs \"old\" and rhs \"new\".\n self.assertMultiLineEqual(expected, s)\n else:\n self.assertEqual(s, expected)\n\n def assertExpectedStripMangled(self, s, subname=None):\n s = re.sub(r'__torch__[^ ]+', '', s)\n self.assertExpected(s, subname)\n\n # returns captured stderr\n @staticmethod\n def runWithPytorchAPIUsageStderr(code):\n import subprocess\n\n env = os.environ.copy()\n env[\"PYTORCH_API_USAGE_STDERR\"] = \"1\"\n pipes = subprocess.Popen(\n [sys.executable, '-c', 
code],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n env=env)\n return pipes.communicate()[1].decode('ascii')\n\n if sys.version_info < (3, 2):\n # assertRegexpMatches renamed to assertRegex in 3.2\n assertRegex = unittest.TestCase.assertRegexpMatches\n # assertRaisesRegexp renamed to assertRaisesRegex in 3.2\n assertRaisesRegex = unittest.TestCase.assertRaisesRegexp\n\n if sys.version_info < (3, 5):\n # assertNotRegexpMatches renamed to assertNotRegex in 3.5\n assertNotRegex = unittest.TestCase.assertNotRegexpMatches\n\n\ndef download_file(url, binary=True):\n from urllib.parse import urlsplit\n from urllib import request, error\n\n filename = os.path.basename(urlsplit(url)[2])\n data_dir = get_writable_path(os.path.join(os.path.dirname(__file__), 'data'))\n path = os.path.join(data_dir, filename)\n\n if os.path.exists(path):\n return path\n try:\n data = request.urlopen(url, timeout=15).read()\n with open(path, 'wb' if binary else 'w') as f:\n f.write(data)\n return path\n except error.URLError:\n msg = \"could not download test file '{}'\".format(url)\n warnings.warn(msg, RuntimeWarning)\n raise unittest.SkipTest(msg)\n\n\ndef find_free_port():\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.bind(('localhost', 0))\n sockname = sock.getsockname()\n sock.close()\n return sockname[1]\n\n# Errors that we can get in c10d initialization for which we should retry tests for.\nADDRESS_IN_USE = \"Address already in use\"\nCONNECT_TIMEOUT = \"connect() timed out.\"\n\ndef retry_on_connect_failures(func=None, connect_errors=(ADDRESS_IN_USE)):\n \"\"\"Reruns a test if the test returns a RuntimeError and the exception\n matches exactly with one of the strings in connect_errors.\"\"\"\n # This if block is executed when using this function as a decorator with arguments.\n if func is None:\n return partial(retry_on_connect_failures, connect_errors=connect_errors)\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n tries_remaining = 10\n while True:\n try:\n return func(*args, **kwargs)\n except RuntimeError as error:\n if str(error) in connect_errors:\n tries_remaining -= 1\n if tries_remaining == 0:\n raise\n time.sleep(random.random())\n continue\n raise\n return wrapper\n\n\n# Decorator to retry upon certain Exceptions.\ndef retry(ExceptionToCheck, tries=3, delay=3, skip_after_retries=False):\n def deco_retry(f):\n @wraps(f)\n def f_retry(*args, **kwargs):\n mtries, mdelay = tries, delay\n while mtries > 1:\n try:\n return f(*args, **kwargs)\n except ExceptionToCheck as e:\n msg = \"%s, Retrying in %d seconds...\" % (str(e), mdelay)\n print(msg)\n time.sleep(mdelay)\n mtries -= 1\n try:\n return f(*args, **kwargs)\n except ExceptionToCheck as e:\n raise unittest.SkipTest(f\"Skipping after {tries} consecutive {str(e)}\") from e if skip_after_retries else e\n return f_retry # true decorator\n return deco_retry\n\n\n# Methods for matrix generation\n# Used in test_autograd.py and test_torch.py\ndef prod_single_zero(dim_size):\n result = torch.randn(dim_size, dim_size)\n result[0, 1] = 0\n return result\n\n\ndef random_square_matrix_of_rank(l, rank, dtype=torch.double, device='cpu'):\n assert rank <= l\n A = torch.randn(l, l, dtype=dtype, device=device)\n u, s, v = A.svd()\n for i in range(l):\n if i >= rank:\n s[i] = 0\n elif s[i] == 0:\n s[i] = 1\n return u.mm(torch.diag(s)).mm(v.transpose(0, 1))\n\n\ndef random_symmetric_matrix(l, *batches, **kwargs):\n dtype = kwargs.get('dtype', torch.double)\n device = 
kwargs.get('device', 'cpu')\n A = torch.randn(*(batches + (l, l)), dtype=dtype, device=device)\n A = (A + A.transpose(-2, -1)).div_(2)\n return A\n\n\ndef random_symmetric_psd_matrix(l, *batches, **kwargs):\n dtype = kwargs.get('dtype', torch.double)\n device = kwargs.get('device', 'cpu')\n A = torch.randn(*(batches + (l, l)), dtype=dtype, device=device)\n return torch.matmul(A, A.transpose(-2, -1))\n\n\ndef random_symmetric_pd_matrix(matrix_size, *batch_dims, **kwargs):\n dtype = kwargs.get('dtype', torch.double)\n device = kwargs.get('device', 'cpu')\n A = torch.randn(*(batch_dims + (matrix_size, matrix_size)),\n dtype=dtype, device=device)\n return torch.matmul(A, A.transpose(-2, -1)) \\\n + torch.eye(matrix_size, dtype=dtype, device=device) * 1e-5\n\n\ndef make_nonzero_det(A, sign=None, min_singular_value=0.1):\n u, s, v = A.svd()\n s.clamp_(min=min_singular_value)\n A = torch.matmul(u, torch.matmul(torch.diag_embed(s), v.transpose(-2, -1)))\n det = A.det()\n if sign is not None:\n if A.dim() == 2:\n det = det.item()\n if (det < 0) ^ (sign < 0):\n A[0, :].neg_()\n else:\n cond = ((det < 0) ^ (sign < 0)).nonzero()\n if cond.size(0) > 0:\n for i in range(cond.size(0)):\n A[list(cond[i])][0, :].neg_()\n return A\n\n\ndef random_fullrank_matrix_distinct_singular_value(matrix_size, *batch_dims,\n **kwargs):\n dtype = kwargs.get('dtype', torch.double)\n device = kwargs.get('device', 'cpu')\n silent = kwargs.get(\"silent\", False)\n if silent and not torch._C.has_lapack:\n return torch.ones(matrix_size, matrix_size, dtype=dtype, device=device)\n\n A = torch.randn(batch_dims + (matrix_size, matrix_size), dtype=dtype, device=device)\n u, _, v = A.svd()\n s = torch.arange(1., matrix_size + 1, dtype=dtype, device=device).mul_(1.0 / (matrix_size + 1)).diag()\n return u.matmul(s.expand(batch_dims + (matrix_size, matrix_size)).matmul(v.transpose(-2, -1)))\n\n\ndef random_matrix(rows, columns, *batch_dims, **kwargs):\n \"\"\"Return rectangular matrix or batches of rectangular matrices.\n\n Parameters:\n dtype - the data type\n device - the device kind\n singular - when True, the output will be singular\n \"\"\"\n dtype = kwargs.get('dtype', torch.double)\n device = kwargs.get('device', 'cpu')\n silent = kwargs.get(\"silent\", False)\n singular = kwargs.get(\"singular\", False)\n if silent and not torch._C.has_lapack:\n return torch.ones(rows, columns, dtype=dtype, device=device)\n\n A = torch.randn(batch_dims + (rows, columns), dtype=dtype, device=device)\n u, _, v = A.svd(some=False)\n s = torch.zeros(rows, columns, dtype=dtype, device=device)\n k = min(rows, columns)\n for i in range(k):\n s[i, i] = float(i + 1) / (k + 1)\n if singular:\n # make matrix singular\n s[k - 1, k - 1] = 0\n if k > 2:\n # increase the order of singularity so that the pivoting\n # in LU factorization will be non-trivial\n s[0, 0] = 0\n return u.matmul(s.expand(batch_dims + (rows, columns)).matmul(v.transpose(-2, -1)))\n\n\ndef random_lowrank_matrix(rank, rows, columns, *batch_dims, **kwargs):\n \"\"\"Return rectangular matrix or batches of rectangular matrices with\n given rank.\n \"\"\"\n B = random_matrix(rows, rank, *batch_dims, **kwargs)\n C = random_matrix(rank, columns, *batch_dims, **kwargs)\n return B.matmul(C)\n\n\ndef random_sparse_matrix(rows, columns, density=0.01, **kwargs):\n \"\"\"Return rectangular random sparse matrix within given density.\n\n The density of the result approaches to given density as the size\n of the matrix is increased and a relatively small value of density\n is specified but higher than 
min(rows, columns)/(rows * columns)\n for non-singular matrices.\n \"\"\"\n dtype = kwargs.get('dtype', torch.double)\n device = kwargs.get('device', 'cpu')\n singular = kwargs.get(\"singular\", False)\n\n k = min(rows, columns)\n nonzero_elements = max(min(rows, columns), int(rows * columns * density))\n\n row_indices = [i % rows for i in range(nonzero_elements)]\n column_indices = [i % columns for i in range(nonzero_elements)]\n random.shuffle(column_indices)\n indices = [row_indices, column_indices]\n values = torch.randn(nonzero_elements, dtype=dtype, device=device)\n # ensure that the diagonal dominates\n values *= torch.tensor([-float(i - j)**2 for i, j in zip(*indices)], dtype=dtype, device=device).exp()\n A = torch.sparse_coo_tensor(indices, values, (rows, columns), device=device)\n return A.coalesce()\n\n\ndef random_sparse_pd_matrix(matrix_size, density=0.01, **kwargs):\n \"\"\"Return random sparse positive-definite matrix with given density.\n\n The eigenvalues of the matrix are defined as::\n arange(1, matrix_size+1)/matrix_size\n\n Algorithm:\n A = diag(arange(1, matrix_size+1)/matrix_size)\n while <A density is smaller than required>:\n <choose random i, j in range(matrix_size), theta in [0, 2*pi]>\n R = <rotation matrix (i,j,theta)>\n A = R^T A R\n \"\"\"\n import math\n torch = kwargs.get('torch', globals()['torch'])\n dtype = kwargs.get('dtype', torch.double)\n device = kwargs.get('device', 'cpu')\n data = dict([((i, i), float(i + 1) / matrix_size)\n for i in range(matrix_size)])\n\n\n def multiply(data, N, i, j, cs, sn, left=True):\n for k in range(N):\n if left:\n ik, jk = (k, i), (k, j)\n else:\n ik, jk = (i, k), (j, k)\n aik, ajk = data.get(ik, 0), data.get(jk, 0)\n aik, ajk = cs * aik + sn * ajk, -sn * aik + cs * ajk\n if aik:\n data[ik] = aik\n else:\n data.pop(ik, None)\n if ajk:\n data[jk] = ajk\n else:\n data.pop(jk, None)\n\n target_nnz = density * matrix_size * matrix_size\n while len(data) < target_nnz:\n i = random.randint(0, matrix_size - 1)\n j = random.randint(0, matrix_size - 1)\n if i != j:\n theta = random.uniform(0, 2 * math.pi)\n cs = math.cos(theta)\n sn = math.sin(theta)\n multiply(data, matrix_size, i, j, cs, sn, left=True)\n multiply(data, matrix_size, i, j, cs, sn, left=False)\n icoords, jcoords, values = [], [], []\n for (i, j), v in sorted(data.items()):\n icoords.append(i)\n jcoords.append(j)\n values.append(v)\n indices = [icoords, jcoords]\n return torch.sparse_coo_tensor(indices, values, (matrix_size, matrix_size), dtype=dtype, device=device)\n\n\ndef do_test_dtypes(self, dtypes, layout, device):\n for dtype in dtypes:\n if dtype != torch.float16:\n out = torch.zeros((2, 3), dtype=dtype, layout=layout, device=device)\n self.assertIs(dtype, out.dtype)\n self.assertIs(layout, out.layout)\n self.assertEqual(device, out.device)\n\n\ndef do_test_empty_full(self, dtypes, layout, device):\n shape = torch.Size([2, 3])\n\n def check_value(tensor, dtype, layout, device, value, requires_grad):\n self.assertEqual(shape, tensor.shape)\n self.assertIs(dtype, tensor.dtype)\n self.assertIs(layout, tensor.layout)\n self.assertEqual(tensor.requires_grad, requires_grad)\n if tensor.is_cuda and device is not None:\n self.assertEqual(device, tensor.device)\n if value is not None:\n fill = tensor.new(shape).fill_(value)\n self.assertEqual(tensor, fill)\n\n def get_int64_dtype(dtype):\n module = '.'.join(str(dtype).split('.')[1:-1])\n if not module:\n return torch.int64\n return operator.attrgetter(module)(torch).int64\n\n default_dtype = 
torch.get_default_dtype()\n check_value(torch.empty(shape), default_dtype, torch.strided, -1, None, False)\n check_value(torch.full(shape, -5.), default_dtype, torch.strided, -1, None, False)\n for dtype in dtypes:\n for rg in {dtype.is_floating_point, False}:\n int64_dtype = get_int64_dtype(dtype)\n v = torch.empty(shape, dtype=dtype, device=device, layout=layout, requires_grad=rg)\n check_value(v, dtype, layout, device, None, rg)\n out = v.new()\n check_value(torch.empty(shape, out=out, device=device, layout=layout, requires_grad=rg),\n dtype, layout, device, None, rg)\n check_value(v.new_empty(shape), dtype, layout, device, None, False)\n check_value(v.new_empty(shape, dtype=int64_dtype, device=device, requires_grad=False),\n int64_dtype, layout, device, None, False)\n check_value(torch.empty_like(v), dtype, layout, device, None, False)\n check_value(torch.empty_like(v, dtype=int64_dtype, layout=layout, device=device, requires_grad=False),\n int64_dtype, layout, device, None, False)\n\n if dtype is not torch.float16 and layout != torch.sparse_coo:\n fv = 3\n v = torch.full(shape, fv, dtype=dtype, layout=layout, device=device, requires_grad=rg)\n check_value(v, dtype, layout, device, fv, rg)\n check_value(v.new_full(shape, fv + 1), dtype, layout, device, fv + 1, False)\n out = v.new()\n check_value(torch.full(shape, fv + 2, out=out, device=device, layout=layout, requires_grad=rg),\n dtype, layout, device, fv + 2, rg)\n check_value(v.new_full(shape, fv + 3, dtype=int64_dtype, device=device, requires_grad=False),\n int64_dtype, layout, device, fv + 3, False)\n check_value(torch.full_like(v, fv + 4), dtype, layout, device, fv + 4, False)\n check_value(torch.full_like(v, fv + 5,\n dtype=int64_dtype, layout=layout, device=device, requires_grad=False),\n int64_dtype, layout, device, fv + 5, False)\n\n\n\n\nTHESE_TAKE_WAY_TOO_LONG = {\n 'test_Conv3d_groups',\n 'test_conv_double_backward',\n 'test_conv_double_backward_groups',\n 'test_Conv3d_dilated',\n 'test_Conv3d_stride_padding',\n 'test_Conv3d_dilated_strided',\n 'test_Conv3d',\n 'test_Conv2d_dilated',\n 'test_ConvTranspose3d_dilated',\n 'test_ConvTranspose2d_dilated',\n 'test_snli',\n 'test_Conv2d',\n 'test_Conv2d_padding',\n 'test_ConvTranspose2d_no_bias',\n 'test_ConvTranspose2d',\n 'test_ConvTranspose3d',\n 'test_Conv2d_no_bias',\n 'test_matmul_4d_4d',\n 'test_multinomial_invalid_probs',\n}\n\n\nrunning_script_path = None\n\n\ndef set_running_script_path():\n global running_script_path\n try:\n running_file = os.path.abspath(os.path.realpath(sys.argv[0]))\n if running_file.endswith('.py'): # skip if the running file is not a script\n running_script_path = running_file\n except Exception:\n pass\n\n\ndef check_test_defined_in_running_script(test_case):\n if running_script_path is None:\n return\n test_case_class_file = os.path.abspath(os.path.realpath(inspect.getfile(test_case.__class__)))\n assert test_case_class_file == running_script_path, \"Class of loaded TestCase \\\"{}\\\" \" \\\n \"is not defined in the running script \\\"{}\\\", but in \\\"{}\\\". 
Did you \" \\\n \"accidentally import a unittest.TestCase from another file?\".format(\n test_case.id(), running_script_path, test_case_class_file)\n\n\ndef load_tests(loader, tests, pattern):\n set_running_script_path()\n test_suite = unittest.TestSuite()\n for test_group in tests:\n for test in test_group:\n check_test_defined_in_running_script(test)\n test_suite.addTest(test)\n return test_suite\n\n\nclass BytesIOContext(io.BytesIO):\n def __enter__(self):\n return self\n\n def __exit__(self, *args):\n pass\n\ndef _assertGradAndGradgradChecks(test_case, apply_fn, inputs):\n # call assert function rather than returning a bool since it's nicer\n # if we get whether this failed on the gradcheck or the gradgradcheck.\n test_case.assertTrue(gradcheck(apply_fn, inputs))\n test_case.assertTrue(gradgradcheck(apply_fn, inputs))\n\n\n# Using @precisionOverride specific to your test is the recommended way\n# of doing this. These are just some values that worked for test_nn.\ndtype2prec_DONTUSE = {torch.float: 1e-5,\n torch.double: 1e-5,\n torch.half: 1e-2,\n torch.bfloat16: 1e-1}\n", "#!/usr/bin/env python3\n\nfrom torch.testing._internal.distributed import ddp_under_dist_autograd_test\nfrom torch.testing._internal.common_utils import (\n run_tests,\n)\n\nclass TestDdpUnderDistAutogradWrapper(ddp_under_dist_autograd_test.TestDdpUnderDistAutograd):\n pass\n\nclass TestDdpComparison(ddp_under_dist_autograd_test.TestDdpComparison):\n pass\n\nif __name__ == \"__main__\":\n run_tests()\n", "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom future.utils import viewkeys\nfrom multiprocessing import Process, Queue\nimport numpy as np\nimport os\nimport shutil\nimport tempfile\nimport unittest\nimport time\nfrom mock import Mock\nfrom hypothesis import assume, given\nimport hypothesis.strategies as st\n\nfrom caffe2.proto import caffe2_pb2\nfrom caffe2.python import brew, core, cnn, data_parallel_model, dyndep, \\\n model_helper, optimizer, rnn_cell, workspace\nfrom caffe2.python.test_util import TestCase\n\n\ndyndep.InitOpsLibrary(\"@/caffe2/caffe2/distributed:file_store_handler_ops\")\n\n\nclass TemporaryDirectory:\n def __enter__(self):\n self.tmpdir = tempfile.mkdtemp()\n return self.tmpdir\n\n def __exit__(self, type, value, traceback):\n shutil.rmtree(self.tmpdir)\n\n# Note(jiayq): we are yet to find out why Travis gives out an error in gloo\n# like:\n# RuntimeError: [enforce fail at /home/travis/build/caffe2/caffe2/third_party/gloo/gloo/transport/tcp/device.cc:113] ifa != nullptr. 
Unable to find interface for: [127.0.1.1]\n# See for example https://travis-ci.org/caffe2/caffe2/jobs/262433866\n# As a result, we will check if this is travis, and if yes, disable it.\[email protected](os.environ.get(\"TRAVIS\"), \"DPMTest has a known issue with Travis.\")\nclass DataParallelModelTest(TestCase):\n\n def run_model(self, devices, gpu):\n '''\n Helper function for test_equiv\n '''\n def input_builder_fun(model):\n return None\n\n def model_build_fun(model, loss_scale):\n fc = model.FC(\"data\", \"fc\", 16, 1,\n (\"ConstantFill\", {}), (\"ConstantFill\", {}))\n fc_fl = model.FlattenToVec(fc, \"fc_fl\")\n sigm = model.Sigmoid(fc_fl, \"sigm\")\n sq = model.SquaredL2Distance([sigm, \"label\"], \"sq\")\n loss = model.AveragedLoss(sq, \"loss\")\n loss = model.Scale(loss, scale=loss_scale)\n\n # For testing explicit sync\n model.param_init_net.UniformFill([], [\"sync_num\"], shape=[1])\n return [loss]\n\n def add_optimizer(model):\n return optimizer.build_sgd(\n model,\n 0.1,\n policy=\"fixed\",\n max_gradient_norm=5.0,\n allow_lr_injection=True,\n )\n\n workspace.ResetWorkspace()\n model = cnn.CNNModelHelper(\n order=\"NHWC\",\n name=\"test{}\".format(devices),\n )\n data_parallel_model.Parallelize(\n model,\n input_builder_fun=input_builder_fun,\n forward_pass_builder_fun=model_build_fun,\n optimizer_builder_fun=add_optimizer,\n devices=devices,\n cpu_device=not gpu,\n shared_model=not gpu,\n combine_spatial_bn=not gpu,\n )\n data_parallel_model.AddBlobSync(model, [\"sync_num\"])\n\n # Light test for LR names\n lr_names = data_parallel_model.GetLearningRateBlobNames(model)\n self.assertGreater(len(lr_names), 0)\n\n np.random.seed(2603)\n\n # Each run has same input, independent of number of gpus\n batch_size = 64\n for i in range(0, 10):\n full_data = np.random.rand(batch_size, 16)\n full_labels = np.round(full_data[:, 0])\n batch_per_device = batch_size // len(devices)\n\n for (j, g) in enumerate(devices):\n st = j * batch_per_device\n en = st + batch_per_device\n data = full_data[st:en, :].astype(np.float32)\n labels = full_labels[st:en].astype(np.float32)\n with core.DeviceScope(core.DeviceOption(model._device_type, g)):\n workspace.FeedBlob(\n \"{}_{}/data\".format(model._device_prefix, g), data\n )\n workspace.FeedBlob(\n \"{}_{}/label\".format(model._device_prefix, g), labels\n )\n\n if i == 0:\n workspace.RunNetOnce(model.param_init_net)\n workspace.CreateNet(model.net)\n\n workspace.FeedBlob(\n model._device_prefix + \"_0/sync_num\",\n np.array([i * 2]).astype(np.float32),\n device_option=core.DeviceOption(model._device_type, 0))\n workspace.RunNet(model.net.Proto().name)\n\n # Test AddBlobSync\n for j in model._devices:\n sync = workspace.FetchBlob(\n model._device_prefix + \"_{}/sync_num\".format(j))[0]\n self.assertTrue(abs(sync - i * 2) < 0.01)\n\n return workspace.FetchBlob(\"{}_0/fc_w\".format(model._device_prefix))\n\n def run_test_locally(self, fn, device_option=None, **kwargs):\n # Queue for assertion errors on subprocesses\n queue = Queue()\n\n # Capture any exception thrown by the subprocess\n def run_fn(*args, **kwargs):\n try:\n if device_option is None:\n fn(*args, **kwargs)\n workspace.ResetWorkspace()\n else:\n with core.DeviceScope(device_option):\n fn(*args, **kwargs)\n workspace.ResetWorkspace()\n except Exception as ex:\n queue.put(ex)\n\n # Start N processes in the background\n procs = []\n for i in range(kwargs['comm_size']):\n kwargs['comm_rank'] = i\n proc = Process(\n target=run_fn,\n kwargs=kwargs)\n proc.start()\n procs.append(proc)\n\n # Test 
complete, join background processes\n while len(procs) > 0:\n proc = procs.pop(0)\n while proc.is_alive():\n proc.join(1)\n\n # Raise exception if we find any.\n # Note that the following is executed ALSO after\n # the last process was joined, so if ANY exception\n # was raised, it will be re-raised here.\n if not queue.empty():\n raise queue.get()\n\n def test_equiv(self):\n '''\n Test that the model produces exactly same results given\n total batchsize, independent of number of GPUs.\n '''\n for gpu in [True, False]:\n if gpu and (not workspace.has_gpu_support or\n workspace.NumCudaDevices() < 2):\n continue\n result_2gpus = self.run_model([0, 1], gpu=gpu)\n result_1gpus = self.run_model([0], gpu=gpu)\n\n self.assertTrue(np.allclose(result_1gpus, result_2gpus))\n\n if not gpu or workspace.NumCudaDevices() >= 4:\n result_4gpus = self.run_model(list(range(4)), gpu=gpu)\n self.assertTrue(np.allclose(result_1gpus, result_4gpus))\n\n if not gpu or workspace.NumCudaDevices() >= 8:\n result_8gpus = self.run_model(list(range(8)), gpu=gpu)\n self.assertTrue(np.allclose(result_1gpus, result_8gpus))\n\n if not gpu or workspace.NumCudaDevices() >= 16:\n result_16gpus = self.run_model(list(range(16)), gpu=gpu)\n self.assertTrue(np.allclose(result_1gpus, result_16gpus))\n\n def test_checkpoint_params(self):\n def add_input_ops(model):\n pass\n\n def add_model_ops(model, loss_scale):\n model.NHWC2NCHW(\"data\", \"data_nchw\")\n model.Conv(\"data_nchw\", 'conv1', 3, 64,\n weight_init=(\"MSRAFill\", {}), kernel=7,\n stride=2, pad=3, no_bias=0)\n model.SpatialBN('conv1', 'conv1_spatbn_relu', 64, epsilon=1e-3, is_test=False)\n model.Relu('conv1_spatbn_relu', 'conv1_spatbn_relu')\n model.MaxPool('conv1_spatbn_relu', 'pool1', kernel=3, stride=2)\n model.FC('pool1', 'fc', dim_in=(64 * 56 * 56), dim_out=100)\n model.Sigmoid('fc', 'fc_sigm')\n model.Softmax('fc_sigm', 'softmax')\n model.LabelCrossEntropy(['softmax', 'label'], 'xent')\n loss = model.AveragedLoss('xent', 'loss')\n\n # Add a duplicate param init to ensure it does not cause issues\n model.param_init_net.ConstantFill(\n [], [\"fc_w\"], shape=((64 * 56 * 56), 1000)\n )\n return [loss]\n\n def add_optimizer(model):\n optimizer.build_sgd(model, 0.1, policy=\"fixed\", momentum=0.9)\n\n model = cnn.CNNModelHelper(\n order=\"NHWC\",\n name=\"test\",\n )\n data_parallel_model.Parallelize_CPU(\n model,\n input_builder_fun=add_input_ops,\n forward_pass_builder_fun=add_model_ops,\n optimizer_builder_fun=add_optimizer,\n devices=[1, 2, 3],\n )\n\n # Only gpu_1 params should be returned (gpu_1 is the first gpu)\n checkpoint_params = data_parallel_model.GetCheckpointParams(model)\n for p in model.GetParams(\"cpu_1/\"):\n self.assertTrue(p in checkpoint_params)\n self.assertTrue(p + \"_momentum\" in checkpoint_params)\n for p in model.GetParams(\"cpu_2/\"):\n self.assertFalse(p in checkpoint_params)\n self.assertTrue(\n core.BlobReference(\"cpu_1/fc_w_momentum\") in checkpoint_params)\n for c in model.GetComputedParams(\"cpu_1/\"):\n self.assertTrue(c in checkpoint_params)\n for c in model.GetComputedParams(\"cpu_2/\"):\n self.assertFalse(c in checkpoint_params)\n self.assertFalse(core.BlobReference(\"cpu_1/data\") in checkpoint_params)\n self.assertTrue(core.BlobReference(\"optimizer_iteration\") in checkpoint_params)\n\n def test_net_conversion_and_append_net(self):\n other = model_helper.ModelHelper()\n fc1 = brew.fc(other, \"data\", \"other_fc1\", dim_in=3*227*227, dim_out=10)\n fc2 = brew.fc(other, fc1, \"other_fc2\", dim_in=10, dim_out=10)\n brew.fc(other, 
fc2, \"other_fc3\", dim_in=10, dim_out=10)\n\n def add_input_ops(model):\n model.net.UniformFill([], [\"data\"], shape=[4, 227, 227, 3])\n model.net.UniformFill([], [\"label\"], shape=[4])\n\n def add_model_ops(model, loss_scale):\n model.NHWC2NCHW(\"data\", \"data_nchw\")\n model.Conv(\"data_nchw\", 'conv1', 3, 64,\n weight_init=(\"MSRAFill\", {}), kernel=7,\n stride=2, pad=3, no_bias=0)\n model.SpatialBN('conv1', 'conv1_spatbn_relu', 64, epsilon=1e-3, is_test=False)\n model.Relu('conv1_spatbn_relu', 'conv1_spatbn_relu')\n model.MaxPool('conv1_spatbn_relu', 'pool1', kernel=3, stride=2)\n model.FC('pool1', 'fc', dim_in=(64 * 56 * 56), dim_out=10)\n\n # Append the net and param_init_net of the other model\n appendnet = data_parallel_model.ConvertNetForDevice(other.net)\n model.net.AppendNet(appendnet)\n\n model.param_init_net.AppendNet(\n data_parallel_model.ConvertNetForDevice(other.param_init_net))\n\n model.Sigmoid('fc', 'fc_sigm')\n model.Softmax('fc_sigm', 'softmax')\n loss = model.AveragedLoss('softmax', 'loss')\n return [loss]\n\n def add_optimizer(model):\n optimizer.build_sgd(model, 0.1, policy=\"fixed\", momentum=0.9)\n\n model = cnn.CNNModelHelper(\n order=\"NCHW\",\n name=\"test\",\n )\n data_parallel_model.Parallelize_CPU(\n model,\n input_builder_fun=add_input_ops,\n forward_pass_builder_fun=add_model_ops,\n optimizer_builder_fun=add_optimizer,\n devices=range(4)\n )\n\n # Just create and run net and confirm no exception is thrown\n workspace.RunNetOnce(model.param_init_net)\n workspace.CreateNet(model.net)\n workspace.RunNet(model.net)\n\n def test_synchronization_barrier(self):\n def run(comm_rank, comm_size, tmpdir):\n def add_input_ops(model):\n pass\n\n def add_model_ops(model, loss_scale):\n return []\n\n def add_optimizer(model):\n pass\n\n store_handler = \"store_handler\"\n workspace.RunOperatorOnce(\n core.CreateOperator(\n \"FileStoreHandlerCreate\",\n [],\n [store_handler],\n path=tmpdir))\n rendezvous = dict(\n kv_handler=store_handler,\n shard_id=comm_rank,\n num_shards=comm_size,\n engine='GLOO',\n )\n\n model = cnn.CNNModelHelper(\n order=\"NHWC\",\n name=\"test\",\n )\n data_parallel_model.Parallelize_CPU(\n model,\n input_builder_fun=add_input_ops,\n forward_pass_builder_fun=add_model_ops,\n optimizer_builder_fun=add_optimizer,\n devices=[1, 2, 3],\n rendezvous=rendezvous\n )\n data_parallel_model.RunInitNet(model)\n\n for _ in range(2):\n data_parallel_model.Synchronize(model)\n\n with TemporaryDirectory() as tmpdir:\n self.run_test_locally(\n run,\n comm_size=2,\n device_option=None,\n tmpdir=tmpdir)\n\n def test_pre_train_synchronization_barrier(self):\n def run(comm_rank, comm_size, tmpdir):\n def add_input_ops(model):\n pass\n\n def add_model_ops(model, loss_scale):\n return []\n\n def add_optimizer(model):\n pass\n\n workspace.ResetWorkspace()\n store_handler = \"store_handler\"\n workspace.RunOperatorOnce(\n core.CreateOperator(\n \"FileStoreHandlerCreate\",\n [],\n [store_handler],\n path=tmpdir))\n rendezvous = dict(\n kv_handler=store_handler,\n shard_id=comm_rank,\n num_shards=comm_size,\n engine='GLOO',\n )\n\n model = cnn.CNNModelHelper(\n order=\"NHWC\",\n name=\"test\",\n )\n # Set network timeout to 2 seconds, and add a 3 seconds\n # sleep for 1 host. 
Make sure there is no timeout on the\n # second RunNet.\n data_parallel_model._DEFAULT_TIMEOUT_SEC = 2\n data_parallel_model.Parallelize_CPU(\n model,\n input_builder_fun=add_input_ops,\n forward_pass_builder_fun=add_model_ops,\n optimizer_builder_fun=add_optimizer,\n devices=[1, 2, 3],\n rendezvous=rendezvous,\n barrier_net_timeout_sec=5\n )\n data_parallel_model.RunInitNet(model)\n data_parallel_model.RunNet(model, 2)\n if comm_rank == 0:\n time.sleep(data_parallel_model._DEFAULT_TIMEOUT_SEC)\n data_parallel_model.RunNet(model, 2)\n\n with TemporaryDirectory() as tmpdir:\n self.run_test_locally(\n run,\n comm_size=2,\n device_option=None,\n tmpdir=tmpdir)\n\n def test_device_scope_check(self):\n with self.assertRaises(AssertionError):\n with core.DeviceScope(core.DeviceOption(workspace.GpuDeviceType, 0)):\n data_parallel_model.Parallelize_GPU(None, None, None)\n\n def test_net_transformer_function(self):\n devices = [1, 2, 3]\n\n def add_input_ops(model):\n model.param_init_net.UniformFill([], [\"data\"], shape=[32, 8])\n\n def add_optimizer(model):\n optimizer.build_sgd(model, 0.1)\n\n def add_model_ops(model, loss_scale):\n fc1 = brew.fc(model, \"data\", \"fc1\", dim_in=8, dim_out=8)\n return [fc1]\n\n kwargs = {\n 'input_builder_fun': add_input_ops,\n 'forward_pass_builder_fun': add_model_ops,\n 'devices': devices,\n }\n\n # assert that the transformer is called for both train and test cases\n transform = Mock()\n kwargs['net_transformer_fun'] = transform\n model = model_helper.ModelHelper(name=\"r\", init_params=False)\n data_parallel_model.Parallelize_CPU(model, **kwargs)\n self.assertTrue(transform.called)\n self.assertEqual(transform.call_count, 1)\n\n transform = Mock()\n kwargs['net_transformer_fun'] = transform\n kwargs['optimizer_builder_fun'] = add_optimizer\n model = model_helper.ModelHelper(name=\"r\", init_params=True)\n data_parallel_model.Parallelize_CPU(model, **kwargs)\n self.assertTrue(transform.called)\n self.assertEqual(transform.call_count, 1)\n\n @given(seed=st.integers(0, 65535), batch_size=st.integers(1, 20))\n def test_multi_device_bn_op_level_cpu(self, seed, batch_size):\n self._bn_check_op_level(\"cpu\", seed, batch_size)\n\n @unittest.skipIf(not workspace.has_gpu_support, \"No gpu support.\")\n @unittest.skipIf(workspace.NumCudaDevices() < 2, \"Need at least 2 GPUs.\")\n @given(seed=st.integers(0, 65535), batch_size=st.integers(1, 20))\n def test_multi_device_bn_op_level_gpu(self, seed, batch_size):\n self._bn_check_op_level(\"gpu\", seed, batch_size)\n\n def _bn_check_op_level(self, device_type, seed, batch_size):\n '''\n Test multi device batch normalization at the operation level. This is\n done by checking the outputs of batch normalization and its gradient\n operator. 
We compare values produced with our manually calculated\n batch normalization values and gradients.\n '''\n devices = [0, 1]\n epsilon = 1e-3\n tolerance = 1e-3\n\n def _test_forward_pass(x, devices, device_type, scale, bias, epsilon):\n x_concat = np.concatenate(x)\n mean = np.mean(x_concat, axis=0)\n var = np.var(x_concat, axis=0)\n for device in devices:\n x_i = x[device]\n x_hat = (x_i - mean) / (np.sqrt(var + epsilon))\n expected_out = scale * x_hat + bias\n spatial_out = workspace.FetchBlob(\n \"{}_{}/bn_out\".format(device_type, device))\n rel_error = np.linalg.norm(spatial_out - expected_out) \\\n / np.linalg.norm(expected_out)\n self.assertTrue(rel_error < 0.005)\n\n def _test_backward_pass(x, devices, device_type, scale, tolerance):\n dBias_arr = []\n dY_arr = []\n dGamma_arr = []\n num_devices = len(devices)\n mean = np.array(workspace.FetchBlob(\n \"{}_0/bn_out_sm\".format(device_type)), dtype=np.float32)\n inv_var = np.array(workspace.FetchBlob(\n \"{}_0/bn_out_siv\".format(device_type)), dtype=np.float32)\n\n # dBias\n # Sum dBias values over all devices to find the average gradient\n for device in devices:\n dY_blob = workspace.FetchBlob(\n \"{}_{}/bn_out_grad\".format(device_type, device))\n dY = np.array(dY_blob, dtype=np.float32)\n dY_arr.append(dY)\n dBias_arr.append(np.array(np.sum(dY, axis=0), dtype=np.float32))\n dBias = np.sum(dBias_arr, dtype=np.float32)\n dBias_avg = dBias / num_devices\n for device in devices:\n dBiasActual = np.sum(workspace.FetchBlob(\"{}_{}/bn_out_b_grad\"\n .format(device_type, device)), dtype=np.float32)\n self.assertTrue(np.isclose([dBiasActual], [dBias], atol=tolerance))\n\n # dGamma\n # Sum dGamma values over all devices to find the average gradient\n for device in devices:\n dGamma = np.sum((x[device] - mean) * inv_var * dY_arr[device],\n axis=0, dtype=np.float32)\n dGamma_arr.append(dGamma)\n dGamma = np.sum(dGamma_arr, axis=0, dtype=np.float32)\n dGamma_avg = dGamma / num_devices\n for device in devices:\n dGammaActual = workspace.FetchBlob(\n \"{}_{}/bn_out_s_grad\".format(device_type, device))\n self.assertTrue(np.isclose([dGamma], [dGammaActual], atol=tolerance))\n\n # dX\n scale_inv_var = scale * inv_var / batch_size\n for device in devices:\n dX = scale_inv_var * (dY_arr[device] * batch_size - dBias_avg\n - (x[device] - mean) * dGamma_avg * inv_var)\n dX_actual = workspace.FetchBlob(\n \"{}_{}/tanh_grad\".format(device_type, device))\n self.assertTrue(np.isclose([dX], [dX_actual], atol=tolerance).all())\n\n def add_input_ops(model):\n for device in devices:\n data = np.random.rand(batch_size, 1, 1, 1).astype(np.float32)\n workspace.FeedBlob(\"{}_{}/data\".format(device_type, device), data)\n\n def add_model_ops(model, loss_scale):\n if device_type == \"gpu\":\n model.CopyCPUToGPU(\"data\", \"device_data\")\n model.Tanh(\"device_data\", \"tanh\")\n else:\n model.Tanh(\"data\", \"tanh\")\n model.SpatialBN(\"tanh\", \"bn_out\", 1, epsilon=epsilon, is_test=False)\n model.Sqr(\"bn_out\", \"sqr\")\n loss = model.SumElements(\"sqr\", \"loss\")\n return [loss]\n\n def add_optimizer(model):\n return optimizer.build_sgd(model, 0.1)\n\n np.random.seed(seed)\n workspace.ResetWorkspace()\n model = cnn.CNNModelHelper(\n order=\"NCHW\",\n name=\"test\"\n )\n data_parallel_model.Parallelize(\n model,\n input_builder_fun=add_input_ops,\n forward_pass_builder_fun=add_model_ops,\n optimizer_builder_fun=add_optimizer,\n devices=devices,\n cpu_device=device_type == \"cpu\",\n shared_model=False,\n combine_spatial_bn=True,\n )\n\n 
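A minimal, standalone NumPy sketch of the cross-device SpatialBN forward math that `_test_forward_pass` above relies on: statistics are computed over the *combined* batch, and each device's shard is then normalized with those shared statistics. This is illustrative only and not part of the test file; the helper name `combined_bn_forward` is invented for the sketch.

    import numpy as np

    def combined_bn_forward(shards, scale, bias, epsilon=1e-3):
        # Combine the per-device batches and take shared statistics,
        # as combine_spatial_bn does across devices.
        x_concat = np.concatenate(shards)
        mean = np.mean(x_concat, axis=0)
        var = np.var(x_concat, axis=0)   # biased variance, matching np.var default
        return [scale * ((x - mean) / np.sqrt(var + epsilon)) + bias for x in shards]

    if __name__ == "__main__":
        rng = np.random.RandomState(0)
        shards = [rng.rand(4, 1).astype(np.float32) for _ in range(2)]
        outs = combined_bn_forward(shards, scale=1.0, bias=0.0)
        # Normalizing the concatenated batch directly must give the same values.
        full = np.concatenate(shards)
        expected = (full - full.mean(axis=0)) / np.sqrt(full.var(axis=0) + 1e-3)
        assert np.allclose(np.concatenate(outs), expected, atol=1e-6)

The test's relative-error check (`rel_error < 0.005`) is the tolerant version of this exact comparison, applied to the `bn_out` blobs fetched from each device.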
workspace.RunNetOnce(model.param_init_net)\n scale = workspace.FetchBlob(\"{}_0/bn_out_s\".format(device_type))\n bias = workspace.FetchBlob(\"{}_0/bn_out_b\".format(device_type))\n workspace.RunNetOnce(model.net)\n\n x = []\n for device in devices:\n x_blob = workspace.FetchBlob(\"{}_{}/tanh\".format(device_type, device))\n x_i = np.array(x_blob, dtype=np.float32)\n x.append(x_i)\n\n _test_forward_pass(x, devices, device_type, scale, bias, epsilon)\n _test_backward_pass(x, devices, device_type, scale, tolerance)\n\n @given(seed=st.integers(0, 65535), batch_size=st.integers(1, 20))\n def test_multi_device_bn_net_lvl_cpu(self, seed, batch_size):\n if batch_size % 2 == 1:\n batch_size += 1\n self._test_multi_device_bn_net_lvl(\"cpu\", seed, batch_size)\n\n @unittest.skipIf(not workspace.has_gpu_support, \"No gpu support.\")\n @unittest.skipIf(workspace.NumCudaDevices() < 2, \"Need at least 2 GPUs.\")\n @given(seed=st.integers(0, 65535), batch_size=st.integers(1, 20))\n def test_multi_device_bn_net_lvl_gpu(self, seed, batch_size):\n if batch_size % 2 == 1:\n batch_size += 1\n self._test_multi_device_bn_net_lvl(\"gpu\", seed, batch_size)\n\n def _test_multi_device_bn_net_lvl(self, device_type, seed, batch_size):\n '''\n Test multi device batch normalization at the net level. This is done\n by verifying that the final batch normalization outputs and the\n gradient outputs from multiple devices are the same as those produced\n from a single device\n '''\n\n # Verify that the gradients calculated over multiple devices are the\n # same as the gradients calculated over one device. These values should\n # be equivalent because combine_spatial_bn sums values over all devices\n def _verify_bn_outputs(\n devices,\n device_type,\n tolerance,\n single_device_bn_out,\n two_device_bn_out_vals,\n single_device_grads,\n two_device_grads,\n ):\n two_device_bn_out = np.concatenate(two_device_bn_out_vals)\n self.assertTrue(np.isclose(\n [single_device_bn_out], [two_device_bn_out], atol=tolerance).all())\n\n # Scalar and Bias gradients should be the same across devices\n gradient_names = [\"bn_out_s_grad\", \"bn_out_b_grad\"]\n for name in gradient_names:\n expected_grad = single_device_grads[name]\n for device in devices:\n actual_grad = two_device_grads[device][name]\n self.assertTrue(\n np.isclose([actual_grad], [expected_grad], atol=tolerance))\n\n # Expected tanh_grad should be the combined tanh_grad vectors\n # across the devices\n first_grad = two_device_grads[0][\"tanh_grad\"]\n second_grad = two_device_grads[1][\"tanh_grad\"]\n actual_grad = np.concatenate([first_grad, second_grad])\n expected_grad = single_device_grads[\"tanh_grad\"]\n rel_error = np.linalg.norm(actual_grad - expected_grad) \\\n / np.linalg.norm(expected_grad)\n self.assertTrue(rel_error < 1e-3)\n\n def _create_model(multiple_devices):\n def add_input_ops_no_combine(model):\n workspace.FeedBlob(\"{}_0/data\".format(device_type), data)\n\n def add_input_ops_combine(model):\n half = int(batch_size / 2)\n workspace.FeedBlob(\"{}_0/data\".format(device_type), data[:half])\n workspace.FeedBlob(\"{}_1/data\".format(device_type), data[half:])\n\n def add_model_ops(model, loss_scale):\n if device_type == \"gpu\":\n model.CopyCPUToGPU(\"data\", \"device_data\")\n model.Tanh(\"device_data\", \"tanh\")\n else:\n model.Tanh(\"data\", \"tanh\")\n model.SpatialBN(\"tanh\", \"bn_out\", 1, epsilon=epsilon, is_test=False)\n model.Sqr(\"bn_out\", \"sqr\")\n loss = model.SumElements(\"sqr\", \"loss\")\n return [loss]\n\n def add_optimizer(model):\n 
return optimizer.build_sgd(model, 0.1)\n\n if multiple_devices:\n input_fun = add_input_ops_combine\n devices = [0, 1]\n combine_spatial_bn = True\n else:\n input_fun = add_input_ops_no_combine\n devices = [0]\n combine_spatial_bn = False\n model = cnn.CNNModelHelper(\n order=\"NCHW\",\n name=\"test\"\n )\n data_parallel_model.Parallelize(\n model,\n input_builder_fun=input_fun,\n forward_pass_builder_fun=add_model_ops,\n optimizer_builder_fun=add_optimizer,\n devices=devices,\n cpu_device=device_type == \"cpu\",\n shared_model=False,\n combine_spatial_bn=combine_spatial_bn,\n )\n return model\n\n devices = [0, 1]\n epsilon = 1e-3\n tolerance = 1e-3\n # We are generating random data\n np.random.seed(seed)\n data = np.random.rand(batch_size, 1, 1, 1).astype(np.float32)\n data = np.reshape(data, (batch_size, 1, 1, 1))\n\n # Get values calculated without combine_spatial_bn\n workspace.ResetWorkspace()\n model_no_combine = _create_model(multiple_devices=False)\n workspace.RunNetOnce(model_no_combine.param_init_net)\n workspace.RunNetOnce(model_no_combine.net)\n single_device_bn_out = workspace.FetchBlob(\"{}_0/bn_out\".format(device_type))\n single_device_grads = {}\n single_device_grads[\"bn_out_s_grad\"] = workspace.FetchBlob(\n \"{}_0/bn_out_s_grad\".format(device_type))\n single_device_grads[\"bn_out_b_grad\"] = workspace.FetchBlob(\n \"{}_0/bn_out_b_grad\".format(device_type))\n single_device_grads[\"tanh_grad\"] = workspace.FetchBlob(\n \"{}_0/tanh_grad\".format(device_type))\n\n # Get values calculated over multiple devices with combine_spatial_bn true\n workspace.ResetWorkspace()\n model_combine = _create_model(multiple_devices=True)\n workspace.RunNetOnce(model_combine.param_init_net)\n workspace.RunNetOnce(model_combine.net)\n two_device_bn_out_vals = []\n two_device_grads = {}\n for device in devices:\n bn_out_blob = \"{}_{}/bn_out\".format(device_type, device)\n two_device_bn_out_vals.append(workspace.FetchBlob(bn_out_blob))\n two_device_grads[device] = {}\n two_device_grads[device][\"bn_out_s_grad\"] = workspace.FetchBlob(\n \"{}_{}/bn_out_s_grad\".format(device_type, device))\n two_device_grads[device][\"bn_out_b_grad\"] = workspace.FetchBlob(\n \"{}_{}/bn_out_b_grad\".format(device_type, device))\n two_device_grads[device][\"tanh_grad\"] = workspace.FetchBlob(\n \"{}_{}/tanh_grad\".format(device_type, device))\n\n # Check to see if the combined values are equivalent\n _verify_bn_outputs(\n devices,\n device_type,\n tolerance,\n single_device_bn_out,\n two_device_bn_out_vals,\n single_device_grads,\n two_device_grads\n )\n\nclass RecurrentNetworkParallelTest(TestCase):\n\n def run_model(self, devices, gpu):\n\n '''\n Helper function for test_equiv\n '''\n def input_builder_fun(model):\n return None\n\n def model_build_fun(model, loss_scale):\n workspace.FeedBlob(\n core.ScopedBlobReference(\"seq_lengths\"),\n np.array([self.T] * self.batch_per_device, dtype=np.int32)\n )\n model.param_init_net.ConstantFill(\n [],\n \"hidden_init\",\n value=0.0,\n shape=[1, self.batch_per_device, self.hidden_dim]\n )\n model.param_init_net.ConstantFill(\n [],\n \"cell_init\",\n value=0.0,\n shape=[1, self.batch_per_device, self.hidden_dim]\n )\n\n output, _last_hidden, _, _last_state, = rnn_cell.LSTM(\n model=model,\n input_blob=\"data\",\n seq_lengths=\"seq_lengths\",\n initial_states=(\"hidden_init\", \"cell_init\"),\n dim_in=self.input_dim,\n dim_out=self.hidden_dim,\n scope=\"partest\",\n )\n\n # A silly loss function\n loss = model.AveragedLoss(\n model.Sub([output, \"target\"], \"dist\"),\n 
\"loss\",\n )\n loss = model.Scale(loss, \"loss_scaled\", scale=loss_scale)\n return [loss]\n\n def param_update_fun(model):\n ITER = model.Iter(\"ITER\")\n LR = model.net.LearningRate(\n [ITER],\n \"LR\",\n base_lr=(-0.1),\n policy=\"fixed\",\n )\n ONE = model.param_init_net.ConstantFill(\n [], \"ONE\", shape=[1], value=1.0,\n )\n for param in model.GetParams():\n param_grad = model.param_to_grad[param]\n model.WeightedSum([param, ONE, param_grad, LR], param)\n\n assert len(model.GetParams()) == len(model.params) // len(model._devices)\n\n workspace.ResetWorkspace()\n model = cnn.CNNModelHelper(\n name=\"recurrent_test{}\".format(devices),\n )\n\n self.T = 8\n self.batch_size = 64\n self.input_dim = 8\n self.hidden_dim = 31\n self.batch_per_device = self.batch_size // len(devices)\n\n data_parallel_model.Parallelize(\n model,\n input_builder_fun=input_builder_fun,\n forward_pass_builder_fun=model_build_fun,\n param_update_builder_fun=param_update_fun,\n devices=devices,\n optimize_gradient_memory=True,\n cpu_device=not gpu,\n )\n\n # Change all initialization to be ConstantFills so that\n # the everything is deterministic\n for op in model.param_init_net.Proto().op:\n if op.type.endswith('Fill'):\n op.type = 'ConstantFill'\n\n # Each run has same input, independent of number of gpus\n np.random.seed(20150210)\n for i in range(0, 10):\n full_data = np.random.rand(self.T, self.batch_size, self.input_dim)\n full_target = np.random.rand(\n self.T, self.batch_size, self.hidden_dim\n )\n\n for (j, g) in enumerate(devices):\n st = j * self.batch_per_device\n en = st + self.batch_per_device\n data = full_data[:, st:en, :].astype(np.float32)\n targets = full_target[:, st:en, :].astype(np.float32)\n with core.DeviceScope(core.DeviceOption(model._device_type, g)):\n workspace.FeedBlob(\n \"{}_{}/data\".format(model._device_prefix, g), data\n )\n workspace.FeedBlob(\n \"{}_{}/target\".format(model._device_prefix, g), targets\n )\n\n if i == 0:\n workspace.RunNetOnce(model.param_init_net)\n workspace.CreateNet(model.net)\n\n workspace.RunNet(model.net.Proto().name)\n\n return workspace.FetchBlob(\"{}_0/partest/i2h_w\".format(model._device_prefix))\n\n @unittest.skip(\"Test is flaky: https://github.com/pytorch/pytorch/issues/10322\")\n def test_equiv_recurrent(self):\n '''\n Test that the model produces exactly same results given\n total batchsize, independent of number of GPUs/CPUs.\n '''\n for gpu in [True, False]:\n if gpu and not workspace.has_gpu_support:\n continue\n result_2gpus = self.run_model([0, 1], gpu)\n result_1gpus = self.run_model([0], gpu)\n\n self.assertTrue(np.allclose(result_1gpus, result_2gpus))\n\n if not gpu or workspace.NumCudaDevices() >= 4:\n result_4gpus = self.run_model(list(range(4)), gpu)\n self.assertTrue(np.allclose(result_1gpus, result_4gpus))\n\n if not gpu or workspace.NumCudaDevices() >= 8:\n result_8gpus = self.run_model(list(range(8)), gpu)\n self.assertTrue(np.allclose(result_1gpus, result_8gpus))\n\n\[email protected](not workspace.has_gpu_support, \"No gpu support.\")\[email protected](workspace.NumCudaDevices() < 2, \"Need at least 2 GPUs.\")\nclass SparseDataParallelModelTest(TestCase):\n\n '''\n Create and run the model. 
We try with both storing indices for gather\n on CPU and on GPU\n '''\n def run_model(self, V, gpu_devices, cpu_indices):\n\n def input_builder_fun(model):\n return None\n\n def model_build_fun(model, loss_scale):\n if cpu_indices:\n with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU)):\n gathered_cpu = model.net.Gather(\n [self.vecs, 'indices'], 'gathered_cpu')\n\n gathered = model.CopyCPUToGPU(gathered_cpu, \"gathered\")\n else:\n gpu_vecs = model.param_init_net.CopyCPUToGPU(\n self.vecs, \"gpuvecs\",\n )\n model.params.append(gpu_vecs)\n gathered = model.net.Gather([gpu_vecs, 'indices'], 'gathered')\n flattened = model.Flatten(gathered, \"flattened\")\n fc = model.FC(flattened, \"fc\", 16 * 16, 1,\n (\"ConstantFill\", {}), (\"ConstantFill\", {}))\n fc_fl = model.FlattenToVec(fc, \"fc_fl\")\n sigm = model.Sigmoid(fc_fl, \"sigm\")\n sq = model.SquaredL2Distance([sigm, \"label\"], \"sq\")\n loss = model.AveragedLoss(sq, \"loss\")\n loss = model.Scale(loss, scale=loss_scale)\n return [loss]\n\n def param_update_fun(model):\n ONE = model.param_init_net.ConstantFill(\n [], \"ONE\", shape=[1], value=1.0,\n )\n LR = model.CopyCPUToGPU(self.LR, \"LR\")\n for param in model.GetParams():\n param_grad = model.param_to_grad[param]\n if not isinstance(param_grad, core.GradientSlice):\n model.WeightedSum([param, ONE, param_grad, LR], param)\n else:\n param_momentum = model.param_init_net.ConstantFill(\n [param],\n param + '_momentum',\n value=0.0,\n )\n model.net.SparseMomentumSGDUpdate(\n [\n param_grad.values,\n param_momentum,\n LR,\n param,\n param_grad.indices,\n ],\n [\n param_grad.values, param_momentum, param\n ],\n momentum=0.1,\n nesterov=0,\n )\n\n workspace.ResetWorkspace()\n model = cnn.CNNModelHelper(\n order=\"NHWC\",\n name=\"sparse_test{}\".format(gpu_devices),\n )\n\n with core.NameScope(\"cpu\"):\n with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU)):\n self.ITER = model.Iter(\"ITER\")\n self.LR = model.net.LearningRate(\n [self.ITER],\n \"LR\",\n base_lr=(-0.1),\n policy=\"fixed\",\n )\n self.vecs = model.param_init_net.UniformFill(\n [], \"vecs\", shape=[V, 16])\n if cpu_indices:\n model.params.append(self.vecs)\n self.ONE_CPU = model.param_init_net.ConstantFill(\n [], \"ONE_CPU\", shape=[1], value=1.0,\n )\n\n data_parallel_model.Parallelize_GPU(\n model,\n input_builder_fun=input_builder_fun,\n forward_pass_builder_fun=model_build_fun,\n param_update_builder_fun=param_update_fun,\n devices=gpu_devices,\n )\n\n # Update the vecs\n if cpu_indices:\n with core.NameScope(\"cpu\"):\n with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU)):\n for param in model.GetParams():\n param_grad = model.param_to_grad[param]\n model.ScatterWeightedSum([param, self.ONE_CPU,\n param_grad.indices,\n param_grad.values,\n self.LR],\n self.vecs)\n else:\n with core.DeviceScope(core.DeviceOption(workspace.GpuDeviceType, 0)):\n model.CopyGPUToCPU(\"gpu_0/gpuvecs\", self.vecs)\n\n np.random.seed(2603)\n\n # Each run has same input, independent of number of gpus\n batch_size = 64\n for i in range(0, 10):\n full_indices = np.random.permutation(V)[:batch_size * 16].reshape(\n batch_size, 16\n )\n full_labels = full_indices[:, 0] % 2\n batch_per_device = batch_size // len(gpu_devices)\n\n for (j, g) in enumerate(gpu_devices):\n st = j * batch_per_device\n en = st + batch_per_device\n indices = full_indices[st:en, :].astype(np.int32)\n labels = full_labels[st:en].astype(np.float32)\n\n device_for_indices = core.DeviceOption(caffe2_pb2.CPU)\n if not cpu_indices:\n device_for_indices = 
core.DeviceOption(workspace.GpuDeviceType, g)\n\n with core.DeviceScope(device_for_indices):\n workspace.FeedBlob(\"gpu_{}/indices\".format(g), indices)\n\n with core.DeviceScope(core.DeviceOption(workspace.GpuDeviceType, g)):\n workspace.FeedBlob(\"gpu_{}/label\".format(g), labels)\n\n if i == 0:\n workspace.RunNetOnce(model.param_init_net)\n # Force vecs to be same on all runs\n orig_vecs = np.random.rand(V, 16).astype(np.float32)\n workspace.FeedBlob(\n self.vecs,\n orig_vecs\n )\n if not cpu_indices:\n for g in gpu_devices:\n workspace.FeedBlob(\n \"gpu_{}/gpuvecs\".format(g),\n orig_vecs,\n device_option=core.DeviceOption(workspace.GpuDeviceType, g),\n )\n workspace.CreateNet(model.net)\n\n workspace.RunNet(model.net.Proto().name)\n if len(gpu_devices) == 2:\n if not cpu_indices:\n idx = workspace.FetchBlob(\"gpu_0/indices\")\n idx = list(idx.flatten())\n n = len(idx)\n nu = len(set(idx))\n assert n == nu, \"We cannot have duplicate indices\"\n\n # Sanity check to see the vecs were updated\n self.assertFalse(\n np.allclose(workspace.FetchBlob(self.vecs), orig_vecs))\n return [workspace.FetchBlob(self.vecs if cpu_indices else \"gpu_0/gpuvecs\"),\n workspace.FetchBlob(\"gpu_0/fc_w\")]\n\n def _test_equiv_sparse(self, cpu_indices):\n '''\n Test that the model produces exactly same results given\n total batchsize, independent of number of GPUs.\n '''\n V = 10000\n result_2gpus = self.run_model(V, [0, 1], cpu_indices)\n result_1gpus = self.run_model(V, [0], cpu_indices)\n\n self.assertTrue(np.allclose(result_1gpus[0], result_2gpus[0]))\n self.assertTrue(np.allclose(result_1gpus[1], result_2gpus[1]))\n\n if workspace.NumCudaDevices() >= 4:\n result_4gpus = self.run_model(V, list(range(4)), cpu_indices)\n self.assertTrue(np.allclose(result_1gpus[0], result_4gpus[0]))\n self.assertTrue(np.allclose(result_1gpus[1], result_4gpus[1]))\n\n if workspace.NumCudaDevices() >= 8:\n result_8gpus = self.run_model(V, list(range(8)), cpu_indices)\n self.assertTrue(np.allclose(result_1gpus[0], result_8gpus[0]))\n self.assertTrue(np.allclose(result_1gpus[1], result_8gpus[1]))\n\n def test_equiv_sparse(self):\n self._test_equiv_sparse(True)\n self._test_equiv_sparse(False)\n\n\[email protected](not workspace.has_gpu_support, \"No gpu support.\")\[email protected](workspace.NumGpuDevices() < 2, \"Need at least 2 GPUs.\")\nclass ParallelizeBMUFTest(TestCase):\n\n def _run_model(self, gpu_devices):\n '''\n Helper function for test_equiv\n '''\n def input_builder_fun(model):\n return None\n\n def _model_build_fun(self, model, loss_scale):\n fc = model.FC(\n \"data\", \"fc\", 16, 1, (\"ConstantFill\", {}), (\"ConstantFill\", {})\n )\n fc_fl = model.FlattenToVec(fc, \"fc_fl\")\n sigm = model.Sigmoid(fc_fl, \"sigm\")\n sq = model.SquaredL2Distance([sigm, \"label\"], \"sq\")\n loss = model.AveragedLoss(sq, \"loss\")\n loss = model.Scale(loss, scale=loss_scale)\n\n return [loss]\n\n def _param_update_fun(self, model):\n ITER = model.Iter(\"ITER\")\n LR = model.net.LearningRate(\n [ITER],\n \"LR\",\n base_lr=(-0.1),\n policy=\"fixed\",\n )\n ONE = model.param_init_net.ConstantFill(\n [], \"ONE\", shape=[1], value=1.0,\n )\n for param in model.GetParams():\n grad = model.param_to_grad[param]\n model.WeightedSum([param, ONE, grad, LR], param)\n\n def _generate_data(self, devices, device_type, device_prefix):\n np.random.seed(26)\n # Each run has same input, independent of number of gpus\n batch_size = 64\n for _ in range(0, 10):\n full_data = np.random.rand(batch_size, 16)\n full_labels = np.round(full_data[:, 0])\n 
batch_per_device = batch_size // len(devices)\n\n for (j, g) in enumerate(devices):\n st = j * batch_per_device\n en = st + batch_per_device\n data = full_data[st:en, :].astype(np.float32)\n labels = full_labels[st:en].astype(np.float32)\n with core.DeviceScope(core.DeviceOption(device_type, g)):\n workspace.FeedBlob(\"{}_{}/data\".format(device_prefix, g), data)\n workspace.FeedBlob(\"{}_{}/label\".format(device_prefix, g), labels)\n\n @given(\n cpu_device=st.booleans()\n )\n def test_parallelize_bmuf(self, cpu_device):\n assume(cpu_device or workspace.has_gpu_support or workspace.has_hip_support)\n\n workspace.ResetWorkspace()\n\n model = cnn.CNNModelHelper(\n order=\"NHWC\",\n name=\"test\"\n )\n devices = [0, 1]\n\n def input_builder_fun(model):\n return None\n\n if not cpu_device:\n device_type = workspace.GpuDeviceType\n device_prefix = \"gpu\"\n else:\n device_type = caffe2_pb2.CPU\n device_prefix = \"cpu\"\n self._generate_data(devices, device_type, device_prefix)\n\n data_parallel_model.Parallelize_BMUF(\n model,\n input_builder_fun,\n self._model_build_fun,\n self._param_update_fun,\n devices=devices,\n cpu_device=cpu_device\n )\n\n data_parallel_model.RunInitNet(model)\n\n # Check initial momentum params are zeros\n self.assertEqual(\n list(viewkeys(model._device_grouped_blobs)), ['fc_w', 'fc_b']\n )\n self.assertEqual(workspace.FetchBlob('{}_0/fc_b_v'.format(device_prefix)), 0)\n np.testing.assert_equal(\n workspace.FetchBlob('{}_0/fc_w_v'.format(device_prefix)),\n np.zeros(16).astype(np.float32).reshape(1, 16)\n )\n\n # Run the algorithm for one iteration to have non-zero params.\n data_parallel_model.RunNet(model, 1)\n\n # Save iteration momentum and post local update params\n v_b_ = workspace.FetchBlob('{}_0/fc_b_v'.format(device_prefix))\n v_w_ = workspace.FetchBlob('{}_0/fc_w_v'.format(device_prefix))\n\n workspace.RunNetOnce(model.net)\n\n b_0_ = workspace.FetchBlob('{}_0/fc_b'.format(device_prefix))\n w_0_ = workspace.FetchBlob('{}_0/fc_w'.format(device_prefix))\n b_1_ = workspace.FetchBlob('{}_1/fc_b'.format(device_prefix))\n w_1_ = workspace.FetchBlob('{}_1/fc_w'.format(device_prefix))\n\n # Compute block gradients.\n b_g_ = workspace.FetchBlob('{}_0/fc_b_g'.format(device_prefix))\n w_g_ = workspace.FetchBlob('{}_0/fc_w_g'.format(device_prefix))\n workspace.RunNetOnce(model._global_model_param_updates_net)\n\n g_b = (b_0_ + b_1_) / 2 - b_g_\n g_w = (w_0_ + w_1_) / 2 - w_g_\n v_b = workspace.FetchBlob('{}_0/fc_b_v'.format(device_prefix))\n v_w = workspace.FetchBlob('{}_0/fc_w_v'.format(device_prefix))\n\n w_g = workspace.FetchBlob('{}_0/fc_w_g'.format(device_prefix))\n b_g = workspace.FetchBlob('{}_0/fc_b_g'.format(device_prefix))\n w_0 = workspace.FetchBlob('{}_0/fc_w'.format(device_prefix))\n b_0 = workspace.FetchBlob('{}_0/fc_b'.format(device_prefix))\n w_1 = workspace.FetchBlob('{}_1/fc_w'.format(device_prefix))\n b_1 = workspace.FetchBlob('{}_1/fc_b'.format(device_prefix))\n\n # Check momentum update step\n np.testing.assert_equal(v_b, 0.5 * v_b_ + g_b)\n np.testing.assert_equal(v_w, 0.5 * v_w_ + g_w)\n\n np.testing.assert_equal(w_g, w_0)\n np.testing.assert_equal(w_g, w_1)\n np.testing.assert_equal(b_g, b_0)\n np.testing.assert_equal(b_g, b_1)\n\n # Check params update step\n np.testing.assert_equal(w_0, w_g_ + v_w)\n np.testing.assert_equal(b_0, b_g_ + v_b)\n\n\[email protected](not workspace.has_gpu_support, \"No gpu support.\")\[email protected](workspace.NumGpuDevices() < 2, \"Need at least 2 GPUs.\")\nclass 
SparseDataParallelModelTestWithSharedIndices(TestCase):\n\n '''\n Create and run the model. We try with both storing indices for gather\n on CPU and on GPU\n '''\n def run_model(self, V, gpu_devices):\n\n def input_builder_fun(model):\n return None\n\n def model_build_fun(model, loss_scale):\n gpu_vecs_gathered = []\n gpu_vecs = []\n for num, vec in enumerate(self.vecs):\n gpu_vec = model.param_init_net.CopyCPUToGPU(\n vec, 'gpuvec_{}'.format(num),\n )\n if num != 2:\n model.params.append(gpu_vec)\n gpu_vecs.append(gpu_vec)\n for num, gpu_vec in enumerate(gpu_vecs):\n gpu_vec_gathered = model.net.Gather(\n [gpu_vec, 'indices'],\n ['gpu_vec_gathered_{}'.format(num)]\n )\n gpu_vecs_gathered.append(gpu_vec_gathered)\n\n assert len(gpu_vecs_gathered) == 3\n\n fc = model.net.FC(\n [\n gpu_vecs_gathered[2],\n gpu_vecs_gathered[0],\n gpu_vecs_gathered[1],\n ],\n ['fc'],\n )\n _, loss = model.net.SoftmaxWithLoss(\n [fc, 'label'],\n ['ce_loss', 'avg_loss'],\n only_loss=True,\n )\n loss = model.Scale(loss, scale=loss_scale)\n model.net.Print(loss, [], limit=10)\n return [loss]\n\n def param_update_fun(model):\n ONE = model.param_init_net.ConstantFill(\n [], \"ONE\", shape=[1], value=1.0,\n )\n LR = model.CopyCPUToGPU(self.LR, \"LR\")\n for param in model.GetParams():\n param_grad = model.param_to_grad[param]\n if not isinstance(param_grad, core.GradientSlice):\n model.WeightedSum([param, ONE, param_grad, LR], param)\n else:\n model.net.ScatterWeightedSum(\n [\n param,\n ONE,\n param_grad.indices,\n param_grad.values,\n ONE,\n ],\n param,\n )\n\n workspace.ResetWorkspace()\n model = cnn.CNNModelHelper(\n order=\"NHWC\",\n name=\"sparse_test{}\".format(gpu_devices),\n )\n batch_size = 32\n batch_per_device = batch_size // len(gpu_devices)\n\n with core.NameScope(\"cpu\"):\n with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU)):\n self.ITER = model.Iter(\"ITER\")\n self.LR = model.net.LearningRate(\n [self.ITER],\n \"LR\",\n base_lr=(-0.1),\n policy=\"fixed\",\n )\n '''\n self.vecs consists of 3 big blobs on which we call Gather:\n 1) FC weights, shape=(V, 16)\n 2) FC bias, shape=(V)\n 3) FC input, shape=(batch_per_device, 16)\n '''\n self.vecs = [\n model.param_init_net.UniformFill(\n [], \"vec_{}\".format(num), shape=[V, 16])\n for num in range(2)\n ]\n self.vecs.append(\n model.param_init_net.UniformFill(\n [],\n \"vec_2\", shape=[batch_per_device, 16]\n )\n )\n self.ONE_CPU = model.param_init_net.ConstantFill(\n [], \"ONE_CPU\", shape=[1], value=1.0,\n )\n\n data_parallel_model.Parallelize_GPU(\n model,\n input_builder_fun=input_builder_fun,\n forward_pass_builder_fun=model_build_fun,\n param_update_builder_fun=param_update_fun,\n devices=gpu_devices,\n )\n\n # Update the vecs\n with core.DeviceScope(core.DeviceOption(workspace.GpuDeviceType, 0)):\n for num, vec in enumerate(self.vecs[:-1]):\n model.CopyGPUToCPU(\"gpu_0/gpuvec_{}\".format(num), vec)\n\n # Each run has same input, independent of number of gpus\n for i in range(0, 10):\n np.random.seed(2603)\n full_indices = np.random.permutation(V)[:batch_size].reshape(\n batch_size\n )\n full_labels = full_indices[:] % batch_per_device\n\n for (j, g) in enumerate(gpu_devices):\n st = j * batch_per_device\n en = st + batch_per_device\n indices = full_indices[st:en].astype(np.int32)\n labels = full_labels[st:en].astype(np.int32)\n\n with core.DeviceScope(core.DeviceOption(workspace.GpuDeviceType, g)):\n workspace.FeedBlob(\"gpu_{}/indices\".format(g), indices)\n workspace.FeedBlob(\"gpu_{}/label\".format(g), labels)\n\n if i == 0:\n 
workspace.RunNetOnce(model.param_init_net)\n # Force vecs to be same on all runs\n orig_vecs = [\n np.random.rand(V, 16).astype(np.float32),\n np.random.rand(V).astype(np.float32),\n np.random.rand(V, 16).astype(np.float32),\n ]\n for vec, orig_vec in zip(self.vecs, orig_vecs):\n workspace.FeedBlob(\n vec,\n orig_vec\n )\n for g in gpu_devices:\n for num, orig_vec in enumerate(orig_vecs):\n workspace.FeedBlob(\n \"gpu_{}/gpuvec_{}\".format(g, num),\n orig_vec,\n device_option=core.DeviceOption(\n workspace.GpuDeviceType, g),\n )\n workspace.CreateNet(model.net)\n\n workspace.RunNet(model.net.Proto().name)\n\n idx = workspace.FetchBlob('gpu_0/indices')\n grad_slices = [\n workspace.FetchBlob(\n 'gpu_{}/gpu_vec_gathered_{}_grad'.format(g, num))\n for g in gpu_devices for num in range(2)\n ]\n for grad_slice in grad_slices:\n # print (len(idx), len(grad_slice))\n assert len(idx) == len(grad_slice), (\n 'Number of indices {} is not same as number of gradient '\n 'slices {}. This might lead to illegal memory access'.format(\n len(idx), len(grad_slice)\n )\n )\n\n def test_sparse_shared_indices_gpu(self):\n '''\n Test that the model has same number of indices and gradient rows\n given total batchsize, independent of number of GPUs.\n '''\n V = 10000\n self.run_model(V, [0, 1])\n self.run_model(V, [0])\n\n if workspace.NumGpuDevices() >= 4:\n self.run_model(V, list(range(4)))\n\n if workspace.NumGpuDevices() >= 8:\n self.run_model(V, list(range(8)))\n\n\nif __name__ == \"__main__\":\n import unittest\n unittest.main()\n", "import unittest\nimport io\nimport tempfile\nimport torch\nimport torch.utils.show_pickle\n\nfrom torch.testing._internal.common_utils import IS_WINDOWS\n\nclass TestShowPickle(unittest.TestCase):\n\n @unittest.skipIf(IS_WINDOWS, \"Can't re-open temp file on Windows\")\n def test_scripted_model(self):\n class MyCoolModule(torch.nn.Module):\n def __init__(self, weight):\n super().__init__()\n self.weight = weight\n\n def forward(self, x):\n return x * self.weight\n\n m = torch.jit.script(MyCoolModule(torch.tensor([2.0])))\n\n with tempfile.NamedTemporaryFile() as tmp:\n torch.jit.save(m, tmp)\n tmp.flush()\n buf = io.StringIO()\n torch.utils.show_pickle.main([\"\", tmp.name + \"@*/data.pkl\"], output_stream=buf)\n output = buf.getvalue()\n self.assertRegex(output, \"MyCoolModule\")\n self.assertRegex(output, \"weight\")\n\n\nif __name__ == '__main__':\n unittest.main()\n", "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom caffe2.python import core\nfrom hypothesis import given\nimport caffe2.python.hypothesis_test_util as hu\nimport caffe2.python.serialized_test.serialized_test_util as serial\nimport hypothesis.strategies as st\nimport numpy as np\n\n\n# Reference implementation from detectron/lib/utils/boxes.py\ndef bbox_transform(boxes, deltas, weights=(1.0, 1.0, 1.0, 1.0)):\n \"\"\"Forward transform that maps proposal boxes to predicted ground-truth\n boxes using bounding-box regression deltas. 
See bbox_transform_inv for a\n description of the weights argument.\n \"\"\"\n if boxes.shape[0] == 0:\n return np.zeros((0, deltas.shape[1]), dtype=deltas.dtype)\n\n boxes = boxes.astype(deltas.dtype, copy=False)\n\n widths = boxes[:, 2] - boxes[:, 0] + 1.0\n heights = boxes[:, 3] - boxes[:, 1] + 1.0\n ctr_x = boxes[:, 0] + 0.5 * widths\n ctr_y = boxes[:, 1] + 0.5 * heights\n\n wx, wy, ww, wh = weights\n dx = deltas[:, 0::4] / wx\n dy = deltas[:, 1::4] / wy\n dw = deltas[:, 2::4] / ww\n dh = deltas[:, 3::4] / wh\n\n # Prevent sending too large values into np.exp()\n BBOX_XFORM_CLIP = np.log(1000. / 16.)\n dw = np.minimum(dw, BBOX_XFORM_CLIP)\n dh = np.minimum(dh, BBOX_XFORM_CLIP)\n\n pred_ctr_x = dx * widths[:, np.newaxis] + ctr_x[:, np.newaxis]\n pred_ctr_y = dy * heights[:, np.newaxis] + ctr_y[:, np.newaxis]\n pred_w = np.exp(dw) * widths[:, np.newaxis]\n pred_h = np.exp(dh) * heights[:, np.newaxis]\n\n pred_boxes = np.zeros(deltas.shape, dtype=deltas.dtype)\n # x1\n pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w\n # y1\n pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h\n # x2 (note: \"- 1\" is correct; don't be fooled by the asymmetry)\n pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w - 1\n # y2 (note: \"- 1\" is correct; don't be fooled by the asymmetry)\n pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h - 1\n\n return pred_boxes\n\n\n# Reference implementation from detectron/lib/utils/boxes.py\ndef clip_tiled_boxes(boxes, im_shape):\n \"\"\"Clip boxes to image boundaries. im_shape is [height, width] and boxes\n has shape (N, 4 * num_tiled_boxes).\"\"\"\n assert (\n boxes.shape[1] % 4 == 0\n ), \"boxes.shape[1] is {:d}, but must be divisible by 4.\".format(\n boxes.shape[1]\n )\n # x1 >= 0\n boxes[:, 0::4] = np.maximum(np.minimum(boxes[:, 0::4], im_shape[1] - 1), 0)\n # y1 >= 0\n boxes[:, 1::4] = np.maximum(np.minimum(boxes[:, 1::4], im_shape[0] - 1), 0)\n # x2 < im_shape[1]\n boxes[:, 2::4] = np.maximum(np.minimum(boxes[:, 2::4], im_shape[1] - 1), 0)\n # y2 < im_shape[0]\n boxes[:, 3::4] = np.maximum(np.minimum(boxes[:, 3::4], im_shape[0] - 1), 0)\n return boxes\n\n\ndef generate_rois(roi_counts, im_dims):\n assert len(roi_counts) == len(im_dims)\n all_rois = []\n for i, num_rois in enumerate(roi_counts):\n if num_rois == 0:\n continue\n # [batch_idx, x1, y1, x2, y2]\n rois = np.random.uniform(0, im_dims[i], size=(roi_counts[i], 5)).astype(\n np.float32\n )\n rois[:, 0] = i # batch_idx\n # Swap (x1, x2) if x1 > x2\n rois[:, 1], rois[:, 3] = (\n np.minimum(rois[:, 1], rois[:, 3]),\n np.maximum(rois[:, 1], rois[:, 3]),\n )\n # Swap (y1, y2) if y1 > y2\n rois[:, 2], rois[:, 4] = (\n np.minimum(rois[:, 2], rois[:, 4]),\n np.maximum(rois[:, 2], rois[:, 4]),\n )\n all_rois.append(rois)\n if len(all_rois) > 0:\n return np.vstack(all_rois)\n return np.empty((0, 5)).astype(np.float32)\n\n\ndef bbox_transform_rotated(\n boxes,\n deltas,\n weights=(1.0, 1.0, 1.0, 1.0),\n angle_bound_on=True,\n angle_bound_lo=-90,\n angle_bound_hi=90,\n):\n \"\"\"\n Similar to bbox_transform but for rotated boxes with angle info.\n \"\"\"\n if boxes.shape[0] == 0:\n return np.zeros((0, deltas.shape[1]), dtype=deltas.dtype)\n\n boxes = boxes.astype(deltas.dtype, copy=False)\n\n ctr_x = boxes[:, 0]\n ctr_y = boxes[:, 1]\n widths = boxes[:, 2]\n heights = boxes[:, 3]\n angles = boxes[:, 4]\n\n wx, wy, ww, wh = weights\n dx = deltas[:, 0::5] / wx\n dy = deltas[:, 1::5] / wy\n dw = deltas[:, 2::5] / ww\n dh = deltas[:, 3::5] / wh\n da = deltas[:, 4::5] * 180.0 / np.pi\n\n # Prevent sending too large values into 
np.exp()\n BBOX_XFORM_CLIP = np.log(1000. / 16.)\n dw = np.minimum(dw, BBOX_XFORM_CLIP)\n dh = np.minimum(dh, BBOX_XFORM_CLIP)\n\n pred_boxes = np.zeros(deltas.shape, dtype=deltas.dtype)\n pred_boxes[:, 0::5] = dx * widths[:, np.newaxis] + ctr_x[:, np.newaxis]\n pred_boxes[:, 1::5] = dy * heights[:, np.newaxis] + ctr_y[:, np.newaxis]\n pred_boxes[:, 2::5] = np.exp(dw) * widths[:, np.newaxis]\n pred_boxes[:, 3::5] = np.exp(dh) * heights[:, np.newaxis]\n\n pred_angle = da + angles[:, np.newaxis]\n if angle_bound_on:\n period = angle_bound_hi - angle_bound_lo\n assert period % 180 == 0\n pred_angle[np.where(pred_angle < angle_bound_lo)] += period\n pred_angle[np.where(pred_angle > angle_bound_hi)] -= period\n pred_boxes[:, 4::5] = pred_angle\n\n return pred_boxes\n\n\ndef clip_tiled_boxes_rotated(boxes, im_shape, angle_thresh=1.0):\n \"\"\"\n Similar to clip_tiled_boxes but for rotated boxes with angle info.\n Only clips almost horizontal boxes within angle_thresh. The rest are\n left unchanged.\n \"\"\"\n assert (\n boxes.shape[1] % 5 == 0\n ), \"boxes.shape[1] is {:d}, but must be divisible by 5.\".format(\n boxes.shape[1]\n )\n\n (H, W) = im_shape[:2]\n\n # Filter boxes that are almost upright within angle_thresh tolerance\n idx = np.where(np.abs(boxes[:, 4::5]) <= angle_thresh)\n idx5 = idx[1] * 5\n # convert to (x1, y1, x2, y2)\n x1 = boxes[idx[0], idx5] - (boxes[idx[0], idx5 + 2] - 1) / 2.0\n y1 = boxes[idx[0], idx5 + 1] - (boxes[idx[0], idx5 + 3] - 1) / 2.0\n x2 = boxes[idx[0], idx5] + (boxes[idx[0], idx5 + 2] - 1) / 2.0\n y2 = boxes[idx[0], idx5 + 1] + (boxes[idx[0], idx5 + 3] - 1) / 2.0\n # clip\n x1 = np.maximum(np.minimum(x1, W - 1), 0)\n y1 = np.maximum(np.minimum(y1, H - 1), 0)\n x2 = np.maximum(np.minimum(x2, W - 1), 0)\n y2 = np.maximum(np.minimum(y2, H - 1), 0)\n # convert back to (xc, yc, w, h)\n boxes[idx[0], idx5] = (x1 + x2) / 2.0\n boxes[idx[0], idx5 + 1] = (y1 + y2) / 2.0\n boxes[idx[0], idx5 + 2] = x2 - x1 + 1\n boxes[idx[0], idx5 + 3] = y2 - y1 + 1\n\n return boxes\n\n\ndef generate_rois_rotated(roi_counts, im_dims):\n rois = generate_rois(roi_counts, im_dims)\n # [batch_id, ctr_x, ctr_y, w, h, angle]\n rotated_rois = np.empty((rois.shape[0], 6)).astype(np.float32)\n rotated_rois[:, 0] = rois[:, 0] # batch_id\n rotated_rois[:, 1] = (rois[:, 1] + rois[:, 3]) / 2. # ctr_x = (x1 + x2) / 2\n rotated_rois[:, 2] = (rois[:, 2] + rois[:, 4]) / 2. 
# ctr_y = (y1 + y2) / 2\n rotated_rois[:, 3] = rois[:, 3] - rois[:, 1] + 1.0 # w = x2 - x1 + 1\n rotated_rois[:, 4] = rois[:, 4] - rois[:, 2] + 1.0 # h = y2 - y1 + 1\n rotated_rois[:, 5] = np.random.uniform(-90.0, 90.0) # angle in degrees\n return rotated_rois\n\n\nclass TestBBoxTransformOp(serial.SerializedTestCase):\n @serial.given(\n num_rois=st.integers(1, 10),\n num_classes=st.integers(1, 10),\n im_dim=st.integers(100, 600),\n skip_batch_id=st.booleans(),\n rotated=st.booleans(),\n angle_bound_on=st.booleans(),\n clip_angle_thresh=st.sampled_from([-1.0, 1.0]),\n **hu.gcs_cpu_only\n )\n def test_bbox_transform(\n self,\n num_rois,\n num_classes,\n im_dim,\n skip_batch_id,\n rotated,\n angle_bound_on,\n clip_angle_thresh,\n gc,\n dc,\n ):\n \"\"\"\n Test with all rois belonging to a single image per run.\n \"\"\"\n rois = (\n generate_rois_rotated([num_rois], [im_dim])\n if rotated\n else generate_rois([num_rois], [im_dim])\n )\n box_dim = 5 if rotated else 4\n if skip_batch_id:\n rois = rois[:, 1:]\n deltas = np.random.randn(num_rois, box_dim * num_classes).astype(np.float32)\n im_info = np.array([im_dim, im_dim, 1.0]).astype(np.float32).reshape(1, 3)\n\n def bbox_transform_ref(rois, deltas, im_info):\n boxes = rois if rois.shape[1] == box_dim else rois[:, 1:]\n im_shape = im_info[0, 0:2]\n if rotated:\n box_out = bbox_transform_rotated(\n boxes, deltas, angle_bound_on=angle_bound_on\n )\n box_out = clip_tiled_boxes_rotated(\n box_out, im_shape, angle_thresh=clip_angle_thresh\n )\n else:\n box_out = bbox_transform(boxes, deltas)\n box_out = clip_tiled_boxes(box_out, im_shape)\n return [box_out]\n\n op = core.CreateOperator(\n \"BBoxTransform\",\n [\"rois\", \"deltas\", \"im_info\"],\n [\"box_out\"],\n apply_scale=False,\n correct_transform_coords=True,\n rotated=rotated,\n angle_bound_on=angle_bound_on,\n clip_angle_thresh=clip_angle_thresh,\n )\n\n self.assertReferenceChecks(\n device_option=gc,\n op=op,\n inputs=[rois, deltas, im_info],\n reference=bbox_transform_ref,\n )\n\n @given(\n roi_counts=st.lists(st.integers(0, 5), min_size=1, max_size=10),\n num_classes=st.integers(1, 10),\n rotated=st.booleans(),\n angle_bound_on=st.booleans(),\n clip_angle_thresh=st.sampled_from([-1.0, 1.0]),\n **hu.gcs_cpu_only\n )\n def test_bbox_transform_batch(\n self,\n roi_counts,\n num_classes,\n rotated,\n angle_bound_on,\n clip_angle_thresh,\n gc,\n dc,\n ):\n \"\"\"\n Test with rois for multiple images in a batch\n \"\"\"\n batch_size = len(roi_counts)\n total_rois = sum(roi_counts)\n im_dims = np.random.randint(100, 600, batch_size)\n rois = (\n generate_rois_rotated(roi_counts, im_dims)\n if rotated\n else generate_rois(roi_counts, im_dims)\n )\n box_dim = 5 if rotated else 4\n deltas = np.random.randn(total_rois, box_dim * num_classes).astype(np.float32)\n im_info = np.zeros((batch_size, 3)).astype(np.float32)\n im_info[:, 0] = im_dims\n im_info[:, 1] = im_dims\n im_info[:, 2] = 1.0\n\n def bbox_transform_ref(rois, deltas, im_info):\n box_out = []\n offset = 0\n for i, num_rois in enumerate(roi_counts):\n if num_rois == 0:\n continue\n cur_boxes = rois[offset : offset + num_rois, 1:]\n cur_deltas = deltas[offset : offset + num_rois]\n im_shape = im_info[i, 0:2]\n if rotated:\n cur_box_out = bbox_transform_rotated(\n cur_boxes, cur_deltas, angle_bound_on=angle_bound_on\n )\n cur_box_out = clip_tiled_boxes_rotated(\n cur_box_out, im_shape, angle_thresh=clip_angle_thresh\n )\n else:\n cur_box_out = bbox_transform(cur_boxes, cur_deltas)\n cur_box_out = clip_tiled_boxes(cur_box_out, im_shape)\n 
box_out.append(cur_box_out)\n offset += num_rois\n\n if len(box_out) > 0:\n box_out = np.vstack(box_out)\n else:\n box_out = np.empty(deltas.shape).astype(np.float32)\n return [box_out, roi_counts]\n\n op = core.CreateOperator(\n \"BBoxTransform\",\n [\"rois\", \"deltas\", \"im_info\"],\n [\"box_out\", \"roi_batch_splits\"],\n apply_scale=False,\n correct_transform_coords=True,\n rotated=rotated,\n angle_bound_on=angle_bound_on,\n clip_angle_thresh=clip_angle_thresh,\n )\n\n self.assertReferenceChecks(\n device_option=gc,\n op=op,\n inputs=[rois, deltas, im_info],\n reference=bbox_transform_ref,\n )\n" ]
[ [ "torch.testing._internal.common_cuda.initialize_cuda_context_rng", "torch.testing._compare_tensors_internal", "numpy.random.seed", "numpy.array", "torch.autograd.gradcheck", "torch.autograd.gradcheck.gradgradcheck" ], [ "torch.testing._internal.common_utils.run_tests" ], [ "numpy.sqrt", "numpy.sum", "numpy.allclose", "numpy.linalg.norm", "numpy.zeros", "numpy.testing.assert_equal", "numpy.var", "numpy.random.permutation", "numpy.reshape", "numpy.random.seed", "numpy.isclose", "numpy.random.rand", "numpy.round", "numpy.concatenate", "numpy.array", "numpy.mean" ], [ "torch.tensor", "torch.jit.save", "torch.utils.show_pickle.main" ], [ "numpy.random.uniform", "numpy.vstack", "numpy.empty", "numpy.zeros", "numpy.abs", "numpy.random.randn", "numpy.exp", "numpy.log", "numpy.maximum", "numpy.where", "numpy.random.randint", "numpy.array", "numpy.minimum" ] ]
Geolem/nltk
[ "39b84d97bc857fce4fef185c69b94546b8474551" ]
[ "nltk/parse/dependencygraph.py" ]
[ "# Natural Language Toolkit: Dependency Grammars\n#\n# Copyright (C) 2001-2021 NLTK Project\n# Author: Jason Narad <[email protected]>\n# Steven Bird <[email protected]> (modifications)\n#\n# URL: <http://nltk.org/>\n# For license information, see LICENSE.TXT\n#\n\n\"\"\"\nTools for reading and writing dependency trees.\nThe input is assumed to be in Malt-TAB format\n(http://stp.lingfil.uu.se/~nivre/research/MaltXML.html).\n\"\"\"\n\nfrom collections import defaultdict\nfrom itertools import chain\nfrom pprint import pformat\nimport subprocess\nimport warnings\n\nfrom nltk.tree import Tree\n\n#################################################################\n# DependencyGraph Class\n#################################################################\n\n\nclass DependencyGraph:\n \"\"\"\n A container for the nodes and labelled edges of a dependency structure.\n \"\"\"\n\n def __init__(\n self,\n tree_str=None,\n cell_extractor=None,\n zero_based=False,\n cell_separator=None,\n top_relation_label=\"ROOT\",\n ):\n \"\"\"Dependency graph.\n\n We place a dummy `TOP` node with the index 0, since the root node is\n often assigned 0 as its head. This also means that the indexing of the\n nodes corresponds directly to the Malt-TAB format, which starts at 1.\n\n If zero-based is True, then Malt-TAB-like input with node numbers\n starting at 0 and the root node assigned -1 (as produced by, e.g.,\n zpar).\n\n :param str cell_separator: the cell separator. If not provided, cells\n are split by whitespace.\n\n :param str top_relation_label: the label by which the top relation is\n identified, for examlple, `ROOT`, `null` or `TOP`.\n\n \"\"\"\n self.nodes = defaultdict(\n lambda: {\n \"address\": None,\n \"word\": None,\n \"lemma\": None,\n \"ctag\": None,\n \"tag\": None,\n \"feats\": None,\n \"head\": None,\n \"deps\": defaultdict(list),\n \"rel\": None,\n }\n )\n\n self.nodes[0].update({\"ctag\": \"TOP\", \"tag\": \"TOP\", \"address\": 0})\n\n self.root = None\n\n if tree_str:\n self._parse(\n tree_str,\n cell_extractor=cell_extractor,\n zero_based=zero_based,\n cell_separator=cell_separator,\n top_relation_label=top_relation_label,\n )\n\n def remove_by_address(self, address):\n \"\"\"\n Removes the node with the given address. References\n to this node in others will still exist.\n \"\"\"\n del self.nodes[address]\n\n def redirect_arcs(self, originals, redirect):\n \"\"\"\n Redirects arcs to any of the nodes in the originals list\n to the redirect node address.\n \"\"\"\n for node in self.nodes.values():\n new_deps = []\n for dep in node[\"deps\"]:\n if dep in originals:\n new_deps.append(redirect)\n else:\n new_deps.append(dep)\n node[\"deps\"] = new_deps\n\n def add_arc(self, head_address, mod_address):\n \"\"\"\n Adds an arc from the node specified by head_address to the\n node specified by the mod address.\n \"\"\"\n relation = self.nodes[mod_address][\"rel\"]\n self.nodes[head_address][\"deps\"].setdefault(relation, [])\n self.nodes[head_address][\"deps\"][relation].append(mod_address)\n # self.nodes[head_address]['deps'].append(mod_address)\n\n def connect_graph(self):\n \"\"\"\n Fully connects all non-root nodes. 
All nodes are set to be dependents\n of the root node.\n \"\"\"\n for node1 in self.nodes.values():\n for node2 in self.nodes.values():\n if node1[\"address\"] != node2[\"address\"] and node2[\"rel\"] != \"TOP\":\n relation = node2[\"rel\"]\n node1[\"deps\"].setdefault(relation, [])\n node1[\"deps\"][relation].append(node2[\"address\"])\n # node1['deps'].append(node2['address'])\n\n def get_by_address(self, node_address):\n \"\"\"Return the node with the given address.\"\"\"\n return self.nodes[node_address]\n\n def contains_address(self, node_address):\n \"\"\"\n Returns true if the graph contains a node with the given node\n address, false otherwise.\n \"\"\"\n return node_address in self.nodes\n\n def to_dot(self):\n \"\"\"Return a dot representation suitable for using with Graphviz.\n\n >>> dg = DependencyGraph(\n ... 'John N 2\\\\n'\n ... 'loves V 0\\\\n'\n ... 'Mary N 2'\n ... )\n >>> print(dg.to_dot())\n digraph G{\n edge [dir=forward]\n node [shape=plaintext]\n <BLANKLINE>\n 0 [label=\"0 (None)\"]\n 0 -> 2 [label=\"ROOT\"]\n 1 [label=\"1 (John)\"]\n 2 [label=\"2 (loves)\"]\n 2 -> 1 [label=\"\"]\n 2 -> 3 [label=\"\"]\n 3 [label=\"3 (Mary)\"]\n }\n\n \"\"\"\n # Start the digraph specification\n s = \"digraph G{\\n\"\n s += \"edge [dir=forward]\\n\"\n s += \"node [shape=plaintext]\\n\"\n\n # Draw the remaining nodes\n for node in sorted(self.nodes.values(), key=lambda v: v[\"address\"]):\n s += '\\n%s [label=\"%s (%s)\"]' % (\n node[\"address\"],\n node[\"address\"],\n node[\"word\"],\n )\n for rel, deps in node[\"deps\"].items():\n for dep in deps:\n if rel is not None:\n s += '\\n%s -> %s [label=\"%s\"]' % (node[\"address\"], dep, rel)\n else:\n s += \"\\n%s -> %s \" % (node[\"address\"], dep)\n s += \"\\n}\"\n\n return s\n\n def _repr_svg_(self):\n \"\"\"Show SVG representation of the transducer (IPython magic).\n\n >>> dg = DependencyGraph(\n ... 'John N 2\\\\n'\n ... 'loves V 0\\\\n'\n ... 'Mary N 2'\n ... )\n >>> dg._repr_svg_().split('\\\\n')[0]\n '<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>'\n\n \"\"\"\n dot_string = self.to_dot()\n\n try:\n process = subprocess.Popen(\n [\"dot\", \"-Tsvg\"],\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n universal_newlines=True,\n )\n except OSError as e:\n raise Exception(\"Cannot find the dot binary from Graphviz package\") from e\n out, err = process.communicate(dot_string)\n if err:\n raise Exception(\n \"Cannot create svg representation by running dot from string: {}\"\n \"\".format(dot_string)\n )\n return out\n\n def __str__(self):\n return pformat(self.nodes)\n\n def __repr__(self):\n return \"<DependencyGraph with {0} nodes>\".format(len(self.nodes))\n\n @staticmethod\n def load(\n filename, zero_based=False, cell_separator=None, top_relation_label=\"ROOT\"\n ):\n \"\"\"\n :param filename: a name of a file in Malt-TAB format\n :param zero_based: nodes in the input file are numbered starting from 0\n rather than 1 (as produced by, e.g., zpar)\n :param str cell_separator: the cell separator. 
If not provided, cells\n are split by whitespace.\n :param str top_relation_label: the label by which the top relation is\n identified, for examlple, `ROOT`, `null` or `TOP`.\n\n :return: a list of DependencyGraphs\n\n \"\"\"\n with open(filename) as infile:\n return [\n DependencyGraph(\n tree_str,\n zero_based=zero_based,\n cell_separator=cell_separator,\n top_relation_label=top_relation_label,\n )\n for tree_str in infile.read().split(\"\\n\\n\")\n ]\n\n def left_children(self, node_index):\n \"\"\"\n Returns the number of left children under the node specified\n by the given address.\n \"\"\"\n children = chain.from_iterable(self.nodes[node_index][\"deps\"].values())\n index = self.nodes[node_index][\"address\"]\n return sum(1 for c in children if c < index)\n\n def right_children(self, node_index):\n \"\"\"\n Returns the number of right children under the node specified\n by the given address.\n \"\"\"\n children = chain.from_iterable(self.nodes[node_index][\"deps\"].values())\n index = self.nodes[node_index][\"address\"]\n return sum(1 for c in children if c > index)\n\n def add_node(self, node):\n if not self.contains_address(node[\"address\"]):\n self.nodes[node[\"address\"]].update(node)\n\n def _parse(\n self,\n input_,\n cell_extractor=None,\n zero_based=False,\n cell_separator=None,\n top_relation_label=\"ROOT\",\n ):\n \"\"\"Parse a sentence.\n\n :param extractor: a function that given a tuple of cells returns a\n 7-tuple, where the values are ``word, lemma, ctag, tag, feats, head,\n rel``.\n\n :param str cell_separator: the cell separator. If not provided, cells\n are split by whitespace.\n\n :param str top_relation_label: the label by which the top relation is\n identified, for examlple, `ROOT`, `null` or `TOP`.\n\n \"\"\"\n\n def extract_3_cells(cells, index):\n word, tag, head = cells\n return index, word, word, tag, tag, \"\", head, \"\"\n\n def extract_4_cells(cells, index):\n word, tag, head, rel = cells\n return index, word, word, tag, tag, \"\", head, rel\n\n def extract_7_cells(cells, index):\n line_index, word, lemma, tag, _, head, rel = cells\n try:\n index = int(line_index)\n except ValueError:\n # index can't be parsed as an integer, use default\n pass\n return index, word, lemma, tag, tag, \"\", head, rel\n\n def extract_10_cells(cells, index):\n line_index, word, lemma, ctag, tag, feats, head, rel, _, _ = cells\n try:\n index = int(line_index)\n except ValueError:\n # index can't be parsed as an integer, use default\n pass\n return index, word, lemma, ctag, tag, feats, head, rel\n\n extractors = {\n 3: extract_3_cells,\n 4: extract_4_cells,\n 7: extract_7_cells,\n 10: extract_10_cells,\n }\n\n if isinstance(input_, str):\n input_ = (line for line in input_.split(\"\\n\"))\n\n lines = (l.rstrip() for l in input_)\n lines = (l for l in lines if l)\n\n cell_number = None\n for index, line in enumerate(lines, start=1):\n cells = line.split(cell_separator)\n if cell_number is None:\n cell_number = len(cells)\n else:\n assert cell_number == len(cells)\n\n if cell_extractor is None:\n try:\n cell_extractor = extractors[cell_number]\n except KeyError as e:\n raise ValueError(\n \"Number of tab-delimited fields ({0}) not supported by \"\n \"CoNLL(10) or Malt-Tab(4) format\".format(cell_number)\n ) from e\n\n try:\n index, word, lemma, ctag, tag, feats, head, rel = cell_extractor(\n cells, index\n )\n except (TypeError, ValueError):\n # cell_extractor doesn't take 2 arguments or doesn't return 8\n # values; assume the cell_extractor is an older external\n # extractor and 
doesn't accept or return an index.\n word, lemma, ctag, tag, feats, head, rel = cell_extractor(cells)\n\n if head == \"_\":\n continue\n\n head = int(head)\n if zero_based:\n head += 1\n\n self.nodes[index].update(\n {\n \"address\": index,\n \"word\": word,\n \"lemma\": lemma,\n \"ctag\": ctag,\n \"tag\": tag,\n \"feats\": feats,\n \"head\": head,\n \"rel\": rel,\n }\n )\n\n # Make sure that the fake root node has labeled dependencies.\n if (cell_number == 3) and (head == 0):\n rel = top_relation_label\n self.nodes[head][\"deps\"][rel].append(index)\n\n if self.nodes[0][\"deps\"][top_relation_label]:\n root_address = self.nodes[0][\"deps\"][top_relation_label][0]\n self.root = self.nodes[root_address]\n self.top_relation_label = top_relation_label\n else:\n warnings.warn(\n \"The graph doesn't contain a node \" \"that depends on the root element.\"\n )\n\n def _word(self, node, filter=True):\n w = node[\"word\"]\n if filter:\n if w != \",\":\n return w\n return w\n\n def _tree(self, i):\n \"\"\" Turn dependency graphs into NLTK trees.\n\n :param int i: index of a node\n :return: either a word (if the indexed node is a leaf) or a ``Tree``.\n \"\"\"\n node = self.get_by_address(i)\n word = node[\"word\"]\n deps = sorted(chain.from_iterable(node[\"deps\"].values()))\n\n if deps:\n return Tree(word, [self._tree(dep) for dep in deps])\n else:\n return word\n\n def tree(self):\n \"\"\"\n Starting with the ``root`` node, build a dependency tree using the NLTK\n ``Tree`` constructor. Dependency labels are omitted.\n \"\"\"\n node = self.root\n\n word = node[\"word\"]\n deps = sorted(chain.from_iterable(node[\"deps\"].values()))\n return Tree(word, [self._tree(dep) for dep in deps])\n\n def triples(self, node=None):\n \"\"\"\n Extract dependency triples of the form:\n ((head word, head tag), rel, (dep word, dep tag))\n \"\"\"\n\n if not node:\n node = self.root\n\n head = (node[\"word\"], node[\"ctag\"])\n for i in sorted(chain.from_iterable(node[\"deps\"].values())):\n dep = self.get_by_address(i)\n yield (head, dep[\"rel\"], (dep[\"word\"], dep[\"ctag\"]))\n for triple in self.triples(node=dep):\n yield triple\n\n def _hd(self, i):\n try:\n return self.nodes[i][\"head\"]\n except IndexError:\n return None\n\n def _rel(self, i):\n try:\n return self.nodes[i][\"rel\"]\n except IndexError:\n return None\n\n # what's the return type? Boolean or list?\n def contains_cycle(self):\n \"\"\"Check whether there are cycles.\n\n >>> dg = DependencyGraph(treebank_data)\n >>> dg.contains_cycle()\n False\n\n >>> cyclic_dg = DependencyGraph()\n >>> top = {'word': None, 'deps': [1], 'rel': 'TOP', 'address': 0}\n >>> child1 = {'word': None, 'deps': [2], 'rel': 'NTOP', 'address': 1}\n >>> child2 = {'word': None, 'deps': [4], 'rel': 'NTOP', 'address': 2}\n >>> child3 = {'word': None, 'deps': [1], 'rel': 'NTOP', 'address': 3}\n >>> child4 = {'word': None, 'deps': [3], 'rel': 'NTOP', 'address': 4}\n >>> cyclic_dg.nodes = {\n ... 0: top,\n ... 1: child1,\n ... 2: child2,\n ... 3: child3,\n ... 4: child4,\n ... 
}\n >>> cyclic_dg.root = top\n\n >>> cyclic_dg.contains_cycle()\n [3, 1, 2, 4]\n\n \"\"\"\n distances = {}\n\n for node in self.nodes.values():\n for dep in node[\"deps\"]:\n key = tuple([node[\"address\"], dep])\n distances[key] = 1\n\n for _ in self.nodes:\n new_entries = {}\n\n for pair1 in distances:\n for pair2 in distances:\n if pair1[1] == pair2[0]:\n key = tuple([pair1[0], pair2[1]])\n new_entries[key] = distances[pair1] + distances[pair2]\n\n for pair in new_entries:\n distances[pair] = new_entries[pair]\n if pair[0] == pair[1]:\n path = self.get_cycle_path(self.get_by_address(pair[0]), pair[0])\n return path\n\n return False # return []?\n\n def get_cycle_path(self, curr_node, goal_node_index):\n for dep in curr_node[\"deps\"]:\n if dep == goal_node_index:\n return [curr_node[\"address\"]]\n for dep in curr_node[\"deps\"]:\n path = self.get_cycle_path(self.get_by_address(dep), goal_node_index)\n if len(path) > 0:\n path.insert(0, curr_node[\"address\"])\n return path\n return []\n\n def to_conll(self, style):\n \"\"\"\n The dependency graph in CoNLL format.\n\n :param style: the style to use for the format (3, 4, 10 columns)\n :type style: int\n :rtype: str\n \"\"\"\n\n if style == 3:\n template = \"{word}\\t{tag}\\t{head}\\n\"\n elif style == 4:\n template = \"{word}\\t{tag}\\t{head}\\t{rel}\\n\"\n elif style == 10:\n template = (\n \"{i}\\t{word}\\t{lemma}\\t{ctag}\\t{tag}\\t{feats}\\t{head}\\t{rel}\\t_\\t_\\n\"\n )\n else:\n raise ValueError(\n \"Number of tab-delimited fields ({0}) not supported by \"\n \"CoNLL(10) or Malt-Tab(4) format\".format(style)\n )\n\n return \"\".join(\n template.format(i=i, **node)\n for i, node in sorted(self.nodes.items())\n if node[\"tag\"] != \"TOP\"\n )\n\n def nx_graph(self):\n \"\"\"Convert the data in a ``nodelist`` into a networkx labeled directed graph.\"\"\"\n import networkx\n\n nx_nodelist = list(range(1, len(self.nodes)))\n nx_edgelist = [\n (n, self._hd(n), self._rel(n)) for n in nx_nodelist if self._hd(n)\n ]\n self.nx_labels = {}\n for n in nx_nodelist:\n self.nx_labels[n] = self.nodes[n][\"word\"]\n\n g = networkx.MultiDiGraph()\n g.add_nodes_from(nx_nodelist)\n g.add_edges_from(nx_edgelist)\n\n return g\n\n\nclass DependencyGraphError(Exception):\n \"\"\"Dependency graph exception.\"\"\"\n\n\ndef demo():\n malt_demo()\n conll_demo()\n conll_file_demo()\n cycle_finding_demo()\n\n\ndef malt_demo(nx=False):\n \"\"\"\n A demonstration of the result of reading a dependency\n version of the first sentence of the Penn Treebank.\n \"\"\"\n dg = DependencyGraph(\n \"\"\"Pierre NNP 2 NMOD\nVinken NNP 8 SUB\n, , 2 P\n61 CD 5 NMOD\nyears NNS 6 AMOD\nold JJ 2 NMOD\n, , 2 P\nwill MD 0 ROOT\njoin VB 8 VC\nthe DT 11 NMOD\nboard NN 9 OBJ\nas IN 9 VMOD\na DT 15 NMOD\nnonexecutive JJ 15 NMOD\ndirector NN 12 PMOD\nNov. NNP 9 VMOD\n29 CD 16 NMOD\n. . 
9 VMOD\n\"\"\"\n )\n tree = dg.tree()\n tree.pprint()\n if nx:\n # currently doesn't work\n import networkx\n from matplotlib import pylab\n\n g = dg.nx_graph()\n g.info()\n pos = networkx.spring_layout(g, dim=1)\n networkx.draw_networkx_nodes(g, pos, node_size=50)\n # networkx.draw_networkx_edges(g, pos, edge_color='k', width=8)\n networkx.draw_networkx_labels(g, pos, dg.nx_labels)\n pylab.xticks([])\n pylab.yticks([])\n pylab.savefig(\"tree.png\")\n pylab.show()\n\n\ndef conll_demo():\n \"\"\"\n A demonstration of how to read a string representation of\n a CoNLL format dependency tree.\n \"\"\"\n dg = DependencyGraph(conll_data1)\n tree = dg.tree()\n tree.pprint()\n print(dg)\n print(dg.to_conll(4))\n\n\ndef conll_file_demo():\n print(\"Mass conll_read demo...\")\n graphs = [DependencyGraph(entry) for entry in conll_data2.split(\"\\n\\n\") if entry]\n for graph in graphs:\n tree = graph.tree()\n print(\"\\n\")\n tree.pprint()\n\n\ndef cycle_finding_demo():\n dg = DependencyGraph(treebank_data)\n print(dg.contains_cycle())\n cyclic_dg = DependencyGraph()\n cyclic_dg.add_node({\"word\": None, \"deps\": [1], \"rel\": \"TOP\", \"address\": 0})\n cyclic_dg.add_node({\"word\": None, \"deps\": [2], \"rel\": \"NTOP\", \"address\": 1})\n cyclic_dg.add_node({\"word\": None, \"deps\": [4], \"rel\": \"NTOP\", \"address\": 2})\n cyclic_dg.add_node({\"word\": None, \"deps\": [1], \"rel\": \"NTOP\", \"address\": 3})\n cyclic_dg.add_node({\"word\": None, \"deps\": [3], \"rel\": \"NTOP\", \"address\": 4})\n print(cyclic_dg.contains_cycle())\n\n\ntreebank_data = \"\"\"Pierre NNP 2 NMOD\nVinken NNP 8 SUB\n, , 2 P\n61 CD 5 NMOD\nyears NNS 6 AMOD\nold JJ 2 NMOD\n, , 2 P\nwill MD 0 ROOT\njoin VB 8 VC\nthe DT 11 NMOD\nboard NN 9 OBJ\nas IN 9 VMOD\na DT 15 NMOD\nnonexecutive JJ 15 NMOD\ndirector NN 12 PMOD\nNov. NNP 9 VMOD\n29 CD 16 NMOD\n. . 9 VMOD\n\"\"\"\n\nconll_data1 = \"\"\"\n1 Ze ze Pron Pron per|3|evofmv|nom 2 su _ _\n2 had heb V V trans|ovt|1of2of3|ev 0 ROOT _ _\n3 met met Prep Prep voor 8 mod _ _\n4 haar haar Pron Pron bez|3|ev|neut|attr 5 det _ _\n5 moeder moeder N N soort|ev|neut 3 obj1 _ _\n6 kunnen kan V V hulp|ott|1of2of3|mv 2 vc _ _\n7 gaan ga V V hulp|inf 6 vc _ _\n8 winkelen winkel V V intrans|inf 11 cnj _ _\n9 , , Punc Punc komma 8 punct _ _\n10 zwemmen zwem V V intrans|inf 11 cnj _ _\n11 of of Conj Conj neven 7 vc _ _\n12 terrassen terras N N soort|mv|neut 11 cnj _ _\n13 . . Punc Punc punt 12 punct _ _\n\"\"\"\n\nconll_data2 = \"\"\"1 Cathy Cathy N N eigen|ev|neut 2 su _ _\n2 zag zie V V trans|ovt|1of2of3|ev 0 ROOT _ _\n3 hen hen Pron Pron per|3|mv|datofacc 2 obj1 _ _\n4 wild wild Adj Adj attr|stell|onverv 5 mod _ _\n5 zwaaien zwaai N N soort|mv|neut 2 vc _ _\n6 . . Punc Punc punt 5 punct _ _\n\n1 Ze ze Pron Pron per|3|evofmv|nom 2 su _ _\n2 had heb V V trans|ovt|1of2of3|ev 0 ROOT _ _\n3 met met Prep Prep voor 8 mod _ _\n4 haar haar Pron Pron bez|3|ev|neut|attr 5 det _ _\n5 moeder moeder N N soort|ev|neut 3 obj1 _ _\n6 kunnen kan V V hulp|ott|1of2of3|mv 2 vc _ _\n7 gaan ga V V hulp|inf 6 vc _ _\n8 winkelen winkel V V intrans|inf 11 cnj _ _\n9 , , Punc Punc komma 8 punct _ _\n10 zwemmen zwem V V intrans|inf 11 cnj _ _\n11 of of Conj Conj neven 7 vc _ _\n12 terrassen terras N N soort|mv|neut 11 cnj _ _\n13 . . 
Punc Punc punt 12 punct _ _\n\n1 Dat dat Pron Pron aanw|neut|attr 2 det _ _\n2 werkwoord werkwoord N N soort|ev|neut 6 obj1 _ _\n3 had heb V V hulp|ovt|1of2of3|ev 0 ROOT _ _\n4 ze ze Pron Pron per|3|evofmv|nom 6 su _ _\n5 zelf zelf Pron Pron aanw|neut|attr|wzelf 3 predm _ _\n6 uitgevonden vind V V trans|verldw|onverv 3 vc _ _\n7 . . Punc Punc punt 6 punct _ _\n\n1 Het het Pron Pron onbep|neut|zelfst 2 su _ _\n2 hoorde hoor V V trans|ovt|1of2of3|ev 0 ROOT _ _\n3 bij bij Prep Prep voor 2 ld _ _\n4 de de Art Art bep|zijdofmv|neut 6 det _ _\n5 warme warm Adj Adj attr|stell|vervneut 6 mod _ _\n6 zomerdag zomerdag N N soort|ev|neut 3 obj1 _ _\n7 die die Pron Pron betr|neut|zelfst 6 mod _ _\n8 ze ze Pron Pron per|3|evofmv|nom 12 su _ _\n9 ginds ginds Adv Adv gew|aanw 12 mod _ _\n10 achter achter Adv Adv gew|geenfunc|stell|onverv 12 svp _ _\n11 had heb V V hulp|ovt|1of2of3|ev 7 body _ _\n12 gelaten laat V V trans|verldw|onverv 11 vc _ _\n13 . . Punc Punc punt 12 punct _ _\n\n1 Ze ze Pron Pron per|3|evofmv|nom 2 su _ _\n2 hadden heb V V trans|ovt|1of2of3|mv 0 ROOT _ _\n3 languit languit Adv Adv gew|geenfunc|stell|onverv 11 mod _ _\n4 naast naast Prep Prep voor 11 mod _ _\n5 elkaar elkaar Pron Pron rec|neut 4 obj1 _ _\n6 op op Prep Prep voor 11 ld _ _\n7 de de Art Art bep|zijdofmv|neut 8 det _ _\n8 strandstoelen strandstoel N N soort|mv|neut 6 obj1 _ _\n9 kunnen kan V V hulp|inf 2 vc _ _\n10 gaan ga V V hulp|inf 9 vc _ _\n11 liggen lig V V intrans|inf 10 vc _ _\n12 . . Punc Punc punt 11 punct _ _\n\n1 Zij zij Pron Pron per|3|evofmv|nom 2 su _ _\n2 zou zal V V hulp|ovt|1of2of3|ev 7 cnj _ _\n3 mams mams N N soort|ev|neut 4 det _ _\n4 rug rug N N soort|ev|neut 5 obj1 _ _\n5 ingewreven wrijf V V trans|verldw|onverv 6 vc _ _\n6 hebben heb V V hulp|inf 2 vc _ _\n7 en en Conj Conj neven 0 ROOT _ _\n8 mam mam V V trans|ovt|1of2of3|ev 7 cnj _ _\n9 de de Art Art bep|zijdofmv|neut 10 det _ _\n10 hare hare Pron Pron bez|3|ev|neut|attr 8 obj1 _ _\n11 . . Punc Punc punt 10 punct _ _\n\n1 Of of Conj Conj onder|metfin 0 ROOT _ _\n2 ze ze Pron Pron per|3|evofmv|nom 3 su _ _\n3 had heb V V hulp|ovt|1of2of3|ev 0 ROOT _ _\n4 gewoon gewoon Adj Adj adv|stell|onverv 10 mod _ _\n5 met met Prep Prep voor 10 mod _ _\n6 haar haar Pron Pron bez|3|ev|neut|attr 7 det _ _\n7 vriendinnen vriendin N N soort|mv|neut 5 obj1 _ _\n8 rond rond Adv Adv deelv 10 svp _ _\n9 kunnen kan V V hulp|inf 3 vc _ _\n10 slenteren slenter V V intrans|inf 9 vc _ _\n11 in in Prep Prep voor 10 mod _ _\n12 de de Art Art bep|zijdofmv|neut 13 det _ _\n13 buurt buurt N N soort|ev|neut 11 obj1 _ _\n14 van van Prep Prep voor 13 mod _ _\n15 Trafalgar_Square Trafalgar_Square MWU N_N eigen|ev|neut_eigen|ev|neut 14 obj1 _ _\n16 . . Punc Punc punt 15 punct _ _\n\"\"\"\n\nif __name__ == \"__main__\":\n demo()\n" ]
[ [ "matplotlib.pylab.savefig", "matplotlib.pylab.yticks", "matplotlib.pylab.show", "matplotlib.pylab.xticks" ] ]
leguiart/MSc_Thesis
[ "22ffc73c75d814856850f26c4586d90896b74cf3" ]
[ "evosoro_pymoo/Algorithms/RankAndNoveltySurvival.py" ]
[ "import numpy as np\n\nfrom pymoo.core.survival import Survival\nfrom pymoo.util.nds.non_dominated_sorting import NonDominatedSorting\nfrom pymoo.util.randomized_argsort import randomized_argsort\n\n# ---------------------------------------------------------------------------------------------------------\n# Survival Selection\n# ---------------------------------------------------------------------------------------------------------\n\n\nclass RankAndNoveltySurvival(Survival):\n\n def __init__(self, nds=None) -> None:\n super().__init__(filter_infeasible=True)\n self.nds = nds if nds is not None else NonDominatedSorting()\n\n def _do(self, problem, pop, *args, n_survive=None, **kwargs):\n\n # get the objective space values and objects\n F = pop.get(\"F\").astype(float, copy=False)\n\n # the final indices of surviving individuals\n survivors = []\n\n # do the non-dominated sorting until splitting front\n fronts = self.nds.do(F, n_stop_if_ranked=n_survive)\n\n for k, front in enumerate(fronts):\n\n # calculate the novelty of the front\n novelty_of_front = get_unaligned_novelty(pop[front])\n\n # save rank and crowding in the individual class\n for j, i in enumerate(front):\n pop[i].set(\"rank\", k)\n pop[i].set(\"crowding\", novelty_of_front[j])\n\n # current front sorted by crowding distance if splitting\n if len(survivors) + len(front) > n_survive:\n I = randomized_argsort(novelty_of_front, order='descending', method='numpy')\n I = I[:(n_survive - len(survivors))]\n\n # otherwise take the whole front unsorted\n else:\n I = np.arange(len(front))\n\n # extend the survivors by all or selected individuals\n survivors.extend(front[I])\n\n return pop[survivors]\n\n\ndef get_unaligned_novelty(pop):\n return np.array([x_i.X.unaligned_novelty_metric for x_i in pop])\n" ]
[ [ "numpy.array" ] ]
robertklee/SENG474-DataMining
[ "85b6ff300e18320fe8b40c89d5f22fde51ba588e" ]
[ "notebooks/imgaug-playground.py" ]
[ "import imgaug as ia\nfrom imgaug import augmenters as iaa\nimport numpy as np\nfrom scipy import misc\nimport imageio\nimport cv2\n\nimport imgaug as ia\nimport imgaug.augmenters as iaa\nfrom imgaug.augmentables import Keypoint, KeypointsOnImage\n\n\nia.seed(1)\n\nimage = ia.quokka(size=(256, 256))\nkps = KeypointsOnImage([\n Keypoint(x=65, y=100),\n Keypoint(x=75, y=200),\n Keypoint(x=100, y=100),\n Keypoint(x=200, y=80)\n], shape=image.shape)\n\nseq = iaa.Sequential([\n iaa.Multiply((1.2, 1.5)), # change brightness, doesn't affect keypoints\n iaa.Affine(\n rotate=10,\n scale=(0.5, 0.7)\n ) # rotate by exactly 10deg and scale to 50-70%, affects keypoints\n])\n\n# Augment keypoints and images.\nimage_aug, kps_aug = seq(image=image, keypoints=kps)\n\n# print coordinates before/after augmentation (see below)\n# use after.x_int and after.y_int to get rounded integer coordinates\nfor i in range(len(kps.keypoints)):\n before = kps.keypoints[i]\n after = kps_aug.keypoints[i]\n print(\"Keypoint %d: (%.8f, %.8f) -> (%.8f, %.8f)\" % (\n i, before.x, before.y, after.x, after.y)\n )\n\n# image with keypoints before/after augmentation (shown below)\nimage_before = kps.draw_on_image(image, size=7)\nimage_after = kps_aug.draw_on_image(image_aug, size=7)\n\ndef main():\n imgs = np.zeros((1, 100, 100, 3), dtype=np.uint8) + 255\n bbs = ia.BoundingBoxesOnImage([\n ia.BoundingBox(x1=0, x2=50, y1=0, y2=50)\n ], shape=imgs.shape[1:])\n\n aug = iaa.Sequential([\n iaa.Crop(px=10),\n iaa.Pad(px=10, pad_cval=128),\n iaa.Affine(scale=0.5, cval=0)\n ])\n\n aug_det = aug.to_deterministic()\n imgs_aug = aug_det.augment_images(imgs)\n bbs_aug = aug_det.augment_bounding_boxes([bbs])\n\n print(\"bbs:\")\n for bbs_aug_i in bbs_aug[0].bounding_boxes:\n print(bbs_aug_i)\n\n cv2.imshow('orig',imgs)\n cv2.imshow('aug',bbs_aug[0].draw_on_image(imgs_aug[0]))\n cv2.waitKey()\n\nif __name__ == \"__main__\":\n main()" ]
[ [ "numpy.zeros" ] ]
anmartinezs/pyseg_system
[ "5bb07c7901062452a34b73f376057cabc15a13c3" ]
[ "code/pyto/tomo/ctf.py" ]
[ "\"\"\"\nFunctions related to ctf.\n\nCurrently only few that allow running ctffind from console or notebook.\n\nWork in progress.\n\n# Author: Vladan Lucic (Max Planck Institute for Biochemistry)\n# $Id$\n\"\"\"\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\nfrom __future__ import division\nfrom builtins import zip\nfrom builtins import str\nfrom builtins import range\nfrom builtins import object\nfrom past.utils import old_div\nfrom past.builtins import basestring\n\n__version__ = \"$Revision$\"\n\nimport os\nimport subprocess\nimport logging\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport pyto.util.nested\nfrom pyto.io.image_io import ImageIO\nfrom pyto.grey.image import Image\n\n\nclass Ctf(object):\n \"\"\"\n Determination of CTF by external tools\n \"\"\"\n\n # prefix for validation attributed obtained from gctf\n validation_prefix = \"validation_\"\n\n # default params ctffind 4.0.17, also 4.1\n default_params_ctffind = {\n \"pixel_a\":1, \"cs\":2.7, \"amp\":0.1, \"phase\":\"no\", 'box':512, \n 'min_res':30, 'max_res':5, 'min_def':5000, 'max_def':50000, \n 'def_step':500, 'astig':100, 'known_astig':'no', 'slow_search':'yes',\n 'restraint_astig':'yes', 'tolerated_astig':200,\n 'phase':'yes', 'min_phase':0, 'max_phase':2, 'phase_step':0.1,\n 'expert':'no'}\n\n # parameter list for ctffind 4.0.17 (currently not used, left for reference)\n param_names_ctffind_4_0 = [\n 'pixel_a', 'voltage', 'cs', 'amp', 'box', 'min_res', 'max_res', \n 'min_def', 'max_def', 'def_step', 'astig', 'phase', \n 'min_phase', 'max_phase', 'phase_step']\n\n # default parameter list for 4.1; consistent with default_params_ctffind\n param_names_ctffind_4_1 = [\n 'pixel_a', 'voltage', 'cs', 'amp', 'box', 'min_res', 'max_res', \n 'min_def', 'max_def', 'def_step', 'known_astig', 'slow_search',\n 'restraint_astig','tolerated_astig',\n 'phase', 'min_phase', 'max_phase', 'phase_step', 'expert']\n\n def __init__(self):\n \"\"\"\n Initializes common attributes\n \"\"\"\n\n # attributes\n self.image_path_orig = []\n self.image_inds = []\n self.image_path = []\n self.ctf_path = []\n self.phases = []\n self.defoci_1 = []\n self.defoci_2 = []\n self.defoci = []\n self.resolution = []\n self.pixel_a = []\n self.angle = []\n\n @classmethod\n def find(\n cls, image_dir, image_prefix, ctf_dir, params, pixel_a=None, \n flatten='auto', tool='ctffind', executable=None,\n param_file='ctf_params.txt', fast=False, max_images=None, \n plot_ctf=True, plot_ps=True, b_plot=True, exp_f_plot=False, \n show_legend=True, plot_phases=True, plot_defoci=True, \n plot_resolution=True, print_results=True, print_validation=False):\n \"\"\"\n Determines and shows CTF fits for multiple images. \n\n All files located in (arg) image_dir whose namess start with (arg)\n image_prefix and that have extension mrc, em or st are selected\n for the ctf determination.\n\n If a selected file is 3D (image stack), and arg flatten is True or \n 'auto', all z-slices are summed up (saved in ctf_dir) and the ctf \n is detemined on the resulting (flattened. Alternatively, if arg \n flatten is False, z-slices are extracted, saved in ctf_dir and \n analyzed separately.\n\n All resulting files, as well as the extraced or flattened images \n (in case of 3D files) are saved or moved to directory ctf_dir.\n\n CTF is determined using external tools. 
Current options are:\n - CTFFIND\n - gCTF \n These tools have to be installed externally.\n\n Parameters for the ctf tools are specified as a dictionary (arg params).\n Parameters used for both ctffind and gctf are:\n - 'pixel_a', 'voltage', 'cs', 'amp', 'box', 'min_res', 'max_res', \n 'min_def', 'max_def', 'def_step', 'astig', 'phase', \n 'min_phase', 'max_phase', 'phase_step'\n Voltage ('voltage') should always be specified. The pixel size \n (pixel_a) has to be specified in case it can not be read from \n the image header. All other parameters are optional, if they are\n not specified the ctffind / gctg default values are used.\n\n The default values should be fine for single particle images.\n Parameter recommendations for phase plate images are given in\n the ctffind / gctf documentation.\n\n In case of ctffind, arg params can also be a list containing the \n parameter values in the same order as specified above, starting\n with voltage.\n\n Important for ctffind: Because the required arguments differ between \n versions 4.0 and 4.1, as well as depend on values specified, it is \n not guaranteed that the dictionary form of arg params will work.\n In case of problems, specify params as a list.\n\n In addition, all other gctf arguments can also be specified \n (without '--'). It is suggested to use:\n 'do_EPA':'', 'do_validation':''\n\n Parameter units are the same as in the ctf deterimantion tools.\n\n Intended for use in an environment such as Jupyter notebook.\n\n Arguments:\n - image_dir: directory where images reside\n - image prefix: beginning of image file(s)\n - ctf_dir: directory where the ctf determination results and \n extracted images are saved\n - pixel_a: pixel size in A\n - params: ctf determination parameters\n - flatten: indicated whether 3D images should be flatten (True or \n 'auto') or not (False).\n - tool: name of the ctf detmination tool\n - executable: ctf tool executable\n - param_file: name of the temporary parameter file \n - fast: flag indicating whether ctffind --fast option is used\n - print_results: flag indicating if phase and defoci found \n are printed for each analyzed image\n - plot_ctf: flag indicating whether ctf is plotted for each \n analyzed image\n - show_legend: flag indicating whether a legend is shown on ctf graphs\n - plot_phases, plot_defoci: flags indicating whether a graph \n containing phases and defoci of all images respectivelly are plotted\n - max_images: max number if image analyzed, for testing\n\n Returns an instance of this class. The following attributes are all \n lists where elements correspond to individual images:\n - image_path_orig: image path of the input file\n - image_path: image path of the image that is actually used\n to deterime ctf. 
It differs from image_path_orig if the original\n (input) image is a stack that is flattened or used to extract slices\n - image_inds: index of a slice extracted for a stack\n - ctf_path: path of the ctf fit image\n - defocus_1, defocus_2, defocus: defoci along the two axes and the\n mean defocus in um\n - angle: defocus (astigmatism) angle\n - phase: phase shift in multiples of pi\n - resolution: resolution in nm\n - ccc: correlation coefficient\n - pixel_a: pixel size in A\n - b_factor: b-factor (gctf only)\n \"\"\"\n\n # initialize\n index = 0\n new = cls()\n print_head = True\n if plot_ctf and fast: \n print(\n \"Warning: CTF will not be plotted because fast execution\"\n + \" was chosen\")\n\n # check which ctf tool to use\n if tool == 'ctffind':\n if executable is None:\n executable = 'ctffind'\n elif tool == 'gctf':\n if executable is None:\n executable = 'gctf'\n else:\n raise ValueError(\n \"CTF determination tool \" + str(tool) + \" was not understood.\")\n new.tool = tool\n\n # cftfind on all images\n file_list = np.sort(os.listdir(image_dir))\n for image_name in file_list:\n\n # skip files that are not images\n if not image_name.startswith(image_prefix): continue\n if not (image_name.endswith('.mrc') or image_name.endswith('.st') \n or image_name.endswith('.em')): \n continue\n if image_name.endswith('ctf.mrc'): continue\n\n # set input image path \n image_path = os.path.join(image_dir, image_name)\n\n # figure out if to flatten or not (just once, assume all files \n # are the same)\n im_io = ImageIO(file=image_path)\n if image_name.endswith('.st'):\n im_io.readHeader(fileFormat='mrc')\n else:\n im_io.readHeader()\n z_dim = im_io.shape[2]\n n_digits = int(np.ceil(np.log10(z_dim)))\n if isinstance(flatten, bool):\n pass\n elif isinstance(flatten, basestring) and (flatten == 'auto'):\n if z_dim > 1: \n flatten = True\n else:\n flatten = False\n else:\n raise ValueError(\n \"Argument flatten: \"+ str(flatten) +\" was not understood.\") \n\n # load stack and prepare image name, if need to extract images\n if (z_dim > 1) and not flatten:\n image_dir, image_name = os.path.split(image_path)\n image_base, image_extension = image_name.rsplit('.', 1)\n image_name_new_tmplt = (\n image_base + '_%0' + str(n_digits) + 'd.mrc')\n if image_name.endswith('.st'):\n stack = Image.read(\n image_path, memmap=True, fileFormat='mrc')\n else:\n stack = Image.read(image_path, memmap=True)\n else:\n image_path_to_read = image_path\n\n # find ctf of the current image or stack\n for image_in_stack_ind in range(z_dim):\n\n # extract and save images if needed\n if (z_dim > 1) and not flatten:\n if not os.path.exists(ctf_dir): os.makedirs(ctf_dir)\n image_path_to_read = os.path.join(\n ctf_dir, (image_name_new_tmplt % image_in_stack_ind))\n one_image = Image()\n one_image.data = stack.data[:,:,image_in_stack_ind]\n one_image.write(\n file=image_path_to_read, pixel=stack.pixelsize)\n\n # save image path retlated\n new.image_path_orig.append(image_path)\n new.image_inds.append(image_in_stack_ind)\n new.image_path.append(image_path_to_read)\n\n # find ctf\n if tool == 'ctffind':\n\n # ctffind\n res_one = cls.ctffind(\n image_path=image_path_to_read, flatten=flatten, \n ctf_dir=ctf_dir, executable=executable, \n pixel_a=pixel_a, params=params, \n param_file=param_file, fast=fast, print_head=print_head,\n print_results= print_results, \n plot_ctf=plot_ctf, show_legend=show_legend)\n\n elif tool == 'gctf':\n\n # gctf\n res_one = cls.gctf(\n image_path=image_path_to_read, params=params, \n pixel_a=pixel_a, 
flatten=flatten, ctf_dir=ctf_dir, \n executable=executable, \n plot_ctf=plot_ctf, plot_ps=plot_ps ,b_plot=b_plot, \n exp_f_plot=exp_f_plot, show_legend=show_legend,\n print_results=print_results, \n print_head=print_head, \n print_validation=print_validation)\n \n # save gctf specific data\n try:\n new.b_factor.append(res_one['b_factor'])\n except AttributeError:\n new.b_factor = [res_one['b_factor']]\n for name, value in list(res_one.items()):\n if name.startswith(cls.validation_prefix):\n try:\n previous_val = getattr(new, name)\n previous_val.append(value)\n setattr(new, name, previous_val)\n except AttributeError:\n setattr(new, name, [value])\n\n else:\n raise ValueError(\"Sorry tool: \" + tool + \" was not found.\")\n\n # save data common for ctffind and gctf\n new.phases.append(res_one[\"phase\"])\n new.defoci.append(res_one[\"defocus\"])\n new.defoci_1.append(res_one['defocus_1'])\n new.defoci_2.append(res_one['defocus_2'])\n new.resolution.append(res_one['resolution'])\n new.pixel_a.append(res_one['pixel_a'])\n new.angle.append(res_one['angle'])\n new.ctf_path.append(res_one['ctf_path'])\n\n # keep track of n images processed so far\n print_head = False\n index = index + 1\n if (max_images is not None) and (index > max_images): break\n if flatten: break\n\n # plot phases\n if plot_phases:\n plt.figure()\n plt.bar(list(range(index)), new.phases)\n plt.plot([0, index], [0.5, 0.5], 'r--')\n plt.ylabel('Phase shift [$\\pi$]')\n plt.xlabel('Images')\n plt.title(\"Phase shift summary\")\n\n # plot defocus\n if plot_defoci:\n plt.figure()\n plt.bar(list(range(index)), new.defoci)\n plt.ylabel('Defocus [$\\mu m$]')\n plt.xlabel('Images')\n plt.title(\"Defocus summary\")\n\n # plot resolution\n if plot_resolution:\n plt.figure()\n plt.bar(list(range(index)), new.resolution)\n plt.ylabel('Resolution [nm]')\n plt.xlabel('Images')\n plt.title(\"Resolution summary\")\n\n return new\n\n @classmethod\n def ctffind(\n cls, image_path, ctf_dir, params, pixel_a=None, flatten=False, \n executable='ctffind', param_file='ctf_params.txt', fast=False, \n print_results=True, print_head=True, \n plot_ctf=True, show_legend=True):\n \"\"\"\n Determines and shows CTF fits of one image using ctffind.\n\n See find() for more information.\n \"\"\"\n\n # make ctf dir if doesn't exist\n if not os.path.exists(ctf_dir): os.makedirs(ctf_dir)\n\n # find pixel size\n if pixel_a is None:\n pixel_a = cls.read_pixel_size(image_path=image_path) \n\n # flatten frame stack\n if flatten:\n image_path = cls.flatten_stack(\n stack_path=image_path, flat_dir=ctf_dir)\n\n # default params ctffind 4.0.17 (moved to top of this file anyway)\n #default_params = {\n # \"pixel_a\":1, \"cs\":2.7, \"amp\":0.1, \"phase\":\"no\", 'box':512, \n # 'min_res':30, 'max_res':5, 'min_def':5000, 'max_def':50000, \n # 'def_step':500, 'astig':100, 'phase':'no', 'min_phase':0, \n # 'max_phase':2, 'phase_step':0.1}\n #param_names = [\n # 'pixel_a', 'voltage', 'cs', 'amp', 'box', 'min_res', 'max_res', \n # 'min_def', 'max_def', 'def_step', 'astig', 'phase', \n # 'min_phase', 'max_phase', 'phase_step']\n \n # keep params if list, add default if dict\n if isinstance(params, list):\n comb_params = [pixel_a] + params\n elif isinstance(params, dict):\n params_dict = cls.default_params_ctffind.copy()\n params_dict.update(params)\n params_dict['pixel_a'] = pixel_a\n param_names = cls.make_param_names_ctffind(params=params_dict)\n comb_params = [params_dict[name] for name in param_names]\n\n # set ctffind out paths\n image_dir, image_name = 
os.path.split(image_path)\n image_base, image_extension = image_name.rsplit('.', 1)\n ctf_path = os.path.join(ctf_dir, image_base + '_ctf.mrc') \n ctf_txt_path = os.path.join(ctf_dir, image_base + '_ctf.txt')\n ctf_avrot_path = os.path.join(ctf_dir, image_base + '_ctf_avrot.txt')\n\n # wite ctf parameters to a file\n param_path = os.path.join(ctf_dir, param_file)\n pf = open(param_path, 'w')\n pf.write(image_path + '\\n')\n pf.write(ctf_path + '\\n')\n str_params = [str(par) + '\\n' for par in comb_params]\n pf.writelines(str_params)\n pf.flush()\n\n # execute ctffind\n # shell commands that work:\n # - ctffind < param_path\n # - cat params.txt | ctffind\n #print(image)\n if fast:\n ctf_cmd = [executable, '--fast']\n else:\n ctf_cmd = [executable]\n try:\n subprocess.check_call(ctf_cmd, stdin=open(param_path))\n except Exception as exc:\n # workaround for ctffind command returning code 255 (4.1.8, 09.2018)\n logging.debug('CalledProcessError: ' + str(exc))\n \n # read results:\n ctf_txt = np.loadtxt(ctf_txt_path)\n results = {\n \"defocus_1\":ctf_txt[1]/10000., \"defocus_2\":ctf_txt[2]/10000., \n \"angle\" : ctf_txt[3], \"phase\":old_div(ctf_txt[4],np.pi), \n \"ccc\" : ctf_txt[5], \"resolution\" : ctf_txt[6] / 10., \n 'pixel_a':pixel_a}\n results['defocus'] = (results['defocus_1'] + results['defocus_2']) / 2.\n results['ctf_path'] = ctf_path\n\n # prepare header for defoci and phases\n if print_head:\n left_space = ' ' * old_div((len(image_name) - 5), 2)\n right_space = ' ' *old_div ((len(image_name) - 4), 2)\n head_1 = (\n left_space + \"Image\" + right_space + \n \" Defocus 1 Defocus 2 Phase Resolution\")\n head_2 = (\n left_space + \" \" + right_space + \n \" um um [pi] nm \")\n\n # prepare results\n if print_results:\n data_format = '%s %6.2f %6.2f %6.2f %6.2f '\n data_vars = (\n image_name, results[\"defocus_1\"], results[\"defocus_2\"], \n results[\"phase\"], results[\"resolution\"])\n\n # print\n if print_head:\n print(head_1)\n print(head_2)\n if print_results:\n print(data_format % data_vars)\n\n # plot ctf\n if plot_ctf:\n plt.figure()\n avrot_data = np.loadtxt(ctf_avrot_path)\n x_data = avrot_data[0] / pixel_a\n plt.plot(x_data, avrot_data[2], 'g-', label='PS')\n plt.plot(\n x_data, avrot_data[3], color='orange', linewidth=2, \n label='CTF fit')\n plt.plot(\n x_data, avrot_data[4], color='blue', linewidth=2, \n label='Quality')\n plt.ylim(-0.1, 1.1)\n plt.xlabel(\"Spatial frequency [1/A])\")\n plt.ylabel(\"Amplitude\")\n if show_legend: plt.legend()\n plt.show()\n\n return results\n\n @classmethod\n def make_param_names_ctffind(cls, params):\n \"\"\"\n Makes a list of parameter names that's suitable for ctffind 4.1 and\n it is in accordance with the specified params.\n\n Argument:\n - params: dict of parameters\n\n Returns parameter list\n \"\"\"\n\n # optional parts\n if params['restraint_astig'] in ['yes', 'y']:\n restraint_astig_part = ['restraint_astig','tolerated_astig']\n else:\n restraint_astig_part = ['restraint_astig']\n if (params['phase'] == 'yes') or (params['phase'] == 'y'):\n phase_part = ['phase', 'min_phase', 'max_phase', 'phase_step']\n else:\n phase_part = ['phase']\n\n # combine\n param_names = (\n cls.param_names_ctffind_4_1[:12] + restraint_astig_part\n + phase_part + ['expert'])\n\n return param_names\n \n @classmethod\n def gctf(\n cls, image_path, ctf_dir, params, pixel_a=None, flatten=False, \n executable='gctf', plot_ps=True, plot_ctf=True, \n b_plot=True, exp_f_plot=False, show_legend=True, \n print_results=True, print_head=True, 
print_validation=False):\n \"\"\"\n Determines and shows CTF fits of one image using gctf.\n\n See find() for more information.\n \"\"\" \n\n # make ctf dir if doesn't exist\n if not os.path.exists(ctf_dir): os.makedirs(ctf_dir)\n\n # find pixel size\n if pixel_a is None:\n pixel_a = cls.read_pixel_size(image_path=image_path) \n\n # flatten frame stack if needed\n if flatten:\n image_path = cls.flatten_stack(\n stack_path=image_path, flat_dir=ctf_dir)\n\n # prepare parameters\n gctf_names = {\n 'pixel_a':'apix', 'voltage':'kV', 'cs':'Cs', 'amp':'ac', \n 'box':'boxsize', 'min_res':'resL', 'max_res':'resH', \n 'min_def':'defL', 'max_def':'defH', 'def_step':'defS', \n 'astig':'astm', 'phase':'phase', 'min_phase':'phase_shift_L', \n 'max_phase':'phase_shift_H', 'phase_step':'phase_shift_S'}\n params[\"pixel_a\"] = pixel_a \n params_list = [\n [\"--\" + gctf_names.get(key, key), str(val)] \n for key, val in list(params.items())]\n params_list = pyto.util.nested.flatten(params_list)\n params_list = [par for par in params_list if len(par) > 0]\n #print(params_list)\n\n # execute ctffind\n ctf_cmd = [executable] + params_list + [image_path]\n call_status = subprocess.check_call(ctf_cmd)\n\n # set gctf out paths\n image_dir, image_name = os.path.split(image_path)\n image_base, image_extension = image_name.rsplit('.', 1)\n epa_path = os.path.join(ctf_dir, image_base + '_EPA.log')\n gctf_path = os.path.join(ctf_dir, image_base + '_gctf.log') \n ctf_path = os.path.join(ctf_dir, image_base + '.ctf') \n tmp_epa_path = os.path.join(image_dir, image_base + '_EPA.log')\n tmp_gctf_path = os.path.join(image_dir, image_base + '_gctf.log') \n tmp_ctf_path = os.path.join(image_dir, image_base + '.ctf') \n\n # move generated files to ctf_dir\n if image_dir != ctf_dir:\n call_status = subprocess.check_call(['mv', tmp_epa_path, epa_path])\n call_status = subprocess.check_call(\n ['mv', tmp_gctf_path, gctf_path])\n call_status = subprocess.check_call(['mv', tmp_ctf_path, ctf_path])\n call_status = subprocess.check_call(\n ['mv', 'micrographs_all_gctf.star', ctf_dir])\n\n # read results\n in_last_cycle = False\n in_last_cycle_data = False\n validation_lines = []\n for line in open(gctf_path):\n\n # read defocus\n if line.find('LAST CYCLE') >= 0: \n in_last_cycle = True\n #print line.strip('\\n')\n elif in_last_cycle and (line.find('Defocus_U') >= 0): \n #print line.strip('\\n')\n head_split = line.strip().split()\n in_last_cycle_data = True\n elif in_last_cycle_data:\n #print line.strip('\\n')\n data_split = line.strip().split()[:-2]\n in_last_cycle_data = False\n\n # read res limit and b factor\n elif in_last_cycle and line.startswith('Resolution limit'): \n resolution = float(line.split()[-1])\n elif in_last_cycle and line.startswith('Estimated Bfactor'): \n b_factor = float(line.split()[-1])\n in_last_cycle = False\n\n # read validation\n elif line.find('VALIDATION_SCORE') >= 0:\n validation_lines.append(line.strip('\\n'))\n\n # extract results\n results_native = dict(\n [(head, float(value)) \n for head, value in zip(head_split, data_split)])\n results_native[\"Defocus_U\"] = results_native[\"Defocus_U\"] / 10000.\n results_native[\"Defocus_V\"] = results_native[\"Defocus_V\"] / 10000.\n #print(results_native)\n key_dict = {\n \"Defocus_U\":\"defocus_1\", \"Defocus_V\":\"defocus_2\",\n \"Angle\":\"angle\", \"CCC\":\"ccc\", \"Phase_shift\":\"phase\"}\n results = dict([\n (key_dict[old_key], value)\n for old_key, value in list(results_native.items())])\n results['defocus'] = (results['defocus_1'] + 
results['defocus_2']) / 2.\n results['phase'] = results.get('phase', 0) / 180.\n results[\"resolution\"] = resolution / 10.\n results[\"b_factor\"] = b_factor\n #if results.get(\"phase\") is None: results[\"phase\"] = 0\n results['ctf_path'] = ctf_path\n results['pixel_a'] = pixel_a\n for val_line in validation_lines:\n val_list = val_line.strip().split()\n name_suf = val_list[0].replace('-', '_')\n results[cls.validation_prefix + name_suf] = int(val_list[-1])\n\n # prepare header for defoci and phases\n if print_head:\n left_space = ' ' * (old_div((len(image_name) - 5), 2))\n right_space = ' ' * (old_div((len(image_name) - 4), 2))\n head_1 = (\n left_space + \"Image\" + right_space + \n \" Defocus 1 Defocus 2 Phase Resolution\")\n head_2 = (\n left_space + \" \" + right_space + \n \" um um [pi] nm \")\n\n # prepare results\n if print_results:\n data_format = '%s %6.2f %6.2f %6.2f %6.2f '\n data_vars = (\n image_name, results[\"defocus_1\"], results[\"defocus_2\"], \n results[\"phase\"], results[\"resolution\"])\n\n # add validation to header and results\n val_names = np.sort(\n [val_nam for val_nam in results\n if val_nam.startswith(cls.validation_prefix)])[::-1]\n for val_nam in val_names:\n if print_head:\n head_1 += (\" \" + val_nam.split(cls.validation_prefix, 1)[1])\n head_2 += \" \"\n if print_results:\n data_format += ' %2d '\n data_vars += (results[val_nam],)\n\n # print\n if print_head:\n print(head_1)\n print(head_2)\n if print_results:\n print(data_format % data_vars)\n\n # print validation\n if print_validation:\n for val_line in validation_lines:\n print(val_line)\n\n # plot ctf\n epa = np.loadtxt(epa_path, skiprows=1)\n if plot_ps:\n plt.figure()\n plt.plot(1./epa[:,0], epa[:,2])\n plt.ylabel('ln(|F|)')\n #if show_legend: plt.legend()\n plt.show()\n if plot_ctf:\n plt.figure()\n if b_plot:\n exp_b = np.exp(-b_factor * 1./epa[:,0]**2 / 4.)\n else:\n exp_b = 1\n plt.plot(1./epa[:,0], epa[:,1] * exp_b, label=\"CTF fit\")\n if exp_f_plot:\n plt.plot(\n 1./epa[:,0], np.exp(epa[:,3]), label=\"$e^{ln(|F|-Bg)}$\")\n else:\n plt.plot(1./epa[:,0], epa[:,3], label=\"$ln(|F|-Bg)$\")\n plt.xlabel('Resolution [1/A]')\n if show_legend: plt.legend()\n plt.show()\n\n # return\n return results\n\n @classmethod\n def read_pixel_size(cls, image_path):\n \"\"\"\n Reads pixel size from an image file.\n\n Raises ValueError if pixel size can not be read from the image\n\n Argument:\n - image_path: image path\n\n Returns: pixel size in A\n \"\"\"\n\n image_io = ImageIO()\n if image_path.endswith('.st'):\n image_io.readHeader(file=image_path, fileFormat='mrc')\n else:\n image_io.readHeader(file=image_path)\n if image_io.pixel is not None:\n if isinstance(image_io.pixel, (list, tuple)):\n pixel_a = 10 * image_io.pixel[0] \n else:\n pixel_a = 10 * image_io.pixel\n else:\n raise ValueError(\n \"Pixel size could not be found from image \" + image_path +\n \". Please specify pixel_a as an argument.\")\n\n # in case of 0 pix size\n if pixel_a == 0:\n raise ValueError(\n \"Pixel size could not be found from image \" + image_path +\n \". 
Please specify pixel_a as an argument.\")\n\n return pixel_a\n\n @classmethod\n def flatten_stack(cls, stack_path, flat_dir):\n \"\"\"\n Flattens image stack, that is sums up all z-slices and writes\n the resulting (flat) image).\n\n Arguments:\n - stack_path: path to the image stack\n - flat_path: path where the resulting image is saved\n\n Returns resulting image path\n \"\"\"\n\n # parse stack path\n stack_dir, stack_name = os.path.split(stack_path)\n stack_base, stack_extension = stack_name.rsplit('.', 1)\n if stack_extension == 'st': \n stack_extension = 'mrc'\n file_format = 'mrc'\n else:\n file_format = None\n\n # read, flatten and write\n flat_path = os.path.join(\n flat_dir, stack_base + '_flat.' + stack_extension)\n frame = Image.read(file=stack_path, fileFormat=file_format)\n frame.data = np.sum(frame.data, axis=2, dtype=frame.data.dtype)\n frame.write(file=flat_path, pixel=frame.pixelsize)\n\n return flat_path\n" ]
[ [ "numpy.sum", "matplotlib.pyplot.legend", "matplotlib.pyplot.figure", "numpy.exp", "matplotlib.pyplot.title", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "numpy.log10", "matplotlib.pyplot.ylim", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel", "numpy.loadtxt" ] ]
a868111817/language-model-playground
[ "daecd4e39bbf8128b04aa236ad1d31cd22c3c1d9" ]
[ "test/lmp/model/_lstm/conftest.py" ]
[ "r\"\"\"Setup fixtures for testing :py:class:`lmp.model.LSTMModel`.\"\"\"\n\nimport pytest\nimport torch\n\nfrom lmp.model import LSTMModel\nfrom lmp.tknzr import BaseTknzr\n\n\[email protected]\ndef lstm_model(\n tknzr: BaseTknzr,\n d_emb: int,\n d_hid: int,\n n_hid_lyr: int,\n n_pre_hid_lyr: int,\n n_post_hid_lyr: int,\n p_emb: float,\n p_hid: float,\n) -> LSTMModel:\n r\"\"\"Example ``LSTMModel`` instance.\"\"\"\n return LSTMModel(\n d_emb=d_emb,\n d_hid=d_hid,\n n_hid_lyr=n_hid_lyr,\n n_pre_hid_lyr=n_pre_hid_lyr,\n n_post_hid_lyr=n_post_hid_lyr,\n p_emb=p_emb,\n p_hid=p_hid,\n tknzr=tknzr,\n )\n\n\[email protected]\ndef batch_prev_tkids(lstm_model: LSTMModel) -> torch.Tensor:\n r\"\"\"Example input batch of token ids.\"\"\"\n # Shape: (2, 3).\n return torch.randint(\n low=0,\n high=lstm_model.emb.num_embeddings,\n size=(2, 3),\n )\n\n\[email protected]\ndef batch_next_tkids(\n lstm_model: LSTMModel,\n batch_prev_tkids: torch.Tensor,\n) -> torch.Tensor:\n r\"\"\"Example target batch of token ids.\"\"\"\n # Same shape as `batch_prev_tkids`.\n return torch.cat(\n [\n batch_prev_tkids[..., :-1],\n torch.randint(\n low=0,\n high=lstm_model.emb.num_embeddings,\n size=(batch_prev_tkids.shape[0], 1),\n ),\n ],\n dim=1,\n )\n" ]
[ [ "torch.randint" ] ]
hust-cec-2021/ma2bea
[ "196f8de33cc4902bd9cb1fdd5400e41f9c275b55" ]
[ "code/visualization/ucb.py" ]
[ "import os\nimport argparse\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef get_args():\n # create argument parser\n parser = argparse.ArgumentParser()\n # parameter for problem\n parser.add_argument('--seed', type=int, default=1)\n parser.add_argument('--benchmark_id', type=int, default=0)\n parser.add_argument('--rmp', type=float, default=0.3)\n # parse args\n args = parser.parse_args()\n # add other args\n return args\n\nROOT = '../../result'\n\ndef load(args):\n folder = os.path.join(ROOT, '{}/{}_{}'.format(args.benchmark_id, args.algorithm, args.rmp))\n Fitness = []\n for name in os.listdir(folder):\n path = os.path.join(folder, name)\n if 'ucb' in name:\n y = np.load(path)\n Fitness.append(y)\n return np.array(Fitness)\n\ndef get_label(args):\n return '{}_{}'.format(args.algorithm, args.benchmark_id)\n\ndef plot(Fitness, args):\n cs = [\n ['b', 'b', 'b', 'b', 'b', 'b', 'b', 'b', 'b'],\n ['b', 'b', 'b', 'b', 'b', 'b', 'b', 'b', 'b'],\n ['b', 'b', 'b', 'b', 'b', 'b', 'b', 'b', 'b'],\n ['b', 'b', 'b', 'b', 'b', 'b', 'b', 'b', 'b'],\n ['r', 'b', 'b', 'b', 'b', 'b', 'b', 'b', 'b'],\n ['b', 'r', 'b', 'b', 'b', 'b', 'b', 'b', 'b'],\n ['b', 'b', 'r', 'r', 'b', 'b', 'b', 'b', 'b'],\n ['b', 'b', 'b', 'b', 'b', 'b', 'b', 'b', 'b'],\n ['b', 'b', 'b', 'r', 'b', 'b', 'b', 'b', 'b'],\n ['b', 'b', 'b', 'b', 'b', 'b', 'b', 'b', 'b'],\n ]\n\n label = get_label(args)\n Fitness = Fitness[:, :, args.source]\n mean_fitness = np.mean(Fitness, axis=0)\n i = 0\n for target in range(mean_fitness.shape[1]):\n if target != args.source:\n plt.plot(mean_fitness[:, target], label='T{}'.format(target+1), color=cs[args.source][i], linewidth=0.3)\n plt.ylabel('UCB value')\n i += 1\n\ndef main():\n # get args\n args = get_args()\n\n # plot each algorithm\n args.algorithm = 'MTO'\n Fitness = load(args)\n for source in range(10):\n args.source = source\n plot(Fitness, args)\n\n plt.legend()\n plt.ylim((0, 2))\n plt.savefig('plot/ucb/{}.eps'.format(source + 1), dpi=300)\n plt.savefig('plot/ucb/{}.png'.format(source + 1), dpi=300)\n plt.clf()\n plt.cla()\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.load", "matplotlib.pyplot.legend", "matplotlib.pyplot.cla", "matplotlib.pyplot.clf", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.ylim", "numpy.array", "numpy.mean" ] ]
allisontam/chemprop
[ "87ac151c68d8a200d564b064103c4f514e29f6bd" ]
[ "chemprop/train/run_training.py" ]
[ "from argparse import Namespace\nimport csv\nfrom logging import Logger\nimport os\nfrom pprint import pformat\nfrom typing import List\n\nimport numpy as np\nfrom tensorboardX import SummaryWriter\nimport torch\nfrom tqdm import trange\nimport pickle\nfrom torch.optim.lr_scheduler import ExponentialLR\n\nfrom .evaluate import evaluate, evaluate_predictions\nfrom .predict import predict, save_predictions\nfrom .train import train\nfrom chemprop.data import StandardScaler\nfrom chemprop.data.utils import flip_data, get_class_sizes, get_data, get_task_names, split_data, split_loocv\nfrom chemprop.models import build_model\nfrom chemprop.nn_utils import param_count\nfrom chemprop.utils import build_optimizer, build_lr_scheduler, get_loss_func, get_metric_func, load_checkpoint,\\\n makedirs, save_checkpoint\n\n\ndef run_training(args: Namespace, logger: Logger = None) -> List[float]:\n \"\"\"\n Trains a model and returns test scores on the model checkpoint with the highest validation score.\n\n :param args: Arguments.\n :param logger: Logger.\n :return: A list of ensemble scores for each task.\n \"\"\"\n if logger is not None:\n debug, info = logger.debug, logger.info\n else:\n debug = info = print\n\n # Set GPU\n if args.gpu is not None:\n torch.cuda.set_device(args.gpu)\n\n # Print args\n debug(pformat(vars(args)))\n\n # Get data\n debug('Loading data')\n args.task_names = get_task_names(args.data_path, args.data_format)\n data = get_data(path=args.data_path, args=args, logger=logger)\n args.num_tasks = data.num_tasks()\n args.features_size = data.features_size()\n debug(f'Number of tasks = {args.num_tasks}')\n\n # Split data\n debug(f'Splitting data with seed {args.seed}')\n if args.separate_test_path:\n test_data = get_data(path=args.separate_test_path, args=args, features_path=args.separate_test_features_path, logger=logger)\n if args.separate_val_path:\n val_data = get_data(path=args.separate_val_path, args=args, features_path=args.separate_val_features_path, logger=logger)\n\n if args.separate_val_path and args.separate_test_path:\n train_data = data\n elif args.separate_val_path:\n train_data, _, test_data = split_data(data=data, split_type=args.split_type, sizes=(0.8, 0.0, 0.2), seed=args.seed, args=args, logger=logger)\n elif args.separate_test_path:\n train_data, val_data, _ = split_data(data=data, split_type=args.split_type, sizes=(0.8, 0.2, 0.0), seed=args.seed, args=args, logger=logger)\n elif args.split_type == 'loocv':\n train_data, val_data, test_data = split_loocv(data=data, args=args, logger=logger)\n else:\n train_data, val_data, test_data = split_data(data=data, split_type=args.split_type, sizes=args.split_sizes, seed=args.seed, args=args, logger=logger)\n\n if args.dataset_type == 'classification':\n class_sizes = get_class_sizes(test_data)\n debug('Class sizes in test set')\n for i, task_class_sizes in enumerate(class_sizes):\n debug(f'{args.task_names[i]} '\n f'{\", \".join(f\"{cls}: {size * 100:.2f}%\" for cls, size in enumerate(task_class_sizes))}')\n if not args.train_all and task_class_sizes == 0: # TODO: only works for just 1 property prediction task\n debug('Moved to next epoch due to homogenous targets in test set.')\n return [float('nan')]\n\n if args.save_smiles_splits:\n with open(args.data_path, 'r') as f:\n reader = csv.reader(f)\n header = next(reader)\n\n lines_by_smiles = {}\n indices_by_smiles = {}\n for i, line in enumerate(reader):\n smiles = (line[0], line[1])\n lines_by_smiles[smiles] = line\n indices_by_smiles[smiles] = i\n\n all_split_indices = 
[]\n for dataset, name in [(train_data, 'train'), (val_data, 'val'), (test_data, 'test')]:\n with open(os.path.join(args.save_dir, name + '_smiles.csv'), 'w') as f:\n writer = csv.writer(f)\n writer.writerow(['smiles'])\n for smiles in dataset.smiles():\n writer.writerow([smiles])\n with open(os.path.join(args.save_dir, name + '_full.csv'), 'w') as f:\n writer = csv.writer(f)\n writer.writerow(header)\n for smiles in dataset.smiles():\n writer.writerow(lines_by_smiles[smiles])\n split_indices = []\n for smiles in dataset.smiles():\n split_indices.append(indices_by_smiles[smiles])\n split_indices = sorted(split_indices)\n all_split_indices.append(split_indices)\n with open(os.path.join(args.save_dir, 'split_indices.pckl'), 'wb') as f:\n pickle.dump(all_split_indices, f)\n\n if args.symmetric:\n train_data = flip_data(train_data)\n\n if args.features_scaling:\n drug_scaler, cmpd_scaler = train_data.normalize_features(replace_nan_token=0)\n val_data.normalize_features(drug_scaler, cmpd_scaler)\n test_data.normalize_features(drug_scaler, cmpd_scaler)\n else:\n drug_scaler, cmpd_scaler = None, None\n\n args.train_data_size = len(train_data)\n \n debug(f'Total size = {len(data):,} | '\n f'train size = {len(train_data):,} | val size = {len(val_data):,} | test size = {len(test_data):,}')\n\n # Initialize scaler and scale training targets by subtracting mean and dividing standard deviation (regression only)\n if args.dataset_type == 'regression':\n debug('Fitting scaler')\n train_smiles, train_targets = train_data.smiles(), train_data.targets()\n scaler = StandardScaler().fit(train_targets)\n scaled_targets = scaler.transform(train_targets).tolist()\n train_data.set_targets(scaled_targets)\n else:\n scaler = None\n\n # Get loss and metric functions\n loss_func = get_loss_func(args)\n metric_func = get_metric_func(metric=args.metric)\n\n # Set up test set evaluation\n test_smiles, test_targets = test_data.smiles(), test_data.targets()\n if args.dataset_type == 'multiclass':\n sum_test_preds = np.zeros((len(test_smiles), args.num_tasks, args.multiclass_num_classes))\n else:\n sum_test_preds = np.zeros((len(test_smiles), args.num_tasks))\n\n # Train ensemble of models\n for model_idx in range(args.ensemble_size):\n # Tensorboard writer\n save_dir = os.path.join(args.save_dir, f'model_{model_idx}')\n makedirs(save_dir)\n try:\n writer = SummaryWriter(log_dir=save_dir)\n except:\n writer = SummaryWriter(logdir=save_dir)\n # Load/build model\n if args.checkpoint_paths is not None:\n debug(f'Loading model {model_idx} from {args.checkpoint_paths[model_idx]}')\n model = load_checkpoint(args.checkpoint_paths[model_idx], current_args=args, logger=logger)\n else:\n debug(f'Building model {model_idx}')\n model = build_model(args)\n\n debug(model)\n debug(f'Number of parameters = {param_count(model):,}')\n if args.cuda:\n debug('Moving model to cuda')\n model = model.cuda()\n\n # Ensure that model is saved in correct location for evaluation if 0 epochs\n save_checkpoint(os.path.join(save_dir, 'model.pt'), model, scaler, drug_scaler, cmpd_scaler, args)\n\n # Optimizers\n optimizer = build_optimizer(model, args)\n\n # Learning rate schedulers\n scheduler = build_lr_scheduler(optimizer, args)\n\n # Run training\n best_score = float('inf') if args.minimize_score else -float('inf')\n best_epoch, n_iter = 0, 0\n for epoch in trange(args.epochs):\n debug(f'Epoch {epoch}')\n\n n_iter = train(\n model=model,\n data=train_data,\n loss_func=loss_func,\n optimizer=optimizer,\n scheduler=scheduler,\n args=args,\n 
n_iter=n_iter,\n logger=logger,\n writer=writer\n )\n if isinstance(scheduler, ExponentialLR):\n scheduler.step()\n val_scores, val_loss = evaluate(\n model=model,\n data=val_data,\n loss_func=loss_func,\n num_tasks=args.num_tasks,\n metric_func=metric_func,\n batch_size=args.batch_size,\n dataset_type=args.dataset_type,\n scaler=scaler,\n logger=logger\n )\n\n # Average validation score\n avg_val_score = np.nanmean(val_scores)\n debug(f'Validation {args.metric} = {avg_val_score:.6f}')\n writer.add_scalar(f'validation_{args.metric}', avg_val_score, n_iter)\n\n debug(f'Validation loss = {val_loss:.6f}')\n writer.add_scalar(f'validation_loss', val_loss, n_iter)\n\n if args.show_individual_scores:\n # Individual validation scores\n for task_name, val_score in zip(args.task_names, val_scores):\n debug(f'Validation {task_name} {args.metric} = {val_score:.6f}')\n writer.add_scalar(f'validation_{task_name}_{args.metric}', val_score, n_iter)\n\n # Save model checkpoint if improved validation score\n if args.minimize_score and avg_val_score < best_score or \\\n not args.minimize_score and avg_val_score > best_score:\n best_score, best_epoch = avg_val_score, epoch\n save_checkpoint(os.path.join(save_dir, 'model.pt'), model, scaler, drug_scaler, cmpd_scaler, args)\n\n # Evaluate on test set using model with best validation score\n info(f'Model {model_idx} best validation {args.metric} = {best_score:.6f} on epoch {best_epoch}')\n model = load_checkpoint(os.path.join(save_dir, 'model.pt'), cuda=args.cuda, logger=logger)\n\n test_preds = predict(\n model=model,\n data=test_data,\n batch_size=args.batch_size,\n scaler=scaler\n )\n if args.save_preds:\n val_preds = predict(model=model, data=val_data, batch_size=args.batch_size, scaler=scaler)\n train_preds = predict(model=model, data=train_data, batch_size=args.batch_size, scaler=scaler)\n save_predictions(save_dir, train_data, val_data, test_data, \\\n train_preds, val_preds, test_preds, args.task_names, scaler)\n\n test_scores = evaluate_predictions(\n preds=test_preds,\n targets=test_targets,\n num_tasks=args.num_tasks,\n metric_func=metric_func,\n dataset_type=args.dataset_type,\n logger=logger\n )\n\n if len(test_preds) != 0:\n sum_test_preds += np.array(test_preds)\n\n # Average test score\n avg_test_score = np.nanmean(test_scores)\n info(f'Model {model_idx} test {args.metric} = {avg_test_score:.6f}')\n writer.add_scalar(f'test_{args.metric}', avg_test_score, 0)\n\n if args.show_individual_scores:\n # Individual test scores\n for task_name, test_score in zip(args.task_names, test_scores):\n info(f'Model {model_idx} test {task_name} {args.metric} = {test_score:.6f}')\n writer.add_scalar(f'test_{task_name}_{args.metric}', test_score, n_iter)\n\n # Evaluate ensemble on test set\n avg_test_preds = (sum_test_preds / args.ensemble_size).tolist()\n\n ensemble_scores = evaluate_predictions(\n preds=avg_test_preds,\n targets=test_targets,\n num_tasks=args.num_tasks,\n metric_func=metric_func,\n dataset_type=args.dataset_type,\n logger=logger\n )\n\n # Average ensemble score\n avg_ensemble_test_score = np.nanmean(ensemble_scores)\n info(f'Ensemble test {args.metric} = {avg_ensemble_test_score:.6f}')\n writer.add_scalar(f'ensemble_test_{args.metric}', avg_ensemble_test_score, 0)\n\n # Individual ensemble scores\n if args.show_individual_scores:\n for task_name, ensemble_score in zip(args.task_names, ensemble_scores):\n info(f'Ensemble test {task_name} {args.metric} = {ensemble_score:.6f}')\n\n return ensemble_scores\n" ]
[ [ "numpy.nanmean", "numpy.array", "torch.cuda.set_device" ] ]
Lube-Project/ProgettoLube
[ "901ac307b68486d8289105c159ca702318bea5b0", "cbf33971e2c2e865783ec1a2302625539186a338", "cbf33971e2c2e865783ec1a2302625539186a338" ]
[ "ProgettoLube/WebInspector/venv/Lib/site-packages/skimage/color/tests/test_adapt_rgb.py", "ProgettoLube/WebInspector/venv/Lib/site-packages/tensorflow_estimator/python/estimator/canned/v1/linear_testing_utils_v1.py", "ProgettoLube/WebInspector/venv/Lib/site-packages/tensorflow/python/keras/api/_v1/keras/mixed_precision/__init__.py" ]
[ "from functools import partial\n\nimport numpy as np\n\nfrom skimage import img_as_float, img_as_uint\nfrom skimage import color, data, filters\nfrom skimage.color.adapt_rgb import adapt_rgb, each_channel, hsv_value\n\n# Down-sample image for quicker testing.\nCOLOR_IMAGE = data.astronaut()[::5, ::6]\nGRAY_IMAGE = data.camera()[::5, ::5]\n\nSIGMA = 3\nsmooth = partial(filters.gaussian, sigma=SIGMA)\nassert_allclose = partial(np.testing.assert_allclose, atol=1e-8)\n\n\n@adapt_rgb(each_channel)\ndef edges_each(image):\n return filters.sobel(image)\n\n\n@adapt_rgb(each_channel)\ndef smooth_each(image, sigma):\n return filters.gaussian(image, sigma)\n\n\n@adapt_rgb(each_channel)\ndef mask_each(image, mask):\n result = image.copy()\n result[mask] = 0\n return result\n\n\n@adapt_rgb(hsv_value)\ndef edges_hsv(image):\n return filters.sobel(image)\n\n\n@adapt_rgb(hsv_value)\ndef smooth_hsv(image, sigma):\n return filters.gaussian(image, sigma)\n\n\n@adapt_rgb(hsv_value)\ndef edges_hsv_uint(image):\n return img_as_uint(filters.sobel(image))\n\n\ndef test_gray_scale_image():\n # We don't need to test both `hsv_value` and `each_channel` since\n # `adapt_rgb` is handling gray-scale inputs.\n assert_allclose(edges_each(GRAY_IMAGE), filters.sobel(GRAY_IMAGE))\n\n\ndef test_each_channel():\n filtered = edges_each(COLOR_IMAGE)\n for i, channel in enumerate(np.rollaxis(filtered, axis=-1)):\n expected = img_as_float(filters.sobel(COLOR_IMAGE[:, :, i]))\n assert_allclose(channel, expected)\n\n\ndef test_each_channel_with_filter_argument():\n filtered = smooth_each(COLOR_IMAGE, SIGMA)\n for i, channel in enumerate(np.rollaxis(filtered, axis=-1)):\n assert_allclose(channel, smooth(COLOR_IMAGE[:, :, i]))\n\n\ndef test_each_channel_with_asymmetric_kernel():\n mask = np.triu(np.ones(COLOR_IMAGE.shape[:2], dtype=np.bool_))\n mask_each(COLOR_IMAGE, mask)\n\n\ndef test_hsv_value():\n filtered = edges_hsv(COLOR_IMAGE)\n value = color.rgb2hsv(COLOR_IMAGE)[:, :, 2]\n assert_allclose(color.rgb2hsv(filtered)[:, :, 2], filters.sobel(value))\n\n\ndef test_hsv_value_with_filter_argument():\n filtered = smooth_hsv(COLOR_IMAGE, SIGMA)\n value = color.rgb2hsv(COLOR_IMAGE)[:, :, 2]\n assert_allclose(color.rgb2hsv(filtered)[:, :, 2], smooth(value))\n\n\ndef test_hsv_value_with_non_float_output():\n # Since `rgb2hsv` returns a float image and the result of the filtered\n # result is inserted into the HSV image, we want to make sure there isn't\n # a dtype mismatch.\n filtered = edges_hsv_uint(COLOR_IMAGE)\n filtered_value = color.rgb2hsv(filtered)[:, :, 2]\n value = color.rgb2hsv(COLOR_IMAGE)[:, :, 2]\n # Reduce tolerance because dtype conversion.\n assert_allclose(filtered_value, filters.sobel(value), rtol=1e-5, atol=1e-5)\n", "# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Utils for testing linear estimators.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\nimport os\nimport shutil\nimport tempfile\n\nimport numpy as np\nimport six\nimport tensorflow as tf\nfrom tensorflow.core.example import example_pb2\nfrom tensorflow.core.example import feature_pb2\nfrom tensorflow.python.feature_column import feature_column\nfrom tensorflow.python.feature_column import feature_column_v2\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import variables as variables_lib\nfrom tensorflow_estimator.python.estimator import estimator\nfrom tensorflow_estimator.python.estimator import run_config\nfrom tensorflow_estimator.python.estimator.canned import linear\nfrom tensorflow_estimator.python.estimator.canned import metric_keys\nfrom tensorflow_estimator.python.estimator.export import export\nfrom tensorflow_estimator.python.estimator.inputs import numpy_io\nfrom tensorflow_estimator.python.estimator.inputs import pandas_io\n\ntry:\n # pylint: disable=g-import-not-at-top\n import pandas as pd\n HAS_PANDAS = True\nexcept IOError:\n # Pandas writes a temporary file during import. 
If it fails, don't use pandas.\n HAS_PANDAS = False\nexcept ImportError:\n HAS_PANDAS = False\n\n# pylint rules which are disabled by default for test files.\n# pylint: disable=invalid-name,protected-access,missing-docstring\n\n# Names of variables created by model.\nAGE_WEIGHT_NAME = 'linear/linear_model/age/weights'\nHEIGHT_WEIGHT_NAME = 'linear/linear_model/height/weights'\nOCCUPATION_WEIGHT_NAME = 'linear/linear_model/occupation/weights'\nBIAS_NAME = 'linear/linear_model/bias_weights'\nLANGUAGE_WEIGHT_NAME = 'linear/linear_model/language/weights'\n\n# This is so that we can easily switch between feature_column and\n# feature_column_v2 for testing.\nfeature_column.numeric_column = feature_column._numeric_column\nfeature_column.categorical_column_with_hash_bucket = feature_column._categorical_column_with_hash_bucket # pylint: disable=line-too-long\nfeature_column.categorical_column_with_vocabulary_list = feature_column._categorical_column_with_vocabulary_list # pylint: disable=line-too-long\nfeature_column.categorical_column_with_vocabulary_file = feature_column._categorical_column_with_vocabulary_file # pylint: disable=line-too-long\nfeature_column.embedding_column = feature_column._embedding_column\n\n\ndef assert_close(expected, actual, rtol=1e-04, name='assert_close'):\n with ops.name_scope(name, 'assert_close', (expected, actual, rtol)) as scope:\n expected = ops.convert_to_tensor(expected, name='expected')\n actual = ops.convert_to_tensor(actual, name='actual')\n rdiff = tf.math.abs(expected - actual, 'diff') / tf.math.abs(expected)\n rtol = ops.convert_to_tensor(rtol, name='rtol')\n return tf.compat.v1.debugging.assert_less(\n rdiff,\n rtol,\n data=('Condition expected =~ actual did not hold element-wise:'\n 'expected = ', expected, 'actual = ', actual, 'rdiff = ', rdiff,\n 'rtol = ', rtol,),\n name=scope)\n\n\ndef save_variables_to_ckpt(model_dir):\n init_all_op = [tf.compat.v1.initializers.global_variables()]\n with tf.compat.v1.Session() as sess:\n sess.run(init_all_op)\n tf.compat.v1.train.Saver().save(sess, os.path.join(model_dir, 'model.ckpt'))\n\n\ndef queue_parsed_features(feature_map):\n tensors_to_enqueue = []\n keys = []\n for key, tensor in six.iteritems(feature_map):\n keys.append(key)\n tensors_to_enqueue.append(tensor)\n queue_dtypes = [x.dtype for x in tensors_to_enqueue]\n input_queue = tf.queue.FIFOQueue(capacity=100, dtypes=queue_dtypes)\n tf.compat.v1.train.queue_runner.add_queue_runner(\n tf.compat.v1.train.queue_runner.QueueRunner(\n input_queue, [input_queue.enqueue(tensors_to_enqueue)]))\n dequeued_tensors = input_queue.dequeue()\n return {keys[i]: dequeued_tensors[i] for i in range(len(dequeued_tensors))}\n\n\ndef sorted_key_dict(unsorted_dict):\n return {k: unsorted_dict[k] for k in sorted(unsorted_dict)}\n\n\ndef sigmoid(x):\n return 1 / (1 + np.exp(-1.0 * x))\n\n\nclass CheckPartitionerVarHook(tf.compat.v1.train.SessionRunHook):\n \"\"\"A `SessionRunHook` to check a partitioned variable.\"\"\"\n\n def __init__(self, test_case, var_name, var_dim, partitions):\n self._test_case = test_case\n self._var_name = var_name\n self._var_dim = var_dim\n self._partitions = partitions\n\n def begin(self):\n with tf.compat.v1.variable_scope(\n tf.compat.v1.get_variable_scope()) as scope:\n scope.reuse_variables()\n partitioned_weight = tf.compat.v1.get_variable(\n self._var_name, shape=(self._var_dim, 1))\n self._test_case.assertTrue(\n isinstance(partitioned_weight, variables_lib.PartitionedVariable))\n for part in partitioned_weight:\n 
self._test_case.assertEqual(self._var_dim // self._partitions,\n part.get_shape()[0])\n\n\nclass BaseLinearRegressorPartitionerTest(object):\n\n def __init__(self, linear_regressor_fn, fc_lib=feature_column):\n self._linear_regressor_fn = linear_regressor_fn\n self._fc_lib = fc_lib\n\n def setUp(self):\n self._model_dir = tempfile.mkdtemp()\n\n def tearDown(self):\n if self._model_dir:\n tf.compat.v1.summary.FileWriterCache.clear()\n shutil.rmtree(self._model_dir)\n\n def testPartitioner(self):\n x_dim = 64\n partitions = 4\n\n def _partitioner(shape, dtype):\n del dtype # unused; required by Fn signature.\n # Only partition the embedding tensor.\n return [partitions, 1] if shape[0] == x_dim else [1]\n\n regressor = self._linear_regressor_fn(\n feature_columns=(self._fc_lib.categorical_column_with_hash_bucket(\n 'language', hash_bucket_size=x_dim),),\n partitioner=_partitioner,\n model_dir=self._model_dir)\n\n def _input_fn():\n return {\n 'language':\n tf.sparse.SparseTensor(\n values=['english', 'spanish'],\n indices=[[0, 0], [0, 1]],\n dense_shape=[1, 2])\n }, [[10.]]\n\n hook = CheckPartitionerVarHook(self, LANGUAGE_WEIGHT_NAME, x_dim,\n partitions)\n regressor.train(input_fn=_input_fn, steps=1, hooks=[hook])\n\n def testDefaultPartitionerWithMultiplePsReplicas(self):\n partitions = 2\n # This results in weights larger than the default partition size of 64M,\n # so partitioned weights are created (each weight uses 4 bytes).\n x_dim = 32 << 20\n\n class FakeRunConfig(run_config.RunConfig):\n\n @property\n def num_ps_replicas(self):\n return partitions\n\n # Mock the device setter as ps is not available on test machines.\n with tf.compat.v1.test.mock.patch.object(\n estimator,\n '_get_replica_device_setter',\n return_value=lambda _: '/cpu:0'):\n linear_regressor = self._linear_regressor_fn(\n feature_columns=(self._fc_lib.categorical_column_with_hash_bucket(\n 'language', hash_bucket_size=x_dim),),\n config=FakeRunConfig(),\n model_dir=self._model_dir)\n\n def _input_fn():\n return {\n 'language':\n tf.sparse.SparseTensor(\n values=['english', 'spanish'],\n indices=[[0, 0], [0, 1]],\n dense_shape=[1, 2])\n }, [[10.]]\n\n hook = CheckPartitionerVarHook(self, LANGUAGE_WEIGHT_NAME, x_dim,\n partitions)\n linear_regressor.train(input_fn=_input_fn, steps=1, hooks=[hook])\n\n\n# TODO(b/36813849): Add tests with dynamic shape inputs using placeholders.\nclass BaseLinearRegressorEvaluationTest(object):\n\n def __init__(self, linear_regressor_fn, fc_lib=feature_column):\n self._linear_regressor_fn = linear_regressor_fn\n self._fc_lib = fc_lib\n\n def setUp(self):\n self._model_dir = tempfile.mkdtemp()\n\n def tearDown(self):\n if self._model_dir:\n tf.compat.v1.summary.FileWriterCache.clear()\n shutil.rmtree(self._model_dir)\n\n def test_evaluation_for_simple_data(self):\n with tf.Graph().as_default():\n tf.Variable([[11.0]], name=AGE_WEIGHT_NAME)\n tf.Variable([2.0], name=BIAS_NAME)\n tf.Variable(\n 100, name=tf.compat.v1.GraphKeys.GLOBAL_STEP, dtype=tf.dtypes.int64)\n save_variables_to_ckpt(self._model_dir)\n\n linear_regressor = self._linear_regressor_fn(\n feature_columns=(self._fc_lib.numeric_column('age'),),\n model_dir=self._model_dir)\n eval_metrics = linear_regressor.evaluate(\n input_fn=lambda: ({\n 'age': ((1,),)\n }, ((10.,),)), steps=1)\n\n # Logit is (1. * 11.0 + 2.0) = 13, while label is 10. 
Loss is 3**2 = 9.\n self.assertDictEqual(\n {\n metric_keys.MetricKeys.LOSS: 9.,\n metric_keys.MetricKeys.LOSS_MEAN: 9.,\n metric_keys.MetricKeys.PREDICTION_MEAN: 13.,\n metric_keys.MetricKeys.LABEL_MEAN: 10.,\n tf.compat.v1.GraphKeys.GLOBAL_STEP: 100\n }, eval_metrics)\n\n def test_evaluation_batch(self):\n \"\"\"Tests evaluation for batch_size==2.\"\"\"\n with tf.Graph().as_default():\n tf.Variable([[11.0]], name=AGE_WEIGHT_NAME)\n tf.Variable([2.0], name=BIAS_NAME)\n tf.Variable(\n 100, name=tf.compat.v1.GraphKeys.GLOBAL_STEP, dtype=tf.dtypes.int64)\n save_variables_to_ckpt(self._model_dir)\n\n linear_regressor = self._linear_regressor_fn(\n feature_columns=(self._fc_lib.numeric_column('age'),),\n model_dir=self._model_dir)\n eval_metrics = linear_regressor.evaluate(\n input_fn=lambda: ({\n 'age': ((1,), (1,))\n }, ((10.,), (10.,))), steps=1)\n\n # Logit is (1. * 11.0 + 2.0) = 13, while label is 10.\n # Loss per example is 3**2 = 9.\n # Training loss is the sum over batch = 9 + 9 = 18\n # Average loss is the average over batch = 9\n self.assertDictEqual(\n {\n metric_keys.MetricKeys.LOSS: 18.,\n metric_keys.MetricKeys.LOSS_MEAN: 9.,\n metric_keys.MetricKeys.PREDICTION_MEAN: 13.,\n metric_keys.MetricKeys.LABEL_MEAN: 10.,\n tf.compat.v1.GraphKeys.GLOBAL_STEP: 100\n }, eval_metrics)\n\n def test_evaluation_weights(self):\n \"\"\"Tests evaluation with weights.\"\"\"\n with tf.Graph().as_default():\n tf.Variable([[11.0]], name=AGE_WEIGHT_NAME)\n tf.Variable([2.0], name=BIAS_NAME)\n tf.Variable(\n 100, name=tf.compat.v1.GraphKeys.GLOBAL_STEP, dtype=tf.dtypes.int64)\n save_variables_to_ckpt(self._model_dir)\n\n def _input_fn():\n features = {'age': ((1,), (1,)), 'weights': ((1.,), (2.,))}\n labels = ((10.,), (10.,))\n return features, labels\n\n linear_regressor = self._linear_regressor_fn(\n feature_columns=(self._fc_lib.numeric_column('age'),),\n weight_column='weights',\n model_dir=self._model_dir)\n eval_metrics = linear_regressor.evaluate(input_fn=_input_fn, steps=1)\n\n # Logit is (1. * 11.0 + 2.0) = 13, while label is 10.\n # Loss per example is 3**2 = 9.\n # Training loss is the weighted sum over batch = 9 + 2*9 = 27\n # average loss is the weighted average = 9 + 2*9 / (1 + 2) = 9\n self.assertDictEqual(\n {\n metric_keys.MetricKeys.LOSS: 27.,\n metric_keys.MetricKeys.LOSS_MEAN: 9.,\n metric_keys.MetricKeys.PREDICTION_MEAN: 13.,\n metric_keys.MetricKeys.LABEL_MEAN: 10.,\n tf.compat.v1.GraphKeys.GLOBAL_STEP: 100\n }, eval_metrics)\n\n def test_evaluation_for_multi_dimensions(self):\n x_dim = 3\n label_dim = 2\n with tf.Graph().as_default():\n tf.Variable([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], name=AGE_WEIGHT_NAME)\n tf.Variable([7.0, 8.0], name=BIAS_NAME)\n tf.Variable(100, name='global_step', dtype=tf.dtypes.int64)\n save_variables_to_ckpt(self._model_dir)\n\n linear_regressor = self._linear_regressor_fn(\n feature_columns=(self._fc_lib.numeric_column('age', shape=(x_dim,)),),\n label_dimension=label_dim,\n model_dir=self._model_dir)\n input_fn = numpy_io.numpy_input_fn(\n x={\n 'age': np.array([[2., 4., 5.]]),\n },\n y=np.array([[46., 58.]]),\n batch_size=1,\n num_epochs=None,\n shuffle=False)\n eval_metrics = linear_regressor.evaluate(input_fn=input_fn, steps=1)\n\n self.assertItemsEqual(\n (metric_keys.MetricKeys.LOSS, metric_keys.MetricKeys.LOSS_MEAN,\n metric_keys.MetricKeys.PREDICTION_MEAN,\n metric_keys.MetricKeys.LABEL_MEAN, tf.compat.v1.GraphKeys.GLOBAL_STEP),\n eval_metrics.keys())\n\n # Logit is\n # [2., 4., 5.] 
* [1.0, 2.0] + [7.0, 8.0] = [39, 50] + [7.0, 8.0]\n # [3.0, 4.0]\n # [5.0, 6.0]\n # which is [46, 58]\n self.assertAlmostEqual(0, eval_metrics[metric_keys.MetricKeys.LOSS])\n\n def test_evaluation_for_multiple_feature_columns(self):\n with tf.Graph().as_default():\n tf.Variable([[10.0]], name=AGE_WEIGHT_NAME)\n tf.Variable([[2.0]], name=HEIGHT_WEIGHT_NAME)\n tf.Variable([5.0], name=BIAS_NAME)\n tf.Variable(\n 100, name=tf.compat.v1.GraphKeys.GLOBAL_STEP, dtype=tf.dtypes.int64)\n save_variables_to_ckpt(self._model_dir)\n\n batch_size = 2\n feature_columns = [\n self._fc_lib.numeric_column('age'),\n self._fc_lib.numeric_column('height')\n ]\n input_fn = numpy_io.numpy_input_fn(\n x={\n 'age': np.array([20, 40]),\n 'height': np.array([4, 8])\n },\n y=np.array([[213.], [421.]]),\n batch_size=batch_size,\n num_epochs=None,\n shuffle=False)\n\n est = self._linear_regressor_fn(\n feature_columns=feature_columns, model_dir=self._model_dir)\n\n eval_metrics = est.evaluate(input_fn=input_fn, steps=1)\n self.assertItemsEqual(\n (metric_keys.MetricKeys.LOSS, metric_keys.MetricKeys.LOSS_MEAN,\n metric_keys.MetricKeys.PREDICTION_MEAN,\n metric_keys.MetricKeys.LABEL_MEAN, tf.compat.v1.GraphKeys.GLOBAL_STEP),\n eval_metrics.keys())\n\n # Logit is [(20. * 10.0 + 4 * 2.0 + 5.0), (40. * 10.0 + 8 * 2.0 + 5.0)] =\n # [213.0, 421.0], while label is [213., 421.]. Loss = 0.\n self.assertAlmostEqual(0, eval_metrics[metric_keys.MetricKeys.LOSS])\n\n def test_evaluation_for_multiple_feature_columns_mix(self):\n with tf.Graph().as_default():\n tf.Variable([[10.0]], name=AGE_WEIGHT_NAME)\n tf.Variable([[2.0]], name=HEIGHT_WEIGHT_NAME)\n tf.Variable([5.0], name=BIAS_NAME)\n tf.Variable(\n 100, name=tf.compat.v1.GraphKeys.GLOBAL_STEP, dtype=tf.dtypes.int64)\n save_variables_to_ckpt(self._model_dir)\n\n batch_size = 2\n feature_columns = [\n feature_column.numeric_column('age'),\n tf.feature_column.numeric_column('height')\n ]\n\n def _input_fn():\n features_ds = tf.compat.v1.data.Dataset.from_tensor_slices({\n 'age': np.array([20, 40]),\n 'height': np.array([4, 8])\n })\n labels_ds = tf.compat.v1.data.Dataset.from_tensor_slices(\n np.array([[213.], [421.]]))\n return (tf.compat.v1.data.Dataset.zip(\n (features_ds, labels_ds)).batch(batch_size).repeat(None))\n\n est = self._linear_regressor_fn(\n feature_columns=feature_columns, model_dir=self._model_dir)\n\n eval_metrics = est.evaluate(input_fn=_input_fn, steps=1)\n self.assertItemsEqual(\n (metric_keys.MetricKeys.LOSS, metric_keys.MetricKeys.LOSS_MEAN,\n metric_keys.MetricKeys.PREDICTION_MEAN,\n metric_keys.MetricKeys.LABEL_MEAN, tf.compat.v1.GraphKeys.GLOBAL_STEP),\n eval_metrics.keys())\n\n # Logit is [(20. * 10.0 + 4 * 2.0 + 5.0), (40. * 10.0 + 8 * 2.0 + 5.0)] =\n # [213.0, 421.0], while label is [213., 421.]. 
Loss = 0.\n self.assertAlmostEqual(0, eval_metrics[metric_keys.MetricKeys.LOSS])\n\n\nclass BaseLinearRegressorPredictTest(object):\n\n def __init__(self, linear_regressor_fn, fc_lib=feature_column):\n self._linear_regressor_fn = linear_regressor_fn\n self._fc_lib = fc_lib\n\n def setUp(self):\n self._model_dir = tempfile.mkdtemp()\n\n def tearDown(self):\n if self._model_dir:\n tf.compat.v1.summary.FileWriterCache.clear()\n shutil.rmtree(self._model_dir)\n\n def test_1d(self):\n \"\"\"Tests predict when all variables are one-dimensional.\"\"\"\n with tf.Graph().as_default():\n tf.Variable([[10.]], name='linear/linear_model/x/weights')\n tf.Variable([.2], name=BIAS_NAME)\n tf.Variable(100, name='global_step', dtype=tf.dtypes.int64)\n save_variables_to_ckpt(self._model_dir)\n\n linear_regressor = self._linear_regressor_fn(\n feature_columns=(self._fc_lib.numeric_column('x'),),\n model_dir=self._model_dir)\n\n predict_input_fn = numpy_io.numpy_input_fn(\n x={'x': np.array([[2.]])},\n y=None,\n batch_size=1,\n num_epochs=1,\n shuffle=False)\n predictions = linear_regressor.predict(input_fn=predict_input_fn)\n predicted_scores = list([x['predictions'] for x in predictions])\n # x * weight + bias = 2. * 10. + .2 = 20.2\n self.assertAllClose([[20.2]], predicted_scores)\n\n def testMultiDim(self):\n \"\"\"Tests predict when all variables are multi-dimenstional.\"\"\"\n batch_size = 2\n label_dimension = 3\n x_dim = 4\n feature_columns = (self._fc_lib.numeric_column('x', shape=(x_dim,)),)\n with tf.Graph().as_default():\n tf.Variable( # shape=[x_dim, label_dimension]\n [[1., 2., 3.], [2., 3., 4.], [3., 4., 5.], [4., 5., 6.]],\n name='linear/linear_model/x/weights')\n tf.Variable( # shape=[label_dimension]\n [.2, .4, .6], name=BIAS_NAME)\n tf.Variable(100, name='global_step', dtype=tf.dtypes.int64)\n save_variables_to_ckpt(self._model_dir)\n\n linear_regressor = self._linear_regressor_fn(\n feature_columns=feature_columns,\n label_dimension=label_dimension,\n model_dir=self._model_dir)\n\n predict_input_fn = numpy_io.numpy_input_fn(\n # x shape=[batch_size, x_dim]\n x={'x': np.array([[1., 2., 3., 4.], [5., 6., 7., 8.]])},\n y=None,\n batch_size=batch_size,\n num_epochs=1,\n shuffle=False)\n predictions = linear_regressor.predict(input_fn=predict_input_fn)\n predicted_scores = list([x['predictions'] for x in predictions])\n # score = x * weight + bias, shape=[batch_size, label_dimension]\n self.assertAllClose([[30.2, 40.4, 50.6], [70.2, 96.4, 122.6]],\n predicted_scores)\n\n def testTwoFeatureColumns(self):\n \"\"\"Tests predict with two feature columns.\"\"\"\n with tf.Graph().as_default():\n tf.Variable([[10.]], name='linear/linear_model/x0/weights')\n tf.Variable([[20.]], name='linear/linear_model/x1/weights')\n tf.Variable([.2], name=BIAS_NAME)\n tf.Variable(100, name='global_step', dtype=tf.dtypes.int64)\n save_variables_to_ckpt(self._model_dir)\n\n linear_regressor = self._linear_regressor_fn(\n feature_columns=(self._fc_lib.numeric_column('x0'),\n self._fc_lib.numeric_column('x1')),\n model_dir=self._model_dir)\n\n predict_input_fn = numpy_io.numpy_input_fn(\n x={\n 'x0': np.array([[2.]]),\n 'x1': np.array([[3.]])\n },\n y=None,\n batch_size=1,\n num_epochs=1,\n shuffle=False)\n predictions = linear_regressor.predict(input_fn=predict_input_fn)\n predicted_scores = list([x['predictions'] for x in predictions])\n # x0 * weight0 + x1 * weight1 + bias = 2. * 10. + 3. 
* 20 + .2 = 80.2\n self.assertAllClose([[80.2]], predicted_scores)\n\n def testTwoFeatureColumnsMix(self):\n \"\"\"Tests predict with two feature columns.\"\"\"\n with tf.Graph().as_default():\n tf.Variable([[10.]], name='linear/linear_model/x0/weights')\n tf.Variable([[20.]], name='linear/linear_model/x1/weights')\n tf.Variable([.2], name=BIAS_NAME)\n tf.Variable(100, name='global_step', dtype=tf.dtypes.int64)\n save_variables_to_ckpt(self._model_dir)\n\n linear_regressor = self._linear_regressor_fn(\n feature_columns=(feature_column.numeric_column('x0'),\n tf.feature_column.numeric_column('x1')),\n model_dir=self._model_dir)\n\n def _predict_input_fn():\n return tf.compat.v1.data.Dataset.from_tensor_slices({\n 'x0': np.array([[2.]]),\n 'x1': np.array([[3.]])\n }).batch(1)\n\n predictions = linear_regressor.predict(input_fn=_predict_input_fn)\n predicted_scores = list([x['predictions'] for x in predictions])\n # x0 * weight0 + x1 * weight1 + bias = 2. * 10. + 3. * 20 + .2 = 80.2\n self.assertAllClose([[80.2]], predicted_scores)\n\n def testSparseCombiner(self):\n w_a = 2.0\n w_b = 3.0\n w_c = 5.0\n bias = 5.0\n with tf.Graph().as_default():\n tf.Variable([[w_a], [w_b], [w_c]], name=LANGUAGE_WEIGHT_NAME)\n tf.Variable([bias], name=BIAS_NAME)\n tf.Variable(\n 1, name=tf.compat.v1.GraphKeys.GLOBAL_STEP, dtype=tf.dtypes.int64)\n save_variables_to_ckpt(self._model_dir)\n\n def _input_fn():\n return tf.compat.v1.data.Dataset.from_tensors({\n 'language':\n tf.sparse.SparseTensor(\n values=['a', 'c', 'b', 'c'],\n indices=[[0, 0], [0, 1], [1, 0], [1, 1]],\n dense_shape=[2, 2]),\n })\n\n feature_columns = (self._fc_lib.categorical_column_with_vocabulary_list(\n 'language', vocabulary_list=['a', 'b', 'c']),)\n\n # Check prediction for each sparse_combiner.\n # With sparse_combiner = 'sum', we have\n # logits_1 = w_a + w_c + bias\n # = 2.0 + 5.0 + 5.0 = 12.0\n # logits_2 = w_b + w_c + bias\n # = 3.0 + 5.0 + 5.0 = 13.0\n linear_regressor = self._linear_regressor_fn(\n feature_columns=feature_columns, model_dir=self._model_dir)\n predictions = linear_regressor.predict(input_fn=_input_fn)\n predicted_scores = list([x['predictions'] for x in predictions])\n self.assertAllClose([[12.0], [13.0]], predicted_scores)\n\n # With sparse_combiner = 'mean', we have\n # logits_1 = 1/2 * (w_a + w_c) + bias\n # = 1/2 * (2.0 + 5.0) + 5.0 = 8.5\n # logits_2 = 1/2 * (w_b + w_c) + bias\n # = 1/2 * (3.0 + 5.0) + 5.0 = 9.0\n linear_regressor = self._linear_regressor_fn(\n feature_columns=feature_columns,\n model_dir=self._model_dir,\n sparse_combiner='mean')\n predictions = linear_regressor.predict(input_fn=_input_fn)\n predicted_scores = list([x['predictions'] for x in predictions])\n self.assertAllClose([[8.5], [9.0]], predicted_scores)\n\n # With sparse_combiner = 'sqrtn', we have\n # logits_1 = sqrt(2)/2 * (w_a + w_c) + bias\n # = sqrt(2)/2 * (2.0 + 5.0) + 5.0 = 9.94974\n # logits_2 = sqrt(2)/2 * (w_b + w_c) + bias\n # = sqrt(2)/2 * (3.0 + 5.0) + 5.0 = 10.65685\n linear_regressor = self._linear_regressor_fn(\n feature_columns=feature_columns,\n model_dir=self._model_dir,\n sparse_combiner='sqrtn')\n predictions = linear_regressor.predict(input_fn=_input_fn)\n predicted_scores = list([x['predictions'] for x in predictions])\n self.assertAllClose([[9.94974], [10.65685]], predicted_scores)\n\n\nclass BaseLinearRegressorIntegrationTest(object):\n\n def __init__(self, linear_regressor_fn, fc_lib=feature_column):\n self._linear_regressor_fn = linear_regressor_fn\n self._fc_lib = fc_lib\n\n def setUp(self):\n self._model_dir 
= tempfile.mkdtemp()\n\n def tearDown(self):\n if self._model_dir:\n tf.compat.v1.summary.FileWriterCache.clear()\n shutil.rmtree(self._model_dir)\n\n def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_input_fn,\n input_dimension, label_dimension, prediction_length):\n feature_columns = [\n self._fc_lib.numeric_column('x', shape=(input_dimension,))\n ]\n est = self._linear_regressor_fn(\n feature_columns=feature_columns,\n label_dimension=label_dimension,\n model_dir=self._model_dir)\n\n # TRAIN\n # learn y = x\n est.train(train_input_fn, steps=200)\n\n # EVALUTE\n scores = est.evaluate(eval_input_fn)\n self.assertEqual(200, scores[tf.compat.v1.GraphKeys.GLOBAL_STEP])\n self.assertIn(metric_keys.MetricKeys.LOSS, six.iterkeys(scores))\n\n # PREDICT\n predictions = np.array(\n [x['predictions'] for x in est.predict(predict_input_fn)])\n self.assertAllEqual((prediction_length, label_dimension), predictions.shape)\n\n # EXPORT\n feature_spec = tf.compat.v1.feature_column.make_parse_example_spec(\n feature_columns)\n serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(\n feature_spec)\n export_dir = est.export_saved_model(tempfile.mkdtemp(),\n serving_input_receiver_fn)\n self.assertTrue(tf.compat.v1.gfile.Exists(export_dir))\n\n def test_numpy_input_fn(self):\n \"\"\"Tests complete flow with numpy_input_fn.\"\"\"\n label_dimension = 2\n input_dimension = label_dimension\n batch_size = 10\n prediction_length = batch_size\n data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)\n data = data.reshape(batch_size, label_dimension)\n\n train_input_fn = numpy_io.numpy_input_fn(\n x={'x': data},\n y=data,\n batch_size=batch_size,\n num_epochs=None,\n shuffle=True)\n eval_input_fn = numpy_io.numpy_input_fn(\n x={'x': data},\n y=data,\n batch_size=batch_size,\n num_epochs=1,\n shuffle=False)\n predict_input_fn = numpy_io.numpy_input_fn(\n x={'x': data},\n y=None,\n batch_size=batch_size,\n num_epochs=1,\n shuffle=False)\n\n self._test_complete_flow(\n train_input_fn=train_input_fn,\n eval_input_fn=eval_input_fn,\n predict_input_fn=predict_input_fn,\n input_dimension=input_dimension,\n label_dimension=label_dimension,\n prediction_length=prediction_length)\n\n def test_pandas_input_fn(self):\n \"\"\"Tests complete flow with pandas_input_fn.\"\"\"\n if not HAS_PANDAS:\n return\n\n # Pandas DataFrame natually supports 1 dim data only.\n label_dimension = 1\n input_dimension = label_dimension\n batch_size = 10\n data = np.array([1., 2., 3., 4.], dtype=np.float32)\n x = pd.DataFrame({'x': data})\n y = pd.Series(data)\n prediction_length = 4\n\n train_input_fn = pandas_io.pandas_input_fn(\n x=x, y=y, batch_size=batch_size, num_epochs=None, shuffle=True)\n eval_input_fn = pandas_io.pandas_input_fn(\n x=x, y=y, batch_size=batch_size, shuffle=False)\n predict_input_fn = pandas_io.pandas_input_fn(\n x=x, batch_size=batch_size, shuffle=False)\n\n self._test_complete_flow(\n train_input_fn=train_input_fn,\n eval_input_fn=eval_input_fn,\n predict_input_fn=predict_input_fn,\n input_dimension=input_dimension,\n label_dimension=label_dimension,\n prediction_length=prediction_length)\n\n def test_input_fn_from_parse_example(self):\n \"\"\"Tests complete flow with input_fn constructed from parse_example.\"\"\"\n label_dimension = 2\n input_dimension = label_dimension\n batch_size = 10\n prediction_length = batch_size\n data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)\n data = data.reshape(batch_size, label_dimension)\n\n 
serialized_examples = []\n for datum in data:\n example = example_pb2.Example(\n features=feature_pb2.Features(\n feature={\n 'x':\n feature_pb2.Feature(\n float_list=feature_pb2.FloatList(value=datum)),\n 'y':\n feature_pb2.Feature(\n float_list=feature_pb2.FloatList(\n value=datum[:label_dimension])),\n }))\n serialized_examples.append(example.SerializeToString())\n\n feature_spec = {\n 'x': tf.io.FixedLenFeature([input_dimension], tf.dtypes.float32),\n 'y': tf.io.FixedLenFeature([label_dimension], tf.dtypes.float32),\n }\n\n def _train_input_fn():\n feature_map = tf.compat.v1.io.parse_example(serialized_examples,\n feature_spec)\n features = queue_parsed_features(feature_map)\n labels = features.pop('y')\n return features, labels\n\n def _eval_input_fn():\n feature_map = tf.compat.v1.io.parse_example(\n tf.compat.v1.train.limit_epochs(serialized_examples, num_epochs=1),\n feature_spec)\n features = queue_parsed_features(feature_map)\n labels = features.pop('y')\n return features, labels\n\n def _predict_input_fn():\n feature_map = tf.compat.v1.io.parse_example(\n tf.compat.v1.train.limit_epochs(serialized_examples, num_epochs=1),\n feature_spec)\n features = queue_parsed_features(feature_map)\n features.pop('y')\n return features, None\n\n self._test_complete_flow(\n train_input_fn=_train_input_fn,\n eval_input_fn=_eval_input_fn,\n predict_input_fn=_predict_input_fn,\n input_dimension=input_dimension,\n label_dimension=label_dimension,\n prediction_length=prediction_length)\n\n\nclass BaseLinearRegressorTrainingTest(object):\n\n def __init__(self, linear_regressor_fn, fc_lib=feature_column):\n self._linear_regressor_fn = linear_regressor_fn\n self._fc_lib = fc_lib\n\n def setUp(self):\n self._model_dir = tempfile.mkdtemp()\n\n def tearDown(self):\n if self._model_dir:\n tf.compat.v1.summary.FileWriterCache.clear()\n shutil.rmtree(self._model_dir)\n\n def _mock_optimizer(self, expected_loss=None):\n expected_var_names = [\n '%s/part_0:0' % AGE_WEIGHT_NAME,\n '%s/part_0:0' % BIAS_NAME\n ]\n\n def _minimize(loss, global_step=None, var_list=None):\n trainable_vars = var_list or tf.compat.v1.get_collection(\n tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES)\n self.assertItemsEqual(expected_var_names,\n [var.name for var in trainable_vars])\n\n # Verify loss. 
We can't check the value directly, so we add an assert op.\n self.assertEquals(0, loss.shape.ndims)\n if expected_loss is None:\n if global_step is not None:\n return tf.compat.v1.assign_add(global_step, 1).op\n return tf.no_op()\n assert_loss = assert_close(\n tf.cast(expected_loss, name='expected', dtype=tf.dtypes.float32),\n loss,\n name='assert_loss')\n with tf.control_dependencies((assert_loss,)):\n if global_step is not None:\n return tf.compat.v1.assign_add(global_step, 1).op\n return tf.no_op()\n\n mock_optimizer = tf.compat.v1.test.mock.NonCallableMock(\n spec=tf.compat.v1.train.Optimizer,\n wraps=tf.compat.v1.train.Optimizer(\n use_locking=False, name='my_optimizer'))\n mock_optimizer.minimize = tf.compat.v1.test.mock.MagicMock(wraps=_minimize)\n\n # NOTE: Estimator.params performs a deepcopy, which wreaks havoc with mocks.\n # So, return mock_optimizer itself for deepcopy.\n mock_optimizer.__deepcopy__ = lambda _: mock_optimizer\n return mock_optimizer\n\n def _assert_checkpoint(self,\n expected_global_step,\n expected_age_weight=None,\n expected_bias=None):\n shapes = {\n name: shape\n for (name, shape) in tf.train.list_variables(self._model_dir)\n }\n\n self.assertEqual([], shapes[tf.compat.v1.GraphKeys.GLOBAL_STEP])\n self.assertEqual(\n expected_global_step,\n tf.train.load_variable(self._model_dir,\n tf.compat.v1.GraphKeys.GLOBAL_STEP))\n\n self.assertEqual([1, 1], shapes[AGE_WEIGHT_NAME])\n if expected_age_weight is not None:\n self.assertEqual(expected_age_weight,\n tf.train.load_variable(self._model_dir, AGE_WEIGHT_NAME))\n\n self.assertEqual([1], shapes[BIAS_NAME])\n if expected_bias is not None:\n self.assertEqual(expected_bias,\n tf.train.load_variable(self._model_dir, BIAS_NAME))\n\n def testFromScratchWithDefaultOptimizer(self):\n # Create LinearRegressor.\n label = 5.\n age = 17\n linear_regressor = self._linear_regressor_fn(\n feature_columns=(self._fc_lib.numeric_column('age'),),\n model_dir=self._model_dir)\n\n # Train for a few steps, and validate final checkpoint.\n num_steps = 10\n linear_regressor.train(\n input_fn=lambda: ({\n 'age': ((age,),)\n }, ((label,),)), steps=num_steps)\n self._assert_checkpoint(num_steps)\n\n def testTrainWithOneDimLabel(self):\n label_dimension = 1\n batch_size = 20\n feature_columns = [self._fc_lib.numeric_column('age', shape=(1,))]\n est = self._linear_regressor_fn(\n feature_columns=feature_columns,\n label_dimension=label_dimension,\n model_dir=self._model_dir)\n data_rank_1 = np.linspace(0., 2., batch_size, dtype=np.float32)\n self.assertEqual((batch_size,), data_rank_1.shape)\n\n train_input_fn = numpy_io.numpy_input_fn(\n x={'age': data_rank_1},\n y=data_rank_1,\n batch_size=batch_size,\n num_epochs=None,\n shuffle=True)\n est.train(train_input_fn, steps=200)\n self._assert_checkpoint(200)\n\n def testTrainWithOneDimWeight(self):\n label_dimension = 1\n batch_size = 20\n feature_columns = [self._fc_lib.numeric_column('age', shape=(1,))]\n est = self._linear_regressor_fn(\n feature_columns=feature_columns,\n label_dimension=label_dimension,\n weight_column='w',\n model_dir=self._model_dir)\n\n data_rank_1 = np.linspace(0., 2., batch_size, dtype=np.float32)\n self.assertEqual((batch_size,), data_rank_1.shape)\n\n train_input_fn = numpy_io.numpy_input_fn(\n x={\n 'age': data_rank_1,\n 'w': data_rank_1\n },\n y=data_rank_1,\n batch_size=batch_size,\n num_epochs=None,\n shuffle=True)\n est.train(train_input_fn, steps=200)\n self._assert_checkpoint(200)\n\n def testFromScratch(self):\n # Create LinearRegressor.\n label = 5.\n 
age = 17\n # loss = (logits - label)^2 = (0 - 5.)^2 = 25.\n mock_optimizer = self._mock_optimizer(expected_loss=25.)\n linear_regressor = self._linear_regressor_fn(\n feature_columns=(self._fc_lib.numeric_column('age'),),\n model_dir=self._model_dir,\n optimizer=mock_optimizer)\n self.assertEqual(0, mock_optimizer.minimize.call_count)\n\n # Train for a few steps, and validate optimizer and final checkpoint.\n num_steps = 10\n linear_regressor.train(\n input_fn=lambda: ({\n 'age': ((age,),)\n }, ((label,),)), steps=num_steps)\n self.assertEqual(1, mock_optimizer.minimize.call_count)\n self._assert_checkpoint(\n expected_global_step=num_steps,\n expected_age_weight=0.,\n expected_bias=0.)\n\n def testFromCheckpoint(self):\n # Create initial checkpoint.\n age_weight = 10.0\n bias = 5.0\n initial_global_step = 100\n with tf.Graph().as_default():\n tf.Variable([[age_weight]], name=AGE_WEIGHT_NAME)\n tf.Variable([bias], name=BIAS_NAME)\n tf.Variable(\n initial_global_step,\n name=tf.compat.v1.GraphKeys.GLOBAL_STEP,\n dtype=tf.dtypes.int64)\n save_variables_to_ckpt(self._model_dir)\n\n # logits = age * age_weight + bias = 17 * 10. + 5. = 175\n # loss = (logits - label)^2 = (175 - 5)^2 = 28900\n mock_optimizer = self._mock_optimizer(expected_loss=28900.)\n linear_regressor = self._linear_regressor_fn(\n feature_columns=(self._fc_lib.numeric_column('age'),),\n model_dir=self._model_dir,\n optimizer=mock_optimizer)\n self.assertEqual(0, mock_optimizer.minimize.call_count)\n\n # Train for a few steps, and validate optimizer and final checkpoint.\n num_steps = 10\n linear_regressor.train(\n input_fn=lambda: ({\n 'age': ((17,),)\n }, ((5.,),)), steps=num_steps)\n self.assertEqual(1, mock_optimizer.minimize.call_count)\n self._assert_checkpoint(\n expected_global_step=initial_global_step + num_steps,\n expected_age_weight=age_weight,\n expected_bias=bias)\n\n def testFromCheckpointMultiBatch(self):\n # Create initial checkpoint.\n age_weight = 10.0\n bias = 5.0\n initial_global_step = 100\n with tf.Graph().as_default():\n tf.Variable([[age_weight]], name=AGE_WEIGHT_NAME)\n tf.Variable([bias], name=BIAS_NAME)\n tf.Variable(\n initial_global_step,\n name=tf.compat.v1.GraphKeys.GLOBAL_STEP,\n dtype=tf.dtypes.int64)\n save_variables_to_ckpt(self._model_dir)\n\n # logits = age * age_weight + bias\n # logits[0] = 17 * 10. + 5. = 175\n # logits[1] = 15 * 10. + 5. 
= 155\n # loss = sum(logits - label)^2 = (175 - 5)^2 + (155 - 3)^2 = 52004\n mock_optimizer = self._mock_optimizer(expected_loss=52004.)\n linear_regressor = self._linear_regressor_fn(\n feature_columns=(self._fc_lib.numeric_column('age'),),\n model_dir=self._model_dir,\n optimizer=mock_optimizer)\n self.assertEqual(0, mock_optimizer.minimize.call_count)\n\n # Train for a few steps, and validate optimizer and final checkpoint.\n num_steps = 10\n linear_regressor.train(\n input_fn=lambda: ({\n 'age': ((17,), (15,))\n }, ((5.,), (3.,))),\n steps=num_steps)\n self.assertEqual(1, mock_optimizer.minimize.call_count)\n self._assert_checkpoint(\n expected_global_step=initial_global_step + num_steps,\n expected_age_weight=age_weight,\n expected_bias=bias)\n\n\nclass BaseLinearClassifierTrainingTest(object):\n\n def __init__(self, linear_classifier_fn, fc_lib=feature_column):\n self._linear_classifier_fn = linear_classifier_fn\n self._fc_lib = fc_lib\n\n def setUp(self):\n self._model_dir = tempfile.mkdtemp()\n\n def tearDown(self):\n if self._model_dir:\n shutil.rmtree(self._model_dir)\n\n def _mock_optimizer(self, expected_loss=None):\n expected_var_names = [\n '%s/part_0:0' % AGE_WEIGHT_NAME,\n '%s/part_0:0' % BIAS_NAME\n ]\n\n def _minimize(loss, global_step):\n trainable_vars = tf.compat.v1.get_collection(\n tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES)\n self.assertItemsEqual(expected_var_names,\n [var.name for var in trainable_vars])\n\n # Verify loss. We can't check the value directly, so we add an assert op.\n self.assertEquals(0, loss.shape.ndims)\n if expected_loss is None:\n return tf.compat.v1.assign_add(global_step, 1).op\n assert_loss = assert_close(\n tf.cast(expected_loss, name='expected', dtype=tf.dtypes.float32),\n loss,\n name='assert_loss')\n with tf.control_dependencies((assert_loss,)):\n return tf.compat.v1.assign_add(global_step, 1).op\n\n mock_optimizer = tf.compat.v1.test.mock.NonCallableMock(\n spec=tf.compat.v1.train.Optimizer,\n wraps=tf.compat.v1.train.Optimizer(\n use_locking=False, name='my_optimizer'))\n mock_optimizer.minimize = tf.compat.v1.test.mock.MagicMock(wraps=_minimize)\n\n # NOTE: Estimator.params performs a deepcopy, which wreaks havoc with mocks.\n # So, return mock_optimizer itself for deepcopy.\n mock_optimizer.__deepcopy__ = lambda _: mock_optimizer\n return mock_optimizer\n\n def _assert_checkpoint(self,\n n_classes,\n expected_global_step,\n expected_age_weight=None,\n expected_bias=None):\n logits_dimension = n_classes if n_classes > 2 else 1\n\n shapes = {\n name: shape\n for (name, shape) in tf.train.list_variables(self._model_dir)\n }\n\n self.assertEqual([], shapes[tf.compat.v1.GraphKeys.GLOBAL_STEP])\n self.assertEqual(\n expected_global_step,\n tf.train.load_variable(self._model_dir,\n tf.compat.v1.GraphKeys.GLOBAL_STEP))\n\n self.assertEqual([1, logits_dimension], shapes[AGE_WEIGHT_NAME])\n if expected_age_weight is not None:\n self.assertAllEqual(\n expected_age_weight,\n tf.train.load_variable(self._model_dir, AGE_WEIGHT_NAME))\n\n self.assertEqual([logits_dimension], shapes[BIAS_NAME])\n if expected_bias is not None:\n self.assertAllEqual(expected_bias,\n tf.train.load_variable(self._model_dir, BIAS_NAME))\n\n def _testFromScratchWithDefaultOptimizer(self, n_classes):\n label = 0\n age = 17\n est = linear.LinearClassifier(\n feature_columns=(self._fc_lib.numeric_column('age'),),\n n_classes=n_classes,\n model_dir=self._model_dir)\n\n # Train for a few steps, and validate final checkpoint.\n num_steps = 10\n est.train(\n input_fn=lambda: 
({\n 'age': ((age,),)\n }, ((label,),)), steps=num_steps)\n self._assert_checkpoint(n_classes, num_steps)\n\n def testBinaryClassesFromScratchWithDefaultOptimizer(self):\n self._testFromScratchWithDefaultOptimizer(n_classes=2)\n\n def testMultiClassesFromScratchWithDefaultOptimizer(self):\n self._testFromScratchWithDefaultOptimizer(n_classes=4)\n\n def _testTrainWithTwoDimsLabel(self, n_classes):\n batch_size = 20\n\n est = linear.LinearClassifier(\n feature_columns=(self._fc_lib.numeric_column('age'),),\n n_classes=n_classes,\n model_dir=self._model_dir)\n data_rank_1 = np.array([0, 1])\n data_rank_2 = np.array([[0], [1]])\n self.assertEqual((2,), data_rank_1.shape)\n self.assertEqual((2, 1), data_rank_2.shape)\n\n train_input_fn = numpy_io.numpy_input_fn(\n x={'age': data_rank_1},\n y=data_rank_2,\n batch_size=batch_size,\n num_epochs=None,\n shuffle=True)\n est.train(train_input_fn, steps=200)\n self._assert_checkpoint(n_classes, 200)\n\n def testBinaryClassesTrainWithTwoDimsLabel(self):\n self._testTrainWithTwoDimsLabel(n_classes=2)\n\n def testMultiClassesTrainWithTwoDimsLabel(self):\n self._testTrainWithTwoDimsLabel(n_classes=4)\n\n def _testTrainWithOneDimLabel(self, n_classes):\n batch_size = 20\n\n est = linear.LinearClassifier(\n feature_columns=(self._fc_lib.numeric_column('age'),),\n n_classes=n_classes,\n model_dir=self._model_dir)\n data_rank_1 = np.array([0, 1])\n self.assertEqual((2,), data_rank_1.shape)\n\n train_input_fn = numpy_io.numpy_input_fn(\n x={'age': data_rank_1},\n y=data_rank_1,\n batch_size=batch_size,\n num_epochs=None,\n shuffle=True)\n est.train(train_input_fn, steps=200)\n self._assert_checkpoint(n_classes, 200)\n\n def testBinaryClassesTrainWithOneDimLabel(self):\n self._testTrainWithOneDimLabel(n_classes=2)\n\n def testMultiClassesTrainWithOneDimLabel(self):\n self._testTrainWithOneDimLabel(n_classes=4)\n\n def _testTrainWithTwoDimsWeight(self, n_classes):\n batch_size = 20\n\n est = linear.LinearClassifier(\n feature_columns=(self._fc_lib.numeric_column('age'),),\n weight_column='w',\n n_classes=n_classes,\n model_dir=self._model_dir)\n data_rank_1 = np.array([0, 1])\n data_rank_2 = np.array([[0], [1]])\n self.assertEqual((2,), data_rank_1.shape)\n self.assertEqual((2, 1), data_rank_2.shape)\n\n train_input_fn = numpy_io.numpy_input_fn(\n x={\n 'age': data_rank_1,\n 'w': data_rank_2\n },\n y=data_rank_1,\n batch_size=batch_size,\n num_epochs=None,\n shuffle=True)\n est.train(train_input_fn, steps=200)\n self._assert_checkpoint(n_classes, 200)\n\n def testBinaryClassesTrainWithTwoDimsWeight(self):\n self._testTrainWithTwoDimsWeight(n_classes=2)\n\n def testMultiClassesTrainWithTwoDimsWeight(self):\n self._testTrainWithTwoDimsWeight(n_classes=4)\n\n def _testTrainWithOneDimWeight(self, n_classes):\n batch_size = 20\n\n est = linear.LinearClassifier(\n feature_columns=(self._fc_lib.numeric_column('age'),),\n weight_column='w',\n n_classes=n_classes,\n model_dir=self._model_dir)\n data_rank_1 = np.array([0, 1])\n self.assertEqual((2,), data_rank_1.shape)\n\n train_input_fn = numpy_io.numpy_input_fn(\n x={\n 'age': data_rank_1,\n 'w': data_rank_1\n },\n y=data_rank_1,\n batch_size=batch_size,\n num_epochs=None,\n shuffle=True)\n est.train(train_input_fn, steps=200)\n self._assert_checkpoint(n_classes, 200)\n\n def testBinaryClassesTrainWithOneDimWeight(self):\n self._testTrainWithOneDimWeight(n_classes=2)\n\n def testMultiClassesTrainWithOneDimWeight(self):\n self._testTrainWithOneDimWeight(n_classes=4)\n\n def _testFromScratch(self, n_classes):\n label = 
1\n age = 17\n # For binary classifier:\n # loss = sigmoid_cross_entropy(logits, label) where logits=0 (weights are\n # all zero initially) and label = 1 so,\n # loss = 1 * -log ( sigmoid(logits) ) = 0.69315\n # For multi class classifier:\n # loss = cross_entropy(logits, label) where logits are all 0s (weights are\n # all zero initially) and label = 1 so,\n # loss = 1 * -log ( 1.0 / n_classes )\n # For this particular test case, as logits are same, the formular\n # 1 * -log ( 1.0 / n_classes ) covers both binary and multi class cases.\n mock_optimizer = self._mock_optimizer(\n expected_loss=(-1 * math.log(1.0 / n_classes)))\n\n est = linear.LinearClassifier(\n feature_columns=(self._fc_lib.numeric_column('age'),),\n n_classes=n_classes,\n optimizer=mock_optimizer,\n model_dir=self._model_dir)\n self.assertEqual(0, mock_optimizer.minimize.call_count)\n\n # Train for a few steps, and validate optimizer and final checkpoint.\n num_steps = 10\n est.train(\n input_fn=lambda: ({\n 'age': ((age,),)\n }, ((label,),)), steps=num_steps)\n self.assertEqual(1, mock_optimizer.minimize.call_count)\n self._assert_checkpoint(\n n_classes,\n expected_global_step=num_steps,\n expected_age_weight=[[0.]] if n_classes == 2 else [[0.] * n_classes],\n expected_bias=[0.] if n_classes == 2 else [.0] * n_classes)\n\n def testBinaryClassesFromScratch(self):\n self._testFromScratch(n_classes=2)\n\n def testMultiClassesFromScratch(self):\n self._testFromScratch(n_classes=4)\n\n def _testFromCheckpoint(self, n_classes):\n # Create initial checkpoint.\n label = 1\n age = 17\n # For binary case, the expected weight has shape (1,1). For multi class\n # case, the shape is (1, n_classes). In order to test the weights, set\n # weights as 2.0 * range(n_classes).\n age_weight = [[2.0]] if n_classes == 2 else (np.reshape(\n 2.0 * np.array(list(range(n_classes)), dtype=np.float32),\n (1, n_classes)))\n bias = [-35.0] if n_classes == 2 else [-35.0] * n_classes\n initial_global_step = 100\n with tf.Graph().as_default():\n tf.Variable(age_weight, name=AGE_WEIGHT_NAME)\n tf.Variable(bias, name=BIAS_NAME)\n tf.Variable(\n initial_global_step,\n name=tf.compat.v1.GraphKeys.GLOBAL_STEP,\n dtype=tf.dtypes.int64)\n save_variables_to_ckpt(self._model_dir)\n\n # For binary classifier:\n # logits = age * age_weight + bias = 17 * 2. - 35. 
= -1.\n # loss = sigmoid_cross_entropy(logits, label)\n # so, loss = 1 * -log ( sigmoid(-1) ) = 1.3133\n # For multi class classifier:\n # loss = cross_entropy(logits, label)\n # where logits = 17 * age_weight + bias and label = 1\n # so, loss = 1 * -log ( soft_max(logits)[1] )\n if n_classes == 2:\n expected_loss = 1.3133\n else:\n logits = age_weight * age + bias\n logits_exp = np.exp(logits)\n softmax = logits_exp / logits_exp.sum()\n expected_loss = -1 * math.log(softmax[0, label])\n\n mock_optimizer = self._mock_optimizer(expected_loss=expected_loss)\n\n est = linear.LinearClassifier(\n feature_columns=(self._fc_lib.numeric_column('age'),),\n n_classes=n_classes,\n optimizer=mock_optimizer,\n model_dir=self._model_dir)\n self.assertEqual(0, mock_optimizer.minimize.call_count)\n\n # Train for a few steps, and validate optimizer and final checkpoint.\n num_steps = 10\n est.train(\n input_fn=lambda: ({\n 'age': ((age,),)\n }, ((label,),)), steps=num_steps)\n self.assertEqual(1, mock_optimizer.minimize.call_count)\n self._assert_checkpoint(\n n_classes,\n expected_global_step=initial_global_step + num_steps,\n expected_age_weight=age_weight,\n expected_bias=bias)\n\n def testBinaryClassesFromCheckpoint(self):\n self._testFromCheckpoint(n_classes=2)\n\n def testMultiClassesFromCheckpoint(self):\n self._testFromCheckpoint(n_classes=4)\n\n def _testFromCheckpointFloatLabels(self, n_classes):\n \"\"\"Tests float labels for binary classification.\"\"\"\n # Create initial checkpoint.\n if n_classes > 2:\n return\n label = 0.8\n age = 17\n age_weight = [[2.0]]\n bias = [-35.0]\n initial_global_step = 100\n with tf.Graph().as_default():\n tf.Variable(age_weight, name=AGE_WEIGHT_NAME)\n tf.Variable(bias, name=BIAS_NAME)\n tf.Variable(\n initial_global_step,\n name=tf.compat.v1.GraphKeys.GLOBAL_STEP,\n dtype=tf.dtypes.int64)\n save_variables_to_ckpt(self._model_dir)\n\n # logits = age * age_weight + bias = 17 * 2. - 35. = -1.\n # loss = sigmoid_cross_entropy(logits, label)\n # => loss = -0.8 * log(sigmoid(-1)) -0.2 * log(sigmoid(+1)) = 1.1132617\n mock_optimizer = self._mock_optimizer(expected_loss=1.1132617)\n\n est = linear.LinearClassifier(\n feature_columns=(self._fc_lib.numeric_column('age'),),\n n_classes=n_classes,\n optimizer=mock_optimizer,\n model_dir=self._model_dir)\n self.assertEqual(0, mock_optimizer.minimize.call_count)\n\n # Train for a few steps, and validate optimizer and final checkpoint.\n num_steps = 10\n est.train(\n input_fn=lambda: ({\n 'age': ((age,),)\n }, ((label,),)), steps=num_steps)\n self.assertEqual(1, mock_optimizer.minimize.call_count)\n\n def testBinaryClassesFromCheckpointFloatLabels(self):\n self._testFromCheckpointFloatLabels(n_classes=2)\n\n def testMultiClassesFromCheckpointFloatLabels(self):\n self._testFromCheckpointFloatLabels(n_classes=4)\n\n def _testFromCheckpointMultiBatch(self, n_classes):\n # Create initial checkpoint.\n label = [1, 0]\n age = [17.0, 18.5]\n # For binary case, the expected weight has shape (1,1). For multi class\n # case, the shape is (1, n_classes). 
In order to test the weights, set\n # weights as 2.0 * range(n_classes).\n age_weight = [[2.0]] if n_classes == 2 else (np.reshape(\n 2.0 * np.array(list(range(n_classes)), dtype=np.float32),\n (1, n_classes)))\n bias = [-35.0] if n_classes == 2 else [-35.0] * n_classes\n initial_global_step = 100\n with tf.Graph().as_default():\n tf.Variable(age_weight, name=AGE_WEIGHT_NAME)\n tf.Variable(bias, name=BIAS_NAME)\n tf.Variable(\n initial_global_step,\n name=tf.compat.v1.GraphKeys.GLOBAL_STEP,\n dtype=tf.dtypes.int64)\n save_variables_to_ckpt(self._model_dir)\n\n # For binary classifier:\n # logits = age * age_weight + bias\n # logits[0] = 17 * 2. - 35. = -1.\n # logits[1] = 18.5 * 2. - 35. = 2.\n # loss = sigmoid_cross_entropy(logits, label)\n # so, loss[0] = 1 * -log ( sigmoid(-1) ) = 1.3133\n # loss[1] = (1 - 0) * -log ( 1- sigmoid(2) ) = 2.1269\n # expected_loss = loss[0] + loss[1]\n # For multi class classifier:\n # loss = cross_entropy(logits, label)\n # where logits = [17, 18.5] * age_weight + bias and label = [1, 0]\n # so, loss = 1 * -log ( soft_max(logits)[label] )\n # expected_loss = loss[0] + loss[1]\n if n_classes == 2:\n expected_loss = 1.3133 + 2.1269\n else:\n logits = age_weight * np.reshape(age, (2, 1)) + bias\n logits_exp = np.exp(logits)\n softmax_row_0 = logits_exp[0] / logits_exp[0].sum()\n softmax_row_1 = logits_exp[1] / logits_exp[1].sum()\n expected_loss_0 = -1 * math.log(softmax_row_0[label[0]])\n expected_loss_1 = -1 * math.log(softmax_row_1[label[1]])\n expected_loss = expected_loss_0 + expected_loss_1\n\n mock_optimizer = self._mock_optimizer(expected_loss=expected_loss)\n\n est = linear.LinearClassifier(\n feature_columns=(self._fc_lib.numeric_column('age'),),\n n_classes=n_classes,\n optimizer=mock_optimizer,\n model_dir=self._model_dir)\n self.assertEqual(0, mock_optimizer.minimize.call_count)\n\n # Train for a few steps, and validate optimizer and final checkpoint.\n num_steps = 10\n est.train(input_fn=lambda: ({'age': (age)}, (label)), steps=num_steps)\n self.assertEqual(1, mock_optimizer.minimize.call_count)\n self._assert_checkpoint(\n n_classes,\n expected_global_step=initial_global_step + num_steps,\n expected_age_weight=age_weight,\n expected_bias=bias)\n\n def testBinaryClassesFromCheckpointMultiBatch(self):\n self._testFromCheckpointMultiBatch(n_classes=2)\n\n def testMultiClassesFromCheckpointMultiBatch(self):\n self._testFromCheckpointMultiBatch(n_classes=4)\n\n\nclass BaseLinearClassifierEvaluationTest(object):\n\n def __init__(self, linear_classifier_fn, fc_lib=feature_column):\n self._linear_classifier_fn = linear_classifier_fn\n self._fc_lib = fc_lib\n\n def setUp(self):\n self._model_dir = tempfile.mkdtemp()\n\n def tearDown(self):\n if self._model_dir:\n shutil.rmtree(self._model_dir)\n\n def _test_evaluation_for_simple_data(self, n_classes):\n label = 1\n age = 1.\n\n # For binary case, the expected weight has shape (1,1). For multi class\n # case, the shape is (1, n_classes). 
In order to test the weights, set\n # weights as 2.0 * range(n_classes).\n age_weight = [[-11.0]] if n_classes == 2 else (np.reshape(\n -11.0 * np.array(list(range(n_classes)), dtype=np.float32),\n (1, n_classes)))\n bias = [-30.0] if n_classes == 2 else [-30.0] * n_classes\n\n with tf.Graph().as_default():\n tf.Variable(age_weight, name=AGE_WEIGHT_NAME)\n tf.Variable(bias, name=BIAS_NAME)\n tf.Variable(\n 100, name=tf.compat.v1.GraphKeys.GLOBAL_STEP, dtype=tf.dtypes.int64)\n save_variables_to_ckpt(self._model_dir)\n\n est = self._linear_classifier_fn(\n feature_columns=(self._fc_lib.numeric_column('age'),),\n n_classes=n_classes,\n model_dir=self._model_dir)\n eval_metrics = est.evaluate(\n input_fn=lambda: ({\n 'age': ((age,),)\n }, ((label,),)), steps=1)\n\n if n_classes == 2:\n # Binary classes: loss = sum(corss_entropy(41)) = 41.\n expected_metrics = {\n metric_keys.MetricKeys.LOSS: 41.,\n tf.compat.v1.GraphKeys.GLOBAL_STEP: 100,\n metric_keys.MetricKeys.LOSS_MEAN: 41.,\n metric_keys.MetricKeys.ACCURACY: 0.,\n metric_keys.MetricKeys.PRECISION: 0.,\n metric_keys.MetricKeys.RECALL: 0.,\n metric_keys.MetricKeys.PREDICTION_MEAN: 0.,\n metric_keys.MetricKeys.LABEL_MEAN: 1.,\n metric_keys.MetricKeys.ACCURACY_BASELINE: 1,\n metric_keys.MetricKeys.AUC: 0.,\n metric_keys.MetricKeys.AUC_PR: 1.,\n }\n else:\n # Multi classes: loss = 1 * -log ( soft_max(logits)[label] )\n logits = age_weight * age + bias\n logits_exp = np.exp(logits)\n softmax = logits_exp / logits_exp.sum()\n expected_loss = -1 * math.log(softmax[0, label])\n\n expected_metrics = {\n metric_keys.MetricKeys.LOSS: expected_loss,\n metric_keys.MetricKeys.LOSS_MEAN: expected_loss,\n tf.compat.v1.GraphKeys.GLOBAL_STEP: 100,\n metric_keys.MetricKeys.ACCURACY: 0.,\n }\n\n self.assertAllClose(\n sorted_key_dict(expected_metrics),\n sorted_key_dict(eval_metrics),\n rtol=1e-3)\n\n def test_binary_classes_evaluation_for_simple_data(self):\n self._test_evaluation_for_simple_data(n_classes=2)\n\n def test_multi_classes_evaluation_for_simple_data(self):\n self._test_evaluation_for_simple_data(n_classes=4)\n\n def _test_evaluation_batch(self, n_classes):\n \"\"\"Tests evaluation for batch_size==2.\"\"\"\n label = [1, 0]\n age = [17., 18.]\n # For binary case, the expected weight has shape (1,1). For multi class\n # case, the shape is (1, n_classes). In order to test the weights, set\n # weights as 2.0 * range(n_classes).\n age_weight = [[2.0]] if n_classes == 2 else (np.reshape(\n 2.0 * np.array(list(range(n_classes)), dtype=np.float32),\n (1, n_classes)))\n bias = [-35.0] if n_classes == 2 else [-35.0] * n_classes\n initial_global_step = 100\n with tf.Graph().as_default():\n tf.Variable(age_weight, name=AGE_WEIGHT_NAME)\n tf.Variable(bias, name=BIAS_NAME)\n tf.Variable(\n initial_global_step,\n name=tf.compat.v1.GraphKeys.GLOBAL_STEP,\n dtype=tf.dtypes.int64)\n save_variables_to_ckpt(self._model_dir)\n\n est = self._linear_classifier_fn(\n feature_columns=(self._fc_lib.numeric_column('age'),),\n n_classes=n_classes,\n model_dir=self._model_dir)\n eval_metrics = est.evaluate(\n input_fn=lambda: ({\n 'age': (age)\n }, (label)), steps=1)\n\n if n_classes == 2:\n # Logits are (-1., 1.) 
labels are (1, 0).\n # Loss is\n # loss for row 1: 1 * -log(sigmoid(-1)) = 1.3133\n # loss for row 2: (1 - 0) * -log(1 - sigmoid(1)) = 1.3133\n expected_loss = 1.3133 * 2\n\n expected_metrics = {\n metric_keys.MetricKeys.LOSS: expected_loss,\n tf.compat.v1.GraphKeys.GLOBAL_STEP: 100,\n metric_keys.MetricKeys.LOSS_MEAN: expected_loss / 2,\n metric_keys.MetricKeys.ACCURACY: 0.,\n metric_keys.MetricKeys.PRECISION: 0.,\n metric_keys.MetricKeys.RECALL: 0.,\n metric_keys.MetricKeys.PREDICTION_MEAN: 0.5,\n metric_keys.MetricKeys.LABEL_MEAN: 0.5,\n metric_keys.MetricKeys.ACCURACY_BASELINE: 0.5,\n metric_keys.MetricKeys.AUC: 0.,\n metric_keys.MetricKeys.AUC_PR: 0.25,\n }\n else:\n # Multi classes: loss = 1 * -log ( soft_max(logits)[label] )\n logits = age_weight * np.reshape(age, (2, 1)) + bias\n logits_exp = np.exp(logits)\n softmax_row_0 = logits_exp[0] / logits_exp[0].sum()\n softmax_row_1 = logits_exp[1] / logits_exp[1].sum()\n expected_loss_0 = -1 * math.log(softmax_row_0[label[0]])\n expected_loss_1 = -1 * math.log(softmax_row_1[label[1]])\n expected_loss = expected_loss_0 + expected_loss_1\n\n expected_metrics = {\n metric_keys.MetricKeys.LOSS: expected_loss,\n metric_keys.MetricKeys.LOSS_MEAN: expected_loss / 2,\n tf.compat.v1.GraphKeys.GLOBAL_STEP: 100,\n metric_keys.MetricKeys.ACCURACY: 0.,\n }\n\n self.assertAllClose(\n sorted_key_dict(expected_metrics),\n sorted_key_dict(eval_metrics),\n rtol=1e-3)\n\n def test_binary_classes_evaluation_batch(self):\n self._test_evaluation_batch(n_classes=2)\n\n def test_multi_classes_evaluation_batch(self):\n self._test_evaluation_batch(n_classes=4)\n\n def _test_evaluation_weights(self, n_classes):\n \"\"\"Tests evaluation with weights.\"\"\"\n\n label = [1, 0]\n age = [17., 18.]\n weights = [1., 2.]\n # For binary case, the expected weight has shape (1,1). For multi class\n # case, the shape is (1, n_classes). In order to test the weights, set\n # weights as 2.0 * range(n_classes).\n age_weight = [[2.0]] if n_classes == 2 else (np.reshape(\n 2.0 * np.array(list(range(n_classes)), dtype=np.float32),\n (1, n_classes)))\n bias = [-35.0] if n_classes == 2 else [-35.0] * n_classes\n initial_global_step = 100\n with tf.Graph().as_default():\n tf.Variable(age_weight, name=AGE_WEIGHT_NAME)\n tf.Variable(bias, name=BIAS_NAME)\n tf.Variable(\n initial_global_step,\n name=tf.compat.v1.GraphKeys.GLOBAL_STEP,\n dtype=tf.dtypes.int64)\n save_variables_to_ckpt(self._model_dir)\n\n est = self._linear_classifier_fn(\n feature_columns=(self._fc_lib.numeric_column('age'),),\n n_classes=n_classes,\n weight_column='w',\n model_dir=self._model_dir)\n eval_metrics = est.evaluate(\n input_fn=lambda: ({\n 'age': (age),\n 'w': (weights)\n }, (label)), steps=1)\n\n if n_classes == 2:\n # Logits are (-1., 1.) labels are (1, 0).\n # Loss is\n # loss for row 1: 1 * -log(sigmoid(-1)) = 1.3133\n # loss for row 2: (1 - 0) * -log(1 - sigmoid(1)) = 1.3133\n # weights = [1., 2.]\n expected_loss = 1.3133 * (1. 
+ 2.)\n loss_mean = expected_loss / (1.0 + 2.0)\n label_mean = np.average(label, weights=weights)\n logits = [-1, 1]\n logistics = sigmoid(np.array(logits))\n predictions_mean = np.average(logistics, weights=weights)\n\n expected_metrics = {\n metric_keys.MetricKeys.LOSS: expected_loss,\n tf.compat.v1.GraphKeys.GLOBAL_STEP: 100,\n metric_keys.MetricKeys.LOSS_MEAN: loss_mean,\n metric_keys.MetricKeys.ACCURACY: 0.,\n metric_keys.MetricKeys.PRECISION: 0.,\n metric_keys.MetricKeys.RECALL: 0.,\n metric_keys.MetricKeys.PREDICTION_MEAN: predictions_mean,\n metric_keys.MetricKeys.LABEL_MEAN: label_mean,\n metric_keys.MetricKeys.ACCURACY_BASELINE:\n (max(label_mean, 1 - label_mean)),\n metric_keys.MetricKeys.AUC: 0.,\n metric_keys.MetricKeys.AUC_PR: 0.1668,\n }\n else:\n # Multi classes: unweighted_loss = 1 * -log ( soft_max(logits)[label] )\n logits = age_weight * np.reshape(age, (2, 1)) + bias\n logits_exp = np.exp(logits)\n softmax_row_0 = logits_exp[0] / logits_exp[0].sum()\n softmax_row_1 = logits_exp[1] / logits_exp[1].sum()\n expected_loss_0 = -1 * math.log(softmax_row_0[label[0]])\n expected_loss_1 = -1 * math.log(softmax_row_1[label[1]])\n loss_mean = np.average([expected_loss_0, expected_loss_1],\n weights=weights)\n expected_loss = loss_mean * np.sum(weights)\n\n expected_metrics = {\n metric_keys.MetricKeys.LOSS: expected_loss,\n metric_keys.MetricKeys.LOSS_MEAN: loss_mean,\n tf.compat.v1.GraphKeys.GLOBAL_STEP: 100,\n metric_keys.MetricKeys.ACCURACY: 0.,\n }\n\n self.assertAllClose(\n sorted_key_dict(expected_metrics),\n sorted_key_dict(eval_metrics),\n rtol=1e-3)\n\n def test_binary_classes_evaluation_weights(self):\n self._test_evaluation_weights(n_classes=2)\n\n def test_multi_classes_evaluation_weights(self):\n self._test_evaluation_weights(n_classes=4)\n\n\nclass BaseLinearClassifierPredictTest(object):\n\n def __init__(self, linear_classifier_fn, fc_lib=feature_column):\n self._linear_classifier_fn = linear_classifier_fn\n self._fc_lib = fc_lib\n\n def setUp(self):\n self._model_dir = tempfile.mkdtemp()\n\n def tearDown(self):\n if self._model_dir:\n shutil.rmtree(self._model_dir)\n\n def _testPredictions(self, n_classes, label_vocabulary, label_output_fn):\n \"\"\"Tests predict when all variables are one-dimensional.\"\"\"\n age = 1.\n\n # For binary case, the expected weight has shape (1,1). For multi class\n # case, the shape is (1, n_classes). 
In order to test the weights, set\n # weights as 2.0 * range(n_classes).\n age_weight = [[-11.0]] if n_classes == 2 else (np.reshape(\n -11.0 * np.array(list(range(n_classes)), dtype=np.float32),\n (1, n_classes)))\n bias = [10.0] if n_classes == 2 else [10.0] * n_classes\n\n with tf.Graph().as_default():\n tf.Variable(age_weight, name=AGE_WEIGHT_NAME)\n tf.Variable(bias, name=BIAS_NAME)\n tf.Variable(100, name='global_step', dtype=tf.dtypes.int64)\n save_variables_to_ckpt(self._model_dir)\n\n est = self._linear_classifier_fn(\n feature_columns=(self._fc_lib.numeric_column('age'),),\n label_vocabulary=label_vocabulary,\n n_classes=n_classes,\n model_dir=self._model_dir)\n\n predict_input_fn = numpy_io.numpy_input_fn(\n x={'age': np.array([[age]])},\n y=None,\n batch_size=1,\n num_epochs=1,\n shuffle=False)\n predictions = list(est.predict(input_fn=predict_input_fn))\n\n if n_classes == 2:\n scalar_logits = np.asscalar(\n np.reshape(np.array(age_weight) * age + bias, (1,)))\n two_classes_logits = [0, scalar_logits]\n two_classes_logits_exp = np.exp(two_classes_logits)\n softmax = two_classes_logits_exp / two_classes_logits_exp.sum()\n\n expected_predictions = {\n 'class_ids': [0],\n 'all_class_ids': [0, 1],\n 'classes': [label_output_fn(0)],\n 'all_classes': [label_output_fn(0),\n label_output_fn(1)],\n 'logistic': [sigmoid(np.array(scalar_logits))],\n 'logits': [scalar_logits],\n 'probabilities': softmax,\n }\n else:\n onedim_logits = np.reshape(np.array(age_weight) * age + bias, (-1,))\n class_ids = onedim_logits.argmax()\n all_class_ids = list(range(len(onedim_logits)))\n logits_exp = np.exp(onedim_logits)\n softmax = logits_exp / logits_exp.sum()\n expected_predictions = {\n 'class_ids': [class_ids],\n 'all_class_ids': all_class_ids,\n 'classes': [label_output_fn(class_ids)],\n 'all_classes': [label_output_fn(i) for i in all_class_ids],\n 'logits': onedim_logits,\n 'probabilities': softmax,\n }\n\n self.assertEqual(1, len(predictions))\n # assertAllClose cannot handle byte type.\n self.assertEqual(expected_predictions['classes'], predictions[0]['classes'])\n expected_predictions.pop('classes')\n predictions[0].pop('classes')\n self.assertAllEqual(expected_predictions['all_classes'],\n predictions[0]['all_classes'])\n expected_predictions.pop('all_classes')\n predictions[0].pop('all_classes')\n self.assertAllClose(\n sorted_key_dict(expected_predictions), sorted_key_dict(predictions[0]))\n\n def testBinaryClassesWithoutLabelVocabulary(self):\n n_classes = 2\n self._testPredictions(\n n_classes,\n label_vocabulary=None,\n label_output_fn=lambda x: ('%s' % x).encode())\n\n def testBinaryClassesWithLabelVocabulary(self):\n n_classes = 2\n self._testPredictions(\n n_classes,\n label_vocabulary=['class_vocab_{}'.format(i) for i in range(n_classes)],\n label_output_fn=lambda x: ('class_vocab_%s' % x).encode())\n\n def testMultiClassesWithoutLabelVocabulary(self):\n n_classes = 4\n self._testPredictions(\n n_classes,\n label_vocabulary=None,\n label_output_fn=lambda x: ('%s' % x).encode())\n\n def testMultiClassesWithLabelVocabulary(self):\n n_classes = 4\n self._testPredictions(\n n_classes,\n label_vocabulary=['class_vocab_{}'.format(i) for i in range(n_classes)],\n label_output_fn=lambda x: ('class_vocab_%s' % x).encode())\n\n def testSparseCombiner(self):\n w_a = 2.0\n w_b = 3.0\n w_c = 5.0\n bias = 5.0\n with tf.Graph().as_default():\n tf.Variable([[w_a], [w_b], [w_c]], name=LANGUAGE_WEIGHT_NAME)\n tf.Variable([bias], name=BIAS_NAME)\n tf.Variable(\n 1, 
name=tf.compat.v1.GraphKeys.GLOBAL_STEP, dtype=tf.dtypes.int64)\n save_variables_to_ckpt(self._model_dir)\n\n def _input_fn():\n return tf.compat.v1.data.Dataset.from_tensors({\n 'language':\n tf.sparse.SparseTensor(\n values=['a', 'c', 'b', 'c'],\n indices=[[0, 0], [0, 1], [1, 0], [1, 1]],\n dense_shape=[2, 2]),\n })\n\n feature_columns = (self._fc_lib.categorical_column_with_vocabulary_list(\n 'language', vocabulary_list=['a', 'b', 'c']),)\n\n # Check prediction for each sparse_combiner.\n # With sparse_combiner = 'sum', we have\n # logits_1 = w_a + w_c + bias\n # = 2.0 + 5.0 + 5.0 = 12.0\n # logits_2 = w_b + w_c + bias\n # = 3.0 + 5.0 + 5.0 = 13.0\n linear_classifier = self._linear_classifier_fn(\n feature_columns=feature_columns, model_dir=self._model_dir)\n predictions = linear_classifier.predict(input_fn=_input_fn)\n predicted_scores = list([x['logits'] for x in predictions])\n self.assertAllClose([[12.0], [13.0]], predicted_scores)\n\n # With sparse_combiner = 'mean', we have\n # logits_1 = 1/2 * (w_a + w_c) + bias\n # = 1/2 * (2.0 + 5.0) + 5.0 = 8.5\n # logits_2 = 1/2 * (w_b + w_c) + bias\n # = 1/2 * (3.0 + 5.0) + 5.0 = 9.0\n linear_classifier = self._linear_classifier_fn(\n feature_columns=feature_columns,\n model_dir=self._model_dir,\n sparse_combiner='mean')\n predictions = linear_classifier.predict(input_fn=_input_fn)\n predicted_scores = list([x['logits'] for x in predictions])\n self.assertAllClose([[8.5], [9.0]], predicted_scores)\n\n # With sparse_combiner = 'sqrtn', we have\n # logits_1 = sqrt(2)/2 * (w_a + w_c) + bias\n # = sqrt(2)/2 * (2.0 + 5.0) + 5.0 = 9.94974\n # logits_2 = sqrt(2)/2 * (w_b + w_c) + bias\n # = sqrt(2)/2 * (3.0 + 5.0) + 5.0 = 10.65685\n linear_classifier = self._linear_classifier_fn(\n feature_columns=feature_columns,\n model_dir=self._model_dir,\n sparse_combiner='sqrtn')\n predictions = linear_classifier.predict(input_fn=_input_fn)\n predicted_scores = list([x['logits'] for x in predictions])\n self.assertAllClose([[9.94974], [10.65685]], predicted_scores)\n\n\nclass BaseLinearClassifierIntegrationTest(object):\n\n def __init__(self, linear_classifier_fn, fc_lib=feature_column):\n self._linear_classifier_fn = linear_classifier_fn\n self._fc_lib = fc_lib\n\n def setUp(self):\n self._model_dir = tempfile.mkdtemp()\n\n def tearDown(self):\n if self._model_dir:\n shutil.rmtree(self._model_dir)\n\n def _test_complete_flow(self, n_classes, train_input_fn, eval_input_fn,\n predict_input_fn, input_dimension, prediction_length):\n feature_columns = [\n self._fc_lib.numeric_column('x', shape=(input_dimension,))\n ]\n est = self._linear_classifier_fn(\n feature_columns=feature_columns,\n n_classes=n_classes,\n model_dir=self._model_dir)\n\n # TRAIN\n # learn y = x\n est.train(train_input_fn, steps=200)\n\n # EVALUTE\n scores = est.evaluate(eval_input_fn)\n self.assertEqual(200, scores[tf.compat.v1.GraphKeys.GLOBAL_STEP])\n self.assertIn(metric_keys.MetricKeys.LOSS, six.iterkeys(scores))\n\n # PREDICT\n predictions = np.array(\n [x['classes'] for x in est.predict(predict_input_fn)])\n self.assertAllEqual((prediction_length, 1), predictions.shape)\n\n # EXPORT\n feature_spec = tf.compat.v1.feature_column.make_parse_example_spec(\n feature_columns)\n serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(\n feature_spec)\n export_dir = est.export_saved_model(tempfile.mkdtemp(),\n serving_input_receiver_fn)\n self.assertTrue(tf.compat.v1.gfile.Exists(export_dir))\n\n def _test_numpy_input_fn(self, n_classes):\n \"\"\"Tests complete flow with 
numpy_input_fn.\"\"\"\n input_dimension = 4\n batch_size = 10\n prediction_length = batch_size\n data = np.linspace(0., 2., batch_size * input_dimension, dtype=np.float32)\n data = data.reshape(batch_size, input_dimension)\n target = np.array([1] * batch_size)\n\n train_input_fn = numpy_io.numpy_input_fn(\n x={'x': data},\n y=target,\n batch_size=batch_size,\n num_epochs=None,\n shuffle=True)\n eval_input_fn = numpy_io.numpy_input_fn(\n x={'x': data},\n y=target,\n batch_size=batch_size,\n num_epochs=1,\n shuffle=False)\n predict_input_fn = numpy_io.numpy_input_fn(\n x={'x': data},\n y=None,\n batch_size=batch_size,\n num_epochs=1,\n shuffle=False)\n\n self._test_complete_flow(\n n_classes=n_classes,\n train_input_fn=train_input_fn,\n eval_input_fn=eval_input_fn,\n predict_input_fn=predict_input_fn,\n input_dimension=input_dimension,\n prediction_length=prediction_length)\n\n def test_binary_classes_numpy_input_fn(self):\n self._test_numpy_input_fn(n_classes=2)\n\n def test_multi_classes_numpy_input_fn(self):\n self._test_numpy_input_fn(n_classes=4)\n\n def _test_pandas_input_fn(self, n_classes):\n \"\"\"Tests complete flow with pandas_input_fn.\"\"\"\n if not HAS_PANDAS:\n return\n\n # Pandas DataFrame natually supports 1 dim data only.\n input_dimension = 1\n batch_size = 10\n data = np.array([1., 2., 3., 4.], dtype=np.float32)\n target = np.array([1, 0, 1, 0], dtype=np.int32)\n x = pd.DataFrame({'x': data})\n y = pd.Series(target)\n prediction_length = 4\n\n train_input_fn = pandas_io.pandas_input_fn(\n x=x, y=y, batch_size=batch_size, num_epochs=None, shuffle=True)\n eval_input_fn = pandas_io.pandas_input_fn(\n x=x, y=y, batch_size=batch_size, shuffle=False)\n predict_input_fn = pandas_io.pandas_input_fn(\n x=x, batch_size=batch_size, shuffle=False)\n\n self._test_complete_flow(\n n_classes=n_classes,\n train_input_fn=train_input_fn,\n eval_input_fn=eval_input_fn,\n predict_input_fn=predict_input_fn,\n input_dimension=input_dimension,\n prediction_length=prediction_length)\n\n def test_binary_classes_pandas_input_fn(self):\n self._test_pandas_input_fn(n_classes=2)\n\n def test_multi_classes_pandas_input_fn(self):\n self._test_pandas_input_fn(n_classes=4)\n\n def _test_input_fn_from_parse_example(self, n_classes):\n \"\"\"Tests complete flow with input_fn constructed from parse_example.\"\"\"\n input_dimension = 2\n batch_size = 10\n prediction_length = batch_size\n data = np.linspace(0., 2., batch_size * input_dimension, dtype=np.float32)\n data = data.reshape(batch_size, input_dimension)\n target = np.array([1] * batch_size, dtype=np.int64)\n\n serialized_examples = []\n for x, y in zip(data, target):\n example = example_pb2.Example(\n features=feature_pb2.Features(\n feature={\n 'x':\n feature_pb2.Feature(\n float_list=feature_pb2.FloatList(value=x)),\n 'y':\n feature_pb2.Feature(\n int64_list=feature_pb2.Int64List(value=[y])),\n }))\n serialized_examples.append(example.SerializeToString())\n\n feature_spec = {\n 'x': tf.io.FixedLenFeature([input_dimension], tf.dtypes.float32),\n 'y': tf.io.FixedLenFeature([1], tf.dtypes.int64),\n }\n\n def _train_input_fn():\n feature_map = tf.compat.v1.io.parse_example(serialized_examples,\n feature_spec)\n features = queue_parsed_features(feature_map)\n labels = features.pop('y')\n return features, labels\n\n def _eval_input_fn():\n feature_map = tf.compat.v1.io.parse_example(\n tf.compat.v1.train.limit_epochs(serialized_examples, num_epochs=1),\n feature_spec)\n features = queue_parsed_features(feature_map)\n labels = features.pop('y')\n return 
features, labels\n\n def _predict_input_fn():\n feature_map = tf.compat.v1.io.parse_example(\n tf.compat.v1.train.limit_epochs(serialized_examples, num_epochs=1),\n feature_spec)\n features = queue_parsed_features(feature_map)\n features.pop('y')\n return features, None\n\n self._test_complete_flow(\n n_classes=n_classes,\n train_input_fn=_train_input_fn,\n eval_input_fn=_eval_input_fn,\n predict_input_fn=_predict_input_fn,\n input_dimension=input_dimension,\n prediction_length=prediction_length)\n\n def test_binary_classes_input_fn_from_parse_example(self):\n self._test_input_fn_from_parse_example(n_classes=2)\n\n def test_multi_classes_input_fn_from_parse_example(self):\n self._test_input_fn_from_parse_example(n_classes=4)\n\n\nclass BaseLinearLogitFnTest(object):\n\n def __init__(self, fc_lib=feature_column):\n self._fc_lib = fc_lib\n\n def test_basic_logit_correctness(self):\n \"\"\"linear_logit_fn simply wraps feature_column_lib.linear_model.\"\"\"\n age = self._fc_lib.numeric_column('age')\n with tf.Graph().as_default():\n logit_fn = linear.linear_logit_fn_builder(units=2, feature_columns=[age])\n logits = logit_fn(features={'age': [[23.], [31.]]})\n bias_var = tf.compat.v1.get_collection(\n tf.compat.v1.GraphKeys.GLOBAL_VARIABLES,\n 'linear_model/bias_weights')[0]\n age_var = tf.compat.v1.get_collection(\n tf.compat.v1.GraphKeys.GLOBAL_VARIABLES, 'linear_model/age')[0]\n with tf.compat.v1.Session() as sess:\n sess.run([tf.compat.v1.initializers.global_variables()])\n self.assertAllClose([[0., 0.], [0., 0.]], logits.eval())\n sess.run(bias_var.assign([10., 5.]))\n self.assertAllClose([[10., 5.], [10., 5.]], logits.eval())\n sess.run(age_var.assign([[2.0, 3.0]]))\n # [2 * 23 + 10, 3 * 23 + 5] = [56, 74].\n # [2 * 31 + 10, 3 * 31 + 5] = [72, 98]\n self.assertAllClose([[56., 74.], [72., 98.]], logits.eval())\n\n def test_compute_fraction_of_zero(self):\n \"\"\"Tests the calculation of sparsity.\"\"\"\n if self._fc_lib != feature_column:\n return\n age = tf.feature_column.numeric_column('age')\n occupation = feature_column.categorical_column_with_hash_bucket(\n 'occupation', hash_bucket_size=5)\n with tf.Graph().as_default():\n cols_to_vars = {}\n tf.compat.v1.feature_column.linear_model(\n features={\n 'age': [[23.], [31.]],\n 'occupation': [['doctor'], ['engineer']]\n },\n feature_columns=[age, occupation],\n units=3,\n cols_to_vars=cols_to_vars)\n cols_to_vars.pop('bias')\n fraction_zero = linear._compute_fraction_of_zero(\n list(cols_to_vars.values()))\n age_var = tf.compat.v1.get_collection(\n tf.compat.v1.GraphKeys.GLOBAL_VARIABLES, 'linear_model/age')[0]\n with tf.compat.v1.Session() as sess:\n sess.run([tf.compat.v1.initializers.global_variables()])\n # Upon initialization, all variables will be zero.\n self.assertAllClose(1, fraction_zero.eval())\n\n sess.run(age_var.assign([[2.0, 0.0, -1.0]]))\n # 1 of the 3 age weights are zero, and all of the 15 (5 hash buckets\n # x 3-dim output) are zero.\n self.assertAllClose(16. 
/ 18., fraction_zero.eval())\n\n def test_compute_fraction_of_zero_v2(self):\n \"\"\"Tests the calculation of sparsity.\"\"\"\n if self._fc_lib != feature_column_v2:\n return\n\n age = tf.feature_column.numeric_column('age')\n occupation = tf.feature_column.categorical_column_with_hash_bucket(\n 'occupation', hash_bucket_size=5)\n with tf.Graph().as_default():\n model = feature_column_v2.LinearModel(\n feature_columns=[age, occupation], units=3, name='linear_model')\n features = {\n 'age': [[23.], [31.]],\n 'occupation': [['doctor'], ['engineer']]\n }\n model(features)\n variables = model.variables\n variables.remove(model.bias)\n fraction_zero = linear._compute_fraction_of_zero(variables)\n age_var = tf.compat.v1.get_collection(\n tf.compat.v1.GraphKeys.GLOBAL_VARIABLES, 'linear_model/age')[0]\n with tf.compat.v1.Session() as sess:\n sess.run([tf.compat.v1.initializers.global_variables()])\n # Upon initialization, all variables will be zero.\n self.assertAllClose(1, fraction_zero.eval())\n\n sess.run(age_var.assign([[2.0, 0.0, -1.0]]))\n # 1 of the 3 age weights are zero, and all of the 15 (5 hash buckets\n # x 3-dim output) are zero.\n self.assertAllClose(16. / 18., fraction_zero.eval())\n\n\nclass BaseLinearWarmStartingTest(object):\n\n def __init__(self,\n _linear_classifier_fn,\n _linear_regressor_fn,\n fc_lib=feature_column):\n self._linear_classifier_fn = _linear_classifier_fn\n self._linear_regressor_fn = _linear_regressor_fn\n self._fc_lib = fc_lib\n\n def setUp(self):\n # Create a directory to save our old checkpoint and vocabularies to.\n self._ckpt_and_vocab_dir = tempfile.mkdtemp()\n\n # Make a dummy input_fn.\n def _input_fn():\n features = {\n 'age': [[23.], [31.]],\n 'age_in_years': [[23.], [31.]],\n 'occupation': [['doctor'], ['consultant']]\n }\n return features, [0, 1]\n\n self._input_fn = _input_fn\n\n def tearDown(self):\n # Clean up checkpoint / vocab dir.\n tf.compat.v1.summary.FileWriterCache.clear()\n shutil.rmtree(self._ckpt_and_vocab_dir)\n\n def test_classifier_basic_warm_starting(self):\n \"\"\"Tests correctness of LinearClassifier default warm-start.\"\"\"\n age = self._fc_lib.numeric_column('age')\n\n # Create a LinearClassifier and train to save a checkpoint.\n linear_classifier = self._linear_classifier_fn(\n feature_columns=[age],\n model_dir=self._ckpt_and_vocab_dir,\n n_classes=4,\n optimizer='SGD')\n linear_classifier.train(input_fn=self._input_fn, max_steps=1)\n\n # Create a second LinearClassifier, warm-started from the first. 
Use a\n # learning_rate = 0.0 optimizer to check values (use SGD so we don't have\n # accumulator values that change).\n warm_started_linear_classifier = self._linear_classifier_fn(\n feature_columns=[age],\n n_classes=4,\n optimizer=tf.compat.v1.train.GradientDescentOptimizer(\n learning_rate=0.0),\n warm_start_from=linear_classifier.model_dir)\n\n warm_started_linear_classifier.train(input_fn=self._input_fn, max_steps=1)\n for variable_name in warm_started_linear_classifier.get_variable_names():\n self.assertAllClose(\n linear_classifier.get_variable_value(variable_name),\n warm_started_linear_classifier.get_variable_value(variable_name))\n\n def test_regressor_basic_warm_starting(self):\n \"\"\"Tests correctness of LinearRegressor default warm-start.\"\"\"\n age = self._fc_lib.numeric_column('age')\n\n # Create a LinearRegressor and train to save a checkpoint.\n linear_regressor = self._linear_regressor_fn(\n feature_columns=[age],\n model_dir=self._ckpt_and_vocab_dir,\n optimizer='SGD')\n linear_regressor.train(input_fn=self._input_fn, max_steps=1)\n\n # Create a second LinearRegressor, warm-started from the first. Use a\n # learning_rate = 0.0 optimizer to check values (use SGD so we don't have\n # accumulator values that change).\n warm_started_linear_regressor = self._linear_regressor_fn(\n feature_columns=[age],\n optimizer=tf.compat.v1.train.GradientDescentOptimizer(\n learning_rate=0.0),\n warm_start_from=linear_regressor.model_dir)\n\n warm_started_linear_regressor.train(input_fn=self._input_fn, max_steps=1)\n for variable_name in warm_started_linear_regressor.get_variable_names():\n self.assertAllClose(\n linear_regressor.get_variable_value(variable_name),\n warm_started_linear_regressor.get_variable_value(variable_name))\n\n def test_warm_starting_selective_variables(self):\n \"\"\"Tests selecting variables to warm-start.\"\"\"\n age = self._fc_lib.numeric_column('age')\n\n # Create a LinearClassifier and train to save a checkpoint.\n linear_classifier = self._linear_classifier_fn(\n feature_columns=[age],\n model_dir=self._ckpt_and_vocab_dir,\n n_classes=4,\n optimizer='SGD')\n linear_classifier.train(input_fn=self._input_fn, max_steps=1)\n\n # Create a second LinearClassifier, warm-started from the first. 
Use a\n # learning_rate = 0.0 optimizer to check values (use SGD so we don't have\n # accumulator values that change).\n warm_started_linear_classifier = self._linear_classifier_fn(\n feature_columns=[age],\n n_classes=4,\n optimizer=tf.compat.v1.train.GradientDescentOptimizer(\n learning_rate=0.0),\n # The provided regular expression will only warm-start the age variable\n # and not the bias.\n warm_start_from=estimator.WarmStartSettings(\n ckpt_to_initialize_from=linear_classifier.model_dir,\n vars_to_warm_start='.*(age).*'))\n\n warm_started_linear_classifier.train(input_fn=self._input_fn, max_steps=1)\n self.assertAllClose(\n linear_classifier.get_variable_value(AGE_WEIGHT_NAME),\n warm_started_linear_classifier.get_variable_value(AGE_WEIGHT_NAME))\n # Bias should still be zero from initialization.\n self.assertAllClose(\n [0.0] * 4, warm_started_linear_classifier.get_variable_value(BIAS_NAME))\n\n def test_warm_starting_with_vocab_remapping_and_partitioning(self):\n \"\"\"Tests warm-starting with vocab remapping and partitioning.\"\"\"\n vocab_list = ['doctor', 'lawyer', 'consultant']\n vocab_file = os.path.join(self._ckpt_and_vocab_dir, 'occupation_vocab')\n with open(vocab_file, 'w') as f:\n f.write('\\n'.join(vocab_list))\n occupation = self._fc_lib.categorical_column_with_vocabulary_file(\n 'occupation',\n vocabulary_file=vocab_file,\n vocabulary_size=len(vocab_list))\n\n # Create a LinearClassifier and train to save a checkpoint.\n partitioner = tf.compat.v1.fixed_size_partitioner(num_shards=2)\n linear_classifier = self._linear_classifier_fn(\n feature_columns=[occupation],\n model_dir=self._ckpt_and_vocab_dir,\n n_classes=4,\n optimizer='SGD',\n partitioner=partitioner)\n linear_classifier.train(input_fn=self._input_fn, max_steps=1)\n\n # Create a second LinearClassifier, warm-started from the first. Use a\n # learning_rate = 0.0 optimizer to check values (use SGD so we don't have\n # accumulator values that change). Use a new FeatureColumn with a\n # different vocabulary for occupation.\n new_vocab_list = ['doctor', 'consultant', 'engineer']\n new_vocab_file = os.path.join(self._ckpt_and_vocab_dir,\n 'new_occupation_vocab')\n with open(new_vocab_file, 'w') as f:\n f.write('\\n'.join(new_vocab_list))\n new_occupation = self._fc_lib.categorical_column_with_vocabulary_file(\n 'occupation',\n vocabulary_file=new_vocab_file,\n vocabulary_size=len(new_vocab_list))\n # We can create our VocabInfo object from the new and old occupation\n # FeatureColumn's.\n occupation_vocab_info = estimator.VocabInfo(\n new_vocab=new_occupation.vocabulary_file,\n new_vocab_size=new_occupation.vocabulary_size,\n num_oov_buckets=new_occupation.num_oov_buckets,\n old_vocab=occupation.vocabulary_file,\n old_vocab_size=occupation.vocabulary_size,\n # Can't use constant_initializer with load_and_remap. 
In practice,\n # use a truncated normal initializer.\n backup_initializer=tf.compat.v1.initializers.random_uniform(\n minval=0.39, maxval=0.39))\n warm_started_linear_classifier = self._linear_classifier_fn(\n feature_columns=[occupation],\n n_classes=4,\n optimizer=tf.compat.v1.train.GradientDescentOptimizer(\n learning_rate=0.0),\n warm_start_from=estimator.WarmStartSettings(\n ckpt_to_initialize_from=linear_classifier.model_dir,\n var_name_to_vocab_info={\n OCCUPATION_WEIGHT_NAME: occupation_vocab_info\n },\n # Explicitly providing None here will only warm-start variables\n # referenced in var_name_to_vocab_info (the bias will not be\n # warm-started).\n vars_to_warm_start=None),\n partitioner=partitioner)\n\n warm_started_linear_classifier.train(input_fn=self._input_fn, max_steps=1)\n # 'doctor' was ID-0 and still ID-0.\n self.assertAllClose(\n linear_classifier.get_variable_value(OCCUPATION_WEIGHT_NAME)[0, :],\n warm_started_linear_classifier.get_variable_value(\n OCCUPATION_WEIGHT_NAME)[0, :])\n # 'consultant' was ID-2 and now ID-1.\n self.assertAllClose(\n linear_classifier.get_variable_value(OCCUPATION_WEIGHT_NAME)[2, :],\n warm_started_linear_classifier.get_variable_value(\n OCCUPATION_WEIGHT_NAME)[1, :])\n # 'engineer' is a new entry and should be initialized with the\n # backup_initializer in VocabInfo.\n self.assertAllClose([0.39] * 4,\n warm_started_linear_classifier.get_variable_value(\n OCCUPATION_WEIGHT_NAME)[2, :])\n # Bias should still be zero (from initialization logic).\n self.assertAllClose(\n [0.0] * 4, warm_started_linear_classifier.get_variable_value(BIAS_NAME))\n\n def test_warm_starting_with_naming_change(self):\n \"\"\"Tests warm-starting with a Tensor name remapping.\"\"\"\n age_in_years = self._fc_lib.numeric_column('age_in_years')\n\n # Create a LinearClassifier and train to save a checkpoint.\n linear_classifier = self._linear_classifier_fn(\n feature_columns=[age_in_years],\n model_dir=self._ckpt_and_vocab_dir,\n n_classes=4,\n optimizer='SGD')\n linear_classifier.train(input_fn=self._input_fn, max_steps=1)\n\n # Create a second LinearClassifier, warm-started from the first. Use a\n # learning_rate = 0.0 optimizer to check values (use SGD so we don't have\n # accumulator values that change).\n warm_started_linear_classifier = self._linear_classifier_fn(\n feature_columns=[self._fc_lib.numeric_column('age')],\n n_classes=4,\n optimizer=tf.compat.v1.train.GradientDescentOptimizer(\n learning_rate=0.0),\n # The 'age' variable correspond to the 'age_in_years' variable in the\n # previous model.\n warm_start_from=estimator.WarmStartSettings(\n ckpt_to_initialize_from=linear_classifier.model_dir,\n var_name_to_prev_var_name={\n AGE_WEIGHT_NAME: AGE_WEIGHT_NAME.replace('age', 'age_in_years')\n }))\n\n warm_started_linear_classifier.train(input_fn=self._input_fn, max_steps=1)\n self.assertAllClose(\n linear_classifier.get_variable_value(\n AGE_WEIGHT_NAME.replace('age', 'age_in_years')),\n warm_started_linear_classifier.get_variable_value(AGE_WEIGHT_NAME))\n # The bias is also warm-started (with no name remapping).\n self.assertAllClose(\n linear_classifier.get_variable_value(BIAS_NAME),\n warm_started_linear_classifier.get_variable_value(BIAS_NAME))\n", "# This file is MACHINE GENERATED! Do not edit.\n# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.\n\"\"\"Public API for tf.keras.mixed_precision namespace.\n\"\"\"\n\nfrom __future__ import print_function as _print_function\n\nimport sys as _sys\n\nfrom . 
import experimental\n\ndel _print_function\n\nfrom tensorflow.python.util import module_wrapper as _module_wrapper\n\nif not isinstance(_sys.modules[__name__], _module_wrapper.TFModuleWrapper):\n _sys.modules[__name__] = _module_wrapper.TFModuleWrapper(\n _sys.modules[__name__], \"keras.mixed_precision\", public_apis=None, deprecation=True,\n has_lite=False)\n" ]
[ [ "numpy.ones", "numpy.rollaxis" ], [ "tensorflow.queue.FIFOQueue", "tensorflow.feature_column.categorical_column_with_hash_bucket", "tensorflow.compat.v1.train.Optimizer", "pandas.Series", "numpy.sum", "tensorflow.no_op", "tensorflow.compat.v1.train.limit_epochs", "tensorflow.Variable", "tensorflow.train.load_variable", "tensorflow.compat.v1.test.mock.patch.object", "tensorflow.compat.v1.data.Dataset.zip", "tensorflow.compat.v1.fixed_size_partitioner", "tensorflow.compat.v1.get_variable", "tensorflow.compat.v1.train.Saver", "tensorflow.compat.v1.test.mock.MagicMock", "tensorflow.compat.v1.summary.FileWriterCache.clear", "tensorflow.python.feature_column.feature_column.categorical_column_with_hash_bucket", "numpy.reshape", "tensorflow.Graph", "tensorflow.compat.v1.get_variable_scope", "tensorflow.io.FixedLenFeature", "tensorflow.compat.v1.debugging.assert_less", "tensorflow.python.feature_column.feature_column_v2.LinearModel", "tensorflow.compat.v1.assign_add", "numpy.linspace", "numpy.average", "tensorflow.feature_column.numeric_column", "tensorflow.compat.v1.Session", "tensorflow.train.list_variables", "tensorflow.compat.v1.feature_column.make_parse_example_spec", "tensorflow.python.feature_column.feature_column.numeric_column", "tensorflow.sparse.SparseTensor", "tensorflow.cast", "tensorflow.compat.v1.initializers.random_uniform", "tensorflow.compat.v1.initializers.global_variables", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.control_dependencies", "tensorflow.core.example.feature_pb2.Int64List", "tensorflow.compat.v1.io.parse_example", "tensorflow.core.example.feature_pb2.FloatList", "tensorflow.math.abs", "tensorflow.compat.v1.gfile.Exists", "pandas.DataFrame", "numpy.exp", "tensorflow.compat.v1.feature_column.linear_model", "tensorflow.compat.v1.train.GradientDescentOptimizer", "tensorflow.python.framework.ops.name_scope", "numpy.array", "tensorflow.compat.v1.get_collection" ], [ "tensorflow.python.util.module_wrapper.TFModuleWrapper" ] ]
Animatory/pytorch-image-models
[ "3ace100fcfdab3619dc71307613c42e53fb70221" ]
[ "timm/models/pit.py" ]
[ "\"\"\" Pooling-based Vision Transformer (PiT) in PyTorch\n\nA PyTorch implement of Pooling-based Vision Transformers as described in\n'Rethinking Spatial Dimensions of Vision Transformers' - https://arxiv.org/abs/2103.16302\n\nThis code was adapted from the original version at https://github.com/naver-ai/pit, original copyright below.\n\nModifications for timm by / Copyright 2020 Ross Wightman\n\"\"\"\n# PiT\n# Copyright 2021-present NAVER Corp.\n# Apache License v2.0\n\nimport math\nimport re\nfrom functools import partial\nfrom typing import Tuple\n\nimport torch\nfrom torch import nn\n\nfrom timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD\nfrom .helpers import build_model_with_cfg\nfrom .layers import trunc_normal_, to_2tuple\nfrom .registry import register_model\nfrom .vision_transformer import Block\n\n\ndef _cfg(url='', **kwargs):\n return {\n 'url': url,\n 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,\n 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,\n 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,\n 'first_conv': 'patch_embed.conv', 'classifier': 'head',\n **kwargs\n }\n\n\ndefault_cfgs = {\n # deit models (FB weights)\n 'pit_ti_224': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_ti_730.pth'),\n 'pit_xs_224': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_xs_781.pth'),\n 'pit_s_224': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_s_809.pth'),\n 'pit_b_224': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_b_820.pth'),\n 'pit_ti_distilled_224': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_ti_distill_746.pth',\n classifier=('head', 'head_dist')),\n 'pit_xs_distilled_224': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_xs_distill_791.pth',\n classifier=('head', 'head_dist')),\n 'pit_s_distilled_224': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_s_distill_819.pth',\n classifier=('head', 'head_dist')),\n 'pit_b_distilled_224': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_b_distill_840.pth',\n classifier=('head', 'head_dist')),\n}\n\n\nclass SequentialTuple(nn.Sequential):\n \"\"\" This module exists to work around torchscript typing issues list -> list\"\"\"\n\n def __init__(self, *args):\n super(SequentialTuple, self).__init__(*args)\n\n def forward(self, x: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:\n for module in self:\n x = module(x)\n return x\n\n\nclass Transformer(nn.Module):\n def __init__(\n self, base_dim, depth, heads, mlp_ratio, pool=None, drop_rate=.0, attn_drop_rate=.0, drop_path_prob=None):\n super(Transformer, self).__init__()\n self.layers = nn.ModuleList([])\n embed_dim = base_dim * heads\n\n self.blocks = nn.Sequential(*[\n Block(\n dim=embed_dim,\n num_heads=heads,\n mlp_ratio=mlp_ratio,\n qkv_bias=True,\n drop=drop_rate,\n attn_drop=attn_drop_rate,\n drop_path=drop_path_prob[i],\n norm_layer=partial(nn.LayerNorm, eps=1e-6)\n )\n for i in range(depth)])\n\n self.pool = pool\n\n def forward(self, x: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:\n x, cls_tokens = x\n B, C, H, W = x.shape\n token_length 
= cls_tokens.shape[1]\n\n x = x.flatten(2).transpose(1, 2)\n x = torch.cat((cls_tokens, x), dim=1)\n\n x = self.blocks(x)\n\n cls_tokens = x[:, :token_length]\n x = x[:, token_length:]\n x = x.transpose(1, 2).reshape(B, C, H, W)\n\n if self.pool is not None:\n x, cls_tokens = self.pool(x, cls_tokens)\n return x, cls_tokens\n\n\nclass ConvHeadPooling(nn.Module):\n def __init__(self, in_feature, out_feature, stride, padding_mode='zeros'):\n super(ConvHeadPooling, self).__init__()\n\n self.conv = nn.Conv2d(\n in_feature, out_feature, kernel_size=stride + 1, padding=stride // 2, stride=stride,\n padding_mode=padding_mode, groups=in_feature)\n self.fc = nn.Linear(in_feature, out_feature)\n\n def forward(self, x, cls_token) -> Tuple[torch.Tensor, torch.Tensor]:\n x = self.conv(x)\n cls_token = self.fc(cls_token)\n\n return x, cls_token\n\n\nclass ConvEmbedding(nn.Module):\n def __init__(self, in_channels, out_channels, patch_size, stride, padding):\n super(ConvEmbedding, self).__init__()\n self.conv = nn.Conv2d(\n in_channels, out_channels, kernel_size=patch_size, stride=stride, padding=padding, bias=True)\n\n def forward(self, x):\n x = self.conv(x)\n return x\n\n\nclass PoolingVisionTransformer(nn.Module):\n \"\"\" Pooling-based Vision Transformer\n\n A PyTorch implement of 'Rethinking Spatial Dimensions of Vision Transformers'\n - https://arxiv.org/abs/2103.16302\n \"\"\"\n\n def __init__(self, img_size, patch_size, stride, base_dims, depth, heads,\n mlp_ratio, num_classes=1000, in_chans=3, distilled=False,\n attn_drop_rate=.0, drop_rate=.0, drop_path_rate=.0):\n super(PoolingVisionTransformer, self).__init__()\n\n padding = 0\n img_size = to_2tuple(img_size)\n patch_size = to_2tuple(patch_size)\n height = math.floor((img_size[0] + 2 * padding - patch_size[0]) / stride + 1)\n width = math.floor((img_size[1] + 2 * padding - patch_size[1]) / stride + 1)\n\n self.base_dims = base_dims\n self.heads = heads\n self.num_classes = num_classes\n self.num_tokens = 2 if distilled else 1\n\n self.patch_size = patch_size\n self.pos_embed = nn.Parameter(torch.randn(1, base_dims[0] * heads[0], height, width))\n self.patch_embed = ConvEmbedding(in_chans, base_dims[0] * heads[0], patch_size, stride, padding)\n\n self.cls_token = nn.Parameter(torch.randn(1, self.num_tokens, base_dims[0] * heads[0]))\n self.pos_drop = nn.Dropout(p=drop_rate)\n\n transformers = []\n # stochastic depth decay rule\n dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depth)).split(depth)]\n for stage in range(len(depth)):\n pool = None\n if stage < len(heads) - 1:\n pool = ConvHeadPooling(\n base_dims[stage] * heads[stage], base_dims[stage + 1] * heads[stage + 1], stride=2)\n transformers += [Transformer(\n base_dims[stage], depth[stage], heads[stage], mlp_ratio, pool=pool,\n drop_rate=drop_rate, attn_drop_rate=attn_drop_rate, drop_path_prob=dpr[stage])\n ]\n self.transformers = SequentialTuple(*transformers)\n self.norm = nn.LayerNorm(base_dims[-1] * heads[-1], eps=1e-6)\n self.num_features = self.embed_dim = base_dims[-1] * heads[-1]\n\n # Classifier head\n self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()\n self.head_dist = None\n if distilled:\n self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()\n\n trunc_normal_(self.pos_embed, std=.02)\n trunc_normal_(self.cls_token, std=.02)\n self.apply(self._init_weights)\n\n def _init_weights(self, m):\n if isinstance(m, nn.LayerNorm):\n nn.init.constant_(m.bias, 0)\n 
nn.init.constant_(m.weight, 1.0)\n\n @torch.jit.ignore\n def no_weight_decay(self):\n return {'pos_embed', 'cls_token'}\n\n def get_classifier(self):\n if self.head_dist is not None:\n return self.head, self.head_dist\n else:\n return self.head\n\n def reset_classifier(self, num_classes, global_pool=''):\n self.num_classes = num_classes\n self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()\n if self.head_dist is not None:\n self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()\n\n def forward_features(self, x):\n x = self.patch_embed(x)\n x = self.pos_drop(x + self.pos_embed)\n cls_tokens = self.cls_token.expand(x.shape[0], -1, -1)\n x, cls_tokens = self.transformers((x, cls_tokens))\n cls_tokens = self.norm(cls_tokens)\n if self.head_dist is not None:\n return cls_tokens[:, 0], cls_tokens[:, 1]\n else:\n return cls_tokens[:, 0]\n\n def forward(self, x):\n x = self.forward_features(x)\n if self.head_dist is not None:\n x, x_dist = self.head(x[0]), self.head_dist(x[1]) # x must be a tuple\n if self.training and not torch.jit.is_scripting():\n return x, x_dist\n else:\n return (x + x_dist) / 2\n else:\n return self.head(x)\n\n\ndef checkpoint_filter_fn(state_dict, model):\n \"\"\" preprocess checkpoints \"\"\"\n out_dict = {}\n p_blocks = re.compile(r'pools\\.(\\d)\\.')\n for k, v in state_dict.items():\n # FIXME need to update resize for PiT impl\n # if k == 'pos_embed' and v.shape != model.pos_embed.shape:\n # # To resize pos embedding when using model at different size from pretrained weights\n # v = resize_pos_embed(v, model.pos_embed)\n k = p_blocks.sub(lambda exp: f'transformers.{int(exp.group(1))}.pool.', k)\n out_dict[k] = v\n return out_dict\n\n\ndef _create_pit(variant, pretrained=False, **kwargs):\n if kwargs.get('features_only', None):\n raise RuntimeError('features_only not implemented for Vision Transformer models.')\n\n model = build_model_with_cfg(\n PoolingVisionTransformer, variant, pretrained,\n default_cfg=default_cfgs[variant],\n pretrained_filter_fn=checkpoint_filter_fn,\n **kwargs)\n return model\n\n\n@register_model\ndef pit_b_224(pretrained, **kwargs):\n model_kwargs = dict(\n patch_size=14,\n stride=7,\n base_dims=[64, 64, 64],\n depth=[3, 6, 4],\n heads=[4, 8, 16],\n mlp_ratio=4,\n **kwargs\n )\n return _create_pit('pit_b_224', pretrained, **model_kwargs)\n\n\n@register_model\ndef pit_s_224(pretrained, **kwargs):\n model_kwargs = dict(\n patch_size=16,\n stride=8,\n base_dims=[48, 48, 48],\n depth=[2, 6, 4],\n heads=[3, 6, 12],\n mlp_ratio=4,\n **kwargs\n )\n return _create_pit('pit_s_224', pretrained, **model_kwargs)\n\n\n@register_model\ndef pit_xs_224(pretrained, **kwargs):\n model_kwargs = dict(\n patch_size=16,\n stride=8,\n base_dims=[48, 48, 48],\n depth=[2, 6, 4],\n heads=[2, 4, 8],\n mlp_ratio=4,\n **kwargs\n )\n return _create_pit('pit_xs_224', pretrained, **model_kwargs)\n\n\n@register_model\ndef pit_ti_224(pretrained, **kwargs):\n model_kwargs = dict(\n patch_size=16,\n stride=8,\n base_dims=[32, 32, 32],\n depth=[2, 6, 4],\n heads=[2, 4, 8],\n mlp_ratio=4,\n **kwargs\n )\n return _create_pit('pit_ti_224', pretrained, **model_kwargs)\n\n\n@register_model\ndef pit_b_distilled_224(pretrained, **kwargs):\n model_kwargs = dict(\n patch_size=14,\n stride=7,\n base_dims=[64, 64, 64],\n depth=[3, 6, 4],\n heads=[4, 8, 16],\n mlp_ratio=4,\n distilled=True,\n **kwargs\n )\n return _create_pit('pit_b_distilled_224', pretrained, **model_kwargs)\n\n\n@register_model\ndef 
pit_s_distilled_224(pretrained, **kwargs):\n model_kwargs = dict(\n patch_size=16,\n stride=8,\n base_dims=[48, 48, 48],\n depth=[2, 6, 4],\n heads=[3, 6, 12],\n mlp_ratio=4,\n distilled=True,\n **kwargs\n )\n return _create_pit('pit_s_distilled_224', pretrained, **model_kwargs)\n\n\n@register_model\ndef pit_xs_distilled_224(pretrained, **kwargs):\n model_kwargs = dict(\n patch_size=16,\n stride=8,\n base_dims=[48, 48, 48],\n depth=[2, 6, 4],\n heads=[2, 4, 8],\n mlp_ratio=4,\n distilled=True,\n **kwargs\n )\n return _create_pit('pit_xs_distilled_224', pretrained, **model_kwargs)\n\n\n@register_model\ndef pit_ti_distilled_224(pretrained, **kwargs):\n model_kwargs = dict(\n patch_size=16,\n stride=8,\n base_dims=[32, 32, 32],\n depth=[2, 6, 4],\n heads=[2, 4, 8],\n mlp_ratio=4,\n distilled=True,\n **kwargs\n )\n return _create_pit('pit_ti_distilled_224', pretrained, **model_kwargs)\n" ]
[ [ "torch.nn.Linear", "torch.nn.init.constant_", "torch.randn", "torch.nn.LayerNorm", "torch.nn.Conv2d", "torch.nn.ModuleList", "torch.nn.Identity", "torch.jit.is_scripting", "torch.cat", "torch.nn.Dropout" ] ]
ningdez/Tianchi_Cancer_303
[ "59e9b6f906e48e7508f455ce29b97d430791fcf5" ]
[ "mmdet/models/necks/m2fpn.py" ]
[ "'''\nThis code is based on pytorch_ssd and RFBNet.\nDetails about the modules:\n TUM - Thinned U-shaped Module\n MLFPN - Multi-Level Feature Pyramid Network\n M2Det - Multi-level Multi-scale single-shot object Detector\n\nAuthor: Qijie Zhao ([email protected])\nFinished Date: 01/17/2019\n\n'''\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import xavier_init\nimport warnings\nwarnings.filterwarnings('ignore')\nfrom ..registry import NECKS\nfrom ..utils import ConvModule\n\nclass TUM(nn.Module):\n def __init__(self, first_level=True, input_planes=128, is_smooth=True, side_channel=512, scales=6,\n conv_cfg=None,\n norm_cfg=None\n ):\n super(TUM, self).__init__()\n self.is_smooth = is_smooth\n self.side_channel = side_channel\n self.input_planes = input_planes\n self.planes = 2 * self.input_planes\n self.first_level = first_level\n self.scales = scales\n self.in1 = input_planes + side_channel if not first_level else input_planes\n\n self.layers = nn.Sequential()\n self.layers.add_module('{}'.format(len(self.layers)), ConvModule(self.in1, self.planes, 3, 2, 1,conv_cfg=conv_cfg,norm_cfg=norm_cfg))\n for i in range(self.scales - 2):\n if not i == self.scales - 3:\n self.layers.add_module(\n '{}'.format(len(self.layers)),\n ConvModule(self.planes, self.planes, 3, 2, 1,conv_cfg=conv_cfg,norm_cfg=norm_cfg)\n )\n else:\n self.layers.add_module(\n '{}'.format(len(self.layers)),\n ConvModule(self.planes, self.planes, 3, 1, 0,conv_cfg=conv_cfg,norm_cfg=norm_cfg)\n )\n self.toplayer = nn.Sequential(ConvModule(self.planes, self.planes, 1, 1, 0,conv_cfg=conv_cfg,norm_cfg=norm_cfg))\n\n self.latlayer = nn.Sequential()\n for i in range(self.scales - 2):\n self.latlayer.add_module(\n '{}'.format(len(self.latlayer)),\n ConvModule(self.planes, self.planes, 3, 1, 1,conv_cfg=conv_cfg,norm_cfg=norm_cfg)\n )\n self.latlayer.add_module('{}'.format(len(self.latlayer)), ConvModule(self.in1, self.planes, 3, 1, 1,conv_cfg=conv_cfg,norm_cfg=norm_cfg))\n\n if self.is_smooth:\n smooth = list()\n for i in range(self.scales - 1):\n smooth.append(\n ConvModule(self.planes, self.planes, 1, 1, 0,conv_cfg=conv_cfg,norm_cfg=norm_cfg)\n )\n self.smooth = nn.Sequential(*smooth)\n\n def _upsample_add(self, x, y, fuse_type='interp'):\n _, _, H, W = y.size()\n if fuse_type == 'interp':\n return F.interpolate(x, size=(H, W), mode='nearest') + y\n else:\n raise NotImplementedError\n # return nn.ConvTranspose2d(16, 16, 3, stride=2, padding=1)\n\n def forward(self, x, y):\n if not self.first_level:\n x = torch.cat([x, y], 1)\n conved_feat = [x]\n for i in range(len(self.layers)):\n x = self.layers[i](x)\n conved_feat.append(x)\n\n deconved_feat = [self.toplayer[0](conved_feat[-1])]\n for i in range(len(self.latlayer)):\n deconved_feat.append(\n self._upsample_add(\n deconved_feat[i], self.latlayer[i](conved_feat[len(self.layers) - 1 - i])\n )\n )\n if self.is_smooth:\n smoothed_feat = [deconved_feat[0]]\n for i in range(len(self.smooth)):\n smoothed_feat.append(\n self.smooth[i](deconved_feat[i + 1])\n )\n return smoothed_feat\n return deconved_feat\n\nclass SFAM(nn.Module):\n def __init__(self, planes, num_levels, num_scales, compress_ratio=16):\n super(SFAM, self).__init__()\n self.planes = planes\n self.num_levels = num_levels\n self.num_scales = num_scales\n self.compress_ratio = compress_ratio\n\n self.fc1 = nn.ModuleList([nn.Conv2d(self.planes * self.num_levels,\n self.planes * self.num_levels // 16,\n 1, 1, 0)] * self.num_scales)\n self.relu = nn.ReLU(inplace=True)\n self.fc2 = 
nn.ModuleList([nn.Conv2d(self.planes * self.num_levels // 16,\n self.planes * self.num_levels,\n 1, 1, 0)] * self.num_scales)\n self.sigmoid = nn.Sigmoid()\n self.avgpool = nn.AdaptiveAvgPool2d(1)\n\n def forward(self, x):\n attention_feat = []\n for i, _mf in enumerate(x):\n _tmp_f = self.avgpool(_mf)\n _tmp_f = self.fc1[i](_tmp_f)\n _tmp_f = self.relu(_tmp_f)\n _tmp_f = self.fc2[i](_tmp_f)\n _tmp_f = self.sigmoid(_tmp_f)\n attention_feat.append(_mf * _tmp_f)\n return attention_feat\n\[email protected]_module\nclass M2FPN(nn.Module):\n def __init__(self,\n num_levels = 8,\n num_scales = 5,\n sfam=False,\n smooth=True,\n in_channels = [512,2048],\n out_channels=256, conv_cfg=None,\n norm_cfg=None):\n '''\n M2Det: Multi-level Multi-scale single-shot object Detector\n '''\n super(M2FPN,self).__init__()\n self.planes = out_channels\n self.conv_cfg = conv_cfg\n self.norm_cfg = norm_cfg\n self.num_levels = num_levels\n self.num_scales = num_scales\n self.sfam = sfam\n self.smooth = smooth\n self.in_channels = in_channels\n self.shallow_out =256\n self.deep_out =512\n self.construct_modules()\n\n def construct_modules(self,):\n # construct tums\n for i in range(self.num_levels):\n if i == 0:\n setattr(self,\n 'unet{}'.format(i+1),\n TUM(first_level=True, \n input_planes=self.planes//2, \n is_smooth=self.smooth,\n scales=self.num_scales,\n side_channel=512)) #side channel isn't fixed.\n else:\n setattr(self,\n 'unet{}'.format(i+1),\n TUM(first_level=False, \n input_planes=self.planes//2, \n is_smooth=self.smooth, \n scales=self.num_scales,\n side_channel=self.planes))\n\n self.reduce= ConvModule(self.in_channels[0], self.shallow_out, kernel_size=3, stride=1, padding=1)\n self.up_reduce_1= ConvModule(self.in_channels[2], self.in_channels[1], kernel_size=1, stride=1)\n self.up_reduce_2= ConvModule(self.in_channels[1], self.deep_out, kernel_size=1, stride=1)\n\n self.Norm = nn.BatchNorm2d(256*8)\n self.leach = nn.ModuleList([ConvModule(\n self.deep_out+self.shallow_out,\n self.planes//2,\n kernel_size=(1,1),stride=(1,1))]*self.num_levels)\n\n # construct localization and recognition layers\n conv_out = nn.ModuleList()\n for i in range(self.num_scales):\n conv_out.append(nn.Conv2d(self.planes*self.num_levels,\n self.planes,\n 3, 1, 1))\n self.conv_out = nn.ModuleList(conv_out)\n\n # construct SFAM module\n if self.sfam:\n self.sfam_module = SFAM(self.planes, self.num_levels, self.num_scales, compress_ratio=16)\n\n def init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n xavier_init(m, distribution='uniform')\n\n def forward(self,x):\n assert len(x)==len(self.in_channels)\n # loc,conf = list(),list()\n # base_feats = list()\n # if 'vgg' in self.net_family:\n # for k in range(len(self.base)):\n # x = self.base[k](x)\n # if k in self.base_out:\n # base_feats.append(x)\n # elif 'res' in self.net_family:\n # base_feats = self.base(x, self.base_out)\n up_feats = x[1] + F.interpolate(self.up_reduce_1(x[2]),scale_factor=2,mode='nearest')\n base_feature = torch.cat(\n (self.reduce(x[0]), F.interpolate(self.up_reduce_2(up_feats),scale_factor=2,mode='nearest')),1\n )\n\n # tum_outs is the multi-level multi-scale feature\n tum_outs = [getattr(self, 'unet{}'.format(1))(self.leach[0](base_feature), 'none')]\n for i in range(1,self.num_levels,1):\n tum_outs.append(\n getattr(self, 'unet{}'.format(i+1))(\n self.leach[i](base_feature), tum_outs[i-1][-1]\n )\n )\n # concat with same scales\n sources = [torch.cat([_fx[i-1] for _fx in tum_outs],1) for i in range(self.num_scales, 0, -1)]\n \n # 
forward_sfam\n if self.sfam:\n sources = self.sfam_module(sources)\n sources[0] = self.Norm(sources[0])\n output = []\n for (x,cout) in zip(sources, self.conv_out):\n output.append(cout(x))\n\n return tuple(output)\n" ]
[ [ "torch.nn.BatchNorm2d", "torch.nn.AdaptiveAvgPool2d", "torch.nn.ModuleList", "torch.nn.Conv2d", "torch.nn.Sequential", "torch.nn.Sigmoid", "torch.nn.ReLU", "torch.cat", "torch.nn.functional.interpolate" ] ]
wja30/cortex_0.31
[ "522ec6226526dee6b4f8c3ed67bdf2b913d25de3" ]
[ "test/apis/tensorflow/sound-classifier/predictor.py" ]
[ "from scipy.io.wavfile import read\nimport numpy as np\nimport io\nimport csv\n\n\nclass TensorFlowPredictor:\n def __init__(self, tensorflow_client, config):\n self.client = tensorflow_client\n self.class_names = self.class_names_from_csv(\"class_names.csv\")\n\n def class_names_from_csv(self, csv_file):\n class_names = []\n with open(csv_file, \"r\", newline=\"\") as f:\n for row in csv.reader(f, delimiter=\",\"):\n class_names.append(row[2])\n return class_names\n\n def predict(self, payload):\n rate, data = read(io.BytesIO(payload))\n assert rate == 16000\n\n result = self.client.predict({\"waveform\": np.array(data, dtype=np.float32)})\n scores = np.array(result[\"output_0\"]).reshape((-1, 521))\n\n predicted_class = self.class_names[scores.mean(axis=0).argmax() + 1]\n return predicted_class\n" ]
[ [ "numpy.array" ] ]
MattKrecicki/PYTHON-ISOTOPIC-DEPLETION-PACKAGE
[ "d9da8be6eff4ba301f9689ce5c38a5e50856d033" ]
[ "pyIsoDep/tests/read_csv.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"read_csv\n\nRead the different csv files\n\nCreated on Mon Oct 11 21:30:00 2021 @author: Dan Kotlyar\nLast updated on Mon Oct 11 21:45:00 2021 @author: Dan Kotlyar\n\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\n\n\ndef ReadCsv(csvFile):\n\n data = pd.read_csv('bootstrap.csv')\n ID = np.array(data['ZAID'], dtype=int)\n xsTypes = np.array(data['MT'], dtype=int)\n xsVals = np.array(data[\"XS [barns]\"], dtype=float)\n N0 = np.array(data[\"N0 [atoms/b-cm]\"], dtype=float)\n\n fullID = np.unique(ID) # unique isotopes\n nIsotopes = len(fullID)\n # 1-ID, 2-ND, 3-cap, 4-fiss, 5-(n,alpha)\n xsTable = np.zeros((nIsotopes, 5))\n xsTable[:, 0] = fullID\n\n # obtain all the cross section types\n numMTs = np.array([102, 18, 107])\n\n for idx, numMT in enumerate(numMTs):\n vals, idxFull, idx0 =\\\n np.intersect1d(fullID, ID[xsTypes == numMT], assume_unique=False,\n return_indices=True)\n if idx == 0:\n xsTable[idxFull, 1] = N0[xsTypes == numMT][idx0]\n xsTable[idxFull, idx+2] = xsVals[xsTypes == numMT][idx0]\n\n idxFields = {\"ID\": 0, \"N0\": 1, \"sig_c\": 2, \"sig_alpha\": 3, \"sig_f\": 4}\n\n return xsTable, idxFields\n" ]
[ [ "numpy.zeros", "numpy.intersect1d", "pandas.read_csv", "numpy.array", "numpy.unique" ] ]
Sonata-Wang/tensorflow
[ "3e21fe5faedab3a8258d344c8ad1cec2612a8aa8" ]
[ "tensorflow/python/data/experimental/__init__.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Experimental API for building input pipelines.\n\nThis module contains experimental `Dataset` sources and transformations that can\nbe used in conjunction with the `tf.data.Dataset` API. Note that the\n`tf.data.experimental` API is not subject to the same backwards compatibility\nguarantees as `tf.data`, but we will provide deprecation advice in advance of\nremoving existing functionality.\n\nSee [Importing Data](https://tensorflow.org/guide/datasets) for an overview.\n\n@@Counter\n@@CheckpointInputPipelineHook\n@@CsvDataset\n@@DatasetStructure\n@@NestedStructure\n@@OptimizationOptions\n@@Optional\n@@OptionalStructure\n@@RandomDataset\n@@Reducer\n@@SparseTensorStructure\n@@SqlDataset\n@@StatsAggregator\n@@StatsOptions\n@@Structure\n@@TFRecordWriter\n@@TensorStructure\n@@ThreadingOptions\n\n@@bucket_by_sequence_length\n@@bytes_produced_stats\n@@cardinality\n@@choose_from_datasets\n@@copy_to_device\n@@dense_to_sparse_batch\n@@enumerate_dataset\n@@filter_for_shard\n@@get_next_as_optional\n@@get_single_element\n@@group_by_reducer\n@@group_by_window\n@@ignore_errors\n@@latency_stats\n@@make_batched_features_dataset\n@@make_csv_dataset\n@@make_saveable_from_iterator\n@@map_and_batch\n@@map_and_batch_with_legacy_function\n@@parallel_interleave\n@@parse_example_dataset\n@@prefetch_to_device\n@@rejection_resample\n@@sample_from_datasets\n@@scan\n@@shuffle_and_repeat\n@@take_while\n@@unbatch\n@@unique\n\n@@AUTOTUNE\n@@INFINITE_CARDINALITY\n@@UNKNOWN_CARDINALITY\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# pylint: disable=unused-import\n\nfrom tensorflow.python.data.experimental.ops.batching import dense_to_sparse_batch\nfrom tensorflow.python.data.experimental.ops.batching import map_and_batch\nfrom tensorflow.python.data.experimental.ops.batching import map_and_batch_with_legacy_function\nfrom tensorflow.python.data.experimental.ops.batching import unbatch\nfrom tensorflow.python.data.experimental.ops.cardinality import cardinality\nfrom tensorflow.python.data.experimental.ops.cardinality import INFINITE as INFINITE_CARDINALITY\nfrom tensorflow.python.data.experimental.ops.cardinality import UNKNOWN as UNKNOWN_CARDINALITY\nfrom tensorflow.python.data.experimental.ops.counter import Counter\nfrom tensorflow.python.data.experimental.ops.enumerate_ops import enumerate_dataset\nfrom tensorflow.python.data.experimental.ops.error_ops import ignore_errors\nfrom tensorflow.python.data.experimental.ops.filter_for_shard_ops import filter_for_shard\nfrom tensorflow.python.data.experimental.ops.get_single_element import get_single_element\nfrom tensorflow.python.data.experimental.ops.grouping import bucket_by_sequence_length\nfrom tensorflow.python.data.experimental.ops.grouping import group_by_reducer\nfrom 
tensorflow.python.data.experimental.ops.grouping import group_by_window\nfrom tensorflow.python.data.experimental.ops.grouping import Reducer\nfrom tensorflow.python.data.experimental.ops.interleave_ops import choose_from_datasets\nfrom tensorflow.python.data.experimental.ops.interleave_ops import parallel_interleave\nfrom tensorflow.python.data.experimental.ops.interleave_ops import sample_from_datasets\nfrom tensorflow.python.data.experimental.ops.iterator_ops import CheckpointInputPipelineHook\nfrom tensorflow.python.data.experimental.ops.iterator_ops import make_saveable_from_iterator\nfrom tensorflow.python.data.experimental.ops.optimization import AUTOTUNE\nfrom tensorflow.python.data.experimental.ops.optimization_options import OptimizationOptions\nfrom tensorflow.python.data.experimental.ops.parsing_ops import parse_example_dataset\nfrom tensorflow.python.data.experimental.ops.prefetching_ops import copy_to_device\nfrom tensorflow.python.data.experimental.ops.prefetching_ops import prefetch_to_device\nfrom tensorflow.python.data.experimental.ops.random_ops import RandomDataset\nfrom tensorflow.python.data.experimental.ops.readers import CsvDataset\nfrom tensorflow.python.data.experimental.ops.readers import make_batched_features_dataset\nfrom tensorflow.python.data.experimental.ops.readers import make_csv_dataset\nfrom tensorflow.python.data.experimental.ops.readers import SqlDataset\nfrom tensorflow.python.data.experimental.ops.resampling import rejection_resample\nfrom tensorflow.python.data.experimental.ops.scan_ops import scan\nfrom tensorflow.python.data.experimental.ops.shuffle_ops import shuffle_and_repeat\nfrom tensorflow.python.data.experimental.ops.stats_aggregator import StatsAggregator\nfrom tensorflow.python.data.experimental.ops.stats_ops import bytes_produced_stats\nfrom tensorflow.python.data.experimental.ops.stats_ops import latency_stats\nfrom tensorflow.python.data.experimental.ops.stats_options import StatsOptions\nfrom tensorflow.python.data.experimental.ops.take_while_ops import take_while\nfrom tensorflow.python.data.experimental.ops.threading_options import ThreadingOptions\nfrom tensorflow.python.data.experimental.ops.unique import unique\nfrom tensorflow.python.data.experimental.ops.writers import TFRecordWriter\nfrom tensorflow.python.data.ops.dataset_ops import DatasetStructure\nfrom tensorflow.python.data.ops.iterator_ops import get_next_as_optional\nfrom tensorflow.python.data.ops.optional_ops import Optional\nfrom tensorflow.python.data.ops.optional_ops import OptionalStructure\nfrom tensorflow.python.data.util.structure import NestedStructure\nfrom tensorflow.python.data.util.structure import SparseTensorStructure\nfrom tensorflow.python.data.util.structure import Structure\nfrom tensorflow.python.data.util.structure import TensorStructure\n# pylint: enable=unused-import\n\nfrom tensorflow.python.util.all_util import remove_undocumented\nremove_undocumented(__name__)\n" ]
[ [ "tensorflow.python.util.all_util.remove_undocumented" ] ]
DoranLyong/DeepFish
[ "3ea3e13653f708d4a8dcb54b990dcc2997edf4e9" ]
[ "trainval.py" ]
[ "import torch\nimport numpy as np\nimport argparse\nimport pandas as pd\nimport sys\nimport os\nfrom torch import nn\nfrom torch.nn import functional as F\nimport tqdm\nimport pprint\nfrom src import utils as ut\nimport torchvision\nfrom haven import haven_utils as hu\nfrom haven import haven_chk as hc\n\nfrom src import datasets, models\nfrom torch.utils.data import DataLoader\nimport exp_configs\nfrom torch.utils.data.sampler import RandomSampler\nfrom src import wrappers\n\n\n\ndef trainval(exp_dict, savedir_base, reset, metrics_flag=True, datadir=None, cuda=False):\n # bookkeeping\n # ---------------\n\n # get experiment directory\n exp_id = hu.hash_dict(exp_dict)\n savedir = os.path.join(savedir_base, exp_id)\n\n if reset:\n # delete and backup experiment\n hc.delete_experiment(savedir, backup_flag=True)\n \n # create folder and save the experiment dictionary\n os.makedirs(savedir, exist_ok=True)\n hu.save_json(os.path.join(savedir, 'exp_dict.json'), exp_dict)\n print(pprint.pprint(exp_dict))\n print('Experiment saved in %s' % savedir)\n\n\n # set seed\n # ==================\n seed = 42\n np.random.seed(seed)\n torch.manual_seed(seed)\n if cuda:\n device = 'cuda'\n torch.cuda.manual_seed_all(seed)\n assert torch.cuda.is_available(), 'cuda is not, available please run with \"-c 0\"'\n else:\n device = 'cpu'\n\n print('Running on device: %s' % device)\n \n # Dataset\n # Load val set and train set\n val_set = datasets.get_dataset(dataset_name=exp_dict[\"dataset\"], split=\"val\",\n transform=exp_dict.get(\"transform\"),\n datadir=datadir)\n train_set = datasets.get_dataset(dataset_name=exp_dict[\"dataset\"],\n split=\"train\", \n transform=exp_dict.get(\"transform\"),\n datadir=datadir)\n \n # Load train loader, val loader, and vis loader\n train_loader = DataLoader(train_set, \n sampler=RandomSampler(train_set,\n replacement=True, num_samples=max(min(500, \n len(train_set)), \n len(val_set))),\n batch_size=exp_dict[\"batch_size\"])\n\n val_loader = DataLoader(val_set, shuffle=False, batch_size=exp_dict[\"batch_size\"])\n vis_loader = DataLoader(val_set, sampler=ut.SubsetSampler(train_set,\n indices=[0, 1, 2]),\n batch_size=1)\n\n # Create model, opt, wrapper\n model_original = models.get_model(exp_dict[\"model\"], exp_dict=exp_dict).cuda()\n opt = torch.optim.Adam(model_original.parameters(), \n lr=1e-5, weight_decay=0.0005)\n\n model = wrappers.get_wrapper(exp_dict[\"wrapper\"], model=model_original, opt=opt).cuda()\n\n score_list = []\n\n # Checkpointing\n # =============\n score_list_path = os.path.join(savedir, \"score_list.pkl\")\n model_path = os.path.join(savedir, \"model_state_dict.pth\")\n opt_path = os.path.join(savedir, \"opt_state_dict.pth\")\n\n if os.path.exists(score_list_path):\n # resume experiment\n score_list = ut.load_pkl(score_list_path)\n model.load_state_dict(torch.load(model_path))\n opt.load_state_dict(torch.load(opt_path))\n s_epoch = score_list[-1][\"epoch\"] + 1\n else:\n # restart experiment\n score_list = []\n s_epoch = 0\n\n # Run training and validation\n for epoch in range(s_epoch, exp_dict[\"max_epoch\"]):\n score_dict = {\"epoch\": epoch}\n\n # visualize\n # model.vis_on_loader(vis_loader, savedir=os.path.join(savedir, \"images\"))\n\n # validate\n score_dict.update(model.val_on_loader(val_loader))\n \n # train\n score_dict.update(model.train_on_loader(train_loader))\n\n # Add score_dict to score_list\n score_list += [score_dict]\n\n # Report and save\n print(pd.DataFrame(score_list).tail())\n hu.save_pkl(score_list_path, score_list)\n 
hu.torch_save(model_path, model.state_dict())\n hu.torch_save(opt_path, opt.state_dict())\n print(\"Saved in %s\" % savedir)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n parser.add_argument('-e', '--exp_group_list', nargs='+')\n parser.add_argument('-sb', '--savedir_base', required=True)\n parser.add_argument('-d', '--datadir', required=True)\n parser.add_argument('-r', '--reset', default=0, type=int)\n parser.add_argument('-ei', '--exp_id', default=None)\n parser.add_argument('-c', '--cuda', type=int, default=1)\n\n args = parser.parse_args()\n\n\n # Collect experiments\n # -------------------\n if args.exp_id is not None:\n # select one experiment\n savedir = os.path.join(args.savedir_base, args.exp_id)\n exp_dict = hu.load_json(os.path.join(savedir, 'exp_dict.json')) \n \n exp_list = [exp_dict]\n \n else:\n # select exp group\n exp_list = []\n for exp_group_name in args.exp_group_list:\n exp_list += exp_configs.EXP_GROUPS[exp_group_name]\n\n ####\n # Run experiments or View them\n # ----------------------------\n \n # run experiments\n for exp_dict in exp_list:\n # do trainval\n trainval(exp_dict=exp_dict,\n savedir_base=args.savedir_base,\n reset=args.reset,\n datadir=args.datadir,\n cuda=args.cuda)\n\n" ]
[ [ "torch.utils.data.DataLoader", "torch.cuda.manual_seed_all", "torch.load", "torch.manual_seed", "pandas.DataFrame", "numpy.random.seed", "torch.cuda.is_available" ] ]
AshburnLee/models
[ "98fa58030f8ce352b3818f43897ac719ccffdffc" ]
[ "PaddleAudio/paddleaudio/features/augment.py" ]
[ "# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Iterable, List, Optional, Tuple, TypeVar\n\nimport numpy as np\nfrom numpy import ndarray as array\nfrom paddleaudio.backends import depth_convert\nfrom paddleaudio.utils import ParameterError\n\n__all__ = [\n 'depth_augment',\n 'spect_augment',\n 'random_crop1d',\n 'random_crop2d',\n 'adaptive_spect_augment',\n]\n\n\ndef randint(high: int) -> int:\n \"\"\"Generate one random integer in range [0 high)\n\n This is a helper function for random data augmentaiton\n \"\"\"\n return int(np.random.randint(0, high=high))\n\n\ndef rand() -> float:\n \"\"\"Generate one floating-point number in range [0 1)\n\n This is a helper function for random data augmentaiton\n \"\"\"\n return float(np.random.rand(1))\n\n\ndef depth_augment(y: array,\n choices: List = ['int8', 'int16'],\n probs: List[float] = [0.5, 0.5]) -> array:\n \"\"\" Audio depth augmentation\n\n Do audio depth augmentation to simulate the distortion brought by quantization.\n \"\"\"\n assert len(probs) == len(\n choices\n ), 'number of choices {} must be equal to size of probs {}'.format(\n len(choices), len(probs))\n depth = np.random.choice(choices, p=probs)\n src_depth = y.dtype\n y1 = depth_convert(y, depth)\n y2 = depth_convert(y1, src_depth)\n\n return y2\n\n\ndef adaptive_spect_augment(spect: array,\n tempo_axis: int = 0,\n level: float = 0.1) -> array:\n \"\"\"Do adpative spectrogram augmentation\n\n The level of the augmentation is gowern by the paramter level,\n ranging from 0 to 1, with 0 represents no augmentation。\n\n \"\"\"\n assert spect.ndim == 2., 'only supports 2d tensor or numpy array'\n if tempo_axis == 0:\n nt, nf = spect.shape\n else:\n nf, nt = spect.shape\n\n time_mask_width = int(nt * level * 0.5)\n freq_mask_width = int(nf * level * 0.5)\n\n num_time_mask = int(10 * level)\n num_freq_mask = int(10 * level)\n\n if tempo_axis == 0:\n for _ in range(num_time_mask):\n start = randint(nt - time_mask_width)\n spect[start:start + time_mask_width, :] = 0\n for _ in range(num_freq_mask):\n start = randint(nf - freq_mask_width)\n spect[:, start:start + freq_mask_width] = 0\n else:\n for _ in range(num_time_mask):\n start = randint(nt - time_mask_width)\n spect[:, start:start + time_mask_width] = 0\n for _ in range(num_freq_mask):\n start = randint(nf - freq_mask_width)\n spect[start:start + freq_mask_width, :] = 0\n\n return spect\n\n\ndef spect_augment(spect: array,\n tempo_axis: int = 0,\n max_time_mask: int = 3,\n max_freq_mask: int = 3,\n max_time_mask_width: int = 30,\n max_freq_mask_width: int = 20) -> array:\n \"\"\"Do spectrogram augmentation in both time and freq axis\n\n Reference:\n\n \"\"\"\n assert spect.ndim == 2., 'only supports 2d tensor or numpy array'\n if tempo_axis == 0:\n nt, nf = spect.shape\n else:\n nf, nt = spect.shape\n\n num_time_mask = randint(max_time_mask)\n num_freq_mask = randint(max_freq_mask)\n\n time_mask_width = randint(max_time_mask_width)\n 
freq_mask_width = randint(max_freq_mask_width)\n\n if tempo_axis == 0:\n for _ in range(num_time_mask):\n start = randint(nt - time_mask_width)\n spect[start:start + time_mask_width, :] = 0\n for _ in range(num_freq_mask):\n start = randint(nf - freq_mask_width)\n spect[:, start:start + freq_mask_width] = 0\n else:\n for _ in range(num_time_mask):\n start = randint(nt - time_mask_width)\n spect[:, start:start + time_mask_width] = 0\n for _ in range(num_freq_mask):\n start = randint(nf - freq_mask_width)\n spect[start:start + freq_mask_width, :] = 0\n\n return spect\n\n\ndef random_crop1d(y: array, crop_len: int) -> array:\n \"\"\" Do random cropping on 1d input signal\n\n The input is a 1d signal, typically a sound waveform\n \"\"\"\n if y.ndim != 1:\n 'only accept 1d tensor or numpy array'\n n = len(y)\n idx = randint(n - crop_len)\n return y[idx:idx + crop_len]\n\n\ndef random_crop2d(s: array, crop_len: int, tempo_axis: int = 0) -> array:\n \"\"\" Do random cropping for 2D array, typically a spectrogram.\n\n The cropping is done in temporal direction on the time-freq input signal.\n \"\"\"\n if tempo_axis >= s.ndim:\n raise ParameterError('axis out of range')\n\n n = s.shape[tempo_axis]\n idx = randint(high=n - crop_len)\n sli = [slice(None) for i in range(s.ndim)]\n sli[tempo_axis] = slice(idx, idx + crop_len)\n out = s[tuple(sli)]\n return out\n" ]
[ [ "numpy.random.rand", "numpy.random.randint", "numpy.random.choice" ] ]
bahia14/Fedot_Times_Series_Forecast
[ "995751068733541ba2f546065082709ce0fb63ae" ]
[ "fedot/core/pipelines/tuning/unified.py" ]
[ "from datetime import timedelta\nfrom functools import partial\n\nimport numpy as np\nfrom hyperopt import fmin, space_eval, tpe\n\nfrom fedot.core.data.data_split import train_test_data_setup\nfrom fedot.core.log import Log\nfrom fedot.core.pipelines.tuning.hyperparams import convert_params, get_node_params\nfrom fedot.core.pipelines.tuning.tuner_interface import HyperoptTuner, _greater_is_better\n\nMAX_METRIC_VALUE = 10e6\n\n\nclass PipelineTuner(HyperoptTuner):\n \"\"\"\n Class for hyperparameters optimization for all nodes simultaneously\n \"\"\"\n\n def __init__(self, pipeline, task, iterations=100,\n timeout: timedelta = timedelta(minutes=5),\n log: Log = None):\n super().__init__(pipeline, task, iterations, timeout, log)\n\n def tune_pipeline(self, input_data, loss_function, loss_params=None):\n \"\"\" Function for hyperparameters tuning on the entire pipeline \"\"\"\n\n parameters_dict = self._get_parameters_for_tune(self.pipeline)\n\n # Train test split\n train_input, predict_input = train_test_data_setup(input_data)\n test_target = np.array(predict_input.target)\n\n is_need_to_maximize = _greater_is_better(target=test_target,\n loss_function=loss_function,\n loss_params=loss_params)\n self.is_need_to_maximize = is_need_to_maximize\n\n # Check source metrics for data\n self.init_check(train_input, predict_input, test_target,\n loss_function, loss_params)\n\n best = fmin(partial(self._objective,\n pipeline=self.pipeline,\n train_input=train_input,\n predict_input=predict_input,\n test_target=test_target,\n loss_function=loss_function,\n loss_params=loss_params),\n parameters_dict,\n algo=tpe.suggest,\n max_evals=self.iterations,\n timeout=self.max_seconds)\n\n best = space_eval(space=parameters_dict, hp_assignment=best)\n\n tuned_pipeline = self.set_arg_pipeline(pipeline=self.pipeline,\n parameters=best)\n\n # Validation is the optimization do well\n final_pipeline = self.final_check(train_input=train_input,\n predict_input=predict_input,\n test_target=test_target,\n tuned_pipeline=tuned_pipeline,\n loss_function=loss_function,\n loss_params=loss_params)\n\n return final_pipeline\n\n @staticmethod\n def set_arg_pipeline(pipeline, parameters):\n \"\"\" Method for parameters setting to a pipeline\n\n :param pipeline: pipeline to which parameters should ba assigned\n :param parameters: dictionary with parameters to set\n :return pipeline: pipeline with new hyperparameters in each node\n \"\"\"\n\n # Set hyperparameters for every node\n for node_id, _ in enumerate(pipeline.nodes):\n node_params = parameters.get(node_id)\n\n if node_params is not None:\n # Delete all prefix strings to get appropriate parameters names\n new_params = convert_params(node_params)\n\n # Update parameters in nodes\n pipeline.nodes[node_id].custom_params = new_params\n\n return pipeline\n\n @staticmethod\n def _get_parameters_for_tune(pipeline):\n \"\"\"\n Function for defining the search space\n\n :param pipeline: pipeline to optimize\n :return parameters_dict: dictionary with operation names and parameters\n \"\"\"\n\n parameters_dict = {}\n for node_id, node in enumerate(pipeline.nodes):\n operation_name = str(node.operation)\n\n # Assign unique prefix for each model hyperparameter\n # label - number of node in the pipeline\n node_params = get_node_params(node_id=node_id,\n operation_name=operation_name)\n\n parameters_dict.update({node_id: node_params})\n\n return parameters_dict\n\n def _objective(self, parameters_dict, pipeline, train_input, predict_input,\n test_target, loss_function, loss_params: 
dict):\n \"\"\"\n Objective function for minimization / maximization problem\n\n :param parameters_dict: dictionary with operation names and parameters\n :param pipeline: pipeline to optimize\n :param train_input: input for train pipeline model\n :param predict_input: input for test pipeline model\n :param test_target: target for validation\n :param loss_function: loss function to optimize\n :param loss_params: parameters for loss function\n\n :return metric_value: value of objective function\n \"\"\"\n\n # Set hyperparameters for every node\n pipeline = PipelineTuner.set_arg_pipeline(pipeline=pipeline, parameters=parameters_dict)\n\n try:\n metric_value = PipelineTuner.get_metric_value(train_input=train_input,\n predict_input=predict_input,\n test_target=test_target,\n pipeline=pipeline,\n loss_function=loss_function,\n loss_params=loss_params)\n except Exception:\n if self.is_need_to_maximize is True:\n metric_value = -MAX_METRIC_VALUE\n else:\n metric_value = MAX_METRIC_VALUE\n\n if self.is_need_to_maximize is True:\n return -metric_value\n else:\n return metric_value\n" ]
[ [ "numpy.array" ] ]
zploskey/Theano
[ "9b3f6351d41d9f5e01b198e3de7538d7f032c409" ]
[ "theano/tensor/signal/pool.py" ]
[ "\n\"\"\"\nOps for downsampling images.\nPlanned:\nPool, DownsampleAvg, DownsampleSoftmax.\n\"\"\"\nfrom __future__ import absolute_import, print_function, division\n# This file should move along with conv.py\nimport warnings\nimport itertools\n\nimport numpy as np\nfrom six.moves import xrange\nimport six.moves.builtins as builtins\nimport theano\nfrom theano import gof, OpenMPOp, tensor, Variable, Apply\nfrom theano.gof import ParamsType, EnumList\nfrom theano.gradient import DisconnectedType\nfrom theano.scalar import bool as bool_t\n\n\ndef max_pool_2d_same_size(input, patch_size):\n \"\"\"\n Takes as input a 4-D tensor. It sets all non maximum values\n of non-overlapping patches of size (patch_size[0],patch_size[1]) to zero,\n keeping only the maximum values. The output has the same dimensions as\n the input.\n\n Parameters\n ----------\n input : 4-D theano tensor of input images\n Input images. Max pooling will be done over the 2 last dimensions.\n patch_size : tuple of length 2 or theano vector of ints of size 2.\n Size of the patch (patch height, patch width).\n (2,2) will retain only one non-zero value per patch of 4 values.\n\n \"\"\"\n output = Pool(True)(input, patch_size)\n outs = MaxPoolGrad(True)(input, output, output, patch_size)\n return outs\n\n\ndef pool_2d(input, ws=None, ignore_border=None, stride=None, pad=(0, 0),\n mode='max', ds=None, st=None, padding=None):\n \"\"\"Downscale the input by a specified factor\n\n Takes as input a N-D tensor, where N >= 2. It downscales the input image by\n the specified factor, by keeping only the maximum value of non-overlapping\n patches of size (ws[0],ws[1])\n\n Parameters\n ----------\n input : N-D theano tensor of input images\n Input images. Max pooling will be done over the 2 last dimensions.\n ws : tuple of length 2 or theano vector of ints of size 2.\n Factor by which to downscale (vertical ws, horizontal ws).\n (2,2) will halve the image in each dimension.\n ignore_border : bool (default None, will print a warning and set to False)\n When True, (5,5) input with ws=(2,2) will generate a (2,2) output.\n (3,3) otherwise.\n stride : tuple of two ints or theano vector of ints of size 2.\n Stride size, which is the number of shifts over rows/cols to get the\n next pool region. If stride is None, it is considered equal to ws\n (no overlap on pooling regions), eg: stride=(1,1) will shifts over\n one row and one col for every iteration.\n pad : tuple of two ints or theano vector of ints of size 2.\n (pad_h, pad_w), pad zeros to extend beyond four borders of the\n images, pad_h is the size of the top and bottom margins, and\n pad_w is the size of the left and right margins.\n mode : {'max', 'sum', 'average_inc_pad', 'average_exc_pad'}\n Operation executed on each window. `max` and `sum` always exclude\n the padding in the computation. 
`average` gives you the choice to\n include or exclude it.\n ds\n *deprecated*, use parameter ws instead.\n st\n *deprecated*, use parameter stride instead.\n padding\n *deprecated*, use parameter pad instead.\n\n \"\"\"\n # check for deprecated parameter names\n if ds is not None:\n if ws is not None:\n raise ValueError(\n \"You can't provide a tuple value to both 'ws' and 'ds'.\"\n \" Please provide a value only to 'ws'.\"\n )\n else:\n warnings.warn(\n \"DEPRECATION: the 'ds' parameter is not going to exist\"\n \" anymore as it is going to be replaced by the parameter\"\n \" 'ws'.\",\n stacklevel=2\n )\n ws = ds\n elif ds is None and ws is None:\n raise ValueError(\n \"You must provide a tuple value for the window size.\"\n )\n\n if st is not None:\n if stride is not None:\n raise ValueError(\n \"You can't provide a tuple value to both 'st and 'stride'.\"\n \" Please provide a value only to 'stride'.\"\n )\n else:\n warnings.warn(\n \"DEPRECATION: the 'st' parameter is not going to exist\"\n \" anymore as it is going to be replaced by the parameter\"\n \" 'stride'.\",\n stacklevel=2\n )\n stride = st\n\n if padding is not None:\n if pad not in {None, (0, 0)}:\n raise ValueError(\n \"You can't provide a tuple value to both 'padding' and pad.\"\n \" Please provide a value only to pad.\"\n )\n else:\n warnings.warn(\n \"DEPRECATION: the 'padding' parameter is not going to exist\"\n \" anymore as it is going to be replaced by the parameter\"\n \" 'pad'.\",\n stacklevel=2\n )\n pad = padding\n\n if input.ndim < 2:\n raise NotImplementedError('pool_2d requires a dimension >= 2')\n if ignore_border is None:\n warnings.warn(\n \"pool_2d() will have the parameter ignore_border\"\n \" default value changed to True (currently\"\n \" False). To have consistent behavior with all Theano\"\n \" version, explicitly add the parameter ignore_border=True.\"\n \" On the GPU, using ignore_border=True is needed to use cuDNN.\"\n \" When using ignore_border=False and not using cuDNN, the only\"\n \" GPU combination supported is when\"\n \" `ws == stride and pad == (0, 0) and mode == 'max'`.\"\n \" Otherwise, the convolution will be executed on CPU.\",\n stacklevel=2)\n ignore_border = False\n op = Pool(ignore_border, ndim=2, mode=mode)\n output = op(input, ws, stride, pad)\n return output\n\n\ndef pool_3d(input, ws=None, ignore_border=None, stride=None, pad=(0, 0, 0),\n mode='max', ds=None, st=None, padding=None):\n \"\"\"Downscale the input by a specified factor\n\n Takes as input a N-D tensor, where N >= 3. It downscales the input image by\n the specified factor, by keeping only the maximum value of non-overlapping\n patches of size (ws[0],ws[1],ws[2])\n\n Parameters\n ----------\n input : N-D theano tensor of input images\n Input images. Max pooling will be done over the 3 last dimensions.\n ws : tuple of length 3 or theano vector of ints of size 3\n Factor by which to downscale (vertical ws, horizontal ws, depth ws).\n (2,2,2) will halve the image in each dimension.\n ignore_border : bool (default None, will print a warning and set to False)\n When True, (5,5,5) input with ws=(2,2,2) will generate a (2,2,2) output.\n (3,3,3) otherwise.\n st : tuple of three ints or theano vector of ints of size 3\n Stride size, which is the number of shifts over rows/cols/slices to get\n the next pool region. 
If st is None, it is considered equal to ws\n (no overlap on pooling regions).\n pad : tuple of two ints or theano vector of ints of size 3\n (pad_h, pad_w, pad_d), pad zeros to extend beyond six borders of the\n images, pad_h is the size of the top and bottom margins,\n pad_w is the size of the left and right margins, and pad_d is the size\n of the front and back margins\n mode : {'max', 'sum', 'average_inc_pad', 'average_exc_pad'}\n Operation executed on each window. `max` and `sum` always exclude\n the padding in the computation. `average` gives you the choice to\n include or exclude it.\n ds\n *deprecated*, use parameter ws instead.\n st\n *deprecated*, use parameter st instead.\n padding\n *deprecated*, use parameter pad instead.\n\n \"\"\"\n # check for deprecated parameter names\n if ds is not None:\n if ws is not None:\n raise ValueError(\n \"You can't provide a tuple value to both 'ws' and 'ds'.\"\n \" Please provide a value only to 'ws'.\"\n )\n else:\n warnings.warn(\n \"DEPRECATION: the 'ds' parameter is not going to exist\"\n \" anymore as it is going to be replaced by the parameter\"\n \" 'ws'.\",\n stacklevel=2\n )\n ws = ds\n elif ds is None and ws is None:\n raise ValueError(\n \"You must provide a tuple value for the window size.\"\n )\n\n if st is not None:\n if stride is not None:\n raise ValueError(\n \"You can't provide a tuple value to both 'st and 'stride'.\"\n \" Please provide a value only to 'stride'.\"\n )\n else:\n warnings.warn(\n \"DEPRECATION: the 'st' parameter is not going to exist\"\n \" anymore as it is going to be replaced by the parameter\"\n \" 'stride'.\",\n stacklevel=2\n )\n stride = st\n\n if padding is not None:\n if pad not in {None, (0, 0, 0)}:\n raise ValueError(\n \"You can't provide a tuple value to both 'padding' and pad.\"\n \" Please provide a value only to pad.\"\n )\n else:\n warnings.warn(\n \"DEPRECATION: the 'padding' parameter is not going to exist\"\n \" anymore as it is going to be replaced by the parameter\"\n \" 'pad'.\",\n stacklevel=2\n )\n pad = padding\n\n if input.ndim < 3:\n raise NotImplementedError('pool_3d requires a dimension >= 3')\n if ignore_border is None:\n warnings.warn(\n \"pool_3d() will have the parameter ignore_border\"\n \" default value changed to True (currently\"\n \" False). To have consistent behavior with all Theano\"\n \" version, explicitly add the parameter ignore_border=True.\"\n \" On the GPU, using ignore_border=True is needed to use cuDNN.\"\n \" When using ignore_border=False and not using cuDNN, the only\"\n \" GPU combination supported is when\"\n \" `ws == stride and pad == (0, 0, 0) and mode == 'max'`.\"\n \" Otherwise, the convolution will be executed on CPU.\",\n stacklevel=2)\n ignore_border = False\n op = Pool(ignore_border, ndim=3, mode=mode)\n output = op(input, ws, stride, pad)\n return output\n\n\n# NB: This enum type is currently used in gpuarray/pool.py.\n# It may be used later as op param in this current file.\n# Enum name and constants names are inspired from cuDNN type `cudnnPoolingMode_t`\n# (cf. 
`theano/gpuarray/cudnn_defs.py`).\nPoolingMode_t = EnumList(('POOLING_MAX', 'max'),\n ('POOLING_SUM', 'sum'),\n ('POOLING_AVERAGE_COUNT_INCLUDE_PADDING', 'average_inc_pad'),\n ('POOLING_AVERAGE_COUNT_EXCLUDE_PADDING', 'average_exc_pad'))\n\n\nclass Pool(OpenMPOp):\n \"\"\"\n sum or average over different patches.\n\n Parameters\n ----------\n ws : list or tuple of N ints\n Downsample factor over rows, columns etc.\n ws indicates the size of the pooling region.\n ignore_border : bool\n If ws doesn't divide imgshape, do we include an extra row/col/slice\n of partial downsampling (False) or ignore it (True).\n stride : list or tuple of N ints or None\n Stride size, which is the number of shifts over rows/cols/slices to get the\n next pool region. If stride is None, it is considered equal to ws\n (no overlap on pooling regions).\n pad : tuple of N ints or None\n For each downsampling dimension, this specifies the number of zeros to\n add as padding on both sides. For 2D and (pad_h, pad_w), pad_h specifies the\n size of the top and bottom margins, pad_w specifies the size of the left and\n right margins. No padding is added if pad is None.\n mode : {'max', 'sum', 'average_inc_pad', 'average_exc_pad'}\n ('average_inc_pad' excludes the padding from the count,\n 'average_exc_pad' include it)\n ndim : int\n The number of pooling dimensions N.\n The default is 2.\n ds\n *deprecated*, use parameter ws instead.\n st\n *deprecated*, use parameter st instead.\n padding\n *deprecated*, use parameter pad instead.\n\n\n \"\"\"\n\n __props__ = ('ignore_border', 'mode', 'ndim')\n params_type = ParamsType(ignore_border=bool_t,)\n\n @staticmethod\n def out_shape(imgshape, ws=None, ignore_border=False, stride=None, pad=None,\n ndim=2, ds=None, st=None, padding=None):\n \"\"\"\n Return the shape of the output from this op, for input of given\n shape and flags.\n\n Parameters\n ----------\n imgshape : tuple, list, or similar of integer or scalar Theano variable\n The shape of a tensor of images. The last N elements are\n interpreted as the number of rows, and the number of cols.\n ws : list or tuple of N ints\n Downsample factor over rows and column.\n ws indicates the pool region size.\n ignore_border : bool\n If ws doesn't divide imgshape, do we include an extra row/col/slice\n of partial downsampling (False) or ignore it (True).\n stride : list or tuple of N ints or None\n Stride size, which is the number of shifts over rows/cols/slices to get the\n next pool region. If stride is None, it is considered equal to ws\n (no overlap on pooling regions).\n pad : tuple of N ints or None\n For each downsampling dimension, this specifies the number of zeros to\n add as padding on both sides. For 2D and (pad_h, pad_w), pad_h specifies the\n size of the top and bottom margins, pad_w specifies the size of the left and\n right margins. 
No padding is added if pad is None.\n ndim : int\n The number of pooling dimensions N.\n The default is 2.\n ds\n *deprecated*, use parameter ws instead.\n st\n *deprecated*, use parameter st instead.\n padding\n *deprecated*, use parameter pad instead.\n\n Returns\n -------\n list\n The shape of the output from this op, for input of given shape.\n This will have the same length as imgshape, but with last N\n elements reduced as per the downsampling & ignore_border flags.\n\n \"\"\"\n # check for deprecated parameter names\n if ds is not None:\n if ws is not None:\n raise ValueError(\n \"You can't provide a tuple value to both 'ws' and 'ds'.\"\n \" Please provide a value only to 'ws'.\"\n )\n else:\n warnings.warn(\n \"DEPRECATION: the 'ds' parameter is not going to exist\"\n \" anymore as it is going to be replaced by the parameter\"\n \" 'ws'.\",\n stacklevel=2\n )\n ws = ds\n elif ds is None and ws is None:\n raise ValueError(\n \"You must provide a tuple value for the window size.\"\n )\n\n if st is not None:\n if stride is not None:\n raise ValueError(\n \"You can't provide a tuple value to both 'st and 'stride'.\"\n \" Please provide a value only to 'stride'.\"\n )\n else:\n warnings.warn(\n \"DEPRECATION: the 'st' parameter is not going to exist\"\n \" anymore as it is going to be replaced by the parameter\"\n \" 'stride'.\",\n stacklevel=2\n )\n stride = st\n\n if padding is not None:\n zero_pad = (0,) * ndim\n if pad not in {None, zero_pad}:\n raise ValueError(\n \"You can't provide a tuple value to both 'padding' and pad.\"\n \" Please provide a value only to pad.\"\n )\n else:\n warnings.warn(\n \"DEPRECATION: the 'padding' parameter is not going to\"\n \" exist anymore as it is going to be replaced by the\"\n \" parameter 'pad'.\",\n stacklevel=2\n )\n pad = padding\n\n if ndim is None:\n ndim = 2\n assert ndim > 0\n if len(imgshape) < ndim:\n raise TypeError('imgshape must have at least {} dimensions'.format(ndim))\n\n if stride is None:\n stride = ws\n if pad is None:\n pad = (0,) * ndim\n patch_shape = tuple(tensor.extract_constant(imgshape[-ndim + i]) + pad[i] * 2\n for i in xrange(ndim))\n\n def compute_out(v, downsample, stride):\n if ignore_border:\n if downsample == stride:\n return v // stride\n else:\n out = (v - downsample) // stride + 1\n if isinstance(out, theano.Variable):\n return tensor.maximum(out, 0)\n else:\n return np.maximum(out, 0)\n else:\n if isinstance(v, theano.Variable):\n return tensor.switch(tensor.ge(stride, downsample),\n (v - 1) // stride + 1,\n tensor.maximum(0, (v - 1 - downsample) //\n stride + 1) + 1)\n elif stride >= downsample:\n return (v - 1) // stride + 1\n else:\n return max(0, (v - 1 - downsample + stride) // stride) + 1\n\n out_shape = [compute_out(patch_shape[i], ws[i], stride[i]) for i in xrange(ndim)]\n\n rval = list(imgshape[:-ndim]) + out_shape\n return rval\n\n def __init__(self, ignore_border=False, mode='max', ndim=2, openmp=None):\n super(Pool, self).__init__(openmp=openmp)\n self.ndim = ndim\n self.ignore_border = ignore_border\n if mode == 'max_deterministic':\n # It seems max pool algo is already deterministic in CPU.\n mode = 'max'\n if mode not in ['max', 'average_inc_pad', 'average_exc_pad', 'sum']:\n raise ValueError(\n \"Pool mode parameter only support 'max', 'sum',\"\n \" 'average_inc_pad' and 'average_exc_pad'. 
Got %s\" % mode)\n self.mode = mode\n\n def prepare_node(self, node, storage_map, compute_map, impl):\n if len(node.inputs) == 1:\n # Old interface\n self.ndim = len(node.op.ds)\n self.mode = node.op.mode\n ws = theano.tensor.constant(node.op.ds)\n st = theano.tensor.constant(node.op.st)\n pad = theano.tensor.constant(node.op.padding)\n node.inputs.append(ws)\n node.inputs.append(st)\n node.inputs.append(pad)\n if isinstance(ws, theano.Constant):\n storage_map[ws] = [ws.data]\n compute_map[ws] = [True]\n else:\n storage_map[ws] = [None]\n compute_map[ws] = [False]\n if isinstance(st, theano.Constant):\n storage_map[st] = [st.data]\n compute_map[st] = [True]\n else:\n storage_map[st] = [None]\n compute_map[st] = [False]\n if isinstance(pad, theano.Constant):\n storage_map[pad] = [pad.data]\n compute_map[pad] = [True]\n else:\n storage_map[pad] = [None]\n compute_map[pad] = [False]\n\n def make_node(self, x, ws, stride=None, pad=None):\n # TODO: consider restricting the dtype?\n x = tensor.as_tensor_variable(x)\n nd = self.ndim\n if stride is None:\n stride = ws\n if pad is None:\n pad = (0,) * nd\n elif isinstance(pad, (tuple, list)):\n if max(pad) != 0 and not self.ignore_border:\n raise NotImplementedError(\n 'padding works only with ignore_border=True')\n if isinstance(ws, (tuple, list)):\n if any(pad[i] >= ws[i] for i in range(nd)):\n raise NotImplementedError(\n 'padding must be smaller than strides')\n ws = tensor.as_tensor_variable(ws)\n stride = tensor.as_tensor_variable(stride)\n pad = tensor.as_tensor_variable(pad)\n assert ws.ndim == 1\n assert stride.ndim == 1\n assert pad.ndim == 1\n if x.type.ndim < nd:\n raise TypeError()\n if ws.dtype not in tensor.int_dtypes:\n raise TypeError('Pool downsample parameters must be ints.')\n if stride.dtype not in tensor.int_dtypes:\n raise TypeError('Stride parameters must be ints.')\n if pad.dtype not in tensor.int_dtypes:\n raise TypeError('Padding parameters must be ints.')\n # If the input shape are broadcastable we can have 0 in the output shape\n broad = x.broadcastable[:-nd] + (False,) * nd\n out = tensor.TensorType(x.dtype, broad)\n return gof.Apply(self, [x, ws, stride, pad], [out()])\n\n def perform(self, node, inp, out, params):\n x, ws, stride, pad = inp\n z, = out\n nd = self.ndim\n assert ws.shape == stride.shape == pad.shape == (nd,)\n if len(x.shape) < nd:\n raise NotImplementedError(\n 'Pool requires input with {} or more dimensions'.format(nd))\n z_shape = self.out_shape(x.shape, ws, params.ignore_border, stride, pad, nd)\n if not params.ignore_border:\n assert all(z > 0 for z in z_shape[-nd:])\n if (z[0] is None) or (z[0].shape != z_shape):\n z[0] = np.empty(z_shape, dtype=x.dtype)\n zz = z[0]\n # size of pooling output\n pool_out_shp = zz.shape[-nd:]\n img_shp = tuple(x.shape[-nd + i] + 2 * pad[i] for i in xrange(nd))\n inc_pad = self.mode == 'average_inc_pad'\n\n # pad the image\n if max(pad) != 0:\n y = np.zeros(x.shape[:-nd] + img_shp, dtype=x.dtype)\n y[(slice(None),) * (len(x.shape) - nd) +\n tuple(slice(pad[i], img_shp[i] - pad[i]) for i in xrange(nd))] = x\n else:\n y = x\n func = np.max\n if self.mode == 'sum':\n func = np.sum\n elif self.mode != 'max':\n func = np.average\n\n # precompute the region boundaries for each dimension\n region_slices = [[] for i in xrange(nd)]\n for i in xrange(nd):\n for j in xrange(pool_out_shp[i]):\n start = j * stride[i]\n end = builtins.min(start + ws[i], img_shp[i])\n if not inc_pad:\n start = builtins.max(start, pad[i])\n end = builtins.min(end, img_shp[i] - pad[i])\n 
region_slices[i].append(slice(start, end))\n\n # iterate over non-pooling dimensions\n for k in np.ndindex(*x.shape[:-nd]):\n zzk = zz[k]\n yk = y[k]\n # iterate over pooling regions\n for r in np.ndindex(*pool_out_shp):\n zzk[r] = func(\n yk[[region_slices[i][r[i]] for i in xrange(nd)]])\n\n def infer_shape(self, node, in_shapes):\n ws, stride, pad = [node.inputs[1], node.inputs[2], node.inputs[3]]\n shp = self.out_shape(in_shapes[0], ws, self.ignore_border, stride,\n pad, self.ndim)\n return [shp]\n\n def L_op(self, inputs, outputs, grads):\n x, ws, stride, pad = inputs\n gz, = grads\n disc = [DisconnectedType()() for i in inputs[1:]]\n if self.mode == 'max':\n return [MaxPoolGrad(ndim=self.ndim,\n ignore_border=self.ignore_border)(\n x, outputs[0], gz, ws=ws, stride=stride, pad=pad)] + disc\n else:\n return [AveragePoolGrad(ndim=self.ndim,\n ignore_border=self.ignore_border,\n mode=self.mode)(\n x, gz, ws=ws, stride=stride, pad=pad)] + disc\n\n def connection_pattern(self, node):\n return [[1], [0], [0], [0]]\n\n def R_op(self, inputs, eval_points):\n if self.mode != 'max':\n # Rop for average or sum is simply pooling evaluated at eval point\n eval_inputs = [eval_points[0]] + inputs[1:]\n return [self(*eval_inputs)]\n\n # R_op can receive None as eval_points.\n # That mean there is no diferientiable path through that input\n # If this imply that you cannot compute some outputs,\n # return None for those.\n if eval_points[0] is None:\n return [None]\n z = self(*inputs)\n x, ws, stride, pad = inputs\n return [\n DownsampleFactorMaxGradGrad(self.ignore_border, self.mode,\n self.ndim)(x, z, eval_points[0], ws,\n stride, pad)\n ]\n\n def c_headers(self):\n headers = ['<algorithm>']\n headers += super(Pool, self).c_headers()\n return headers\n\n def c_code(self, node, name, inp, out, sub):\n if self.mode not in ('max', 'sum', 'average_exc_pad', 'average_inc_pad'):\n raise theano.gof.utils.MethodNotDefined()\n x, ws, stride, pad = inp\n z, = out\n nd = self.ndim\n total_ndim = node.inputs[0].ndim\n non_pool_ndim = total_ndim - nd\n fail = sub['fail']\n params = sub['params']\n if self.openmp:\n # run in parallel over each pooling block\n omp_parallel = '#pragma omp parallel for private(r_st, r_end, r_idx, i_idx, o_idx, collector) schedule(static)'\n else:\n omp_parallel = ''\n ccode = \"\"\"\n int typenum = PyArray_ObjectType((PyObject*)%(x)s, 0);\n if(PyArray_NDIM(%(x)s)!=%(total_ndim)s)\n {\n PyErr_SetString(PyExc_ValueError, \"x must be a %(total_ndim)sD ndarray\");\n %(fail)s;\n }\n if(PyArray_DIM(%(ws)s, 0)!=%(nd)s)\n {\n PyErr_SetString(PyExc_ValueError, \"ws must be a vector of size %(nd)s\");\n %(fail)s;\n }\n if(PyArray_DIM(%(stride)s, 0)!=%(nd)s)\n {\n PyErr_SetString(PyExc_ValueError, \"stride must be a vector of size %(nd)s\");\n %(fail)s;\n }\n if(PyArray_DIM(%(pad)s, 0)!=%(nd)s)\n {\n PyErr_SetString(PyExc_ValueError, \"pad must be a vector of size %(nd)s\");\n %(fail)s;\n }\n int z[%(nd)s]; // shape of the output\n int r[%(nd)s]; // shape of the padded_input\n int ws[%(nd)s];\n int st[%(nd)s];\n int pd[%(nd)s];\n int nonzero_padding;\n nonzero_padding = 0;\n for (int i=0; i<%(nd)s; i++)\n {\n ws[i] = *((npy_intp*)PyArray_GETPTR1(%(ws)s, i));\n st[i] = *((npy_intp*)PyArray_GETPTR1(%(stride)s, i));\n pd[i] = *((npy_intp*)PyArray_GETPTR1(%(pad)s, i));\n r[i] = PyArray_DIMS(%(x)s)[%(non_pool_ndim)s + i] + 2 * pd[i];\n if (pd[i]>0)\n nonzero_padding = 1;\n }\n if (!%(params)s->ignore_border && nonzero_padding)\n {\n PyErr_SetString(PyExc_ValueError,\n \"padding must be zero when 
ignore border is False\");\n %(fail)s;\n }\n if (%(params)s->ignore_border)\n {\n for (int i=0; i<%(nd)s; i++)\n {\n // '/' in C is different from '/' in python\n if (r[i] - ws[i] < 0)\n {\n z[i] = 0;\n }\n else\n {\n z[i] = (r[i] - ws[i]) / st[i] + 1;\n }\n }\n }\n else\n {\n for (int i=0; i<%(nd)s; i++)\n {\n // decide how many rows/cols the output has\n if (st[i] >= ws[i])\n {\n z[i] = (r[i] - 1) / st[i] + 1;\n }\n else\n {\n z[i] = std::max(0, (r[i] - 1 - ws[i] + st[i]) / st[i]) + 1;\n }\n assert(z[i] > 0);\n }\n }\n // memory allocation of z if necessary\n int mem_nec;\n mem_nec = 0;\n if ((!%(z)s) || *PyArray_DIMS(%(z)s)!=%(total_ndim)s)\n {\n mem_nec = 1;\n }\n if (!mem_nec)\n {\n for (int i=0; i<%(non_pool_ndim)s; i++)\n {\n if (PyArray_DIMS(%(z)s)[i] != PyArray_DIMS(%(x)s)[i])\n {\n mem_nec = 1;\n break;\n }\n }\n }\n if (!mem_nec)\n {\n for (int i=0; i<%(nd)s; i++)\n {\n if (PyArray_DIMS(%(z)s)[%(non_pool_ndim)s + i] != z[i])\n {\n mem_nec = 1;\n break;\n }\n }\n }\n if (mem_nec)\n {\n if (%(z)s) Py_XDECREF(%(z)s);\n npy_intp dims[%(total_ndim)s];\n for (int i=0; i<%(non_pool_ndim)s; i++)\n {\n dims[i] = PyArray_DIMS(%(x)s)[i];\n }\n for (int i=0; i<%(nd)s; i++)\n {\n dims[%(non_pool_ndim)s + i] = z[i];\n }\n //TODO: zeros not necessary\n %(z)s = (PyArrayObject*) PyArray_ZEROS(%(total_ndim)s, dims, typenum,0);\n }\n // initialize temp var for the value in a region\n dtype_%(x)s collector;\n int z_prod;\n // do not run if any z[i] is zero\n z_prod = 1;\n for (int i=0; i<%(nd)s; i++)\n {\n z_prod *= z[i];\n }\n if (z_prod)\n {\n // will be used to hold start and end index of a region\n int r_st[%(nd)s];\n int r_end[%(nd)s];\n // index for iterating over the pooling regions\n int r_idx[%(nd)s];\n // placeholder for PyArray indexing (output)\n npy_intp o_idx[%(total_ndim)s];\n // placeholder for PyArray indexing (input)\n npy_intp i_idx[%(total_ndim)s];\n // loop over non-pooling dimensions\n int non_pooling_prod = 1;\n for (int i=0; i<%(non_pool_ndim)s; i++)\n {\n non_pooling_prod *= PyArray_DIMS(%(x)s)[i];\n }\n %(omp_parallel)s\n // first loop over non-pooling dimensions\n for (int t=0; t<non_pooling_prod; t++)\n {\n // compute the non-pooling index in each dimension\n if (%(non_pool_ndim)s!=0)\n {\n o_idx[0] = t;\n i_idx[0] = t;\n for (int i=1; i<%(non_pool_ndim)s; i++)\n {\n o_idx[i] = o_idx[i - 1] / PyArray_DIMS(%(x)s)[i - 1];\n o_idx[i - 1] = o_idx[i - 1] %% PyArray_DIMS(%(x)s)[i - 1];\n i_idx[i] = o_idx[i];\n i_idx[i - 1] = o_idx[i - 1];\n }\n }\n\n // then loop over each region in each pooling dimension\n \"\"\"\n\n for i in xrange(nd):\n ccode += \"\"\"\n for (r_idx[%(i)s]=0; r_idx[%(i)s] < z[%(i)s]; r_idx[%(i)s]++) {\n r_st[%(i)s] = r_idx[%(i)s] * st[%(i)s];\n r_end[%(i)s] = r_st[%(i)s] + ws[%(i)s];\n // skip the padding\n r_st[%(i)s] = r_st[%(i)s] < pd[%(i)s] ? pd[%(i)s] : r_st[%(i)s];\n r_end[%(i)s] = r_end[%(i)s] > (r[%(i)s] - pd[%(i)s]) ? r[%(i)s] - pd[%(i)s] : r_end[%(i)s];\n // from padded_img space to img space\n r_st[%(i)s] -= pd[%(i)s];\n r_end[%(i)s] -= pd[%(i)s];\n // handle the case where no padding, ignore border is True\n if (%(params)s->ignore_border)\n {\n r_end[%(i)s] = r_end[%(i)s] > r[%(i)s] ? 
r[%(i)s] : r_end[%(i)s];\n }\n // use the index to find the correct position in the output\n o_idx[%(non_pool_ndim)s + %(i)s] = r_idx[%(i)s];\n \"\"\" % dict(i=i, non_pool_ndim=non_pool_ndim, params=sub['params'])\n\n ccode += \"\"\"\n // get a pointer to the correct position in the output\n dtype_%(z)s * z;\n if (%(total_ndim)s == 4)\n z = ((dtype_%(z)s*)(PyArray_GETPTR4(%(z)s, o_idx[0], o_idx[1], o_idx[2], o_idx[3])));\n else\n z = ((dtype_%(z)s*)(PyArray_GetPtr(%(z)s, o_idx)));\n \"\"\"\n\n if self.mode == 'max':\n for i in xrange(nd):\n ccode += \"\"\"\n // set the first index of dimension %(i)s\n i_idx[%(non_pool_ndim)s + %(i)s] = r_st[%(i)s];\n \"\"\" % dict(i=i, non_pool_ndim=non_pool_ndim)\n ccode += \"\"\"\n // use the first element as the initial value of collector\n if (%(total_ndim)s == 4)\n collector = ((dtype_%(x)s*)(PyArray_GETPTR4(%(x)s,i_idx[0],i_idx[1],i_idx[2],i_idx[3])))[0];\n else\n collector = ((dtype_%(x)s*)(PyArray_GetPtr(%(x)s,i_idx)))[0];\n \"\"\"\n for i in xrange(nd):\n ccode += \"\"\"\n // go through the pooled region in the unpadded input\n for(int m%(i)s=r_st[%(i)s]; m%(i)s<r_end[%(i)s]; m%(i)s++)\n {\n i_idx[%(non_pool_ndim)s + %(i)s] = m%(i)s;\n \"\"\" % dict(i=i, non_pool_ndim=non_pool_ndim)\n ccode += \"\"\"\n // update maximum\n dtype_%(x)s a;\n if (%(total_ndim)s == 4)\n a = ((dtype_%(x)s*)(PyArray_GETPTR4(%(x)s,i_idx[0],i_idx[1],i_idx[2],i_idx[3])))[0];\n else\n a = ((dtype_%(x)s*)(PyArray_GetPtr(%(x)s,i_idx)))[0];\n collector = (a > collector) ? a : collector;\n \"\"\"\n for i in xrange(nd):\n ccode += \"\"\"\n } // for loop over region\n \"\"\"\n ccode += \"\"\"\n z[0] = collector;\n \"\"\"\n elif self.mode in ('sum', 'average_exc_pad', 'average_inc_pad'):\n ccode += \"\"\"\n // initialize the sum at zero\n collector = ((dtype_%(x)s)(0));\n \"\"\"\n for i in xrange(nd):\n ccode += \"\"\"\n // go through the pooled region in the unpadded input\n for(int m%(i)s=r_st[%(i)s]; m%(i)s<r_end[%(i)s]; m%(i)s++)\n {\n i_idx[%(non_pool_ndim)s + %(i)s] = m%(i)s;\n \"\"\" % dict(i=i, non_pool_ndim=non_pool_ndim)\n ccode += \"\"\"\n // update sum\n dtype_%(x)s a;\n if (%(total_ndim)s == 4)\n a = ((dtype_%(x)s*)(PyArray_GETPTR4(%(x)s,i_idx[0],i_idx[1],i_idx[2],i_idx[3])))[0];\n else\n a = ((dtype_%(x)s*)(PyArray_GetPtr(%(x)s,i_idx)))[0];\n collector += a;\n \"\"\"\n for i in xrange(nd):\n ccode += \"\"\"\n } // for loop over region\n \"\"\"\n if self.mode == \"sum\":\n ccode += \"\"\"\n z[0] = collector;\n \"\"\"\n elif self.mode == 'average_inc_pad' and self.ignore_border:\n # region size = product over all pooling dimensions\n region_size = ' * '.join('ws[%d]' % i for i in xrange(nd))\n ccode += \"\"\"\n z[0] = collector / (%(region_size)s);\n \"\"\" % dict(region_size=region_size)\n else:\n # region size = number elements of in this region\n region_size = ' * '.join('(r_end[%d]-r_st[%d])' % (i, i) for i in xrange(nd))\n ccode += \"\"\"\n z[0] = collector / (%(region_size)s);\n \"\"\" % dict(region_size=region_size)\n for i in xrange(nd):\n ccode += \"\"\"\n } // loop over pooling dimension\n \"\"\"\n\n ccode += \"\"\"\n } // for loop over non-pooling dimensions\n } // if z_prod\n \"\"\"\n return ccode % locals()\n\n def c_code_cache_version(self):\n return (9, self.openmp)\n\n\nclass PoolGrad(OpenMPOp):\n __props__ = ('ignore_border', 'mode', 'ndim')\n\n @staticmethod\n def out_shape(imgshape, ws=None, ignore_border=False, stride=None, pad=None, ndim=2,\n ds=None, st=None, padding=None):\n \"\"\"Return the shape of the output from this op, for input of given\n 
shape and flags.\n\n Parameters\n ----------\n imgshape : tuple of integers or scalar Theano variables\n the shape of a tensor of images. The last N elements are\n interpreted as the downsampling dimensions.\n ws : tuple of N ints\n downsample factor over rows and columns this parameter\n indicates the size of the pooling region\n ignore_border : bool\n If ws doesn't divide imgshape, do we include an extra row/col/slice\n of partial downsampling (False) or ignore it (True).\n stride : list or tuple of N ints or None\n Stride size, which is the number of shifts over rows/cols/slices to get the\n next pool region. If stride is None, it is considered equal to ws\n (no overlap on pooling regions).\n pad : tuple of N ints or None\n For each downsampling dimension, this specifies the number of zeros to\n add as padding on both sides. For 2D and (pad_h, pad_w), pad_h specifies the\n size of the top and bottom margins, pad_w specifies the size of the left and\n right margins. No padding is added if pad is None.\n ndim : int\n The number of pooling dimensions N.\n The default is 2.\n ds\n *deprecated*, use parameter ws instead.\n st\n *deprecated*, use parameter st instead.\n padding\n *deprecated*, use parameter pad instead.\n\n Returns\n -------\n list :\n the shape of the output from this op, for input of given\n shape. This will have the same length as imgshape, but\n with last N elements reduced as per the downsampling &\n ignore_border flags.\n\n \"\"\"\n # check for deprecated parameter names\n if ds is not None:\n if ws is not None:\n raise ValueError(\n \"You can't provide a tuple value to both 'ws' and 'ds'.\"\n \" Please provide a value only to 'ws'.\"\n )\n else:\n warnings.warn(\n \"DEPRECATION: the 'ds' parameter in PoolGrad is not going\"\n \" to exist anymore as it is going to be replaced by the\"\n \" parameter 'ws'.\",\n stacklevel=2\n )\n ws = ds\n elif ds is None and ws is None:\n raise ValueError(\n \"You must provide a tuple value for the window size.\"\n )\n\n if st is not None:\n if stride is not None:\n raise ValueError(\n \"You can't provide a tuple value to both 'st and 'stride'.\"\n \" Please provide a value only to 'stride'.\"\n )\n else:\n warnings.warn(\n \"DEPRECATION: the 'st' parameter in PoolGrad is not going\"\n \" to exist anymore as it is going to be replaced by the\"\n \" parameter 'stride'.\",\n stacklevel=2\n )\n stride = st\n\n if padding is not None:\n if pad is not None:\n raise ValueError(\n \"You can't provide a tuple value to both 'padding' and pad.\"\n \" Please provide a value only to pad.\"\n )\n else:\n warnings.warn(\n \"DEPRECATION: the 'padding' parameter in PoolGrad is not\"\n \" going to exist anymore as it is going to be replaced\"\n \" by the parameter 'pad'.\",\n stacklevel=2\n )\n pad = padding\n\n if len(imgshape) < ndim:\n raise TypeError('imgshape must have at least {} dimensions'.format(ndim))\n\n if stride is None:\n stride = ws\n if pad is None:\n pad = (0,) * ndim\n patch_shape = tuple(tensor.extract_constant(imgshape[-ndim + i]) + pad[i] * 2\n for i in xrange(ndim))\n\n def compute_out(v, downsample, stride):\n if ignore_border:\n out = (v - downsample) // stride + 1\n if isinstance(out, theano.Variable):\n return tensor.maximum(out, 0)\n else:\n return np.maximum(out, 0)\n else:\n if isinstance(v, theano.Variable):\n return tensor.switch(tensor.ge(stride, downsample),\n (v - 1) // stride + 1,\n tensor.maximum(0, (v - 1 - downsample) //\n stride + 1) + 1)\n elif stride >= downsample:\n return (v - 1) // stride + 1\n else:\n return 
max(0, (v - 1 - downsample) // stride + 1) + 1\n\n out_shape = [compute_out(patch_shape[i], ws[i], stride[i]) for i in xrange(ndim)]\n\n rval = list(imgshape[:-ndim]) + out_shape\n return rval\n\n def __init__(self, ignore_border, mode='max', ndim=2, openmp=None):\n self.ndim = ndim\n self.ignore_border = ignore_border\n if mode == 'max_deterministic':\n # It seems max pool grad algo is already deterministic in CPU.\n mode = 'max'\n if mode not in ['max', 'sum', 'average_inc_pad', 'average_exc_pad']:\n raise ValueError(\n \"Pool mode parameter only support 'max', 'sum',\"\n \" 'average_inc_pad' and 'average_exc_pad'. Got %s\" % mode)\n self.mode = mode\n super(PoolGrad, self).__init__(openmp=openmp)\n\n def prepare_node(self, node, storage_map, compute_map, impl):\n if len(node.inputs) < 5: # 5 for AveragePoolGrad, 6 for MaxPoolGrad\n # Old interface\n self.ndim = len(node.op.ds)\n self.mode = node.op.mode\n ws = theano.tensor.constant(node.op.ds)\n st = theano.tensor.constant(node.op.st)\n pad = theano.tensor.constant(node.op.padding)\n node.inputs.append(ws)\n node.inputs.append(st)\n node.inputs.append(pad)\n if isinstance(ws, theano.Constant):\n storage_map[ws] = [ws.data]\n compute_map[ws] = [True]\n else:\n storage_map[ws] = [None]\n compute_map[ws] = [False]\n if isinstance(st, theano.Constant):\n storage_map[st] = [st.data]\n compute_map[st] = [True]\n else:\n storage_map[st] = [None]\n compute_map[st] = [False]\n if isinstance(pad, theano.Constant):\n storage_map[pad] = [pad.data]\n compute_map[pad] = [True]\n else:\n storage_map[pad] = [None]\n compute_map[pad] = [False]\n\n def infer_shape(self, node, in_shapes):\n return [in_shapes[0]]\n\n\nclass MaxPoolGrad(PoolGrad):\n # params_type ignore_border don't change c code\n\n def __init__(self, ignore_border, ndim=2, openmp=None):\n PoolGrad.__init__(self, ignore_border, mode='max', ndim=ndim, openmp=openmp)\n\n def make_node(self, x, maxout, gz, ws, stride=None, pad=None):\n # make_node should only be called by the grad function of\n # Pool, so these asserts should not fail.\n x = tensor.as_tensor_variable(x)\n maxout = tensor.as_tensor_variable(maxout)\n gz = tensor.as_tensor_variable(gz)\n nd = self.ndim\n if stride is None:\n stride = ws\n if pad is None:\n pad = (0,) * nd\n ws = tensor.as_tensor_variable(ws)\n stride = tensor.as_tensor_variable(stride)\n pad = tensor.as_tensor_variable(pad)\n assert isinstance(x, Variable) and x.ndim >= nd\n assert isinstance(maxout, Variable) and maxout.ndim >= nd\n assert isinstance(gz, Variable) and gz.ndim >= nd\n assert isinstance(ws, Variable) and ws.ndim == 1\n assert isinstance(stride, Variable) and stride.ndim == 1\n assert isinstance(pad, Variable) and pad.ndim == 1\n assert x.ndim == maxout.ndim == gz.ndim >= nd\n if ws.dtype not in tensor.int_dtypes:\n raise TypeError('Pool downsample parameters must be ints.')\n if stride.dtype not in tensor.int_dtypes:\n raise TypeError('Stride parameters must be ints.')\n if pad.dtype not in tensor.int_dtypes:\n raise TypeError('Padding parameters must be ints.')\n return Apply(self, [x, maxout, gz, ws, stride, pad], [x.type()])\n\n def perform(self, node, inp, out):\n assert self.mode == 'max'\n x, maxout, gz, ws, stride, pad = inp\n gx_stg, = out\n nd = self.ndim\n assert ws.shape == stride.shape == pad.shape == (nd,)\n if len(x.shape) < nd:\n raise NotImplementedError(\n 'MaxPoolGrad requires input with {} or more dimensions'.format(nd))\n pool_out_shp = maxout.shape[-nd:]\n img_shp = tuple(x.shape[-nd + i] + 2 * pad[i] for i in 
xrange(nd))\n\n # pad the image\n if max(pad) != 0:\n y = np.zeros(x.shape[:-nd] + img_shp, dtype=x.dtype)\n y[(slice(None),) * (len(x.shape) - nd) +\n tuple(slice(pad[i], img_shp[i] - pad[i]) for i in xrange(nd))] = x\n else:\n y = x\n gx = np.zeros_like(y)\n\n # precompute the region boundaries for each dimension\n region_ranges = [[] for i in xrange(nd)]\n for i in xrange(nd):\n for j in xrange(pool_out_shp[i]):\n start = builtins.max(j * stride[i], pad[i])\n end = builtins.min(start + ws[i], img_shp[i])\n region_ranges[i].append(xrange(start, end))\n\n # iterate over non-pooling dimensions\n for k in np.ndindex(*x.shape[:-nd]):\n gxk = gx[k]\n gzk = gz[k]\n yk = y[k]\n maxoutk = maxout[k]\n # iterate over pooling regions\n for r in np.ndindex(*pool_out_shp):\n maxout_value = maxoutk[r]\n # iterate inside region\n for c in itertools.product(*[region_ranges[i][r[i]]\n for i in xrange(nd)]):\n if maxout_value == yk[c]:\n gxk[c] += gzk[r]\n\n # unpad the image\n gx = gx[(slice(None),) * (len(x.shape) - nd) +\n tuple(slice(pad[i], img_shp[i] - pad[i]) for i in xrange(nd))]\n gx_stg[0] = gx\n\n def grad(self, inp, grads):\n x, maxout, gz, ws, stride, pad = inp\n ggx, = grads\n return ([theano.tensor.zeros_like(x),\n theano.tensor.zeros_like(maxout),\n DownsampleFactorMaxGradGrad(ndim=self.ndim,\n ignore_border=self.ignore_border)(\n x, maxout, ggx, ws, stride, pad)] +\n [DisconnectedType()() for i in inp[3:]])\n\n def connection_pattern(self, node):\n return [[1], [1], [1], [0], [0], [0]]\n\n def c_code(self, node, name, inp, out, sub):\n assert self.mode == 'max'\n x, z, gz, ws, stride, pad = inp\n gx, = out\n nd = self.ndim\n total_ndim = node.inputs[0].ndim\n non_pool_ndim = total_ndim - nd\n fail = sub['fail']\n\n if self.openmp:\n # run in parallel over each pooling block\n omp_parallel = '#pragma omp parallel for private(r_st, r_end, r_idx, i_idx, o_idx, maximum) schedule(static)'\n else:\n omp_parallel = ''\n\n ccode = \"\"\"\n // sanity checks\n int x_typenum = PyArray_ObjectType((PyObject*)%(x)s, 0);\n int z_typenum = PyArray_ObjectType((PyObject*)%(z)s, 0);\n int gz_typenum = PyArray_ObjectType((PyObject*)%(gz)s, 0);\n if ((x_typenum != z_typenum) || (x_typenum != gz_typenum))\n {\n PyErr_SetString(PyExc_ValueError, \"input types must all match\");\n %(fail)s;\n }\n if(PyArray_NDIM(%(x)s)!=%(total_ndim)s)\n {\n PyErr_SetString(PyExc_ValueError, \"x must be a %(total_ndim)sD ndarray\");\n %(fail)s;\n }\n if(PyArray_NDIM(%(z)s)!=%(total_ndim)s)\n {\n PyErr_SetString(PyExc_ValueError, \"z must be a %(total_ndim)sD ndarray\");\n %(fail)s;\n }\n if(PyArray_NDIM(%(gz)s)!=%(total_ndim)s)\n {\n PyErr_SetString(PyExc_ValueError, \"gz must be a %(total_ndim)sD ndarray\");\n %(fail)s;\n }\n if(PyArray_DIM(%(ws)s, 0)!=%(nd)s)\n {\n PyErr_SetString(PyExc_ValueError, \"ws must be a vector of size %(nd)s\");\n %(fail)s;\n }\n if(PyArray_DIM(%(stride)s, 0)!=%(nd)s)\n {\n PyErr_SetString(PyExc_ValueError, \"stride must be a vector of size %(nd)s\");\n %(fail)s;\n }\n if(PyArray_DIM(%(pad)s, 0)!=%(nd)s)\n {\n PyErr_SetString(PyExc_ValueError, \"pad must be a vector of size %(nd)s\");\n %(fail)s;\n }\n int z[%(nd)s]; // shape of the output\n int r[%(nd)s]; // shape of the padded_input\n int ws[%(nd)s];\n int st[%(nd)s];\n int pd[%(nd)s];\n int nonzero_padding;\n nonzero_padding = 0;\n for (int i=0; i<%(nd)s; i++)\n {\n ws[i] = *((npy_intp*)PyArray_GETPTR1(%(ws)s, i));\n st[i] = *((npy_intp*)PyArray_GETPTR1(%(stride)s, i));\n pd[i] = *((npy_intp*)PyArray_GETPTR1(%(pad)s, i));\n z[i] = 
PyArray_DIMS(%(z)s)[%(non_pool_ndim)s + i];\n r[i] = PyArray_DIMS(%(x)s)[%(non_pool_ndim)s + i] + 2 * pd[i];\n if (pd[i]>0)\n nonzero_padding = 1;\n }\n // allocating memory for output, if necessary\n int mem_nec;\n mem_nec = 0;\n if ((!%(gx)s) || !PyArray_ISCONTIGUOUS(%(gx)s)\n || *PyArray_DIMS(%(gx)s)!=%(total_ndim)s)\n {\n mem_nec = 1;\n }\n if (!mem_nec)\n {\n for (int i=0; i<%(total_ndim)s; i++)\n {\n if (PyArray_DIMS(%(gx)s)[i] != PyArray_DIMS(%(x)s)[i])\n {\n mem_nec = 1;\n break;\n }\n }\n }\n if (mem_nec)\n {\n Py_XDECREF(%(gx)s);\n %(gx)s = (PyArrayObject*) PyArray_ZEROS(%(total_ndim)s, PyArray_DIMS(%(x)s), x_typenum,0);\n }\n else {\n PyArray_FILLWBYTE(%(gx)s, 0);\n }\n dtype_%(z)s maximum; // temp var for maximum value in a region\n int z_prod;\n // do not run if any z[i] is zero\n z_prod = 1;\n for (int i=0; i<%(nd)s; i++)\n {\n z_prod *= z[i];\n }\n if (z_prod)\n {\n // will be used to hold start and end index of a region\n int r_st[%(nd)s];\n int r_end[%(nd)s];\n // index for iterating over the pooling regions\n int r_idx[%(nd)s];\n // placeholder for PyArray indexing (output)\n npy_intp o_idx[%(total_ndim)s];\n // placeholder for PyArray indexing (input)\n npy_intp i_idx[%(total_ndim)s];\n // loop over non-pooling dimensions\n int non_pooling_prod = 1;\n for (int i=0; i<%(non_pool_ndim)s; i++)\n {\n non_pooling_prod *= PyArray_DIMS(%(x)s)[i];\n }\n %(omp_parallel)s\n // first loop over non-pooling dimensions\n for (int t=0; t<non_pooling_prod; t++)\n {\n // compute the non-pooling index in each dimension\n if (%(non_pool_ndim)s!=0)\n {\n o_idx[0] = t;\n i_idx[0] = t;\n for (int i=1; i<%(non_pool_ndim)s; i++)\n {\n o_idx[i] = o_idx[i - 1] / PyArray_DIMS(%(x)s)[i - 1];\n o_idx[i - 1] =o_idx[i - 1] %% PyArray_DIMS(%(x)s)[i - 1];\n i_idx[i] = o_idx[i];\n i_idx[i - 1] = o_idx[i - 1];\n }\n }\n\n // then loop over each region in each pooling dimension\n \"\"\"\n\n for i in xrange(nd):\n ccode += \"\"\"\n for (r_idx[%(i)s]=0; r_idx[%(i)s] < z[%(i)s]; r_idx[%(i)s]++) {\n r_st[%(i)s] = r_idx[%(i)s] * st[%(i)s];\n r_end[%(i)s] = r_st[%(i)s] + ws[%(i)s];\n // skip the padding\n r_st[%(i)s] = r_st[%(i)s] < pd[%(i)s] ? pd[%(i)s] : r_st[%(i)s];\n r_end[%(i)s] = r_end[%(i)s] > (r[%(i)s] - pd[%(i)s]) ? 
r[%(i)s] - pd[%(i)s] : r_end[%(i)s];\n // from padded_img space to img space\n r_st[%(i)s] -= pd[%(i)s];\n r_end[%(i)s] -= pd[%(i)s];\n // use the index to find the correct position in the output\n o_idx[%(non_pool_ndim)s + %(i)s] = r_idx[%(i)s];\n \"\"\" % dict(i=i, non_pool_ndim=non_pool_ndim)\n\n ccode += \"\"\"\n dtype_%(gz)s * gz;\n if (%(total_ndim)s == 4)\n {\n // the maximum value\n maximum = ((dtype_%(z)s*)(PyArray_GETPTR4(%(z)s,o_idx[0],o_idx[1],o_idx[2],o_idx[3])))[0];\n // the gradient corresponding to this maximum value in z\n gz = ((dtype_%(gz)s*)(PyArray_GETPTR4(%(gz)s, o_idx[0],o_idx[1],o_idx[2],o_idx[3])));\n }\n else\n {\n // the maximum value\n maximum = ((dtype_%(z)s*)(PyArray_GetPtr(%(z)s,o_idx)))[0];\n // the gradient corresponding to this maximum value in z\n gz = ((dtype_%(gz)s*)(PyArray_GetPtr(%(gz)s, o_idx)));\n }\n \"\"\"\n for i in xrange(nd):\n ccode += \"\"\"\n // go through the pooled region in the unpadded input\n for(int m%(i)s=r_st[%(i)s]; m%(i)s<r_end[%(i)s]; m%(i)s++)\n {\n i_idx[%(non_pool_ndim)s + %(i)s] = m%(i)s;\n \"\"\" % dict(i=i, non_pool_ndim=non_pool_ndim)\n ccode += \"\"\"\n dtype_%(x)s a;\n dtype_%(gx)s * gx;\n if (%(total_ndim)s == 4)\n {\n a = ((dtype_%(x)s*)(PyArray_GETPTR4(%(x)s,i_idx[0],i_idx[1],i_idx[2],i_idx[3])))[0];\n gx = ((dtype_%(gx)s*)(PyArray_GETPTR4(%(gx)s, i_idx[0],i_idx[1],i_idx[2],i_idx[3])));\n }\n else\n {\n a = ((dtype_%(x)s*)(PyArray_GetPtr(%(x)s,i_idx)))[0];\n gx = ((dtype_%(gx)s*)(PyArray_GetPtr(%(gx)s, i_idx)));\n }\n if (a == maximum){\n gx[0] = gx[0] + gz[0];\n }\n \"\"\"\n for i in xrange(nd):\n ccode += \"\"\"\n } // for loop over region\n \"\"\"\n for i in xrange(nd):\n ccode += \"\"\"\n } // loop over pooling dimension\n \"\"\"\n\n ccode += \"\"\"\n } // for loop over non-pooling dimensions\n } // if z_prod\n \"\"\"\n return ccode % locals()\n\n def c_code_cache_version(self):\n return (0, 10, self.openmp)\n\n\nclass AveragePoolGrad(PoolGrad):\n # ignore_border is used for perform, but not c code. No need in params_type\n\n def __init__(self, ignore_border, mode='average_inc_pad', ndim=2):\n assert mode in ['sum', 'average_inc_pad', 'average_exc_pad']\n PoolGrad.__init__(self, ignore_border, mode, ndim)\n\n # There is an extra dummy parameter to match the parameter count\n # of MaxPoolGrad. 
They have to keep the same interface because of\n # the DownsampleFactorMaxGrad trick to keep old scripts working\n # (see downsample.py for details on this).\n def make_node(self, x, gz, ws, stride=None, pad=None, dummy=None):\n # make_node should only be called by the grad function of\n # Pool, so these asserts should not fail.\n x = tensor.as_tensor_variable(x)\n gz = tensor.as_tensor_variable(gz)\n nd = self.ndim\n if stride is None:\n stride = ws\n if pad is None:\n pad = (0,) * nd\n ws = tensor.as_tensor_variable(ws)\n stride = tensor.as_tensor_variable(stride)\n pad = tensor.as_tensor_variable(pad)\n assert isinstance(x, Variable) and x.ndim >= nd\n assert isinstance(gz, Variable) and gz.ndim >= nd\n assert isinstance(ws, Variable) and ws.ndim == 1\n assert isinstance(stride, Variable) and stride.ndim == 1\n assert x.ndim == gz.ndim >= nd\n assert isinstance(pad, Variable) and pad.ndim == 1\n if ws.dtype not in tensor.int_dtypes:\n raise TypeError('Pool downsample parameters must be ints.')\n if stride.dtype not in tensor.int_dtypes:\n raise TypeError('Stride parameters must be ints.')\n if pad.dtype not in tensor.int_dtypes:\n raise TypeError('Padding parameters must be ints.')\n return Apply(self, [x, gz, ws, stride, pad], [x.type()])\n\n def perform(self, node, inp, out):\n x, gz, ws, stride, pad = inp\n gx_stg, = out\n nd = self.ndim\n assert ws.shape == stride.shape == pad.shape == (nd,)\n if len(x.shape) < nd:\n raise NotImplementedError(\n 'AveragePoolGrad requires input with {} or more dimensions'.format(nd))\n if self.mode == 'average_exc_pad' and max(pad) != 0:\n raise NotImplementedError()\n z_shape = self.out_shape(x.shape, ws, self.ignore_border, stride, pad, nd)\n if (gx_stg[0] is None) or (gx_stg[0].shape != z_shape):\n gx_stg[0] = np.empty(z_shape, dtype=x.dtype)\n zz = gx_stg[0]\n # size of pooling output\n pool_out_shp = zz.shape[-nd:]\n img_shp = tuple(x.shape[-nd + i] + 2 * pad[i] for i in xrange(nd))\n inc_pad = self.mode == 'average_inc_pad'\n sum_mode = self.mode == 'sum'\n\n # initialize the padded output\n gx = np.zeros((x.shape[:-nd] + img_shp), dtype=x.dtype)\n\n # precompute the region boundaries and sizes for each dimension\n region_slices = [[] for i in xrange(nd)]\n region_sizes = [[] for i in xrange(nd)]\n for i in xrange(nd):\n for j in xrange(pool_out_shp[i]):\n if sum_mode or inc_pad:\n start = j * stride[i]\n else:\n start = builtins.max(j * stride[i], pad[i])\n end = builtins.min(start + ws[i], img_shp[i])\n region_slices[i].append(slice(start, end))\n region_sizes[i].append(end - start)\n\n # iterate over non-pooling dimensions\n region_slice = [None] * nd\n for k in np.ndindex(*x.shape[:-nd]):\n gzk = gz[k]\n gxk = gx[k]\n # iterate over pooling regions\n for r in np.ndindex(*pool_out_shp):\n region_size = 1\n for i in xrange(nd):\n region_slice[i] = region_slices[i][r[i]]\n region_size *= region_sizes[i][r[i]]\n if sum_mode:\n val = gzk[r]\n else:\n # divide by region size\n val = gzk[r] / region_size\n gxk[region_slice] += val\n\n # unpad the image\n gx = gx[(slice(None),) * (len(x.shape) - nd) +\n tuple(slice(pad[i], img_shp[i] - pad[i]) for i in xrange(nd))]\n gx_stg[0] = gx\n\n def grad(self, inp, grads):\n x, gz, ws, stride, pad = inp\n ggx, = grads\n return ([theano.tensor.zeros_like(x),\n Pool(ignore_border=self.ignore_border,\n ndim=self.ndim, mode=self.mode)(ggx,\n ws, stride, pad)] + [DisconnectedType()() for i in inp[2:]])\n\n def connection_pattern(self, node):\n return [[1], [1], [0], [0], [0]]\n\n def c_code(self, node, name, 
inp, out, sub):\n x, gz, ws, stride, pad = inp\n gx, = out\n nd = self.ndim\n total_ndim = node.inputs[0].ndim\n non_pool_ndim = total_ndim - nd\n fail = sub['fail']\n inc_pad = int(self.mode == 'average_inc_pad')\n sum_mode = int(self.mode == 'sum')\n if self.openmp:\n # run in parallel over each pooling block\n omp_parallel = '#pragma omp parallel for private(r_st, r_end, r_pad_width, r_idx, i_idx, o_idx) schedule(static)'\n else:\n omp_parallel = ''\n\n ccode = \"\"\"\n // sanity checks\n int x_typenum = PyArray_ObjectType((PyObject*)%(x)s, 0);\n int gz_typenum = PyArray_ObjectType((PyObject*)%(gz)s, 0);\n if (x_typenum != gz_typenum)\n {\n PyErr_SetString(PyExc_ValueError, \"input types must all match\");\n %(fail)s;\n }\n if(PyArray_NDIM(%(x)s)!=%(total_ndim)s)\n {\n PyErr_SetString(PyExc_ValueError, \"x must be a %(total_ndim)sD ndarray\");\n %(fail)s;\n }\n if(PyArray_NDIM(%(gz)s)!=%(total_ndim)s)\n {\n PyErr_SetString(PyExc_ValueError, \"gz must be a %(total_ndim)sD ndarray\");\n %(fail)s;\n }\n if(PyArray_DIM(%(ws)s, 0)!=%(nd)s)\n {\n PyErr_SetString(PyExc_ValueError, \"ws must be a vector of size %(nd)s\");\n %(fail)s;\n }\n if(PyArray_DIM(%(stride)s, 0)!=%(nd)s)\n {\n PyErr_SetString(PyExc_ValueError, \"stride must be a vector of size %(nd)s\");\n %(fail)s;\n }\n if(PyArray_DIM(%(pad)s, 0)!=%(nd)s)\n {\n PyErr_SetString(PyExc_ValueError, \"pad must be a vector of size %(nd)s\");\n %(fail)s;\n }\n int z[%(nd)s]; // shape of the output\n int r[%(nd)s]; // shape of the padded_input\n int ws[%(nd)s];\n int st[%(nd)s];\n int pd[%(nd)s];\n int nonzero_padding;\n nonzero_padding = 0;\n for (int i=0; i<%(nd)s; i++)\n {\n ws[i] = *((npy_intp*)PyArray_GETPTR1(%(ws)s, i));\n st[i] = *((npy_intp*)PyArray_GETPTR1(%(stride)s, i));\n pd[i] = *((npy_intp*)PyArray_GETPTR1(%(pad)s, i));\n z[i] = PyArray_DIMS(%(gz)s)[%(non_pool_ndim)s + i];\n r[i] = PyArray_DIMS(%(x)s)[%(non_pool_ndim)s + i] + 2 * pd[i];\n if (pd[i]>0)\n nonzero_padding = 1;\n }\n if (!%(inc_pad)s && !%(sum_mode)s && nonzero_padding)\n {\n PyErr_SetString(PyExc_ValueError,\n \"padding must be zero for average_exc_pad\");\n %(fail)s;\n }\n // allocating memory for output, if necessary\n int mem_nec;\n mem_nec = 0;\n if ((!%(gx)s) || !PyArray_ISCONTIGUOUS(%(gx)s)\n || *PyArray_DIMS(%(gx)s)!=%(total_ndim)s)\n {\n mem_nec = 1;\n }\n if (!mem_nec)\n {\n for (int i=0; i<%(total_ndim)s; i++)\n {\n if (PyArray_DIMS(%(gx)s)[i] != PyArray_DIMS(%(x)s)[i])\n {\n mem_nec = 1;\n break;\n }\n }\n }\n if (mem_nec)\n {\n Py_XDECREF(%(gx)s);\n %(gx)s = (PyArrayObject*) PyArray_ZEROS(%(total_ndim)s, PyArray_DIMS(%(x)s), x_typenum,0);\n }\n else {\n PyArray_FILLWBYTE(%(gx)s, 0);\n }\n int z_prod;\n // do not run if any z[i] is zero\n z_prod = 1;\n for (int i=0; i<%(nd)s; i++)\n {\n z_prod *= z[i];\n }\n if (z_prod)\n {\n // will be used to hold start and end index of a region\n int r_st[%(nd)s];\n int r_end[%(nd)s];\n // padded region size\n int r_pad_width[%(nd)s];\n // index for iterating over the pooling regions\n int r_idx[%(nd)s];\n // placeholder for PyArray indexing (output)\n npy_intp o_idx[%(total_ndim)s];\n // placeholder for PyArray indexing (input)\n npy_intp i_idx[%(total_ndim)s];\n // loop over non-pooling dimensions\n int non_pooling_prod = 1;\n for (int i=0; i<%(non_pool_ndim)s; i++)\n {\n non_pooling_prod *= PyArray_DIMS(%(x)s)[i];\n }\n %(omp_parallel)s\n // first loop over non-pooling dimensions\n for (int t=0; t<non_pooling_prod; t++)\n {\n // compute the non-pooling index in each dimension\n if (%(non_pool_ndim)s!=0)\n {\n 
o_idx[0] = t;\n i_idx[0] = t;\n for (int i=1; i<%(non_pool_ndim)s; i++)\n {\n o_idx[i] = o_idx[i - 1] / PyArray_DIMS(%(x)s)[i - 1];\n o_idx[i - 1] =o_idx[i - 1] %% PyArray_DIMS(%(x)s)[i - 1];\n i_idx[i] = o_idx[i];\n i_idx[i - 1] = o_idx[i - 1];\n }\n }\n\n // then loop over each region in each pooling dimension\n \"\"\"\n\n for i in xrange(nd):\n ccode += \"\"\"\n for (r_idx[%(i)s]=0; r_idx[%(i)s] < z[%(i)s]; r_idx[%(i)s]++) {\n r_st[%(i)s] = r_idx[%(i)s] * st[%(i)s];\n if (!%(sum_mode)s && !%(inc_pad)s && r_st[%(i)s] < pd[%(i)s])\n {\n r_st[%(i)s] = pd[%(i)s];\n }\n r_end[%(i)s] = r_st[%(i)s] + ws[%(i)s];\n r_end[%(i)s] = r_end[%(i)s] > r[%(i)s] ? r[%(i)s] : r_end[%(i)s];\n r_pad_width[%(i)s] = r_end[%(i)s] - r_st[%(i)s];\n // from padded_img space to img space\n r_st[%(i)s] = r_st[%(i)s] - pd[%(i)s] > 0 ? r_st[%(i)s] - pd[%(i)s] : 0;\n r_end[%(i)s] = r_end[%(i)s] > r[%(i)s] - pd[%(i)s] ? r[%(i)s] - 2 * pd[%(i)s] : r_end[%(i)s] - pd[%(i)s];\n\n // use the index to find the correct position in the output\n o_idx[%(non_pool_ndim)s + %(i)s] = r_idx[%(i)s];\n \"\"\" % dict(i=i, sum_mode=sum_mode, inc_pad=inc_pad, non_pool_ndim=non_pool_ndim)\n\n ccode += \"\"\"\n dtype_%(gz)s * gz;\n dtype_%(gz)s val;\n if (%(total_ndim)s == 4)\n {\n // the gradient for this region\n gz = ((dtype_%(gz)s*)(PyArray_GETPTR4(%(gz)s, o_idx[0],o_idx[1],o_idx[2],o_idx[3])));\n }\n else\n {\n // the gradient for this region\n gz = ((dtype_%(gz)s*)(PyArray_GetPtr(%(gz)s, o_idx)));\n }\n // compute the contribution\n if (%(sum_mode)s)\n {\n val = gz[0];\n }\n else\n {\n val = gz[0] / (%(region_size)s);\n }\n \"\"\"\n region_size = ' * '.join('r_pad_width[%d]' % i for i in xrange(nd))\n for i in xrange(nd):\n ccode += \"\"\"\n // go through the pooled region in the unpadded input\n for(int m%(i)s=r_st[%(i)s]; m%(i)s<r_end[%(i)s]; m%(i)s++)\n {\n i_idx[%(non_pool_ndim)s + %(i)s] = m%(i)s;\n \"\"\" % dict(i=i, non_pool_ndim=non_pool_ndim)\n ccode += \"\"\"\n dtype_%(gx)s * gx;\n if (%(total_ndim)s == 4)\n {\n gx = ((dtype_%(gx)s*)(PyArray_GETPTR4(%(gx)s, i_idx[0],i_idx[1],i_idx[2],i_idx[3])));\n }\n else\n {\n gx = ((dtype_%(gx)s*)(PyArray_GetPtr(%(gx)s, i_idx)));\n }\n gx[0] = gx[0] + val;\n \"\"\"\n for i in xrange(nd):\n ccode += \"\"\"\n } // for loop over region\n \"\"\"\n for i in xrange(nd):\n ccode += \"\"\"\n } // loop over pooling dimension\n \"\"\"\n\n ccode += \"\"\"\n } // for loop over non-pooling dimensions\n } // if z_prod\n \"\"\"\n return ccode % locals()\n\n def c_code_cache_version(self):\n return (0, 3, self.openmp)\n\n\nclass DownsampleFactorMaxGradGrad(OpenMPOp):\n __props__ = ('ignore_border', 'mode', 'ndim')\n\n def __init__(self, ignore_border, mode='max', ndim=2, openmp=None):\n self.ndim = ndim\n self.ignore_border = ignore_border\n self.mode = mode\n super(DownsampleFactorMaxGradGrad, self).__init__(openmp=openmp)\n assert self.mode == 'max'\n\n def make_node(self, x, maxout, gz, ws, stride=None, pad=None):\n # make_node should only be called by the grad function of\n # MaxPoolGrad, so these asserts should not fail.\n x = tensor.as_tensor_variable(x)\n maxout = tensor.as_tensor_variable(maxout)\n gz = tensor.as_tensor_variable(gz)\n nd = self.ndim\n if stride is None:\n stride = ws\n if pad is None:\n pad = (0,) * nd\n elif isinstance(pad, (tuple, list)):\n if max(pad) != 0 and not self.ignore_border:\n raise NotImplementedError(\n 'padding works only with ignore_border=True')\n if isinstance(ws, (tuple, list)):\n if any(pad[i] >= ws[i] for i in range(nd)):\n raise NotImplementedError(\n 
'padding must be smaller than strides')\n ws = tensor.as_tensor_variable(ws)\n stride = tensor.as_tensor_variable(stride)\n pad = tensor.as_tensor_variable(pad)\n assert ws.ndim == 1\n assert stride.ndim == 1\n assert pad.ndim == 1\n assert x.ndim == maxout.ndim == gz.ndim >= nd\n if ws.dtype not in tensor.int_dtypes:\n raise TypeError('Pool downsample parameters must be ints.')\n if stride.dtype not in tensor.int_dtypes:\n raise TypeError('Stride parameters must be ints.')\n if pad.dtype not in tensor.int_dtypes:\n raise TypeError('Padding parameters must be ints.')\n return Apply(self, [x, maxout, gz, ws, stride, pad], [x.type()])\n\n def perform(self, node, inp, out):\n x, maxout, ggx, ws, stride, pad = inp\n z, = out\n nd = self.ndim\n assert ws.shape == stride.shape == pad.shape == (nd,)\n if len(x.shape) < nd:\n raise NotImplementedError(\n 'DownsampleFactorMaxGradGrad requires input '\n 'with {} or more dimensions'.format(nd))\n if (z[0] is None) or (z[0].shape != maxout.shape):\n z[0] = np.zeros(maxout.shape, dtype=x.dtype)\n ggz = z[0] # grad wrt maxout_grad has the same shape as maxout\n # size of pooling output\n pool_out_shp = ggz.shape[-nd:]\n img_shp = tuple(x.shape[-nd + i] + 2 * pad[i] for i in xrange(nd))\n\n # pad the image and its gradients\n if max(pad) > 0:\n y_padded = np.zeros(x.shape[:-nd] + img_shp, dtype=x.dtype)\n y_padded[(slice(None),) * (len(x.shape) - nd) +\n tuple(slice(pad[i], img_shp[i] - pad[i]) for i in xrange(nd))] = x\n ggx_padded = np.zeros(x.shape[:-nd] + img_shp, dtype=x.dtype)\n ggx_padded[(slice(None),) * (len(x.shape) - nd) +\n tuple(slice(pad[i], img_shp[i] - pad[i]) for i in xrange(nd))] = ggx\n\n else:\n y_padded = x\n ggx_padded = ggx\n\n # precompute the region boundaries for each dimension\n region_ranges = [[] for i in xrange(nd)]\n for i in xrange(nd):\n for j in xrange(pool_out_shp[i]):\n start = j * stride[i]\n end = builtins.min(start + ws[i], img_shp[i])\n region_ranges[i].append(xrange(start, end))\n\n # iterate over non-pooling dimensions\n for k in np.ndindex(*x.shape[:-nd]):\n ggxk = ggx_padded[k]\n ggzk = ggz[k]\n yk = y_padded[k]\n maxoutk = maxout[k]\n # iterate over pooling regions\n for r in np.ndindex(*pool_out_shp):\n # iterate inside region\n maxout_value = maxoutk[r]\n for c in itertools.product(*[region_ranges[i][r[i]]\n for i in xrange(nd)]):\n if maxout_value == yk[c]:\n ggzk[r] += ggxk[c]\n\n def infer_shape(self, node, in_shapes):\n return [in_shapes[1]]\n\n def grad(self, inp, grads):\n x, maxout, ggx, ws, stride, pad = inp\n gz, = grads\n return [theano.tensor.zeros_like(x),\n theano.tensor.zeros_like(maxout),\n MaxPoolGrad(ignore_border=self.ignore_border,\n ndim=self.ndim)(x, maxout, gz,\n ws, stride, pad),\n DisconnectedType()(),\n DisconnectedType()(),\n DisconnectedType()()]\n\n def connection_pattern(self, node):\n return [[1], [1], [1], [0], [0], [0]]\n\n def c_code(self, node, name, inp, out, sub):\n if self.mode != 'max':\n raise theano.gof.utils.MethodNotDefined()\n x, maxout, ggx, ws, stride, pad = inp\n z, = out # the grad of grad\n nd = self.ndim\n total_ndim = node.inputs[0].ndim\n non_pool_ndim = total_ndim - nd\n fail = sub['fail']\n\n if self.openmp:\n # run in parallel over each pooling block\n omp_parallel = '#pragma omp parallel for private(r_st, r_end, r_idx, i_idx, o_idx, maximum) schedule(static)'\n else:\n omp_parallel = ''\n ccode = \"\"\"\n int z_typenum = PyArray_ObjectType((PyObject*)%(maxout)s, 0);\n int z[%(nd)s]; // shape of the output\n int r[%(nd)s]; // shape of the padded_input\n 
int ws[%(nd)s];\n int st[%(nd)s];\n int pd[%(nd)s];\n if(PyArray_DIM(%(ws)s, 0)!=%(nd)s)\n {\n PyErr_SetString(PyExc_ValueError, \"ws must be a vector of size %(nd)s\");\n %(fail)s;\n }\n if(PyArray_DIM(%(stride)s, 0)!=%(nd)s)\n {\n PyErr_SetString(PyExc_ValueError, \"stride must be a vector of size %(nd)s\");\n %(fail)s;\n }\n if(PyArray_DIM(%(pad)s, 0)!=%(nd)s)\n {\n PyErr_SetString(PyExc_ValueError, \"pad must be a vector of size %(nd)s\");\n %(fail)s;\n }\n for (int i=0; i<%(nd)s; i++)\n {\n ws[i] = *((npy_intp*)PyArray_GETPTR1(%(ws)s, i));\n st[i] = *((npy_intp*)PyArray_GETPTR1(%(stride)s, i));\n pd[i] = *((npy_intp*)PyArray_GETPTR1(%(pad)s, i));\n z[i] = PyArray_DIMS(%(maxout)s)[%(non_pool_ndim)s + i];\n r[i] = PyArray_DIMS(%(x)s)[%(non_pool_ndim)s + i] + 2 * pd[i];\n }\n // allocating memory for output, if necessary\n int mem_nec;\n mem_nec = 0;\n if ((!%(z)s) || !PyArray_ISCONTIGUOUS(%(z)s)\n || *PyArray_DIMS(%(z)s)!=%(total_ndim)s)\n {\n mem_nec = 1;\n }\n if (!mem_nec)\n {\n for (int i=0; i<%(total_ndim)s; i++)\n {\n if (PyArray_DIMS(%(z)s)[i] != PyArray_DIMS(%(maxout)s)[i])\n {\n mem_nec = 1;\n break;\n }\n }\n }\n if (mem_nec)\n {\n Py_XDECREF(%(z)s);\n %(z)s = (PyArrayObject*) PyArray_ZEROS(%(total_ndim)s, PyArray_DIMS(%(maxout)s), z_typenum,0);\n }\n else {\n PyArray_FILLWBYTE(%(z)s, 0);\n }\n dtype_%(maxout)s maximum; // temp var for maximum value in a region\n // will be used to hold start and end index of a region\n int r_st[%(nd)s];\n int r_end[%(nd)s];\n // index for iterating over the pooling regions\n int r_idx[%(nd)s];\n // placeholder for PyArray indexing (output)\n npy_intp o_idx[%(total_ndim)s];\n // placeholder for PyArray indexing (input)\n npy_intp i_idx[%(total_ndim)s];\n // loop over non-pooling dimensions\n int non_pooling_prod;\n non_pooling_prod = 1;\n for (int i=0; i<%(non_pool_ndim)s; i++)\n {\n non_pooling_prod *= PyArray_DIMS(%(x)s)[i];\n }\n %(omp_parallel)s\n // first loop over non-pooling dimensions\n for (int t=0; t<non_pooling_prod; t++)\n {\n // compute the non-pooling index in each dimension\n if (%(non_pool_ndim)s!=0)\n {\n o_idx[0] = t;\n i_idx[0] = t;\n for (int i=1; i<%(non_pool_ndim)s; i++)\n {\n o_idx[i] = o_idx[i - 1] / PyArray_DIMS(%(x)s)[i - 1];\n o_idx[i - 1] = o_idx[i - 1] %% PyArray_DIMS(%(x)s)[i - 1];\n i_idx[i] = o_idx[i];\n i_idx[i - 1] = o_idx[i - 1];\n }\n }\n\n // then loop over each region in each pooling dimension\n \"\"\"\n\n for i in xrange(nd):\n ccode += \"\"\"\n for (r_idx[%(i)s]=0; r_idx[%(i)s] < z[%(i)s]; r_idx[%(i)s]++) {\n r_st[%(i)s] = r_idx[%(i)s] * st[%(i)s];\n r_end[%(i)s] = r_st[%(i)s] + ws[%(i)s];\n // skip the padding\n r_st[%(i)s] = r_st[%(i)s] < pd[%(i)s] ? pd[%(i)s] : r_st[%(i)s];\n r_end[%(i)s] = r_end[%(i)s] > (r[%(i)s] - pd[%(i)s]) ? 
r[%(i)s] - pd[%(i)s] : r_end[%(i)s];\n // from padded_img space to img space\n r_st[%(i)s] -= pd[%(i)s];\n r_end[%(i)s] -= pd[%(i)s];\n // use the index to find the correct position in the output\n o_idx[%(non_pool_ndim)s + %(i)s] = r_idx[%(i)s];\n \"\"\" % dict(i=i, non_pool_ndim=non_pool_ndim)\n\n ccode += \"\"\"\n dtype_%(z)s * z;\n if (%(total_ndim)s == 4)\n {\n // the maximum value\n maximum = ((dtype_%(maxout)s*)(PyArray_GETPTR4(%(maxout)s,o_idx[0],o_idx[1],o_idx[2],o_idx[3])))[0];\n // z at this position\n z = ((dtype_%(z)s*)(PyArray_GETPTR4(%(z)s,o_idx[0],o_idx[1],o_idx[2],o_idx[3])));\n }\n else\n {\n // the maximum value\n maximum = ((dtype_%(maxout)s*)(PyArray_GetPtr(%(maxout)s,o_idx)))[0];\n // z at this position\n z = ((dtype_%(z)s*)(PyArray_GetPtr(%(z)s,o_idx)));\n }\n \"\"\"\n for i in xrange(nd):\n ccode += \"\"\"\n // go through the pooled region in the unpadded input\n for(int m%(i)s=r_st[%(i)s]; m%(i)s<r_end[%(i)s]; m%(i)s++)\n {\n i_idx[%(non_pool_ndim)s + %(i)s] = m%(i)s;\n \"\"\" % dict(i=i, non_pool_ndim=non_pool_ndim)\n ccode += \"\"\"\n dtype_%(x)s a;\n dtype_%(ggx)s * ggx;\n if (%(total_ndim)s == 4)\n {\n a = ((dtype_%(x)s*)(PyArray_GETPTR4(%(x)s,i_idx[0],i_idx[1],i_idx[2],i_idx[3])))[0];\n ggx = ((dtype_%(ggx)s*)(PyArray_GETPTR4(%(ggx)s,i_idx[0],i_idx[1],i_idx[2],i_idx[3])));\n }\n else\n {\n a = ((dtype_%(x)s*)(PyArray_GetPtr(%(x)s,i_idx)))[0];\n ggx = ((dtype_%(ggx)s*)(PyArray_GetPtr(%(ggx)s,i_idx)));\n }\n if (a == maximum){\n z[0] += ggx[0];\n }\n \"\"\"\n for i in xrange(nd):\n ccode += \"\"\"\n } // for loop over region\n \"\"\"\n for i in xrange(nd):\n ccode += \"\"\"\n } // loop over pooling dimension\n \"\"\"\n\n ccode += \"\"\"\n } // for loop over non-pooling dimensions\n \"\"\"\n return ccode % locals()\n\n def c_code_cache_version(self):\n return (0, 4, self.openmp)\n\n\nclass MaxPoolRop(OpenMPOp):\n \"\"\"\n Implements the R-operator for the downsample operation.\n\n Parameters\n ----------\n ws : list or tuple of N ints\n Downsample factor over rows, columns etc.\n ws indicates the size of the pooling region.\n ignore_border : bool\n If ws doesn't divide imgshape, do we include an extra row/col/slice\n of partial downsampling (False) or ignore it (True).\n stride : list or tuple of N ints or None\n Stride size, which is the number of shifts over rows/cols/slices to get the\n next pool region. If stride is None, it is considered equal to ws\n (no overlap on pooling regions).\n pad : tuple of N ints or None\n For each downsampling dimension, this specifies the number of zeros to\n add as padding on both sides. For 2D and (pad_h, pad_w), pad_h specifies the\n size of the top and bottom margins, pad_w specifies the size of the left and\n right margins. 
No padding is added if pad is None.\n mode : {'max', 'sum', 'average_inc_pad', 'average_exc_pad'}\n ('average_inc_pad' excludes the padding from the count,\n 'average_exc_pad' include it)\n ndim : int\n The number of pooling dimensions N.\n The default is 2.\n \"\"\"\n\n __props__ = ('ignore_border', 'mode', 'ndim')\n params_type = ParamsType(ignore_border=bool_t,)\n\n def __init__(self, ignore_border=False, mode='max', ndim=2, openmp=None):\n super(MaxPoolRop, self).__init__(openmp=openmp)\n self.ndim = ndim\n self.ignore_border = ignore_border\n self.mode = mode\n assert mode == 'max'\n\n def make_node(self, x, eval_point, ws, stride=None, pad=None):\n # TODO: consider restricting the dtype?\n x = tensor.as_tensor_variable(x)\n eval_point = tensor.as_tensor_variable(eval_point)\n nd = self.ndim\n if stride is None:\n stride = ws\n if pad is None:\n pad = (0,) * nd\n elif isinstance(pad, (tuple, list)):\n if max(pad) != 0 and not self.ignore_border:\n raise NotImplementedError(\n 'padding works only with ignore_border=True')\n if isinstance(ws, (tuple, list)):\n if any(pad[i] >= ws[i] for i in range(nd)):\n raise NotImplementedError(\n 'padding must be smaller than strides')\n ws = tensor.as_tensor_variable(ws)\n stride = tensor.as_tensor_variable(stride)\n pad = tensor.as_tensor_variable(pad)\n assert ws.ndim == 1\n assert stride.ndim == 1\n assert pad.ndim == 1\n if x.type.ndim < nd:\n raise TypeError()\n if not ws.dtype.startswith('int'):\n raise TypeError('Pool downsample parameters must be ints.')\n if not stride.dtype.startswith('int'):\n raise TypeError('Stride parameters must be ints.')\n if not pad.dtype.startswith('int'):\n raise TypeError('Padding parameters must be ints.')\n # If the input shape are broadcastable we can have 0 in the output shape\n broad = x.broadcastable[:-nd] + (False,) * nd\n out = tensor.TensorType(eval_point.dtype, broad)\n return gof.Apply(self, [x, eval_point, ws, stride, pad], [out()])\n\n def perform(self, node, inp, out, params):\n x, ex, ws, stride, pad = inp\n z, = out\n nd = self.ndim\n assert ws.shape == stride.shape == pad.shape == (nd,)\n if len(x.shape) < nd:\n raise NotImplementedError(\n 'Pool requires input with {} or more dimensions'.format(nd))\n z_shape = Pool.out_shape(x.shape, ws, params.ignore_border, stride, pad, nd)\n if not self.ignore_border:\n assert all(z > 0 for z in z_shape[-nd:])\n if (z[0] is None) or (z[0].shape != z_shape):\n z[0] = np.empty(z_shape, dtype=x.dtype)\n zz = z[0]\n # size of pooling output\n pool_out_shp = zz.shape[-nd:]\n img_shp = tuple(x.shape[-nd + i] + 2 * pad[i] for i in xrange(nd))\n inc_pad = self.mode == 'average_inc_pad'\n\n # pad the image and the eval point\n if max(pad) != 0:\n y = np.zeros(x.shape[:-nd] + img_shp, dtype=x.dtype)\n y[(slice(None),) * (len(x.shape) - nd) +\n tuple(slice(pad[i], img_shp[i] - pad[i]) for i in xrange(nd))] = x\n ey = np.zeros(ex.shape[:-nd] + img_shp, dtype=ex.dtype)\n ey[(slice(None),) * (len(ex.shape) - nd) +\n tuple(slice(pad[i], img_shp[i] - pad[i]) for i in xrange(nd))] = ex\n else:\n y = x\n ey = ex\n\n # precompute the region boundaries for each dimension\n region_slices = [[] for i in xrange(nd)]\n for i in xrange(nd):\n for j in xrange(pool_out_shp[i]):\n start = j * stride[i]\n end = builtins.min(start + ws[i], img_shp[i])\n if not inc_pad:\n start = builtins.max(start, pad[i])\n end = builtins.min(end, img_shp[i] - pad[i])\n region_slices[i].append(slice(start, end))\n\n # iterate over non-pooling dimensions\n for k in np.ndindex(*x.shape[:-nd]):\n zzk = 
zz[k]\n yk = y[k]\n eyk = ey[k]\n # iterate over pooling regions\n for r in np.ndindex(*pool_out_shp):\n # current slice in padded input\n ykslice = yk[[region_slices[i][r[i]] for i in xrange(nd)]]\n # current slice in eval points\n eykslice = eyk[[region_slices[i][r[i]] for i in xrange(nd)]]\n # indices of maximum\n idx = np.unravel_index(np.argmax(ykslice), ykslice.shape)\n zzk[r] = eykslice[idx]\n\n def c_headers(self):\n headers = ['<algorithm>']\n headers += super(MaxPoolRop, self).c_headers()\n return headers\n\n def c_code(self, node, name, inp, out, sub):\n if self.mode != 'max':\n raise theano.gof.utils.MethodNotDefined()\n x, ex, ws, stride, pad = inp\n z, = out\n nd = self.ndim\n total_ndim = node.inputs[0].ndim\n non_pool_ndim = total_ndim - nd\n fail = sub['fail']\n params = sub['params']\n\n if self.openmp:\n # run in parallel over each pooling block\n omp_parallel = '#pragma omp parallel for private(r_st, r_end, r_idx, i_idx, o_idx, collector, eval_collector) schedule(static)'\n else:\n omp_parallel = ''\n ccode = \"\"\"\n int typenum = PyArray_ObjectType((PyObject*)%(x)s, 0);\n if(PyArray_NDIM(%(x)s)!=%(total_ndim)s)\n {\n PyErr_SetString(PyExc_ValueError, \"x must be a %(total_ndim)sD ndarray\");\n %(fail)s;\n }\n if(PyArray_NDIM(%(ex)s)!=%(total_ndim)s)\n {\n PyErr_SetString(PyExc_ValueError, \"eval_point must be a %(total_ndim)sD ndarray\");\n %(fail)s;\n }\n if(PyArray_DIM(%(ws)s, 0)!=%(nd)s)\n {\n PyErr_SetString(PyExc_ValueError, \"ws must be a vector of size %(nd)s\");\n %(fail)s;\n }\n if(PyArray_DIM(%(stride)s, 0)!=%(nd)s)\n {\n PyErr_SetString(PyExc_ValueError, \"stride must be a vector of size %(nd)s\");\n %(fail)s;\n }\n if(PyArray_DIM(%(pad)s, 0)!=%(nd)s)\n {\n PyErr_SetString(PyExc_ValueError, \"pad must be a vector of size %(nd)s\");\n %(fail)s;\n }\n int z[%(nd)s]; // shape of the output\n int r[%(nd)s]; // shape of the padded_input\n int ws[%(nd)s];\n int st[%(nd)s];\n int pd[%(nd)s];\n int nonzero_padding;\n nonzero_padding = 0;\n for (int i=0; i<%(nd)s; i++)\n {\n ws[i] = *((npy_intp*)PyArray_GETPTR1(%(ws)s, i));\n st[i] = *((npy_intp*)PyArray_GETPTR1(%(stride)s, i));\n pd[i] = *((npy_intp*)PyArray_GETPTR1(%(pad)s, i));\n r[i] = PyArray_DIMS(%(x)s)[%(non_pool_ndim)s + i] + 2 * pd[i];\n if (pd[i]>0)\n nonzero_padding = 1;\n }\n if (!%(params)s->ignore_border && nonzero_padding)\n {\n PyErr_SetString(PyExc_ValueError,\n \"padding must be zero when ignore border is False\");\n %(fail)s;\n }\n if (%(params)s->ignore_border)\n {\n for (int i=0; i<%(nd)s; i++)\n {\n // '/' in C is different from '/' in python\n if (r[i] - ws[i] < 0)\n {\n z[i] = 0;\n }\n else\n {\n z[i] = (r[i] - ws[i]) / st[i] + 1;\n }\n }\n }\n else\n {\n for (int i=0; i<%(nd)s; i++)\n {\n // decide how many rows/cols the output has\n if (st[i] >= ws[i])\n {\n z[i] = (r[i] - 1) / st[i] + 1;\n }\n else\n {\n z[i] = std::max(0, (r[i] - 1 - ws[i] + st[i]) / st[i]) + 1;\n }\n assert(z[i] > 0);\n }\n }\n // memory allocation of z if necessary\n int mem_nec;\n mem_nec = 0;\n if ((!%(z)s) || *PyArray_DIMS(%(z)s)!=%(total_ndim)s)\n {\n mem_nec = 1;\n }\n if (!mem_nec)\n {\n for (int i=0; i<%(non_pool_ndim)s; i++)\n {\n if (PyArray_DIMS(%(z)s)[i] != PyArray_DIMS(%(x)s)[i])\n {\n mem_nec = 1;\n break;\n }\n }\n }\n if (!mem_nec)\n {\n for (int i=0; i<%(nd)s; i++)\n {\n if (PyArray_DIMS(%(z)s)[%(non_pool_ndim)s + i] != z[i])\n {\n mem_nec = 1;\n break;\n }\n }\n }\n if (mem_nec)\n {\n if (%(z)s) Py_XDECREF(%(z)s);\n npy_intp dims[%(total_ndim)s];\n for (int i=0; i<%(non_pool_ndim)s; i++)\n {\n dims[i] = 
PyArray_DIMS(%(x)s)[i];\n }\n for (int i=0; i<%(nd)s; i++)\n {\n dims[%(non_pool_ndim)s + i] = z[i];\n }\n //TODO: zeros not necessary\n %(z)s = (PyArrayObject*) PyArray_ZEROS(%(total_ndim)s, dims, typenum,0);\n }\n // initialize temp var for the value in a region\n dtype_%(x)s collector;\n dtype_%(ex)s eval_collector;\n int z_prod;\n // do not run if any z[i] is zero\n z_prod = 1;\n for (int i=0; i<%(nd)s; i++)\n {\n z_prod *= z[i];\n }\n if (z_prod)\n {\n // will be used to hold start and end index of a region\n int r_st[%(nd)s];\n int r_end[%(nd)s];\n // index for iterating over the pooling regions\n int r_idx[%(nd)s];\n // placeholder for PyArray indexing (output)\n npy_intp o_idx[%(total_ndim)s];\n // placeholder for PyArray indexing (input)\n npy_intp i_idx[%(total_ndim)s];\n // loop over non-pooling dimensions\n int non_pooling_prod = 1;\n for (int i=0; i<%(non_pool_ndim)s; i++)\n {\n non_pooling_prod *= PyArray_DIMS(%(x)s)[i];\n }\n %(omp_parallel)s\n // first loop over non-pooling dimensions\n for (int t=0; t<non_pooling_prod; t++)\n {\n // compute the non-pooling index in each dimension\n if (%(non_pool_ndim)s!=0)\n {\n o_idx[0] = t;\n i_idx[0] = t;\n for (int i=1; i<%(non_pool_ndim)s; i++)\n {\n o_idx[i] = o_idx[i - 1] / PyArray_DIMS(%(x)s)[i - 1];\n o_idx[i - 1] = o_idx[i - 1] %% PyArray_DIMS(%(x)s)[i - 1];\n i_idx[i] = o_idx[i];\n i_idx[i - 1] = o_idx[i - 1];\n }\n }\n\n // then loop over each region in each pooling dimension\n \"\"\"\n\n for i in xrange(nd):\n ccode += \"\"\"\n for (r_idx[%(i)s]=0; r_idx[%(i)s] < z[%(i)s]; r_idx[%(i)s]++) {\n r_st[%(i)s] = r_idx[%(i)s] * st[%(i)s];\n r_end[%(i)s] = r_st[%(i)s] + ws[%(i)s];\n // skip the padding\n r_st[%(i)s] = r_st[%(i)s] < pd[%(i)s] ? pd[%(i)s] : r_st[%(i)s];\n r_end[%(i)s] = r_end[%(i)s] > (r[%(i)s] - pd[%(i)s]) ? r[%(i)s] - pd[%(i)s] : r_end[%(i)s];\n // from padded_img space to img space\n r_st[%(i)s] -= pd[%(i)s];\n r_end[%(i)s] -= pd[%(i)s];\n // handle the case where no padding, ignore border is True\n if (%(params)s->ignore_border)\n {\n r_end[%(i)s] = r_end[%(i)s] > r[%(i)s] ? 
r[%(i)s] : r_end[%(i)s];\n }\n // use the index to find the correct position in the output\n o_idx[%(non_pool_ndim)s + %(i)s] = r_idx[%(i)s];\n \"\"\" % dict(i=i, params=sub['params'], non_pool_ndim=non_pool_ndim)\n\n ccode += \"\"\"\n // get a pointer to the correct position in the output\n dtype_%(z)s * z;\n if (%(total_ndim)s == 4)\n z = ((dtype_%(z)s*)(PyArray_GETPTR4(%(z)s, o_idx[0], o_idx[1], o_idx[2], o_idx[3])));\n else\n z = ((dtype_%(z)s*)(PyArray_GetPtr(%(z)s, o_idx)));\n \"\"\"\n\n for i in xrange(nd):\n ccode += \"\"\"\n // set the first index of dimension %(i)s\n i_idx[%(non_pool_ndim)s + %(i)s] = r_st[%(i)s];\n \"\"\" % dict(i=i, non_pool_ndim=non_pool_ndim)\n ccode += \"\"\"\n // use the first element as the initial value of collector\n if (%(total_ndim)s == 4) {\n collector = ((dtype_%(x)s*)(PyArray_GETPTR4(%(x)s,i_idx[0],i_idx[1],i_idx[2],i_idx[3])))[0];\n eval_collector = ((dtype_%(ex)s*)(PyArray_GETPTR4(%(ex)s,i_idx[0],i_idx[1],i_idx[2],i_idx[3])))[0];\n } else {\n collector = ((dtype_%(x)s*)(PyArray_GetPtr(%(x)s,i_idx)))[0];\n eval_collector = ((dtype_%(ex)s*)(PyArray_GetPtr(%(ex)s,i_idx)))[0];\n }\n \"\"\"\n for i in xrange(nd):\n ccode += \"\"\"\n // go through the pooled region in the unpadded input\n for(int m%(i)s=r_st[%(i)s]; m%(i)s<r_end[%(i)s]; m%(i)s++)\n {\n i_idx[%(non_pool_ndim)s + %(i)s] = m%(i)s;\n \"\"\" % dict(i=i, non_pool_ndim=non_pool_ndim)\n ccode += \"\"\"\n // update maximum\n dtype_%(x)s a;\n dtype_%(ex)s ea;\n if (%(total_ndim)s == 4) {\n a = ((dtype_%(x)s*)(PyArray_GETPTR4(%(x)s,i_idx[0],i_idx[1],i_idx[2],i_idx[3])))[0];\n ea = ((dtype_%(ex)s*)(PyArray_GETPTR4(%(ex)s,i_idx[0],i_idx[1],i_idx[2],i_idx[3])))[0];\n }\n else {\n a = ((dtype_%(x)s*)(PyArray_GetPtr(%(x)s,i_idx)))[0];\n ea = ((dtype_%(ex)s*)(PyArray_GetPtr(%(ex)s,i_idx)))[0];\n }\n if (a > collector) {\n collector = a;\n eval_collector = ea;\n }\n \"\"\"\n for i in xrange(nd):\n ccode += \"\"\"\n } // for loop over region\n \"\"\"\n ccode += \"\"\"\n z[0] = eval_collector;\n \"\"\"\n for i in xrange(nd):\n ccode += \"\"\"\n } // loop over pooling dimension\n \"\"\"\n\n ccode += \"\"\"\n } // for loop over non-pooling dimensions\n } // if z_prod\n \"\"\"\n return ccode % locals()\n\n def c_code_cache_version(self):\n return (1, self.openmp)\n" ]
[ [ "numpy.zeros_like", "numpy.empty", "numpy.zeros", "numpy.argmax", "numpy.maximum", "numpy.ndindex" ] ]
huangruizhe/espresso
[ "ee658bcc959bfbe8a7a61d7374d532d082d2aa26" ]
[ "examples/classify_names/rough_work.py" ]
[ "# Online Python compiler (interpreter) to run Python online.\n# Write Python 3 code in this online editor and run it.\nimport numpy as np\nlist_a = []\nfor i in range(2):\n for j in range(5):\n list_a.append(i)\n\nlist_a = np.random.permutation(list_a)\nprint('class labels')\nprint(list_a)\nlist_a = np.array(list_a)\n\n\nindex_i = 0\nclassid_of_index0=list_a[index_i]\nprint('class_of_index0: ', classid_of_index0)\nclassid_of_index0_locations = np.where(list_a == classid_of_index0)\nclassid_of_index0_locations = classid_of_index0_locations[0]\nprint('class_of_index0_locations', classid_of_index0_locations)\nprint(classid_of_index0_locations != index_i)\nsame_index_list = classid_of_index0_locations[classid_of_index0_locations != index_i]\nprint(same_index_list)\nprint(same_index_list[0:2])\n\nnum_tokens_vec = [5,6,7,5,4,3,5,4,6,7]\nfor pos in same_index_list[0:2]:\n print(num_tokens_vec[pos])\nmax_val = tuple(num_tokens_vec[pos] for pos in same_index_list[0:2])\nmax_val1 = max(max_val)\nprint(max_val)\nprint(max_val1)\n" ]
[ [ "numpy.random.permutation", "numpy.where", "numpy.array" ] ]
jaedong27/calipy
[ "ed5b5af2654b2a25e16af4267683cafc83d72729" ]
[ "calipy/viewer.py" ]
[ "import vtk\nfrom vtk.qt.QVTKRenderWindowInteractor import QVTKRenderWindowInteractor\nimport math\nimport numpy as np\nimport numpy.matlib\nimport os\nimport json\nimport cv2\n\n# Z\n# /\n# /\n# /\n# ---------- X\n# |\n# |\n# |\n# Y\n\nclass vtkRenderer():\n def __init__(self, widget=None):\n self.ren = vtk.vtkRenderer()\n\n if widget is not None:\n # Qt Widget Mode\n self.qtwidget_mode = True\n \n #### Init\n # self.vtkWidget = QVTKRenderWindowInteractor(self.centralwidget)\n # self.vtkWidget.setGeometry(0,0,200,200)\n # self.vtkRenderer = calipy.vtkRenderer(self.vtkWidget)\n\n # Qt Widget\n self.vtkWidget = widget\n self.vtkWidget.GetRenderWindow().AddRenderer(self.ren)\n self.iren = self.vtkWidget.GetRenderWindow().GetInteractor()\n self.iren.SetInteractorStyle(vtk.vtkInteractorStyleTrackballCamera())\n self.iren.Initialize()\n self.iren.Start()\n else:\n # Window Mode\n self.qtwidget_mode = False\n\n # Make empty window\n self.renWin = vtk.vtkRenderWindow()\n self.renWin.AddRenderer(self.ren)\n self.renWin.SetSize(960, 540)\n\n self.iren = vtk.vtkRenderWindowInteractor()\n self.iren.SetInteractorStyle(vtk.vtkInteractorStyleTrackballCamera())\n self.iren.SetRenderWindow(self.renWin)\n self.iren.Initialize()\n\n self.ren.SetBackground(0, 0.1, 0)\n\n self.actor_list = {}\n\n axes = vtk.vtkAxesActor()\n self.ren.AddActor(axes)\n self.actor_list[\"axes\"] = axes\n self.ren.ResetCamera()\n\n self.iren.AddObserver('LeftButtonPressEvent', self.pushLeftButtonPressEventOnVTK, 1.0)\n\n # Add Event for get Position\n def pushLeftButtonPressEventOnVTK(self, obj, ev):\n clickPos = self.iren.GetEventPosition()\n #print(clickPos)\n picker = vtk.vtkPropPicker()\n picker.Pick(clickPos[0], clickPos[1], 0, self.ren)\n print(picker.GetPickPosition())\n\n\n def setMainCamera(self, R = np.eye(3), t = np.zeros((3,1)), fov = 80):\n camera = vtk.vtkCamera()\n camera.SetPosition(t[0,0],t[1,0],t[2,0])\n #camera.SetFocalPoint(0,1,0)\n focalpoint = np.array([[0],[0],[1]])\n focalpoint = np.dot(R,focalpoint) + t\n camera.SetFocalPoint(focalpoint[0],focalpoint[1],focalpoint[2])\n ref = np.array([[0],[-1],[0]])\n cam_up = np.dot(R, ref)\n #camera.SetPosition(0,1,0)\n #camera.SetViewUp(0,1,0)\n camera.SetViewUp(cam_up[0],cam_up[1],cam_up[2])\n camera.SetViewAngle(fov)\n self.ren.SetActiveCamera(camera)\n\n def setMainCameraToSeeTarget(self, t = np.zeros((3,1)), target = np.zeros((3,1)), fov = 80):\n camera = vtk.vtkCamera()\n camera.SetPosition(t[0,0],t[1,0],t[2,0])\n #print(\"Position :\", t)\n #camera.SetFocalPoint(0,1,0)\n #focalpoint = np.array([[0],[0],[1]])\n #focalpoint = np.dot(R,focalpoint) + t\n target_focalpoint = (target - t).ravel()\n #print(target_focalpoint)\n target_focalpoint = target_focalpoint / np.linalg.norm(target_focalpoint)\n #print(\"focalpoint\", target)\n camera.SetFocalPoint(target[0],target[1],target[2])\n ref = np.array([[0],[-1],[0]]).ravel()\n #print(focalpoint, ref)\n ref_right = np.cross(target_focalpoint, ref)\n ref_right = ref_right / np.linalg.norm(ref_right)\n #print(ref_right, focalpoint)\n cam_up = np.cross(ref_right, target_focalpoint)\n cam_up = cam_up / np.linalg.norm(cam_up)\n print(\"Up\",cam_up)\n #cam_up = np.dot(R, ref)\n #camera.SetPosition(0,1,0)\n #camera.SetViewUp(0,1,0)\n camera.SetViewUp(cam_up[0],cam_up[1],cam_up[2])\n camera.SetViewAngle(fov)\n self.ren.SetActiveCamera(camera)\n\n def getActorList(self):\n return self.actor_list.keys()\n\n def removeActorByName(self, name):\n #print(self.actor_list)\n if name in self.actor_list.keys():\n actor = 
self.actor_list.pop(name)\n self.ren.RemoveActor(actor)\n #print(\"remove! \", name)\n \n def addText(self, name, text, pos_x, pos_y):\n self.removeActorByName(name)\n textActor = vtk.vtkTextActor()\n textActor.SetInput( text )\n textActor.SetPosition( pos_x, pos_y )\n textActor.GetTextProperty().SetFontSize ( 50 )\n textActor.GetTextProperty().SetColor ( 1.0, 1.0, 1.0 )\n self.ren.AddActor2D(textActor)\n self.actor_list[name] = textActor\n \n def addPlane(self, name, point1, point2, point3, color=np.array([255.0,255.0,255.0]), opacity=1.0):\n self.removeActorByName(name)\n\n # Create a plane\n planeSource = vtk.vtkPlaneSource()\n # planeSource.SetOrigin(center_point[0], center_point[1], center_point[2])\n # #planeSource.SetNormal(normal_vector[0], normal_vector[1], normal_vector[2])\n # #print(dir(planeSource))\n # planeSource.SetPoint1(top_left_point[0], top_left_point[1], top_left_point[2])\n # planeSource.SetPoint2(bot_right_point[0], bot_right_point[1], bot_right_point[2])\n # planeSource.SetXResolution(10)\n # planeSource.SetYResolution(340)\n planeSource.SetOrigin(point1[0], point1[1], point1[2])\n planeSource.SetPoint1(point2[0], point2[1], point2[2])\n planeSource.SetPoint2(point3[0], point3[1], point3[2])\n planeSource.SetXResolution(10)\n planeSource.SetYResolution(340)\n\n planeSource.Update()\n\n plane = planeSource.GetOutput()\n\n # Create a mapper and actor\n polygonMapper = vtk.vtkPolyDataMapper()\n if vtk.VTK_MAJOR_VERSION <= 5:\n polygonMapper.SetInputConnection(polygon.GetProducerPort())\n else:\n polygonMapper.SetInputData(plane)\n polygonMapper.Update()\n\n polygonActor = vtk.vtkActor()\n polygonActor.SetMapper(polygonMapper)\n polygonActor.GetProperty().SetColor([color[0],color[1],color[2]])\n polygonActor.GetProperty().SetOpacity(opacity)\n #actor.GetProperty().SetColor(colors->GetColor3d(\"Cyan\").GetData());\n\n self.ren.AddActor(polygonActor)\n self.actor_list[name] = polygonActor\n\n def addPlanWithTexture(self, name, point1, point2, point3, path, opacity=1.0):\n self.removeActorByName(name)\n\n #png_file = vtk.vtkPNGReader()\n #print(png_file.CanReadFile(path))\n\n # Read the image which will be the texture\n #vtkSmartPointer<vtkJPEGReader> jPEGReader = vtkSmartPointer<vtkJPEGReader>::New();\n #jPEGReader->SetFileName ( inputFilename.c_str() );\n img = vtk.vtkJPEGReader()\n img.SetFileName(path)\n \n #print(img.CanReadFile(path))\n #print(path)\n\n # Create a plane\n #vtkSmartPointer<vtkPlaneSource> plane = vtkSmartPointer<vtkPlaneSource>::New();\n #plane->SetCenter(0.0, 0.0, 0.0);\n #plane->SetNormal(0.0, 0.0, 1.0);\n plane = vtk.vtkPlaneSource()\n # planeSource.SetOrigin(center_point[0], center_point[1], center_point[2])\n # #planeSource.SetNormal(normal_vector[0], normal_vector[1], normal_vector[2])\n # #print(dir(planeSource))\n # planeSource.SetPoint1(top_left_point[0], top_left_point[1], top_left_point[2])\n # planeSource.SetPoint2(bot_right_point[0], bot_right_point[1], bot_right_point[2])\n # planeSource.SetXResolution(10)\n # planeSource.SetYResolution(340)\n #plane.SetCenter(0.0,0.0,0.0)\n #plane.SetNormal(0.0,0.0,1.0)\n plane.SetOrigin(point1[0], point1[1], point1[2])\n plane.SetPoint1(point2[0], point2[1], point2[2])\n plane.SetPoint2(point3[0], point3[1], point3[2])\n plane.SetXResolution(1920)\n plane.SetYResolution(1080)\n\n # Apply the texture\n #vtkSmartPointer<vtkTexture> texture = vtkSmartPointer<vtkTexture>::New();\n #texture->SetInputConnection(jPEGReader->GetOutputPort());\n texture = vtk.vtkTexture()\n 
texture.SetInputConnection(img.GetOutputPort())\n\n #vtkSmartPointer<vtkTextureMapToPlane> texturePlane = vtkSmartPointer<vtkTextureMapToPlane>::New();\n #texturePlane->SetInputConnection(plane->GetOutputPort());\n texturePlane = vtk.vtkTextureMapToPlane()\n texturePlane.SetInputConnection(plane.GetOutputPort())\n\n #planeSource.Update()\n #plane = planeSource.GetOutput()\n\n #vtkSmartPointer<vtkPolyDataMapper> planeMapper = vtkSmartPointer<vtkPolyDataMapper>::New();\n #planeMapper->SetInputConnection(texturePlane->GetOutputPort());\n planeMapper = vtk.vtkPolyDataMapper()\n planeMapper.SetInputConnection(texturePlane.GetOutputPort())\n\n #vtkSmartPointer<vtkActor> texturedPlane = vtkSmartPointer<vtkActor>::New();\n #texturedPlane->SetMapper(planeMapper);\n #texturedPlane->SetTexture(texture);\n texturedPlane = vtk.vtkActor()\n texturedPlane.SetMapper(planeMapper)\n texturedPlane.SetTexture(texture)\n\n # Create a mapper and actor\n #polygonMapper = vtk.vtkPolyDataMapper()\n #if vtk.VTK_MAJOR_VERSION <= 5:\n # polygonMapper.SetInputConnection(texturePlane.GetProducePort())\n #else:\n # polygonMapper.SetInputData(texturePlane.GetOutput())\n # polygonMapper.Update()\n\n #polygonActor = vtk.vtkActor()\n #polygonActor.SetMapper(polygonMapper)\n #polygonActor.SetTexture(texture)\n #polygonActor.GetProperty().SetColor([color[0],color[1],color[2]])\n #polygonActor.GetProperty().SetOpacity(opacity)\n #actor.GetProperty().SetColor(colors->GetColor3d(\"Cyan\").GetData());\n\n self.ren.AddActor(texturedPlane)\n self.actor_list[name] = texturedPlane\n\n def addLines(self, name, points, idx_list = None, line_width = 1, color=np.array([255.0,255.0,255.0])): # points => numpy vector [3, 0~n]\n self.removeActorByName(name)\n vtkpoints = vtk.vtkPoints()\n vtklines = vtk.vtkCellArray()\n colors = vtk.vtkUnsignedCharArray()\n colors.SetNumberOfComponents(3)\n\n points_size = points.shape[0] \n vtkpoints.SetNumberOfPoints(points_size)\n for idx, point in enumerate(points):\n vtkpoints.SetPoint(idx, point[0], point[1], point[2])\n colors.InsertNextTuple(color)\n colors.SetName(name+\"_colors\")\n\n if idx_list is None:\n vtklines.InsertNextCell(points_size)\n for idx in range(points_size):\n vtklines.InsertCellPoint(idx)\n else:\n vtklines.InsertNextCell(len(idx_list))\n for idx in idx_list:\n vtklines.InsertCellPoint(idx)\n\n polygon = vtk.vtkPolyData()\n polygon.SetPoints(vtkpoints)\n polygon.SetLines(vtklines)\n polygon.GetCellData().SetScalars(colors)\n\n polygonMapper = vtk.vtkPolyDataMapper()\n if vtk.VTK_MAJOR_VERSION <= 5:\n polygonMapper.SetInputConnection(polygon.GetProducerPort())\n else:\n polygonMapper.SetInputData(polygon)\n polygonMapper.Update()\n\n polygonActor = vtk.vtkActor()\n polygonActor.SetMapper(polygonMapper)\n polygonActor.GetProperty().SetLineWidth(line_width)\n\n self.ren.AddActor(polygonActor)\n self.actor_list[name] = polygonActor\n\n def addCamera(self, name, R = np.eye(3), t = np.zeros((3,1)), cs = 0.1, line_width = 2, color=np.array([255,255,255])):\n self.removeActorByName(name)\n camera_points = np.zeros((12,3))\n camera_points[0,:] = np.array([-cs/2, -cs/2, cs])\n camera_points[1] = np.array([ cs/2, -cs/2, cs])\n camera_points[2] = np.array([-cs/2, cs/2, cs])\n camera_points[3] = np.array([ cs/2, cs/2, cs])\n camera_points[4] = np.array([-cs/4, -cs/4, cs/2])\n camera_points[5] = np.array([ cs/4, -cs/4, cs/2])\n camera_points[6] = np.array([-cs/4, cs/4, cs/2])\n camera_points[7] = np.array([ cs/4, cs/4, cs/2])\n camera_points[8] = np.array([-cs/4, -cs/4, 0])\n camera_points[9] 
= np.array([ cs/4, -cs/4, 0])\n camera_points[10] = np.array([-cs/4, cs/4, 0])\n camera_points[11] = np.array([ cs/4, cs/4, 0])\n\n camera_points = np.transpose(camera_points)\n camera_points = np.dot(R, camera_points) + np.matlib.repmat(t, 1, camera_points.shape[1])\n camera_points = np.transpose(camera_points)\n\n points = vtk.vtkPoints()\n points.SetNumberOfPoints(12)\n colors = vtk.vtkUnsignedCharArray()\n points.SetNumberOfPoints(12)\n colors.SetNumberOfComponents(3)\n\n for idx, point in enumerate(camera_points):\n points.SetPoint(idx, point[0], point[1], point[2])\n colors.InsertNextTuple(color)\n colors.SetName(name+\"_colors\")\n\n lines = vtk.vtkCellArray()\n lines.InsertNextCell(24)\n lines.InsertCellPoint(0)\n lines.InsertCellPoint(1)\n lines.InsertCellPoint(3)\n lines.InsertCellPoint(2)\n lines.InsertCellPoint(0)\n lines.InsertCellPoint(4)\n lines.InsertCellPoint(5)\n lines.InsertCellPoint(7)\n lines.InsertCellPoint(6)\n lines.InsertCellPoint(4)\n lines.InsertCellPoint(8)\n lines.InsertCellPoint(9)\n lines.InsertCellPoint(11)\n lines.InsertCellPoint(10)\n lines.InsertCellPoint(8)\n lines.InsertCellPoint(9)\n lines.InsertCellPoint(5)\n lines.InsertCellPoint(1)\n lines.InsertCellPoint(3)\n lines.InsertCellPoint(7)\n lines.InsertCellPoint(11)\n lines.InsertCellPoint(10)\n lines.InsertCellPoint(6)\n lines.InsertCellPoint(2)\n\n polygon = vtk.vtkPolyData()\n polygon.SetPoints(points)\n polygon.SetLines(lines)\n polygon.GetCellData().SetScalars(colors)\n\n polygonMapper = vtk.vtkPolyDataMapper()\n if vtk.VTK_MAJOR_VERSION <= 5:\n polygonMapper.SetInputConnection(polygon.GetProducerPort())\n else:\n polygonMapper.SetInputData(polygon)\n polygonMapper.Update()\n\n polygonActor = vtk.vtkActor()\n polygonActor.SetMapper(polygonMapper)\n polygonActor.GetProperty().SetPointSize(0.1)\n polygonActor.GetProperty().SetLineWidth(line_width)\n self.ren.AddActor(polygonActor)\n self.actor_list[name] = polygonActor\n\n def drawPoints(self, name, point_list, input_color=np.array([[255,0,0]]), point_size = 2):\n self.removeActorByName(name)\n points = vtk.vtkPoints()\n vertices = vtk.vtkCellArray()\n colors = vtk.vtkUnsignedCharArray()\n colors.SetNumberOfComponents(3)\n #colors.SetName(\"Colors\")\n #colors.SetNumberOfComponents(3)\n\n if input_color.shape[0] == 1:\n color_list = np.ones(point_list.shape) * input_color[0]\n else:\n color_list = input_color\n\n for point, color in zip(point_list, color_list):\n id = points.InsertNextPoint(point.tolist())\n vertices.InsertNextCell(1)\n vertices.InsertCellPoint(id)\n colors.InsertNextTuple(color)\n\n point = vtk.vtkPolyData()\n # Set the points and vertices we created as the geometry and topology of the polydata\n point.SetPoints(points)\n point.SetVerts(vertices)\n point.GetPointData().SetScalars(colors)\n\n polygonMapper = vtk.vtkPolyDataMapper()\n if vtk.VTK_MAJOR_VERSION <= 5:\n polygonMapper.SetInputConnection(ps.GetProducerPort())\n else:\n polygonMapper.SetInputData(point)\n polygonMapper.Update()\n polygonActor = vtk.vtkActor()\n polygonActor.SetMapper(polygonMapper)\n polygonActor.GetProperty().SetPointSize(point_size)\n self.ren.AddActor(polygonActor)\n self.actor_list[name] = polygonActor\n\n def render(self):\n self.iren.Render()\n if self.qtwidget_mode == False:\n self.iren.Start()\n\nif __name__ == \"__main__\":\n window_width = 1.18\n window_height = 0.75\n window_points = [[-window_width/2, -window_height*math.cos((5.0/180.0) * math.pi), -window_height*math.sin((5.0/180.0) * math.pi)],\n [ window_width/2, 
-window_height*math.cos((5.0/180.0) * math.pi), -window_height*math.sin((5.0/180.0) * math.pi)],\n                     [-window_width/2, 0, 0],\n                     [ window_width/2, 0, 0]]\n    index = np.array([0,1,3,2,0])\n\n    ren = vtkRenderer()\n    # addLines expects a name plus an (n, 3) point array; vtkRenderer has no showImage(), use render()\n    ren.addLines(\"window\", np.array(window_points), index)\n    ren.render()" ]
[ [ "numpy.eye", "numpy.ones", "numpy.transpose", "numpy.zeros", "numpy.cross", "numpy.matlib.repmat", "numpy.array", "numpy.dot", "numpy.linalg.norm" ] ]
wdar/cdippy
[ "ef38b3445351ec8d9d7ea30b5b0d15825d794b0b" ]
[ "cdippy/stndata.py" ]
[ "from datetime import datetime, timedelta\nfrom bisect import bisect_left\n\nimport numpy.ma as ma\n\nfrom cdippy.cdippy import CDIPnc, Archive, Realtime, RealtimeXY, Historic\nimport cdippy.timestamp_utils as tsu\nimport cdippy.utils as cu\n\n\nclass StnData(CDIPnc):\n \"\"\" \n Returns data and metadata for the specified station. \n\n This class handles the problem that neither the Realtime \n nor the Historic .nc file may exist for either data or metadata,\n and the number of deployment files is unknown apriori.\n It tries to seam the multiple station files together.\n \"\"\"\n\n max_deployments = 99 # Checks at most this number of deployment nc files\n\n # Commonly requested sets of variables\n parameter_vars = ['waveHs', 'waveTp', 'waveDp', 'waveTa']\n xyz_vars = ['xyzXDisplacement', 'xyzYDisplacement', 'xyzZDisplacement']\n spectrum_vars = [\n 'waveEnergyDensity', 'waveMeanDirection', \n 'waveA1Value', 'waveB1Value', 'waveA2Value', 'waveB2Value',\n 'waveCheckFactor',]\n meta_vars = [\n 'metaStationName', \n 'metaDeployLatitude', 'metaDeployLongitude', 'metaWaterDepth',\n 'metaDeclilnation']\n meta_attributes = [\n 'wmo_id', \n 'geospatial_lat_min', 'geospatial_lat_max', 'geospatial_lat_units', 'geospatial_lat_resolution',\n 'geospatial_lon_min', 'geospatial_lon_max', 'geospatial_lon_units', 'geospatial_lon_resolution',\n 'geospatial_vertical_min', 'geospatial_vertical_max', 'geospatial_vertical_units', 'geospatial_vertical_resolution',\n 'time_coverage_start', 'time_coverage_end',\n 'date_created', 'date_modified' ]\n \n def __init__(cls, stn, data_dir=None, org=None):\n cls.nc = None\n cls.stn = stn\n cls.data_dir = data_dir\n cls.org = org\n cls.historic = Historic(cls.stn, cls.data_dir, cls.org)\n cls.realtime = Realtime(cls.stn, cls.data_dir, cls.org)\n if cls.historic and cls.historic.nc :\n cls.meta = cls.historic\n else: \n if cls.realtime and cls.realtime.nc :\n cls.meta = cls.realtime\n else:\n return None\n\n def get_parameters(cls, start=None, end=None, pub_set='public', apply_mask=True, target_records=0):\n return cls.get_series(start, end, cls.parameter_vars, pub_set, apply_mask, target_records)\n\n def get_stn_meta(cls):\n \"\"\" Returns a dict of station meta data using historic or realtime file. \"\"\"\n result = {}\n if cls.meta is None:\n return result\n cls.meta.set_request_info(vrs=cls.meta_vars)\n result = cls.meta.get_request()\n for attr_name in cls.meta_attributes:\n if hasattr(cls.meta.nc, attr_name):\n result[attr_name] = getattr(cls.meta.nc, attr_name)\n return result\n\n def get_xyz(cls, start=None, end=None, pub_set='public'):\n return cls.get_series(start, end, cls.xyz_vars, pub_set)\n\n def get_spectra(cls, start=None, end=None, pub_set='public', apply_mask=True, target_records=0):\n return cls.get_series(start, end, cls.spectrum_vars, pub_set, apply_mask, target_records)\n\n def get_series(cls, start=None, end=None, vrs=None, pub_set='public', apply_mask=True, target_records=0):\n \"\"\" \n Returns a dict of data between start and end dates with specified quality. \n\n Use this to get series that may span realtime and historic files. 
\n If end is None, then start is considered a target date.\n \"\"\"\n if vrs is None:\n vrs = cls.parameter_vars\n prefix = cls.get_var_prefix(vrs[0])\n\n if start is not None and end is None: # Target time\n ts_I = cls.get_target_timespan(cu.datetime_to_timestamp(start), target_records, prefix+'Time')\n if ts_I[0] is not None:\n start = cu.timestamp_to_datetime(ts_I[0])\n end = cu.timestamp_to_datetime(ts_I[1])\n else:\n return None\n elif start is None: # Use default 3 days back\n start = datetime.utcnow()-timedelta(days=3)\n end = datetime.utcnow()\n\n cls.set_request_info(start, end, vrs, pub_set, apply_mask)\n if vrs is not None and prefix == 'xyz':\n return cls.merge_xyz_request()\n else:\n return cls.merge_request()\n\n def aggregate_dicts(cls, dict1, dict2):\n \"\"\" Aggregate the data in two dictionaries. Dict1 has oldest data. \"\"\"\n #- Union the keys to make sure we check each one\n ukeys = set(dict1.keys()) | set(dict2.keys())\n result = { }\n #- Combine the variables\n for key in ukeys :\n if key in dict2 and key in dict1:\n result[key] = ma.concatenate([dict1[key], dict2[key]])\n elif key in dict2:\n result[key] = dict2[key]\n else:\n result[key] = dict1[key]\n return result\n\n def merge_xyz_request(cls):\n \"\"\" Merge xyz data from realtime and archive nc files. \"\"\"\n if cls.vrs and cls.vrs[0] == 'xyzData':\n cls.vrs = ['xyzXDisplacement','xyzYDisplacement','xyzZDisplacement']\n request_timespan = cu.Timespan(cls.start_stamp, cls.end_stamp)\n result = {}\n\n def helper(cdip_nc, request_timespan, result):\n # Try the next file if it is without xyz data\n z = cdip_nc.get_var('xyzZDisplacement')\n if z is None:\n return result, cls.start_stamp\n # Try the next file if start_stamp cannot be calculated\n start_stamp = cdip_nc.get_xyz_timestamp(0)\n end_stamp = cdip_nc.get_xyz_timestamp(len(z)-1)\n if start_stamp is None:\n return result, cls.start_stamp\n file_timespan = cu.Timespan(start_stamp, end_stamp)\n # Add data if request timespan overlaps data timespan\n if request_timespan.overlap(file_timespan):\n cdip_nc.start_stamp = cls.start_stamp\n cdip_nc.end_stamp = cls.end_stamp\n cdip_nc.pub_set = cls.pub_set\n cdip_nc.apply_mask = cls.apply_mask\n cdip_nc.vrs = cls.vrs\n tmp_result = cdip_nc.get_request()\n result = cls.aggregate_dicts(result, tmp_result)\n return result, start_stamp\n\n # First get realtime data if it exists\n rt = RealtimeXY(cls.stn)\n if rt.nc is not None:\n result, start_stamp = helper(rt, request_timespan, result)\n\n # If the request start time is more recent than the realtime\n # start time, no need to look in the archives\n if cls.start_stamp > start_stamp:\n return result\n\n # Second, look in archive files for data\n for dep in range(1, cls.max_deployments):\n deployment = 'd'+'{:02d}'.format(dep)\n ar = Archive(cls.stn, deployment, cls.data_dir, cls.org)\n if ar.nc is None:\n break\n result, start_stamp = helper(ar, request_timespan, result)\n \n # Break if file start stamp is greater than request end stamp\n if start_stamp > cls.end_stamp :\n break\n return result\n\n def merge_request(cls):\n \"\"\" Returns data for given request across realtime and historic files \"\"\"\n rt = {};\n r = cls.realtime\n # Note that we are assuming that waveTime will work for every time dim.\n if r.nc is not None and r.get_var('waveTime')[0] <= cls.end_stamp:\n r.vrs = cls.vrs\n r.start_stamp = cls.start_stamp\n r.end_stamp = cls.end_stamp\n r.pub_set = cls.pub_set\n r.apply_mask = cls.apply_mask\n rt = r.get_request()\n ht = {};\n h = cls.historic\n if h.nc 
is not None and h.get_var('waveTime')[-1] >= cls.start_stamp:\n h.vrs = cls.vrs\n h.start_stamp = cls.start_stamp\n h.end_stamp = cls.end_stamp\n h.pub_set = cls.pub_set\n h.apply_mask = cls.apply_mask\n ht = h.get_request()\n return cls.aggregate_dicts(ht, rt)\n\n def get_nc_files(cls, types=['realtime','historic','archive']):\n \"\"\" Returns dict of netcdf4 objects of a station's netcdf files \"\"\"\n result = {}\n for type in types:\n if type == 'realtime':\n rt = Realtime(cls.stn, cls.data_dir, cls.org)\n if rt.nc:\n result[rt.filename] = rt.nc\n if type == 'historic':\n ht = Historic(cls.stn, cls.data_dir, cls.org)\n if ht.nc:\n result[ht.filename] = ht.nc\n if type == 'archive':\n for dep in range(1,cls.max_deployments):\n deployment = 'd'+'{:02d}'.format(dep)\n ar = Archive(cls.stn, deployment, cls.data_dir, cls.org)\n if ar.nc is None:\n break\n result[ar.filename] = ar\n return result\n\n def get_target_timespan(cls, target_timestamp, n, time_var):\n \"\"\" \n Returns a 2-tuple of timestamps, an interval corresponding to n records to \n the right or left of target_timestamp.\n \n Given a time_var (e.g. 'waveTime') and target timestamp, returns a 2-tuple \n of timestamps corresponding to i and i+n (n<0 or n>=0) taken from\n the realtime and historic nc files. Those timestamps can then be used in\n set_request_info().\n \"\"\"\n r_ok = False\n if cls.realtime.nc is not None:\n r_ok = True\n h_ok = False\n if cls.historic.nc is not None:\n h_ok = True\n\n # Check realtime to find closest index\n\n r_closest_idx = None\n if r_ok: \n r_stamps = cls.realtime.get_var(time_var)[:] \n r_last_idx = len(r_stamps) - 1\n i_b = bisect_left(r_stamps, target_timestamp)\n # i_b will be possibly one more than the last index\n i_b = min(i_b, r_last_idx)\n # Target timestamp is exactly equal to a data time \n if i_b == r_last_idx or r_stamps[i_b] == target_timestamp:\n r_closest_idx = i_b\n elif i_b > 0:\n r_closest_idx = tsu.get_closest_index(i_b-1, i_b, r_stamps, target_timestamp)\n\n # If closest index not found, check historic\n\n h_closest_idx = None\n h_last_idx = None # Let's us know if h_stamps has been loaded\n if h_ok and not r_closest_idx:\n h_stamps = cls.historic.get_var(time_var)[:] \n h_last_idx = len(h_stamps) - 1\n i_b = bisect_left(h_stamps, target_timestamp)\n i_b = min(i_b, h_last_idx)\n # Target timestamp is exactly equal to a data time \n if (i_b <= h_last_idx and h_stamps[i_b] == target_timestamp) or i_b == 0:\n h_closest_idx = i_b\n elif i_b >= h_last_idx: # Target is between the two files\n if r_ok:\n if abs(h_stamps[h_last_idx]-target_timestamp) < abs(r_stamps[0]-target_timestamp):\n h_closest_idx = i_b\n else:\n r_closest_idx = 0\n else: # No realtime file \n h_closest_idx = i_b\n else: # Within middle of historic stamps\n h_closest_idx = tsu.get_closest_index(i_b-1, i_b, h_stamps, target_timestamp)\n\n # Now we have the closest index, find the intervals\n\n if r_closest_idx is not None:\n r_interval = tsu.get_interval(r_stamps, r_closest_idx, n)\n # If bound exceeded toward H and H exists, cacluate h_interval\n if r_interval[2] < 0 and h_ok:\n if not h_last_idx:\n h_stamps = cls.historic.get_var(time_var)[:] \n h_last_idx = len(h_stamps) - 1\n h_interval = tsu.get_interval(h_stamps, h_last_idx, n+r_closest_idx+1)\n #print(\"Rx H interval: \", h_interval)\n #print(\"Rx R interval: \", r_interval)\n return tsu.combine_intervals(h_interval, r_interval)\n else:\n return r_interval \n elif h_closest_idx is not None:\n h_interval = tsu.get_interval(h_stamps, h_closest_idx, n)\n 
# If bound exceeded toward R and R exists, cacluate r_interval\n if h_interval[2] > 0 and r_ok: \n r_interval = tsu.get_interval(r_stamps, 0, n+h_closest_idx-h_last_idx-1)\n #print(\"Hx H interval: \", h_interval)\n #print(\"Hx R interval: \", r_interval)\n return tsu.combine_intervals(h_interval, r_interval)\n else:\n return h_interval \n\n # If we get to here there's a problem\n return (None, None, None)\n\nif __name__ == \"__main__\":\n #- Tests\n def t0():\n s = StnData('100p1')\n d = s.get_stn_meta()\n print(d)\n def t1():\n s = StnData('100p1')\n d = s.get_spectra(datetime(2016,8,1), target_records=3)\n print(d.keys())\n print(d['waveEnergyDensity'].shape)\n def t2():\n s = StnData('100p1',org='ww3')\n d = s.get_series('2016-08-01 00:00:00','2016-08-02 23:59:59',['waveHs'],'public')\n print(d)\n def t3():\n s = StnData('100p1',data_dir='./gdata')\n d = s.get_nc_files(['historic','archive','realtime'])\n print(d.keys())\n def t4():\n s = StnData('100p1')\n # Across deployments 5 and 6\n d = s.get_series('2007-05-30 00:00:00','2007-06-01 23:59:59',['xyzData'],'public')\n print(len(d['xyzXDisplacement']))\n print(len(d['xyzTime']))\n print(d['xyzTime'][0],d['xyzTime'][-1])\n def t5():\n s = StnData('100p1')\n dt = datetime(2010,4,1,0,0)\n d = s.get_series(dt, target_records=-4)\n print(d)\n def t6():\n # Mark 1 filter delay set to -999.9\n s = StnData('071p1')\n end = datetime.utcnow()\n end = datetime(1996,1,22,15,57,00)\n start = end - timedelta(hours=2)\n d = s.get_xyz(start, end)\n print(\"D: \"+repr(d))\n print(\"Len: \"+repr(len(d['xyzTime'])))\n\n t6()\n" ]
[ [ "numpy.ma.concatenate" ] ]
zoumt1633/pytorch-project-template
[ "871e00ebde6c2191de5f61b4cb7010c72b93c198" ]
[ "model/model.py" ]
[ "import torch\nimport torch.nn\nfrom torch.nn.parallel import DistributedDataParallel as DDP\n\nfrom collections import OrderedDict\nimport os.path as osp\nimport wandb\n\nfrom utils.utils import DotDict\n\n\nclass Model:\n def __init__(self, hp, net_arch, loss_f, rank=0, world_size=1):\n self.hp = hp\n self.device = self.hp.model.device\n self.net = net_arch.to(self.device)\n self.rank = rank\n self.world_size = world_size\n if self.device != \"cpu\" and self.world_size != 0:\n self.net = DDP(self.net, device_ids=[self.rank])\n self.input = None\n self.GT = None\n self.step = 0\n self.epoch = -1\n\n # init optimizer\n optimizer_mode = self.hp.train.optimizer.mode\n if optimizer_mode == \"adam\":\n self.optimizer = torch.optim.Adam(\n self.net.parameters(), **(self.hp.train.optimizer[optimizer_mode])\n )\n else:\n raise Exception(\"%s optimizer not supported\" % optimizer_mode)\n\n # init loss\n self.loss_f = loss_f\n self.log = DotDict()\n\n def feed_data(self, **data): # data's keys: input, GT\n for k, v in data.items():\n data[k] = v.to(self.device)\n self.input = data.get(\"input\")\n self.GT = data.get(\"GT\")\n\n def optimize_parameters(self):\n self.net.train()\n self.optimizer.zero_grad()\n output = self.run_network()\n loss_v = self.loss_f(output, self.GT)\n loss_v.backward()\n self.optimizer.step()\n\n # set log\n self.log.loss_v = loss_v.item()\n\n def inference(self):\n self.net.eval()\n output = self.run_network()\n return output\n\n def run_network(self):\n output = self.net(self.input)\n return output\n\n def save_network(self, logger, save_file=True):\n if self.rank == 0:\n net = self.net.module if isinstance(self.net, DDP) else self.net\n state_dict = net.state_dict()\n for key, param in state_dict.items():\n state_dict[key] = param.to(\"cpu\")\n if save_file:\n save_filename = \"%s_%d.pt\" % (self.hp.log.name, self.step)\n save_path = osp.join(self.hp.log.chkpt_dir, save_filename)\n torch.save(state_dict, save_path)\n if self.hp.log.use_wandb:\n wandb.save(save_path)\n if logger is not None:\n logger.info(\"Saved network checkpoint to: %s\" % save_path)\n return state_dict\n\n def load_network(self, loaded_net=None, logger=None):\n add_log = False\n if loaded_net is None:\n add_log = True\n if self.hp.load.wandb_load_path is not None:\n self.hp.load.network_chkpt_path = wandb.restore(\n self.hp.load.network_chkpt_path,\n run_path=self.hp.load.wandb_load_path,\n ).name\n loaded_net = torch.load(\n self.hp.load.network_chkpt_path, map_location=torch.device(self.device)\n )\n loaded_clean_net = OrderedDict() # remove unnecessary 'module.'\n for k, v in loaded_net.items():\n if k.startswith(\"module.\"):\n loaded_clean_net[k[7:]] = v\n else:\n loaded_clean_net[k] = v\n\n self.net.load_state_dict(loaded_clean_net, strict=self.hp.load.strict_load)\n if logger is not None and add_log:\n logger.info(\"Checkpoint %s is loaded\" % self.hp.load.network_chkpt_path)\n\n def save_training_state(self, logger):\n if self.rank == 0:\n save_filename = \"%s_%d.state\" % (self.hp.log.name, self.step)\n save_path = osp.join(self.hp.log.chkpt_dir, save_filename)\n net_state_dict = self.save_network(None, False)\n state = {\n \"model\": net_state_dict,\n \"optimizer\": self.optimizer.state_dict(),\n \"step\": self.step,\n \"epoch\": self.epoch,\n }\n torch.save(state, save_path)\n if self.hp.log.use_wandb:\n wandb.save(save_path)\n if logger is not None:\n logger.info(\"Saved training state to: %s\" % save_path)\n\n def load_training_state(self, logger):\n if self.hp.load.wandb_load_path is 
not None:\n self.hp.load.resume_state_path = wandb.restore(\n self.hp.load.resume_state_path, run_path=self.hp.load.wandb_load_path\n ).name\n resume_state = torch.load(\n self.hp.load.resume_state_path, map_location=torch.device(self.device)\n )\n\n self.load_network(loaded_net=resume_state[\"model\"], logger=logger)\n self.optimizer.load_state_dict(resume_state[\"optimizer\"])\n self.step = resume_state[\"step\"]\n self.epoch = resume_state[\"epoch\"]\n if logger is not None:\n logger.info(\n \"Resuming from training state: %s\" % self.hp.load.resume_state_path\n )\n" ]
[ [ "torch.nn.parallel.DistributedDataParallel", "torch.save", "torch.device" ] ]
ericagol/exoplanet
[ "ec270622f28cd53d3052ed44d20f30b5d2b4dcb6" ]
[ "src/exoplanet/distributions/physical_test.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport pymc3 as pm\nfrom scipy.stats import kstest\n\nfrom .base_test import _Base\nfrom .physical import ImpactParameter, QuadLimbDark\n\n\nclass TestPhysical(_Base):\n random_seed = 19860925\n\n def test_quad_limb_dark(self):\n with self._model():\n dist = QuadLimbDark(\"u\", shape=2)\n\n # Test random sampling\n samples = dist.random(size=100)\n assert np.shape(samples) == (100, 2)\n\n logp = QuadLimbDark.dist(shape=2).logp(samples).eval().flatten()\n assert np.all(np.isfinite(logp))\n assert np.allclose(logp[0], logp)\n\n trace = self._sample()\n\n u1 = trace[\"u\"][:, 0]\n u2 = trace[\"u\"][:, 1]\n\n # Make sure that the physical constraints are satisfied\n assert np.all(u1 + u2 < 1)\n assert np.all(u1 > 0)\n assert np.all(u1 + 2 * u2 > 0)\n\n # Make sure that the qs are uniform\n q1 = (u1 + u2) ** 2\n q2 = 0.5 * u1 / (u1 + u2)\n\n cdf = lambda x: np.clip(x, 0, 1) # NOQA\n for q in (q1, q2):\n s, p = kstest(q, cdf)\n assert s < 0.05\n\n def test_impact(self):\n lower = 0.1\n upper = 1.0\n with self._model():\n ror = pm.Uniform(\"ror\", lower=lower, upper=upper, shape=(5, 2))\n dist = ImpactParameter(\"b\", ror=ror)\n\n # Test random sampling\n samples = dist.random(size=100)\n assert np.shape(samples) == (100, 5, 2)\n assert np.all((0 <= samples) & (samples <= 1 + upper))\n\n trace = self._sample()\n\n u = trace[\"ror\"]\n u = np.reshape(u, (len(u), -1))\n cdf = lambda x: np.clip((x - lower) / (upper - lower), 0, 1) # NOQA\n for i in range(u.shape[1]):\n s, p = kstest(u[:, i], cdf)\n assert s < 0.05\n\n assert np.all(trace[\"b\"] <= 1 + trace[\"ror\"])\n" ]
[ [ "numpy.allclose", "scipy.stats.kstest", "numpy.all", "numpy.shape", "numpy.clip", "numpy.isfinite" ] ]
Leopard-X/MXNET
[ "7ac046c58f0815223712f77288722a7b06755ec3" ]
[ "python/mxnet/image.py" ]
[ "# pylint: disable=no-member, too-many-lines, redefined-builtin, protected-access, unused-import, invalid-name\n# pylint: disable=too-many-arguments, too-many-locals, no-name-in-module, too-many-branches, too-many-statements\n\"\"\"Read invidual image files and perform augmentations.\"\"\"\n\nfrom __future__ import absolute_import, print_function\n\nimport os\nimport random\nimport logging\nimport numpy as np\n\ntry:\n import cv2\nexcept ImportError:\n cv2 = None\n\nfrom .base import numeric_types\nfrom . import ndarray as nd\nfrom . import _ndarray_internal as _internal\nfrom ._ndarray_internal import _cvimresize as imresize\nfrom ._ndarray_internal import _cvcopyMakeBorder as copyMakeBorder\nfrom . import io\nfrom . import recordio\n\n\ndef imdecode(buf, **kwargs):\n \"\"\"Decode an image to an NDArray.\n\n Note: `imdecode` uses OpenCV (not the CV2 Python library).\n MXNet must have been built with OpenCV for `imdecode` to work.\n\n Parameters\n ----------\n buf : str/bytes or numpy.ndarray\n Binary image data as string or numpy ndarray.\n flag : int, optional, default=1\n 1 for three channel color output. 0 for grayscale output.\n to_rgb : int, optional, default=1\n 1 for RGB formatted output (MXNet default). 0 for BGR formatted output (OpenCV default).\n out : NDArray, optional\n Output buffer. Use `None` for automatic allocation.\n\n Returns\n -------\n NDArray\n An `NDArray` containing the image.\n\n Example\n -------\n >>> with open(\"flower.jpg\", 'rb') as fp:\n ... str_image = fp.read()\n ...\n >>> image = mx.img.imdecode(str_image)\n >>> image\n <NDArray 224x224x3 @cpu(0)>\n\n Set `flag` parameter to 0 to get grayscale output\n\n >>> with open(\"flower.jpg\", 'rb') as fp:\n ... str_image = fp.read()\n ...\n >>> image = mx.img.imdecode(str_image, flag=0)\n >>> image\n <NDArray 224x224x1 @cpu(0)>\n\n Set `to_rgb` parameter to 0 to get output in OpenCV format (BGR)\n\n >>> with open(\"flower.jpg\", 'rb') as fp:\n ... 
str_image = fp.read()\n ...\n >>> image = mx.img.imdecode(str_image, to_rgb=0)\n >>> image\n <NDArray 224x224x3 @cpu(0)>\n \"\"\"\n if not isinstance(buf, nd.NDArray):\n buf = nd.array(np.frombuffer(buf, dtype=np.uint8), dtype=np.uint8)\n return _internal._cvimdecode(buf, **kwargs)\n\n\ndef scale_down(src_size, size):\n \"\"\"Scales down crop size if it's larger than image size.\n\n If width/height of the crop is larger than the width/height of the image,\n sets the width/height to the width/height of the image.\n\n Parameters\n ----------\n src_size : tuple of int\n Size of the image in (width, height) format.\n size : tuple of int\n Size of the crop in (width, height) format.\n\n Returns\n -------\n tuple of int\n A tuple containing the scaled crop size in (width, height) format.\n\n Example\n --------\n >>> src_size = (640,480)\n >>> size = (720,120)\n >>> new_size = mx.img.scale_down(src_size, size)\n >>> new_size\n (640,106)\n \"\"\"\n w, h = size\n sw, sh = src_size\n if sh < h:\n w, h = float(w * sh) / h, sh\n if sw < w:\n w, h = sw, float(h * sw) / w\n return int(w), int(h)\n\n\ndef resize_short(src, size, interp=2):\n \"\"\"Resizes shorter edge to size.\n\n Note: `resize_short` uses OpenCV (not the CV2 Python library).\n MXNet must have been built with OpenCV for `resize_short` to work.\n\n Resizes the original image by setting the shorter edge to size\n and setting the longer edge accordingly.\n Resizing function is called from OpenCV.\n\n Parameters\n ----------\n src : NDArray\n The original image.\n size : int\n The length to be set for the shorter edge.\n interp : int, optional, default=2\n Interpolation method used for resizing the image.\n Default method is bicubic interpolation.\n More details can be found in the documentation of OpenCV, please refer to\n http://docs.opencv.org/master/da/d54/group__imgproc__transform.html.\n\n Returns\n -------\n NDArray\n An 'NDArray' containing the resized image.\n\n Example\n -------\n >>> with open(\"flower.jpeg\", 'rb') as fp:\n ... str_image = fp.read()\n ...\n >>> image = mx.img.imdecode(str_image)\n >>> image\n <NDArray 2321x3482x3 @cpu(0)>\n >>> size = 640\n >>> new_image = mx.img.resize_short(image, size)\n >>> new_image\n <NDArray 2321x3482x3 @cpu(0)>\n \"\"\"\n h, w, _ = src.shape\n if h > w:\n new_h, new_w = size * h / w, size\n else:\n new_h, new_w = size, size * w / h\n return imresize(src, new_w, new_h, interp=interp)\n\n\ndef fixed_crop(src, x0, y0, w, h, size=None, interp=2):\n \"\"\"Crop src at fixed location, and (optionally) resize it to size.\"\"\"\n out = nd.crop(src, begin=(y0, x0, 0), end=(y0 + h, x0 + w, int(src.shape[2])))\n if size is not None and (w, h) != size:\n out = imresize(out, *size, interp=interp)\n return out\n\n\ndef random_crop(src, size, interp=2):\n \"\"\"Randomly crop `src` with `size` (width, height).\n Upsample result if `src` is smaller than `size`.\n\n Parameters\n ----------\n src: Source image `NDArray`\n size: Size of the crop formatted as (width, height). If the `size` is larger\n than the image, then the source image is upsampled to `size` and returned.\n interp: Interpolation method to be used in case the size is larger (default: bicubic).\n Uses OpenCV convention for the parameters. Nearest - 0, Bilinear - 1, Bicubic - 2,\n Area - 3. 
See OpenCV imresize function for more details.\n Returns\n -------\n NDArray\n An `NDArray` containing the cropped image.\n Tuple\n A tuple (x, y, width, height) where (x, y) is top-left position of the crop in the\n original image and (width, height) are the dimensions of the cropped image.\n\n Example\n -------\n >>> im = mx.nd.array(cv2.imread(\"flower.jpg\"))\n >>> cropped_im, rect = mx.image.random_crop(im, (100, 100))\n >>> print cropped_im\n <NDArray 100x100x1 @cpu(0)>\n >>> print rect\n (20, 21, 100, 100)\n \"\"\"\n\n h, w, _ = src.shape\n new_w, new_h = scale_down((w, h), size)\n\n x0 = random.randint(0, w - new_w)\n y0 = random.randint(0, h - new_h)\n\n out = fixed_crop(src, x0, y0, new_w, new_h, size, interp)\n return out, (x0, y0, new_w, new_h)\n\n\ndef center_crop(src, size, interp=2):\n \"\"\"Crops the image `src` to the given `size` by trimming on all four\n sides and preserving the center of the image. Upsamples if `src` is smaller\n than `size`.\n\n .. note:: This requires MXNet to be compiled with USE_OPENCV.\n\n Parameters\n ----------\n src : NDArray\n Binary source image data.\n size : list or tuple of int\n The desired output image size.\n interp : interpolation, optional, default=Area-based\n The type of interpolation that is done to the image.\n\n Possible values:\n\n 0: Nearest Neighbors Interpolation.\n\n 1: Bilinear interpolation.\n\n 2: Area-based (resampling using pixel area relation). It may be a\n preferred method for image decimation, as it gives moire-free\n results. But when the image is zoomed, it is similar to the Nearest\n Neighbors method. (used by default).\n\n 3: Bicubic interpolation over 4x4 pixel neighborhood.\n\n 4: Lanczos interpolation over 8x8 pixel neighborhood.\n\n When shrinking an image, it will generally look best with AREA-based\n interpolation, whereas, when enlarging an image, it will generally look best\n with Bicubic (slow) or Bilinear (faster but still looks OK).\n\n Returns\n -------\n NDArray\n The cropped image.\n Tuple\n (x, y, width, height) where x, y are the positions of the crop in the\n original image and width, height the dimensions of the crop.\n\n Example\n -------\n >>> with open(\"flower.jpg\", 'rb') as fp:\n ... str_image = fp.read()\n ...\n >>> image = mx.image.imdecode(str_image)\n >>> image\n <NDArray 2321x3482x3 @cpu(0)>\n >>> cropped_image, (x, y, width, height) = mx.image.center_crop(image, (1000, 500))\n >>> cropped_image\n <NDArray 500x1000x3 @cpu(0)>\n >>> x, y, width, height\n (1241, 910, 1000, 500)\n \"\"\"\n\n h, w, _ = src.shape\n new_w, new_h = scale_down((w, h), size)\n\n x0 = int((w - new_w) / 2)\n y0 = int((h - new_h) / 2)\n\n out = fixed_crop(src, x0, y0, new_w, new_h, size, interp)\n return out, (x0, y0, new_w, new_h)\n\n\ndef color_normalize(src, mean, std=None):\n \"\"\"Normalize src with mean and std.\"\"\"\n src -= mean\n if std is not None:\n src /= std\n return src\n\n\ndef random_size_crop(src, size, min_area, ratio, interp=2):\n \"\"\"Randomly crop src with size. 
Randomize area and aspect ratio.\"\"\"\n h, w, _ = src.shape\n new_ratio = random.uniform(*ratio)\n if new_ratio * h > w:\n max_area = w * int(w / new_ratio)\n else:\n max_area = h * int(h * new_ratio)\n\n min_area *= h * w\n if max_area < min_area:\n return random_crop(src, size, interp)\n new_area = random.uniform(min_area, max_area)\n new_w = int(np.sqrt(new_area * new_ratio))\n new_h = int(np.sqrt(new_area / new_ratio))\n\n assert new_w <= w and new_h <= h\n x0 = random.randint(0, w - new_w)\n y0 = random.randint(0, h - new_h)\n\n out = fixed_crop(src, x0, y0, new_w, new_h, size, interp)\n return out, (x0, y0, new_w, new_h)\n\n\ndef ResizeAug(size, interp=2):\n \"\"\"Make resize shorter edge to size augmenter.\"\"\"\n\n def aug(src):\n \"\"\"Augmenter body\"\"\"\n return [resize_short(src, size, interp)]\n\n return aug\n\n\ndef RandomCropAug(size, interp=2):\n \"\"\"Make random crop augmenter\"\"\"\n\n def aug(src):\n \"\"\"Augmenter body\"\"\"\n return [random_crop(src, size, interp)[0]]\n\n return aug\n\n\ndef RandomSizedCropAug(size, min_area, ratio, interp=2):\n \"\"\"Make random crop with random resizing and random aspect ratio jitter augmenter.\"\"\"\n\n def aug(src):\n \"\"\"Augmenter body\"\"\"\n return [random_size_crop(src, size, min_area, ratio, interp)[0]]\n\n return aug\n\n\ndef CenterCropAug(size, interp=2):\n \"\"\"Make center crop augmenter.\"\"\"\n\n def aug(src):\n \"\"\"Augmenter body\"\"\"\n return [center_crop(src, size, interp)[0]]\n\n return aug\n\n\ndef RandomOrderAug(ts):\n \"\"\"Apply list of augmenters in random order\"\"\"\n\n def aug(src):\n \"\"\"Augmenter body\"\"\"\n src = [src]\n random.shuffle(ts)\n for t in ts:\n src = [j for i in src for j in t(i)]\n return src\n\n return aug\n\n\ndef ColorJitterAug(brightness, contrast, saturation):\n \"\"\"Apply random brightness, contrast and saturation jitter in random order.\"\"\"\n ts = []\n coef = nd.array([[[0.299, 0.587, 0.114]]])\n if brightness > 0:\n def baug(src):\n \"\"\"Augmenter body\"\"\"\n alpha = 1.0 + random.uniform(-brightness, brightness)\n src *= alpha\n return [src]\n\n ts.append(baug)\n\n if contrast > 0:\n def caug(src):\n \"\"\"Augmenter body\"\"\"\n alpha = 1.0 + random.uniform(-contrast, contrast)\n gray = src * coef\n gray = (3.0 * (1.0 - alpha) / gray.size) * nd.sum(gray)\n src *= alpha\n src += gray\n return [src]\n\n ts.append(caug)\n\n if saturation > 0:\n def saug(src):\n \"\"\"Augmenter body\"\"\"\n alpha = 1.0 + random.uniform(-saturation, saturation)\n gray = src * coef\n gray = nd.sum(gray, axis=2, keepdims=True)\n gray *= (1.0 - alpha)\n src *= alpha\n src += gray\n return [src]\n\n ts.append(saug)\n return RandomOrderAug(ts)\n\n\ndef LightingAug(alphastd, eigval, eigvec):\n \"\"\"Add PCA based noise.\"\"\"\n\n def aug(src):\n \"\"\"Augmenter body\"\"\"\n alpha = np.random.normal(0, alphastd, size=(3,))\n rgb = np.dot(eigvec * alpha, eigval)\n src += nd.array(rgb)\n return [src]\n\n return aug\n\n\ndef ColorNormalizeAug(mean, std):\n \"\"\"Mean and std normalization.\"\"\"\n mean = nd.array(mean)\n std = nd.array(std)\n\n def aug(src):\n \"\"\"Augmenter body\"\"\"\n return [color_normalize(src, mean, std)]\n\n return aug\n\n\ndef HorizontalFlipAug(p):\n \"\"\"Random horizontal flipping.\"\"\"\n\n def aug(src):\n \"\"\"Augmenter body\"\"\"\n if random.random() < p:\n src = nd.flip(src, axis=1)\n return [src]\n\n return aug\n\n\ndef CastAug():\n \"\"\"Cast to float32\"\"\"\n\n def aug(src):\n \"\"\"Augmenter body\"\"\"\n src = src.astype(np.float32)\n return [src]\n\n return 
aug\n\n\ndef CreateAugmenter(data_shape, resize=0, rand_crop=False, rand_resize=False, rand_mirror=False,\n mean=None, std=None, brightness=0, contrast=0, saturation=0,\n pca_noise=0, inter_method=2):\n \"\"\"Creates an augmenter list.\"\"\"\n auglist = []\n\n if resize > 0:\n auglist.append(ResizeAug(resize, inter_method))\n\n crop_size = (data_shape[2], data_shape[1])\n if rand_resize:\n assert rand_crop\n auglist.append(RandomSizedCropAug(crop_size, 0.3, (3.0 / 4.0, 4.0 / 3.0), inter_method))\n elif rand_crop:\n auglist.append(RandomCropAug(crop_size, inter_method))\n else:\n auglist.append(CenterCropAug(crop_size, inter_method))\n\n if rand_mirror:\n auglist.append(HorizontalFlipAug(0.5))\n\n auglist.append(CastAug())\n\n if brightness or contrast or saturation:\n auglist.append(ColorJitterAug(brightness, contrast, saturation))\n\n if pca_noise > 0:\n eigval = np.array([55.46, 4.794, 1.148])\n eigvec = np.array([[-0.5675, 0.7192, 0.4009],\n [-0.5808, -0.0045, -0.8140],\n [-0.5836, -0.6948, 0.4203]])\n auglist.append(LightingAug(pca_noise, eigval, eigvec))\n\n if mean is True:\n mean = np.array([123.68, 116.28, 103.53])\n elif mean is not None:\n assert isinstance(mean, np.ndarray) and mean.shape[0] in [1, 3]\n\n if std is True:\n std = np.array([58.395, 57.12, 57.375])\n elif std is not None:\n assert isinstance(std, np.ndarray) and std.shape[0] in [1, 3]\n\n if mean is not None and std is not None:\n auglist.append(ColorNormalizeAug(mean, std))\n\n return auglist\n\n\nclass ImageIter(io.DataIter):\n \"\"\"Image data iterator with a large number of augmentation choices.\n This iterator supports reading from both .rec files and raw image files.\n\n To load input images from .rec files, use `path_imgrec` parameter and to load from raw image\n files, use `path_imglist` and `path_root` parameters.\n\n To use data partition (for distributed training) or shuffling, specify `path_imgidx` parameter.\n\n Parameters\n ----------\n batch_size : int\n Number of examples per batch.\n data_shape : tuple\n Data shape in (channels, height, width) format.\n For now, only RGB image with 3 channels is supported.\n label_width : int, optional\n Number of labels per example. The default label width is 1.\n path_imgrec : str\n Path to image record file (.rec).\n Created with tools/im2rec.py or bin/im2rec.\n path_imglist : str\n Path to image list (.lst).\n Created with tools/im2rec.py or with custom script.\n Format: Tab separated record of index, one or more labels and relative_path_from_root.\n imglist: list\n A list of images with the label(s).\n Each item is a list [imagelabel: float or list of float, imgpath].\n path_root : str\n Root folder of image files.\n path_imgidx : str\n Path to image index file. Needed for partition and shuffling when using .rec source.\n shuffle : bool\n Whether to shuffle all images at the start of each iteration or not.\n Can be slow for HDD.\n part_index : int\n Partition index.\n num_parts : int\n Total number of partitions.\n data_name : str\n Data name for provided symbols.\n label_name : str\n Label name for provided symbols.\n kwargs : ...\n More arguments for creating augmenter. 
See mx.image.CreateAugmenter.\n \"\"\"\n\n def __init__(self, batch_size, data_shape, label_width=1,\n path_imgrec=None, path_imglist=None, path_root=None, path_imgidx=None,\n shuffle=False, part_index=0, num_parts=1, aug_list=None, imglist=None,\n data_name='data', label_name='softmax_label', **kwargs):\n super(ImageIter, self).__init__()\n assert path_imgrec or path_imglist or (isinstance(imglist, list))\n if path_imgrec:\n print('loading recordio...')\n if path_imgidx:\n self.imgrec = recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, 'r') # pylint: disable=redefined-variable-type\n self.imgidx = list(self.imgrec.keys)\n else:\n self.imgrec = recordio.MXRecordIO(path_imgrec, 'r') # pylint: disable=redefined-variable-type\n self.imgidx = None\n else:\n self.imgrec = None\n\n if path_imglist:\n print('loading image list...')\n with open(path_imglist) as fin:\n imglist = {}\n imgkeys = []\n for line in iter(fin.readline, ''):\n line = line.strip().split('\\t')\n label = nd.array([float(i) for i in line[1:-1]])\n key = int(line[0])\n imglist[key] = (label, line[-1])\n imgkeys.append(key)\n self.imglist = imglist\n elif isinstance(imglist, list):\n print('loading image list...')\n result = {}\n imgkeys = []\n index = 1\n for img in imglist:\n key = str(index) # pylint: disable=redefined-variable-type\n index += 1\n if isinstance(img[0], numeric_types):\n label = nd.array([img[0]])\n else:\n label = nd.array(img[0])\n result[key] = (label, img[1])\n imgkeys.append(str(key))\n self.imglist = result\n else:\n self.imglist = None\n self.path_root = path_root\n\n self.check_data_shape(data_shape)\n self.provide_data = [(data_name, (batch_size,) + data_shape)]\n if label_width > 1:\n self.provide_label = [(label_name, (batch_size, label_width))]\n else:\n self.provide_label = [(label_name, (batch_size,))]\n self.batch_size = batch_size\n self.data_shape = data_shape\n self.label_width = label_width\n\n self.shuffle = shuffle\n if self.imgrec is None:\n self.seq = imgkeys\n elif shuffle or num_parts > 1:\n assert self.imgidx is not None\n self.seq = self.imgidx\n else:\n self.seq = None\n\n if num_parts > 1:\n assert part_index < num_parts\n N = len(self.seq)\n C = N / num_parts\n self.seq = self.seq[part_index * C:(part_index + 1) * C]\n if aug_list is None:\n self.auglist = CreateAugmenter(data_shape, **kwargs)\n else:\n self.auglist = aug_list\n self.cur = 0\n self.reset()\n\n def reset(self):\n \"\"\"Resets the iterator to the beginning of the data.\"\"\"\n if self.shuffle:\n random.shuffle(self.seq)\n if self.imgrec is not None:\n self.imgrec.reset()\n self.cur = 0\n\n def next_sample(self):\n \"\"\"Helper function for reading in next sample.\"\"\"\n if self.seq is not None:\n if self.cur >= len(self.seq):\n raise StopIteration\n idx = self.seq[self.cur]\n self.cur += 1\n if self.imgrec is not None:\n s = self.imgrec.read_idx(idx)\n header, img = recordio.unpack(s)\n if self.imglist is None:\n return header.label, img\n else:\n return self.imglist[idx][0], img\n else:\n label, fname = self.imglist[idx]\n return label, self.read_image(fname)\n else:\n s = self.imgrec.read()\n if s is None:\n raise StopIteration\n header, img = recordio.unpack(s)\n return header.label, img\n\n def next(self):\n \"\"\"Returns the next batch of data.\"\"\"\n batch_size = self.batch_size\n c, h, w = self.data_shape\n batch_data = nd.empty((batch_size, c, h, w))\n batch_label = nd.empty(self.provide_label[0][1])\n i = 0\n try:\n while i < batch_size:\n label, s = self.next_sample()\n data = [self.imdecode(s)]\n 
try:\n self.check_valid_image(data)\n except RuntimeError as e:\n logging.debug('Invalid image, skipping: %s', str(e))\n continue\n data = self.augmentation_transform(data)\n for datum in data:\n assert i < batch_size, 'Batch size must be multiples of augmenter output length'\n batch_data[i][:] = self.postprocess_data(datum)\n batch_label[i][:] = label\n i += 1\n except StopIteration:\n if not i:\n raise StopIteration\n\n return io.DataBatch([batch_data], [batch_label], batch_size - i)\n\n def check_data_shape(self, data_shape):\n \"\"\"Checks if the input data shape is valid\"\"\"\n if not len(data_shape) == 3:\n raise ValueError('data_shape should have length 3, with dimensions CxHxW')\n if not data_shape[0] == 3:\n raise ValueError('This iterator expects inputs to have 3 channels.')\n\n def check_valid_image(self, data):\n \"\"\"Checks if the input data is valid\"\"\"\n if len(data[0].shape) == 0:\n raise RuntimeError('Data shape is wrong')\n\n def imdecode(self, s):\n \"\"\"Decodes a string or byte string to an NDArray.\n See mx.img.imdecode for more details.\"\"\"\n return imdecode(s)\n\n def read_image(self, fname):\n \"\"\"Reads an input image `fname` and returns the decoded raw bytes.\n\n Example usage:\n ----------\n >>> dataIter.read_image('Face.jpg') # returns decoded raw bytes.\n '\\xff\\xd8\\xff\\xe0\\x00...'\n \"\"\"\n with open(os.path.join(self.path_root, fname), 'rb') as fin:\n img = fin.read()\n return img\n\n def augmentation_transform(self, data):\n \"\"\"Transforms input data with specified augmentation.\"\"\"\n for aug in self.auglist:\n data = [ret for src in data for ret in aug(src)]\n return data\n\n def postprocess_data(self, datum):\n \"\"\"Final postprocessing step before image is loaded into the batch.\"\"\"\n return nd.transpose(datum, axes=(2, 0, 1))\n" ]
[ [ "numpy.random.normal", "numpy.sqrt", "numpy.dot", "numpy.array", "numpy.frombuffer" ] ]
qma16443/AIcamp_MTCNN
[ "431c3ce1cabf24266690322d525bdf7133666dc0" ]
[ "Detection/MtcnnDetector.py" ]
[ "import cv2\nimport time\nimport numpy as np\nimport sys\nsys.path.append(\"../\")\nfrom train_models.MTCNN_config import config\nfrom Detection.nms import py_nms\n\n\nclass MtcnnDetector(object):\n\n\n def __init__(self,\n detectors,\n min_face_size=25,\n stride=2,\n threshold=[0.6, 0.7, 0.7],\n scale_factor=0.79,\n #scale_factor=0.709,#change\n slide_window=False):\n\n self.pnet_detector = detectors[0]\n self.rnet_detector = detectors[1]\n self.onet_detector = detectors[2]\n self.min_face_size = min_face_size\n self.stride = stride\n self.thresh = threshold\n self.scale_factor = scale_factor\n self.slide_window = slide_window\n\n def convert_to_square(self, bbox):\n \"\"\"\n convert bbox to square\n Parameters:\n ----------\n bbox: numpy array , shape n x 5\n input bbox\n Returns:\n -------\n square bbox\n \"\"\"\n square_bbox = bbox.copy()\n\n h = bbox[:, 3] - bbox[:, 1] + 1\n w = bbox[:, 2] - bbox[:, 0] + 1\n max_side = np.maximum(h, w)\n square_bbox[:, 0] = bbox[:, 0] + w * 0.5 - max_side * 0.5\n square_bbox[:, 1] = bbox[:, 1] + h * 0.5 - max_side * 0.5\n square_bbox[:, 2] = square_bbox[:, 0] + max_side - 1\n square_bbox[:, 3] = square_bbox[:, 1] + max_side - 1\n return square_bbox\n\n def calibrate_box(self, bbox, reg):\n \"\"\"\n calibrate bboxes\n Parameters:\n ----------\n bbox: numpy array, shape n x 5\n input bboxes\n reg: numpy array, shape n x 4\n bboxes adjustment\n Returns:\n -------\n bboxes after refinement\n \"\"\"\n\n bbox_c = bbox.copy()\n w = bbox[:, 2] - bbox[:, 0] + 1\n w = np.expand_dims(w, 1)\n h = bbox[:, 3] - bbox[:, 1] + 1\n h = np.expand_dims(h, 1)\n reg_m = np.hstack([w, h, w, h])\n aug = reg_m * reg\n bbox_c[:, 0:4] = bbox_c[:, 0:4] + aug\n return bbox_c\n\n def generate_bbox(self, cls_map, reg, scale, threshold):\n \"\"\"\n generate bbox from feature cls_map\n Parameters:\n ----------\n cls_map: numpy array , n x m \n detect score for each position\n reg: numpy array , n x m x 4\n bbox\n scale: float number\n scale of this detection\n threshold: float number\n detect threshold\n Returns:\n -------\n bbox array\n \"\"\"\n stride = 2\n #stride = 4\n cellsize = 12\n #cellsize = 25\n\n t_index = np.where(cls_map > threshold)\n\n # find nothing\n if t_index[0].size == 0:\n return np.array([])\n #offset\n dx1, dy1, dx2, dy2 = [reg[t_index[0], t_index[1], i] for i in range(4)]\n\n reg = np.array([dx1, dy1, dx2, dy2])\n score = cls_map[t_index[0], t_index[1]]\n boundingbox = np.vstack([np.round((stride * t_index[1]) / scale),\n np.round((stride * t_index[0]) / scale),\n np.round((stride * t_index[1] + cellsize) / scale),\n np.round((stride * t_index[0] + cellsize) / scale),\n score,\n reg])\n\n return boundingbox.T\n #pre-process images\n def processed_image(self, img, scale):\n height, width, channels = img.shape\n new_height = int(height * scale) # resized new height\n new_width = int(width * scale) # resized new width\n new_dim = (new_width, new_height)\n img_resized = cv2.resize(img, new_dim, interpolation=cv2.INTER_LINEAR) # resized image\n img_resized = (img_resized - 127.5) / 128\n return img_resized\n\n def pad(self, bboxes, w, h):\n \"\"\"\n pad the the bboxes, alse restrict the size of it\n Parameters:\n ----------\n bboxes: numpy array, n x 5\n input bboxes\n w: float number\n width of the input image\n h: float number\n height of the input image\n Returns :\n ------\n dy, dx : numpy array, n x 1\n start point of the bbox in target image\n edy, edx : numpy array, n x 1\n end point of the bbox in target image\n y, x : numpy array, n x 1\n start point of 
the bbox in original image\n ex, ex : numpy array, n x 1\n end point of the bbox in original image\n tmph, tmpw: numpy array, n x 1\n height and width of the bbox\n \"\"\"\n tmpw, tmph = bboxes[:, 2] - bboxes[:, 0] + 1, bboxes[:, 3] - bboxes[:, 1] + 1\n num_box = bboxes.shape[0]\n\n dx, dy = np.zeros((num_box,)), np.zeros((num_box,))\n edx, edy = tmpw.copy() - 1, tmph.copy() - 1\n\n x, y, ex, ey = bboxes[:, 0], bboxes[:, 1], bboxes[:, 2], bboxes[:, 3]\n\n tmp_index = np.where(ex > w - 1)\n edx[tmp_index] = tmpw[tmp_index] + w - 2 - ex[tmp_index]\n ex[tmp_index] = w - 1\n\n tmp_index = np.where(ey > h - 1)\n edy[tmp_index] = tmph[tmp_index] + h - 2 - ey[tmp_index]\n ey[tmp_index] = h - 1\n\n tmp_index = np.where(x < 0)\n dx[tmp_index] = 0 - x[tmp_index]\n x[tmp_index] = 0\n\n tmp_index = np.where(y < 0)\n dy[tmp_index] = 0 - y[tmp_index]\n y[tmp_index] = 0\n\n return_list = [dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph]\n return_list = [item.astype(np.int32) for item in return_list]\n\n return return_list\n \n def detect_pnet(self, im):\n \"\"\"Get face candidates through pnet\n\n Parameters:\n ----------\n im: numpy array\n input image array\n\n Returns:\n -------\n boxes: numpy array\n detected boxes before calibration\n boxes_c: numpy array\n boxes after calibration\n \"\"\"\n h, w, c = im.shape\n net_size = 12\n \n current_scale = float(net_size) / self.min_face_size # find initial scale\n # print(\"current_scale\", net_size, self.min_face_size, current_scale)\n im_resized = self.processed_image(im, current_scale)\n current_height, current_width, _ = im_resized.shape\n # fcn\n all_boxes = list()\n while min(current_height, current_width) > net_size:\n #return the result predicted by pnet\n #cls_cls_map : H*w*2\n #reg: H*w*4\n cls_cls_map, reg = self.pnet_detector.predict(im_resized)\n #boxes: num*9(x1,y1,x2,y2,score,x1_offset,y1_offset,x2_offset,y2_offset)\n boxes = self.generate_bbox(cls_cls_map[:, :,1], reg, current_scale, self.thresh[0])\n\n current_scale *= self.scale_factor\n im_resized = self.processed_image(im, current_scale)\n current_height, current_width, _ = im_resized.shape\n\n if boxes.size == 0:\n continue\n keep = py_nms(boxes[:, :5], 0.5, 'Union')\n boxes = boxes[keep]\n all_boxes.append(boxes)\n\n if len(all_boxes) == 0:\n return None, None, None\n\n all_boxes = np.vstack(all_boxes)\n\n # merge the detection from first stage\n keep = py_nms(all_boxes[:, 0:5], 0.7, 'Union')\n all_boxes = all_boxes[keep]\n boxes = all_boxes[:, :5]\n\n bbw = all_boxes[:, 2] - all_boxes[:, 0] + 1\n bbh = all_boxes[:, 3] - all_boxes[:, 1] + 1\n\n # refine the boxes\n boxes_c = np.vstack([all_boxes[:, 0] + all_boxes[:, 5] * bbw,\n all_boxes[:, 1] + all_boxes[:, 6] * bbh,\n all_boxes[:, 2] + all_boxes[:, 7] * bbw,\n all_boxes[:, 3] + all_boxes[:, 8] * bbh,\n all_boxes[:, 4]])\n boxes_c = boxes_c.T\n\n return boxes, boxes_c, None\n def detect_rnet(self, im, dets):\n \"\"\"Get face candidates using rnet\n\n Parameters:\n ----------\n im: numpy array\n input image array\n dets: numpy array\n detection results of pnet\n\n Returns:\n -------\n boxes: numpy array\n detected boxes before calibration\n boxes_c: numpy array\n boxes after calibration\n \"\"\"\n h, w, c = im.shape\n dets = self.convert_to_square(dets)\n dets[:, 0:4] = np.round(dets[:, 0:4])\n\n [dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph] = self.pad(dets, w, h)\n num_boxes = dets.shape[0]\n cropped_ims = np.zeros((num_boxes, 24, 24, 3), dtype=np.float32)\n for i in range(num_boxes):\n tmp = np.zeros((tmph[i], tmpw[i], 3), dtype=np.uint8)\n 
tmp[dy[i]:edy[i] + 1, dx[i]:edx[i] + 1, :] = im[y[i]:ey[i] + 1, x[i]:ex[i] + 1, :]\n cropped_ims[i, :, :, :] = (cv2.resize(tmp, (24, 24))-127.5) / 128\n #cls_scores : num_data*2\n #reg: num_data*4\n #landmark: num_data*10\n cls_scores, reg, _ = self.rnet_detector.predict(cropped_ims)\n cls_scores = cls_scores[:,1]\n keep_inds = np.where(cls_scores > self.thresh[1])[0]\n if len(keep_inds) > 0:\n boxes = dets[keep_inds]\n boxes[:, 4] = cls_scores[keep_inds]\n reg = reg[keep_inds]\n #landmark = landmark[keep_inds]\n else:\n return None, None, None\n \n \n keep = py_nms(boxes, 0.6)\n boxes = boxes[keep]\n boxes_c = self.calibrate_box(boxes, reg[keep])\n return boxes, boxes_c,None\n def detect_onet(self, im, dets):\n \"\"\"Get face candidates using onet\n\n Parameters:\n ----------\n im: numpy array\n input image array\n dets: numpy array\n detection results of rnet\n\n Returns:\n -------\n boxes: numpy array\n detected boxes before calibration\n boxes_c: numpy array\n boxes after calibration\n \"\"\"\n h, w, c = im.shape\n dets = self.convert_to_square(dets)\n dets[:, 0:4] = np.round(dets[:, 0:4])\n [dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph] = self.pad(dets, w, h)\n num_boxes = dets.shape[0]\n cropped_ims = np.zeros((num_boxes, 48, 48, 3), dtype=np.float32)\n for i in range(num_boxes):\n tmp = np.zeros((tmph[i], tmpw[i], 3), dtype=np.uint8)\n tmp[dy[i]:edy[i] + 1, dx[i]:edx[i] + 1, :] = im[y[i]:ey[i] + 1, x[i]:ex[i] + 1, :]\n cropped_ims[i, :, :, :] = (cv2.resize(tmp, (48, 48))-127.5) / 128\n \n cls_scores, reg,landmark = self.onet_detector.predict(cropped_ims)\n #prob belongs to face\n cls_scores = cls_scores[:,1] \n keep_inds = np.where(cls_scores > self.thresh[2])[0] \n if len(keep_inds) > 0:\n #pickout filtered box\n boxes = dets[keep_inds]\n boxes[:, 4] = cls_scores[keep_inds]\n reg = reg[keep_inds]\n landmark = landmark[keep_inds]\n else:\n return None, None, None\n \n #width\n w = boxes[:,2] - boxes[:,0] + 1\n #height\n h = boxes[:,3] - boxes[:,1] + 1\n landmark[:,0::2] = (np.tile(w,(5,1)) * landmark[:,0::2].T + np.tile(boxes[:,0],(5,1)) - 1).T\n landmark[:,1::2] = (np.tile(h,(5,1)) * landmark[:,1::2].T + np.tile(boxes[:,1],(5,1)) - 1).T \n boxes_c = self.calibrate_box(boxes, reg)\n \n \n boxes = boxes[py_nms(boxes, 0.6, \"Minimum\")]\n keep = py_nms(boxes_c, 0.6, \"Minimum\")\n boxes_c = boxes_c[keep]\n landmark = landmark[keep]\n return boxes, boxes_c,landmark\n #use for video\n def detect(self, img):\n \"\"\"Detect face over image\n \"\"\"\n boxes = None\n t = time.time()\n \n # pnet\n t1 = 0\n if self.pnet_detector:\n boxes, boxes_c,_ = self.detect_pnet(img)\n if boxes_c is None:\n return np.array([]),np.array([])\n \n t1 = time.time() - t\n t = time.time()\n \n # rnet\n t2 = 0\n if self.rnet_detector:\n boxes, boxes_c,_ = self.detect_rnet(img, boxes_c)\n if boxes_c is None:\n return np.array([]),np.array([])\n \n t2 = time.time() - t\n t = time.time()\n \n # onet\n t3 = 0\n if self.onet_detector:\n boxes, boxes_c,landmark = self.detect_onet(img, boxes_c)\n if boxes_c is None:\n return np.array([]),np.array([])\n \n t3 = time.time() - t\n t = time.time()\n print(\n \"time cost \" + '{:.3f}'.format(t1 + t2 + t3) + ' pnet {:.3f} rnet {:.3f} onet {:.3f}'.format(t1, t2,\n t3))\n \n return boxes_c,landmark\n def detect_face(self, test_data):\n all_boxes = []#save each image's bboxes\n landmarks = []\n batch_idx = 0\n sum_time = 0\n #test_data is iter_\n for databatch in test_data:\n #databatch(image returned)\n if batch_idx % 100 == 0:\n print(\"%d images done\" % batch_idx)\n im = 
databatch\n # pnet\n t1 = 0\n if self.pnet_detector:\n t = time.time()\n #ignore landmark \n boxes, boxes_c, landmark = self.detect_pnet(im)\n t1 = time.time() - t\n sum_time += t1\n if boxes_c is None:\n print(\"boxes_c is None...\")\n all_boxes.append(np.array([]))\n #pay attention\n landmarks.append(np.array([]))\n batch_idx += 1\n continue\n # rnet\n t2 = 0\n if self.rnet_detector:\n t = time.time()\n #ignore landmark \n boxes, boxes_c, landmark = self.detect_rnet(im, boxes_c)\n t2 = time.time() - t\n sum_time += t2\n if boxes_c is None:\n all_boxes.append(np.array([]))\n landmarks.append(np.array([]))\n batch_idx += 1\n continue\n # onet\n t3 = 0\n if self.onet_detector:\n t = time.time()\n boxes, boxes_c, landmark = self.detect_onet(im, boxes_c)\n t3 = time.time() - t\n sum_time += t3\n if boxes_c is None:\n all_boxes.append(np.array([]))\n landmarks.append(np.array([])) \n batch_idx += 1\n continue\n print(\n \"time cost \" + '{:.3f}'.format(sum_time) + ' pnet {:.3f} rnet {:.3f} onet {:.3f}'.format(t1, t2,t3))\n \n \n all_boxes.append(boxes_c)\n landmarks.append(landmark)\n batch_idx += 1\n #num_of_data*9,num_of_data*10\n return all_boxes,landmarks\n\n" ]
[ [ "numpy.vstack", "numpy.tile", "numpy.zeros", "numpy.round", "numpy.hstack", "numpy.expand_dims", "numpy.maximum", "numpy.where", "numpy.array" ] ]
stjordanis/graph4nlp
[ "c6ebde32bc77d3a7b78f86a93f19b1c057963ffa", "c6ebde32bc77d3a7b78f86a93f19b1c057963ffa" ]
[ "graph4nlp/pytorch/test/seq_decoder/graph2seq/src/g2s_v2/core/utils/vocab_utils.py", "graph4nlp/pytorch/modules/prediction/classification/link_prediction/ConcatFeedForwardNN.py" ]
[ "# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nimport os\nimport re\nimport pickle\nimport numpy as np\nfrom collections import Counter\nfrom functools import lru_cache\n\nfrom . import constants\nfrom .data_utils import tokenize\n\nword_detector = re.compile('\\w')\n\nclass VocabModel(object):\n def __init__(self, data_set, config):\n print('Building vocabs...')\n (allWords, allEdgeTypes) = collect_vocabs(data_set)\n print('Number of words: {}'.format(len(allWords)))\n print('Number of edge types: {}'.format(len(allEdgeTypes)))\n\n self.word_vocab = Vocab()\n self.word_vocab.build_vocab(allWords, vocab_size=config['top_word_vocab'], min_freq=config['min_word_freq'])\n if config.get('pretrained_word_embed_file', None):\n self.word_vocab.load_embeddings(config['pretrained_word_embed_file'])\n print('Using pretrained word embeddings')\n else:\n self.word_vocab.randomize_embeddings(config['word_embed_dim'])\n print('Using randomized word embeddings')\n print('word_vocab: {}'.format(self.word_vocab.embeddings.shape))\n\n self.edge_vocab = Vocab()\n self.edge_vocab.build_vocab(allEdgeTypes)\n print('edge_vocab: {}'.format((self.edge_vocab.get_vocab_size())))\n\n\n @classmethod\n def build(cls, saved_vocab_file=None, data_set=None, config=None):\n \"\"\"\n Loads a Vocabulary from disk.\n\n Args:\n saved_vocab_file (str): path to the saved vocab file\n data_set:\n config:\n\n Returns:\n Vocabulary: loaded Vocabulary\n \"\"\"\n if os.path.exists(saved_vocab_file):\n print('Loading pre-built vocab model stored in {}'.format(saved_vocab_file))\n vocab_model = pickle.load(open(saved_vocab_file, 'rb'))\n\n else:\n vocab_model = VocabModel(data_set, config)\n print('Saving vocab model to {}'.format(saved_vocab_file))\n pickle.dump(vocab_model, open(saved_vocab_file, 'wb'))\n return vocab_model\n\nclass Vocab(object):\n def __init__(self):\n self.PAD = 0\n self.SOS = 1\n self.EOS = 2\n self.UNK = 3\n self.pad_token = constants._PAD_TOKEN\n self.sos_token = constants._SOS_TOKEN\n self.eos_token = constants._EOS_TOKEN\n self.unk_token = constants._UNK_TOKEN\n\n self.reserved = [self.pad_token, self.sos_token, self.eos_token, self.unk_token]\n self.index2word = self.reserved[:]\n self.word2index = dict(zip(self.reserved, range(len(self.reserved))))\n self.word2count = Counter()\n self.embeddings = None\n\n def build_vocab(self, vocab_counter, vocab_size=None, min_freq=1):\n self.word2count = vocab_counter\n self._add_words(vocab_counter.keys())\n self._trim(vocab_size=vocab_size, min_freq=min_freq)\n\n def _add_words(self, words):\n for word in words:\n if word not in self.word2index:\n self.word2index[word] = len(self.index2word)\n self.index2word.append(word)\n assert len(self.word2index) == len(self.index2word)\n\n def _trim(self, vocab_size: int=None, min_freq: int=1):\n if min_freq <= 1 and (vocab_size is None or vocab_size >= len(self.word2index)):\n return\n ordered_words = sorted(((c, w) for (w, c) in self.word2count.items()), reverse=True)\n if vocab_size:\n ordered_words = ordered_words[:vocab_size]\n self.index2word = self.reserved[:]\n self.word2index = dict(zip(self.reserved, range(len(self.reserved))))\n self.word2count = Counter()\n for count, word in ordered_words:\n if count < min_freq: break\n if word not in self.word2index:\n self.word2index[word] = len(self.index2word)\n self.word2count[word] = count\n self.index2word.append(word)\n assert len(self.word2index) == len(self.index2word)\n\n def load_embeddings(self, file_path, scale=0.08, dtype=np.float32):\n hit_words 
= set()\n vocab_size = len(self)\n with open(file_path, 'rb') as f:\n for line in f:\n line = line.split()\n word = line[0].decode('utf-8')\n idx = self.word2index.get(word.lower(), None)\n if idx is None or idx in hit_words:\n continue\n\n vec = np.array(line[1:], dtype=dtype)\n if self.embeddings is None:\n n_dims = len(vec)\n self.embeddings = np.array(np.random.uniform(low=-scale, high=scale, size=(vocab_size, n_dims)), dtype=dtype)\n self.embeddings[self.PAD] = np.zeros(n_dims)\n self.embeddings[idx] = vec\n hit_words.add(idx)\n print('Pretrained word embeddings hit ratio: {}'.format(len(hit_words) / len(self.index2word)))\n\n def randomize_embeddings(self, n_dims, scale=0.08):\n vocab_size = self.get_vocab_size()\n shape = (vocab_size, n_dims)\n self.embeddings = np.array(np.random.uniform(low=-scale, high=scale, size=shape), dtype=np.float32)\n self.embeddings[self.PAD] = np.zeros(n_dims)\n\n def __getitem__(self, item):\n if type(item) is int:\n return self.index2word[item]\n return self.word2index.get(item, self.UNK)\n\n def __len__(self):\n return len(self.index2word)\n\n @lru_cache(maxsize=None)\n def is_word(self, token_id: int) -> bool:\n \"\"\"Return whether the token at `token_id` is a word; False for punctuations.\"\"\"\n if token_id < 4: return False\n if token_id >= len(self): return True # OOV is assumed to be words\n token_str = self.index2word[token_id]\n if not word_detector.search(token_str) or token_str == '<P>':\n return False\n return True\n\n def get_vocab_size(self):\n return len(self.index2word)\n\n def getIndex(self, word):\n return self.word2index.get(word, self.UNK)\n\n def getWord(self, idx):\n return self.index2word[idx] if idx < len(self.index2word) else self.unk_token\n\n def to_word_sequence(self, seq):\n sentence = []\n for idx in seq:\n word = self.getWord(idx)\n sentence.append(word)\n return sentence\n\n def to_index_sequence(self, sentence):\n sentence = sentence.strip()\n seq = []\n for word in tokenize(sentence):\n idx = self.getIndex(word)\n seq.append(idx)\n return seq\n\n def to_index_sequence_for_list(self, words):\n seq = []\n for word in words:\n idx = self.getIndex(word)\n seq.append(idx)\n return seq\n\ndef collect_vocabs(all_instances):\n all_words = Counter()\n all_edge_types = Counter()\n for (sent1, sent2) in all_instances:\n # for each in sent1.words:\n # all_words.update(each)\n for each in sent1.graph['g_features']:\n all_words.update(each)\n all_words.update(sent2.words)\n\n # for node, value in sent1.graph['g_adj'].items():\n # all_edge_types.update([each['edge'] for each in value])\n return all_words, all_edge_types\n", "from torch import nn\nimport torch\nfrom ..base import LinkPredictionBase\nfrom .ConcatFeedForwardNNLayer import ConcatFeedForwardNNLayer\n\nclass ConcatFeedForwardNN(LinkPredictionBase):\n r\"\"\"Specific class for link prediction task.\n\n Parameters\n ----------\n\n input_size : int \n The length of input node embeddings\n num_class : int \n The number of node catrgoriey for classification\n hidden_size : list of int type values\n Example for two layers's FeedforwardNN: [50, 20]\n activation: the activation function class for each fully connected layer\n Default: nn.ReLU()\n Example: nn.ReLU(),nn.Sigmoid(). 
\n\n \"\"\" \n def __init__(self, input_size, hidden_size,num_class,activation=nn.ReLU()): \n super(ConcatFeedForwardNN, self).__init__()\n \n \n self.classifier=ConcatFeedForwardNNLayer(input_size, num_class, hidden_size,activation)\n\n def forward(self, input_graph):\n r\"\"\"\n Forward functions to compute the logits tensor for link prediction.\n \n \n Parameters\n ----------\n \n input graph : GraphData\n The tensors stored in the node feature field named \"node_emb\" in the \n input_graph are used for link prediction.\n\n \n Returns \n ---------\n \n output_graph : GraphData\n The computed logit tensor for each pair of nodes in the graph are stored\n in the node feature field named \"edge_logits\".\n logit tensor shape is: [num_class] \n \"\"\" \n #get the nod embedding from the graph \n node_emb=input_graph.node_features['node_emb']\n \n #add the edges and edge prediction logits into the graph\n num_node=node_emb.shape[0]\n node_idx_list=[idx for idx in range(num_node)]\n src_idx=torch.tensor(node_idx_list).view(-1,1).repeat(1,num_node).view(-1)\n dst_idx=torch.tensor(node_idx_list).view(1,-1).repeat(num_node,1).view(-1)\n \n input_graph.add_edges(src_idx,dst_idx)\n input_graph.edge_features['logits']=self.classifier(node_emb)\n \n return input_graph\n\n\n\n\n\n\n\n" ]
[ [ "numpy.random.uniform", "numpy.array", "numpy.zeros" ], [ "torch.tensor", "torch.nn.ReLU" ] ]
YinchaoGao/detectron2
[ "04958b93e1232935e126c2fd9e6ccd3f57c3a8f3" ]
[ "detectron2/export/api.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\nimport copy\nimport logging\nimport os\nimport torch\nfrom caffe2.proto import caffe2_pb2\nfrom torch import nn\n\nfrom detectron2.config import CfgNode as CN\n\nfrom .caffe2_export import export_caffe2_detection_model\nfrom .caffe2_export import export_onnx_model as export_onnx_model_impl\nfrom .caffe2_export import run_and_save_graph\nfrom .caffe2_inference import ProtobufDetectionModel\nfrom .caffe2_modeling import META_ARCH_CAFFE2_EXPORT_TYPE_MAP, convert_batched_inputs_to_c2_format\nfrom .shared import get_pb_arg_vali, get_pb_arg_vals, save_graph\n\n__all__ = [\"add_export_config\", \"export_caffe2_model\", \"Caffe2Model\", \"export_onnx_model\"]\n\n\ndef add_export_config(cfg):\n \"\"\"\n Args:\n cfg (CfgNode): a detectron2 config\n\n Returns:\n CfgNode: an updated config with new options that will be used\n by :class:`Caffe2Tracer`.\n \"\"\"\n is_frozen = cfg.is_frozen()\n cfg.defrost()\n cfg.EXPORT_CAFFE2 = CN()\n cfg.EXPORT_CAFFE2.USE_HEATMAP_MAX_KEYPOINT = False\n if is_frozen:\n cfg.freeze()\n return cfg\n\n\nclass Caffe2Tracer:\n \"\"\"\n Make a detectron2 model traceable with caffe2 style.\n\n An original detectron2 model may not be traceable, or\n cannot be deployed directly after being traced, due to some reasons:\n 1. control flow in some ops\n 2. custom ops\n 3. complicated pre/post processing\n\n This class provides a traceable version of a detectron2 model by:\n 1. Rewrite parts of the model using ops in caffe2\n 2. Define the inputs \"after pre-processing\" as inputs to the model\n 3. Remove post-processing and produce raw layer outputs\n\n More specifically about inputs: all builtin models take two input tensors.\n (1) NCHW float \"data\" which is an image (usually in [0, 255])\n (2) Nx3 float \"im_info\", each row of which is (height, width, 1.0)\n\n After making a traceable model, the class provide methods to export such a\n model to different deployment formats.\n\n The class currently only supports models using builtin meta architectures.\n\n Experimental. 
Don't use.\n \"\"\"\n\n def __init__(self, cfg, model, inputs):\n \"\"\"\n Args:\n cfg (CfgNode): a detectron2 config, with extra export-related options\n added by :func:`add_export_config`.\n model (nn.Module): a model built by\n :func:`detectron2.modeling.build_model`.\n inputs: sample inputs that the given model takes for inference.\n Will be used to trace the model.\n \"\"\"\n assert isinstance(cfg, CN), cfg\n assert isinstance(model, torch.nn.Module), type(model)\n if \"EXPORT_CAFFE2\" not in cfg:\n cfg = add_export_config(cfg) # will just the defaults\n\n self.cfg = cfg\n self.model = model\n self.inputs = inputs\n\n def _get_traceable(self):\n # TODO how to make it extensible to support custom models\n C2MetaArch = META_ARCH_CAFFE2_EXPORT_TYPE_MAP[self.cfg.MODEL.META_ARCHITECTURE]\n traceable_model = C2MetaArch(self.cfg, copy.deepcopy(self.model))\n traceable_inputs = traceable_model.get_caffe2_inputs(self.inputs)\n return traceable_model, traceable_inputs\n\n def export_caffe2(self):\n \"\"\"\n Export the model to Caffe2's protobuf format.\n The returned object can be saved with `.save_protobuf()` method.\n The result can be loaded and executed using Caffe2 runtime.\n\n Returns:\n Caffe2Model\n \"\"\"\n model, inputs = self._get_traceable()\n predict_net, init_net = export_caffe2_detection_model(model, inputs)\n return Caffe2Model(predict_net, init_net)\n\n def export_onnx(self):\n \"\"\"\n Export the model to ONNX format.\n Note that the exported model contains custom ops only available in caffe2, therefore it\n cannot be directly executed by other runtime. Post-processing or transformation passes\n may be applied on the model to accommodate different runtimes.\n\n Returns:\n onnx.ModelProto: an onnx model.\n \"\"\"\n model, inputs = self._get_traceable()\n return export_onnx_model_impl(model, (inputs,))\n\n def export_torchscript(self):\n \"\"\"\n Export the model to a `torch.jit.TracedModule` by tracing.\n The returned object can be saved to a file by \".save()\".\n\n Returns:\n torch.jit.TracedModule: a torch TracedModule\n \"\"\"\n model, inputs = self._get_traceable()\n logger = logging.getLogger(__name__)\n logger.info(\"Tracing the model with torch.jit.trace ...\")\n with torch.no_grad():\n return torch.jit.trace(model, (inputs,))\n\n\ndef export_caffe2_model(cfg, model, inputs):\n \"\"\"\n Export a detectron2 model to caffe2 format.\n\n Args:\n cfg (CfgNode): a detectron2 config, with extra export-related options\n added by :func:`add_export_config`.\n model (nn.Module): a model built by\n :func:`detectron2.modeling.build_model`.\n It will be modified by this function.\n inputs: sample inputs that the given model takes for inference.\n Will be used to trace the model.\n\n Returns:\n Caffe2Model\n \"\"\"\n return Caffe2Tracer(cfg, model, inputs).export_caffe2()\n\n\ndef export_onnx_model(cfg, model, inputs):\n \"\"\"\n Export a detectron2 model to ONNX format.\n Note that the exported model contains custom ops only available in caffe2, therefore it\n cannot be directly executed by other runtime. 
Post-processing or transformation passes\n may be applied on the model to accommodate different runtimes.\n Args:\n cfg (CfgNode): a detectron2 config, with extra export-related options\n added by :func:`add_export_config`.\n model (nn.Module): a model built by\n :func:`detectron2.modeling.build_model`.\n It will be modified by this function.\n inputs: sample inputs that the given model takes for inference.\n Will be used to trace the model.\n Returns:\n onnx.ModelProto: an onnx model.\n \"\"\"\n return Caffe2Tracer(cfg, model, inputs).export_onnx()\n\n\nclass Caffe2Model(nn.Module):\n \"\"\"\n A wrapper around the traced model in caffe2's pb format.\n \"\"\"\n\n def __init__(self, predict_net, init_net):\n super().__init__()\n self.eval() # always in eval mode\n self._predict_net = predict_net\n self._init_net = init_net\n self._predictor = None\n\n @property\n def predict_net(self):\n \"\"\"\n Returns:\n core.Net: the underlying caffe2 predict net\n \"\"\"\n return self._predict_net\n\n @property\n def init_net(self):\n \"\"\"\n Returns:\n core.Net: the underlying caffe2 init net\n \"\"\"\n return self._init_net\n\n __init__.__HIDE_SPHINX_DOC__ = True\n\n def save_protobuf(self, output_dir):\n \"\"\"\n Save the model as caffe2's protobuf format.\n\n Args:\n output_dir (str): the output directory to save protobuf files.\n \"\"\"\n logger = logging.getLogger(__name__)\n logger.info(\"Saving model to {} ...\".format(output_dir))\n os.makedirs(output_dir, exist_ok=True)\n\n with open(os.path.join(output_dir, \"model.pb\"), \"wb\") as f:\n f.write(self._predict_net.SerializeToString())\n with open(os.path.join(output_dir, \"model.pbtxt\"), \"w\") as f:\n f.write(str(self._predict_net))\n with open(os.path.join(output_dir, \"model_init.pb\"), \"wb\") as f:\n f.write(self._init_net.SerializeToString())\n\n def save_graph(self, output_file, inputs=None):\n \"\"\"\n Save the graph as SVG format.\n\n Args:\n output_file (str): a SVG file\n inputs: optional inputs given to the model.\n If given, the inputs will be used to run the graph to record\n shape of every tensor. The shape information will be\n saved together with the graph.\n \"\"\"\n if inputs is None:\n save_graph(self._predict_net, output_file, op_only=False)\n else:\n size_divisibility = get_pb_arg_vali(self._predict_net, \"size_divisibility\", 0)\n device = get_pb_arg_vals(self._predict_net, \"device\", b\"cpu\").decode(\"ascii\")\n inputs = convert_batched_inputs_to_c2_format(inputs, size_divisibility, device)\n inputs = [x.cpu().numpy() for x in inputs]\n run_and_save_graph(self._predict_net, self._init_net, inputs, output_file)\n\n @staticmethod\n def load_protobuf(dir):\n \"\"\"\n Args:\n dir (str): a directory used to save Caffe2Model with\n :meth:`save_protobuf`.\n The files \"model.pb\" and \"model_init.pb\" are needed.\n\n Returns:\n Caffe2Model: the caffe2 model loaded from this directory.\n \"\"\"\n predict_net = caffe2_pb2.NetDef()\n with open(os.path.join(dir, \"model.pb\"), \"rb\") as f:\n predict_net.ParseFromString(f.read())\n\n init_net = caffe2_pb2.NetDef()\n with open(os.path.join(dir, \"model_init.pb\"), \"rb\") as f:\n init_net.ParseFromString(f.read())\n\n return Caffe2Model(predict_net, init_net)\n\n def __call__(self, inputs):\n \"\"\"\n An interface that wraps around a caffe2 model and mimics detectron2's models'\n input & output format. 
This is used to compare the outputs of caffe2 model\n with its original torch model.\n\n Due to the extra conversion between torch/caffe2,\n this method is not meant for benchmark.\n \"\"\"\n if self._predictor is None:\n self._predictor = ProtobufDetectionModel(self._predict_net, self._init_net)\n return self._predictor(inputs)\n" ]
[ [ "torch.no_grad", "torch.jit.trace" ] ]
UrielMaD/pandas
[ "b5233c447f3ed0ecfe256501e357326b82ce9120" ]
[ "pandas/core/frame.py" ]
[ "\"\"\"\nDataFrame\n---------\nAn efficient 2D container for potentially mixed-type time series or other\nlabeled data series.\n\nSimilar to its R counterpart, data.frame, except providing automatic data\nalignment and a host of useful data manipulation methods having to do with the\nlabeling information\n\"\"\"\nfrom __future__ import annotations\n\nimport collections\nfrom collections import abc\nimport datetime\nfrom io import StringIO\nimport itertools\nimport mmap\nfrom textwrap import dedent\nfrom typing import (\n IO,\n TYPE_CHECKING,\n Any,\n AnyStr,\n Dict,\n FrozenSet,\n Hashable,\n Iterable,\n Iterator,\n List,\n Optional,\n Sequence,\n Set,\n Tuple,\n Type,\n Union,\n cast,\n overload,\n)\nimport warnings\n\nimport numpy as np\nimport numpy.ma as ma\n\nfrom pandas._config import get_option\n\nfrom pandas._libs import algos as libalgos, lib, properties\nfrom pandas._libs.lib import no_default\nfrom pandas._typing import (\n AggFuncType,\n ArrayLike,\n Axes,\n Axis,\n CompressionOptions,\n Dtype,\n FilePathOrBuffer,\n FrameOrSeriesUnion,\n IndexKeyFunc,\n Label,\n Level,\n Renamer,\n StorageOptions,\n ValueKeyFunc,\n)\nfrom pandas.compat._optional import import_optional_dependency\nfrom pandas.compat.numpy import function as nv\nfrom pandas.util._decorators import (\n Appender,\n Substitution,\n deprecate_kwarg,\n doc,\n rewrite_axis_style_signature,\n)\nfrom pandas.util._validators import (\n validate_axis_style_args,\n validate_bool_kwarg,\n validate_percentile,\n)\n\nfrom pandas.core.dtypes.cast import (\n cast_scalar_to_array,\n coerce_to_dtypes,\n construct_1d_arraylike_from_scalar,\n find_common_type,\n infer_dtype_from_scalar,\n invalidate_string_dtypes,\n maybe_box_datetimelike,\n maybe_cast_to_datetime,\n maybe_casted_values,\n maybe_convert_platform,\n maybe_downcast_to_dtype,\n maybe_infer_to_datetimelike,\n maybe_upcast,\n validate_numeric_casting,\n)\nfrom pandas.core.dtypes.common import (\n ensure_int64,\n ensure_platform_int,\n infer_dtype_from_object,\n is_bool_dtype,\n is_dataclass,\n is_datetime64_any_dtype,\n is_dict_like,\n is_dtype_equal,\n is_extension_array_dtype,\n is_float,\n is_float_dtype,\n is_hashable,\n is_integer,\n is_integer_dtype,\n is_iterator,\n is_list_like,\n is_named_tuple,\n is_object_dtype,\n is_scalar,\n is_sequence,\n pandas_dtype,\n)\nfrom pandas.core.dtypes.missing import isna, notna\n\nfrom pandas.core import algorithms, common as com, generic, nanops, ops\nfrom pandas.core.accessor import CachedAccessor\nfrom pandas.core.aggregation import (\n aggregate,\n reconstruct_func,\n relabel_result,\n transform,\n)\nfrom pandas.core.arraylike import OpsMixin\nfrom pandas.core.arrays import Categorical, ExtensionArray\nfrom pandas.core.arrays.sparse import SparseFrameAccessor\nfrom pandas.core.construction import extract_array\nfrom pandas.core.generic import NDFrame, _shared_docs\nfrom pandas.core.indexes import base as ibase\nfrom pandas.core.indexes.api import (\n DatetimeIndex,\n Index,\n PeriodIndex,\n ensure_index,\n ensure_index_from_sequences,\n)\nfrom pandas.core.indexes.multi import MultiIndex, maybe_droplevels\nfrom pandas.core.indexing import check_bool_indexer, convert_to_index_sliceable\nfrom pandas.core.internals import BlockManager\nfrom pandas.core.internals.construction import (\n arrays_to_mgr,\n dataclasses_to_dicts,\n get_names_from_index,\n init_dict,\n init_ndarray,\n masked_rec_array_to_mgr,\n reorder_arrays,\n sanitize_index,\n to_arrays,\n)\nfrom pandas.core.reshape.melt import melt\nfrom pandas.core.series import 
Series\nfrom pandas.core.sorting import get_group_index, lexsort_indexer, nargsort\n\nfrom pandas.io.common import get_handle\nfrom pandas.io.formats import console, format as fmt\nfrom pandas.io.formats.info import BaseInfo, DataFrameInfo\nimport pandas.plotting\n\nif TYPE_CHECKING:\n from typing import Literal\n\n from pandas.core.groupby.generic import DataFrameGroupBy\n\n from pandas.io.formats.style import Styler\n\n# ---------------------------------------------------------------------\n# Docstring templates\n\n_shared_doc_kwargs = {\n \"axes\": \"index, columns\",\n \"klass\": \"DataFrame\",\n \"axes_single_arg\": \"{0 or 'index', 1 or 'columns'}\",\n \"axis\": \"\"\"axis : {0 or 'index', 1 or 'columns'}, default 0\n If 0 or 'index': apply function to each column.\n If 1 or 'columns': apply function to each row.\"\"\",\n \"optional_by\": \"\"\"\n by : str or list of str\n Name or list of names to sort by.\n\n - if `axis` is 0 or `'index'` then `by` may contain index\n levels and/or column labels.\n - if `axis` is 1 or `'columns'` then `by` may contain column\n levels and/or index labels.\"\"\",\n \"optional_labels\": \"\"\"labels : array-like, optional\n New labels / index to conform the axis specified by 'axis' to.\"\"\",\n \"optional_axis\": \"\"\"axis : int or str, optional\n Axis to target. Can be either the axis name ('index', 'columns')\n or number (0, 1).\"\"\",\n}\n\n_numeric_only_doc = \"\"\"numeric_only : boolean, default None\n Include only float, int, boolean data. If None, will attempt to use\n everything, then use only numeric data\n\"\"\"\n\n_merge_doc = \"\"\"\nMerge DataFrame or named Series objects with a database-style join.\n\nThe join is done on columns or indexes. If joining columns on\ncolumns, the DataFrame indexes *will be ignored*. Otherwise if joining indexes\non indexes or indexes on a column or columns, the index will be passed on.\nWhen performing a cross merge, no column specifications to merge on are\nallowed.\n\nParameters\n----------%s\nright : DataFrame or named Series\n Object to merge with.\nhow : {'left', 'right', 'outer', 'inner', 'cross'}, default 'inner'\n Type of merge to be performed.\n\n * left: use only keys from left frame, similar to a SQL left outer join;\n preserve key order.\n * right: use only keys from right frame, similar to a SQL right outer join;\n preserve key order.\n * outer: use union of keys from both frames, similar to a SQL full outer\n join; sort keys lexicographically.\n * inner: use intersection of keys from both frames, similar to a SQL inner\n join; preserve the order of the left keys.\n * cross: creates the cartesian product from both frames, preserves the order\n of the left keys.\n\n .. versionadded:: 1.2.0\n\non : label or list\n Column or index level names to join on. These must be found in both\n DataFrames. If `on` is None and not merging on indexes then this defaults\n to the intersection of the columns in both DataFrames.\nleft_on : label or list, or array-like\n Column or index level names to join on in the left DataFrame. Can also\n be an array or list of arrays of the length of the left DataFrame.\n These arrays are treated as if they are columns.\nright_on : label or list, or array-like\n Column or index level names to join on in the right DataFrame. Can also\n be an array or list of arrays of the length of the right DataFrame.\n These arrays are treated as if they are columns.\nleft_index : bool, default False\n Use the index from the left DataFrame as the join key(s). 
If it is a\n MultiIndex, the number of keys in the other DataFrame (either the index\n or a number of columns) must match the number of levels.\nright_index : bool, default False\n Use the index from the right DataFrame as the join key. Same caveats as\n left_index.\nsort : bool, default False\n Sort the join keys lexicographically in the result DataFrame. If False,\n the order of the join keys depends on the join type (how keyword).\nsuffixes : list-like, default is (\"_x\", \"_y\")\n A length-2 sequence where each element is optionally a string\n indicating the suffix to add to overlapping column names in\n `left` and `right` respectively. Pass a value of `None` instead\n of a string to indicate that the column name from `left` or\n `right` should be left as-is, with no suffix. At least one of the\n values must not be None.\ncopy : bool, default True\n If False, avoid copy if possible.\nindicator : bool or str, default False\n If True, adds a column to the output DataFrame called \"_merge\" with\n information on the source of each row. The column can be given a different\n name by providing a string argument. The column will have a Categorical\n type with the value of \"left_only\" for observations whose merge key only\n appears in the left DataFrame, \"right_only\" for observations\n whose merge key only appears in the right DataFrame, and \"both\"\n if the observation's merge key is found in both DataFrames.\n\nvalidate : str, optional\n If specified, checks if merge is of specified type.\n\n * \"one_to_one\" or \"1:1\": check if merge keys are unique in both\n left and right datasets.\n * \"one_to_many\" or \"1:m\": check if merge keys are unique in left\n dataset.\n * \"many_to_one\" or \"m:1\": check if merge keys are unique in right\n dataset.\n * \"many_to_many\" or \"m:m\": allowed, but does not result in checks.\n\nReturns\n-------\nDataFrame\n A DataFrame of the two merged objects.\n\nSee Also\n--------\nmerge_ordered : Merge with optional filling/interpolation.\nmerge_asof : Merge on nearest keys.\nDataFrame.join : Similar method using indices.\n\nNotes\n-----\nSupport for specifying index levels as the `on`, `left_on`, and\n`right_on` parameters was added in version 0.23.0\nSupport for merging named Series objects was added in version 0.24.0\n\nExamples\n--------\n>>> df1 = pd.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],\n... 'value': [1, 2, 3, 5]})\n>>> df2 = pd.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],\n... 'value': [5, 6, 7, 8]})\n>>> df1\n lkey value\n0 foo 1\n1 bar 2\n2 baz 3\n3 foo 5\n>>> df2\n rkey value\n0 foo 5\n1 bar 6\n2 baz 7\n3 foo 8\n\nMerge df1 and df2 on the lkey and rkey columns. The value columns have\nthe default suffixes, _x and _y, appended.\n\n>>> df1.merge(df2, left_on='lkey', right_on='rkey')\n lkey value_x rkey value_y\n0 foo 1 foo 5\n1 foo 1 foo 8\n2 foo 5 foo 5\n3 foo 5 foo 8\n4 bar 2 bar 6\n5 baz 3 baz 7\n\nMerge DataFrames df1 and df2 with specified left and right suffixes\nappended to any overlapping columns.\n\n>>> df1.merge(df2, left_on='lkey', right_on='rkey',\n... 
suffixes=('_left', '_right'))\n lkey value_left rkey value_right\n0 foo 1 foo 5\n1 foo 1 foo 8\n2 foo 5 foo 5\n3 foo 5 foo 8\n4 bar 2 bar 6\n5 baz 3 baz 7\n\nMerge DataFrames df1 and df2, but raise an exception if the DataFrames have\nany overlapping columns.\n\n>>> df1.merge(df2, left_on='lkey', right_on='rkey', suffixes=(False, False))\nTraceback (most recent call last):\n...\nValueError: columns overlap but no suffix specified:\n Index(['value'], dtype='object')\n\n>>> df1 = pd.DataFrame({'a': ['foo', 'bar'], 'b': [1, 2]})\n>>> df2 = pd.DataFrame({'a': ['foo', 'baz'], 'c': [3, 4]})\n>>> df1\n a b\n0 foo 1\n1 bar 2\n>>> df2\n a c\n0 foo 3\n1 baz 4\n\n>>> df1.merge(df2, how='inner', on='a')\n a b c\n0 foo 1 3\n\n>>> df1.merge(df2, how='left', on='a')\n a b c\n0 foo 1 3.0\n1 bar 2 NaN\n\n>>> df1 = pd.DataFrame({'left': ['foo', 'bar']})\n>>> df2 = pd.DataFrame({'right': [7, 8]})\n>>> df1\n left\n0 foo\n1 bar\n>>> df2\n right\n0 7\n1 8\n\n>>> df1.merge(df2, how='cross')\n left right\n0 foo 7\n1 foo 8\n2 bar 7\n3 bar 8\n\"\"\"\n\n\n# -----------------------------------------------------------------------\n# DataFrame class\n\n\nclass DataFrame(NDFrame, OpsMixin):\n \"\"\"\n Two-dimensional, size-mutable, potentially heterogeneous tabular data.\n\n Data structure also contains labeled axes (rows and columns).\n Arithmetic operations align on both row and column labels. Can be\n thought of as a dict-like container for Series objects. The primary\n pandas data structure.\n\n Parameters\n ----------\n data : ndarray (structured or homogeneous), Iterable, dict, or DataFrame\n Dict can contain Series, arrays, constants, dataclass or list-like objects. If\n data is a dict, column order follows insertion-order.\n\n .. versionchanged:: 0.25.0\n If data is a list of dicts, column order follows insertion-order.\n\n index : Index or array-like\n Index to use for resulting frame. Will default to RangeIndex if\n no indexing information part of input data and no index provided.\n columns : Index or array-like\n Column labels to use for resulting frame. Will default to\n RangeIndex (0, 1, 2, ..., n) if no column labels are provided.\n dtype : dtype, default None\n Data type to force. Only a single dtype is allowed. If None, infer.\n copy : bool, default False\n Copy data from inputs. Only affects DataFrame / 2d ndarray input.\n\n See Also\n --------\n DataFrame.from_records : Constructor from tuples, also record arrays.\n DataFrame.from_dict : From dicts of Series, arrays, or dicts.\n read_csv : Read a comma-separated values (csv) file into DataFrame.\n read_table : Read general delimited file into DataFrame.\n read_clipboard : Read text from clipboard into DataFrame.\n\n Examples\n --------\n Constructing DataFrame from a dictionary.\n\n >>> d = {'col1': [1, 2], 'col2': [3, 4]}\n >>> df = pd.DataFrame(data=d)\n >>> df\n col1 col2\n 0 1 3\n 1 2 4\n\n Notice that the inferred dtype is int64.\n\n >>> df.dtypes\n col1 int64\n col2 int64\n dtype: object\n\n To enforce a single dtype:\n\n >>> df = pd.DataFrame(data=d, dtype=np.int8)\n >>> df.dtypes\n col1 int8\n col2 int8\n dtype: object\n\n Constructing DataFrame from numpy ndarray:\n\n >>> df2 = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]),\n ... 
columns=['a', 'b', 'c'])\n >>> df2\n a b c\n 0 1 2 3\n 1 4 5 6\n 2 7 8 9\n\n Constructing DataFrame from dataclass:\n\n >>> from dataclasses import make_dataclass\n >>> Point = make_dataclass(\"Point\", [(\"x\", int), (\"y\", int)])\n >>> pd.DataFrame([Point(0, 0), Point(0, 3), Point(2, 3)])\n x y\n 0 0 0\n 1 0 3\n 2 2 3\n \"\"\"\n\n _internal_names_set = {\"columns\", \"index\"} | NDFrame._internal_names_set\n _typ = \"dataframe\"\n _HANDLED_TYPES = (Series, Index, ExtensionArray, np.ndarray)\n\n @property\n def _constructor(self) -> Type[DataFrame]:\n return DataFrame\n\n _constructor_sliced: Type[Series] = Series\n _hidden_attrs: FrozenSet[str] = NDFrame._hidden_attrs | frozenset([])\n _accessors: Set[str] = {\"sparse\"}\n\n @property\n def _constructor_expanddim(self):\n # GH#31549 raising NotImplementedError on a property causes trouble\n # for `inspect`\n def constructor(*args, **kwargs):\n raise NotImplementedError(\"Not supported for DataFrames!\")\n\n return constructor\n\n # ----------------------------------------------------------------------\n # Constructors\n\n def __init__(\n self,\n data=None,\n index: Optional[Axes] = None,\n columns: Optional[Axes] = None,\n dtype: Optional[Dtype] = None,\n copy: bool = False,\n ):\n if data is None:\n data = {}\n if dtype is not None:\n dtype = self._validate_dtype(dtype)\n\n if isinstance(data, DataFrame):\n data = data._mgr\n\n if isinstance(data, BlockManager):\n if index is None and columns is None and dtype is None and copy is False:\n # GH#33357 fastpath\n NDFrame.__init__(self, data)\n return\n\n mgr = self._init_mgr(\n data, axes={\"index\": index, \"columns\": columns}, dtype=dtype, copy=copy\n )\n\n elif isinstance(data, dict):\n mgr = init_dict(data, index, columns, dtype=dtype)\n elif isinstance(data, ma.MaskedArray):\n import numpy.ma.mrecords as mrecords\n\n # masked recarray\n if isinstance(data, mrecords.MaskedRecords):\n mgr = masked_rec_array_to_mgr(data, index, columns, dtype, copy)\n\n # a masked array\n else:\n mask = ma.getmaskarray(data)\n if mask.any():\n data, fill_value = maybe_upcast(data, copy=True)\n data.soften_mask() # set hardmask False if it was True\n data[mask] = fill_value\n else:\n data = data.copy()\n mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy)\n\n elif isinstance(data, (np.ndarray, Series, Index)):\n if data.dtype.names:\n data_columns = list(data.dtype.names)\n data = {k: data[k] for k in data_columns}\n if columns is None:\n columns = data_columns\n mgr = init_dict(data, index, columns, dtype=dtype)\n elif getattr(data, \"name\", None) is not None:\n mgr = init_dict({data.name: data}, index, columns, dtype=dtype)\n else:\n mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy)\n\n # For data is list-like, or Iterable (will consume into list)\n elif isinstance(data, abc.Iterable) and not isinstance(data, (str, bytes)):\n if not isinstance(data, (abc.Sequence, ExtensionArray)):\n data = list(data)\n if len(data) > 0:\n if is_dataclass(data[0]):\n data = dataclasses_to_dicts(data)\n if is_list_like(data[0]) and getattr(data[0], \"ndim\", 1) == 1:\n if is_named_tuple(data[0]) and columns is None:\n columns = data[0]._fields\n arrays, columns = to_arrays(data, columns, dtype=dtype)\n columns = ensure_index(columns)\n\n # set the index\n if index is None:\n if isinstance(data[0], Series):\n index = get_names_from_index(data)\n elif isinstance(data[0], Categorical):\n index = ibase.default_index(len(data[0]))\n else:\n index = ibase.default_index(len(data))\n\n mgr = 
arrays_to_mgr(arrays, columns, index, columns, dtype=dtype)\n else:\n mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy)\n else:\n mgr = init_dict({}, index, columns, dtype=dtype)\n # For data is scalar\n else:\n if index is None or columns is None:\n raise ValueError(\"DataFrame constructor not properly called!\")\n\n if not dtype:\n dtype, _ = infer_dtype_from_scalar(data, pandas_dtype=True)\n\n # For data is a scalar extension dtype\n if is_extension_array_dtype(dtype):\n\n values = [\n construct_1d_arraylike_from_scalar(data, len(index), dtype)\n for _ in range(len(columns))\n ]\n mgr = arrays_to_mgr(values, columns, index, columns, dtype=None)\n else:\n # Attempt to coerce to a numpy array\n try:\n arr = np.array(data, dtype=dtype, copy=copy)\n except (ValueError, TypeError) as err:\n exc = TypeError(\n \"DataFrame constructor called with \"\n f\"incompatible data and dtype: {err}\"\n )\n raise exc from err\n\n if arr.ndim != 0:\n raise ValueError(\"DataFrame constructor not properly called!\")\n\n values = cast_scalar_to_array(\n (len(index), len(columns)), data, dtype=dtype\n )\n\n mgr = init_ndarray(\n values, index, columns, dtype=values.dtype, copy=False\n )\n\n NDFrame.__init__(self, mgr)\n\n # ----------------------------------------------------------------------\n\n @property\n def axes(self) -> List[Index]:\n \"\"\"\n Return a list representing the axes of the DataFrame.\n\n It has the row axis labels and column axis labels as the only members.\n They are returned in that order.\n\n Examples\n --------\n >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})\n >>> df.axes\n [RangeIndex(start=0, stop=2, step=1), Index(['col1', 'col2'],\n dtype='object')]\n \"\"\"\n return [self.index, self.columns]\n\n @property\n def shape(self) -> Tuple[int, int]:\n \"\"\"\n Return a tuple representing the dimensionality of the DataFrame.\n\n See Also\n --------\n ndarray.shape : Tuple of array dimensions.\n\n Examples\n --------\n >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})\n >>> df.shape\n (2, 2)\n\n >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4],\n ... 'col3': [5, 6]})\n >>> df.shape\n (2, 3)\n \"\"\"\n return len(self.index), len(self.columns)\n\n @property\n def _is_homogeneous_type(self) -> bool:\n \"\"\"\n Whether all the columns in a DataFrame have the same type.\n\n Returns\n -------\n bool\n\n See Also\n --------\n Index._is_homogeneous_type : Whether the object has a single\n dtype.\n MultiIndex._is_homogeneous_type : Whether all the levels of a\n MultiIndex have the same dtype.\n\n Examples\n --------\n >>> DataFrame({\"A\": [1, 2], \"B\": [3, 4]})._is_homogeneous_type\n True\n >>> DataFrame({\"A\": [1, 2], \"B\": [3.0, 4.0]})._is_homogeneous_type\n False\n\n Items with the same type but different sizes are considered\n different types.\n\n >>> DataFrame({\n ... \"A\": np.array([1, 2], dtype=np.int32),\n ... 
\"B\": np.array([1, 2], dtype=np.int64)})._is_homogeneous_type\n False\n \"\"\"\n if self._mgr.any_extension_types:\n return len({block.dtype for block in self._mgr.blocks}) == 1\n else:\n return not self._is_mixed_type\n\n @property\n def _can_fast_transpose(self) -> bool:\n \"\"\"\n Can we transpose this DataFrame without creating any new array objects.\n \"\"\"\n if self._mgr.any_extension_types:\n # TODO(EA2D) special case would be unnecessary with 2D EAs\n return False\n return len(self._mgr.blocks) == 1\n\n # ----------------------------------------------------------------------\n # Rendering Methods\n\n def _repr_fits_vertical_(self) -> bool:\n \"\"\"\n Check length against max_rows.\n \"\"\"\n max_rows = get_option(\"display.max_rows\")\n return len(self) <= max_rows\n\n def _repr_fits_horizontal_(self, ignore_width: bool = False) -> bool:\n \"\"\"\n Check if full repr fits in horizontal boundaries imposed by the display\n options width and max_columns.\n\n In case of non-interactive session, no boundaries apply.\n\n `ignore_width` is here so ipynb+HTML output can behave the way\n users expect. display.max_columns remains in effect.\n GH3541, GH3573\n \"\"\"\n width, height = console.get_console_size()\n max_columns = get_option(\"display.max_columns\")\n nb_columns = len(self.columns)\n\n # exceed max columns\n if (max_columns and nb_columns > max_columns) or (\n (not ignore_width) and width and nb_columns > (width // 2)\n ):\n return False\n\n # used by repr_html under IPython notebook or scripts ignore terminal\n # dims\n if ignore_width or not console.in_interactive_session():\n return True\n\n if get_option(\"display.width\") is not None or console.in_ipython_frontend():\n # check at least the column row for excessive width\n max_rows = 1\n else:\n max_rows = get_option(\"display.max_rows\")\n\n # when auto-detecting, so width=None and not in ipython front end\n # check whether repr fits horizontal by actually checking\n # the width of the rendered repr\n buf = StringIO()\n\n # only care about the stuff we'll actually print out\n # and to_string on entire frame may be expensive\n d = self\n\n if not (max_rows is None): # unlimited rows\n # min of two, where one may be None\n d = d.iloc[: min(max_rows, len(d))]\n else:\n return True\n\n d.to_string(buf=buf)\n value = buf.getvalue()\n repr_width = max(len(line) for line in value.split(\"\\n\"))\n\n return repr_width < width\n\n def _info_repr(self) -> bool:\n \"\"\"\n True if the repr should show the info view.\n \"\"\"\n info_repr_option = get_option(\"display.large_repr\") == \"info\"\n return info_repr_option and not (\n self._repr_fits_horizontal_() and self._repr_fits_vertical_()\n )\n\n def __repr__(self) -> str:\n \"\"\"\n Return a string representation for a particular DataFrame.\n \"\"\"\n buf = StringIO(\"\")\n if self._info_repr():\n self.info(buf=buf)\n return buf.getvalue()\n\n max_rows = get_option(\"display.max_rows\")\n min_rows = get_option(\"display.min_rows\")\n max_cols = get_option(\"display.max_columns\")\n max_colwidth = get_option(\"display.max_colwidth\")\n show_dimensions = get_option(\"display.show_dimensions\")\n if get_option(\"display.expand_frame_repr\"):\n width, _ = console.get_console_size()\n else:\n width = None\n self.to_string(\n buf=buf,\n max_rows=max_rows,\n min_rows=min_rows,\n max_cols=max_cols,\n line_width=width,\n max_colwidth=max_colwidth,\n show_dimensions=show_dimensions,\n )\n\n return buf.getvalue()\n\n def _repr_html_(self) -> Optional[str]:\n \"\"\"\n Return a html 
representation for a particular DataFrame.\n\n Mainly for IPython notebook.\n \"\"\"\n if self._info_repr():\n buf = StringIO(\"\")\n self.info(buf=buf)\n # need to escape the <class>, should be the first line.\n val = buf.getvalue().replace(\"<\", r\"&lt;\", 1)\n val = val.replace(\">\", r\"&gt;\", 1)\n return \"<pre>\" + val + \"</pre>\"\n\n if get_option(\"display.notebook_repr_html\"):\n max_rows = get_option(\"display.max_rows\")\n min_rows = get_option(\"display.min_rows\")\n max_cols = get_option(\"display.max_columns\")\n show_dimensions = get_option(\"display.show_dimensions\")\n\n formatter = fmt.DataFrameFormatter(\n self,\n columns=None,\n col_space=None,\n na_rep=\"NaN\",\n formatters=None,\n float_format=None,\n sparsify=None,\n justify=None,\n index_names=True,\n header=True,\n index=True,\n bold_rows=True,\n escape=True,\n max_rows=max_rows,\n min_rows=min_rows,\n max_cols=max_cols,\n show_dimensions=show_dimensions,\n decimal=\".\",\n )\n return fmt.DataFrameRenderer(formatter).to_html(notebook=True)\n else:\n return None\n\n @Substitution(\n header_type=\"bool or sequence\",\n header=\"Write out the column names. If a list of strings \"\n \"is given, it is assumed to be aliases for the \"\n \"column names\",\n col_space_type=\"int, list or dict of int\",\n col_space=\"The minimum width of each column\",\n )\n @Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring)\n def to_string(\n self,\n buf: Optional[FilePathOrBuffer[str]] = None,\n columns: Optional[Sequence[str]] = None,\n col_space: Optional[int] = None,\n header: Union[bool, Sequence[str]] = True,\n index: bool = True,\n na_rep: str = \"NaN\",\n formatters: Optional[fmt.FormattersType] = None,\n float_format: Optional[fmt.FloatFormatType] = None,\n sparsify: Optional[bool] = None,\n index_names: bool = True,\n justify: Optional[str] = None,\n max_rows: Optional[int] = None,\n min_rows: Optional[int] = None,\n max_cols: Optional[int] = None,\n show_dimensions: bool = False,\n decimal: str = \".\",\n line_width: Optional[int] = None,\n max_colwidth: Optional[int] = None,\n encoding: Optional[str] = None,\n ) -> Optional[str]:\n \"\"\"\n Render a DataFrame to a console-friendly tabular output.\n %(shared_params)s\n line_width : int, optional\n Width to wrap a line in characters.\n max_colwidth : int, optional\n Max width to truncate each column in characters. By default, no limit.\n\n .. versionadded:: 1.0.0\n encoding : str, default \"utf-8\"\n Set character encoding.\n\n .. 
versionadded:: 1.0\n %(returns)s\n See Also\n --------\n to_html : Convert DataFrame to HTML.\n\n Examples\n --------\n >>> d = {'col1': [1, 2, 3], 'col2': [4, 5, 6]}\n >>> df = pd.DataFrame(d)\n >>> print(df.to_string())\n col1 col2\n 0 1 4\n 1 2 5\n 2 3 6\n \"\"\"\n from pandas import option_context\n\n with option_context(\"display.max_colwidth\", max_colwidth):\n formatter = fmt.DataFrameFormatter(\n self,\n columns=columns,\n col_space=col_space,\n na_rep=na_rep,\n formatters=formatters,\n float_format=float_format,\n sparsify=sparsify,\n justify=justify,\n index_names=index_names,\n header=header,\n index=index,\n min_rows=min_rows,\n max_rows=max_rows,\n max_cols=max_cols,\n show_dimensions=show_dimensions,\n decimal=decimal,\n )\n return fmt.DataFrameRenderer(formatter).to_string(\n buf=buf,\n encoding=encoding,\n line_width=line_width,\n )\n\n # ----------------------------------------------------------------------\n\n @property\n def style(self) -> Styler:\n \"\"\"\n Returns a Styler object.\n\n Contains methods for building a styled HTML representation of the DataFrame.\n\n See Also\n --------\n io.formats.style.Styler : Helps style a DataFrame or Series according to the\n data with HTML and CSS.\n \"\"\"\n from pandas.io.formats.style import Styler\n\n return Styler(self)\n\n _shared_docs[\n \"items\"\n ] = r\"\"\"\n Iterate over (column name, Series) pairs.\n\n Iterates over the DataFrame columns, returning a tuple with\n the column name and the content as a Series.\n\n Yields\n ------\n label : object\n The column names for the DataFrame being iterated over.\n content : Series\n The column entries belonging to each label, as a Series.\n\n See Also\n --------\n DataFrame.iterrows : Iterate over DataFrame rows as\n (index, Series) pairs.\n DataFrame.itertuples : Iterate over DataFrame rows as namedtuples\n of the values.\n\n Examples\n --------\n >>> df = pd.DataFrame({'species': ['bear', 'bear', 'marsupial'],\n ... 'population': [1864, 22000, 80000]},\n ... index=['panda', 'polar', 'koala'])\n >>> df\n species population\n panda bear 1864\n polar bear 22000\n koala marsupial 80000\n >>> for label, content in df.items():\n ... print(f'label: {label}')\n ... print(f'content: {content}', sep='\\n')\n ...\n label: species\n content:\n panda bear\n polar bear\n koala marsupial\n Name: species, dtype: object\n label: population\n content:\n panda 1864\n polar 22000\n koala 80000\n Name: population, dtype: int64\n \"\"\"\n\n @Appender(_shared_docs[\"items\"])\n def items(self) -> Iterable[Tuple[Label, Series]]:\n if self.columns.is_unique and hasattr(self, \"_item_cache\"):\n for k in self.columns:\n yield k, self._get_item_cache(k)\n else:\n for i, k in enumerate(self.columns):\n yield k, self._ixs(i, axis=1)\n\n @Appender(_shared_docs[\"items\"])\n def iteritems(self) -> Iterable[Tuple[Label, Series]]:\n yield from self.items()\n\n def iterrows(self) -> Iterable[Tuple[Label, Series]]:\n \"\"\"\n Iterate over DataFrame rows as (index, Series) pairs.\n\n Yields\n ------\n index : label or tuple of label\n The index of the row. A tuple for a `MultiIndex`.\n data : Series\n The data of the row as a Series.\n\n See Also\n --------\n DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values.\n DataFrame.items : Iterate over (column name, Series) pairs.\n\n Notes\n -----\n 1. Because ``iterrows`` returns a Series for each row,\n it does **not** preserve dtypes across the rows (dtypes are\n preserved across columns for DataFrames). 
For example,\n\n >>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float'])\n >>> row = next(df.iterrows())[1]\n >>> row\n int 1.0\n float 1.5\n Name: 0, dtype: float64\n >>> print(row['int'].dtype)\n float64\n >>> print(df['int'].dtype)\n int64\n\n To preserve dtypes while iterating over the rows, it is better\n to use :meth:`itertuples` which returns namedtuples of the values\n and which is generally faster than ``iterrows``.\n\n 2. You should **never modify** something you are iterating over.\n This is not guaranteed to work in all cases. Depending on the\n data types, the iterator returns a copy and not a view, and writing\n to it will have no effect.\n \"\"\"\n columns = self.columns\n klass = self._constructor_sliced\n for k, v in zip(self.index, self.values):\n s = klass(v, index=columns, name=k)\n yield k, s\n\n def itertuples(self, index: bool = True, name: Optional[str] = \"Pandas\"):\n \"\"\"\n Iterate over DataFrame rows as namedtuples.\n\n Parameters\n ----------\n index : bool, default True\n If True, return the index as the first element of the tuple.\n name : str or None, default \"Pandas\"\n The name of the returned namedtuples or None to return regular\n tuples.\n\n Returns\n -------\n iterator\n An object to iterate over namedtuples for each row in the\n DataFrame with the first field possibly being the index and\n following fields being the column values.\n\n See Also\n --------\n DataFrame.iterrows : Iterate over DataFrame rows as (index, Series)\n pairs.\n DataFrame.items : Iterate over (column name, Series) pairs.\n\n Notes\n -----\n The column names will be renamed to positional names if they are\n invalid Python identifiers, repeated, or start with an underscore.\n On python versions < 3.7 regular tuples are returned for DataFrames\n with a large number of columns (>254).\n\n Examples\n --------\n >>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]},\n ... index=['dog', 'hawk'])\n >>> df\n num_legs num_wings\n dog 4 0\n hawk 2 2\n >>> for row in df.itertuples():\n ... print(row)\n ...\n Pandas(Index='dog', num_legs=4, num_wings=0)\n Pandas(Index='hawk', num_legs=2, num_wings=2)\n\n By setting the `index` parameter to False we can remove the index\n as the first element of the tuple:\n\n >>> for row in df.itertuples(index=False):\n ... print(row)\n ...\n Pandas(num_legs=4, num_wings=0)\n Pandas(num_legs=2, num_wings=2)\n\n With the `name` parameter set we set a custom name for the yielded\n namedtuples:\n\n >>> for row in df.itertuples(name='Animal'):\n ... 
print(row)\n ...\n Animal(Index='dog', num_legs=4, num_wings=0)\n Animal(Index='hawk', num_legs=2, num_wings=2)\n \"\"\"\n arrays = []\n fields = list(self.columns)\n if index:\n arrays.append(self.index)\n fields.insert(0, \"Index\")\n\n # use integer indexing because of possible duplicate column names\n arrays.extend(self.iloc[:, k] for k in range(len(self.columns)))\n\n if name is not None:\n # https://github.com/python/mypy/issues/9046\n # error: namedtuple() expects a string literal as the first argument\n itertuple = collections.namedtuple( # type: ignore[misc]\n name, fields, rename=True\n )\n return map(itertuple._make, zip(*arrays))\n\n # fallback to regular tuples\n return zip(*arrays)\n\n def __len__(self) -> int:\n \"\"\"\n Returns length of info axis, but here we use the index.\n \"\"\"\n return len(self.index)\n\n def dot(self, other):\n \"\"\"\n Compute the matrix multiplication between the DataFrame and other.\n\n This method computes the matrix product between the DataFrame and the\n values of an other Series, DataFrame or a numpy array.\n\n It can also be called using ``self @ other`` in Python >= 3.5.\n\n Parameters\n ----------\n other : Series, DataFrame or array-like\n The other object to compute the matrix product with.\n\n Returns\n -------\n Series or DataFrame\n If other is a Series, return the matrix product between self and\n other as a Series. If other is a DataFrame or a numpy.array, return\n the matrix product of self and other in a DataFrame of a np.array.\n\n See Also\n --------\n Series.dot: Similar method for Series.\n\n Notes\n -----\n The dimensions of DataFrame and other must be compatible in order to\n compute the matrix multiplication. In addition, the column names of\n DataFrame and the index of other must contain the same values, as they\n will be aligned prior to the multiplication.\n\n The dot method for Series computes the inner product, instead of the\n matrix product here.\n\n Examples\n --------\n Here we multiply a DataFrame with a Series.\n\n >>> df = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]])\n >>> s = pd.Series([1, 1, 2, 1])\n >>> df.dot(s)\n 0 -4\n 1 5\n dtype: int64\n\n Here we multiply a DataFrame with another DataFrame.\n\n >>> other = pd.DataFrame([[0, 1], [1, 2], [-1, -1], [2, 0]])\n >>> df.dot(other)\n 0 1\n 0 1 4\n 1 2 2\n\n Note that the dot method give the same result as @\n\n >>> df @ other\n 0 1\n 0 1 4\n 1 2 2\n\n The dot method works also if other is an np.array.\n\n >>> arr = np.array([[0, 1], [1, 2], [-1, -1], [2, 0]])\n >>> df.dot(arr)\n 0 1\n 0 1 4\n 1 2 2\n\n Note how shuffling of the objects does not change the result.\n\n >>> s2 = s.reindex([1, 0, 2, 3])\n >>> df.dot(s2)\n 0 -4\n 1 5\n dtype: int64\n \"\"\"\n if isinstance(other, (Series, DataFrame)):\n common = self.columns.union(other.index)\n if len(common) > len(self.columns) or len(common) > len(other.index):\n raise ValueError(\"matrices are not aligned\")\n\n left = self.reindex(columns=common, copy=False)\n right = other.reindex(index=common, copy=False)\n lvals = left.values\n rvals = right._values\n else:\n left = self\n lvals = self.values\n rvals = np.asarray(other)\n if lvals.shape[1] != rvals.shape[0]:\n raise ValueError(\n f\"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}\"\n )\n\n if isinstance(other, DataFrame):\n return self._constructor(\n np.dot(lvals, rvals), index=left.index, columns=other.columns\n )\n elif isinstance(other, Series):\n return self._constructor_sliced(np.dot(lvals, rvals), index=left.index)\n elif isinstance(rvals, 
(np.ndarray, Index)):\n result = np.dot(lvals, rvals)\n if result.ndim == 2:\n return self._constructor(result, index=left.index)\n else:\n return self._constructor_sliced(result, index=left.index)\n else: # pragma: no cover\n raise TypeError(f\"unsupported type: {type(other)}\")\n\n def __matmul__(self, other):\n \"\"\"\n Matrix multiplication using binary `@` operator in Python>=3.5.\n \"\"\"\n return self.dot(other)\n\n def __rmatmul__(self, other):\n \"\"\"\n Matrix multiplication using binary `@` operator in Python>=3.5.\n \"\"\"\n try:\n return self.T.dot(np.transpose(other)).T\n except ValueError as err:\n if \"shape mismatch\" not in str(err):\n raise\n # GH#21581 give exception message for original shapes\n msg = f\"shapes {np.shape(other)} and {self.shape} not aligned\"\n raise ValueError(msg) from err\n\n # ----------------------------------------------------------------------\n # IO methods (to / from other formats)\n\n @classmethod\n def from_dict(cls, data, orient=\"columns\", dtype=None, columns=None) -> DataFrame:\n \"\"\"\n Construct DataFrame from dict of array-like or dicts.\n\n Creates DataFrame object from dictionary by columns or by index\n allowing dtype specification.\n\n Parameters\n ----------\n data : dict\n Of the form {field : array-like} or {field : dict}.\n orient : {'columns', 'index'}, default 'columns'\n The \"orientation\" of the data. If the keys of the passed dict\n should be the columns of the resulting DataFrame, pass 'columns'\n (default). Otherwise if the keys should be rows, pass 'index'.\n dtype : dtype, default None\n Data type to force, otherwise infer.\n columns : list, default None\n Column labels to use when ``orient='index'``. Raises a ValueError\n if used with ``orient='columns'``.\n\n Returns\n -------\n DataFrame\n\n See Also\n --------\n DataFrame.from_records : DataFrame from structured ndarray, sequence\n of tuples or dicts, or DataFrame.\n DataFrame : DataFrame object creation using constructor.\n\n Examples\n --------\n By default the keys of the dict become the DataFrame columns:\n\n >>> data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']}\n >>> pd.DataFrame.from_dict(data)\n col_1 col_2\n 0 3 a\n 1 2 b\n 2 1 c\n 3 0 d\n\n Specify ``orient='index'`` to create the DataFrame using dictionary\n keys as rows:\n\n >>> data = {'row_1': [3, 2, 1, 0], 'row_2': ['a', 'b', 'c', 'd']}\n >>> pd.DataFrame.from_dict(data, orient='index')\n 0 1 2 3\n row_1 3 2 1 0\n row_2 a b c d\n\n When using the 'index' orientation, the column names can be\n specified manually:\n\n >>> pd.DataFrame.from_dict(data, orient='index',\n ... columns=['A', 'B', 'C', 'D'])\n A B C D\n row_1 3 2 1 0\n row_2 a b c d\n \"\"\"\n index = None\n orient = orient.lower()\n if orient == \"index\":\n if len(data) > 0:\n # TODO speed up Series case\n if isinstance(list(data.values())[0], (Series, dict)):\n data = _from_nested_dict(data)\n else:\n data, index = list(data.values()), list(data.keys())\n elif orient == \"columns\":\n if columns is not None:\n raise ValueError(\"cannot use columns parameter with orient='columns'\")\n else: # pragma: no cover\n raise ValueError(\"only recognize index or columns for orient\")\n\n return cls(data, index=index, columns=columns, dtype=dtype)\n\n def to_numpy(\n self, dtype=None, copy: bool = False, na_value=lib.no_default\n ) -> np.ndarray:\n \"\"\"\n Convert the DataFrame to a NumPy array.\n\n .. versionadded:: 0.24.0\n\n By default, the dtype of the returned array will be the common NumPy\n dtype of all types in the DataFrame. 
For example, if the dtypes are\n ``float16`` and ``float32``, the results dtype will be ``float32``.\n This may require copying data and coercing values, which may be\n expensive.\n\n Parameters\n ----------\n dtype : str or numpy.dtype, optional\n The dtype to pass to :meth:`numpy.asarray`.\n copy : bool, default False\n Whether to ensure that the returned value is not a view on\n another array. Note that ``copy=False`` does not *ensure* that\n ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that\n a copy is made, even if not strictly necessary.\n na_value : Any, optional\n The value to use for missing values. The default value depends\n on `dtype` and the dtypes of the DataFrame columns.\n\n .. versionadded:: 1.1.0\n\n Returns\n -------\n numpy.ndarray\n\n See Also\n --------\n Series.to_numpy : Similar method for Series.\n\n Examples\n --------\n >>> pd.DataFrame({\"A\": [1, 2], \"B\": [3, 4]}).to_numpy()\n array([[1, 3],\n [2, 4]])\n\n With heterogeneous data, the lowest common type will have to\n be used.\n\n >>> df = pd.DataFrame({\"A\": [1, 2], \"B\": [3.0, 4.5]})\n >>> df.to_numpy()\n array([[1. , 3. ],\n [2. , 4.5]])\n\n For a mix of numeric and non-numeric types, the output array will\n have object dtype.\n\n >>> df['C'] = pd.date_range('2000', periods=2)\n >>> df.to_numpy()\n array([[1, 3.0, Timestamp('2000-01-01 00:00:00')],\n [2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object)\n \"\"\"\n self._consolidate_inplace()\n result = self._mgr.as_array(\n transpose=self._AXIS_REVERSED, dtype=dtype, copy=copy, na_value=na_value\n )\n if result.dtype is not dtype:\n result = np.array(result, dtype=dtype, copy=False)\n\n return result\n\n def to_dict(self, orient=\"dict\", into=dict):\n \"\"\"\n Convert the DataFrame to a dictionary.\n\n The type of the key-value pairs can be customized with the parameters\n (see below).\n\n Parameters\n ----------\n orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}\n Determines the type of the values of the dictionary.\n\n - 'dict' (default) : dict like {column -> {index -> value}}\n - 'list' : dict like {column -> [values]}\n - 'series' : dict like {column -> Series(values)}\n - 'split' : dict like\n {'index' -> [index], 'columns' -> [columns], 'data' -> [values]}\n - 'records' : list like\n [{column -> value}, ... , {column -> value}]\n - 'index' : dict like {index -> {column -> value}}\n\n Abbreviations are allowed. `s` indicates `series` and `sp`\n indicates `split`.\n\n into : class, default dict\n The collections.abc.Mapping subclass used for all Mappings\n in the return value. Can be the actual class or an empty\n instance of the mapping type you want. If you want a\n collections.defaultdict, you must pass it initialized.\n\n Returns\n -------\n dict, list or collections.abc.Mapping\n Return a collections.abc.Mapping object representing the DataFrame.\n The resulting transformation depends on the `orient` parameter.\n\n See Also\n --------\n DataFrame.from_dict: Create a DataFrame from a dictionary.\n DataFrame.to_json: Convert a DataFrame to JSON format.\n\n Examples\n --------\n >>> df = pd.DataFrame({'col1': [1, 2],\n ... 'col2': [0.5, 0.75]},\n ... 
index=['row1', 'row2'])\n >>> df\n col1 col2\n row1 1 0.50\n row2 2 0.75\n >>> df.to_dict()\n {'col1': {'row1': 1, 'row2': 2}, 'col2': {'row1': 0.5, 'row2': 0.75}}\n\n You can specify the return orientation.\n\n >>> df.to_dict('series')\n {'col1': row1 1\n row2 2\n Name: col1, dtype: int64,\n 'col2': row1 0.50\n row2 0.75\n Name: col2, dtype: float64}\n\n >>> df.to_dict('split')\n {'index': ['row1', 'row2'], 'columns': ['col1', 'col2'],\n 'data': [[1, 0.5], [2, 0.75]]}\n\n >>> df.to_dict('records')\n [{'col1': 1, 'col2': 0.5}, {'col1': 2, 'col2': 0.75}]\n\n >>> df.to_dict('index')\n {'row1': {'col1': 1, 'col2': 0.5}, 'row2': {'col1': 2, 'col2': 0.75}}\n\n You can also specify the mapping type.\n\n >>> from collections import OrderedDict, defaultdict\n >>> df.to_dict(into=OrderedDict)\n OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])),\n ('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))])\n\n If you want a `defaultdict`, you need to initialize it:\n\n >>> dd = defaultdict(list)\n >>> df.to_dict('records', into=dd)\n [defaultdict(<class 'list'>, {'col1': 1, 'col2': 0.5}),\n defaultdict(<class 'list'>, {'col1': 2, 'col2': 0.75})]\n \"\"\"\n if not self.columns.is_unique:\n warnings.warn(\n \"DataFrame columns are not unique, some columns will be omitted.\",\n UserWarning,\n stacklevel=2,\n )\n # GH16122\n into_c = com.standardize_mapping(into)\n\n orient = orient.lower()\n # GH32515\n if orient.startswith((\"d\", \"l\", \"s\", \"r\", \"i\")) and orient not in {\n \"dict\",\n \"list\",\n \"series\",\n \"split\",\n \"records\",\n \"index\",\n }:\n warnings.warn(\n \"Using short name for 'orient' is deprecated. Only the \"\n \"options: ('dict', list, 'series', 'split', 'records', 'index') \"\n \"will be used in a future version. Use one of the above \"\n \"to silence this warning.\",\n FutureWarning,\n )\n\n if orient.startswith(\"d\"):\n orient = \"dict\"\n elif orient.startswith(\"l\"):\n orient = \"list\"\n elif orient.startswith(\"sp\"):\n orient = \"split\"\n elif orient.startswith(\"s\"):\n orient = \"series\"\n elif orient.startswith(\"r\"):\n orient = \"records\"\n elif orient.startswith(\"i\"):\n orient = \"index\"\n\n if orient == \"dict\":\n return into_c((k, v.to_dict(into)) for k, v in self.items())\n\n elif orient == \"list\":\n return into_c((k, v.tolist()) for k, v in self.items())\n\n elif orient == \"split\":\n return into_c(\n (\n (\"index\", self.index.tolist()),\n (\"columns\", self.columns.tolist()),\n (\n \"data\",\n [\n list(map(maybe_box_datetimelike, t))\n for t in self.itertuples(index=False, name=None)\n ],\n ),\n )\n )\n\n elif orient == \"series\":\n return into_c((k, maybe_box_datetimelike(v)) for k, v in self.items())\n\n elif orient == \"records\":\n columns = self.columns.tolist()\n rows = (\n dict(zip(columns, row))\n for row in self.itertuples(index=False, name=None)\n )\n return [\n into_c((k, maybe_box_datetimelike(v)) for k, v in row.items())\n for row in rows\n ]\n\n elif orient == \"index\":\n if not self.index.is_unique:\n raise ValueError(\"DataFrame index must be unique for orient='index'.\")\n return into_c(\n (t[0], dict(zip(self.columns, t[1:])))\n for t in self.itertuples(name=None)\n )\n\n else:\n raise ValueError(f\"orient '{orient}' not understood\")\n\n def to_gbq(\n self,\n destination_table,\n project_id=None,\n chunksize=None,\n reauth=False,\n if_exists=\"fail\",\n auth_local_webserver=False,\n table_schema=None,\n location=None,\n progress_bar=True,\n credentials=None,\n ) -> None:\n \"\"\"\n Write a DataFrame to a 
Google BigQuery table.\n\n This function requires the `pandas-gbq package\n <https://pandas-gbq.readthedocs.io>`__.\n\n See the `How to authenticate with Google BigQuery\n <https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__\n guide for authentication instructions.\n\n Parameters\n ----------\n destination_table : str\n Name of table to be written, in the form ``dataset.tablename``.\n project_id : str, optional\n Google BigQuery Account project ID. Optional when available from\n the environment.\n chunksize : int, optional\n Number of rows to be inserted in each chunk from the dataframe.\n Set to ``None`` to load the whole dataframe at once.\n reauth : bool, default False\n Force Google BigQuery to re-authenticate the user. This is useful\n if multiple accounts are used.\n if_exists : str, default 'fail'\n Behavior when the destination table exists. Value can be one of:\n\n ``'fail'``\n If table exists raise pandas_gbq.gbq.TableCreationError.\n ``'replace'``\n If table exists, drop it, recreate it, and insert data.\n ``'append'``\n If table exists, insert data. Create if does not exist.\n auth_local_webserver : bool, default False\n Use the `local webserver flow`_ instead of the `console flow`_\n when getting user credentials.\n\n .. _local webserver flow:\n https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server\n .. _console flow:\n https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console\n\n *New in version 0.2.0 of pandas-gbq*.\n table_schema : list of dicts, optional\n List of BigQuery table fields to which according DataFrame\n columns conform to, e.g. ``[{'name': 'col1', 'type':\n 'STRING'},...]``. If schema is not provided, it will be\n generated according to dtypes of DataFrame columns. See\n BigQuery API documentation on available names of a field.\n\n *New in version 0.3.1 of pandas-gbq*.\n location : str, optional\n Location where the load job should run. See the `BigQuery locations\n documentation\n <https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a\n list of available locations. The location must match that of the\n target dataset.\n\n *New in version 0.5.0 of pandas-gbq*.\n progress_bar : bool, default True\n Use the library `tqdm` to show the progress bar for the upload,\n chunk by chunk.\n\n *New in version 0.5.0 of pandas-gbq*.\n credentials : google.auth.credentials.Credentials, optional\n Credentials for accessing Google APIs. Use this parameter to\n override default credentials, such as to use Compute Engine\n :class:`google.auth.compute_engine.Credentials` or Service\n Account :class:`google.oauth2.service_account.Credentials`\n directly.\n\n *New in version 0.8.0 of pandas-gbq*.\n\n .. 
versionadded:: 0.24.0\n\n See Also\n --------\n pandas_gbq.to_gbq : This function in the pandas-gbq library.\n read_gbq : Read a DataFrame from Google BigQuery.\n \"\"\"\n from pandas.io import gbq\n\n gbq.to_gbq(\n self,\n destination_table,\n project_id=project_id,\n chunksize=chunksize,\n reauth=reauth,\n if_exists=if_exists,\n auth_local_webserver=auth_local_webserver,\n table_schema=table_schema,\n location=location,\n progress_bar=progress_bar,\n credentials=credentials,\n )\n\n @classmethod\n def from_records(\n cls,\n data,\n index=None,\n exclude=None,\n columns=None,\n coerce_float=False,\n nrows=None,\n ) -> DataFrame:\n \"\"\"\n Convert structured or record ndarray to DataFrame.\n\n Creates a DataFrame object from a structured ndarray, sequence of\n tuples or dicts, or DataFrame.\n\n Parameters\n ----------\n data : structured ndarray, sequence of tuples or dicts, or DataFrame\n Structured input data.\n index : str, list of fields, array-like\n Field of array to use as the index, alternately a specific set of\n input labels to use.\n exclude : sequence, default None\n Columns or fields to exclude.\n columns : sequence, default None\n Column names to use. If the passed data do not have names\n associated with them, this argument provides names for the\n columns. Otherwise this argument indicates the order of the columns\n in the result (any names not found in the data will become all-NA\n columns).\n coerce_float : bool, default False\n Attempt to convert values of non-string, non-numeric objects (like\n decimal.Decimal) to floating point, useful for SQL result sets.\n nrows : int, default None\n Number of rows to read if data is an iterator.\n\n Returns\n -------\n DataFrame\n\n See Also\n --------\n DataFrame.from_dict : DataFrame from dict of array-like or dicts.\n DataFrame : DataFrame object creation using constructor.\n\n Examples\n --------\n Data can be provided as a structured ndarray:\n\n >>> data = np.array([(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')],\n ... dtype=[('col_1', 'i4'), ('col_2', 'U1')])\n >>> pd.DataFrame.from_records(data)\n col_1 col_2\n 0 3 a\n 1 2 b\n 2 1 c\n 3 0 d\n\n Data can be provided as a list of dicts:\n\n >>> data = [{'col_1': 3, 'col_2': 'a'},\n ... {'col_1': 2, 'col_2': 'b'},\n ... {'col_1': 1, 'col_2': 'c'},\n ... 
{'col_1': 0, 'col_2': 'd'}]\n >>> pd.DataFrame.from_records(data)\n col_1 col_2\n 0 3 a\n 1 2 b\n 2 1 c\n 3 0 d\n\n Data can be provided as a list of tuples with corresponding columns:\n\n >>> data = [(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')]\n >>> pd.DataFrame.from_records(data, columns=['col_1', 'col_2'])\n col_1 col_2\n 0 3 a\n 1 2 b\n 2 1 c\n 3 0 d\n \"\"\"\n # Make a copy of the input columns so we can modify it\n if columns is not None:\n columns = ensure_index(columns)\n\n if is_iterator(data):\n if nrows == 0:\n return cls()\n\n try:\n first_row = next(data)\n except StopIteration:\n return cls(index=index, columns=columns)\n\n dtype = None\n if hasattr(first_row, \"dtype\") and first_row.dtype.names:\n dtype = first_row.dtype\n\n values = [first_row]\n\n if nrows is None:\n values += data\n else:\n values.extend(itertools.islice(data, nrows - 1))\n\n if dtype is not None:\n data = np.array(values, dtype=dtype)\n else:\n data = values\n\n if isinstance(data, dict):\n if columns is None:\n columns = arr_columns = ensure_index(sorted(data))\n arrays = [data[k] for k in columns]\n else:\n arrays = []\n arr_columns_list = []\n for k, v in data.items():\n if k in columns:\n arr_columns_list.append(k)\n arrays.append(v)\n\n arrays, arr_columns = reorder_arrays(arrays, arr_columns_list, columns)\n\n elif isinstance(data, (np.ndarray, DataFrame)):\n arrays, columns = to_arrays(data, columns)\n if columns is not None:\n columns = ensure_index(columns)\n arr_columns = columns\n else:\n arrays, arr_columns = to_arrays(data, columns, coerce_float=coerce_float)\n\n arr_columns = ensure_index(arr_columns)\n if columns is not None:\n columns = ensure_index(columns)\n else:\n columns = arr_columns\n\n if exclude is None:\n exclude = set()\n else:\n exclude = set(exclude)\n\n result_index = None\n if index is not None:\n if isinstance(index, str) or not hasattr(index, \"__iter__\"):\n i = columns.get_loc(index)\n exclude.add(index)\n if len(arrays) > 0:\n result_index = Index(arrays[i], name=index)\n else:\n result_index = Index([], name=index)\n else:\n try:\n index_data = [arrays[arr_columns.get_loc(field)] for field in index]\n except (KeyError, TypeError):\n # raised by get_loc, see GH#29258\n result_index = index\n else:\n result_index = ensure_index_from_sequences(index_data, names=index)\n exclude.update(index)\n\n if any(exclude):\n arr_exclude = [x for x in exclude if x in arr_columns]\n to_remove = [arr_columns.get_loc(col) for col in arr_exclude]\n arrays = [v for i, v in enumerate(arrays) if i not in to_remove]\n\n arr_columns = arr_columns.drop(arr_exclude)\n columns = columns.drop(exclude)\n\n mgr = arrays_to_mgr(arrays, arr_columns, result_index, columns)\n\n return cls(mgr)\n\n def to_records(\n self, index=True, column_dtypes=None, index_dtypes=None\n ) -> np.recarray:\n \"\"\"\n Convert DataFrame to a NumPy record array.\n\n Index will be included as the first field of the record array if\n requested.\n\n Parameters\n ----------\n index : bool, default True\n Include index in resulting record array, stored in 'index'\n field or using the index label, if set.\n column_dtypes : str, type, dict, default None\n .. versionadded:: 0.24.0\n\n If a string or type, the data type to store all columns. If\n a dictionary, a mapping of column names and indices (zero-indexed)\n to specific data types.\n index_dtypes : str, type, dict, default None\n .. versionadded:: 0.24.0\n\n If a string or type, the data type to store all index levels. 
If\n a dictionary, a mapping of index level names and indices\n (zero-indexed) to specific data types.\n\n This mapping is applied only if `index=True`.\n\n Returns\n -------\n numpy.recarray\n NumPy ndarray with the DataFrame labels as fields and each row\n of the DataFrame as entries.\n\n See Also\n --------\n DataFrame.from_records: Convert structured or record ndarray\n to DataFrame.\n numpy.recarray: An ndarray that allows field access using\n attributes, analogous to typed columns in a\n spreadsheet.\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]},\n ... index=['a', 'b'])\n >>> df\n A B\n a 1 0.50\n b 2 0.75\n >>> df.to_records()\n rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],\n dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')])\n\n If the DataFrame index has no label then the recarray field name\n is set to 'index'. If the index has a label then this is used as the\n field name:\n\n >>> df.index = df.index.rename(\"I\")\n >>> df.to_records()\n rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],\n dtype=[('I', 'O'), ('A', '<i8'), ('B', '<f8')])\n\n The index can be excluded from the record array:\n\n >>> df.to_records(index=False)\n rec.array([(1, 0.5 ), (2, 0.75)],\n dtype=[('A', '<i8'), ('B', '<f8')])\n\n Data types can be specified for the columns:\n\n >>> df.to_records(column_dtypes={\"A\": \"int32\"})\n rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],\n dtype=[('I', 'O'), ('A', '<i4'), ('B', '<f8')])\n\n As well as for the index:\n\n >>> df.to_records(index_dtypes=\"<S2\")\n rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],\n dtype=[('I', 'S2'), ('A', '<i8'), ('B', '<f8')])\n\n >>> index_dtypes = f\"<S{df.index.str.len().max()}\"\n >>> df.to_records(index_dtypes=index_dtypes)\n rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],\n dtype=[('I', 'S1'), ('A', '<i8'), ('B', '<f8')])\n \"\"\"\n if index:\n if isinstance(self.index, MultiIndex):\n # array of tuples to numpy cols. 
copy copy copy\n ix_vals = list(map(np.array, zip(*self.index._values)))\n else:\n ix_vals = [self.index.values]\n\n arrays = ix_vals + [\n np.asarray(self.iloc[:, i]) for i in range(len(self.columns))\n ]\n\n count = 0\n index_names = list(self.index.names)\n\n if isinstance(self.index, MultiIndex):\n for i, n in enumerate(index_names):\n if n is None:\n index_names[i] = f\"level_{count}\"\n count += 1\n elif index_names[0] is None:\n index_names = [\"index\"]\n\n names = [str(name) for name in itertools.chain(index_names, self.columns)]\n else:\n arrays = [np.asarray(self.iloc[:, i]) for i in range(len(self.columns))]\n names = [str(c) for c in self.columns]\n index_names = []\n\n index_len = len(index_names)\n formats = []\n\n for i, v in enumerate(arrays):\n index = i\n\n # When the names and arrays are collected, we\n # first collect those in the DataFrame's index,\n # followed by those in its columns.\n #\n # Thus, the total length of the array is:\n # len(index_names) + len(DataFrame.columns).\n #\n # This check allows us to see whether we are\n # handling a name / array in the index or column.\n if index < index_len:\n dtype_mapping = index_dtypes\n name = index_names[index]\n else:\n index -= index_len\n dtype_mapping = column_dtypes\n name = self.columns[index]\n\n # We have a dictionary, so we get the data type\n # associated with the index or column (which can\n # be denoted by its name in the DataFrame or its\n # position in DataFrame's array of indices or\n # columns, whichever is applicable.\n if is_dict_like(dtype_mapping):\n if name in dtype_mapping:\n dtype_mapping = dtype_mapping[name]\n elif index in dtype_mapping:\n dtype_mapping = dtype_mapping[index]\n else:\n dtype_mapping = None\n\n # If no mapping can be found, use the array's\n # dtype attribute for formatting.\n #\n # A valid dtype must either be a type or\n # string naming a type.\n if dtype_mapping is None:\n formats.append(v.dtype)\n elif isinstance(dtype_mapping, (type, np.dtype, str)):\n formats.append(dtype_mapping)\n else:\n element = \"row\" if i < index_len else \"column\"\n msg = f\"Invalid dtype {dtype_mapping} specified for {element} {name}\"\n raise ValueError(msg)\n\n return np.rec.fromarrays(arrays, dtype={\"names\": names, \"formats\": formats})\n\n @classmethod\n def _from_arrays(\n cls,\n arrays,\n columns,\n index,\n dtype: Optional[Dtype] = None,\n verify_integrity: bool = True,\n ) -> DataFrame:\n \"\"\"\n Create DataFrame from a list of arrays corresponding to the columns.\n\n Parameters\n ----------\n arrays : list-like of arrays\n Each array in the list corresponds to one column, in order.\n columns : list-like, Index\n The column names for the resulting DataFrame.\n index : list-like, Index\n The rows labels for the resulting DataFrame.\n dtype : dtype, optional\n Optional dtype to enforce for all arrays.\n verify_integrity : bool, default True\n Validate and homogenize all input. 
If set to False, it is assumed\n that all elements of `arrays` are actual arrays how they will be\n stored in a block (numpy ndarray or ExtensionArray), have the same\n length as and are aligned with the index, and that `columns` and\n `index` are ensured to be an Index object.\n\n Returns\n -------\n DataFrame\n \"\"\"\n if dtype is not None:\n dtype = pandas_dtype(dtype)\n\n mgr = arrays_to_mgr(\n arrays,\n columns,\n index,\n columns,\n dtype=dtype,\n verify_integrity=verify_integrity,\n )\n return cls(mgr)\n\n @doc(storage_options=generic._shared_docs[\"storage_options\"])\n @deprecate_kwarg(old_arg_name=\"fname\", new_arg_name=\"path\")\n def to_stata(\n self,\n path: FilePathOrBuffer,\n convert_dates: Optional[Dict[Label, str]] = None,\n write_index: bool = True,\n byteorder: Optional[str] = None,\n time_stamp: Optional[datetime.datetime] = None,\n data_label: Optional[str] = None,\n variable_labels: Optional[Dict[Label, str]] = None,\n version: Optional[int] = 114,\n convert_strl: Optional[Sequence[Label]] = None,\n compression: CompressionOptions = \"infer\",\n storage_options: StorageOptions = None,\n ) -> None:\n \"\"\"\n Export DataFrame object to Stata dta format.\n\n Writes the DataFrame to a Stata dataset file.\n \"dta\" files contain a Stata dataset.\n\n Parameters\n ----------\n path : str, buffer or path object\n String, path object (pathlib.Path or py._path.local.LocalPath) or\n object implementing a binary write() function. If using a buffer\n then the buffer will not be automatically closed after the file\n data has been written.\n\n .. versionchanged:: 1.0.0\n\n Previously this was \"fname\"\n\n convert_dates : dict\n Dictionary mapping columns containing datetime types to stata\n internal format to use when writing the dates. Options are 'tc',\n 'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer\n or a name. Datetime columns that do not have a conversion type\n specified will be converted to 'tc'. Raises NotImplementedError if\n a datetime column has timezone information.\n write_index : bool\n Write the index to Stata dataset.\n byteorder : str\n Can be \">\", \"<\", \"little\", or \"big\". default is `sys.byteorder`.\n time_stamp : datetime\n A datetime to use as file creation date. Default is the current\n time.\n data_label : str, optional\n A label for the data set. Must be 80 characters or smaller.\n variable_labels : dict\n Dictionary containing columns as keys and variable labels as\n values. Each label must be 80 characters or smaller.\n version : {{114, 117, 118, 119, None}}, default 114\n Version to use in the output dta file. Set to None to let pandas\n decide between 118 or 119 formats depending on the number of\n columns in the frame. Version 114 can be read by Stata 10 and\n later. Version 117 can be read by Stata 13 or later. Version 118\n is supported in Stata 14 and later. Version 119 is supported in\n Stata 15 and later. Version 114 limits string variables to 244\n characters or fewer while versions 117 and later allow strings\n with lengths up to 2,000,000 characters. Versions 118 and 119\n support Unicode characters, and version 119 supports more than\n 32,767 variables.\n\n Version 119 should usually only be used when the number of\n variables exceeds the capacity of dta format 118. Exporting\n smaller datasets in format 119 may have unintended consequences,\n and, as of November 2020, Stata SE cannot read version 119 files.\n\n .. 
versionchanged:: 1.0.0\n\n Added support for formats 118 and 119.\n\n convert_strl : list, optional\n List of column names to convert to string columns to Stata StrL\n format. Only available if version is 117. Storing strings in the\n StrL format can produce smaller dta files if strings have more than\n 8 characters and values are repeated.\n compression : str or dict, default 'infer'\n For on-the-fly compression of the output dta. If string, specifies\n compression mode. If dict, value at key 'method' specifies\n compression mode. Compression mode must be one of {{'infer', 'gzip',\n 'bz2', 'zip', 'xz', None}}. If compression mode is 'infer' and\n `fname` is path-like, then detect compression from the following\n extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no\n compression). If dict and compression mode is one of {{'zip',\n 'gzip', 'bz2'}}, or inferred as one of the above, other entries\n passed as additional compression options.\n\n .. versionadded:: 1.1.0\n\n {storage_options}\n\n .. versionadded:: 1.2.0\n\n Raises\n ------\n NotImplementedError\n * If datetimes contain timezone information\n * Column dtype is not representable in Stata\n ValueError\n * Columns listed in convert_dates are neither datetime64[ns]\n or datetime.datetime\n * Column listed in convert_dates is not in DataFrame\n * Categorical label contains more than 32,000 characters\n\n See Also\n --------\n read_stata : Import Stata data files.\n io.stata.StataWriter : Low-level writer for Stata data files.\n io.stata.StataWriter117 : Low-level writer for version 117 files.\n\n Examples\n --------\n >>> df = pd.DataFrame({{'animal': ['falcon', 'parrot', 'falcon',\n ... 'parrot'],\n ... 'speed': [350, 18, 361, 15]}})\n >>> df.to_stata('animals.dta') # doctest: +SKIP\n \"\"\"\n if version not in (114, 117, 118, 119, None):\n raise ValueError(\"Only formats 114, 117, 118 and 119 are supported.\")\n if version == 114:\n if convert_strl is not None:\n raise ValueError(\"strl is not supported in format 114\")\n from pandas.io.stata import StataWriter as statawriter\n elif version == 117:\n # mypy: Name 'statawriter' already defined (possibly by an import)\n from pandas.io.stata import ( # type: ignore[no-redef]\n StataWriter117 as statawriter,\n )\n else: # versions 118 and 119\n # mypy: Name 'statawriter' already defined (possibly by an import)\n from pandas.io.stata import ( # type: ignore[no-redef]\n StataWriterUTF8 as statawriter,\n )\n\n kwargs: Dict[str, Any] = {}\n if version is None or version >= 117:\n # strl conversion is only supported >= 117\n kwargs[\"convert_strl\"] = convert_strl\n if version is None or version >= 118:\n # Specifying the version is only supported for UTF8 (118 or 119)\n kwargs[\"version\"] = version\n\n # mypy: Too many arguments for \"StataWriter\"\n writer = statawriter( # type: ignore[call-arg]\n path,\n self,\n convert_dates=convert_dates,\n byteorder=byteorder,\n time_stamp=time_stamp,\n data_label=data_label,\n write_index=write_index,\n variable_labels=variable_labels,\n compression=compression,\n storage_options=storage_options,\n **kwargs,\n )\n writer.write_file()\n\n @deprecate_kwarg(old_arg_name=\"fname\", new_arg_name=\"path\")\n def to_feather(self, path: FilePathOrBuffer[AnyStr], **kwargs) -> None:\n \"\"\"\n Write a DataFrame to the binary Feather format.\n\n Parameters\n ----------\n path : str or file-like object\n If a string, it will be used as Root Directory path.\n **kwargs :\n Additional keywords passed to :func:`pyarrow.feather.write_feather`.\n Starting with 
pyarrow 0.17, this includes the `compression`,\n `compression_level`, `chunksize` and `version` keywords.\n\n .. versionadded:: 1.1.0\n \"\"\"\n from pandas.io.feather_format import to_feather\n\n to_feather(self, path, **kwargs)\n\n @doc(\n Series.to_markdown,\n klass=_shared_doc_kwargs[\"klass\"],\n storage_options=_shared_docs[\"storage_options\"],\n examples=\"\"\"Examples\n --------\n >>> df = pd.DataFrame(\n ... data={\"animal_1\": [\"elk\", \"pig\"], \"animal_2\": [\"dog\", \"quetzal\"]}\n ... )\n >>> print(df.to_markdown())\n | | animal_1 | animal_2 |\n |---:|:-----------|:-----------|\n | 0 | elk | dog |\n | 1 | pig | quetzal |\n\n Output markdown with a tabulate option.\n\n >>> print(df.to_markdown(tablefmt=\"grid\"))\n +----+------------+------------+\n | | animal_1 | animal_2 |\n +====+============+============+\n | 0 | elk | dog |\n +----+------------+------------+\n | 1 | pig | quetzal |\n +----+------------+------------+\n \"\"\",\n )\n def to_markdown(\n self,\n buf: Optional[Union[IO[str], str]] = None,\n mode: str = \"wt\",\n index: bool = True,\n storage_options: StorageOptions = None,\n **kwargs,\n ) -> Optional[str]:\n if \"showindex\" in kwargs:\n warnings.warn(\n \"'showindex' is deprecated. Only 'index' will be used \"\n \"in a future version. Use 'index' to silence this warning.\",\n FutureWarning,\n stacklevel=2,\n )\n\n kwargs.setdefault(\"headers\", \"keys\")\n kwargs.setdefault(\"tablefmt\", \"pipe\")\n kwargs.setdefault(\"showindex\", index)\n tabulate = import_optional_dependency(\"tabulate\")\n result = tabulate.tabulate(self, **kwargs)\n if buf is None:\n return result\n\n with get_handle(buf, mode, storage_options=storage_options) as handles:\n assert not isinstance(handles.handle, (str, mmap.mmap))\n handles.handle.writelines(result)\n return None\n\n @doc(storage_options=generic._shared_docs[\"storage_options\"])\n @deprecate_kwarg(old_arg_name=\"fname\", new_arg_name=\"path\")\n def to_parquet(\n self,\n path: Optional[FilePathOrBuffer] = None,\n engine: str = \"auto\",\n compression: Optional[str] = \"snappy\",\n index: Optional[bool] = None,\n partition_cols: Optional[List[str]] = None,\n storage_options: StorageOptions = None,\n **kwargs,\n ) -> Optional[bytes]:\n \"\"\"\n Write a DataFrame to the binary parquet format.\n\n This function writes the dataframe as a `parquet file\n <https://parquet.apache.org/>`_. You can choose different parquet\n backends, and have the option of compression. See\n :ref:`the user guide <io.parquet>` for more details.\n\n Parameters\n ----------\n path : str or file-like object, default None\n If a string, it will be used as Root Directory path\n when writing a partitioned dataset. By file-like object,\n we refer to objects with a write() method, such as a file handle\n (e.g. via builtin open function) or io.BytesIO. The engine\n fastparquet does not accept file-like objects. If path is None,\n a bytes object is returned.\n\n .. versionchanged:: 1.2.0\n\n Previously this was \"fname\"\n\n engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto'\n Parquet library to use. If 'auto', then the option\n ``io.parquet.engine`` is used. The default ``io.parquet.engine``\n behavior is to try 'pyarrow', falling back to 'fastparquet' if\n 'pyarrow' is unavailable.\n compression : {{'snappy', 'gzip', 'brotli', None}}, default 'snappy'\n Name of the compression to use. 
Use ``None`` for no compression.\n index : bool, default None\n If ``True``, include the dataframe's index(es) in the file output.\n If ``False``, they will not be written to the file.\n If ``None``, similar to ``True`` the dataframe's index(es)\n will be saved. However, instead of being saved as values,\n the RangeIndex will be stored as a range in the metadata so it\n doesn't require much space and is faster. Other indexes will\n be included as columns in the file output.\n\n .. versionadded:: 0.24.0\n\n partition_cols : list, optional, default None\n Column names by which to partition the dataset.\n Columns are partitioned in the order they are given.\n Must be None if path is not a string.\n\n .. versionadded:: 0.24.0\n\n {storage_options}\n\n .. versionadded:: 1.2.0\n\n **kwargs\n Additional arguments passed to the parquet library. See\n :ref:`pandas io <io.parquet>` for more details.\n\n Returns\n -------\n bytes if no path argument is provided else None\n\n See Also\n --------\n read_parquet : Read a parquet file.\n DataFrame.to_csv : Write a csv file.\n DataFrame.to_sql : Write to a sql table.\n DataFrame.to_hdf : Write to hdf.\n\n Notes\n -----\n This function requires either the `fastparquet\n <https://pypi.org/project/fastparquet>`_ or `pyarrow\n <https://arrow.apache.org/docs/python/>`_ library.\n\n Examples\n --------\n >>> df = pd.DataFrame(data={{'col1': [1, 2], 'col2': [3, 4]}})\n >>> df.to_parquet('df.parquet.gzip',\n ... compression='gzip') # doctest: +SKIP\n >>> pd.read_parquet('df.parquet.gzip') # doctest: +SKIP\n col1 col2\n 0 1 3\n 1 2 4\n\n If you want to get a buffer to the parquet content you can use a io.BytesIO\n object, as long as you don't use partition_cols, which creates multiple files.\n\n >>> import io\n >>> f = io.BytesIO()\n >>> df.to_parquet(f)\n >>> f.seek(0)\n 0\n >>> content = f.read()\n \"\"\"\n from pandas.io.parquet import to_parquet\n\n return to_parquet(\n self,\n path,\n engine,\n compression=compression,\n index=index,\n partition_cols=partition_cols,\n storage_options=storage_options,\n **kwargs,\n )\n\n @Substitution(\n header_type=\"bool\",\n header=\"Whether to print column labels, default True\",\n col_space_type=\"str or int, list or dict of int or str\",\n col_space=\"The minimum width of each column in CSS length \"\n \"units. An int is assumed to be px units.\\n\\n\"\n \" .. versionadded:: 0.25.0\\n\"\n \" Ability to use str\",\n )\n @Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring)\n def to_html(\n self,\n buf=None,\n columns=None,\n col_space=None,\n header=True,\n index=True,\n na_rep=\"NaN\",\n formatters=None,\n float_format=None,\n sparsify=None,\n index_names=True,\n justify=None,\n max_rows=None,\n max_cols=None,\n show_dimensions=False,\n decimal=\".\",\n bold_rows=True,\n classes=None,\n escape=True,\n notebook=False,\n border=None,\n table_id=None,\n render_links=False,\n encoding=None,\n ):\n \"\"\"\n Render a DataFrame as an HTML table.\n %(shared_params)s\n bold_rows : bool, default True\n Make the row labels bold in the output.\n classes : str or list or tuple, default None\n CSS class(es) to apply to the resulting html table.\n escape : bool, default True\n Convert the characters <, >, and & to HTML-safe sequences.\n notebook : {True, False}, default False\n Whether the generated HTML is for IPython Notebook.\n border : int\n A ``border=border`` attribute is included in the opening\n `<table>` tag. 
Default ``pd.options.display.html.border``.\n encoding : str, default \"utf-8\"\n Set character encoding.\n\n .. versionadded:: 1.0\n\n table_id : str, optional\n A css id is included in the opening `<table>` tag if specified.\n render_links : bool, default False\n Convert URLs to HTML links.\n\n .. versionadded:: 0.24.0\n %(returns)s\n See Also\n --------\n to_string : Convert DataFrame to a string.\n \"\"\"\n if justify is not None and justify not in fmt._VALID_JUSTIFY_PARAMETERS:\n raise ValueError(\"Invalid value for justify parameter\")\n\n formatter = fmt.DataFrameFormatter(\n self,\n columns=columns,\n col_space=col_space,\n na_rep=na_rep,\n header=header,\n index=index,\n formatters=formatters,\n float_format=float_format,\n bold_rows=bold_rows,\n sparsify=sparsify,\n justify=justify,\n index_names=index_names,\n escape=escape,\n decimal=decimal,\n max_rows=max_rows,\n max_cols=max_cols,\n show_dimensions=show_dimensions,\n )\n # TODO: a generic formatter wld b in DataFrameFormatter\n return fmt.DataFrameRenderer(formatter).to_html(\n buf=buf,\n classes=classes,\n notebook=notebook,\n border=border,\n encoding=encoding,\n table_id=table_id,\n render_links=render_links,\n )\n\n # ----------------------------------------------------------------------\n @Substitution(\n klass=\"DataFrame\",\n type_sub=\" and columns\",\n max_cols_sub=dedent(\n \"\"\"\\\n max_cols : int, optional\n When to switch from the verbose to the truncated output. If the\n DataFrame has more than `max_cols` columns, the truncated output\n is used. By default, the setting in\n ``pandas.options.display.max_info_columns`` is used.\"\"\"\n ),\n show_counts_sub=dedent(\n \"\"\"\\\n show_counts : bool, optional\n Whether to show the non-null counts. By default, this is shown\n only if the DataFrame is smaller than\n ``pandas.options.display.max_info_rows`` and\n ``pandas.options.display.max_info_columns``. A value of True always\n shows the counts, and False never shows the counts.\n null_counts : bool, optional\n .. deprecated:: 1.2.0\n Use show_counts instead.\"\"\"\n ),\n examples_sub=dedent(\n \"\"\"\\\n >>> int_values = [1, 2, 3, 4, 5]\n >>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon']\n >>> float_values = [0.0, 0.25, 0.5, 0.75, 1.0]\n >>> df = pd.DataFrame({\"int_col\": int_values, \"text_col\": text_values,\n ... \"float_col\": float_values})\n >>> df\n int_col text_col float_col\n 0 1 alpha 0.00\n 1 2 beta 0.25\n 2 3 gamma 0.50\n 3 4 delta 0.75\n 4 5 epsilon 1.00\n\n Prints information of all columns:\n\n >>> df.info(verbose=True)\n <class 'pandas.core.frame.DataFrame'>\n RangeIndex: 5 entries, 0 to 4\n Data columns (total 3 columns):\n # Column Non-Null Count Dtype\n --- ------ -------------- -----\n 0 int_col 5 non-null int64\n 1 text_col 5 non-null object\n 2 float_col 5 non-null float64\n dtypes: float64(1), int64(1), object(1)\n memory usage: 248.0+ bytes\n\n Prints a summary of columns count and its dtypes but not per column\n information:\n\n >>> df.info(verbose=False)\n <class 'pandas.core.frame.DataFrame'>\n RangeIndex: 5 entries, 0 to 4\n Columns: 3 entries, int_col to float_col\n dtypes: float64(1), int64(1), object(1)\n memory usage: 248.0+ bytes\n\n Pipe output of DataFrame.info to buffer instead of sys.stdout, get\n buffer content and writes to a text file:\n\n >>> import io\n >>> buffer = io.StringIO()\n >>> df.info(buf=buffer)\n >>> s = buffer.getvalue()\n >>> with open(\"df_info.txt\", \"w\",\n ... encoding=\"utf-8\") as f: # doctest: +SKIP\n ... 
f.write(s)\n 260\n\n The `memory_usage` parameter allows deep introspection mode, specially\n useful for big DataFrames and fine-tune memory optimization:\n\n >>> random_strings_array = np.random.choice(['a', 'b', 'c'], 10 ** 6)\n >>> df = pd.DataFrame({\n ... 'column_1': np.random.choice(['a', 'b', 'c'], 10 ** 6),\n ... 'column_2': np.random.choice(['a', 'b', 'c'], 10 ** 6),\n ... 'column_3': np.random.choice(['a', 'b', 'c'], 10 ** 6)\n ... })\n >>> df.info()\n <class 'pandas.core.frame.DataFrame'>\n RangeIndex: 1000000 entries, 0 to 999999\n Data columns (total 3 columns):\n # Column Non-Null Count Dtype\n --- ------ -------------- -----\n 0 column_1 1000000 non-null object\n 1 column_2 1000000 non-null object\n 2 column_3 1000000 non-null object\n dtypes: object(3)\n memory usage: 22.9+ MB\n\n >>> df.info(memory_usage='deep')\n <class 'pandas.core.frame.DataFrame'>\n RangeIndex: 1000000 entries, 0 to 999999\n Data columns (total 3 columns):\n # Column Non-Null Count Dtype\n --- ------ -------------- -----\n 0 column_1 1000000 non-null object\n 1 column_2 1000000 non-null object\n 2 column_3 1000000 non-null object\n dtypes: object(3)\n memory usage: 165.9 MB\"\"\"\n ),\n see_also_sub=dedent(\n \"\"\"\\\n DataFrame.describe: Generate descriptive statistics of DataFrame\n columns.\n DataFrame.memory_usage: Memory usage of DataFrame columns.\"\"\"\n ),\n version_added_sub=\"\",\n )\n @doc(BaseInfo.render)\n def info(\n self,\n verbose: Optional[bool] = None,\n buf: Optional[IO[str]] = None,\n max_cols: Optional[int] = None,\n memory_usage: Optional[Union[bool, str]] = None,\n show_counts: Optional[bool] = None,\n null_counts: Optional[bool] = None,\n ) -> None:\n if null_counts is not None:\n if show_counts is not None:\n raise ValueError(\"null_counts used with show_counts. Use show_counts.\")\n warnings.warn(\n \"null_counts is deprecated. Use show_counts instead\",\n FutureWarning,\n stacklevel=2,\n )\n show_counts = null_counts\n info = DataFrameInfo(\n data=self,\n memory_usage=memory_usage,\n )\n info.render(\n buf=buf,\n max_cols=max_cols,\n verbose=verbose,\n show_counts=show_counts,\n )\n\n def memory_usage(self, index=True, deep=False) -> Series:\n \"\"\"\n Return the memory usage of each column in bytes.\n\n The memory usage can optionally include the contribution of\n the index and elements of `object` dtype.\n\n This value is displayed in `DataFrame.info` by default. This can be\n suppressed by setting ``pandas.options.display.memory_usage`` to False.\n\n Parameters\n ----------\n index : bool, default True\n Specifies whether to include the memory usage of the DataFrame's\n index in returned Series. If ``index=True``, the memory usage of\n the index is the first item in the output.\n deep : bool, default False\n If True, introspect the data deeply by interrogating\n `object` dtypes for system-level memory consumption, and include\n it in the returned values.\n\n Returns\n -------\n Series\n A Series whose index is the original column names and whose values\n is the memory usage of each column in bytes.\n\n See Also\n --------\n numpy.ndarray.nbytes : Total bytes consumed by the elements of an\n ndarray.\n Series.memory_usage : Bytes consumed by a Series.\n Categorical : Memory-efficient array for string values with\n many repeated values.\n DataFrame.info : Concise summary of a DataFrame.\n\n Examples\n --------\n >>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool']\n >>> data = dict([(t, np.ones(shape=5000, dtype=int).astype(t))\n ... 
for t in dtypes])\n >>> df = pd.DataFrame(data)\n >>> df.head()\n int64 float64 complex128 object bool\n 0 1 1.0 1.0+0.0j 1 True\n 1 1 1.0 1.0+0.0j 1 True\n 2 1 1.0 1.0+0.0j 1 True\n 3 1 1.0 1.0+0.0j 1 True\n 4 1 1.0 1.0+0.0j 1 True\n\n >>> df.memory_usage()\n Index 128\n int64 40000\n float64 40000\n complex128 80000\n object 40000\n bool 5000\n dtype: int64\n\n >>> df.memory_usage(index=False)\n int64 40000\n float64 40000\n complex128 80000\n object 40000\n bool 5000\n dtype: int64\n\n The memory footprint of `object` dtype columns is ignored by default:\n\n >>> df.memory_usage(deep=True)\n Index 128\n int64 40000\n float64 40000\n complex128 80000\n object 180000\n bool 5000\n dtype: int64\n\n Use a Categorical for efficient storage of an object-dtype column with\n many repeated values.\n\n >>> df['object'].astype('category').memory_usage(deep=True)\n 5244\n \"\"\"\n result = self._constructor_sliced(\n [c.memory_usage(index=False, deep=deep) for col, c in self.items()],\n index=self.columns,\n )\n if index:\n result = self._constructor_sliced(\n self.index.memory_usage(deep=deep), index=[\"Index\"]\n ).append(result)\n return result\n\n def transpose(self, *args, copy: bool = False) -> DataFrame:\n \"\"\"\n Transpose index and columns.\n\n Reflect the DataFrame over its main diagonal by writing rows as columns\n and vice-versa. The property :attr:`.T` is an accessor to the method\n :meth:`transpose`.\n\n Parameters\n ----------\n *args : tuple, optional\n Accepted for compatibility with NumPy.\n copy : bool, default False\n Whether to copy the data after transposing, even for DataFrames\n with a single dtype.\n\n Note that a copy is always required for mixed dtype DataFrames,\n or for DataFrames with any extension types.\n\n Returns\n -------\n DataFrame\n The transposed DataFrame.\n\n See Also\n --------\n numpy.transpose : Permute the dimensions of a given array.\n\n Notes\n -----\n Transposing a DataFrame with mixed dtypes will result in a homogeneous\n DataFrame with the `object` dtype. In such a case, a copy of the data\n is always made.\n\n Examples\n --------\n **Square DataFrame with homogeneous dtype**\n\n >>> d1 = {'col1': [1, 2], 'col2': [3, 4]}\n >>> df1 = pd.DataFrame(data=d1)\n >>> df1\n col1 col2\n 0 1 3\n 1 2 4\n\n >>> df1_transposed = df1.T # or df1.transpose()\n >>> df1_transposed\n 0 1\n col1 1 2\n col2 3 4\n\n When the dtype is homogeneous in the original DataFrame, we get a\n transposed DataFrame with the same dtype:\n\n >>> df1.dtypes\n col1 int64\n col2 int64\n dtype: object\n >>> df1_transposed.dtypes\n 0 int64\n 1 int64\n dtype: object\n\n **Non-square DataFrame with mixed dtypes**\n\n >>> d2 = {'name': ['Alice', 'Bob'],\n ... 'score': [9.5, 8],\n ... 'employed': [False, True],\n ... 'kids': [0, 0]}\n >>> df2 = pd.DataFrame(data=d2)\n >>> df2\n name score employed kids\n 0 Alice 9.5 False 0\n 1 Bob 8.0 True 0\n\n >>> df2_transposed = df2.T # or df2.transpose()\n >>> df2_transposed\n 0 1\n name Alice Bob\n score 9.5 8.0\n employed False True\n kids 0 0\n\n When the DataFrame has mixed dtypes, we get a transposed DataFrame with\n the `object` dtype:\n\n >>> df2.dtypes\n name object\n score float64\n employed bool\n kids int64\n dtype: object\n >>> df2_transposed.dtypes\n 0 object\n 1 object\n dtype: object\n \"\"\"\n nv.validate_transpose(args, {})\n # construct the args\n\n dtypes = list(self.dtypes)\n if self._is_homogeneous_type and dtypes and is_extension_array_dtype(dtypes[0]):\n # We have EAs with the same dtype. 
We can preserve that dtype in transpose.\n dtype = dtypes[0]\n arr_type = dtype.construct_array_type()\n values = self.values\n\n new_values = [arr_type._from_sequence(row, dtype=dtype) for row in values]\n result = self._constructor(\n dict(zip(self.index, new_values)), index=self.columns\n )\n\n else:\n new_values = self.values.T\n if copy:\n new_values = new_values.copy()\n result = self._constructor(\n new_values, index=self.columns, columns=self.index\n )\n\n return result.__finalize__(self, method=\"transpose\")\n\n @property\n def T(self) -> DataFrame:\n return self.transpose()\n\n # ----------------------------------------------------------------------\n # Indexing Methods\n\n def _ixs(self, i: int, axis: int = 0):\n \"\"\"\n Parameters\n ----------\n i : int\n axis : int\n\n Notes\n -----\n If slice passed, the resulting data will be a view.\n \"\"\"\n # irow\n if axis == 0:\n new_values = self._mgr.fast_xs(i)\n\n # if we are a copy, mark as such\n copy = isinstance(new_values, np.ndarray) and new_values.base is None\n result = self._constructor_sliced(\n new_values,\n index=self.columns,\n name=self.index[i],\n dtype=new_values.dtype,\n )\n result._set_is_copy(self, copy=copy)\n return result\n\n # icol\n else:\n label = self.columns[i]\n\n values = self._mgr.iget(i)\n result = self._box_col_values(values, i)\n\n # this is a cached value, mark it so\n result._set_as_cached(label, self)\n\n return result\n\n def _get_column_array(self, i: int) -> ArrayLike:\n \"\"\"\n Get the values of the i'th column (ndarray or ExtensionArray, as stored\n in the Block)\n \"\"\"\n return self._mgr.iget_values(i)\n\n def _iter_column_arrays(self) -> Iterator[ArrayLike]:\n \"\"\"\n Iterate over the arrays of all columns in order.\n This returns the values as stored in the Block (ndarray or ExtensionArray).\n \"\"\"\n for i in range(len(self.columns)):\n yield self._get_column_array(i)\n\n def __getitem__(self, key):\n key = lib.item_from_zerodim(key)\n key = com.apply_if_callable(key, self)\n\n if is_hashable(key):\n # shortcut if the key is in columns\n if self.columns.is_unique and key in self.columns:\n if isinstance(self.columns, MultiIndex):\n return self._getitem_multilevel(key)\n return self._get_item_cache(key)\n\n # Do we have a slicer (on rows)?\n indexer = convert_to_index_sliceable(self, key)\n if indexer is not None:\n if isinstance(indexer, np.ndarray):\n indexer = lib.maybe_indices_to_slice(\n indexer.astype(np.intp, copy=False), len(self)\n )\n # either we have a slice or we have a string that can be converted\n # to a slice for partial-string date indexing\n return self._slice(indexer, axis=0)\n\n # Do we have a (boolean) DataFrame?\n if isinstance(key, DataFrame):\n return self.where(key)\n\n # Do we have a (boolean) 1d indexer?\n if com.is_bool_indexer(key):\n return self._getitem_bool_array(key)\n\n # We are left with two options: a single key, and a collection of keys,\n # We interpret tuples as collections only for non-MultiIndex\n is_single_key = isinstance(key, tuple) or not is_list_like(key)\n\n if is_single_key:\n if self.columns.nlevels > 1:\n return self._getitem_multilevel(key)\n indexer = self.columns.get_loc(key)\n if is_integer(indexer):\n indexer = [indexer]\n else:\n if is_iterator(key):\n key = list(key)\n indexer = self.loc._get_listlike_indexer(key, axis=1, raise_missing=True)[1]\n\n # take() does not accept boolean indexers\n if getattr(indexer, \"dtype\", None) == bool:\n indexer = np.where(indexer)[0]\n\n data = self._take_with_is_copy(indexer, axis=1)\n\n if 
is_single_key:\n # What does looking for a single key in a non-unique index return?\n # The behavior is inconsistent. It returns a Series, except when\n # - the key itself is repeated (test on data.shape, #9519), or\n # - we have a MultiIndex on columns (test on self.columns, #21309)\n if data.shape[1] == 1 and not isinstance(self.columns, MultiIndex):\n # GH#26490 using data[key] can cause RecursionError\n data = data._get_item_cache(key)\n\n return data\n\n def _getitem_bool_array(self, key):\n # also raises Exception if object array with NA values\n # warning here just in case -- previously __setitem__ was\n # reindexing but __getitem__ was not; it seems more reasonable to\n # go with the __setitem__ behavior since that is more consistent\n # with all other indexing behavior\n if isinstance(key, Series) and not key.index.equals(self.index):\n warnings.warn(\n \"Boolean Series key will be reindexed to match DataFrame index.\",\n UserWarning,\n stacklevel=3,\n )\n elif len(key) != len(self.index):\n raise ValueError(\n f\"Item wrong length {len(key)} instead of {len(self.index)}.\"\n )\n\n # check_bool_indexer will throw exception if Series key cannot\n # be reindexed to match DataFrame rows\n key = check_bool_indexer(self.index, key)\n indexer = key.nonzero()[0]\n return self._take_with_is_copy(indexer, axis=0)\n\n def _getitem_multilevel(self, key):\n # self.columns is a MultiIndex\n loc = self.columns.get_loc(key)\n if isinstance(loc, (slice, np.ndarray)):\n new_columns = self.columns[loc]\n result_columns = maybe_droplevels(new_columns, key)\n if self._is_mixed_type:\n result = self.reindex(columns=new_columns)\n result.columns = result_columns\n else:\n new_values = self.values[:, loc]\n result = self._constructor(\n new_values, index=self.index, columns=result_columns\n )\n result = result.__finalize__(self)\n\n # If there is only one column being returned, and its name is\n # either an empty string, or a tuple with an empty string as its\n # first element, then treat the empty string as a placeholder\n # and return the column as if the user had provided that empty\n # string in the key. 
If the result is a Series, exclude the\n # implied empty string from its name.\n if len(result.columns) == 1:\n top = result.columns[0]\n if isinstance(top, tuple):\n top = top[0]\n if top == \"\":\n result = result[\"\"]\n if isinstance(result, Series):\n result = self._constructor_sliced(\n result, index=self.index, name=key\n )\n\n result._set_is_copy(self)\n return result\n else:\n # loc is neither a slice nor ndarray, so must be an int\n return self._ixs(loc, axis=1)\n\n def _get_value(self, index, col, takeable: bool = False):\n \"\"\"\n Quickly retrieve single value at passed column and index.\n\n Parameters\n ----------\n index : row label\n col : column label\n takeable : interpret the index/col as indexers, default False\n\n Returns\n -------\n scalar\n \"\"\"\n if takeable:\n series = self._ixs(col, axis=1)\n return series._values[index]\n\n series = self._get_item_cache(col)\n engine = self.index._engine\n\n try:\n loc = engine.get_loc(index)\n return series._values[loc]\n except KeyError:\n # GH 20629\n if self.index.nlevels > 1:\n # partial indexing forbidden\n raise\n\n # we cannot handle direct indexing\n # use positional\n col = self.columns.get_loc(col)\n index = self.index.get_loc(index)\n return self._get_value(index, col, takeable=True)\n\n def __setitem__(self, key, value):\n key = com.apply_if_callable(key, self)\n\n # see if we can slice the rows\n indexer = convert_to_index_sliceable(self, key)\n if indexer is not None:\n # either we have a slice or we have a string that can be converted\n # to a slice for partial-string date indexing\n return self._setitem_slice(indexer, value)\n\n if isinstance(key, DataFrame) or getattr(key, \"ndim\", None) == 2:\n self._setitem_frame(key, value)\n elif isinstance(key, (Series, np.ndarray, list, Index)):\n self._setitem_array(key, value)\n else:\n # set column\n self._set_item(key, value)\n\n def _setitem_slice(self, key: slice, value):\n # NB: we can't just use self.loc[key] = value because that\n # operates on labels and we need to operate positional for\n # backwards-compat, xref GH#31469\n self._check_setitem_copy()\n self.iloc._setitem_with_indexer(key, value)\n\n def _setitem_array(self, key, value):\n # also raises Exception if object array with NA values\n if com.is_bool_indexer(key):\n if len(key) != len(self.index):\n raise ValueError(\n f\"Item wrong length {len(key)} instead of {len(self.index)}!\"\n )\n key = check_bool_indexer(self.index, key)\n indexer = key.nonzero()[0]\n self._check_setitem_copy()\n self.iloc._setitem_with_indexer(indexer, value)\n else:\n if isinstance(value, DataFrame):\n if len(value.columns) != len(key):\n raise ValueError(\"Columns must be same length as key\")\n for k1, k2 in zip(key, value.columns):\n self[k1] = value[k2]\n else:\n self.loc._ensure_listlike_indexer(key, axis=1, value=value)\n indexer = self.loc._get_listlike_indexer(\n key, axis=1, raise_missing=False\n )[1]\n self._check_setitem_copy()\n self.iloc._setitem_with_indexer((slice(None), indexer), value)\n\n def _setitem_frame(self, key, value):\n # support boolean setting with DataFrame input, e.g.\n # df[df > df2] = 0\n if isinstance(key, np.ndarray):\n if key.shape != self.shape:\n raise ValueError(\"Array conditional must be same shape as self\")\n key = self._constructor(key, **self._construct_axes_dict())\n\n if key.size and not is_bool_dtype(key.values):\n raise TypeError(\n \"Must pass DataFrame or 2-d ndarray with boolean values only\"\n )\n\n self._check_inplace_setting(value)\n self._check_setitem_copy()\n 
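        # Illustrative sketch (assumed example, not executed here): a
        # boolean-frame setitem such as
        #
        #     df = pd.DataFrame({"a": [1, -2], "b": [-3, 4]})
        #     df[df < 0] = 0
        #
        # reaches the call below, which keeps existing values where the
        # inverted mask ``-key`` is True and writes ``value`` where the
        # original mask is True (the same effect as
        # ``df.mask(df < 0, 0, inplace=True)``).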
self._where(-key, value, inplace=True)\n\n def _iset_item(self, loc: int, value):\n self._ensure_valid_index(value)\n\n # technically _sanitize_column expects a label, not a position,\n # but the behavior is the same as long as we pass broadcast=False\n value = self._sanitize_column(loc, value, broadcast=False)\n NDFrame._iset_item(self, loc, value)\n\n # check if we are modifying a copy\n # try to set first as we want an invalid\n # value exception to occur first\n if len(self):\n self._check_setitem_copy()\n\n def _set_item(self, key, value):\n \"\"\"\n Add series to DataFrame in specified column.\n\n If series is a numpy-array (not a Series/TimeSeries), it must be the\n same length as the DataFrames index or an error will be thrown.\n\n Series/TimeSeries will be conformed to the DataFrames index to\n ensure homogeneity.\n \"\"\"\n self._ensure_valid_index(value)\n value = self._sanitize_column(key, value)\n NDFrame._set_item(self, key, value)\n\n # check if we are modifying a copy\n # try to set first as we want an invalid\n # value exception to occur first\n if len(self):\n self._check_setitem_copy()\n\n def _set_value(self, index, col, value, takeable: bool = False):\n \"\"\"\n Put single value at passed column and index.\n\n Parameters\n ----------\n index : row label\n col : column label\n value : scalar\n takeable : interpret the index/col as indexers, default False\n \"\"\"\n try:\n if takeable is True:\n series = self._ixs(col, axis=1)\n series._set_value(index, value, takeable=True)\n return\n\n series = self._get_item_cache(col)\n engine = self.index._engine\n loc = engine.get_loc(index)\n validate_numeric_casting(series.dtype, value)\n\n series._values[loc] = value\n # Note: trying to use series._set_value breaks tests in\n # tests.frame.indexing.test_indexing and tests.indexing.test_partial\n except (KeyError, TypeError):\n # set using a non-recursive method & reset the cache\n if takeable:\n self.iloc[index, col] = value\n else:\n self.loc[index, col] = value\n self._item_cache.pop(col, None)\n\n def _ensure_valid_index(self, value):\n \"\"\"\n Ensure that if we don't have an index, that we can create one from the\n passed value.\n \"\"\"\n # GH5632, make sure that we are a Series convertible\n if not len(self.index) and is_list_like(value) and len(value):\n try:\n value = Series(value)\n except (ValueError, NotImplementedError, TypeError) as err:\n raise ValueError(\n \"Cannot set a frame with no defined index \"\n \"and a value that cannot be converted to a Series\"\n ) from err\n\n # GH31368 preserve name of index\n index_copy = value.index.copy()\n if self.index.name is not None:\n index_copy.name = self.index.name\n\n self._mgr = self._mgr.reindex_axis(index_copy, axis=1, fill_value=np.nan)\n\n def _box_col_values(self, values, loc: int) -> Series:\n \"\"\"\n Provide boxed values for a column.\n \"\"\"\n # Lookup in columns so that if e.g. 
a str datetime was passed\n # we attach the Timestamp object as the name.\n name = self.columns[loc]\n klass = self._constructor_sliced\n return klass(values, index=self.index, name=name, fastpath=True)\n\n # ----------------------------------------------------------------------\n # Unsorted\n\n def query(self, expr, inplace=False, **kwargs):\n \"\"\"\n Query the columns of a DataFrame with a boolean expression.\n\n Parameters\n ----------\n expr : str\n The query string to evaluate.\n\n You can refer to variables\n in the environment by prefixing them with an '@' character like\n ``@a + b``.\n\n You can refer to column names that are not valid Python variable names\n by surrounding them in backticks. Thus, column names containing spaces\n or punctuations (besides underscores) or starting with digits must be\n surrounded by backticks. (For example, a column named \"Area (cm^2) would\n be referenced as `Area (cm^2)`). Column names which are Python keywords\n (like \"list\", \"for\", \"import\", etc) cannot be used.\n\n For example, if one of your columns is called ``a a`` and you want\n to sum it with ``b``, your query should be ```a a` + b``.\n\n .. versionadded:: 0.25.0\n Backtick quoting introduced.\n\n .. versionadded:: 1.0.0\n Expanding functionality of backtick quoting for more than only spaces.\n\n inplace : bool\n Whether the query should modify the data in place or return\n a modified copy.\n **kwargs\n See the documentation for :func:`eval` for complete details\n on the keyword arguments accepted by :meth:`DataFrame.query`.\n\n Returns\n -------\n DataFrame or None\n DataFrame resulting from the provided query expression or\n None if ``inplace=True``.\n\n See Also\n --------\n eval : Evaluate a string describing operations on\n DataFrame columns.\n DataFrame.eval : Evaluate a string describing operations on\n DataFrame columns.\n\n Notes\n -----\n The result of the evaluation of this expression is first passed to\n :attr:`DataFrame.loc` and if that fails because of a\n multidimensional key (e.g., a DataFrame) then the result will be passed\n to :meth:`DataFrame.__getitem__`.\n\n This method uses the top-level :func:`eval` function to\n evaluate the passed query.\n\n The :meth:`~pandas.DataFrame.query` method uses a slightly\n modified Python syntax by default. For example, the ``&`` and ``|``\n (bitwise) operators have the precedence of their boolean cousins,\n :keyword:`and` and :keyword:`or`. This *is* syntactically valid Python,\n however the semantics are different.\n\n You can change the semantics of the expression by passing the keyword\n argument ``parser='python'``. This enforces the same semantics as\n evaluation in Python space. Likewise, you can pass ``engine='python'``\n to evaluate an expression using Python itself as a backend. This is not\n recommended as it is inefficient compared to using ``numexpr`` as the\n engine.\n\n The :attr:`DataFrame.index` and\n :attr:`DataFrame.columns` attributes of the\n :class:`~pandas.DataFrame` instance are placed in the query namespace\n by default, which allows you to treat both the index and columns of the\n frame as a column in the frame.\n The identifier ``index`` is used for the frame index; you can also\n use the name of the index to identify it in a query. 
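        For instance, with a default ``RangeIndex`` (an illustrative sketch;
        the frame and column name here are arbitrary):

        >>> pd.DataFrame({'num_legs': [2, 4, 8]}).query('index < 2').shape
        (2, 1)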
Please note that\n Python keywords may not be used as identifiers.\n\n For further details and examples see the ``query`` documentation in\n :ref:`indexing <indexing.query>`.\n\n *Backtick quoted variables*\n\n Backtick quoted variables are parsed as literal Python code and\n are converted internally to a Python valid identifier.\n This can lead to the following problems.\n\n During parsing a number of disallowed characters inside the backtick\n quoted string are replaced by strings that are allowed as a Python identifier.\n These characters include all operators in Python, the space character, the\n question mark, the exclamation mark, the dollar sign, and the euro sign.\n For other characters that fall outside the ASCII range (U+0001..U+007F)\n and those that are not further specified in PEP 3131,\n the query parser will raise an error.\n This excludes whitespace different than the space character,\n but also the hashtag (as it is used for comments) and the backtick\n itself (backtick can also not be escaped).\n\n In a special case, quotes that make a pair around a backtick can\n confuse the parser.\n For example, ```it's` > `that's``` will raise an error,\n as it forms a quoted string (``'s > `that'``) with a backtick inside.\n\n See also the Python documentation about lexical analysis\n (https://docs.python.org/3/reference/lexical_analysis.html)\n in combination with the source code in :mod:`pandas.core.computation.parsing`.\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': range(1, 6),\n ... 'B': range(10, 0, -2),\n ... 'C C': range(10, 5, -1)})\n >>> df\n A B C C\n 0 1 10 10\n 1 2 8 9\n 2 3 6 8\n 3 4 4 7\n 4 5 2 6\n >>> df.query('A > B')\n A B C C\n 4 5 2 6\n\n The previous expression is equivalent to\n\n >>> df[df.A > df.B]\n A B C C\n 4 5 2 6\n\n For columns with spaces in their name, you can use backtick quoting.\n\n >>> df.query('B == `C C`')\n A B C C\n 0 1 10 10\n\n The previous expression is equivalent to\n\n >>> df[df.B == df['C C']]\n A B C C\n 0 1 10 10\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n if not isinstance(expr, str):\n msg = f\"expr must be a string to be evaluated, {type(expr)} given\"\n raise ValueError(msg)\n kwargs[\"level\"] = kwargs.pop(\"level\", 0) + 1\n kwargs[\"target\"] = None\n res = self.eval(expr, **kwargs)\n\n try:\n result = self.loc[res]\n except ValueError:\n # when res is multi-dimensional loc raises, but this is sometimes a\n # valid query\n result = self[res]\n\n if inplace:\n self._update_inplace(result)\n else:\n return result\n\n def eval(self, expr, inplace=False, **kwargs):\n \"\"\"\n Evaluate a string describing operations on DataFrame columns.\n\n Operates on columns only, not specific rows or elements. This allows\n `eval` to run arbitrary code, which can make you vulnerable to code\n injection if you pass user input to this function.\n\n Parameters\n ----------\n expr : str\n The expression string to evaluate.\n inplace : bool, default False\n If the expression contains an assignment, whether to perform the\n operation inplace and mutate the existing DataFrame. 
Otherwise,\n a new DataFrame is returned.\n **kwargs\n See the documentation for :func:`eval` for complete details\n on the keyword arguments accepted by\n :meth:`~pandas.DataFrame.query`.\n\n Returns\n -------\n ndarray, scalar, pandas object, or None\n The result of the evaluation or None if ``inplace=True``.\n\n See Also\n --------\n DataFrame.query : Evaluates a boolean expression to query the columns\n of a frame.\n DataFrame.assign : Can evaluate an expression or function to create new\n values for a column.\n eval : Evaluate a Python expression as a string using various\n backends.\n\n Notes\n -----\n For more details see the API documentation for :func:`~eval`.\n For detailed examples see :ref:`enhancing performance with eval\n <enhancingperf.eval>`.\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)})\n >>> df\n A B\n 0 1 10\n 1 2 8\n 2 3 6\n 3 4 4\n 4 5 2\n >>> df.eval('A + B')\n 0 11\n 1 10\n 2 9\n 3 8\n 4 7\n dtype: int64\n\n Assignment is allowed though by default the original DataFrame is not\n modified.\n\n >>> df.eval('C = A + B')\n A B C\n 0 1 10 11\n 1 2 8 10\n 2 3 6 9\n 3 4 4 8\n 4 5 2 7\n >>> df\n A B\n 0 1 10\n 1 2 8\n 2 3 6\n 3 4 4\n 4 5 2\n\n Use ``inplace=True`` to modify the original DataFrame.\n\n >>> df.eval('C = A + B', inplace=True)\n >>> df\n A B C\n 0 1 10 11\n 1 2 8 10\n 2 3 6 9\n 3 4 4 8\n 4 5 2 7\n\n Multiple columns can be assigned to using multi-line expressions:\n\n >>> df.eval(\n ... '''\n ... C = A + B\n ... D = A - B\n ... '''\n ... )\n A B C D\n 0 1 10 11 -9\n 1 2 8 10 -6\n 2 3 6 9 -3\n 3 4 4 8 0\n 4 5 2 7 3\n \"\"\"\n from pandas.core.computation.eval import eval as _eval\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n resolvers = kwargs.pop(\"resolvers\", None)\n kwargs[\"level\"] = kwargs.pop(\"level\", 0) + 1\n if resolvers is None:\n index_resolvers = self._get_index_resolvers()\n column_resolvers = self._get_cleaned_column_resolvers()\n resolvers = column_resolvers, index_resolvers\n if \"target\" not in kwargs:\n kwargs[\"target\"] = self\n kwargs[\"resolvers\"] = kwargs.get(\"resolvers\", ()) + tuple(resolvers)\n\n return _eval(expr, inplace=inplace, **kwargs)\n\n def select_dtypes(self, include=None, exclude=None) -> DataFrame:\n \"\"\"\n Return a subset of the DataFrame's columns based on the column dtypes.\n\n Parameters\n ----------\n include, exclude : scalar or list-like\n A selection of dtypes or strings to be included/excluded. 
At least\n one of these parameters must be supplied.\n\n Returns\n -------\n DataFrame\n The subset of the frame including the dtypes in ``include`` and\n excluding the dtypes in ``exclude``.\n\n Raises\n ------\n ValueError\n * If both of ``include`` and ``exclude`` are empty\n * If ``include`` and ``exclude`` have overlapping elements\n * If any kind of string dtype is passed in.\n\n See Also\n --------\n DataFrame.dtypes: Return Series with the data type of each column.\n\n Notes\n -----\n * To select all *numeric* types, use ``np.number`` or ``'number'``\n * To select strings you must use the ``object`` dtype, but note that\n this will return *all* object dtype columns\n * See the `numpy dtype hierarchy\n <https://numpy.org/doc/stable/reference/arrays.scalars.html>`__\n * To select datetimes, use ``np.datetime64``, ``'datetime'`` or\n ``'datetime64'``\n * To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or\n ``'timedelta64'``\n * To select Pandas categorical dtypes, use ``'category'``\n * To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in\n 0.20.0) or ``'datetime64[ns, tz]'``\n\n Examples\n --------\n >>> df = pd.DataFrame({'a': [1, 2] * 3,\n ... 'b': [True, False] * 3,\n ... 'c': [1.0, 2.0] * 3})\n >>> df\n a b c\n 0 1 True 1.0\n 1 2 False 2.0\n 2 1 True 1.0\n 3 2 False 2.0\n 4 1 True 1.0\n 5 2 False 2.0\n\n >>> df.select_dtypes(include='bool')\n b\n 0 True\n 1 False\n 2 True\n 3 False\n 4 True\n 5 False\n\n >>> df.select_dtypes(include=['float64'])\n c\n 0 1.0\n 1 2.0\n 2 1.0\n 3 2.0\n 4 1.0\n 5 2.0\n\n >>> df.select_dtypes(exclude=['int64'])\n b c\n 0 True 1.0\n 1 False 2.0\n 2 True 1.0\n 3 False 2.0\n 4 True 1.0\n 5 False 2.0\n \"\"\"\n if not is_list_like(include):\n include = (include,) if include is not None else ()\n if not is_list_like(exclude):\n exclude = (exclude,) if exclude is not None else ()\n\n selection = (frozenset(include), frozenset(exclude))\n\n if not any(selection):\n raise ValueError(\"at least one of include or exclude must be nonempty\")\n\n # convert the myriad valid dtypes object to a single representation\n include = frozenset(infer_dtype_from_object(x) for x in include)\n exclude = frozenset(infer_dtype_from_object(x) for x in exclude)\n for dtypes in (include, exclude):\n invalidate_string_dtypes(dtypes)\n\n # can't both include AND exclude!\n if not include.isdisjoint(exclude):\n raise ValueError(f\"include and exclude overlap on {(include & exclude)}\")\n\n # We raise when both include and exclude are empty\n # Hence, we can just shrink the columns we want to keep\n keep_these = np.full(self.shape[1], True)\n\n def extract_unique_dtypes_from_dtypes_set(\n dtypes_set: FrozenSet[Dtype], unique_dtypes: np.ndarray\n ) -> List[Dtype]:\n extracted_dtypes = [\n unique_dtype\n for unique_dtype in unique_dtypes\n # error: Argument 1 to \"tuple\" has incompatible type\n # \"FrozenSet[Union[ExtensionDtype, str, Any, Type[str],\n # Type[float], Type[int], Type[complex], Type[bool]]]\";\n # expected \"Iterable[Union[type, Tuple[Any, ...]]]\"\n if issubclass(\n unique_dtype.type, tuple(dtypes_set) # type: ignore[arg-type]\n )\n ]\n return extracted_dtypes\n\n unique_dtypes = self.dtypes.unique()\n\n if include:\n included_dtypes = extract_unique_dtypes_from_dtypes_set(\n include, unique_dtypes\n )\n keep_these &= self.dtypes.isin(included_dtypes)\n\n if exclude:\n excluded_dtypes = extract_unique_dtypes_from_dtypes_set(\n exclude, unique_dtypes\n )\n keep_these &= ~self.dtypes.isin(excluded_dtypes)\n\n return self.iloc[:, 
keep_these.values]\n\n def insert(self, loc, column, value, allow_duplicates=False) -> None:\n \"\"\"\n Insert column into DataFrame at specified location.\n\n Raises a ValueError if `column` is already contained in the DataFrame,\n unless `allow_duplicates` is set to True.\n\n Parameters\n ----------\n loc : int\n Insertion index. Must verify 0 <= loc <= len(columns).\n column : str, number, or hashable object\n Label of the inserted column.\n value : int, Series, or array-like\n allow_duplicates : bool, optional\n \"\"\"\n if allow_duplicates and not self.flags.allows_duplicate_labels:\n raise ValueError(\n \"Cannot specify 'allow_duplicates=True' when \"\n \"'self.flags.allows_duplicate_labels' is False.\"\n )\n self._ensure_valid_index(value)\n value = self._sanitize_column(column, value, broadcast=False)\n self._mgr.insert(loc, column, value, allow_duplicates=allow_duplicates)\n\n def assign(self, **kwargs) -> DataFrame:\n r\"\"\"\n Assign new columns to a DataFrame.\n\n Returns a new object with all original columns in addition to new ones.\n Existing columns that are re-assigned will be overwritten.\n\n Parameters\n ----------\n **kwargs : dict of {str: callable or Series}\n The column names are keywords. If the values are\n callable, they are computed on the DataFrame and\n assigned to the new columns. The callable must not\n change input DataFrame (though pandas doesn't check it).\n If the values are not callable, (e.g. a Series, scalar, or array),\n they are simply assigned.\n\n Returns\n -------\n DataFrame\n A new DataFrame with the new columns in addition to\n all the existing columns.\n\n Notes\n -----\n Assigning multiple columns within the same ``assign`` is possible.\n Later items in '\\*\\*kwargs' may refer to newly created or modified\n columns in 'df'; items are computed and assigned into 'df' in order.\n\n Examples\n --------\n >>> df = pd.DataFrame({'temp_c': [17.0, 25.0]},\n ... index=['Portland', 'Berkeley'])\n >>> df\n temp_c\n Portland 17.0\n Berkeley 25.0\n\n Where the value is a callable, evaluated on `df`:\n\n >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32)\n temp_c temp_f\n Portland 17.0 62.6\n Berkeley 25.0 77.0\n\n Alternatively, the same behavior can be achieved by directly\n referencing an existing Series or sequence:\n\n >>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32)\n temp_c temp_f\n Portland 17.0 62.6\n Berkeley 25.0 77.0\n\n You can create multiple columns within the same assign where one\n of the columns depends on another one defined within the same assign:\n\n >>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32,\n ... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9)\n temp_c temp_f temp_k\n Portland 17.0 62.6 290.15\n Berkeley 25.0 77.0 298.15\n \"\"\"\n data = self.copy()\n\n for k, v in kwargs.items():\n data[k] = com.apply_if_callable(v, data)\n return data\n\n def _sanitize_column(self, key, value, broadcast=True):\n \"\"\"\n Ensures new columns (which go into the BlockManager as new blocks) are\n always copied and converted into an array.\n\n Parameters\n ----------\n key : object\n value : scalar, Series, or array-like\n broadcast : bool, default True\n If ``key`` matches multiple duplicate column names in the\n DataFrame, this parameter indicates whether ``value`` should be\n tiled so that the returned array contains a (duplicated) column for\n each occurrence of the key. 
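            For example (an illustrative sketch; the frame below is assumed),
            assigning a one-dimensional value to a duplicated label fills
            every occurrence of that label:

            >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'a'])
            >>> df['a'] = [9, 9]  # tiled across both 'a' columns
            >>> df.shape
            (2, 2)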
If False, ``value`` will not be tiled.\n\n Returns\n -------\n numpy.ndarray\n \"\"\"\n\n def reindexer(value):\n # reindex if necessary\n\n if value.index.equals(self.index) or not len(self.index):\n value = value._values.copy()\n else:\n\n # GH 4107\n try:\n value = value.reindex(self.index)._values\n except ValueError as err:\n # raised in MultiIndex.from_tuples, see test_insert_error_msmgs\n if not value.index.is_unique:\n # duplicate axis\n raise err\n\n # other\n raise TypeError(\n \"incompatible index of inserted column with frame index\"\n ) from err\n return value\n\n if isinstance(value, Series):\n value = reindexer(value)\n\n elif isinstance(value, DataFrame):\n # align right-hand-side columns if self.columns\n # is multi-index and self[key] is a sub-frame\n if isinstance(self.columns, MultiIndex) and key in self.columns:\n loc = self.columns.get_loc(key)\n if isinstance(loc, (slice, Series, np.ndarray, Index)):\n cols = maybe_droplevels(self.columns[loc], key)\n if len(cols) and not cols.equals(value.columns):\n value = value.reindex(cols, axis=1)\n # now align rows\n value = reindexer(value).T\n\n elif isinstance(value, ExtensionArray):\n # Explicitly copy here, instead of in sanitize_index,\n # as sanitize_index won't copy an EA, even with copy=True\n value = value.copy()\n value = sanitize_index(value, self.index)\n\n elif isinstance(value, Index) or is_sequence(value):\n\n # turn me into an ndarray\n value = sanitize_index(value, self.index)\n if not isinstance(value, (np.ndarray, Index)):\n if isinstance(value, list) and len(value) > 0:\n value = maybe_convert_platform(value)\n else:\n value = com.asarray_tuplesafe(value)\n elif value.ndim == 2:\n value = value.copy().T\n elif isinstance(value, Index):\n value = value.copy(deep=True)\n else:\n value = value.copy()\n\n # possibly infer to datetimelike\n if is_object_dtype(value.dtype):\n value = maybe_infer_to_datetimelike(value)\n\n else:\n # cast ignores pandas dtypes. so save the dtype first\n infer_dtype, _ = infer_dtype_from_scalar(value, pandas_dtype=True)\n\n # upcast\n if is_extension_array_dtype(infer_dtype):\n value = construct_1d_arraylike_from_scalar(\n value, len(self.index), infer_dtype\n )\n else:\n # pandas\\core\\frame.py:3827: error: Argument 1 to\n # \"cast_scalar_to_array\" has incompatible type \"int\"; expected\n # \"Tuple[Any, ...]\" [arg-type]\n value = cast_scalar_to_array(\n len(self.index), value # type: ignore[arg-type]\n )\n\n value = maybe_cast_to_datetime(value, infer_dtype)\n\n # return internal types directly\n if is_extension_array_dtype(value):\n return value\n\n # broadcast across multiple columns if necessary\n if broadcast and key in self.columns and value.ndim == 1:\n if not self.columns.is_unique or isinstance(self.columns, MultiIndex):\n existing_piece = self[key]\n if isinstance(existing_piece, DataFrame):\n value = np.tile(value, (len(existing_piece.columns), 1))\n\n return np.atleast_2d(np.asarray(value))\n\n @property\n def _series(self):\n return {\n item: Series(\n self._mgr.iget(idx), index=self.index, name=item, fastpath=True\n )\n for idx, item in enumerate(self.columns)\n }\n\n def lookup(self, row_labels, col_labels) -> np.ndarray:\n \"\"\"\n Label-based \"fancy indexing\" function for DataFrame.\n Given equal-length arrays of row and column labels, return an\n array of the values corresponding to each (row, col) pair.\n\n .. 
deprecated:: 1.2.0\n DataFrame.lookup is deprecated,\n use DataFrame.melt and DataFrame.loc instead.\n For an example see :meth:`~pandas.DataFrame.lookup`\n in the user guide.\n\n Parameters\n ----------\n row_labels : sequence\n The row labels to use for lookup.\n col_labels : sequence\n The column labels to use for lookup.\n\n Returns\n -------\n numpy.ndarray\n The found values.\n \"\"\"\n msg = (\n \"The 'lookup' method is deprecated and will be\"\n \"removed in a future version.\"\n \"You can use DataFrame.melt and DataFrame.loc\"\n \"as a substitute.\"\n )\n warnings.warn(msg, FutureWarning, stacklevel=2)\n\n n = len(row_labels)\n if n != len(col_labels):\n raise ValueError(\"Row labels must have same size as column labels\")\n if not (self.index.is_unique and self.columns.is_unique):\n # GH#33041\n raise ValueError(\"DataFrame.lookup requires unique index and columns\")\n\n thresh = 1000\n if not self._is_mixed_type or n > thresh:\n values = self.values\n ridx = self.index.get_indexer(row_labels)\n cidx = self.columns.get_indexer(col_labels)\n if (ridx == -1).any():\n raise KeyError(\"One or more row labels was not found\")\n if (cidx == -1).any():\n raise KeyError(\"One or more column labels was not found\")\n flat_index = ridx * len(self.columns) + cidx\n result = values.flat[flat_index]\n else:\n result = np.empty(n, dtype=\"O\")\n for i, (r, c) in enumerate(zip(row_labels, col_labels)):\n result[i] = self._get_value(r, c)\n\n if is_object_dtype(result):\n result = lib.maybe_convert_objects(result)\n\n return result\n\n # ----------------------------------------------------------------------\n # Reindexing and alignment\n\n def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, copy):\n frame = self\n\n columns = axes[\"columns\"]\n if columns is not None:\n frame = frame._reindex_columns(\n columns, method, copy, level, fill_value, limit, tolerance\n )\n\n index = axes[\"index\"]\n if index is not None:\n frame = frame._reindex_index(\n index, method, copy, level, fill_value, limit, tolerance\n )\n\n return frame\n\n def _reindex_index(\n self,\n new_index,\n method,\n copy,\n level,\n fill_value=np.nan,\n limit=None,\n tolerance=None,\n ):\n new_index, indexer = self.index.reindex(\n new_index, method=method, level=level, limit=limit, tolerance=tolerance\n )\n return self._reindex_with_indexers(\n {0: [new_index, indexer]},\n copy=copy,\n fill_value=fill_value,\n allow_dups=False,\n )\n\n def _reindex_columns(\n self,\n new_columns,\n method,\n copy,\n level,\n fill_value=None,\n limit=None,\n tolerance=None,\n ):\n new_columns, indexer = self.columns.reindex(\n new_columns, method=method, level=level, limit=limit, tolerance=tolerance\n )\n return self._reindex_with_indexers(\n {1: [new_columns, indexer]},\n copy=copy,\n fill_value=fill_value,\n allow_dups=False,\n )\n\n def _reindex_multi(self, axes, copy, fill_value) -> DataFrame:\n \"\"\"\n We are guaranteed non-Nones in the axes.\n \"\"\"\n new_index, row_indexer = self.index.reindex(axes[\"index\"])\n new_columns, col_indexer = self.columns.reindex(axes[\"columns\"])\n\n if row_indexer is not None and col_indexer is not None:\n indexer = row_indexer, col_indexer\n new_values = algorithms.take_2d_multi(\n self.values, indexer, fill_value=fill_value\n )\n return self._constructor(new_values, index=new_index, columns=new_columns)\n else:\n return self._reindex_with_indexers(\n {0: [new_index, row_indexer], 1: [new_columns, col_indexer]},\n copy=copy,\n fill_value=fill_value,\n )\n\n @doc(NDFrame.align, 
**_shared_doc_kwargs)\n def align(\n self,\n other,\n join=\"outer\",\n axis=None,\n level=None,\n copy=True,\n fill_value=None,\n method=None,\n limit=None,\n fill_axis=0,\n broadcast_axis=None,\n ) -> DataFrame:\n return super().align(\n other,\n join=join,\n axis=axis,\n level=level,\n copy=copy,\n fill_value=fill_value,\n method=method,\n limit=limit,\n fill_axis=fill_axis,\n broadcast_axis=broadcast_axis,\n )\n\n @Appender(\n \"\"\"\n Examples\n --------\n >>> df = pd.DataFrame({\"A\": [1, 2, 3], \"B\": [4, 5, 6]})\n\n Change the row labels.\n\n >>> df.set_axis(['a', 'b', 'c'], axis='index')\n A B\n a 1 4\n b 2 5\n c 3 6\n\n Change the column labels.\n\n >>> df.set_axis(['I', 'II'], axis='columns')\n I II\n 0 1 4\n 1 2 5\n 2 3 6\n\n Now, update the labels inplace.\n\n >>> df.set_axis(['i', 'ii'], axis='columns', inplace=True)\n >>> df\n i ii\n 0 1 4\n 1 2 5\n 2 3 6\n \"\"\"\n )\n @Substitution(\n **_shared_doc_kwargs,\n extended_summary_sub=\" column or\",\n axis_description_sub=\", and 1 identifies the columns\",\n see_also_sub=\" or columns\",\n )\n @Appender(NDFrame.set_axis.__doc__)\n def set_axis(self, labels, axis: Axis = 0, inplace: bool = False):\n return super().set_axis(labels, axis=axis, inplace=inplace)\n\n @Substitution(**_shared_doc_kwargs)\n @Appender(NDFrame.reindex.__doc__)\n @rewrite_axis_style_signature(\n \"labels\",\n [\n (\"method\", None),\n (\"copy\", True),\n (\"level\", None),\n (\"fill_value\", np.nan),\n (\"limit\", None),\n (\"tolerance\", None),\n ],\n )\n def reindex(self, *args, **kwargs) -> DataFrame:\n axes = validate_axis_style_args(self, args, kwargs, \"labels\", \"reindex\")\n kwargs.update(axes)\n # Pop these, since the values are in `kwargs` under different names\n kwargs.pop(\"axis\", None)\n kwargs.pop(\"labels\", None)\n return super().reindex(**kwargs)\n\n def drop(\n self,\n labels=None,\n axis=0,\n index=None,\n columns=None,\n level=None,\n inplace=False,\n errors=\"raise\",\n ):\n \"\"\"\n Drop specified labels from rows or columns.\n\n Remove rows or columns by specifying label names and corresponding\n axis, or by specifying directly index or column names. When using a\n multi-index, labels on different levels can be removed by specifying\n the level.\n\n Parameters\n ----------\n labels : single label or list-like\n Index or column labels to drop.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Whether to drop labels from the index (0 or 'index') or\n columns (1 or 'columns').\n index : single label or list-like\n Alternative to specifying axis (``labels, axis=0``\n is equivalent to ``index=labels``).\n columns : single label or list-like\n Alternative to specifying axis (``labels, axis=1``\n is equivalent to ``columns=labels``).\n level : int or level name, optional\n For MultiIndex, level from which the labels will be removed.\n inplace : bool, default False\n If False, return a copy. 
Otherwise, do operation\n inplace and return None.\n errors : {'ignore', 'raise'}, default 'raise'\n If 'ignore', suppress error and only existing labels are\n dropped.\n\n Returns\n -------\n DataFrame or None\n DataFrame without the removed index or column labels or\n None if ``inplace=True``.\n\n Raises\n ------\n KeyError\n If any of the labels is not found in the selected axis.\n\n See Also\n --------\n DataFrame.loc : Label-location based indexer for selection by label.\n DataFrame.dropna : Return DataFrame with labels on given axis omitted\n where (all or any) data are missing.\n DataFrame.drop_duplicates : Return DataFrame with duplicate rows\n removed, optionally only considering certain columns.\n Series.drop : Return Series with specified index labels removed.\n\n Examples\n --------\n >>> df = pd.DataFrame(np.arange(12).reshape(3, 4),\n ... columns=['A', 'B', 'C', 'D'])\n >>> df\n A B C D\n 0 0 1 2 3\n 1 4 5 6 7\n 2 8 9 10 11\n\n Drop columns\n\n >>> df.drop(['B', 'C'], axis=1)\n A D\n 0 0 3\n 1 4 7\n 2 8 11\n\n >>> df.drop(columns=['B', 'C'])\n A D\n 0 0 3\n 1 4 7\n 2 8 11\n\n Drop a row by index\n\n >>> df.drop([0, 1])\n A B C D\n 2 8 9 10 11\n\n Drop columns and/or rows of MultiIndex DataFrame\n\n >>> midx = pd.MultiIndex(levels=[['lama', 'cow', 'falcon'],\n ... ['speed', 'weight', 'length']],\n ... codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2],\n ... [0, 1, 2, 0, 1, 2, 0, 1, 2]])\n >>> df = pd.DataFrame(index=midx, columns=['big', 'small'],\n ... data=[[45, 30], [200, 100], [1.5, 1], [30, 20],\n ... [250, 150], [1.5, 0.8], [320, 250],\n ... [1, 0.8], [0.3, 0.2]])\n >>> df\n big small\n lama speed 45.0 30.0\n weight 200.0 100.0\n length 1.5 1.0\n cow speed 30.0 20.0\n weight 250.0 150.0\n length 1.5 0.8\n falcon speed 320.0 250.0\n weight 1.0 0.8\n length 0.3 0.2\n\n >>> df.drop(index='cow', columns='small')\n big\n lama speed 45.0\n weight 200.0\n length 1.5\n falcon speed 320.0\n weight 1.0\n length 0.3\n\n >>> df.drop(index='length', level=1)\n big small\n lama speed 45.0 30.0\n weight 200.0 100.0\n cow speed 30.0 20.0\n weight 250.0 150.0\n falcon speed 320.0 250.0\n weight 1.0 0.8\n \"\"\"\n return super().drop(\n labels=labels,\n axis=axis,\n index=index,\n columns=columns,\n level=level,\n inplace=inplace,\n errors=errors,\n )\n\n @rewrite_axis_style_signature(\n \"mapper\",\n [(\"copy\", True), (\"inplace\", False), (\"level\", None), (\"errors\", \"ignore\")],\n )\n def rename(\n self,\n mapper: Optional[Renamer] = None,\n *,\n index: Optional[Renamer] = None,\n columns: Optional[Renamer] = None,\n axis: Optional[Axis] = None,\n copy: bool = True,\n inplace: bool = False,\n level: Optional[Level] = None,\n errors: str = \"ignore\",\n ) -> Optional[DataFrame]:\n \"\"\"\n Alter axes labels.\n\n Function / dict values must be unique (1-to-1). Labels not contained in\n a dict / Series will be left as-is. Extra labels listed don't throw an\n error.\n\n See the :ref:`user guide <basics.rename>` for more.\n\n Parameters\n ----------\n mapper : dict-like or function\n Dict-like or function transformations to apply to\n that axis' values. Use either ``mapper`` and ``axis`` to\n specify the axis to target with ``mapper``, or ``index`` and\n ``columns``.\n index : dict-like or function\n Alternative to specifying axis (``mapper, axis=0``\n is equivalent to ``index=mapper``).\n columns : dict-like or function\n Alternative to specifying axis (``mapper, axis=1``\n is equivalent to ``columns=mapper``).\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Axis to target with ``mapper``. 
Can be either the axis name\n ('index', 'columns') or number (0, 1). The default is 'index'.\n copy : bool, default True\n Also copy underlying data.\n inplace : bool, default False\n Whether to return a new DataFrame. If True then value of copy is\n ignored.\n level : int or level name, default None\n In case of a MultiIndex, only rename labels in the specified\n level.\n errors : {'ignore', 'raise'}, default 'ignore'\n If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`,\n or `columns` contains labels that are not present in the Index\n being transformed.\n If 'ignore', existing keys will be renamed and extra keys will be\n ignored.\n\n Returns\n -------\n DataFrame or None\n DataFrame with the renamed axis labels or None if ``inplace=True``.\n\n Raises\n ------\n KeyError\n If any of the labels is not found in the selected axis and\n \"errors='raise'\".\n\n See Also\n --------\n DataFrame.rename_axis : Set the name of the axis.\n\n Examples\n --------\n ``DataFrame.rename`` supports two calling conventions\n\n * ``(index=index_mapper, columns=columns_mapper, ...)``\n * ``(mapper, axis={'index', 'columns'}, ...)``\n\n We *highly* recommend using keyword arguments to clarify your\n intent.\n\n Rename columns using a mapping:\n\n >>> df = pd.DataFrame({\"A\": [1, 2, 3], \"B\": [4, 5, 6]})\n >>> df.rename(columns={\"A\": \"a\", \"B\": \"c\"})\n a c\n 0 1 4\n 1 2 5\n 2 3 6\n\n Rename index using a mapping:\n\n >>> df.rename(index={0: \"x\", 1: \"y\", 2: \"z\"})\n A B\n x 1 4\n y 2 5\n z 3 6\n\n Cast index labels to a different type:\n\n >>> df.index\n RangeIndex(start=0, stop=3, step=1)\n >>> df.rename(index=str).index\n Index(['0', '1', '2'], dtype='object')\n\n >>> df.rename(columns={\"A\": \"a\", \"B\": \"b\", \"C\": \"c\"}, errors=\"raise\")\n Traceback (most recent call last):\n KeyError: ['C'] not found in axis\n\n Using axis-style parameters:\n\n >>> df.rename(str.lower, axis='columns')\n a b\n 0 1 4\n 1 2 5\n 2 3 6\n\n >>> df.rename({1: 2, 2: 4}, axis='index')\n A B\n 0 1 4\n 2 2 5\n 4 3 6\n \"\"\"\n return super().rename(\n mapper=mapper,\n index=index,\n columns=columns,\n axis=axis,\n copy=copy,\n inplace=inplace,\n level=level,\n errors=errors,\n )\n\n @doc(NDFrame.fillna, **_shared_doc_kwargs)\n def fillna(\n self,\n value=None,\n method=None,\n axis=None,\n inplace=False,\n limit=None,\n downcast=None,\n ) -> Optional[DataFrame]:\n return super().fillna(\n value=value,\n method=method,\n axis=axis,\n inplace=inplace,\n limit=limit,\n downcast=downcast,\n )\n\n def pop(self, item: Label) -> Series:\n \"\"\"\n Return item and drop from frame. Raise KeyError if not found.\n\n Parameters\n ----------\n item : label\n Label of column to be popped.\n\n Returns\n -------\n Series\n\n Examples\n --------\n >>> df = pd.DataFrame([('falcon', 'bird', 389.0),\n ... ('parrot', 'bird', 24.0),\n ... ('lion', 'mammal', 80.5),\n ... ('monkey', 'mammal', np.nan)],\n ... 
columns=('name', 'class', 'max_speed'))\n >>> df\n name class max_speed\n 0 falcon bird 389.0\n 1 parrot bird 24.0\n 2 lion mammal 80.5\n 3 monkey mammal NaN\n\n >>> df.pop('class')\n 0 bird\n 1 bird\n 2 mammal\n 3 mammal\n Name: class, dtype: object\n\n >>> df\n name max_speed\n 0 falcon 389.0\n 1 parrot 24.0\n 2 lion 80.5\n 3 monkey NaN\n \"\"\"\n return super().pop(item=item)\n\n @doc(NDFrame.replace, **_shared_doc_kwargs)\n def replace(\n self,\n to_replace=None,\n value=None,\n inplace=False,\n limit=None,\n regex=False,\n method=\"pad\",\n ):\n return super().replace(\n to_replace=to_replace,\n value=value,\n inplace=inplace,\n limit=limit,\n regex=regex,\n method=method,\n )\n\n def _replace_columnwise(\n self, mapping: Dict[Label, Tuple[Any, Any]], inplace: bool, regex\n ):\n \"\"\"\n Dispatch to Series.replace column-wise.\n\n\n Parameters\n ----------\n mapping : dict\n of the form {col: (target, value)}\n inplace : bool\n regex : bool or same types as `to_replace` in DataFrame.replace\n\n Returns\n -------\n DataFrame or None\n \"\"\"\n # Operate column-wise\n res = self if inplace else self.copy()\n ax = self.columns\n\n for i in range(len(ax)):\n if ax[i] in mapping:\n ser = self.iloc[:, i]\n\n target, value = mapping[ax[i]]\n newobj = ser.replace(target, value, regex=regex)\n\n res.iloc[:, i] = newobj\n\n if inplace:\n return\n return res.__finalize__(self)\n\n @doc(NDFrame.shift, klass=_shared_doc_kwargs[\"klass\"])\n def shift(\n self, periods=1, freq=None, axis=0, fill_value=lib.no_default\n ) -> DataFrame:\n axis = self._get_axis_number(axis)\n\n ncols = len(self.columns)\n if axis == 1 and periods != 0 and fill_value is lib.no_default and ncols > 0:\n # We will infer fill_value to match the closest column\n\n if periods > 0:\n result = self.iloc[:, :-periods]\n for col in range(min(ncols, abs(periods))):\n # TODO(EA2D): doing this in a loop unnecessary with 2D EAs\n # Define filler inside loop so we get a copy\n filler = self.iloc[:, 0].shift(len(self))\n result.insert(0, col, filler, allow_duplicates=True)\n else:\n result = self.iloc[:, -periods:]\n for col in range(min(ncols, abs(periods))):\n # Define filler inside loop so we get a copy\n filler = self.iloc[:, -1].shift(len(self))\n result.insert(\n len(result.columns), col, filler, allow_duplicates=True\n )\n\n result.columns = self.columns.copy()\n return result\n\n return super().shift(\n periods=periods, freq=freq, axis=axis, fill_value=fill_value\n )\n\n def set_index(\n self, keys, drop=True, append=False, inplace=False, verify_integrity=False\n ):\n \"\"\"\n Set the DataFrame index using existing columns.\n\n Set the DataFrame index (row labels) using one or more existing\n columns or arrays (of the correct length). The index can replace the\n existing index or expand on it.\n\n Parameters\n ----------\n keys : label or array-like or list of labels/arrays\n This parameter can be either a single column key, a single array of\n the same length as the calling DataFrame, or a list containing an\n arbitrary combination of column keys and arrays. Here, \"array\"\n encompasses :class:`Series`, :class:`Index`, ``np.ndarray``, and\n instances of :class:`~collections.abc.Iterator`.\n drop : bool, default True\n Delete columns to be used as the new index.\n append : bool, default False\n Whether to append columns to existing index.\n inplace : bool, default False\n If True, modifies the DataFrame in place (do not create a new object).\n verify_integrity : bool, default False\n Check the new index for duplicates. 
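            For example (an illustrative sketch; the frame is assumed), a
            duplicated key is accepted silently under the default, while
            ``verify_integrity=True`` would raise a ``ValueError`` instead:

            >>> df = pd.DataFrame({'k': [1, 1], 'v': [10, 20]})
            >>> df.set_index('k').index.is_unique
            False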
Otherwise defer the check until\n necessary. Setting to False will improve the performance of this\n method.\n\n Returns\n -------\n DataFrame or None\n Changed row labels or None if ``inplace=True``.\n\n See Also\n --------\n DataFrame.reset_index : Opposite of set_index.\n DataFrame.reindex : Change to new indices or expand indices.\n DataFrame.reindex_like : Change to same indices as other DataFrame.\n\n Examples\n --------\n >>> df = pd.DataFrame({'month': [1, 4, 7, 10],\n ... 'year': [2012, 2014, 2013, 2014],\n ... 'sale': [55, 40, 84, 31]})\n >>> df\n month year sale\n 0 1 2012 55\n 1 4 2014 40\n 2 7 2013 84\n 3 10 2014 31\n\n Set the index to become the 'month' column:\n\n >>> df.set_index('month')\n year sale\n month\n 1 2012 55\n 4 2014 40\n 7 2013 84\n 10 2014 31\n\n Create a MultiIndex using columns 'year' and 'month':\n\n >>> df.set_index(['year', 'month'])\n sale\n year month\n 2012 1 55\n 2014 4 40\n 2013 7 84\n 2014 10 31\n\n Create a MultiIndex using an Index and a column:\n\n >>> df.set_index([pd.Index([1, 2, 3, 4]), 'year'])\n month sale\n year\n 1 2012 1 55\n 2 2014 4 40\n 3 2013 7 84\n 4 2014 10 31\n\n Create a MultiIndex using two Series:\n\n >>> s = pd.Series([1, 2, 3, 4])\n >>> df.set_index([s, s**2])\n month year sale\n 1 1 1 2012 55\n 2 4 4 2014 40\n 3 9 7 2013 84\n 4 16 10 2014 31\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n self._check_inplace_and_allows_duplicate_labels(inplace)\n if not isinstance(keys, list):\n keys = [keys]\n\n err_msg = (\n 'The parameter \"keys\" may be a column key, one-dimensional '\n \"array, or a list containing only valid column keys and \"\n \"one-dimensional arrays.\"\n )\n\n missing: List[Label] = []\n for col in keys:\n if isinstance(col, (Index, Series, np.ndarray, list, abc.Iterator)):\n # arrays are fine as long as they are one-dimensional\n # iterators get converted to list below\n if getattr(col, \"ndim\", 1) != 1:\n raise ValueError(err_msg)\n else:\n # everything else gets tried as a key; see GH 24969\n try:\n found = col in self.columns\n except TypeError as err:\n raise TypeError(\n f\"{err_msg}. 
Received column of type {type(col)}\"\n ) from err\n else:\n if not found:\n missing.append(col)\n\n if missing:\n raise KeyError(f\"None of {missing} are in the columns\")\n\n if inplace:\n frame = self\n else:\n frame = self.copy()\n\n arrays = []\n names: List[Label] = []\n if append:\n names = list(self.index.names)\n if isinstance(self.index, MultiIndex):\n for i in range(self.index.nlevels):\n arrays.append(self.index._get_level_values(i))\n else:\n arrays.append(self.index)\n\n to_remove: List[Label] = []\n for col in keys:\n if isinstance(col, MultiIndex):\n for n in range(col.nlevels):\n arrays.append(col._get_level_values(n))\n names.extend(col.names)\n elif isinstance(col, (Index, Series)):\n # if Index then not MultiIndex (treated above)\n arrays.append(col)\n names.append(col.name)\n elif isinstance(col, (list, np.ndarray)):\n arrays.append(col)\n names.append(None)\n elif isinstance(col, abc.Iterator):\n arrays.append(list(col))\n names.append(None)\n # from here, col can only be a column label\n else:\n arrays.append(frame[col]._values)\n names.append(col)\n if drop:\n to_remove.append(col)\n\n if len(arrays[-1]) != len(self):\n # check newest element against length of calling frame, since\n # ensure_index_from_sequences would not raise for append=False.\n raise ValueError(\n f\"Length mismatch: Expected {len(self)} rows, \"\n f\"received array of length {len(arrays[-1])}\"\n )\n\n index = ensure_index_from_sequences(arrays, names)\n\n if verify_integrity and not index.is_unique:\n duplicates = index[index.duplicated()].unique()\n raise ValueError(f\"Index has duplicate keys: {duplicates}\")\n\n # use set to handle duplicate column names gracefully in case of drop\n for c in set(to_remove):\n del frame[c]\n\n # clear up memory usage\n index._cleanup()\n\n frame.index = index\n\n if not inplace:\n return frame\n\n @overload\n # https://github.com/python/mypy/issues/6580\n # Overloaded function signatures 1 and 2 overlap with incompatible return types\n def reset_index( # type: ignore[misc]\n self,\n level: Optional[Union[Hashable, Sequence[Hashable]]] = ...,\n drop: bool = ...,\n inplace: Literal[False] = ...,\n col_level: Hashable = ...,\n col_fill: Label = ...,\n ) -> DataFrame:\n ...\n\n @overload\n def reset_index(\n self,\n level: Optional[Union[Hashable, Sequence[Hashable]]] = ...,\n drop: bool = ...,\n inplace: Literal[True] = ...,\n col_level: Hashable = ...,\n col_fill: Label = ...,\n ) -> None:\n ...\n\n def reset_index(\n self,\n level: Optional[Union[Hashable, Sequence[Hashable]]] = None,\n drop: bool = False,\n inplace: bool = False,\n col_level: Hashable = 0,\n col_fill: Label = \"\",\n ) -> Optional[DataFrame]:\n \"\"\"\n Reset the index, or a level of it.\n\n Reset the index of the DataFrame, and use the default one instead.\n If the DataFrame has a MultiIndex, this method can remove one or more\n levels.\n\n Parameters\n ----------\n level : int, str, tuple, or list, default None\n Only remove the given levels from the index. Removes all levels by\n default.\n drop : bool, default False\n Do not try to insert index into dataframe columns. This resets\n the index to the default integer index.\n inplace : bool, default False\n Modify the DataFrame in place (do not create a new object).\n col_level : int or str, default 0\n If the columns have multiple levels, determines which level the\n labels are inserted into. 
By default it is inserted into the first\n level.\n col_fill : object, default ''\n If the columns have multiple levels, determines how the other\n levels are named. If None then the index name is repeated.\n\n Returns\n -------\n DataFrame or None\n DataFrame with the new index or None if ``inplace=True``.\n\n See Also\n --------\n DataFrame.set_index : Opposite of reset_index.\n DataFrame.reindex : Change to new indices or expand indices.\n DataFrame.reindex_like : Change to same indices as other DataFrame.\n\n Examples\n --------\n >>> df = pd.DataFrame([('bird', 389.0),\n ... ('bird', 24.0),\n ... ('mammal', 80.5),\n ... ('mammal', np.nan)],\n ... index=['falcon', 'parrot', 'lion', 'monkey'],\n ... columns=('class', 'max_speed'))\n >>> df\n class max_speed\n falcon bird 389.0\n parrot bird 24.0\n lion mammal 80.5\n monkey mammal NaN\n\n When we reset the index, the old index is added as a column, and a\n new sequential index is used:\n\n >>> df.reset_index()\n index class max_speed\n 0 falcon bird 389.0\n 1 parrot bird 24.0\n 2 lion mammal 80.5\n 3 monkey mammal NaN\n\n We can use the `drop` parameter to avoid the old index being added as\n a column:\n\n >>> df.reset_index(drop=True)\n class max_speed\n 0 bird 389.0\n 1 bird 24.0\n 2 mammal 80.5\n 3 mammal NaN\n\n You can also use `reset_index` with `MultiIndex`.\n\n >>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'),\n ... ('bird', 'parrot'),\n ... ('mammal', 'lion'),\n ... ('mammal', 'monkey')],\n ... names=['class', 'name'])\n >>> columns = pd.MultiIndex.from_tuples([('speed', 'max'),\n ... ('species', 'type')])\n >>> df = pd.DataFrame([(389.0, 'fly'),\n ... ( 24.0, 'fly'),\n ... ( 80.5, 'run'),\n ... (np.nan, 'jump')],\n ... index=index,\n ... columns=columns)\n >>> df\n speed species\n max type\n class name\n bird falcon 389.0 fly\n parrot 24.0 fly\n mammal lion 80.5 run\n monkey NaN jump\n\n If the index has multiple levels, we can reset a subset of them:\n\n >>> df.reset_index(level='class')\n class speed species\n max type\n name\n falcon bird 389.0 fly\n parrot bird 24.0 fly\n lion mammal 80.5 run\n monkey mammal NaN jump\n\n If we are not dropping the index, by default, it is placed in the top\n level. 
We can place it in another level:\n\n >>> df.reset_index(level='class', col_level=1)\n speed species\n class max type\n name\n falcon bird 389.0 fly\n parrot bird 24.0 fly\n lion mammal 80.5 run\n monkey mammal NaN jump\n\n When the index is inserted under another level, we can specify under\n which one with the parameter `col_fill`:\n\n >>> df.reset_index(level='class', col_level=1, col_fill='species')\n species speed species\n class max type\n name\n falcon bird 389.0 fly\n parrot bird 24.0 fly\n lion mammal 80.5 run\n monkey mammal NaN jump\n\n If we specify a nonexistent level for `col_fill`, it is created:\n\n >>> df.reset_index(level='class', col_level=1, col_fill='genus')\n genus speed species\n class max type\n name\n falcon bird 389.0 fly\n parrot bird 24.0 fly\n lion mammal 80.5 run\n monkey mammal NaN jump\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n self._check_inplace_and_allows_duplicate_labels(inplace)\n if inplace:\n new_obj = self\n else:\n new_obj = self.copy()\n\n new_index = ibase.default_index(len(new_obj))\n if level is not None:\n if not isinstance(level, (tuple, list)):\n level = [level]\n level = [self.index._get_level_number(lev) for lev in level]\n if len(level) < self.index.nlevels:\n new_index = self.index.droplevel(level)\n\n if not drop:\n to_insert: Iterable[Tuple[Any, Optional[Any]]]\n if isinstance(self.index, MultiIndex):\n names = [\n (n if n is not None else f\"level_{i}\")\n for i, n in enumerate(self.index.names)\n ]\n to_insert = zip(self.index.levels, self.index.codes)\n else:\n default = \"index\" if \"index\" not in self else \"level_0\"\n names = [default] if self.index.name is None else [self.index.name]\n to_insert = ((self.index, None),)\n\n multi_col = isinstance(self.columns, MultiIndex)\n for i, (lev, lab) in reversed(list(enumerate(to_insert))):\n if not (level is None or i in level):\n continue\n name = names[i]\n if multi_col:\n col_name = list(name) if isinstance(name, tuple) else [name]\n if col_fill is None:\n if len(col_name) not in (1, self.columns.nlevels):\n raise ValueError(\n \"col_fill=None is incompatible \"\n f\"with incomplete column name {name}\"\n )\n col_fill = col_name[0]\n\n lev_num = self.columns._get_level_number(col_level)\n name_lst = [col_fill] * lev_num + col_name\n missing = self.columns.nlevels - len(name_lst)\n name_lst += [col_fill] * missing\n name = tuple(name_lst)\n # to ndarray and maybe infer different dtype\n level_values = maybe_casted_values(lev, lab)\n new_obj.insert(0, name, level_values)\n\n new_obj.index = new_index\n if not inplace:\n return new_obj\n\n return None\n\n # ----------------------------------------------------------------------\n # Reindex-based selection methods\n\n @doc(NDFrame.isna, klass=_shared_doc_kwargs[\"klass\"])\n def isna(self) -> DataFrame:\n result = self._constructor(self._mgr.isna(func=isna))\n return result.__finalize__(self, method=\"isna\")\n\n @doc(NDFrame.isna, klass=_shared_doc_kwargs[\"klass\"])\n def isnull(self) -> DataFrame:\n return self.isna()\n\n @doc(NDFrame.notna, klass=_shared_doc_kwargs[\"klass\"])\n def notna(self) -> DataFrame:\n return ~self.isna()\n\n @doc(NDFrame.notna, klass=_shared_doc_kwargs[\"klass\"])\n def notnull(self) -> DataFrame:\n return ~self.isna()\n\n def dropna(self, axis=0, how=\"any\", thresh=None, subset=None, inplace=False):\n \"\"\"\n Remove missing values.\n\n See the :ref:`User Guide <missing_data>` for more on which values are\n considered missing, and how to work with missing data.\n\n Parameters\n 
----------\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Determine if rows or columns which contain missing values are\n removed.\n\n * 0, or 'index' : Drop rows which contain missing values.\n * 1, or 'columns' : Drop columns which contain missing value.\n\n .. versionchanged:: 1.0.0\n\n Pass tuple or list to drop on multiple axes.\n Only a single axis is allowed.\n\n how : {'any', 'all'}, default 'any'\n Determine if row or column is removed from DataFrame, when we have\n at least one NA or all NA.\n\n * 'any' : If any NA values are present, drop that row or column.\n * 'all' : If all values are NA, drop that row or column.\n\n thresh : int, optional\n Require that many non-NA values.\n subset : array-like, optional\n Labels along other axis to consider, e.g. if you are dropping rows\n these would be a list of columns to include.\n inplace : bool, default False\n If True, do operation inplace and return None.\n\n Returns\n -------\n DataFrame or None\n DataFrame with NA entries dropped from it or None if ``inplace=True``.\n\n See Also\n --------\n DataFrame.isna: Indicate missing values.\n DataFrame.notna : Indicate existing (non-missing) values.\n DataFrame.fillna : Replace missing values.\n Series.dropna : Drop missing values.\n Index.dropna : Drop missing indices.\n\n Examples\n --------\n >>> df = pd.DataFrame({\"name\": ['Alfred', 'Batman', 'Catwoman'],\n ... \"toy\": [np.nan, 'Batmobile', 'Bullwhip'],\n ... \"born\": [pd.NaT, pd.Timestamp(\"1940-04-25\"),\n ... pd.NaT]})\n >>> df\n name toy born\n 0 Alfred NaN NaT\n 1 Batman Batmobile 1940-04-25\n 2 Catwoman Bullwhip NaT\n\n Drop the rows where at least one element is missing.\n\n >>> df.dropna()\n name toy born\n 1 Batman Batmobile 1940-04-25\n\n Drop the columns where at least one element is missing.\n\n >>> df.dropna(axis='columns')\n name\n 0 Alfred\n 1 Batman\n 2 Catwoman\n\n Drop the rows where all elements are missing.\n\n >>> df.dropna(how='all')\n name toy born\n 0 Alfred NaN NaT\n 1 Batman Batmobile 1940-04-25\n 2 Catwoman Bullwhip NaT\n\n Keep only the rows with at least 2 non-NA values.\n\n >>> df.dropna(thresh=2)\n name toy born\n 1 Batman Batmobile 1940-04-25\n 2 Catwoman Bullwhip NaT\n\n Define in which columns to look for missing values.\n\n >>> df.dropna(subset=['name', 'toy'])\n name toy born\n 1 Batman Batmobile 1940-04-25\n 2 Catwoman Bullwhip NaT\n\n Keep the DataFrame with valid entries in the same variable.\n\n >>> df.dropna(inplace=True)\n >>> df\n name toy born\n 1 Batman Batmobile 1940-04-25\n \"\"\"\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n if isinstance(axis, (tuple, list)):\n # GH20987\n raise TypeError(\"supplying multiple axes to axis is no longer supported.\")\n\n axis = self._get_axis_number(axis)\n agg_axis = 1 - axis\n\n agg_obj = self\n if subset is not None:\n ax = self._get_axis(agg_axis)\n indices = ax.get_indexer_for(subset)\n check = indices == -1\n if check.any():\n raise KeyError(list(np.compress(check, subset)))\n agg_obj = self.take(indices, axis=agg_axis)\n\n count = agg_obj.count(axis=agg_axis)\n\n if thresh is not None:\n mask = count >= thresh\n elif how == \"any\":\n mask = count == len(agg_obj._get_axis(agg_axis))\n elif how == \"all\":\n mask = count > 0\n else:\n if how is not None:\n raise ValueError(f\"invalid how option: {how}\")\n else:\n raise TypeError(\"must specify how or thresh\")\n\n result = self.loc(axis=axis)[mask]\n\n if inplace:\n self._update_inplace(result)\n else:\n return result\n\n def drop_duplicates(\n self,\n subset: 
Optional[Union[Hashable, Sequence[Hashable]]] = None,\n keep: Union[str, bool] = \"first\",\n inplace: bool = False,\n ignore_index: bool = False,\n ) -> Optional[DataFrame]:\n \"\"\"\n Return DataFrame with duplicate rows removed.\n\n Considering certain columns is optional. Indexes, including time indexes\n are ignored.\n\n Parameters\n ----------\n subset : column label or sequence of labels, optional\n Only consider certain columns for identifying duplicates, by\n default use all of the columns.\n keep : {'first', 'last', False}, default 'first'\n Determines which duplicates (if any) to keep.\n - ``first`` : Drop duplicates except for the first occurrence.\n - ``last`` : Drop duplicates except for the last occurrence.\n - False : Drop all duplicates.\n inplace : bool, default False\n Whether to drop duplicates in place or to return a copy.\n ignore_index : bool, default False\n If True, the resulting axis will be labeled 0, 1, …, n - 1.\n\n .. versionadded:: 1.0.0\n\n Returns\n -------\n DataFrame or None\n DataFrame with duplicates removed or None if ``inplace=True``.\n\n See Also\n --------\n DataFrame.value_counts: Count unique combinations of columns.\n\n Examples\n --------\n Consider dataset containing ramen rating.\n\n >>> df = pd.DataFrame({\n ... 'brand': ['Yum Yum', 'Yum Yum', 'Indomie', 'Indomie', 'Indomie'],\n ... 'style': ['cup', 'cup', 'cup', 'pack', 'pack'],\n ... 'rating': [4, 4, 3.5, 15, 5]\n ... })\n >>> df\n brand style rating\n 0 Yum Yum cup 4.0\n 1 Yum Yum cup 4.0\n 2 Indomie cup 3.5\n 3 Indomie pack 15.0\n 4 Indomie pack 5.0\n\n By default, it removes duplicate rows based on all columns.\n\n >>> df.drop_duplicates()\n brand style rating\n 0 Yum Yum cup 4.0\n 2 Indomie cup 3.5\n 3 Indomie pack 15.0\n 4 Indomie pack 5.0\n\n To remove duplicates on specific column(s), use ``subset``.\n\n >>> df.drop_duplicates(subset=['brand'])\n brand style rating\n 0 Yum Yum cup 4.0\n 2 Indomie cup 3.5\n\n To remove duplicates and keep last occurrences, use ``keep``.\n\n >>> df.drop_duplicates(subset=['brand', 'style'], keep='last')\n brand style rating\n 1 Yum Yum cup 4.0\n 2 Indomie cup 3.5\n 4 Indomie pack 5.0\n \"\"\"\n if self.empty:\n return self.copy()\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n ignore_index = validate_bool_kwarg(ignore_index, \"ignore_index\")\n duplicated = self.duplicated(subset, keep=keep)\n\n result = self[-duplicated]\n if ignore_index:\n result.index = ibase.default_index(len(result))\n\n if inplace:\n self._update_inplace(result)\n return None\n else:\n return result\n\n def duplicated(\n self,\n subset: Optional[Union[Hashable, Sequence[Hashable]]] = None,\n keep: Union[str, bool] = \"first\",\n ) -> Series:\n \"\"\"\n Return boolean Series denoting duplicate rows.\n\n Considering certain columns is optional.\n\n Parameters\n ----------\n subset : column label or sequence of labels, optional\n Only consider certain columns for identifying duplicates, by\n default use all of the columns.\n keep : {'first', 'last', False}, default 'first'\n Determines which duplicates (if any) to mark.\n\n - ``first`` : Mark duplicates as ``True`` except for the first occurrence.\n - ``last`` : Mark duplicates as ``True`` except for the last occurrence.\n - False : Mark all duplicates as ``True``.\n\n Returns\n -------\n Series\n Boolean series for each duplicated rows.\n\n See Also\n --------\n Index.duplicated : Equivalent method on index.\n Series.duplicated : Equivalent method on Series.\n Series.drop_duplicates : Remove duplicate values from 
Series.\n DataFrame.drop_duplicates : Remove duplicate values from DataFrame.\n\n Examples\n --------\n Consider dataset containing ramen rating.\n\n >>> df = pd.DataFrame({\n ... 'brand': ['Yum Yum', 'Yum Yum', 'Indomie', 'Indomie', 'Indomie'],\n ... 'style': ['cup', 'cup', 'cup', 'pack', 'pack'],\n ... 'rating': [4, 4, 3.5, 15, 5]\n ... })\n >>> df\n brand style rating\n 0 Yum Yum cup 4.0\n 1 Yum Yum cup 4.0\n 2 Indomie cup 3.5\n 3 Indomie pack 15.0\n 4 Indomie pack 5.0\n\n By default, for each set of duplicated values, the first occurrence\n is set on False and all others on True.\n\n >>> df.duplicated()\n 0 False\n 1 True\n 2 False\n 3 False\n 4 False\n dtype: bool\n\n By using 'last', the last occurrence of each set of duplicated values\n is set on False and all others on True.\n\n >>> df.duplicated(keep='last')\n 0 True\n 1 False\n 2 False\n 3 False\n 4 False\n dtype: bool\n\n By setting ``keep`` on False, all duplicates are True.\n\n >>> df.duplicated(keep=False)\n 0 True\n 1 True\n 2 False\n 3 False\n 4 False\n dtype: bool\n\n To find duplicates on specific column(s), use ``subset``.\n\n >>> df.duplicated(subset=['brand'])\n 0 False\n 1 True\n 2 False\n 3 True\n 4 True\n dtype: bool\n \"\"\"\n from pandas._libs.hashtable import SIZE_HINT_LIMIT, duplicated_int64\n\n if self.empty:\n return self._constructor_sliced(dtype=bool)\n\n def f(vals):\n labels, shape = algorithms.factorize(\n vals, size_hint=min(len(self), SIZE_HINT_LIMIT)\n )\n return labels.astype(\"i8\", copy=False), len(shape)\n\n if subset is None:\n subset = self.columns\n elif (\n not np.iterable(subset)\n or isinstance(subset, str)\n or isinstance(subset, tuple)\n and subset in self.columns\n ):\n subset = (subset,)\n\n # needed for mypy since can't narrow types using np.iterable\n subset = cast(Iterable, subset)\n\n # Verify all columns in subset exist in the queried dataframe\n # Otherwise, raise a KeyError, same as if you try to __getitem__ with a\n # key that doesn't exist.\n diff = Index(subset).difference(self.columns)\n if not diff.empty:\n raise KeyError(diff)\n\n vals = (col.values for name, col in self.items() if name in subset)\n labels, shape = map(list, zip(*map(f, vals)))\n\n ids = get_group_index(labels, shape, sort=False, xnull=False)\n result = self._constructor_sliced(duplicated_int64(ids, keep), index=self.index)\n return result.__finalize__(self, method=\"duplicated\")\n\n # ----------------------------------------------------------------------\n # Sorting\n # TODO: Just move the sort_values doc here.\n @Substitution(**_shared_doc_kwargs)\n @Appender(NDFrame.sort_values.__doc__)\n # error: Signature of \"sort_values\" incompatible with supertype \"NDFrame\"\n def sort_values( # type: ignore[override]\n self,\n by,\n axis=0,\n ascending=True,\n inplace=False,\n kind=\"quicksort\",\n na_position=\"last\",\n ignore_index=False,\n key: ValueKeyFunc = None,\n ):\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n axis = self._get_axis_number(axis)\n\n if not isinstance(by, list):\n by = [by]\n if is_sequence(ascending) and len(by) != len(ascending):\n raise ValueError(\n f\"Length of ascending ({len(ascending)}) != length of by ({len(by)})\"\n )\n if len(by) > 1:\n\n keys = [self._get_label_or_level_values(x, axis=axis) for x in by]\n\n # need to rewrap columns in Series to apply key function\n if key is not None:\n keys = [Series(k, name=name) for (k, name) in zip(keys, by)]\n\n indexer = lexsort_indexer(\n keys, orders=ascending, na_position=na_position, key=key\n )\n indexer = 
ensure_platform_int(indexer)\n else:\n\n by = by[0]\n k = self._get_label_or_level_values(by, axis=axis)\n\n # need to rewrap column in Series to apply key function\n if key is not None:\n k = Series(k, name=by)\n\n if isinstance(ascending, (tuple, list)):\n ascending = ascending[0]\n\n indexer = nargsort(\n k, kind=kind, ascending=ascending, na_position=na_position, key=key\n )\n\n new_data = self._mgr.take(\n indexer, axis=self._get_block_manager_axis(axis), verify=False\n )\n\n if ignore_index:\n new_data.axes[1] = ibase.default_index(len(indexer))\n\n result = self._constructor(new_data)\n if inplace:\n return self._update_inplace(result)\n else:\n return result.__finalize__(self, method=\"sort_values\")\n\n def sort_index(\n self,\n axis=0,\n level=None,\n ascending: bool = True,\n inplace: bool = False,\n kind: str = \"quicksort\",\n na_position: str = \"last\",\n sort_remaining: bool = True,\n ignore_index: bool = False,\n key: IndexKeyFunc = None,\n ):\n \"\"\"\n Sort object by labels (along an axis).\n\n Returns a new DataFrame sorted by label if `inplace` argument is\n ``False``, otherwise updates the original DataFrame and returns None.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis along which to sort. The value 0 identifies the rows,\n and 1 identifies the columns.\n level : int or level name or list of ints or list of level names\n If not None, sort on values in specified index level(s).\n ascending : bool or list of bools, default True\n Sort ascending vs. descending. When the index is a MultiIndex the\n sort direction can be controlled for each level individually.\n inplace : bool, default False\n If True, perform operation in-place.\n kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'\n Choice of sorting algorithm. See also ndarray.np.sort for more\n information. `mergesort` is the only stable algorithm. For\n DataFrames, this option is only applied when sorting on a single\n column or label.\n na_position : {'first', 'last'}, default 'last'\n Puts NaNs at the beginning if `first`; `last` puts NaNs at the end.\n Not implemented for MultiIndex.\n sort_remaining : bool, default True\n If True and sorting by level and index is multilevel, sort by other\n levels too (in order) after sorting by specified level.\n ignore_index : bool, default False\n If True, the resulting axis will be labeled 0, 1, …, n - 1.\n\n .. versionadded:: 1.0.0\n\n key : callable, optional\n If not None, apply the key function to the index values\n before sorting. This is similar to the `key` argument in the\n builtin :meth:`sorted` function, with the notable difference that\n this `key` function should be *vectorized*. It should expect an\n ``Index`` and return an ``Index`` of the same shape. For MultiIndex\n inputs, the key is applied *per level*.\n\n .. versionadded:: 1.1.0\n\n Returns\n -------\n DataFrame or None\n The original DataFrame sorted by the labels or None if ``inplace=True``.\n\n See Also\n --------\n Series.sort_index : Sort Series by the index.\n DataFrame.sort_values : Sort DataFrame by the value.\n Series.sort_values : Sort Series by the value.\n\n Examples\n --------\n >>> df = pd.DataFrame([1, 2, 3, 4, 5], index=[100, 29, 234, 1, 150],\n ... 
columns=['A'])\n >>> df.sort_index()\n A\n 1 4\n 29 2\n 100 1\n 150 5\n 234 3\n\n By default, it sorts in ascending order, to sort in descending order,\n use ``ascending=False``\n\n >>> df.sort_index(ascending=False)\n A\n 234 3\n 150 5\n 100 1\n 29 2\n 1 4\n\n A key function can be specified which is applied to the index before\n sorting. For a ``MultiIndex`` this is applied to each level separately.\n\n >>> df = pd.DataFrame({\"a\": [1, 2, 3, 4]}, index=['A', 'b', 'C', 'd'])\n >>> df.sort_index(key=lambda x: x.str.lower())\n a\n A 1\n b 2\n C 3\n d 4\n \"\"\"\n return super().sort_index(\n axis,\n level,\n ascending,\n inplace,\n kind,\n na_position,\n sort_remaining,\n ignore_index,\n key,\n )\n\n def value_counts(\n self,\n subset: Optional[Sequence[Label]] = None,\n normalize: bool = False,\n sort: bool = True,\n ascending: bool = False,\n ):\n \"\"\"\n Return a Series containing counts of unique rows in the DataFrame.\n\n .. versionadded:: 1.1.0\n\n Parameters\n ----------\n subset : list-like, optional\n Columns to use when counting unique combinations.\n normalize : bool, default False\n Return proportions rather than frequencies.\n sort : bool, default True\n Sort by frequencies.\n ascending : bool, default False\n Sort in ascending order.\n\n Returns\n -------\n Series\n\n See Also\n --------\n Series.value_counts: Equivalent method on Series.\n\n Notes\n -----\n The returned Series will have a MultiIndex with one level per input\n column. By default, rows that contain any NA values are omitted from\n the result. By default, the resulting Series will be in descending\n order so that the first element is the most frequently-occurring row.\n\n Examples\n --------\n >>> df = pd.DataFrame({'num_legs': [2, 4, 4, 6],\n ... 'num_wings': [2, 0, 0, 0]},\n ... index=['falcon', 'dog', 'cat', 'ant'])\n >>> df\n num_legs num_wings\n falcon 2 2\n dog 4 0\n cat 4 0\n ant 6 0\n\n >>> df.value_counts()\n num_legs num_wings\n 4 0 2\n 2 2 1\n 6 0 1\n dtype: int64\n\n >>> df.value_counts(sort=False)\n num_legs num_wings\n 2 2 1\n 4 0 2\n 6 0 1\n dtype: int64\n\n >>> df.value_counts(ascending=True)\n num_legs num_wings\n 2 2 1\n 6 0 1\n 4 0 2\n dtype: int64\n\n >>> df.value_counts(normalize=True)\n num_legs num_wings\n 4 0 0.50\n 2 2 0.25\n 6 0 0.25\n dtype: float64\n \"\"\"\n if subset is None:\n subset = self.columns.tolist()\n\n counts = self.groupby(subset).grouper.size()\n\n if sort:\n counts = counts.sort_values(ascending=ascending)\n if normalize:\n counts /= counts.sum()\n\n # Force MultiIndex for single column\n if len(subset) == 1:\n counts.index = MultiIndex.from_arrays(\n [counts.index], names=[counts.index.name]\n )\n\n return counts\n\n def nlargest(self, n, columns, keep=\"first\") -> DataFrame:\n \"\"\"\n Return the first `n` rows ordered by `columns` in descending order.\n\n Return the first `n` rows with the largest values in `columns`, in\n descending order. The columns that are not specified are returned as\n well, but not used for ordering.\n\n This method is equivalent to\n ``df.sort_values(columns, ascending=False).head(n)``, but more\n performant.\n\n Parameters\n ----------\n n : int\n Number of rows to return.\n columns : label or list of labels\n Column label(s) to order by.\n keep : {'first', 'last', 'all'}, default 'first'\n Where there are duplicate values:\n\n - `first` : prioritize the first occurrence(s)\n - `last` : prioritize the last occurrence(s)\n - ``all`` : do not drop any duplicates, even it means\n selecting more than `n` items.\n\n .. 
versionadded:: 0.24.0\n\n Returns\n -------\n DataFrame\n The first `n` rows ordered by the given columns in descending\n order.\n\n See Also\n --------\n DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in\n ascending order.\n DataFrame.sort_values : Sort DataFrame by the values.\n DataFrame.head : Return the first `n` rows without re-ordering.\n\n Notes\n -----\n This function cannot be used with all column types. For example, when\n specifying columns with `object` or `category` dtypes, ``TypeError`` is\n raised.\n\n Examples\n --------\n >>> df = pd.DataFrame({'population': [59000000, 65000000, 434000,\n ... 434000, 434000, 337000, 11300,\n ... 11300, 11300],\n ... 'GDP': [1937894, 2583560 , 12011, 4520, 12128,\n ... 17036, 182, 38, 311],\n ... 'alpha-2': [\"IT\", \"FR\", \"MT\", \"MV\", \"BN\",\n ... \"IS\", \"NR\", \"TV\", \"AI\"]},\n ... index=[\"Italy\", \"France\", \"Malta\",\n ... \"Maldives\", \"Brunei\", \"Iceland\",\n ... \"Nauru\", \"Tuvalu\", \"Anguilla\"])\n >>> df\n population GDP alpha-2\n Italy 59000000 1937894 IT\n France 65000000 2583560 FR\n Malta 434000 12011 MT\n Maldives 434000 4520 MV\n Brunei 434000 12128 BN\n Iceland 337000 17036 IS\n Nauru 11300 182 NR\n Tuvalu 11300 38 TV\n Anguilla 11300 311 AI\n\n In the following example, we will use ``nlargest`` to select the three\n rows having the largest values in column \"population\".\n\n >>> df.nlargest(3, 'population')\n population GDP alpha-2\n France 65000000 2583560 FR\n Italy 59000000 1937894 IT\n Malta 434000 12011 MT\n\n When using ``keep='last'``, ties are resolved in reverse order:\n\n >>> df.nlargest(3, 'population', keep='last')\n population GDP alpha-2\n France 65000000 2583560 FR\n Italy 59000000 1937894 IT\n Brunei 434000 12128 BN\n\n When using ``keep='all'``, all duplicate items are maintained:\n\n >>> df.nlargest(3, 'population', keep='all')\n population GDP alpha-2\n France 65000000 2583560 FR\n Italy 59000000 1937894 IT\n Malta 434000 12011 MT\n Maldives 434000 4520 MV\n Brunei 434000 12128 BN\n\n To order by the largest values in column \"population\" and then \"GDP\",\n we can specify multiple columns like in the next example.\n\n >>> df.nlargest(3, ['population', 'GDP'])\n population GDP alpha-2\n France 65000000 2583560 FR\n Italy 59000000 1937894 IT\n Brunei 434000 12128 BN\n \"\"\"\n return algorithms.SelectNFrame(self, n=n, keep=keep, columns=columns).nlargest()\n\n def nsmallest(self, n, columns, keep=\"first\") -> DataFrame:\n \"\"\"\n Return the first `n` rows ordered by `columns` in ascending order.\n\n Return the first `n` rows with the smallest values in `columns`, in\n ascending order. The columns that are not specified are returned as\n well, but not used for ordering.\n\n This method is equivalent to\n ``df.sort_values(columns, ascending=True).head(n)``, but more\n performant.\n\n Parameters\n ----------\n n : int\n Number of items to retrieve.\n columns : list or str\n Column name or names to order by.\n keep : {'first', 'last', 'all'}, default 'first'\n Where there are duplicate values:\n\n - ``first`` : take the first occurrence.\n - ``last`` : take the last occurrence.\n - ``all`` : do not drop any duplicates, even it means\n selecting more than `n` items.\n\n .. 
versionadded:: 0.24.0\n\n Returns\n -------\n DataFrame\n\n See Also\n --------\n DataFrame.nlargest : Return the first `n` rows ordered by `columns` in\n descending order.\n DataFrame.sort_values : Sort DataFrame by the values.\n DataFrame.head : Return the first `n` rows without re-ordering.\n\n Examples\n --------\n >>> df = pd.DataFrame({'population': [59000000, 65000000, 434000,\n ... 434000, 434000, 337000, 337000,\n ... 11300, 11300],\n ... 'GDP': [1937894, 2583560 , 12011, 4520, 12128,\n ... 17036, 182, 38, 311],\n ... 'alpha-2': [\"IT\", \"FR\", \"MT\", \"MV\", \"BN\",\n ... \"IS\", \"NR\", \"TV\", \"AI\"]},\n ... index=[\"Italy\", \"France\", \"Malta\",\n ... \"Maldives\", \"Brunei\", \"Iceland\",\n ... \"Nauru\", \"Tuvalu\", \"Anguilla\"])\n >>> df\n population GDP alpha-2\n Italy 59000000 1937894 IT\n France 65000000 2583560 FR\n Malta 434000 12011 MT\n Maldives 434000 4520 MV\n Brunei 434000 12128 BN\n Iceland 337000 17036 IS\n Nauru 337000 182 NR\n Tuvalu 11300 38 TV\n Anguilla 11300 311 AI\n\n In the following example, we will use ``nsmallest`` to select the\n three rows having the smallest values in column \"population\".\n\n >>> df.nsmallest(3, 'population')\n population GDP alpha-2\n Tuvalu 11300 38 TV\n Anguilla 11300 311 AI\n Iceland 337000 17036 IS\n\n When using ``keep='last'``, ties are resolved in reverse order:\n\n >>> df.nsmallest(3, 'population', keep='last')\n population GDP alpha-2\n Anguilla 11300 311 AI\n Tuvalu 11300 38 TV\n Nauru 337000 182 NR\n\n When using ``keep='all'``, all duplicate items are maintained:\n\n >>> df.nsmallest(3, 'population', keep='all')\n population GDP alpha-2\n Tuvalu 11300 38 TV\n Anguilla 11300 311 AI\n Iceland 337000 17036 IS\n Nauru 337000 182 NR\n\n To order by the smallest values in column \"population\" and then \"GDP\", we can\n specify multiple columns like in the next example.\n\n >>> df.nsmallest(3, ['population', 'GDP'])\n population GDP alpha-2\n Tuvalu 11300 38 TV\n Anguilla 11300 311 AI\n Nauru 337000 182 NR\n \"\"\"\n return algorithms.SelectNFrame(\n self, n=n, keep=keep, columns=columns\n ).nsmallest()\n\n def swaplevel(self, i=-2, j=-1, axis=0) -> DataFrame:\n \"\"\"\n Swap levels i and j in a MultiIndex on a particular axis.\n\n Parameters\n ----------\n i, j : int or str\n Levels of the indices to be swapped. Can pass level name as string.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to swap levels on. 0 or 'index' for row-wise, 1 or\n 'columns' for column-wise.\n\n Returns\n -------\n DataFrame\n \"\"\"\n result = self.copy()\n\n axis = self._get_axis_number(axis)\n\n if not isinstance(result._get_axis(axis), MultiIndex): # pragma: no cover\n raise TypeError(\"Can only swap levels on a hierarchical axis.\")\n\n if axis == 0:\n assert isinstance(result.index, MultiIndex)\n result.index = result.index.swaplevel(i, j)\n else:\n assert isinstance(result.columns, MultiIndex)\n result.columns = result.columns.swaplevel(i, j)\n return result\n\n def reorder_levels(self, order, axis=0) -> DataFrame:\n \"\"\"\n Rearrange index levels using input order. May not drop or duplicate levels.\n\n Parameters\n ----------\n order : list of int or list of str\n List representing new level order. 
Reference level by number\n (position) or by key (label).\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Where to reorder levels.\n\n Returns\n -------\n DataFrame\n \"\"\"\n axis = self._get_axis_number(axis)\n if not isinstance(self._get_axis(axis), MultiIndex): # pragma: no cover\n raise TypeError(\"Can only reorder levels on a hierarchical axis.\")\n\n result = self.copy()\n\n if axis == 0:\n assert isinstance(result.index, MultiIndex)\n result.index = result.index.reorder_levels(order)\n else:\n assert isinstance(result.columns, MultiIndex)\n result.columns = result.columns.reorder_levels(order)\n return result\n\n # ----------------------------------------------------------------------\n # Arithmetic Methods\n\n def _cmp_method(self, other, op):\n axis = 1 # only relevant for Series other case\n\n self, other = ops.align_method_FRAME(self, other, axis, flex=False, level=None)\n\n # See GH#4537 for discussion of scalar op behavior\n new_data = self._dispatch_frame_op(other, op, axis=axis)\n return self._construct_result(new_data)\n\n def _arith_method(self, other, op):\n if ops.should_reindex_frame_op(self, other, op, 1, 1, None, None):\n return ops.frame_arith_method_with_reindex(self, other, op)\n\n axis = 1 # only relevant for Series other case\n\n self, other = ops.align_method_FRAME(self, other, axis, flex=True, level=None)\n\n new_data = self._dispatch_frame_op(other, op, axis=axis)\n return self._construct_result(new_data)\n\n _logical_method = _arith_method\n\n def _dispatch_frame_op(self, right, func, axis: Optional[int] = None):\n \"\"\"\n Evaluate the frame operation func(left, right) by evaluating\n column-by-column, dispatching to the Series implementation.\n\n Parameters\n ----------\n right : scalar, Series, or DataFrame\n func : arithmetic or comparison operator\n axis : {None, 0, 1}\n\n Returns\n -------\n DataFrame\n \"\"\"\n # Get the appropriate array-op to apply to each column/block's values.\n array_op = ops.get_array_op(func)\n\n right = lib.item_from_zerodim(right)\n if not is_list_like(right):\n # i.e. 
scalar, faster than checking np.ndim(right) == 0\n bm = self._mgr.apply(array_op, right=right)\n return type(self)(bm)\n\n elif isinstance(right, DataFrame):\n assert self.index.equals(right.index)\n assert self.columns.equals(right.columns)\n # TODO: The previous assertion `assert right._indexed_same(self)`\n # fails in cases with empty columns reached via\n # _frame_arith_method_with_reindex\n\n bm = self._mgr.operate_blockwise(right._mgr, array_op)\n return type(self)(bm)\n\n elif isinstance(right, Series) and axis == 1:\n # axis=1 means we want to operate row-by-row\n assert right.index.equals(self.columns)\n\n right = right._values\n # maybe_align_as_frame ensures we do not have an ndarray here\n assert not isinstance(right, np.ndarray)\n\n arrays = [\n array_op(_left, _right)\n for _left, _right in zip(self._iter_column_arrays(), right)\n ]\n\n elif isinstance(right, Series):\n assert right.index.equals(self.index) # Handle other cases later\n right = right._values\n\n arrays = [array_op(left, right) for left in self._iter_column_arrays()]\n\n else:\n # Remaining cases have less-obvious dispatch rules\n raise NotImplementedError(right)\n\n return type(self)._from_arrays(\n arrays, self.columns, self.index, verify_integrity=False\n )\n\n def _combine_frame(self, other: DataFrame, func, fill_value=None):\n # at this point we have `self._indexed_same(other)`\n\n if fill_value is None:\n # since _arith_op may be called in a loop, avoid function call\n # overhead if possible by doing this check once\n _arith_op = func\n\n else:\n\n def _arith_op(left, right):\n # for the mixed_type case where we iterate over columns,\n # _arith_op(left, right) is equivalent to\n # left._binop(right, func, fill_value=fill_value)\n left, right = ops.fill_binop(left, right, fill_value)\n return func(left, right)\n\n new_data = self._dispatch_frame_op(other, _arith_op)\n return new_data\n\n def _construct_result(self, result) -> DataFrame:\n \"\"\"\n Wrap the result of an arithmetic, comparison, or logical operation.\n\n Parameters\n ----------\n result : DataFrame\n\n Returns\n -------\n DataFrame\n \"\"\"\n out = self._constructor(result, copy=False)\n # Pin columns instead of passing to constructor for compat with\n # non-unique columns case\n out.columns = self.columns\n out.index = self.index\n return out\n\n def __divmod__(self, other) -> Tuple[DataFrame, DataFrame]:\n # Naive implementation, room for optimization\n div = self // other\n mod = self - div * other\n return div, mod\n\n def __rdivmod__(self, other) -> Tuple[DataFrame, DataFrame]:\n # Naive implementation, room for optimization\n div = other // self\n mod = other - div * self\n return div, mod\n\n # ----------------------------------------------------------------------\n # Combination-Related\n\n @doc(\n _shared_docs[\"compare\"],\n \"\"\"\nReturns\n-------\nDataFrame\n DataFrame that shows the differences stacked side by side.\n\n The resulting index will be a MultiIndex with 'self' and 'other'\n stacked alternately at the inner level.\n\nRaises\n------\nValueError\n When the two DataFrames don't have identical labels or shape.\n\nSee Also\n--------\nSeries.compare : Compare with another Series and show differences.\nDataFrame.equals : Test whether two objects contain the same elements.\n\nNotes\n-----\nMatching NaNs will not appear as a difference.\n\nCan only compare identically-labeled\n(i.e. same shape, identical row and column labels) DataFrames\n\nExamples\n--------\n>>> df = pd.DataFrame(\n... {{\n... 
\"col1\": [\"a\", \"a\", \"b\", \"b\", \"a\"],\n... \"col2\": [1.0, 2.0, 3.0, np.nan, 5.0],\n... \"col3\": [1.0, 2.0, 3.0, 4.0, 5.0]\n... }},\n... columns=[\"col1\", \"col2\", \"col3\"],\n... )\n>>> df\n col1 col2 col3\n0 a 1.0 1.0\n1 a 2.0 2.0\n2 b 3.0 3.0\n3 b NaN 4.0\n4 a 5.0 5.0\n\n>>> df2 = df.copy()\n>>> df2.loc[0, 'col1'] = 'c'\n>>> df2.loc[2, 'col3'] = 4.0\n>>> df2\n col1 col2 col3\n0 c 1.0 1.0\n1 a 2.0 2.0\n2 b 3.0 4.0\n3 b NaN 4.0\n4 a 5.0 5.0\n\nAlign the differences on columns\n\n>>> df.compare(df2)\n col1 col3\n self other self other\n0 a c NaN NaN\n2 NaN NaN 3.0 4.0\n\nStack the differences on rows\n\n>>> df.compare(df2, align_axis=0)\n col1 col3\n0 self a NaN\n other c NaN\n2 self NaN 3.0\n other NaN 4.0\n\nKeep the equal values\n\n>>> df.compare(df2, keep_equal=True)\n col1 col3\n self other self other\n0 a c 1.0 1.0\n2 b b 3.0 4.0\n\nKeep all original rows and columns\n\n>>> df.compare(df2, keep_shape=True)\n col1 col2 col3\n self other self other self other\n0 a c NaN NaN NaN NaN\n1 NaN NaN NaN NaN NaN NaN\n2 NaN NaN NaN NaN 3.0 4.0\n3 NaN NaN NaN NaN NaN NaN\n4 NaN NaN NaN NaN NaN NaN\n\nKeep all original rows and columns and also all original values\n\n>>> df.compare(df2, keep_shape=True, keep_equal=True)\n col1 col2 col3\n self other self other self other\n0 a c 1.0 1.0 1.0 1.0\n1 a a 2.0 2.0 2.0 2.0\n2 b b 3.0 3.0 3.0 4.0\n3 b b NaN NaN 4.0 4.0\n4 a a 5.0 5.0 5.0 5.0\n\"\"\",\n klass=_shared_doc_kwargs[\"klass\"],\n )\n def compare(\n self,\n other: DataFrame,\n align_axis: Axis = 1,\n keep_shape: bool = False,\n keep_equal: bool = False,\n ) -> DataFrame:\n return super().compare(\n other=other,\n align_axis=align_axis,\n keep_shape=keep_shape,\n keep_equal=keep_equal,\n )\n\n def combine(\n self, other: DataFrame, func, fill_value=None, overwrite=True\n ) -> DataFrame:\n \"\"\"\n Perform column-wise combine with another DataFrame.\n\n Combines a DataFrame with `other` DataFrame using `func`\n to element-wise combine columns. The row and column indexes of the\n resulting DataFrame will be the union of the two.\n\n Parameters\n ----------\n other : DataFrame\n The DataFrame to merge column-wise.\n func : function\n Function that takes two series as inputs and return a Series or a\n scalar. 
Used to merge the two dataframes column by columns.\n fill_value : scalar value, default None\n The value to fill NaNs with prior to passing any column to the\n merge func.\n overwrite : bool, default True\n If True, columns in `self` that do not exist in `other` will be\n overwritten with NaNs.\n\n Returns\n -------\n DataFrame\n Combination of the provided DataFrames.\n\n See Also\n --------\n DataFrame.combine_first : Combine two DataFrame objects and default to\n non-null values in frame calling the method.\n\n Examples\n --------\n Combine using a simple function that chooses the smaller column.\n\n >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})\n >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})\n >>> take_smaller = lambda s1, s2: s1 if s1.sum() < s2.sum() else s2\n >>> df1.combine(df2, take_smaller)\n A B\n 0 0 3\n 1 0 3\n\n Example using a true element-wise combine function.\n\n >>> df1 = pd.DataFrame({'A': [5, 0], 'B': [2, 4]})\n >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})\n >>> df1.combine(df2, np.minimum)\n A B\n 0 1 2\n 1 0 3\n\n Using `fill_value` fills Nones prior to passing the column to the\n merge function.\n\n >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})\n >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})\n >>> df1.combine(df2, take_smaller, fill_value=-5)\n A B\n 0 0 -5.0\n 1 0 4.0\n\n However, if the same element in both dataframes is None, that None\n is preserved\n\n >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})\n >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [None, 3]})\n >>> df1.combine(df2, take_smaller, fill_value=-5)\n A B\n 0 0 -5.0\n 1 0 3.0\n\n Example that demonstrates the use of `overwrite` and behavior when\n the axis differ between the dataframes.\n\n >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})\n >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [-10, 1], }, index=[1, 2])\n >>> df1.combine(df2, take_smaller)\n A B C\n 0 NaN NaN NaN\n 1 NaN 3.0 -10.0\n 2 NaN 3.0 1.0\n\n >>> df1.combine(df2, take_smaller, overwrite=False)\n A B C\n 0 0.0 NaN NaN\n 1 0.0 3.0 -10.0\n 2 NaN 3.0 1.0\n\n Demonstrating the preference of the passed in dataframe.\n\n >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1], }, index=[1, 2])\n >>> df2.combine(df1, take_smaller)\n A B C\n 0 0.0 NaN NaN\n 1 0.0 3.0 NaN\n 2 NaN 3.0 NaN\n\n >>> df2.combine(df1, take_smaller, overwrite=False)\n A B C\n 0 0.0 NaN NaN\n 1 0.0 3.0 1.0\n 2 NaN 3.0 1.0\n \"\"\"\n other_idxlen = len(other.index) # save for compare\n\n this, other = self.align(other, copy=False)\n new_index = this.index\n\n if other.empty and len(new_index) == len(self.index):\n return self.copy()\n\n if self.empty and len(other) == other_idxlen:\n return other.copy()\n\n # sorts if possible\n new_columns = this.columns.union(other.columns)\n do_fill = fill_value is not None\n result = {}\n for col in new_columns:\n series = this[col]\n otherSeries = other[col]\n\n this_dtype = series.dtype\n other_dtype = otherSeries.dtype\n\n this_mask = isna(series)\n other_mask = isna(otherSeries)\n\n # don't overwrite columns unnecessarily\n # DO propagate if this column is not in the intersection\n if not overwrite and other_mask.all():\n result[col] = this[col].copy()\n continue\n\n if do_fill:\n series = series.copy()\n otherSeries = otherSeries.copy()\n series[this_mask] = fill_value\n otherSeries[other_mask] = fill_value\n\n if col not in self.columns:\n # If self DataFrame does not have col in other DataFrame,\n # try to promote series, which is all NaN, as other_dtype.\n new_dtype = other_dtype\n try:\n series = 
series.astype(new_dtype, copy=False)\n except ValueError:\n # e.g. new_dtype is integer types\n pass\n else:\n # if we have different dtypes, possibly promote\n new_dtype = find_common_type([this_dtype, other_dtype])\n if not is_dtype_equal(this_dtype, new_dtype):\n series = series.astype(new_dtype)\n if not is_dtype_equal(other_dtype, new_dtype):\n otherSeries = otherSeries.astype(new_dtype)\n\n arr = func(series, otherSeries)\n arr = maybe_downcast_to_dtype(arr, new_dtype)\n\n result[col] = arr\n\n # convert_objects just in case\n return self._constructor(result, index=new_index, columns=new_columns)\n\n def combine_first(self, other: DataFrame) -> DataFrame:\n \"\"\"\n Update null elements with value in the same location in `other`.\n\n Combine two DataFrame objects by filling null values in one DataFrame\n with non-null values from other DataFrame. The row and column indexes\n of the resulting DataFrame will be the union of the two.\n\n Parameters\n ----------\n other : DataFrame\n Provided DataFrame to use to fill null values.\n\n Returns\n -------\n DataFrame\n\n See Also\n --------\n DataFrame.combine : Perform series-wise operation on two DataFrames\n using a given function.\n\n Examples\n --------\n >>> df1 = pd.DataFrame({'A': [None, 0], 'B': [None, 4]})\n >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})\n >>> df1.combine_first(df2)\n A B\n 0 1.0 3.0\n 1 0.0 4.0\n\n Null values still persist if the location of that null value\n does not exist in `other`\n\n >>> df1 = pd.DataFrame({'A': [None, 0], 'B': [4, None]})\n >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1]}, index=[1, 2])\n >>> df1.combine_first(df2)\n A B C\n 0 NaN 4.0 NaN\n 1 0.0 3.0 1.0\n 2 NaN 3.0 1.0\n \"\"\"\n import pandas.core.computation.expressions as expressions\n\n def combiner(x, y):\n mask = extract_array(isna(x))\n\n x_values = extract_array(x, extract_numpy=True)\n y_values = extract_array(y, extract_numpy=True)\n\n # If the column y in other DataFrame is not in first DataFrame,\n # just return y_values.\n if y.name not in self.columns:\n return y_values\n\n return expressions.where(mask, y_values, x_values)\n\n return self.combine(other, combiner, overwrite=False)\n\n def update(\n self, other, join=\"left\", overwrite=True, filter_func=None, errors=\"ignore\"\n ) -> None:\n \"\"\"\n Modify in place using non-NA values from another DataFrame.\n\n Aligns on indices. There is no return value.\n\n Parameters\n ----------\n other : DataFrame, or object coercible into a DataFrame\n Should have at least one matching index/column label\n with the original DataFrame. If a Series is passed,\n its name attribute must be set, and that will be\n used as the column name to align with the original DataFrame.\n join : {'left'}, default 'left'\n Only left join is implemented, keeping the index and columns of the\n original object.\n overwrite : bool, default True\n How to handle non-NA values for overlapping keys:\n\n * True: overwrite original DataFrame's values\n with values from `other`.\n * False: only update values that are NA in\n the original DataFrame.\n\n filter_func : callable(1d-array) -> bool 1d-array, optional\n Can choose to replace values other than NA. Return True for values\n that should be updated.\n errors : {'raise', 'ignore'}, default 'ignore'\n If 'raise', will raise a ValueError if the DataFrame and `other`\n both contain non-NA data in the same place.\n\n .. 
versionchanged:: 0.24.0\n Changed from `raise_conflict=False|True`\n to `errors='ignore'|'raise'`.\n\n Returns\n -------\n None : method directly changes calling object\n\n Raises\n ------\n ValueError\n * When `errors='raise'` and there's overlapping non-NA data.\n * When `errors` is not either `'ignore'` or `'raise'`\n NotImplementedError\n * If `join != 'left'`\n\n See Also\n --------\n dict.update : Similar method for dictionaries.\n DataFrame.merge : For column(s)-on-column(s) operations.\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': [1, 2, 3],\n ... 'B': [400, 500, 600]})\n >>> new_df = pd.DataFrame({'B': [4, 5, 6],\n ... 'C': [7, 8, 9]})\n >>> df.update(new_df)\n >>> df\n A B\n 0 1 4\n 1 2 5\n 2 3 6\n\n The DataFrame's length does not increase as a result of the update,\n only values at matching index/column labels are updated.\n\n >>> df = pd.DataFrame({'A': ['a', 'b', 'c'],\n ... 'B': ['x', 'y', 'z']})\n >>> new_df = pd.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']})\n >>> df.update(new_df)\n >>> df\n A B\n 0 a d\n 1 b e\n 2 c f\n\n For Series, its name attribute must be set.\n\n >>> df = pd.DataFrame({'A': ['a', 'b', 'c'],\n ... 'B': ['x', 'y', 'z']})\n >>> new_column = pd.Series(['d', 'e'], name='B', index=[0, 2])\n >>> df.update(new_column)\n >>> df\n A B\n 0 a d\n 1 b y\n 2 c e\n >>> df = pd.DataFrame({'A': ['a', 'b', 'c'],\n ... 'B': ['x', 'y', 'z']})\n >>> new_df = pd.DataFrame({'B': ['d', 'e']}, index=[1, 2])\n >>> df.update(new_df)\n >>> df\n A B\n 0 a x\n 1 b d\n 2 c e\n\n If `other` contains NaNs the corresponding values are not updated\n in the original dataframe.\n\n >>> df = pd.DataFrame({'A': [1, 2, 3],\n ... 'B': [400, 500, 600]})\n >>> new_df = pd.DataFrame({'B': [4, np.nan, 6]})\n >>> df.update(new_df)\n >>> df\n A B\n 0 1 4.0\n 1 2 500.0\n 2 3 6.0\n \"\"\"\n import pandas.core.computation.expressions as expressions\n\n # TODO: Support other joins\n if join != \"left\": # pragma: no cover\n raise NotImplementedError(\"Only left join is supported\")\n if errors not in [\"ignore\", \"raise\"]:\n raise ValueError(\"The parameter errors must be either 'ignore' or 'raise'\")\n\n if not isinstance(other, DataFrame):\n other = DataFrame(other)\n\n other = other.reindex_like(self)\n\n for col in self.columns:\n this = self[col]._values\n that = other[col]._values\n if filter_func is not None:\n with np.errstate(all=\"ignore\"):\n mask = ~filter_func(this) | isna(that)\n else:\n if errors == \"raise\":\n mask_this = notna(that)\n mask_that = notna(this)\n if any(mask_this & mask_that):\n raise ValueError(\"Data overlaps.\")\n\n if overwrite:\n mask = isna(that)\n else:\n mask = notna(this)\n\n # don't overwrite columns unnecessarily\n if mask.all():\n continue\n\n self[col] = expressions.where(mask, this, that)\n\n # ----------------------------------------------------------------------\n # Data reshaping\n @Appender(\n \"\"\"\nExamples\n--------\n>>> df = pd.DataFrame({'Animal': ['Falcon', 'Falcon',\n... 'Parrot', 'Parrot'],\n... 'Max Speed': [380., 370., 24., 26.]})\n>>> df\n Animal Max Speed\n0 Falcon 380.0\n1 Falcon 370.0\n2 Parrot 24.0\n3 Parrot 26.0\n>>> df.groupby(['Animal']).mean()\n Max Speed\nAnimal\nFalcon 375.0\nParrot 25.0\n\n**Hierarchical Indexes**\n\nWe can groupby different levels of a hierarchical index\nusing the `level` parameter:\n\n>>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'],\n... 
['Captive', 'Wild', 'Captive', 'Wild']]\n>>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type'))\n>>> df = pd.DataFrame({'Max Speed': [390., 350., 30., 20.]},\n... index=index)\n>>> df\n Max Speed\nAnimal Type\nFalcon Captive 390.0\n Wild 350.0\nParrot Captive 30.0\n Wild 20.0\n>>> df.groupby(level=0).mean()\n Max Speed\nAnimal\nFalcon 370.0\nParrot 25.0\n>>> df.groupby(level=\"Type\").mean()\n Max Speed\nType\nCaptive 210.0\nWild 185.0\n\nWe can also choose to include NA in group keys or not by setting\n`dropna` parameter, the default setting is `True`:\n\n>>> l = [[1, 2, 3], [1, None, 4], [2, 1, 3], [1, 2, 2]]\n>>> df = pd.DataFrame(l, columns=[\"a\", \"b\", \"c\"])\n\n>>> df.groupby(by=[\"b\"]).sum()\n a c\nb\n1.0 2 3\n2.0 2 5\n\n>>> df.groupby(by=[\"b\"], dropna=False).sum()\n a c\nb\n1.0 2 3\n2.0 2 5\nNaN 1 4\n\n>>> l = [[\"a\", 12, 12], [None, 12.3, 33.], [\"b\", 12.3, 123], [\"a\", 1, 1]]\n>>> df = pd.DataFrame(l, columns=[\"a\", \"b\", \"c\"])\n\n>>> df.groupby(by=\"a\").sum()\n b c\na\na 13.0 13.0\nb 12.3 123.0\n\n>>> df.groupby(by=\"a\", dropna=False).sum()\n b c\na\na 13.0 13.0\nb 12.3 123.0\nNaN 12.3 33.0\n\"\"\"\n )\n @Appender(_shared_docs[\"groupby\"] % _shared_doc_kwargs)\n def groupby(\n self,\n by=None,\n axis=0,\n level=None,\n as_index: bool = True,\n sort: bool = True,\n group_keys: bool = True,\n squeeze: bool = no_default,\n observed: bool = False,\n dropna: bool = True,\n ) -> DataFrameGroupBy:\n from pandas.core.groupby.generic import DataFrameGroupBy\n\n if squeeze is not no_default:\n warnings.warn(\n (\n \"The `squeeze` parameter is deprecated and \"\n \"will be removed in a future version.\"\n ),\n FutureWarning,\n stacklevel=2,\n )\n else:\n squeeze = False\n\n if level is None and by is None:\n raise TypeError(\"You have to supply one of 'by' and 'level'\")\n axis = self._get_axis_number(axis)\n\n return DataFrameGroupBy(\n obj=self,\n keys=by,\n axis=axis,\n level=level,\n as_index=as_index,\n sort=sort,\n group_keys=group_keys,\n squeeze=squeeze,\n observed=observed,\n dropna=dropna,\n )\n\n _shared_docs[\n \"pivot\"\n ] = \"\"\"\n Return reshaped DataFrame organized by given index / column values.\n\n Reshape data (produce a \"pivot\" table) based on column values. Uses\n unique values from specified `index` / `columns` to form axes of the\n resulting DataFrame. This function does not support data\n aggregation, multiple values will result in a MultiIndex in the\n columns. See the :ref:`User Guide <reshaping>` for more on reshaping.\n\n Parameters\n ----------%s\n index : str or object or a list of str, optional\n Column to use to make new frame's index. If None, uses\n existing index.\n\n .. versionchanged:: 1.1.0\n Also accept list of index names.\n\n columns : str or object or a list of str\n Column to use to make new frame's columns.\n\n .. versionchanged:: 1.1.0\n Also accept list of columns names.\n\n values : str, object or a list of the previous, optional\n Column(s) to use for populating new frame's values. If not\n specified, all remaining columns will be used and the result will\n have hierarchically indexed columns.\n\n Returns\n -------\n DataFrame\n Returns reshaped DataFrame.\n\n Raises\n ------\n ValueError:\n When there are any `index`, `columns` combinations with multiple\n values. 
`DataFrame.pivot_table` when you need to aggregate.\n\n See Also\n --------\n DataFrame.pivot_table : Generalization of pivot that can handle\n duplicate values for one index/column pair.\n DataFrame.unstack : Pivot based on the index values instead of a\n column.\n wide_to_long : Wide panel to long format. Less flexible but more\n user-friendly than melt.\n\n Notes\n -----\n For finer-tuned control, see hierarchical indexing documentation along\n with the related stack/unstack methods.\n\n Examples\n --------\n >>> df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two',\n ... 'two'],\n ... 'bar': ['A', 'B', 'C', 'A', 'B', 'C'],\n ... 'baz': [1, 2, 3, 4, 5, 6],\n ... 'zoo': ['x', 'y', 'z', 'q', 'w', 't']})\n >>> df\n foo bar baz zoo\n 0 one A 1 x\n 1 one B 2 y\n 2 one C 3 z\n 3 two A 4 q\n 4 two B 5 w\n 5 two C 6 t\n\n >>> df.pivot(index='foo', columns='bar', values='baz')\n bar A B C\n foo\n one 1 2 3\n two 4 5 6\n\n >>> df.pivot(index='foo', columns='bar')['baz']\n bar A B C\n foo\n one 1 2 3\n two 4 5 6\n\n >>> df.pivot(index='foo', columns='bar', values=['baz', 'zoo'])\n baz zoo\n bar A B C A B C\n foo\n one 1 2 3 x y z\n two 4 5 6 q w t\n\n You could also assign a list of column names or a list of index names.\n\n >>> df = pd.DataFrame({\n ... \"lev1\": [1, 1, 1, 2, 2, 2],\n ... \"lev2\": [1, 1, 2, 1, 1, 2],\n ... \"lev3\": [1, 2, 1, 2, 1, 2],\n ... \"lev4\": [1, 2, 3, 4, 5, 6],\n ... \"values\": [0, 1, 2, 3, 4, 5]})\n >>> df\n lev1 lev2 lev3 lev4 values\n 0 1 1 1 1 0\n 1 1 1 2 2 1\n 2 1 2 1 3 2\n 3 2 1 2 4 3\n 4 2 1 1 5 4\n 5 2 2 2 6 5\n\n >>> df.pivot(index=\"lev1\", columns=[\"lev2\", \"lev3\"],values=\"values\")\n lev2 1 2\n lev3 1 2 1 2\n lev1\n 1 0.0 1.0 2.0 NaN\n 2 4.0 3.0 NaN 5.0\n\n >>> df.pivot(index=[\"lev1\", \"lev2\"], columns=[\"lev3\"],values=\"values\")\n lev3 1 2\n lev1 lev2\n 1 1 0.0 1.0\n 2 2.0 NaN\n 2 1 4.0 3.0\n 2 NaN 5.0\n\n A ValueError is raised if there are any duplicates.\n\n >>> df = pd.DataFrame({\"foo\": ['one', 'one', 'two', 'two'],\n ... \"bar\": ['A', 'A', 'B', 'C'],\n ... \"baz\": [1, 2, 3, 4]})\n >>> df\n foo bar baz\n 0 one A 1\n 1 one A 2\n 2 two B 3\n 3 two C 4\n\n Notice that the first two rows are the same for our `index`\n and `columns` arguments.\n\n >>> df.pivot(index='foo', columns='bar', values='baz')\n Traceback (most recent call last):\n ...\n ValueError: Index contains duplicate entries, cannot reshape\n \"\"\"\n\n @Substitution(\"\")\n @Appender(_shared_docs[\"pivot\"])\n def pivot(self, index=None, columns=None, values=None) -> DataFrame:\n from pandas.core.reshape.pivot import pivot\n\n return pivot(self, index=index, columns=columns, values=values)\n\n _shared_docs[\n \"pivot_table\"\n ] = \"\"\"\n Create a spreadsheet-style pivot table as a DataFrame.\n\n The levels in the pivot table will be stored in MultiIndex objects\n (hierarchical indexes) on the index and columns of the result DataFrame.\n\n Parameters\n ----------%s\n values : column to aggregate, optional\n index : column, Grouper, array, or list of the previous\n If an array is passed, it must be the same length as the data. The\n list can contain any of the other types (except list).\n Keys to group by on the pivot table index. If an array is passed,\n it is being used as the same manner as column values.\n columns : column, Grouper, array, or list of the previous\n If an array is passed, it must be the same length as the data. The\n list can contain any of the other types (except list).\n Keys to group by on the pivot table column. 
If an array is passed,\n it is being used as the same manner as column values.\n aggfunc : function, list of functions, dict, default numpy.mean\n If list of functions passed, the resulting pivot table will have\n hierarchical columns whose top level are the function names\n (inferred from the function objects themselves)\n If dict is passed, the key is column to aggregate and value\n is function or list of functions.\n fill_value : scalar, default None\n Value to replace missing values with (in the resulting pivot table,\n after aggregation).\n margins : bool, default False\n Add all row / columns (e.g. for subtotal / grand totals).\n dropna : bool, default True\n Do not include columns whose entries are all NaN.\n margins_name : str, default 'All'\n Name of the row / column that will contain the totals\n when margins is True.\n observed : bool, default False\n This only applies if any of the groupers are Categoricals.\n If True: only show observed values for categorical groupers.\n If False: show all values for categorical groupers.\n\n .. versionchanged:: 0.25.0\n\n Returns\n -------\n DataFrame\n An Excel style pivot table.\n\n See Also\n --------\n DataFrame.pivot : Pivot without aggregation that can handle\n non-numeric data.\n DataFrame.melt: Unpivot a DataFrame from wide to long format,\n optionally leaving identifiers set.\n wide_to_long : Wide panel to long format. Less flexible but more\n user-friendly than melt.\n\n Examples\n --------\n >>> df = pd.DataFrame({\"A\": [\"foo\", \"foo\", \"foo\", \"foo\", \"foo\",\n ... \"bar\", \"bar\", \"bar\", \"bar\"],\n ... \"B\": [\"one\", \"one\", \"one\", \"two\", \"two\",\n ... \"one\", \"one\", \"two\", \"two\"],\n ... \"C\": [\"small\", \"large\", \"large\", \"small\",\n ... \"small\", \"large\", \"small\", \"small\",\n ... \"large\"],\n ... \"D\": [1, 2, 2, 3, 3, 4, 5, 6, 7],\n ... \"E\": [2, 4, 5, 5, 6, 6, 8, 9, 9]})\n >>> df\n A B C D E\n 0 foo one small 1 2\n 1 foo one large 2 4\n 2 foo one large 2 5\n 3 foo two small 3 5\n 4 foo two small 3 6\n 5 bar one large 4 6\n 6 bar one small 5 8\n 7 bar two small 6 9\n 8 bar two large 7 9\n\n This first example aggregates values by taking the sum.\n\n >>> table = pd.pivot_table(df, values='D', index=['A', 'B'],\n ... columns=['C'], aggfunc=np.sum)\n >>> table\n C large small\n A B\n bar one 4.0 5.0\n two 7.0 6.0\n foo one 4.0 1.0\n two NaN 6.0\n\n We can also fill missing values using the `fill_value` parameter.\n\n >>> table = pd.pivot_table(df, values='D', index=['A', 'B'],\n ... columns=['C'], aggfunc=np.sum, fill_value=0)\n >>> table\n C large small\n A B\n bar one 4 5\n two 7 6\n foo one 4 1\n two 0 6\n\n The next example aggregates by taking the mean across multiple columns.\n\n >>> table = pd.pivot_table(df, values=['D', 'E'], index=['A', 'C'],\n ... aggfunc={'D': np.mean,\n ... 'E': np.mean})\n >>> table\n D E\n A C\n bar large 5.500000 7.500000\n small 5.500000 8.500000\n foo large 2.000000 4.500000\n small 2.333333 4.333333\n\n We can also calculate multiple types of aggregations for any given\n value column.\n\n >>> table = pd.pivot_table(df, values=['D', 'E'], index=['A', 'C'],\n ... aggfunc={'D': np.mean,\n ... 
'E': [min, max, np.mean]})\n >>> table\n D E\n mean max mean min\n A C\n bar large 5.500000 9.0 7.500000 6.0\n small 5.500000 9.0 8.500000 8.0\n foo large 2.000000 5.0 4.500000 4.0\n small 2.333333 6.0 4.333333 2.0\n \"\"\"\n\n @Substitution(\"\")\n @Appender(_shared_docs[\"pivot_table\"])\n def pivot_table(\n self,\n values=None,\n index=None,\n columns=None,\n aggfunc=\"mean\",\n fill_value=None,\n margins=False,\n dropna=True,\n margins_name=\"All\",\n observed=False,\n ) -> DataFrame:\n from pandas.core.reshape.pivot import pivot_table\n\n return pivot_table(\n self,\n values=values,\n index=index,\n columns=columns,\n aggfunc=aggfunc,\n fill_value=fill_value,\n margins=margins,\n dropna=dropna,\n margins_name=margins_name,\n observed=observed,\n )\n\n def stack(self, level=-1, dropna=True):\n \"\"\"\n Stack the prescribed level(s) from columns to index.\n\n Return a reshaped DataFrame or Series having a multi-level\n index with one or more new inner-most levels compared to the current\n DataFrame. The new inner-most levels are created by pivoting the\n columns of the current dataframe:\n\n - if the columns have a single level, the output is a Series;\n - if the columns have multiple levels, the new index\n level(s) is (are) taken from the prescribed level(s) and\n the output is a DataFrame.\n\n Parameters\n ----------\n level : int, str, list, default -1\n Level(s) to stack from the column axis onto the index\n axis, defined as one index or label, or a list of indices\n or labels.\n dropna : bool, default True\n Whether to drop rows in the resulting Frame/Series with\n missing values. Stacking a column level onto the index\n axis can create combinations of index and column values\n that are missing from the original dataframe. See Examples\n section.\n\n Returns\n -------\n DataFrame or Series\n Stacked dataframe or series.\n\n See Also\n --------\n DataFrame.unstack : Unstack prescribed level(s) from index axis\n onto column axis.\n DataFrame.pivot : Reshape dataframe from long format to wide\n format.\n DataFrame.pivot_table : Create a spreadsheet-style pivot table\n as a DataFrame.\n\n Notes\n -----\n The function is named by analogy with a collection of books\n being reorganized from being side by side on a horizontal\n position (the columns of the dataframe) to being stacked\n vertically on top of each other (in the index of the\n dataframe).\n\n Examples\n --------\n **Single level columns**\n\n >>> df_single_level_cols = pd.DataFrame([[0, 1], [2, 3]],\n ... index=['cat', 'dog'],\n ... columns=['weight', 'height'])\n\n Stacking a dataframe with a single level column axis returns a Series:\n\n >>> df_single_level_cols\n weight height\n cat 0 1\n dog 2 3\n >>> df_single_level_cols.stack()\n cat weight 0\n height 1\n dog weight 2\n height 3\n dtype: int64\n\n **Multi level columns: simple case**\n\n >>> multicol1 = pd.MultiIndex.from_tuples([('weight', 'kg'),\n ... ('weight', 'pounds')])\n >>> df_multi_level_cols1 = pd.DataFrame([[1, 2], [2, 4]],\n ... index=['cat', 'dog'],\n ... columns=multicol1)\n\n Stacking a dataframe with a multi-level column axis:\n\n >>> df_multi_level_cols1\n weight\n kg pounds\n cat 1 2\n dog 2 4\n >>> df_multi_level_cols1.stack()\n weight\n cat kg 1\n pounds 2\n dog kg 2\n pounds 4\n\n **Missing values**\n\n >>> multicol2 = pd.MultiIndex.from_tuples([('weight', 'kg'),\n ... ('height', 'm')])\n >>> df_multi_level_cols2 = pd.DataFrame([[1.0, 2.0], [3.0, 4.0]],\n ... index=['cat', 'dog'],\n ... 
columns=multicol2)\n\n It is common to have missing values when stacking a dataframe\n with multi-level columns, as the stacked dataframe typically\n has more values than the original dataframe. Missing values\n are filled with NaNs:\n\n >>> df_multi_level_cols2\n weight height\n kg m\n cat 1.0 2.0\n dog 3.0 4.0\n >>> df_multi_level_cols2.stack()\n height weight\n cat kg NaN 1.0\n m 2.0 NaN\n dog kg NaN 3.0\n m 4.0 NaN\n\n **Prescribing the level(s) to be stacked**\n\n The first parameter controls which level or levels are stacked:\n\n >>> df_multi_level_cols2.stack(0)\n kg m\n cat height NaN 2.0\n weight 1.0 NaN\n dog height NaN 4.0\n weight 3.0 NaN\n >>> df_multi_level_cols2.stack([0, 1])\n cat height m 2.0\n weight kg 1.0\n dog height m 4.0\n weight kg 3.0\n dtype: float64\n\n **Dropping missing values**\n\n >>> df_multi_level_cols3 = pd.DataFrame([[None, 1.0], [2.0, 3.0]],\n ... index=['cat', 'dog'],\n ... columns=multicol2)\n\n Note that rows where all values are missing are dropped by\n default but this behaviour can be controlled via the dropna\n keyword parameter:\n\n >>> df_multi_level_cols3\n weight height\n kg m\n cat NaN 1.0\n dog 2.0 3.0\n >>> df_multi_level_cols3.stack(dropna=False)\n height weight\n cat kg NaN NaN\n m 1.0 NaN\n dog kg NaN 2.0\n m 3.0 NaN\n >>> df_multi_level_cols3.stack(dropna=True)\n height weight\n cat m 1.0 NaN\n dog kg NaN 2.0\n m 3.0 NaN\n \"\"\"\n from pandas.core.reshape.reshape import stack, stack_multiple\n\n if isinstance(level, (tuple, list)):\n result = stack_multiple(self, level, dropna=dropna)\n else:\n result = stack(self, level, dropna=dropna)\n\n return result.__finalize__(self, method=\"stack\")\n\n def explode(\n self, column: Union[str, Tuple], ignore_index: bool = False\n ) -> DataFrame:\n \"\"\"\n Transform each element of a list-like to a row, replicating index values.\n\n .. versionadded:: 0.25.0\n\n Parameters\n ----------\n column : str or tuple\n Column to explode.\n ignore_index : bool, default False\n If True, the resulting index will be labeled 0, 1, …, n - 1.\n\n .. versionadded:: 1.1.0\n\n Returns\n -------\n DataFrame\n Exploded lists to rows of the subset columns;\n index will be duplicated for these rows.\n\n Raises\n ------\n ValueError :\n if columns of the frame are not unique.\n\n See Also\n --------\n DataFrame.unstack : Pivot a level of the (necessarily hierarchical)\n index labels.\n DataFrame.melt : Unpivot a DataFrame from wide format to long format.\n Series.explode : Explode a DataFrame from list-like columns to long format.\n\n Notes\n -----\n This routine will explode list-likes including lists, tuples, sets,\n Series, and np.ndarray. The result dtype of the subset rows will\n be object. Scalars will be returned unchanged, and empty list-likes will\n result in a np.nan for that row. 
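For instance, a minimal illustrative sketch (not part of the upstream examples; the single column ``'A'`` is an arbitrary choice) showing list-likes, a scalar, and an empty list:\n\n >>> pd.DataFrame({'A': [[1, 2], 'x', []]}).explode('A')\n A\n 0 1\n 0 2\n 1 x\n 2 NaN\n\n 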
In addition, the ordering of rows in the\n output will be non-deterministic when exploding sets.\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': [[1, 2, 3], 'foo', [], [3, 4]], 'B': 1})\n >>> df\n A B\n 0 [1, 2, 3] 1\n 1 foo 1\n 2 [] 1\n 3 [3, 4] 1\n\n >>> df.explode('A')\n A B\n 0 1 1\n 0 2 1\n 0 3 1\n 1 foo 1\n 2 NaN 1\n 3 3 1\n 3 4 1\n \"\"\"\n if not (is_scalar(column) or isinstance(column, tuple)):\n raise ValueError(\"column must be a scalar\")\n if not self.columns.is_unique:\n raise ValueError(\"columns must be unique\")\n\n df = self.reset_index(drop=True)\n result = df[column].explode()\n result = df.drop([column], axis=1).join(result)\n if ignore_index:\n result.index = ibase.default_index(len(result))\n else:\n result.index = self.index.take(result.index)\n result = result.reindex(columns=self.columns, copy=False)\n\n return result\n\n def unstack(self, level=-1, fill_value=None):\n \"\"\"\n Pivot a level of the (necessarily hierarchical) index labels.\n\n Returns a DataFrame having a new level of column labels whose inner-most level\n consists of the pivoted index labels.\n\n If the index is not a MultiIndex, the output will be a Series\n (the analogue of stack when the columns are not a MultiIndex).\n\n Parameters\n ----------\n level : int, str, or list of these, default -1 (last level)\n Level(s) of index to unstack, can pass level name.\n fill_value : int, str or dict\n Replace NaN with this value if the unstack produces missing values.\n\n Returns\n -------\n Series or DataFrame\n\n See Also\n --------\n DataFrame.pivot : Pivot a table based on column values.\n DataFrame.stack : Pivot a level of the column labels (inverse operation\n from `unstack`).\n\n Examples\n --------\n >>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),\n ... ('two', 'a'), ('two', 'b')])\n >>> s = pd.Series(np.arange(1.0, 5.0), index=index)\n >>> s\n one a 1.0\n b 2.0\n two a 3.0\n b 4.0\n dtype: float64\n\n >>> s.unstack(level=-1)\n a b\n one 1.0 2.0\n two 3.0 4.0\n\n >>> s.unstack(level=0)\n one two\n a 1.0 3.0\n b 2.0 4.0\n\n >>> df = s.unstack(level=0)\n >>> df.unstack()\n one a 1.0\n b 2.0\n two a 3.0\n b 4.0\n dtype: float64\n \"\"\"\n from pandas.core.reshape.reshape import unstack\n\n result = unstack(self, level, fill_value)\n\n return result.__finalize__(self, method=\"unstack\")\n\n @Appender(_shared_docs[\"melt\"] % {\"caller\": \"df.melt(\", \"other\": \"melt\"})\n def melt(\n self,\n id_vars=None,\n value_vars=None,\n var_name=None,\n value_name=\"value\",\n col_level=None,\n ignore_index=True,\n ) -> DataFrame:\n\n return melt(\n self,\n id_vars=id_vars,\n value_vars=value_vars,\n var_name=var_name,\n value_name=value_name,\n col_level=col_level,\n ignore_index=ignore_index,\n )\n\n # ----------------------------------------------------------------------\n # Time series-related\n\n @doc(\n Series.diff,\n klass=\"Dataframe\",\n extra_params=\"axis : {0 or 'index', 1 or 'columns'}, default 0\\n \"\n \"Take difference over rows (0) or columns (1).\\n\",\n other_klass=\"Series\",\n examples=dedent(\n \"\"\"\n Difference with previous row\n\n >>> df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],\n ... 'b': [1, 1, 2, 3, 5, 8],\n ... 
'c': [1, 4, 9, 16, 25, 36]})\n >>> df\n a b c\n 0 1 1 1\n 1 2 1 4\n 2 3 2 9\n 3 4 3 16\n 4 5 5 25\n 5 6 8 36\n\n >>> df.diff()\n a b c\n 0 NaN NaN NaN\n 1 1.0 0.0 3.0\n 2 1.0 1.0 5.0\n 3 1.0 1.0 7.0\n 4 1.0 2.0 9.0\n 5 1.0 3.0 11.0\n\n Difference with previous column\n\n >>> df.diff(axis=1)\n a b c\n 0 NaN 0 0\n 1 NaN -1 3\n 2 NaN -1 7\n 3 NaN -1 13\n 4 NaN 0 20\n 5 NaN 2 28\n\n Difference with 3rd previous row\n\n >>> df.diff(periods=3)\n a b c\n 0 NaN NaN NaN\n 1 NaN NaN NaN\n 2 NaN NaN NaN\n 3 3.0 2.0 15.0\n 4 3.0 4.0 21.0\n 5 3.0 6.0 27.0\n\n Difference with following row\n\n >>> df.diff(periods=-1)\n a b c\n 0 -1.0 0.0 -3.0\n 1 -1.0 -1.0 -5.0\n 2 -1.0 -1.0 -7.0\n 3 -1.0 -2.0 -9.0\n 4 -1.0 -3.0 -11.0\n 5 NaN NaN NaN\n\n Overflow in input dtype\n\n >>> df = pd.DataFrame({'a': [1, 0]}, dtype=np.uint8)\n >>> df.diff()\n a\n 0 NaN\n 1 255.0\"\"\"\n ),\n )\n def diff(self, periods: int = 1, axis: Axis = 0) -> DataFrame:\n if not isinstance(periods, int):\n if not (is_float(periods) and periods.is_integer()):\n raise ValueError(\"periods must be an integer\")\n periods = int(periods)\n\n bm_axis = self._get_block_manager_axis(axis)\n\n if bm_axis == 0 and periods != 0:\n return self - self.shift(periods, axis=axis)\n\n new_data = self._mgr.diff(n=periods, axis=bm_axis)\n return self._constructor(new_data).__finalize__(self, \"diff\")\n\n # ----------------------------------------------------------------------\n # Function application\n\n def _gotitem(\n self,\n key: Union[Label, List[Label]],\n ndim: int,\n subset: Optional[FrameOrSeriesUnion] = None,\n ) -> FrameOrSeriesUnion:\n \"\"\"\n Sub-classes to define. Return a sliced object.\n\n Parameters\n ----------\n key : string / list of selections\n ndim : 1,2\n requested ndim of result\n subset : object, default None\n subset to act on\n \"\"\"\n if subset is None:\n subset = self\n elif subset.ndim == 1: # is Series\n return subset\n\n # TODO: _shallow_copy(subset)?\n return subset[key]\n\n _agg_summary_and_see_also_doc = dedent(\n \"\"\"\n The aggregation operations are always performed over an axis, either the\n index (default) or the column axis. This behavior is different from\n `numpy` aggregation functions (`mean`, `median`, `prod`, `sum`, `std`,\n `var`), where the default is to compute the aggregation of the flattened\n array, e.g., ``numpy.mean(arr_2d)`` as opposed to\n ``numpy.mean(arr_2d, axis=0)``.\n\n `agg` is an alias for `aggregate`. Use the alias.\n\n See Also\n --------\n DataFrame.apply : Perform any type of operations.\n DataFrame.transform : Perform transformation type operations.\n core.groupby.GroupBy : Perform operations over groups.\n core.resample.Resampler : Perform operations over resampled bins.\n core.window.Rolling : Perform operations over rolling window.\n core.window.Expanding : Perform operations over expanding window.\n core.window.ExponentialMovingWindow : Perform operation over exponential weighted\n window.\n \"\"\"\n )\n\n _agg_examples_doc = dedent(\n \"\"\"\n Examples\n --------\n >>> df = pd.DataFrame([[1, 2, 3],\n ... [4, 5, 6],\n ... [7, 8, 9],\n ... [np.nan, np.nan, np.nan]],\n ... 
columns=['A', 'B', 'C'])\n\n Aggregate these functions over the rows.\n\n >>> df.agg(['sum', 'min'])\n A B C\n sum 12.0 15.0 18.0\n min 1.0 2.0 3.0\n\n Different aggregations per column.\n\n >>> df.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']})\n A B\n sum 12.0 NaN\n min 1.0 2.0\n max NaN 8.0\n\n Aggregate different functions over the columns and rename the index of the resulting\n DataFrame.\n\n >>> df.agg(x=('A', max), y=('B', 'min'), z=('C', np.mean))\n A B C\n x 7.0 NaN NaN\n y NaN 2.0 NaN\n z NaN NaN 6.0\n\n Aggregate over the columns.\n\n >>> df.agg(\"mean\", axis=\"columns\")\n 0 2.0\n 1 5.0\n 2 8.0\n 3 NaN\n dtype: float64\n \"\"\"\n )\n\n @doc(\n _shared_docs[\"aggregate\"],\n klass=_shared_doc_kwargs[\"klass\"],\n axis=_shared_doc_kwargs[\"axis\"],\n see_also=_agg_summary_and_see_also_doc,\n examples=_agg_examples_doc,\n )\n def aggregate(self, func=None, axis=0, *args, **kwargs):\n axis = self._get_axis_number(axis)\n\n relabeling, func, columns, order = reconstruct_func(func, **kwargs)\n\n result = None\n try:\n result, how = self._aggregate(func, axis, *args, **kwargs)\n except TypeError as err:\n exc = TypeError(\n \"DataFrame constructor called with \"\n f\"incompatible data and dtype: {err}\"\n )\n raise exc from err\n if result is None:\n return self.apply(func, axis=axis, args=args, **kwargs)\n\n if relabeling:\n # This is to keep the order to columns occurrence unchanged, and also\n # keep the order of new columns occurrence unchanged\n\n # For the return values of reconstruct_func, if relabeling is\n # False, columns and order will be None.\n assert columns is not None\n assert order is not None\n\n result_in_dict = relabel_result(result, func, columns, order)\n result = DataFrame(result_in_dict, index=columns)\n\n return result\n\n def _aggregate(self, arg, axis=0, *args, **kwargs):\n if axis == 1:\n # NDFrame.aggregate returns a tuple, and we need to transpose\n # only result\n result, how = aggregate(self.T, arg, *args, **kwargs)\n result = result.T if result is not None else result\n return result, how\n return aggregate(self, arg, *args, **kwargs)\n\n agg = aggregate\n\n @doc(\n _shared_docs[\"transform\"],\n klass=_shared_doc_kwargs[\"klass\"],\n axis=_shared_doc_kwargs[\"axis\"],\n )\n def transform(\n self, func: AggFuncType, axis: Axis = 0, *args, **kwargs\n ) -> DataFrame:\n result = transform(self, func, axis, *args, **kwargs)\n assert isinstance(result, DataFrame)\n return result\n\n def apply(self, func, axis=0, raw=False, result_type=None, args=(), **kwds):\n \"\"\"\n Apply a function along an axis of the DataFrame.\n\n Objects passed to the function are Series objects whose index is\n either the DataFrame's index (``axis=0``) or the DataFrame's columns\n (``axis=1``). By default (``result_type=None``), the final return type\n is inferred from the return type of the applied function. 
Otherwise,\n it depends on the `result_type` argument.\n\n Parameters\n ----------\n func : function\n Function to apply to each column or row.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n Axis along which the function is applied:\n\n * 0 or 'index': apply function to each column.\n * 1 or 'columns': apply function to each row.\n\n raw : bool, default False\n Determines if row or column is passed as a Series or ndarray object:\n\n * ``False`` : passes each row or column as a Series to the\n function.\n * ``True`` : the passed function will receive ndarray objects\n instead.\n If you are just applying a NumPy reduction function this will\n achieve much better performance.\n\n result_type : {'expand', 'reduce', 'broadcast', None}, default None\n These only act when ``axis=1`` (columns):\n\n * 'expand' : list-like results will be turned into columns.\n * 'reduce' : returns a Series if possible rather than expanding\n list-like results. This is the opposite of 'expand'.\n * 'broadcast' : results will be broadcast to the original shape\n of the DataFrame, the original index and columns will be\n retained.\n\n The default behaviour (None) depends on the return value of the\n applied function: list-like results will be returned as a Series\n of those. However if the apply function returns a Series these\n are expanded to columns.\n args : tuple\n Positional arguments to pass to `func` in addition to the\n array/series.\n **kwds\n Additional keyword arguments to pass as keywords arguments to\n `func`.\n\n Returns\n -------\n Series or DataFrame\n Result of applying ``func`` along the given axis of the\n DataFrame.\n\n See Also\n --------\n DataFrame.applymap: For elementwise operations.\n DataFrame.aggregate: Only perform aggregating type operations.\n DataFrame.transform: Only perform transforming type operations.\n\n Examples\n --------\n >>> df = pd.DataFrame([[4, 9]] * 3, columns=['A', 'B'])\n >>> df\n A B\n 0 4 9\n 1 4 9\n 2 4 9\n\n Using a numpy universal function (in this case the same as\n ``np.sqrt(df)``):\n\n >>> df.apply(np.sqrt)\n A B\n 0 2.0 3.0\n 1 2.0 3.0\n 2 2.0 3.0\n\n Using a reducing function on either axis\n\n >>> df.apply(np.sum, axis=0)\n A 12\n B 27\n dtype: int64\n\n >>> df.apply(np.sum, axis=1)\n 0 13\n 1 13\n 2 13\n dtype: int64\n\n Returning a list-like will result in a Series\n\n >>> df.apply(lambda x: [1, 2], axis=1)\n 0 [1, 2]\n 1 [1, 2]\n 2 [1, 2]\n dtype: object\n\n Passing ``result_type='expand'`` will expand list-like results\n to columns of a Dataframe\n\n >>> df.apply(lambda x: [1, 2], axis=1, result_type='expand')\n 0 1\n 0 1 2\n 1 1 2\n 2 1 2\n\n Returning a Series inside the function is similar to passing\n ``result_type='expand'``. The resulting column names\n will be the Series index.\n\n >>> df.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1)\n foo bar\n 0 1 2\n 1 1 2\n 2 1 2\n\n Passing ``result_type='broadcast'`` will ensure the same shape\n result, whether list-like or scalar is returned by the function,\n and broadcast it along the axis. 
The resulting column names will\n be the originals.\n\n >>> df.apply(lambda x: [1, 2], axis=1, result_type='broadcast')\n A B\n 0 1 2\n 1 1 2\n 2 1 2\n \"\"\"\n from pandas.core.apply import frame_apply\n\n op = frame_apply(\n self,\n func=func,\n axis=axis,\n raw=raw,\n result_type=result_type,\n args=args,\n kwds=kwds,\n )\n return op.get_result()\n\n def applymap(self, func, na_action: Optional[str] = None) -> DataFrame:\n \"\"\"\n Apply a function to a Dataframe elementwise.\n\n This method applies a function that accepts and returns a scalar\n to every element of a DataFrame.\n\n Parameters\n ----------\n func : callable\n Python function, returns a single value from a single value.\n na_action : {None, 'ignore'}, default None\n If ‘ignore’, propagate NaN values, without passing them to func.\n\n .. versionadded:: 1.2\n\n Returns\n -------\n DataFrame\n Transformed DataFrame.\n\n See Also\n --------\n DataFrame.apply : Apply a function along input axis of DataFrame.\n\n Examples\n --------\n >>> df = pd.DataFrame([[1, 2.12], [3.356, 4.567]])\n >>> df\n 0 1\n 0 1.000 2.120\n 1 3.356 4.567\n\n >>> df.applymap(lambda x: len(str(x)))\n 0 1\n 0 3 4\n 1 5 5\n\n Like Series.map, NA values can be ignored:\n\n >>> df_copy = df.copy()\n >>> df_copy.iloc[0, 0] = pd.NA\n >>> df_copy.applymap(lambda x: len(str(x)), na_action='ignore')\n 0 1\n 0 <NA> 4\n 1 5 5\n\n Note that a vectorized version of `func` often exists, which will\n be much faster. You could square each number elementwise.\n\n >>> df.applymap(lambda x: x**2)\n 0 1\n 0 1.000000 4.494400\n 1 11.262736 20.857489\n\n But it's better to avoid applymap in that case.\n\n >>> df ** 2\n 0 1\n 0 1.000000 4.494400\n 1 11.262736 20.857489\n \"\"\"\n if na_action not in {\"ignore\", None}:\n raise ValueError(\n f\"na_action must be 'ignore' or None. Got {repr(na_action)}\"\n )\n ignore_na = na_action == \"ignore\"\n\n # if we have a dtype == 'M8[ns]', provide boxed values\n def infer(x):\n if x.empty:\n return lib.map_infer(x, func, ignore_na=ignore_na)\n return lib.map_infer(x.astype(object)._values, func, ignore_na=ignore_na)\n\n return self.apply(infer).__finalize__(self, \"applymap\")\n\n # ----------------------------------------------------------------------\n # Merging / joining methods\n\n def append(\n self, other, ignore_index=False, verify_integrity=False, sort=False\n ) -> DataFrame:\n \"\"\"\n Append rows of `other` to the end of caller, returning a new object.\n\n Columns in `other` that are not in the caller are added as new columns.\n\n Parameters\n ----------\n other : DataFrame or Series/dict-like object, or list of these\n The data to append.\n ignore_index : bool, default False\n If True, the resulting axis will be labeled 0, 1, …, n - 1.\n verify_integrity : bool, default False\n If True, raise ValueError on creating index with duplicates.\n sort : bool, default False\n Sort columns if the columns of `self` and `other` are not aligned.\n\n .. versionchanged:: 1.0.0\n\n Changed to not sort by default.\n\n Returns\n -------\n DataFrame\n\n See Also\n --------\n concat : General function to concatenate DataFrame or Series objects.\n\n Notes\n -----\n If a list of dict/series is passed and the keys are all contained in\n the DataFrame's index, the order of the columns in the resulting\n DataFrame will be unchanged.\n\n Iteratively appending rows to a DataFrame can be more computationally\n intensive than a single concatenate. 
A better solution is to append\n those rows to a list and then concatenate the list with the original\n DataFrame all at once.\n\n Examples\n --------\n >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB'))\n >>> df\n A B\n 0 1 2\n 1 3 4\n >>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=list('AB'))\n >>> df.append(df2)\n A B\n 0 1 2\n 1 3 4\n 0 5 6\n 1 7 8\n\n With `ignore_index` set to True:\n\n >>> df.append(df2, ignore_index=True)\n A B\n 0 1 2\n 1 3 4\n 2 5 6\n 3 7 8\n\n The following, while not recommended methods for generating DataFrames,\n show two ways to generate a DataFrame from multiple data sources.\n\n Less efficient:\n\n >>> df = pd.DataFrame(columns=['A'])\n >>> for i in range(5):\n ... df = df.append({'A': i}, ignore_index=True)\n >>> df\n A\n 0 0\n 1 1\n 2 2\n 3 3\n 4 4\n\n More efficient:\n\n >>> pd.concat([pd.DataFrame([i], columns=['A']) for i in range(5)],\n ... ignore_index=True)\n A\n 0 0\n 1 1\n 2 2\n 3 3\n 4 4\n \"\"\"\n if isinstance(other, (Series, dict)):\n if isinstance(other, dict):\n if not ignore_index:\n raise TypeError(\"Can only append a dict if ignore_index=True\")\n other = Series(other)\n if other.name is None and not ignore_index:\n raise TypeError(\n \"Can only append a Series if ignore_index=True \"\n \"or if the Series has a name\"\n )\n\n index = Index([other.name], name=self.index.name)\n idx_diff = other.index.difference(self.columns)\n try:\n combined_columns = self.columns.append(idx_diff)\n except TypeError:\n combined_columns = self.columns.astype(object).append(idx_diff)\n other = (\n other.reindex(combined_columns, copy=False)\n .to_frame()\n .T.infer_objects()\n .rename_axis(index.names, copy=False)\n )\n if not self.columns.equals(combined_columns):\n self = self.reindex(columns=combined_columns)\n elif isinstance(other, list):\n if not other:\n pass\n elif not isinstance(other[0], DataFrame):\n other = DataFrame(other)\n if (self.columns.get_indexer(other.columns) >= 0).all():\n other = other.reindex(columns=self.columns)\n\n from pandas.core.reshape.concat import concat\n\n if isinstance(other, (list, tuple)):\n to_concat = [self, *other]\n else:\n to_concat = [self, other]\n return (\n concat(\n to_concat,\n ignore_index=ignore_index,\n verify_integrity=verify_integrity,\n sort=sort,\n )\n ).__finalize__(self, method=\"append\")\n\n def join(\n self, other, on=None, how=\"left\", lsuffix=\"\", rsuffix=\"\", sort=False\n ) -> DataFrame:\n \"\"\"\n Join columns of another DataFrame.\n\n Join columns with `other` DataFrame either on index or on a key\n column. Efficiently join multiple DataFrame objects by index at once by\n passing a list.\n\n Parameters\n ----------\n other : DataFrame, Series, or list of DataFrame\n Index should be similar to one of the columns in this one. If a\n Series is passed, its name attribute must be set, and that will be\n used as the column name in the resulting joined DataFrame.\n on : str, list of str, or array-like, optional\n Column or index level name(s) in the caller to join on the index\n in `other`, otherwise joins index-on-index. If multiple\n values given, the `other` DataFrame must have a MultiIndex. Can\n pass an array as the join key if it is not already contained in\n the calling DataFrame. 
Like an Excel VLOOKUP operation.\n how : {'left', 'right', 'outer', 'inner'}, default 'left'\n How to handle the operation of the two objects.\n\n * left: use calling frame's index (or column if on is specified)\n * right: use `other`'s index.\n * outer: form union of calling frame's index (or column if on is\n specified) with `other`'s index, and sort it.\n lexicographically.\n * inner: form intersection of calling frame's index (or column if\n on is specified) with `other`'s index, preserving the order\n of the calling's one.\n lsuffix : str, default ''\n Suffix to use from left frame's overlapping columns.\n rsuffix : str, default ''\n Suffix to use from right frame's overlapping columns.\n sort : bool, default False\n Order result DataFrame lexicographically by the join key. If False,\n the order of the join key depends on the join type (how keyword).\n\n Returns\n -------\n DataFrame\n A dataframe containing columns from both the caller and `other`.\n\n See Also\n --------\n DataFrame.merge : For column(s)-on-column(s) operations.\n\n Notes\n -----\n Parameters `on`, `lsuffix`, and `rsuffix` are not supported when\n passing a list of `DataFrame` objects.\n\n Support for specifying index levels as the `on` parameter was added\n in version 0.23.0.\n\n Examples\n --------\n >>> df = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'],\n ... 'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']})\n\n >>> df\n key A\n 0 K0 A0\n 1 K1 A1\n 2 K2 A2\n 3 K3 A3\n 4 K4 A4\n 5 K5 A5\n\n >>> other = pd.DataFrame({'key': ['K0', 'K1', 'K2'],\n ... 'B': ['B0', 'B1', 'B2']})\n\n >>> other\n key B\n 0 K0 B0\n 1 K1 B1\n 2 K2 B2\n\n Join DataFrames using their indexes.\n\n >>> df.join(other, lsuffix='_caller', rsuffix='_other')\n key_caller A key_other B\n 0 K0 A0 K0 B0\n 1 K1 A1 K1 B1\n 2 K2 A2 K2 B2\n 3 K3 A3 NaN NaN\n 4 K4 A4 NaN NaN\n 5 K5 A5 NaN NaN\n\n If we want to join using the key columns, we need to set key to be\n the index in both `df` and `other`. The joined DataFrame will have\n key as its index.\n\n >>> df.set_index('key').join(other.set_index('key'))\n A B\n key\n K0 A0 B0\n K1 A1 B1\n K2 A2 B2\n K3 A3 NaN\n K4 A4 NaN\n K5 A5 NaN\n\n Another option to join using the key columns is to use the `on`\n parameter. DataFrame.join always uses `other`'s index but we can use\n any column in `df`. 
This method preserves the original DataFrame's\n index in the result.\n\n >>> df.join(other.set_index('key'), on='key')\n key A B\n 0 K0 A0 B0\n 1 K1 A1 B1\n 2 K2 A2 B2\n 3 K3 A3 NaN\n 4 K4 A4 NaN\n 5 K5 A5 NaN\n \"\"\"\n return self._join_compat(\n other, on=on, how=how, lsuffix=lsuffix, rsuffix=rsuffix, sort=sort\n )\n\n def _join_compat(\n self, other, on=None, how=\"left\", lsuffix=\"\", rsuffix=\"\", sort=False\n ):\n from pandas.core.reshape.concat import concat\n from pandas.core.reshape.merge import merge\n\n if isinstance(other, Series):\n if other.name is None:\n raise ValueError(\"Other Series must have a name\")\n other = DataFrame({other.name: other})\n\n if isinstance(other, DataFrame):\n if how == \"cross\":\n return merge(\n self,\n other,\n how=how,\n on=on,\n suffixes=(lsuffix, rsuffix),\n sort=sort,\n )\n return merge(\n self,\n other,\n left_on=on,\n how=how,\n left_index=on is None,\n right_index=True,\n suffixes=(lsuffix, rsuffix),\n sort=sort,\n )\n else:\n if on is not None:\n raise ValueError(\n \"Joining multiple DataFrames only supported for joining on index\"\n )\n\n frames = [self] + list(other)\n\n can_concat = all(df.index.is_unique for df in frames)\n\n # join indexes only using concat\n if can_concat:\n if how == \"left\":\n res = concat(\n frames, axis=1, join=\"outer\", verify_integrity=True, sort=sort\n )\n return res.reindex(self.index, copy=False)\n else:\n return concat(\n frames, axis=1, join=how, verify_integrity=True, sort=sort\n )\n\n joined = frames[0]\n\n for frame in frames[1:]:\n joined = merge(\n joined, frame, how=how, left_index=True, right_index=True\n )\n\n return joined\n\n @Substitution(\"\")\n @Appender(_merge_doc, indents=2)\n def merge(\n self,\n right,\n how=\"inner\",\n on=None,\n left_on=None,\n right_on=None,\n left_index=False,\n right_index=False,\n sort=False,\n suffixes=(\"_x\", \"_y\"),\n copy=True,\n indicator=False,\n validate=None,\n ) -> DataFrame:\n from pandas.core.reshape.merge import merge\n\n return merge(\n self,\n right,\n how=how,\n on=on,\n left_on=left_on,\n right_on=right_on,\n left_index=left_index,\n right_index=right_index,\n sort=sort,\n suffixes=suffixes,\n copy=copy,\n indicator=indicator,\n validate=validate,\n )\n\n def round(self, decimals=0, *args, **kwargs) -> DataFrame:\n \"\"\"\n Round a DataFrame to a variable number of decimal places.\n\n Parameters\n ----------\n decimals : int, dict, Series\n Number of decimal places to round each column to. If an int is\n given, round each column to the same number of places.\n Otherwise dict and Series round to variable numbers of places.\n Column names should be in the keys if `decimals` is a\n dict-like, or in the index if `decimals` is a Series. Any\n columns not included in `decimals` will be left as is. Elements\n of `decimals` which are not columns of the input will be\n ignored.\n *args\n Additional keywords have no effect but might be accepted for\n compatibility with numpy.\n **kwargs\n Additional keywords have no effect but might be accepted for\n compatibility with numpy.\n\n Returns\n -------\n DataFrame\n A DataFrame with the affected columns rounded to the specified\n number of decimal places.\n\n See Also\n --------\n numpy.around : Round a numpy array to the given number of decimals.\n Series.round : Round a Series to the given number of decimals.\n\n Examples\n --------\n >>> df = pd.DataFrame([(.21, .32), (.01, .67), (.66, .03), (.21, .18)],\n ... 
columns=['dogs', 'cats'])\n >>> df\n dogs cats\n 0 0.21 0.32\n 1 0.01 0.67\n 2 0.66 0.03\n 3 0.21 0.18\n\n By providing an integer each column is rounded to the same number\n of decimal places\n\n >>> df.round(1)\n dogs cats\n 0 0.2 0.3\n 1 0.0 0.7\n 2 0.7 0.0\n 3 0.2 0.2\n\n With a dict, the number of places for specific columns can be\n specified with the column names as key and the number of decimal\n places as value\n\n >>> df.round({'dogs': 1, 'cats': 0})\n dogs cats\n 0 0.2 0.0\n 1 0.0 1.0\n 2 0.7 0.0\n 3 0.2 0.0\n\n Using a Series, the number of places for specific columns can be\n specified with the column names as index and the number of\n decimal places as value\n\n >>> decimals = pd.Series([0, 1], index=['cats', 'dogs'])\n >>> df.round(decimals)\n dogs cats\n 0 0.2 0.0\n 1 0.0 1.0\n 2 0.7 0.0\n 3 0.2 0.0\n \"\"\"\n from pandas.core.reshape.concat import concat\n\n def _dict_round(df, decimals):\n for col, vals in df.items():\n try:\n yield _series_round(vals, decimals[col])\n except KeyError:\n yield vals\n\n def _series_round(s, decimals):\n if is_integer_dtype(s) or is_float_dtype(s):\n return s.round(decimals)\n return s\n\n nv.validate_round(args, kwargs)\n\n if isinstance(decimals, (dict, Series)):\n if isinstance(decimals, Series):\n if not decimals.index.is_unique:\n raise ValueError(\"Index of decimals must be unique\")\n new_cols = list(_dict_round(self, decimals))\n elif is_integer(decimals):\n # Dispatch to Series.round\n new_cols = [_series_round(v, decimals) for _, v in self.items()]\n else:\n raise TypeError(\"decimals must be an integer, a dict-like or a Series\")\n\n if len(new_cols) > 0:\n return self._constructor(\n concat(new_cols, axis=1), index=self.index, columns=self.columns\n )\n else:\n return self\n\n # ----------------------------------------------------------------------\n # Statistical methods, etc.\n\n def corr(self, method=\"pearson\", min_periods=1) -> DataFrame:\n \"\"\"\n Compute pairwise correlation of columns, excluding NA/null values.\n\n Parameters\n ----------\n method : {'pearson', 'kendall', 'spearman'} or callable\n Method of correlation:\n\n * pearson : standard correlation coefficient\n * kendall : Kendall Tau correlation coefficient\n * spearman : Spearman rank correlation\n * callable: callable with input two 1d ndarrays\n and returning a float. Note that the returned matrix from corr\n will have 1 along the diagonals and will be symmetric\n regardless of the callable's behavior.\n\n .. versionadded:: 0.24.0\n\n min_periods : int, optional\n Minimum number of observations required per pair of columns\n to have a valid result. Currently only available for Pearson\n and Spearman correlation.\n\n Returns\n -------\n DataFrame\n Correlation matrix.\n\n See Also\n --------\n DataFrame.corrwith : Compute pairwise correlation with another\n DataFrame or Series.\n Series.corr : Compute the correlation between two Series.\n\n Examples\n --------\n >>> def histogram_intersection(a, b):\n ... v = np.minimum(a, b).sum().round(decimals=1)\n ... return v\n >>> df = pd.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],\n ... 
columns=['dogs', 'cats'])\n >>> df.corr(method=histogram_intersection)\n dogs cats\n dogs 1.0 0.3\n cats 0.3 1.0\n \"\"\"\n numeric_df = self._get_numeric_data()\n cols = numeric_df.columns\n idx = cols.copy()\n mat = numeric_df.to_numpy(dtype=float, na_value=np.nan, copy=False)\n\n if method == \"pearson\":\n correl = libalgos.nancorr(mat, minp=min_periods)\n elif method == \"spearman\":\n correl = libalgos.nancorr_spearman(mat, minp=min_periods)\n elif method == \"kendall\" or callable(method):\n if min_periods is None:\n min_periods = 1\n mat = mat.T\n corrf = nanops.get_corr_func(method)\n K = len(cols)\n correl = np.empty((K, K), dtype=float)\n mask = np.isfinite(mat)\n for i, ac in enumerate(mat):\n for j, bc in enumerate(mat):\n if i > j:\n continue\n\n valid = mask[i] & mask[j]\n if valid.sum() < min_periods:\n c = np.nan\n elif i == j:\n c = 1.0\n elif not valid.all():\n c = corrf(ac[valid], bc[valid])\n else:\n c = corrf(ac, bc)\n correl[i, j] = c\n correl[j, i] = c\n else:\n raise ValueError(\n \"method must be either 'pearson', \"\n \"'spearman', 'kendall', or a callable, \"\n f\"'{method}' was supplied\"\n )\n\n return self._constructor(correl, index=idx, columns=cols)\n\n def cov(\n self, min_periods: Optional[int] = None, ddof: Optional[int] = 1\n ) -> DataFrame:\n \"\"\"\n Compute pairwise covariance of columns, excluding NA/null values.\n\n Compute the pairwise covariance among the series of a DataFrame.\n The returned data frame is the `covariance matrix\n <https://en.wikipedia.org/wiki/Covariance_matrix>`__ of the columns\n of the DataFrame.\n\n Both NA and null values are automatically excluded from the\n calculation. (See the note below about bias from missing values.)\n A threshold can be set for the minimum number of\n observations for each value created. Comparisons with observations\n below this threshold will be returned as ``NaN``.\n\n This method is generally used for the analysis of time series data to\n understand the relationship between different measures\n across time.\n\n Parameters\n ----------\n min_periods : int, optional\n Minimum number of observations required per pair of columns\n to have a valid result.\n\n ddof : int, default 1\n Delta degrees of freedom. The divisor used in calculations\n is ``N - ddof``, where ``N`` represents the number of elements.\n\n .. versionadded:: 1.1.0\n\n Returns\n -------\n DataFrame\n The covariance matrix of the series of the DataFrame.\n\n See Also\n --------\n Series.cov : Compute covariance with another Series.\n core.window.ExponentialMovingWindow.cov: Exponential weighted sample covariance.\n core.window.Expanding.cov : Expanding sample covariance.\n core.window.Rolling.cov : Rolling sample covariance.\n\n Notes\n -----\n Returns the covariance matrix of the DataFrame's time series.\n The covariance is normalized by N-ddof.\n\n For DataFrames that have Series that are missing data (assuming that\n data is `missing at random\n <https://en.wikipedia.org/wiki/Missing_data#Missing_at_random>`__)\n the returned covariance matrix will be an unbiased estimate\n of the variance and covariance between the member Series.\n\n However, for many applications this estimate may not be acceptable\n because the estimate covariance matrix is not guaranteed to be positive\n semi-definite. This could lead to estimate correlations having\n absolute values which are greater than one, and/or a non-invertible\n covariance matrix. 
See `Estimation of covariance matrices\n <https://en.wikipedia.org/w/index.php?title=Estimation_of_covariance_\n matrices>`__ for more details.\n\n Examples\n --------\n >>> df = pd.DataFrame([(1, 2), (0, 3), (2, 0), (1, 1)],\n ... columns=['dogs', 'cats'])\n >>> df.cov()\n dogs cats\n dogs 0.666667 -1.000000\n cats -1.000000 1.666667\n\n >>> np.random.seed(42)\n >>> df = pd.DataFrame(np.random.randn(1000, 5),\n ... columns=['a', 'b', 'c', 'd', 'e'])\n >>> df.cov()\n a b c d e\n a 0.998438 -0.020161 0.059277 -0.008943 0.014144\n b -0.020161 1.059352 -0.008543 -0.024738 0.009826\n c 0.059277 -0.008543 1.010670 -0.001486 -0.000271\n d -0.008943 -0.024738 -0.001486 0.921297 -0.013692\n e 0.014144 0.009826 -0.000271 -0.013692 0.977795\n\n **Minimum number of periods**\n\n This method also supports an optional ``min_periods`` keyword\n that specifies the required minimum number of non-NA observations for\n each column pair in order to have a valid result:\n\n >>> np.random.seed(42)\n >>> df = pd.DataFrame(np.random.randn(20, 3),\n ... columns=['a', 'b', 'c'])\n >>> df.loc[df.index[:5], 'a'] = np.nan\n >>> df.loc[df.index[5:10], 'b'] = np.nan\n >>> df.cov(min_periods=12)\n a b c\n a 0.316741 NaN -0.150812\n b NaN 1.248003 0.191417\n c -0.150812 0.191417 0.895202\n \"\"\"\n numeric_df = self._get_numeric_data()\n cols = numeric_df.columns\n idx = cols.copy()\n mat = numeric_df.to_numpy(dtype=float, na_value=np.nan, copy=False)\n\n if notna(mat).all():\n if min_periods is not None and min_periods > len(mat):\n base_cov = np.empty((mat.shape[1], mat.shape[1]))\n base_cov.fill(np.nan)\n else:\n base_cov = np.cov(mat.T, ddof=ddof)\n base_cov = base_cov.reshape((len(cols), len(cols)))\n else:\n base_cov = libalgos.nancorr(mat, cov=True, minp=min_periods)\n\n return self._constructor(base_cov, index=idx, columns=cols)\n\n def corrwith(self, other, axis=0, drop=False, method=\"pearson\") -> Series:\n \"\"\"\n Compute pairwise correlation.\n\n Pairwise correlation is computed between rows or columns of\n DataFrame with rows or columns of Series or DataFrame. DataFrames\n are first aligned along both axes before computing the\n correlations.\n\n Parameters\n ----------\n other : DataFrame, Series\n Object with which to compute correlations.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to use. 0 or 'index' to compute column-wise, 1 or 'columns' for\n row-wise.\n drop : bool, default False\n Drop missing indices from result.\n method : {'pearson', 'kendall', 'spearman'} or callable\n Method of correlation:\n\n * pearson : standard correlation coefficient\n * kendall : Kendall Tau correlation coefficient\n * spearman : Spearman rank correlation\n * callable: callable with input two 1d ndarrays\n and returning a float.\n\n .. 
versionadded:: 0.24.0\n\n Returns\n -------\n Series\n Pairwise correlations.\n\n See Also\n --------\n DataFrame.corr : Compute pairwise correlation of columns.\n \"\"\"\n axis = self._get_axis_number(axis)\n this = self._get_numeric_data()\n\n if isinstance(other, Series):\n return this.apply(lambda x: other.corr(x, method=method), axis=axis)\n\n other = other._get_numeric_data()\n left, right = this.align(other, join=\"inner\", copy=False)\n\n if axis == 1:\n left = left.T\n right = right.T\n\n if method == \"pearson\":\n # mask missing values\n left = left + right * 0\n right = right + left * 0\n\n # demeaned data\n ldem = left - left.mean()\n rdem = right - right.mean()\n\n num = (ldem * rdem).sum()\n dom = (left.count() - 1) * left.std() * right.std()\n\n correl = num / dom\n\n elif method in [\"kendall\", \"spearman\"] or callable(method):\n\n def c(x):\n return nanops.nancorr(x[0], x[1], method=method)\n\n correl = self._constructor_sliced(\n map(c, zip(left.values.T, right.values.T)), index=left.columns\n )\n\n else:\n raise ValueError(\n f\"Invalid method {method} was passed, \"\n \"valid methods are: 'pearson', 'kendall', \"\n \"'spearman', or callable\"\n )\n\n if not drop:\n # Find non-matching labels along the given axis\n # and append missing correlations (GH 22375)\n raxis = 1 if axis == 0 else 0\n result_index = this._get_axis(raxis).union(other._get_axis(raxis))\n idx_diff = result_index.difference(correl.index)\n\n if len(idx_diff) > 0:\n correl = correl.append(Series([np.nan] * len(idx_diff), index=idx_diff))\n\n return correl\n\n # ----------------------------------------------------------------------\n # ndarray-like stats methods\n\n def count(self, axis=0, level=None, numeric_only=False):\n \"\"\"\n Count non-NA cells for each column or row.\n\n The values `None`, `NaN`, `NaT`, and optionally `numpy.inf` (depending\n on `pandas.options.mode.use_inf_as_na`) are considered NA.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns'}, default 0\n If 0 or 'index' counts are generated for each column.\n If 1 or 'columns' counts are generated for each row.\n level : int or str, optional\n If the axis is a `MultiIndex` (hierarchical), count along a\n particular `level`, collapsing into a `DataFrame`.\n A `str` specifies the level name.\n numeric_only : bool, default False\n Include only `float`, `int` or `boolean` data.\n\n Returns\n -------\n Series or DataFrame\n For each column/row the number of non-NA/null entries.\n If `level` is specified returns a `DataFrame`.\n\n See Also\n --------\n Series.count: Number of non-NA elements in a Series.\n DataFrame.value_counts: Count unique combinations of columns.\n DataFrame.shape: Number of DataFrame rows and columns (including NA\n elements).\n DataFrame.isna: Boolean same-sized DataFrame showing places of NA\n elements.\n\n Examples\n --------\n Constructing DataFrame from a dictionary:\n\n >>> df = pd.DataFrame({\"Person\":\n ... [\"John\", \"Myla\", \"Lewis\", \"John\", \"Myla\"],\n ... \"Age\": [24., np.nan, 21., 33, 26],\n ... 
\"Single\": [False, True, True, True, False]})\n >>> df\n Person Age Single\n 0 John 24.0 False\n 1 Myla NaN True\n 2 Lewis 21.0 True\n 3 John 33.0 True\n 4 Myla 26.0 False\n\n Notice the uncounted NA values:\n\n >>> df.count()\n Person 5\n Age 4\n Single 5\n dtype: int64\n\n Counts for each **row**:\n\n >>> df.count(axis='columns')\n 0 3\n 1 2\n 2 3\n 3 3\n 4 3\n dtype: int64\n\n Counts for one level of a `MultiIndex`:\n\n >>> df.set_index([\"Person\", \"Single\"]).count(level=\"Person\")\n Age\n Person\n John 2\n Lewis 1\n Myla 1\n \"\"\"\n axis = self._get_axis_number(axis)\n if level is not None:\n return self._count_level(level, axis=axis, numeric_only=numeric_only)\n\n if numeric_only:\n frame = self._get_numeric_data()\n else:\n frame = self\n\n # GH #423\n if len(frame._get_axis(axis)) == 0:\n result = self._constructor_sliced(0, index=frame._get_agg_axis(axis))\n else:\n if frame._is_mixed_type or frame._mgr.any_extension_types:\n # the or any_extension_types is really only hit for single-\n # column frames with an extension array\n result = notna(frame).sum(axis=axis)\n else:\n # GH13407\n series_counts = notna(frame).sum(axis=axis)\n counts = series_counts.values\n result = self._constructor_sliced(\n counts, index=frame._get_agg_axis(axis)\n )\n\n return result.astype(\"int64\")\n\n def _count_level(self, level, axis=0, numeric_only=False):\n if numeric_only:\n frame = self._get_numeric_data()\n else:\n frame = self\n\n count_axis = frame._get_axis(axis)\n agg_axis = frame._get_agg_axis(axis)\n\n if not isinstance(count_axis, MultiIndex):\n raise TypeError(\n f\"Can only count levels on hierarchical {self._get_axis_name(axis)}.\"\n )\n\n # Mask NaNs: Mask rows or columns where the index level is NaN, and all\n # values in the DataFrame that are NaN\n if frame._is_mixed_type:\n # Since we have mixed types, calling notna(frame.values) might\n # upcast everything to object\n values_mask = notna(frame).values\n else:\n # But use the speedup when we have homogeneous dtypes\n values_mask = notna(frame.values)\n\n index_mask = notna(count_axis.get_level_values(level=level))\n if axis == 1:\n mask = index_mask & values_mask\n else:\n mask = index_mask.reshape(-1, 1) & values_mask\n\n if isinstance(level, str):\n level = count_axis._get_level_number(level)\n\n level_name = count_axis._names[level]\n level_index = count_axis.levels[level]._shallow_copy(name=level_name)\n level_codes = ensure_int64(count_axis.codes[level])\n counts = lib.count_level_2d(mask, level_codes, len(level_index), axis=axis)\n\n if axis == 1:\n result = self._constructor(counts, index=agg_axis, columns=level_index)\n else:\n result = self._constructor(counts, index=level_index, columns=agg_axis)\n\n return result\n\n def _reduce(\n self,\n op,\n name: str,\n *,\n axis=0,\n skipna=True,\n numeric_only=None,\n filter_type=None,\n **kwds,\n ):\n\n assert filter_type is None or filter_type == \"bool\", filter_type\n out_dtype = \"bool\" if filter_type == \"bool\" else None\n\n own_dtypes = [arr.dtype for arr in self._iter_column_arrays()]\n\n dtype_is_dt = np.array(\n [is_datetime64_any_dtype(dtype) for dtype in own_dtypes],\n dtype=bool,\n )\n if numeric_only is None and name in [\"mean\", \"median\"] and dtype_is_dt.any():\n warnings.warn(\n \"DataFrame.mean and DataFrame.median with numeric_only=None \"\n \"will include datetime64 and datetime64tz columns in a \"\n \"future version.\",\n FutureWarning,\n stacklevel=5,\n )\n cols = self.columns[~dtype_is_dt]\n self = self[cols]\n\n # TODO: Make other agg func handle 
axis=None properly GH#21597\n axis = self._get_axis_number(axis)\n labels = self._get_agg_axis(axis)\n assert axis in [0, 1]\n\n def func(values):\n if is_extension_array_dtype(values.dtype):\n return extract_array(values)._reduce(name, skipna=skipna, **kwds)\n else:\n return op(values, axis=axis, skipna=skipna, **kwds)\n\n def blk_func(values):\n if isinstance(values, ExtensionArray):\n return values._reduce(name, skipna=skipna, **kwds)\n else:\n return op(values, axis=1, skipna=skipna, **kwds)\n\n def _get_data() -> DataFrame:\n if filter_type is None:\n data = self._get_numeric_data()\n else:\n # GH#25101, GH#24434\n assert filter_type == \"bool\"\n data = self._get_bool_data()\n return data\n\n if numeric_only is not None or axis == 0:\n # For numeric_only non-None and axis non-None, we know\n # which blocks to use and no try/except is needed.\n # For numeric_only=None only the case with axis==0 and no object\n # dtypes are unambiguous can be handled with BlockManager.reduce\n # Case with EAs see GH#35881\n df = self\n if numeric_only is True:\n df = _get_data()\n if axis == 1:\n df = df.T\n axis = 0\n\n ignore_failures = numeric_only is None\n\n # After possibly _get_data and transposing, we are now in the\n # simple case where we can use BlockManager.reduce\n res, indexer = df._mgr.reduce(blk_func, ignore_failures=ignore_failures)\n out = df._constructor(res).iloc[0]\n if out_dtype is not None:\n out = out.astype(out_dtype)\n if axis == 0 and is_object_dtype(out.dtype):\n # GH#35865 careful to cast explicitly to object\n nvs = coerce_to_dtypes(out.values, df.dtypes.iloc[np.sort(indexer)])\n out[:] = np.array(nvs, dtype=object)\n if axis == 0 and len(self) == 0 and name in [\"sum\", \"prod\"]:\n # Even if we are object dtype, follow numpy and return\n # float64, see test_apply_funcs_over_empty\n out = out.astype(np.float64)\n return out\n\n assert numeric_only is None\n\n data = self\n values = data.values\n\n try:\n result = func(values)\n\n except TypeError:\n # e.g. in nanops trying to convert strs to float\n\n data = _get_data()\n labels = data._get_agg_axis(axis)\n\n values = data.values\n with np.errstate(all=\"ignore\"):\n result = func(values)\n\n if filter_type == \"bool\" and notna(result).all():\n result = result.astype(np.bool_)\n elif filter_type is None and is_object_dtype(result.dtype):\n try:\n result = result.astype(np.float64)\n except (ValueError, TypeError):\n # try to coerce to the original dtypes item by item if we can\n if axis == 0:\n result = coerce_to_dtypes(result, data.dtypes)\n\n result = self._constructor_sliced(result, index=labels)\n return result\n\n def nunique(self, axis=0, dropna=True) -> Series:\n \"\"\"\n Count distinct observations over requested axis.\n\n Return Series with number of distinct observations. Can ignore NaN\n values.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to use. 
0 or 'index' for row-wise, 1 or 'columns' for\n column-wise.\n dropna : bool, default True\n Don't include NaN in the counts.\n\n Returns\n -------\n Series\n\n See Also\n --------\n Series.nunique: Method nunique for Series.\n DataFrame.count: Count non-NA cells for each column or row.\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [1, 1, 1]})\n >>> df.nunique()\n A 3\n B 1\n dtype: int64\n\n >>> df.nunique(axis=1)\n 0 1\n 1 2\n 2 2\n dtype: int64\n \"\"\"\n return self.apply(Series.nunique, axis=axis, dropna=dropna)\n\n def idxmin(self, axis=0, skipna=True) -> Series:\n \"\"\"\n Return index of first occurrence of minimum over requested axis.\n\n NA/null values are excluded.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise.\n skipna : bool, default True\n Exclude NA/null values. If an entire row/column is NA, the result\n will be NA.\n\n Returns\n -------\n Series\n Indexes of minima along the specified axis.\n\n Raises\n ------\n ValueError\n * If the row/column is empty\n\n See Also\n --------\n Series.idxmin : Return index of the minimum element.\n\n Notes\n -----\n This method is the DataFrame version of ``ndarray.argmin``.\n\n Examples\n --------\n Consider a dataset containing food consumption in Argentina.\n\n >>> df = pd.DataFrame({'consumption': [10.51, 103.11, 55.48],\n ... 'co2_emissions': [37.2, 19.66, 1712]},\n ... index=['Pork', 'Wheat Products', 'Beef'])\n\n >>> df\n consumption co2_emissions\n Pork 10.51 37.20\n Wheat Products 103.11 19.66\n Beef 55.48 1712.00\n\n By default, it returns the index for the minimum value in each column.\n\n >>> df.idxmin()\n consumption Pork\n co2_emissions Wheat Products\n dtype: object\n\n To return the index for the minimum value in each row, use ``axis=\"columns\"``.\n\n >>> df.idxmin(axis=\"columns\")\n Pork consumption\n Wheat Products co2_emissions\n Beef consumption\n dtype: object\n \"\"\"\n axis = self._get_axis_number(axis)\n\n res = self._reduce(\n nanops.nanargmin, \"argmin\", axis=axis, skipna=skipna, numeric_only=False\n )\n indices = res._values\n\n # indices will always be np.ndarray since axis is not None and\n # values is a 2d array for DataFrame\n # error: Item \"int\" of \"Union[int, Any]\" has no attribute \"__iter__\"\n assert isinstance(indices, np.ndarray) # for mypy\n\n index = self._get_axis(axis)\n result = [index[i] if i >= 0 else np.nan for i in indices]\n return self._constructor_sliced(result, index=self._get_agg_axis(axis))\n\n def idxmax(self, axis=0, skipna=True) -> Series:\n \"\"\"\n Return index of first occurrence of maximum over requested axis.\n\n NA/null values are excluded.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise.\n skipna : bool, default True\n Exclude NA/null values. If an entire row/column is NA, the result\n will be NA.\n\n Returns\n -------\n Series\n Indexes of maxima along the specified axis.\n\n Raises\n ------\n ValueError\n * If the row/column is empty\n\n See Also\n --------\n Series.idxmax : Return index of the maximum element.\n\n Notes\n -----\n This method is the DataFrame version of ``ndarray.argmax``.\n\n Examples\n --------\n Consider a dataset containing food consumption in Argentina.\n\n >>> df = pd.DataFrame({'consumption': [10.51, 103.11, 55.48],\n ... 'co2_emissions': [37.2, 19.66, 1712]},\n ... 
index=['Pork', 'Wheat Products', 'Beef'])\n\n >>> df\n consumption co2_emissions\n Pork 10.51 37.20\n Wheat Products 103.11 19.66\n Beef 55.48 1712.00\n\n By default, it returns the index for the maximum value in each column.\n\n >>> df.idxmax()\n consumption Wheat Products\n co2_emissions Beef\n dtype: object\n\n To return the index for the maximum value in each row, use ``axis=\"columns\"``.\n\n >>> df.idxmax(axis=\"columns\")\n Pork co2_emissions\n Wheat Products consumption\n Beef co2_emissions\n dtype: object\n \"\"\"\n axis = self._get_axis_number(axis)\n\n res = self._reduce(\n nanops.nanargmax, \"argmax\", axis=axis, skipna=skipna, numeric_only=False\n )\n indices = res._values\n\n # indices will always be np.ndarray since axis is not None and\n # values is a 2d array for DataFrame\n # error: Item \"int\" of \"Union[int, Any]\" has no attribute \"__iter__\"\n assert isinstance(indices, np.ndarray) # for mypy\n\n index = self._get_axis(axis)\n result = [index[i] if i >= 0 else np.nan for i in indices]\n return self._constructor_sliced(result, index=self._get_agg_axis(axis))\n\n def _get_agg_axis(self, axis_num: int) -> Index:\n \"\"\"\n Let's be explicit about this.\n \"\"\"\n if axis_num == 0:\n return self.columns\n elif axis_num == 1:\n return self.index\n else:\n raise ValueError(f\"Axis must be 0 or 1 (got {repr(axis_num)})\")\n\n def mode(self, axis=0, numeric_only=False, dropna=True) -> DataFrame:\n \"\"\"\n Get the mode(s) of each element along the selected axis.\n\n The mode of a set of values is the value that appears most often.\n It can be multiple values.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to iterate over while searching for the mode:\n\n * 0 or 'index' : get mode of each column\n * 1 or 'columns' : get mode of each row.\n\n numeric_only : bool, default False\n If True, only apply to numeric columns.\n dropna : bool, default True\n Don't consider counts of NaN/NaT.\n\n .. versionadded:: 0.24.0\n\n Returns\n -------\n DataFrame\n The modes of each column or row.\n\n See Also\n --------\n Series.mode : Return the highest frequency value in a Series.\n Series.value_counts : Return the counts of values in a Series.\n\n Examples\n --------\n >>> df = pd.DataFrame([('bird', 2, 2),\n ... ('mammal', 4, np.nan),\n ... ('arthropod', 8, 0),\n ... ('bird', 2, np.nan)],\n ... index=('falcon', 'horse', 'spider', 'ostrich'),\n ... columns=('species', 'legs', 'wings'))\n >>> df\n species legs wings\n falcon bird 2 2.0\n horse mammal 4 NaN\n spider arthropod 8 0.0\n ostrich bird 2 NaN\n\n By default, missing values are not considered, and the mode of wings\n are both 0 and 2. 
Because the resulting DataFrame has two rows,\n the second row of ``species`` and ``legs`` contains ``NaN``.\n\n >>> df.mode()\n species legs wings\n 0 bird 2.0 0.0\n 1 NaN NaN 2.0\n\n Setting ``dropna=False`` ``NaN`` values are considered and they can be\n the mode (like for wings).\n\n >>> df.mode(dropna=False)\n species legs wings\n 0 bird 2 NaN\n\n Setting ``numeric_only=True``, only the mode of numeric columns is\n computed, and columns of other types are ignored.\n\n >>> df.mode(numeric_only=True)\n legs wings\n 0 2.0 0.0\n 1 NaN 2.0\n\n To compute the mode over columns and not rows, use the axis parameter:\n\n >>> df.mode(axis='columns', numeric_only=True)\n 0 1\n falcon 2.0 NaN\n horse 4.0 NaN\n spider 0.0 8.0\n ostrich 2.0 NaN\n \"\"\"\n data = self if not numeric_only else self._get_numeric_data()\n\n def f(s):\n return s.mode(dropna=dropna)\n\n return data.apply(f, axis=axis)\n\n def quantile(self, q=0.5, axis=0, numeric_only=True, interpolation=\"linear\"):\n \"\"\"\n Return values at the given quantile over requested axis.\n\n Parameters\n ----------\n q : float or array-like, default 0.5 (50% quantile)\n Value between 0 <= q <= 1, the quantile(s) to compute.\n axis : {0, 1, 'index', 'columns'}, default 0\n Equals 0 or 'index' for row-wise, 1 or 'columns' for column-wise.\n numeric_only : bool, default True\n If False, the quantile of datetime and timedelta data will be\n computed as well.\n interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}\n This optional parameter specifies the interpolation method to use,\n when the desired quantile lies between two data points `i` and `j`:\n\n * linear: `i + (j - i) * fraction`, where `fraction` is the\n fractional part of the index surrounded by `i` and `j`.\n * lower: `i`.\n * higher: `j`.\n * nearest: `i` or `j` whichever is nearest.\n * midpoint: (`i` + `j`) / 2.\n\n Returns\n -------\n Series or DataFrame\n\n If ``q`` is an array, a DataFrame will be returned where the\n index is ``q``, the columns are the columns of self, and the\n values are the quantiles.\n If ``q`` is a float, a Series will be returned where the\n index is the columns of self and the values are the quantiles.\n\n See Also\n --------\n core.window.Rolling.quantile: Rolling quantile.\n numpy.percentile: Numpy function to compute the percentile.\n\n Examples\n --------\n >>> df = pd.DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]),\n ... columns=['a', 'b'])\n >>> df.quantile(.1)\n a 1.3\n b 3.7\n Name: 0.1, dtype: float64\n >>> df.quantile([.1, .5])\n a b\n 0.1 1.3 3.7\n 0.5 2.5 55.0\n\n Specifying `numeric_only=False` will also compute the quantile of\n datetime and timedelta data.\n\n >>> df = pd.DataFrame({'A': [1, 2],\n ... 'B': [pd.Timestamp('2010'),\n ... pd.Timestamp('2011')],\n ... 'C': [pd.Timedelta('1 days'),\n ... 
pd.Timedelta('2 days')]})\n >>> df.quantile(0.5, numeric_only=False)\n A 1.5\n B 2010-07-02 12:00:00\n C 1 days 12:00:00\n Name: 0.5, dtype: object\n \"\"\"\n validate_percentile(q)\n\n data = self._get_numeric_data() if numeric_only else self\n axis = self._get_axis_number(axis)\n is_transposed = axis == 1\n\n if is_transposed:\n data = data.T\n\n if len(data.columns) == 0:\n # GH#23925 _get_numeric_data may have dropped all columns\n cols = Index([], name=self.columns.name)\n if is_list_like(q):\n return self._constructor([], index=q, columns=cols)\n return self._constructor_sliced([], index=cols, name=q, dtype=np.float64)\n\n result = data._mgr.quantile(\n qs=q, axis=1, interpolation=interpolation, transposed=is_transposed\n )\n\n if result.ndim == 2:\n result = self._constructor(result)\n else:\n result = self._constructor_sliced(result, name=q)\n\n if is_transposed:\n result = result.T\n\n return result\n\n def to_timestamp(\n self, freq=None, how: str = \"start\", axis: Axis = 0, copy: bool = True\n ) -> DataFrame:\n \"\"\"\n Cast to DatetimeIndex of timestamps, at *beginning* of period.\n\n Parameters\n ----------\n freq : str, default frequency of PeriodIndex\n Desired frequency.\n how : {'s', 'e', 'start', 'end'}\n Convention for converting period to timestamp; start of period\n vs. end.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to convert (the index by default).\n copy : bool, default True\n If False then underlying input data is not copied.\n\n Returns\n -------\n DataFrame with DatetimeIndex\n \"\"\"\n new_obj = self.copy(deep=copy)\n\n axis_name = self._get_axis_name(axis)\n old_ax = getattr(self, axis_name)\n if not isinstance(old_ax, PeriodIndex):\n raise TypeError(f\"unsupported Type {type(old_ax).__name__}\")\n\n new_ax = old_ax.to_timestamp(freq=freq, how=how)\n\n setattr(new_obj, axis_name, new_ax)\n return new_obj\n\n def to_period(self, freq=None, axis: Axis = 0, copy: bool = True) -> DataFrame:\n \"\"\"\n Convert DataFrame from DatetimeIndex to PeriodIndex.\n\n Convert DataFrame from DatetimeIndex to PeriodIndex with desired\n frequency (inferred from index if not passed).\n\n Parameters\n ----------\n freq : str, default\n Frequency of the PeriodIndex.\n axis : {0 or 'index', 1 or 'columns'}, default 0\n The axis to convert (the index by default).\n copy : bool, default True\n If False then underlying input data is not copied.\n\n Returns\n -------\n DataFrame with PeriodIndex\n \"\"\"\n new_obj = self.copy(deep=copy)\n\n axis_name = self._get_axis_name(axis)\n old_ax = getattr(self, axis_name)\n if not isinstance(old_ax, DatetimeIndex):\n raise TypeError(f\"unsupported Type {type(old_ax).__name__}\")\n\n new_ax = old_ax.to_period(freq=freq)\n\n setattr(new_obj, axis_name, new_ax)\n return new_obj\n\n def isin(self, values) -> DataFrame:\n \"\"\"\n Whether each element in the DataFrame is contained in values.\n\n Parameters\n ----------\n values : iterable, Series, DataFrame or dict\n The result will only be true at a location if all the\n labels match. If `values` is a Series, that's the index. If\n `values` is a dict, the keys must be the column names,\n which must match. 
If `values` is a DataFrame,\n then both the index and column labels must match.\n\n Returns\n -------\n DataFrame\n DataFrame of booleans showing whether each element in the DataFrame\n is contained in values.\n\n See Also\n --------\n DataFrame.eq: Equality test for DataFrame.\n Series.isin: Equivalent method on Series.\n Series.str.contains: Test if pattern or regex is contained within a\n string of a Series or Index.\n\n Examples\n --------\n >>> df = pd.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]},\n ... index=['falcon', 'dog'])\n >>> df\n num_legs num_wings\n falcon 2 2\n dog 4 0\n\n When ``values`` is a list check whether every value in the DataFrame\n is present in the list (which animals have 0 or 2 legs or wings)\n\n >>> df.isin([0, 2])\n num_legs num_wings\n falcon True True\n dog False True\n\n When ``values`` is a dict, we can pass values to check for each\n column separately:\n\n >>> df.isin({'num_wings': [0, 3]})\n num_legs num_wings\n falcon False False\n dog False True\n\n When ``values`` is a Series or DataFrame the index and column must\n match. Note that 'falcon' does not match based on the number of legs\n in df2.\n\n >>> other = pd.DataFrame({'num_legs': [8, 2], 'num_wings': [0, 2]},\n ... index=['spider', 'falcon'])\n >>> df.isin(other)\n num_legs num_wings\n falcon True True\n dog False False\n \"\"\"\n if isinstance(values, dict):\n from pandas.core.reshape.concat import concat\n\n values = collections.defaultdict(list, values)\n return concat(\n (\n self.iloc[:, [i]].isin(values[col])\n for i, col in enumerate(self.columns)\n ),\n axis=1,\n )\n elif isinstance(values, Series):\n if not values.index.is_unique:\n raise ValueError(\"cannot compute isin with a duplicate axis.\")\n return self.eq(values.reindex_like(self), axis=\"index\")\n elif isinstance(values, DataFrame):\n if not (values.columns.is_unique and values.index.is_unique):\n raise ValueError(\"cannot compute isin with a duplicate axis.\")\n return self.eq(values.reindex_like(self))\n else:\n if not is_list_like(values):\n raise TypeError(\n \"only list-like or dict-like objects are allowed \"\n \"to be passed to DataFrame.isin(), \"\n f\"you passed a '{type(values).__name__}'\"\n )\n return self._constructor(\n algorithms.isin(self.values.ravel(), values).reshape(self.shape),\n self.index,\n self.columns,\n )\n\n # ----------------------------------------------------------------------\n # Add index and columns\n _AXIS_ORDERS = [\"index\", \"columns\"]\n _AXIS_TO_AXIS_NUMBER: Dict[Axis, int] = {\n **NDFrame._AXIS_TO_AXIS_NUMBER,\n 1: 1,\n \"columns\": 1,\n }\n _AXIS_REVERSED = True\n _AXIS_LEN = len(_AXIS_ORDERS)\n _info_axis_number = 1\n _info_axis_name = \"columns\"\n\n index: Index = properties.AxisProperty(\n axis=1, doc=\"The index (row labels) of the DataFrame.\"\n )\n columns: Index = properties.AxisProperty(\n axis=0, doc=\"The column labels of the DataFrame.\"\n )\n\n @property\n def _AXIS_NUMBERS(self) -> Dict[str, int]:\n \"\"\".. deprecated:: 1.1.0\"\"\"\n super()._AXIS_NUMBERS\n return {\"index\": 0, \"columns\": 1}\n\n @property\n def _AXIS_NAMES(self) -> Dict[int, str]:\n \"\"\".. 
deprecated:: 1.1.0\"\"\"\n super()._AXIS_NAMES\n return {0: \"index\", 1: \"columns\"}\n\n # ----------------------------------------------------------------------\n # Add plotting methods to DataFrame\n plot = CachedAccessor(\"plot\", pandas.plotting.PlotAccessor)\n hist = pandas.plotting.hist_frame\n boxplot = pandas.plotting.boxplot_frame\n sparse = CachedAccessor(\"sparse\", SparseFrameAccessor)\n\n\nDataFrame._add_numeric_operations()\n\nops.add_flex_arithmetic_methods(DataFrame)\n\n\ndef _from_nested_dict(data) -> collections.defaultdict:\n new_data: collections.defaultdict = collections.defaultdict(dict)\n for index, s in data.items():\n for col, v in s.items():\n new_data[col][index] = v\n return new_data\n" ]
[ [ "pandas.core.generic.NDFrame._set_item", "pandas.core.reshape.melt.melt", "pandas.core.reshape.concat.concat", "numpy.asarray", "pandas.core.indexing.convert_to_index_sliceable", "pandas.core.nanops.nancorr", "pandas.core.dtypes.cast.maybe_infer_to_datetimelike", "pandas.core.dtypes.cast.infer_dtype_from_scalar", "pandas.io.feather_format.to_feather", "pandas.core.computation.expressions.where", "numpy.transpose", "pandas.core.algorithms.SelectNFrame", "pandas.io.common.get_handle", "pandas.util._decorators.deprecate_kwarg", "pandas.core.dtypes.common.is_datetime64_any_dtype", "pandas.core.ops.align_method_FRAME", "pandas.core.indexes.api.Index", "pandas.core.ops.frame_arith_method_with_reindex", "pandas.core.dtypes.common.is_dataclass", "pandas.core.generic.NDFrame._iset_item", "pandas._libs.hashtable.duplicated_int64", "pandas.core.dtypes.common.is_named_tuple", "pandas.io.formats.console.in_ipython_frontend", "pandas.core.dtypes.cast.find_common_type", "pandas.core.indexing.check_bool_indexer", "pandas.core.indexes.multi.MultiIndex.from_arrays", "pandas.core.dtypes.common.is_scalar", "pandas.core.dtypes.cast.coerce_to_dtypes", "pandas.util._decorators.Appender", "pandas.io.formats.format.DataFrameFormatter", "numpy.errstate", "numpy.rec.fromarrays", "pandas.io.formats.console.get_console_size", "pandas.core.reshape.reshape.unstack", "numpy.array", "numpy.dot", "pandas.core.dtypes.common.is_sequence", "pandas.option_context", "pandas.core.reshape.reshape.stack", "pandas.core.dtypes.common.ensure_int64", "pandas.core.dtypes.common.is_integer", "pandas.core.aggregation.relabel_result", "pandas.core.internals.construction.dataclasses_to_dicts", "numpy.cov", "pandas.compat.numpy.function.validate_round", "pandas.core.dtypes.common.is_hashable", "pandas.core.reshape.reshape.stack_multiple", "pandas.io.stata.StataWriterUTF8", "pandas.core.aggregation.aggregate", "pandas._libs.properties.AxisProperty", "pandas.core.aggregation.transform", "pandas._libs.lib.item_from_zerodim", "pandas.core.ops.fill_binop", "pandas.core.dtypes.common.ensure_platform_int", "pandas.core.dtypes.missing.isna", "pandas.core.ops.should_reindex_frame_op", "pandas.util._validators.validate_axis_style_args", "pandas.core.dtypes.cast.validate_numeric_casting", "pandas.core.dtypes.common.is_object_dtype", "pandas.core.internals.construction.arrays_to_mgr", "pandas.core.reshape.pivot.pivot", "pandas.core.dtypes.cast.maybe_upcast", "pandas.core.internals.construction.init_ndarray", "pandas.core.dtypes.cast.maybe_box_datetimelike", "pandas.core.dtypes.common.is_integer_dtype", "pandas.core.dtypes.cast.maybe_casted_values", "pandas.core.dtypes.common.is_dict_like", "pandas.core.aggregation.reconstruct_func", "pandas._libs.algos.nancorr", "pandas.util._decorators.doc", "pandas.core.construction.extract_array", "pandas.core.dtypes.common.is_list_like", "numpy.ma.getmaskarray", "pandas.util._decorators.rewrite_axis_style_signature", "numpy.iterable", "pandas.core.algorithms.take_2d_multi", "pandas.io.formats.console.in_interactive_session", "pandas.io.parquet.to_parquet", "pandas.core.apply.frame_apply", "pandas.util._validators.validate_percentile", "pandas.core.common.asarray_tuplesafe", "pandas.compat.numpy.function.validate_transpose", "pandas.core.ops.get_array_op", "pandas.core.internals.construction.masked_rec_array_to_mgr", "pandas.core.common.apply_if_callable", "numpy.sort", "pandas.core.internals.construction.sanitize_index", "pandas._libs.algos.nancorr_spearman", "pandas.core.ops.add_flex_arithmetic_methods", 
"pandas.io.formats.style.Styler", "pandas.core.internals.construction.get_names_from_index", "pandas.core.sorting.nargsort", "pandas.core.series.Series", "pandas.util._validators.validate_bool_kwarg", "pandas.io.formats.format.DataFrameRenderer", "pandas.io.formats.info.DataFrameInfo", "pandas.core.nanops.get_corr_func", "pandas.core.computation.eval.eval", "pandas.core.indexes.api.ensure_index_from_sequences", "pandas.core.accessor.CachedAccessor", "pandas.core.dtypes.common.is_float_dtype", "pandas.core.dtypes.cast.maybe_cast_to_datetime", "pandas.io.gbq.to_gbq", "pandas.core.sorting.lexsort_indexer", "pandas.core.dtypes.common.pandas_dtype", "pandas._libs.lib.maybe_convert_objects", "numpy.isfinite", "pandas.core.dtypes.common.is_float", "pandas._libs.lib.map_infer", "pandas.core.dtypes.common.infer_dtype_from_object", "pandas.core.indexes.api.ensure_index", "pandas.core.reshape.pivot.pivot_table", "pandas.core.dtypes.common.is_iterator", "pandas.core.groupby.generic.DataFrameGroupBy", "pandas.core.dtypes.cast.maybe_downcast_to_dtype", "pandas.core.dtypes.missing.notna", "pandas.core.dtypes.cast.invalidate_string_dtypes", "pandas.core.internals.construction.to_arrays", "pandas.core.generic.NDFrame.__init__", "numpy.where", "pandas.core.dtypes.common.is_bool_dtype", "pandas._config.get_option", "pandas.core.common.is_bool_indexer", "pandas.core.internals.construction.init_dict", "pandas.core.reshape.merge.merge", "pandas.core.dtypes.common.is_extension_array_dtype", "pandas.core.internals.construction.reorder_arrays", "pandas.util._decorators.Substitution", "numpy.compress", "pandas.core.common.standardize_mapping", "numpy.empty", "pandas.core.dtypes.common.is_dtype_equal", "pandas.compat._optional.import_optional_dependency", "numpy.shape", "pandas.core.indexes.multi.maybe_droplevels", "pandas.core.sorting.get_group_index", "pandas.core.dtypes.cast.maybe_convert_platform", "numpy.full" ] ]
gwtaylor/pyautodiff
[ "7973e26f1c233570ed4bb10d08634ec7378e2152" ]
[ "autodiff/examples/svm.py" ]
[ "\"\"\"\nLinear SVM\n==========\n\nThis script fits a linear support vector machine classifier to random data. It\nillustrates how a function defined purely by NumPy operations can be minimized\ndirectly with a gradient-based solver.\n\n\"\"\"\nimport numpy as np\nfrom autodiff.optimize import fmin_l_bfgs_b\n\n\ndef test_svm():\n rng = np.random.RandomState(1)\n\n # -- create some fake data\n x = rng.rand(10, 5)\n y = 2 * (rng.rand(10) > 0.5) - 1\n l2_regularization = 1e-4\n\n # -- loss function\n def loss_fn(weights, bias):\n margin = y * (np.dot(x, weights) + bias)\n loss = np.maximum(0, 1 - margin) ** 2\n l2_cost = 0.5 * l2_regularization * np.dot(weights, weights)\n loss = np.mean(loss) + l2_cost\n print('ran loss_fn(), returning {}'.format(loss))\n return loss\n\n # -- call optimizer\n w_0, b_0 = np.zeros(5), np.zeros(())\n w, b = fmin_l_bfgs_b(loss_fn, init_args=(w_0, b_0))\n\n final_loss = loss_fn(w, b)\n\n assert np.allclose(final_loss, 0.7229)\n\n print('optimization successful!')\n\n\nif __name__ == '__main__':\n test_svm()\n" ]
[ [ "numpy.allclose", "numpy.zeros", "numpy.random.RandomState", "numpy.maximum", "numpy.dot", "numpy.mean" ] ]
hz-ants/CDPN-source-
[ "625f9a80858f8a2fb9e74f88ea83073495141693" ]
[ "lib/models/resnet_trans_head.py" ]
[ "import torch.nn as nn\nimport torch\n\nclass TransHeadNet(nn.Module):\n def __init__(self, in_channels, num_layers=3, num_filters=256, kernel_size=3, output_dim=3, freeze=False,\n with_bias_end=True):\n super(TransHeadNet, self).__init__()\n\n self.freeze = freeze\n\n if kernel_size == 3:\n padding = 1\n elif kernel_size == 2:\n padding = 0\n\n self.features = nn.ModuleList()\n for i in range(num_layers):\n _in_channels = in_channels if i == 0 else num_filters\n self.features.append(nn.Conv2d(_in_channels, num_filters, kernel_size=kernel_size, stride=1, padding=padding, bias=False))\n self.features.append(nn.BatchNorm2d(num_filters))\n self.features.append(nn.ReLU(inplace=True))\n\n self.linears = nn.ModuleList()\n self.linears.append(nn.Linear(256 * 8 * 8, 4096))\n self.linears.append(nn.ReLU(inplace=True))\n self.linears.append(nn.Linear(4096, 4096))\n self.linears.append(nn.ReLU(inplace=True))\n self.linears.append(nn.Linear(4096, output_dim))\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.normal_(m.weight, mean=0, std=0.001)\n if with_bias_end and (m.bias is not None):\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.ConvTranspose2d):\n nn.init.normal_(m.weight, mean=0, std=0.001)\n elif isinstance(m, nn.Linear):\n nn.init.normal_(m.weight, mean=0, std=0.001)\n\n def forward(self, x):\n if self.freeze:\n with torch.no_grad():\n for i, l in enumerate(self.features):\n x = l(x)\n x = x.view(-1, 256*8*8)\n for i, l in enumerate(self.linears):\n x = l(x)\n return x.detach()\n else:\n for i, l in enumerate(self.features):\n x = l(x)\n x = x.view(-1, 256*8*8)\n for i, l in enumerate(self.linears):\n x = l(x)\n return x\n\n" ]
[ [ "torch.nn.BatchNorm2d", "torch.nn.Linear", "torch.nn.init.constant_", "torch.no_grad", "torch.nn.init.normal_", "torch.nn.ModuleList", "torch.nn.Conv2d", "torch.nn.ReLU" ] ]
sejaldua/duolingogogo
[ "226a2a9417238f9c3f0ce738d491b58cdf4dcbdc" ]
[ "app.py" ]
[ "import streamlit as st\nimport pandas as pd\nimport yaml\nimport duolingo\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager\nfrom datetime import timezone, timedelta\nmatplotlib.rcParams['font.family'] = ['Source Han Sans CN']\n\nwith open(\"duo_credentials.yaml\", 'r') as stream:\n creds = yaml.safe_load(stream)\n\nlingo = duolingo.Duolingo(creds['username'], creds['password'])\nst.write(\"Hello :wave: \" + lingo.get_user_info()['username'])\n\nstreak = lingo.get_streak_info()\nxp = lingo.get_daily_xp_progress()\n\nst.header(\"Calendar\")\ncal = lingo.get_calendar('zs')\ncal_df = pd.DataFrame.from_records(cal)\n# creating new datetime-based features\n# cal_df['timestamp'] = cal_df['datetime'].apply(lambda x: pytz.timezone(\"America/New_York\").localize(pd.to_datetime(x, unit='ms'), is_dst=None))\ncal_df['timestamp'] = cal_df['datetime'].apply(lambda x: pd.to_datetime(x, unit='ms') - timedelta(hours=4))\ncal_df['year'] = cal_df.timestamp.dt.year\ncal_df['month'] = cal_df.timestamp.dt.month\ncal_df['hour'] = cal_df.timestamp.dt.hour\ncal_df['weekday'] = cal_df.timestamp.dt.day_name()\ncal_df['week_num'] = cal_df['timestamp'].apply(lambda x: x.isocalendar()[1] % 52)\n\n# get weekday_num in order of MTWTFSS because we want to sort the rows of the heatmap in order\nweekday_order = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']\nmapping = {k: v for k, v in zip(weekday_order, [i+1 for i in range(7)])}\ncal_df['weekday_num'] = cal_df['weekday'].apply(lambda x: mapping[x])\n# st.dataframe(cal_df)\n\ndf_to_pivot = cal_df[['week_num', 'weekday_num', 'improvement']]\npivoted_data = pd.pivot_table(df_to_pivot, values='improvement', index=['weekday_num'], columns=['week_num'], aggfunc=sum)\npivoted_data = pivoted_data.reindex([i+1 for i in range(max(pivoted_data.columns))], axis=1)\npivoted_data.dropna(axis=1, how='all', inplace=True)\n# st.dataframe(pivoted_data)\n\nfig = plt.figure(figsize=(6,4));\nsns.heatmap(pivoted_data, linewidths=6, cmap='BuGn', cbar=True,\n linecolor='white', square=True, yticklabels=weekday_order);\n # xticklabels=[*space, 'Jan', *space, 'Feb', *space, 'Mar', *space, 'Apr', \n # *space, 'May', *space, 'Jun', *space, 'Jul']);\nplt.ylabel(\"\");\nplt.xlabel(\"\");\nst.write(fig)\n\n# cal_df.sort_values(by='datetime', ascending=False, inplace=True)\n# cal_df['datetime'] = cal_df['datetime'].apply(lambda x: pd.to_datetime(x, unit='ms').date())\n# fig = plt.figure(figsize=(10,6))\n# ax = sns.barplot(data=cal_df, x='datetime', y='improvement', estimator=sum, ci=None)\n# st.write(fig)\n\nst.header(\"Language Details\")\nld = lingo.get_language_details('Chinese')\nlp = lingo.get_language_progress('zs')\nst.write(\"Streak: \", ld['streak'], \" :fire:\")\nst.write(\"Total points: \", ld['points'], \" 📈\")\nst.write(\"Skills learned: \", lp['num_skills_learned'], \" :seedling:\")\nst.write(\"Current level: \", ld['level'], \" 🤓\")\nst.write('Progress towards next level: ', lp['level_progress'], '/', lp['level_points'])\nst.progress(lp['level_percent'])\n\nst.header('Known Topics')\nst.write(', '.join(lingo.get_known_topics('zs')))\n\nst.header('Known Words')\nst.write(', '.join(lingo.get_known_words('zs')))\n" ]
[ [ "matplotlib.pyplot.figure", "pandas.DataFrame.from_records", "pandas.to_datetime", "matplotlib.pyplot.ylabel", "pandas.pivot_table", "matplotlib.pyplot.xlabel" ] ]
Henley13/paper_translation_factories_2020
[ "77558ed70467cf91062abf62e46c794bfbc08e4a" ]
[ "big-fish/bigfish/stack/postprocess.py" ]
[ "# -*- coding: utf-8 -*-\n\n\"\"\"\nFunctions used to format and clean any intermediate results loaded in or\nreturned by a bigfish method.\n\"\"\"\n\nimport numpy as np\nfrom scipy import ndimage as ndi\n\nfrom .utils import check_array, check_parameter, get_offset_value\n\nfrom skimage.measure import regionprops, find_contours\nfrom skimage.draw import polygon_perimeter\n\n\n# ### Transcription sites ###\n\ndef remove_transcription_site(mask_nuc, spots_in_foci, foci):\n \"\"\"We define a transcription site as a foci detected in the nucleus.\n\n Parameters\n ----------\n mask_nuc : np.ndarray, bool\n Binary mask of the nuclei with shape (y, x).\n spots_in_foci : np.ndarray, np.int64\n Coordinate of the spots detected inside foci, with shape (nb_spots, 4).\n One coordinate per dimension (zyx coordinates) plus the index of the\n foci.\n foci : np.ndarray, np.int64\n Array with shape (nb_foci, 5). One coordinate per dimension for the\n foci centroid (zyx coordinates), the number of RNAs detected in the\n foci and its index.\n\n Returns\n -------\n spots_in_foci_cleaned : np.ndarray, np.int64\n Coordinate of the spots detected inside foci, with shape (nb_spots, 4).\n One coordinate per dimension (zyx coordinates) plus the index of the\n foci. Transcription sites are removed.\n foci_cleaned : np.ndarray, np.int64\n Array with shape (nb_foci, 5). One coordinate per dimension for the\n foci centroid (zyx coordinates), the number of RNAs detected in the\n foci and its index. Transcription sites are removed.\n\n \"\"\"\n # check parameters\n check_array(mask_nuc,\n ndim=2,\n dtype=[bool],\n allow_nan=False)\n check_array(spots_in_foci,\n ndim=2,\n dtype=[np.int64],\n allow_nan=False)\n check_array(foci,\n ndim=2,\n dtype=[np.int64],\n allow_nan=False)\n\n # remove foci inside nuclei\n mask_transcription_site = mask_nuc[foci[:, 1], foci[:, 2]]\n foci_cleaned = foci[~mask_transcription_site]\n\n # filter spots in transcription sites\n spots_to_keep = foci_cleaned[:, 4]\n mask_spots_to_keep = np.isin(spots_in_foci[:, 3], spots_to_keep)\n spots_in_foci_cleaned = spots_in_foci[mask_spots_to_keep]\n\n return spots_in_foci_cleaned, foci_cleaned\n\n\n# ### Cell extraction ###\n\ndef extract_spots_from_frame(spots, z_lim=None, y_lim=None, x_lim=None):\n \"\"\"Get spots coordinates within a given frame.\n\n Parameters\n ----------\n spots : np.ndarray, np.int64\n Coordinate of the spots detected inside foci, with shape (nb_spots, 3)\n or (nb_spots, 4). One coordinate per dimension (zyx coordinates) plus\n the index of the foci if necessary.\n z_lim : tuple[int, int]\n Minimum and maximum coordinate of the frame along the z axis.\n y_lim : tuple[int, int]\n Minimum and maximum coordinate of the frame along the y axis.\n x_lim : tuple[int, int]\n Minimum and maximum coordinate of the frame along the x axis.\n\n Returns\n -------\n extracted_spots : np.ndarray, np.int64\n Coordinate of the spots detected inside foci, with shape (nb_spots, 3)\n or (nb_spots, 4). 
One coordinate per dimension (zyx coordinates) plus\n the index of the foci if necessary.\n\n \"\"\"\n # check parameters\n check_array(spots,\n ndim=2,\n dtype=[np.int64],\n allow_nan=False)\n check_parameter(z_lim=(tuple, type(None)),\n y_lim=(tuple, type(None)),\n x_lim=(tuple, type(None)))\n\n # extract spots\n extracted_spots = spots.copy()\n if z_lim is not None:\n extracted_spots = extracted_spots[extracted_spots[:, 0] < z_lim[1]]\n extracted_spots = extracted_spots[z_lim[0] < extracted_spots[:, 0]]\n extracted_spots[:, 0] -= z_lim[0]\n if y_lim is not None:\n extracted_spots = extracted_spots[extracted_spots[:, 1] < y_lim[1]]\n extracted_spots = extracted_spots[y_lim[0] < extracted_spots[:, 1]]\n extracted_spots[:, 1] -= y_lim[0]\n if x_lim is not None:\n extracted_spots = extracted_spots[extracted_spots[:, 2] < x_lim[1]]\n extracted_spots = extracted_spots[x_lim[0] < extracted_spots[:, 2]]\n extracted_spots[:, 2] -= x_lim[0]\n\n return extracted_spots\n\n\ndef extract_coordinates_image(cyt_labelled, nuc_labelled, spots_out, spots_in,\n foci):\n \"\"\"Extract relevant coordinates from an image, based on segmentation and\n detection results.\n\n For each cell in an image we return the coordinates of the cytoplasm, the\n nucleus, the RNA spots and information about the detected foci. We extract\n 2-d coordinates for the cell and 3-d coordinates for the spots and foci.\n\n Parameters\n ----------\n cyt_labelled : np.ndarray, np.uint or np.int\n Labelled cytoplasms image with shape (y, x).\n nuc_labelled : np.ndarray, np.uint or np.int\n Labelled nuclei image with shape (y, x).\n spots_out : np.ndarray, np.int64\n Coordinate of the spots detected outside foci, with shape\n (nb_spots, 4). One coordinate per dimension (zyx coordinates) plus a\n default index (-1 for mRNAs spotted outside a foci).\n spots_in : np.ndarray, np.int64\n Coordinate of the spots detected inside foci, with shape (nb_spots, 4).\n One coordinate per dimension (zyx coordinates) plus the index of the\n foci.\n foci : np.ndarray, np.int64\n Array with shape (nb_foci, 5). One coordinate per dimension for the\n foci centroid (zyx coordinates), the number of RNAs detected in the\n foci and its index.\n\n Returns\n -------\n results : List[(cyt_coord, nuc_coord, rna_coord, cell_foci, cell)]\n - cyt_coord : np.ndarray, np.int64\n Coordinates of the cytoplasm border with shape (nb_points, 2).\n - nuc_coord : np.ndarray, np.int64\n Coordinates of the nuclei border with shape (nb_points, 2).\n - rna_coord : np.ndarray, np.int64\n Coordinates of the RNA spots with shape (nb_spots, 4). One\n coordinate per dimension (zyx dimension), plus the index of a\n potential foci.\n - cell_foci : np.ndarray, np.int64\n Array with shape (nb_foci, 5). 
One coordinate per dimension for the\n foci centroid (zyx coordinates), the number of RNAs detected in the\n foci and its index.\n - cell : Tuple[int]\n Box coordinate of the cell in the original image (min_y, min_x,\n max_y and max_x).\n\n \"\"\"\n # check parameters\n check_array(cyt_labelled,\n ndim=2,\n dtype=[np.uint8, np.uint16, np.int64],\n allow_nan=True)\n check_array(nuc_labelled,\n ndim=2,\n dtype=[np.uint8, np.uint16, np.int64],\n allow_nan=True)\n check_array(spots_out,\n ndim=2,\n dtype=[np.int64],\n allow_nan=False)\n check_array(spots_in,\n ndim=2,\n dtype=[np.int64],\n allow_nan=False)\n check_array(foci,\n ndim=2,\n dtype=[np.int64],\n allow_nan=False)\n\n # initialize results\n results = []\n borders = np.zeros(cyt_labelled.shape, dtype=bool)\n borders[:, 0] = True\n borders[0, :] = True\n borders[:, cyt_labelled.shape[1] - 1] = True\n borders[cyt_labelled.shape[0] - 1, :] = True\n cells = regionprops(cyt_labelled)\n for cell in cells:\n\n # get information about the cell\n label = cell.label\n (min_y, min_x, max_y, max_x) = cell.bbox\n\n # get masks of the cell\n cyt = cyt_labelled.copy()\n cyt = (cyt == label)\n nuc = nuc_labelled.copy()\n nuc = (nuc == label)\n\n # check if cell is not cropped by the borders\n if _check_cropped_cell(cyt, borders):\n continue\n\n # check if nucleus is in the cytoplasm\n if not _check_nucleus_in_cell(cyt, nuc):\n continue\n\n # get boundaries coordinates\n cyt_coord, nuc_coord = _get_boundaries_coordinates(cyt, nuc)\n\n # filter foci\n foci_cell, spots_in_foci_cell = _extract_foci(foci, spots_in, cyt)\n\n # get rna coordinates\n spots_out_foci_cell = _extract_spots_outside_foci(cyt, spots_out)\n rna_coord = np.concatenate([spots_out_foci_cell, spots_in_foci_cell],\n axis=0)\n\n # filter cell without enough spots\n if len(rna_coord) < 30:\n continue\n\n # initialize cell coordinates\n cyt_coord[:, 0] -= min_y\n cyt_coord[:, 1] -= min_x\n nuc_coord[:, 0] -= min_y\n nuc_coord[:, 1] -= min_x\n rna_coord[:, 1] -= min_y\n rna_coord[:, 2] -= min_x\n foci_cell[:, 1] -= min_y\n foci_cell[:, 2] -= min_x\n\n results.append((cyt_coord, nuc_coord, rna_coord, foci_cell, cell.bbox))\n\n return results\n\n\ndef _check_cropped_cell(cell_cyt_mask, border_frame):\n \"\"\"\n Check if a cell is cropped by the border frame.\n\n Parameters\n ----------\n cell_cyt_mask : np.ndarray, bool\n Binary mask of the cell cytoplasm.\n\n border_frame : np.ndarray, bool\n Binary mask of the border frame.\n\n Returns\n -------\n _ : bool\n True if cell is cropped.\n\n \"\"\"\n # check cell is not cropped by the borders\n crop = cell_cyt_mask & border_frame\n if np.any(crop):\n return True\n else:\n return False\n\n\ndef _check_nucleus_in_cell(cell_cyt_mask, cell_nuc_mask):\n \"\"\"\n Check if the nucleus is properly contained in the cell cytoplasm.\n\n Parameters\n ----------\n cell_cyt_mask : np.ndarray, bool\n Binary mask of the cell cytoplasm.\n\n cell_nuc_mask : np.ndarray, bool\n Binary mask of the nucleus cytoplasm.\n\n Returns\n -------\n _ : bool\n True if the nucleus is in the cell.\n\n \"\"\"\n diff = cell_cyt_mask | cell_nuc_mask\n if np.any(diff != cell_cyt_mask):\n return False\n else:\n return True\n\n\ndef _get_boundaries_coordinates(cell_cyt_mask, cell_nuc_mask):\n \"\"\"\n Find boundaries coordinates for cytoplasm and nucleus.\n\n Parameters\n ----------\n cell_cyt_mask : np.ndarray, bool\n Mask of the cell cytoplasm.\n cell_nuc_mask : np.ndarray, bool\n Mask of the cell nucleus.\n\n Returns\n -------\n cyt_coord : np.ndarray, np.int64\n Coordinates of 
the cytoplasm in 2-d (yx dimension).\n nuc_coord : np.ndarray, np.int64\n Coordinates of the nucleus in 2-d (yx dimension).\n\n \"\"\"\n cyt_coord = np.array([], dtype=np.int64).reshape((0, 2))\n nuc_coord = np.array([], dtype=np.int64).reshape((0, 2))\n\n # cyt coordinates\n cell_cyt_coord = find_contours(cell_cyt_mask, level=0)\n if len(cell_cyt_coord) == 0:\n pass\n elif len(cell_cyt_coord) == 1:\n cyt_coord = cell_cyt_coord[0].astype(np.int64)\n else:\n m = 0\n for coord in cell_cyt_coord:\n if len(coord) > m:\n m = len(coord)\n cyt_coord = coord.astype(np.int64)\n\n # nuc coordinates\n cell_nuc_coord = find_contours(cell_nuc_mask, level=0)\n if len(cell_nuc_coord) == 0:\n pass\n elif len(cell_nuc_coord) == 1:\n nuc_coord = cell_nuc_coord[0].astype(np.int64)\n else:\n m = 0\n for coord in cell_nuc_coord:\n if len(coord) > m:\n m = len(coord)\n nuc_coord = coord.astype(np.int64)\n\n return cyt_coord, nuc_coord\n\n\ndef _extract_foci(foci, spots_in_foci, cell_cyt_mask):\n \"\"\"\n Extract foci and related spots detected in a specific cell.\n\n Parameters\n ----------\n foci : np.ndarray, np.int64\n Array with shape (nb_foci, 5). One coordinate per dimension for the\n foci centroid (zyx coordinates), the number of RNAs detected in the\n foci and its index.\n\n spots_in_foci : : np.ndarray, np.int64\n Coordinate of the spots detected inside foci, with shape (nb_spots, 4).\n One coordinate per dimension (zyx coordinates) plus the index of the\n foci.\n cell_cyt_mask : np.ndarray, bool\n Binary mask of the cell with shape (y, x).\n\n Returns\n -------\n spots_in_foci_cell : np.ndarray, np.int64\n Coordinate of the spots detected inside foci in the cell, with shape\n (nb_spots, 4). One coordinate per dimension (zyx coordinates) plus the\n index of the foci.\n foci_cell : np.ndarray, np.int64\n Array with shape (nb_foci, 5). One coordinate per dimension for the\n foci centroid (zyx coordinates), the number of RNAs detected in the\n foci and its index.\n\n \"\"\"\n # filter foci\n mask_foci_cell = cell_cyt_mask[foci[:, 1], foci[:, 2]]\n if mask_foci_cell.sum() == 0:\n foci_cell = np.array([], dtype=np.int64).reshape((0, 5))\n spots_in_foci_cell = np.array([], dtype=np.int64).reshape((0, 4))\n return foci_cell, spots_in_foci_cell\n\n foci_cell = foci[mask_foci_cell]\n\n # filter spots in foci\n spots_to_keep = foci_cell[:, 4]\n mask_spots_to_keep = np.isin(spots_in_foci[:, 3], spots_to_keep)\n spots_in_foci_cell = spots_in_foci[mask_spots_to_keep]\n\n return foci_cell, spots_in_foci_cell\n\n\ndef _extract_spots_outside_foci(cell_cyt_mask, spots_out_foci):\n \"\"\"\n Extract spots detected outside foci, in a specific cell.\n\n Parameters\n ----------\n cell_cyt_mask : np.ndarray, bool\n Binary mask of the cell with shape (y, x).\n spots_out_foci : np.ndarray, np.int64\n Coordinate of the spots detected outside foci, with shape\n (nb_spots, 4). One coordinate per dimension (zyx coordinates) plus a\n default index (-1 for mRNAs spotted outside a foci).\n\n Returns\n -------\n spots_out_foci_cell : np.ndarray, np.int64\n Coordinate of the spots detected outside foci in the cell, with shape\n (nb_spots, 4). 
One coordinate per dimension (zyx coordinates) plus the\n index of the foci.\n\n \"\"\"\n # get coordinates of rna outside foci\n mask_spots_to_keep = cell_cyt_mask[spots_out_foci[:, 1],\n spots_out_foci[:, 2]]\n spots_out_foci_cell = spots_out_foci[mask_spots_to_keep]\n\n return spots_out_foci_cell\n\n\n# ### Segmentation postprocessing ###\n\n# TODO add from_binary_surface_to_binary_boundaries\n\ndef center_binary_mask(cyt, nuc=None, rna=None):\n \"\"\"Center a 2-d binary mask (surface or boundaries) and pad it.\n\n One mask should be at least provided ('cyt'). If others masks are provided\n ('nuc' and 'rna'), they will be transformed like the main mask. All the\n provided masks should have the same shape. If others coordinates are\n provided, the values will be transformed, but an array of coordinates with\n the same format is returned\n\n Parameters\n ----------\n cyt : np.ndarray, np.uint or np.int or bool\n Binary image of cytoplasm with shape (y, x).\n nuc : np.ndarray, np.uint or np.int or bool\n Binary image of nucleus with shape (y, x) or array of nucleus\n coordinates with shape (nb_points, 2).\n rna : np.ndarray, np.uint or np.int or bool\n Binary image of mRNAs localization with shape (y, x) or array of mRNAs\n coordinates with shape (nb_points, 2) or (nb_points, 3).\n\n Returns\n -------\n cyt_centered : np.ndarray, np.uint or np.int or bool\n Centered binary image of cytoplasm with shape (y, x).\n nuc_centered : np.ndarray, np.uint or np.int or bool\n Centered binary image of nucleus with shape (y, x).\n rna_centered : np.ndarray, np.uint or np.int or bool\n Centered binary image of mRNAs localizations with shape (y, x).\n\n \"\"\"\n # check parameters\n check_array(cyt,\n ndim=2,\n dtype=[np.uint8, np.uint16, np.int64, bool])\n if nuc is not None:\n check_array(nuc,\n ndim=2,\n dtype=[np.uint8, np.uint16, np.int64, bool])\n if rna is not None:\n check_array(rna,\n ndim=2,\n dtype=[np.uint8, np.uint16, np.int64, bool])\n\n # initialize parameter\n nuc_centered, rna_centered = None, None\n marge = get_offset_value()\n\n # center the binary mask of the cell\n coord = np.nonzero(cyt)\n coord = np.column_stack(coord)\n min_y, max_y = coord[:, 0].min(), coord[:, 0].max()\n min_x, max_x = coord[:, 1].min(), coord[:, 1].max()\n shape_y = max_y - min_y + 1\n shape_x = max_x - min_x + 1\n cyt_centered_shape = (shape_y + 2 * marge, shape_x + 2 * marge)\n cyt_centered = np.zeros(cyt_centered_shape, dtype=bool)\n crop = cyt[min_y:max_y + 1, min_x:max_x + 1]\n cyt_centered[marge:shape_y + marge, marge:shape_x + marge] = crop\n\n # center the binary mask of the nucleus with the same transformation\n if nuc is not None:\n if nuc.shape == 2:\n nuc_centered = nuc.copy()\n nuc_centered[:, 0] = nuc_centered[:, 0] - min_y + marge\n nuc_centered[:, 1] = nuc_centered[:, 1] - min_x + marge\n\n elif nuc.shape == cyt.shape:\n nuc_centered = np.zeros(cyt_centered_shape, dtype=bool)\n crop = nuc[min_y:max_y + 1, min_x:max_x + 1]\n nuc_centered[marge:shape_y + marge, marge:shape_x + marge] = crop\n\n else:\n raise ValueError(\"mRNAs mask should have the same shape than \"\n \"cytoplasm mask and coordinates should be in 2-d\")\n\n # center the binary mask of the mRNAs with the same transformation\n if rna is not None:\n if rna.shape[1] == 3:\n rna_centered = rna.copy()\n rna_centered[:, 1] = rna_centered[:, 1] - min_y + marge\n rna_centered[:, 2] = rna_centered[:, 2] - min_x + marge\n\n elif rna.shape[1] == 2:\n rna_centered = rna.copy()\n rna_centered[:, 0] = rna_centered[:, 0] - min_y + marge\n 
rna_centered[:, 1] = rna_centered[:, 1] - min_x + marge\n\n elif rna.shape == cyt.shape:\n rna_centered = np.zeros(cyt_centered_shape, dtype=bool)\n crop = rna[min_y:max_y + 1, min_x:max_x + 1]\n rna_centered[marge:shape_y + marge, marge:shape_x + marge] = crop\n\n else:\n raise ValueError(\"mRNAs mask should have the same shape than \"\n \"cytoplasm mask and coordinates should be in 2-d \"\n \"or 3-d\")\n\n return cyt_centered, nuc_centered, rna_centered\n\n\ndef from_surface_to_coord(binary_surface):\n \"\"\"Extract coordinates from a 2-d binary matrix.\n\n The resulting coordinates represent the external boundaries of the object.\n\n Parameters\n ----------\n binary_surface : np.ndarray, np.uint or np.int or bool\n Binary image with shape (y, x).\n\n Returns\n -------\n coord : np.ndarray, np.int64\n Array of boundaries coordinates with shape (nb_points, 2).\n\n \"\"\"\n # check parameters\n check_array(binary_surface,\n ndim=2,\n dtype=[np.uint8, np.uint16, np.int64, bool])\n\n # from binary surface to 2D coordinates boundaries\n coord = find_contours(binary_surface, level=0)[0].astype(np.int64)\n\n return coord\n\n\ndef complete_coord_boundaries(coord):\n \"\"\"Complete a 2-d coordinates array, by generating/interpolating missing\n points.\n\n Parameters\n ----------\n coord : np.ndarray, np.int64\n Array of coordinates to complete, with shape (nb_points, 2).\n\n Returns\n -------\n coord_completed : np.ndarray, np.int64\n Completed coordinates arrays, with shape (nb_points, 2).\n\n \"\"\"\n # check parameters\n check_array(coord,\n ndim=2,\n dtype=[np.int64])\n\n # for each array in the list, complete its coordinates using the scikit\n # image method 'polygon_perimeter'\n coord_y, coord_x = polygon_perimeter(coord[:, 0], coord[:, 1])\n coord_y = coord_y[:, np.newaxis]\n coord_x = coord_x[:, np.newaxis]\n coord_completed = np.concatenate((coord_y, coord_x), axis=-1)\n\n return coord_completed\n\n\ndef _from_coord_to_boundaries(coord_cyt, coord_nuc=None, coord_rna=None):\n \"\"\"Convert 2-d coordinates to a binary matrix with the boundaries of the\n object.\n\n As we manipulate the coordinates of the external boundaries, the relative\n binary matrix has two extra pixels in each dimension. 
We compensate by\n reducing the marge by one in order to keep the same shape for the frame.\n If others coordinates are provided, the relative binary matrix is build\n with the same shape as the main coordinates.\n\n Parameters\n ----------\n coord_cyt : np.ndarray, np.int64\n Array of cytoplasm boundaries coordinates with shape (nb_points, 2).\n coord_nuc : np.ndarray, np.int64\n Array of nucleus boundaries coordinates with shape (nb_points, 2).\n coord_rna : np.ndarray, np.int64\n Array of mRNAs coordinates with shape (nb_points, 2) or\n (nb_points, 3).\n\n Returns\n -------\n cyt : np.ndarray, np.uint or np.int or bool\n Binary image of cytoplasm boundaries with shape (y, x).\n nuc : np.ndarray, np.uint or np.int or bool\n Binary image of nucleus boundaries with shape (y, x).\n rna : np.ndarray, np.uint or np.int or bool\n Binary image of mRNAs localizations with shape (y, x).\n\n \"\"\"\n # initialize parameter\n nuc, rna = None, None\n marge = get_offset_value()\n marge -= 1\n\n # from 2D coordinates boundaries to binary boundaries\n max_y = coord_cyt[:, 0].max()\n max_x = coord_cyt[:, 1].max()\n min_y = coord_cyt[:, 0].min()\n min_x = coord_cyt[:, 1].min()\n shape_y = max_y - min_y + 1\n shape_x = max_x - min_x + 1\n image_shape = (shape_y + 2 * marge, shape_x + 2 * marge)\n coord_cyt[:, 0] = coord_cyt[:, 0] - min_y + marge\n coord_cyt[:, 1] = coord_cyt[:, 1] - min_x + marge\n cyt = np.zeros(image_shape, dtype=bool)\n cyt[coord_cyt[:, 0], coord_cyt[:, 1]] = True\n\n # transform nucleus coordinates with the same parameters\n if coord_nuc is not None:\n nuc = np.zeros(image_shape, dtype=bool)\n coord_nuc[:, 0] = coord_nuc[:, 0] - min_y + marge\n coord_nuc[:, 1] = coord_nuc[:, 1] - min_x + marge\n nuc[coord_nuc[:, 0], coord_nuc[:, 1]] = True\n\n # transform mRNAs coordinates with the same parameters\n if coord_rna is not None:\n rna = np.zeros(image_shape, dtype=bool)\n if coord_rna.shape[1] == 3:\n coord_rna[:, 1] = coord_rna[:, 1] - min_y + marge\n coord_rna[:, 2] = coord_rna[:, 2] - min_x + marge\n rna[coord_rna[:, 1], coord_rna[:, 2]] = True\n else:\n coord_rna[:, 0] = coord_rna[:, 0] - min_y + marge\n coord_rna[:, 1] = coord_rna[:, 1] - min_x + marge\n rna[coord_rna[:, 0], coord_rna[:, 1]] = True\n\n return cyt, nuc, rna\n\n\ndef from_boundaries_to_surface(binary_boundaries):\n \"\"\"Fill in the binary matrix representing the boundaries of an object.\n\n Parameters\n ----------\n binary_boundaries : np.ndarray, np.uint or np.int or bool\n Binary image with shape (y, x).\n\n Returns\n -------\n binary_surface : np.ndarray, np.uint or np.int or bool\n Binary image with shape (y, x).\n\n \"\"\"\n # TODO check dtype input & output\n # check parameters\n check_array(binary_boundaries,\n ndim=2,\n dtype=[np.uint8, np.uint16, np.int64, bool])\n\n # from binary boundaries to binary surface\n binary_surface = ndi.binary_fill_holes(binary_boundaries)\n\n return binary_surface\n\n\ndef from_coord_to_surface(coord_cyt, coord_nuc=None, coord_rna=None):\n \"\"\"Convert 2-d coordinates to a binary matrix with the surface of the\n object.\n\n As we manipulate the coordinates of the external boundaries, the relative\n binary matrix has two extra pixels in each dimension. 
We compensate by\n keeping only the inside pixels of the object surface.\n If others coordinates are provided, the relative binary matrix is build\n with the same shape as the main coordinates.\n\n Parameters\n ----------\n coord_cyt : np.ndarray, np.int64\n Array of cytoplasm boundaries coordinates with shape (nb_points, 2).\n coord_nuc : np.ndarray, np.int64\n Array of nucleus boundaries coordinates with shape (nb_points, 2).\n coord_rna : np.ndarray, np.int64\n Array of mRNAs coordinates with shape (nb_points, 2) or\n (nb_points, 3).\n\n Returns\n -------\n cyt_surface : np.ndarray, np.uint or np.int or bool\n Binary image of cytoplasm surface with shape (y, x).\n nuc_surface : np.ndarray, np.uint or np.int or bool\n Binary image of nucleus surface with shape (y, x).\n rna : np.ndarray, np.uint or np.int or bool\n Binary image of mRNAs localizations with shape (y, x).\n\n \"\"\"\n # check parameters\n check_array(coord_cyt,\n ndim=2,\n dtype=[np.int64])\n if coord_nuc is not None:\n check_array(coord_nuc,\n ndim=2,\n dtype=[np.int64])\n if coord_rna is not None:\n check_array(coord_rna,\n ndim=2,\n dtype=[np.int64])\n\n # from coordinates to binary boundaries\n cyt, nuc, rna = _from_coord_to_boundaries(coord_cyt, coord_nuc, coord_rna)\n\n # from binary boundaries to binary surface\n cyt_surface = from_boundaries_to_surface(cyt)\n nuc_surface = from_boundaries_to_surface(nuc)\n\n return cyt_surface, nuc_surface, rna\n" ]
[ [ "numpy.zeros", "scipy.ndimage.binary_fill_holes", "numpy.any", "numpy.column_stack", "numpy.isin", "numpy.array", "numpy.concatenate", "numpy.nonzero" ] ]
metamoles/metamoles
[ "251de6672029566d8becf2538684c0506fc297d0" ]
[ "deprecated/code/datacleaning.py" ]
[ "#!/usr/bin/env python\nimport Bio\nfrom Bio.KEGG import REST\nfrom Bio.KEGG import Enzyme\nimport re\nfrom Bio.KEGG import Compound\n\nimport gzip\nimport pandas as pd\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\ndef create_enzyme_df(path_to_file):\n \"\"\"\n input:path_to_file. file.gz format\n output:enzyme dataframe\n \"\"\"\n\n enzyme_fields = [method for method in dir(Enzyme.Record()) if not method.startswith('_')]\n data_matrix = []\n\n with gzip.open(path_to_file, 'rt') as file:\n for record in enzyme.parse(file):\n data_matrix.append([getattr(record, field) for field in enzyme_fields])\n\n enzyme_df = pd.DataFrame(data_matrix, columns=enzyme_fields)\n return enzyme_df\n\n\n\ndef get_compact_promiscuous_df(enzyme_df):\n \"\"\"\n input:enzyme dataframe (dataframe)\n output:promiscuous enzyme dataframe (dataframe)\n \"\"\"\n\n promiscuous_df = enzyme_df[[True if len(rxn) > 1 else False for rxn in enzyme_df['reaction']]]\n compact_promiscuous_df = promiscuous_df[['entry','reaction','product','substrate']]\n\n return compact_promiscuous_df\n\n\n\ndef get_reaction_list(df):\n \"\"\"\n get the list of reaction from a dataframe that contains reaction column\n input:dataframe with reaction column (df)\n output: list of reaction (list)\n \"\"\"\n reaction_list = []\n for index,row in df.iterrows():\n for reaction in row['reaction']:\n reaction_split = reaction.split(\"[RN:\")[-1]\n if reaction_split.startswith(\"R\") and not reaction_split.startswith(\"RN\"):\n for i in reaction_split[:-1].split(\" \"):\n reaction_list.append(i)\n return reaction_list\n\n\n\ndef query_reversible_reaction(reaction_list):\n \"\"\"\n get the list of reversible reaction\n input:list of reactions(list) eg)[\"R00709\"]\n output:list of reversible reactions(list) \n \"\"\"\n\n reversible_reaction = []\n for reaction in reaction_list:\n reaction_file = REST.kegg_get(reaction).read()\n for i in reaction_file.rstrip().split(\"\\n\"):\n if i.startswith(\"EQUATION\") and \"<=>\" in i:\n reversible_reaction.append(reaction)\n return reversible_reaction\n\n\n\ndef combine_substrate_product(df):\n \"\"\"\n append substrates to product column.\n should not be run multiple times. \n it will append substrates multiple times\n input:dataframe with substrate and product(df)\n output:dataframe with combined substrate and product. named under product column(df)\n \"\"\"\n\n rowindex = np.arange(0,len(df))\n df_with_ordered_index = df.set_index(rowindex)\n\n newdf = df_with_ordered_index\n \n for index,row in df_with_ordered_index.iterrows():\n productlist = row['product']\n substratelist = row['substrate']\n newdf.iloc[index,2] = productlist + substratelist \n\n return newdf[[\"entry\",\"product\"]]\n\n\n\ndef get_cofactor_list(cofactor_df,CPDcolumnname):\n \"\"\"\n <input>\n cofactor_df : cofactor dataframe(df)\n CPDcolumnname : name of CPD columnname from cofactor dataframe(str) \n <output>\n cofactor_list : list of cofactors from cofactor dataframe (list)\n \"\"\"\n\n cofactor_list = [cofactor[4:10] for cofactor in cofactor_df[CPDcolumnname]]\n return cofactor_list\n\n\ndef get_cpd_id(compound_full):\n \"\"\"\n input:compound_full = compound full name (str) eg) 'oxalureate [CPD:C00802]'\n output: cpd = cpd id (str) eg) 'C01007'\n \"\"\"\n cpd = compound_full[-7:-1]\n return cpd \n\n\n\ndef rm_cofactor_only_cpd(enzyme_df,cofactor_list,compound_columnname=\"product\",keepNA=True):\n \"\"\"\n <input>\n enzyme_df : dataframe with enzyme information. 
should have substrate and product combined(df)\n compound_columnname : name of the column with compounds (str)\n cofactor_list : list of cofactors to be removed (list)\n keepNA : if false, will drop the row with no compounds (boolean, default:True) \n <output>\n clean dataframe (df) \n \"\"\"\n newdf = enzyme_df.drop([\"product\"],axis=1)\n cleaned_compound_column = []\n for index,row in enzyme_df.iterrows():\n cpd_compound_list =[]\n for compound in row[compound_columnname]:\n if \"CPD\" in compound:\n onlycpd = get_cpd(compound)\n if onlycpd not in cofactor_list:\n cpd_compound_list.append(onlycpd)\n else:\n pass\n if len(cpd_compound_list)==0:\n cleaned_compound_column.append(\"NA\")\n else: \n cleaned_compound_column.append(cpd_compound_list)\n newdf['product'] = cleaned_compound_column\n\n if keepNA==False:\n newdf = newdf.loc[cleaned_df_productinList['product']!='NA']\n \n return newdf\n\n\n\ndef itemlist_eachrow(df,oldcolumnname,newcolumnname,sorting_column):\n \"\"\"\n <input>\n df: dataframe with list items in one column (dataframe)\n oldcolumnname : name of the old column to be replaced (str) eg)\"products\"\n newcolumnname : name of the new column to replace (str) eg)\"product\"\n sorting_column : name of the column to be sorted by (str) eg)\"entry\"\n\n <output>\n dataframe with each item in each row. \n \n \"\"\"\n newdf = df[oldcolumnname].\\\n apply(pd.Series).\\\n merge(df, left_index=True, right_index=True).\\\n drop([oldcolumnname],axis=1).\\\n melt(id_vars=[enzymecolumn],value_name=newcolumnname).\\\n sort_values(by=[sorting_column]).\\\n dropna().\\\n drop(columns=[\"variable\"])\n return newdf\n\n\ndef compound_records_to_df(file_path):\n \"\"\"\n Function parses all records using Biopython.Bio.KEGG.Compound parser, and returns a pandas dataframe.\n <Input>\n filepath = file path to a gzipped text file of KEGG enzyme records (str) \n <output>\n compound dataframe \n \"\"\"\n compound_fields = [method for method in dir(Compound.Record()) if not method.startswith('_')]\n data_matrix = []\n\n with gzip.open(file_path, 'rt') as file:\n for record in Compound.parse(file):\n data_matrix.append([getattr(record, field) for field in compound_fields])\n \n compound_df = pd.DataFrame(data_matrix, columns=compound_fields)\n return compound_df\n\n\n\ndef extract_PubChem_id(field):\n \"\"\"\n This function uses regular expressions to extract the PubChem compound IDs from a field in a record\n input : field \n output : pubchem_id \n \"\"\"\n\n regex = \"'PubChem', \\[\\'(\\d+)\\'\\]\\)\" # matches \"'PubChem', ['\" characters exactly, then captures any number of digits (\\d+), before another literal \"']\" character match\n ids = re.findall(regex, str(field), re.IGNORECASE)\n if len(ids) > 0:\n pubchem_id = ids[0]\n else:\n pubchem_id = ''\n \n return pubchem_id\n\n\n\n\n" ]
[ [ "pandas.DataFrame" ] ]
rsuderman/iree-samples
[ "e7ba8e639c1bdd763793a6cf21930fb238607b3f" ]
[ "tflitehub/mobilenet_quant_test.py" ]
[ "# RUN: %PYTHON %s\n\nimport absl.testing\nimport numpy\nimport test_util\nimport urllib.request\n\nfrom PIL import Image\n\nmodel_path = \"https://tfhub.dev/tensorflow/lite-model/mobilenet_v2_1.0_224_quantized/1/default/1?lite-format=tflite\"\n\nclass MobilenetQuantTest(test_util.TFLiteModelTest):\n def __init__(self, *args, **kwargs):\n super(MobilenetQuantTest, self).__init__(model_path, *args, **kwargs)\n\n def compare_results(self, iree_results, tflite_results, details):\n super(MobilenetQuantTest, self).compare_results(iree_results, tflite_results, details)\n self.assertTrue(numpy.isclose(iree_results[0], tflite_results[0], atol=1e-6).all())\n\n def generate_inputs(self, input_details):\n img_path = \"https://github.com/google-coral/test_data/raw/master/cat.bmp\"\n local_path = \"/\".join([self.workdir, \"cat.bmp\"])\n urllib.request.urlretrieve(img_path, local_path)\n\n shape = input_details[0][\"shape\"]\n im = numpy.array(Image.open(local_path).resize((shape[1], shape[2])))\n args = [im.reshape(shape)]\n return args\n\n def test_compile_tflite(self):\n self.compile_and_execute()\n\nif __name__ == '__main__':\n absl.testing.absltest.main()\n" ]
[ [ "numpy.isclose" ] ]
WyckliffeAluga/data-chronicles
[ "5219fe9cdbafb9fd7be88727483952c4c13f2790" ]
[ "Core Concepts/Deep Learning/3_RELU_activation_function.py" ]
[ "import numpy as np\n\ndef relu(input):\n '''Define your relu activation function here'''\n # Calculate the value for the output of the relu function: output\n output = max(input, 0)\n \n # Return the value just calculated\n return(output)\n\ninput_data = np.array([3,5])\n\n# Calculate node 0 value: node_0_output\nnode_0_input = (input_data * weights['node_0']).sum()\nnode_0_output = relu(node_0_input)\n\n# Calculate node 1 value: node_1_output\nnode_1_input = (input_data * weights['node_1']).sum()\nnode_1_output = relu(node_1_input)\n\n# Put node values into array: hidden_layer_outputs\nhidden_layer_outputs = np.array([node_0_output, node_1_output])\n\n# Calculate model output (do not apply relu)\nmodel_output = (hidden_layer_outputs * weights['output']).sum()\n\n# Print model output\nprint(model_output)\n" ]
[ [ "numpy.array" ] ]
cwegg/astro-dynamo
[ "024f8aad8785488e9ae3328095d3d9c53b3e31b0" ]
[ "astro_dynamo/model.py" ]
[ "import math\nfrom typing import List, Union, Tuple\n\nimport torch\nimport torch.nn as nn\n\nfrom astro_dynamo.snap import SnapShot\nfrom .snaptools import align_bar\n\n\ndef _symmetrize_matrix(x, dim):\n \"\"\"Symmetrize a tensor along dimension dim\"\"\"\n return (x + x.flip(dims=[dim])) / 2\n\n\nclass DynamicalModel(nn.Module):\n \"\"\"DynamicalModels class. This containts a snapshot of the particles, the potentials\n in which they move, and the targets to which the model should be fitted.\n\n Attributes:\n snap:\n Should be a SnapShot whose masses will be optimised\n\n potentials:\n The potentials add. If self gravity is not required set self_gravity_update to None.\n If self gravity is required then the potential of the snapshot should be in potentials[0]\n and self_gravity_update represents how much to update the running average of the density on\n each iteration. Default value is 0.2 which is then exponential averages the density with timescale\n 5 snapshots(=1/0.2).\n\n targets:\n A list of targets. Running\n model = DynamicalModel(snap, potentials, targets)\n current_target_list = model()\n will provide an list of theDynamicalModelse targets evaluated with the present model. These are then\n typically combined to a loss that pytorch can optimise.\n\n Methods:\n forward()\n Computes the targets by evaluating them on the current snapshot. Can also be called as DynamicalModel()\n integrate(steps=256)\n Integrates the model forward by steps. Updates potential the density assocaiates to potential[0]\n update_potential()\n Recomputes the accelerations from potential[0]. Adjust each snapshots velocity by a factor vc_new/vc_old\n resample()\n Resamples the snapshot to equal mass particles.\n \"\"\"\n\n def __init__(self, snap, potentials, targets, self_gravity_update=0.2):\n super(DynamicalModel, self).__init__()\n self.snap = snap\n self.targets = nn.ModuleList(targets)\n self.potentials = nn.ModuleList(potentials)\n self.self_gravity_update = self_gravity_update\n\n def forward(self):\n return [target(self) for target in self.targets]\n\n def integrate(self, steps=256):\n with torch.no_grad():\n self.snap.leapfrog_steps(potentials=self.potentials, steps=steps)\n if self.self_gravity_update is not None:\n self.potentials[0].update_density(self.snap.positions,\n self.snap.masses.detach(),\n fractional_update=self.self_gravity_update)\n\n def update_potential(self, dm_potential=None, update_velocities=True):\n with torch.no_grad():\n if update_velocities:\n old_accelerations = self.snap.get_accelerations(self.potentials,\n self.snap.positions)\n old_vc = torch.sum(-old_accelerations * self.snap.positions,\n dim=-1).sqrt()\n self.potentials[0].rho = _symmetrize_matrix(\n _symmetrize_matrix(\n _symmetrize_matrix(self.potentials[0].rho, 0), 1), 2)\n self.potentials[0].grid_accelerations()\n if dm_potential is not None:\n self.potentials[1] = dm_potential\n if update_velocities:\n new_accelerations = self.snap.get_accelerations(self.potentials,\n self.snap.positions)\n new_vc = torch.sum(-new_accelerations * self.snap.positions,\n dim=-1).sqrt()\n gd = torch.isfinite(new_vc / old_vc) & (new_vc / old_vc > 0)\n self.snap.velocities[gd, :] *= (new_vc / old_vc)[gd, None]\n align_bar(self.snap)\n\n def resample(self, velocity_perturbation=0.01):\n \"\"\"Resample the model to equal mass particles.\n\n Note that the snapshot changes and so the parameters of\n the model also change in a way that any optimiser that keeps parameter-by-parameter information e.g.\n gradients must also be 
update.\"\"\"\n with torch.no_grad():\n self.snap = self.snap.resample(self.potentials,\n velocity_perturbation=velocity_perturbation)\n align_bar(self.snap)\n\n def vc(self, components=False, r=torch.linspace(0, 9),\n phi=torch.linspace(0, math.pi)):\n \"\"\"Returns (r,vc) the circular velocity of the model in physical units and locations at which it was evaluated.\n\n If components=True then return list containing the vc of each component, otherwise just return the total.\n r optionally specifies the physical radii at which to compute vc\n phi specifies the azimuths over which to average.\"\"\"\n phi_grid, r_grid = torch.meshgrid(phi, r)\n phi_grid, r_grid = phi_grid.flatten(), r_grid.flatten()\n pos = torch.stack((r_grid * torch.cos(phi_grid),\n r_grid * torch.sin(phi_grid), 0 * phi_grid)).t()\n pos = pos.to(device=self.d_scale.device)\n pos /= self.d_scale\n vc = []\n for potential in self.potentials:\n device = next(potential.buffers()).device\n acc = potential.get_accelerations(pos.to(device=device)).to(\n device=pos.device)\n vc += [torch.sum(-acc * pos, dim=1).sqrt().reshape(\n phi.shape + r.shape).mean(dim=0) * self.v_scale]\n if components:\n return r, vc\n else:\n total_vc = vc[0]\n for thisvc in vc[1:]:\n total_vc = (total_vc ** 2 + thisvc ** 2).sqrt()\n return r, total_vc\n\n\nclass MilkyWayModel(DynamicalModel):\n def __init__(self, snap: SnapShot, potentials: List[nn.Module],\n targets: List[nn.Module],\n self_gravity_update: Union[float, torch.Tensor] = 0.2,\n bar_angle: Union[float, torch.Tensor] = 27.,\n r_0: Union[float, torch.Tensor] = 8.2,\n z_0: Union[float, torch.Tensor] = 0.014,\n v_scale: Union[float, torch.Tensor] = 240,\n d_scale: Union[float, torch.Tensor] = 1.4,\n v_sun: Union[List[float], Tuple[float, float, float],\n torch.Tensor] = (11.1, 12.24 + 238.0, 7.25)\n ):\n super(MilkyWayModel, self).__init__(snap, potentials, targets,\n self_gravity_update)\n self.bar_angle = nn.Parameter(torch.as_tensor(bar_angle),\n requires_grad=False)\n self.r_0 = nn.Parameter(torch.as_tensor(r_0), requires_grad=False)\n self.z_0 = nn.Parameter(torch.as_tensor(z_0), requires_grad=False)\n self.v_scale = nn.Parameter(torch.as_tensor(v_scale),\n requires_grad=False)\n self.d_scale = nn.Parameter(torch.as_tensor(d_scale),\n requires_grad=False)\n self.v_sun = nn.Parameter(torch.as_tensor(v_sun), requires_grad=False)\n\n @property\n def m_scale(self) -> torch.tensor:\n G = 4.302E-3 # Gravitational constant in astronomical units\n return self.d_scale * 1e3 * self.v_scale ** 2 / G\n\n @property\n def t_scale(self) -> torch.tensor:\n \"\"\"1 iu in time in Gyr\"\"\"\n return self.d_scale / self.v_scale * 0.977813106 # note that 1km/s is almost 1kpc/Gyr\n\n @property\n def xyz(self) -> torch.tensor:\n \"\"\"Return position of particles in relative to the Sun in cartesian coordinates with units kpc\n \"\"\"\n ddtor = math.pi / 180.\n ang = self.bar_angle * ddtor\n pos = self.snap.positions\n xyz = torch.zeros_like(pos)\n inplane_gc_distance = (self.r_0 ** 2 - self.z_0 ** 2).sqrt()\n xyz[:, 0] = (pos[:, 0] * torch.cos(-ang) - pos[:, 1] * torch.sin(\n -ang)) * self.d_scale + inplane_gc_distance\n xyz[:, 1] = (pos[:, 0] * torch.sin(-ang) + pos[:, 1] * torch.cos(\n -ang)) * self.d_scale\n xyz[:, 2] = pos[:, 2] * self.d_scale - self.z_0\n return xyz\n\n @property\n def l_b_mu(self) -> torch.tensor:\n \"\"\"Return array of particles in galactic (l,b,mu) coordinates. (l,b) in degrees. 
mu is distance modulus\"\"\"\n xyz = self.xyz\n l_b_mu = torch.zeros_like(xyz)\n d = (xyz[:, 0] ** 2 + xyz[:, 1] ** 2 + xyz[:, 2] ** 2).sqrt()\n l_b_mu[:, 0] = torch.atan2(xyz[:, 1], xyz[:, 0]) * 180 / math.pi\n b_offset = torch.asin(\n self.z_0 / self.r_0) # the GC has z = -z_0, rotate b coordinate so this is at l,b=(0,0)\n l_b_mu[:, 1] = (torch.asin(xyz[:, 2] / d) + b_offset) * 180 / math.pi\n l_b_mu[:, 2] = 5 * (100 * d).log10()\n return l_b_mu\n\n @property\n def masses(self) -> torch.tensor:\n return self.snap.masses * self.m_scale\n\n @property\n def omega(self) -> torch.tensor:\n return self.snap.omega * self.v_scale / self.d_scale\n\n @omega.setter\n def omega(self, omega: float):\n self.snap.omega = omega / self.v_scale * self.d_scale\n\n @property\n def uvw(self) -> torch.tensor:\n \"\"\"Return UVW velocities.\n \"\"\"\n ddtor = math.pi / 180.\n ang = self.bar_angle * ddtor\n vxyz = torch.zeros_like(self.snap.positions)\n # sun moves at Vsun[0] towards galactic center i.e. other stars are moving away towards larger x\n vel = self.snap.velocities * self.v_scale\n vxyz[:, 0] = (vel[:, 0] * torch.cos(-ang) - vel[:, 1] * torch.sin(-ang)) + self.v_sun[0]\n # sun moves at Vsun[1] in direction of rotation, other stars are going slower than (0,-Vc,0)\n vxyz[:, 1] = (vel[:, 0] * torch.sin(-ang) + vel[:, 1] * torch.cos(-ang)) - self.v_sun[1]\n # sun is moving towards ngp i.e. other stars on average move at negative vz\n vxyz[:, 2] = vel[:, 2] - self.v_sun[2]\n return vxyz\n\n @property\n def vr(self) -> torch.tensor:\n \"\"\"Return array of particles radial velocities in [km/s]\"\"\"\n xyz = self.xyz\n vxyz = self.uvw\n r = xyz.norm(dim=-1)\n vr = (xyz * vxyz).sum(dim=-1) / r\n return vr\n\n @property\n def mul_mub(self) -> torch.tensor:\n \"\"\"Return proper motions of particles in [mas/yr] in (l, b).\n Proper motion in l is (rate of change of l)*cos(b)\"\"\"\n xyz = self.xyz\n vxyz = self.uvw\n r = xyz.norm(dim=-1)\n rxy = (xyz[:, 0] ** 2 + xyz[:, 1] ** 2).sqrt()\n # magic number comes from: 1 mas/yr = 4.74057 km/s at 1 kpc\n mul = (-vxyz[:, 0] * xyz[:, 1] / rxy + vxyz[:, 1] * xyz[:, 0] / rxy) / r / 4.74057\n mub = (-vxyz[:, 0] * xyz[:, 2] * xyz[:, 0] / rxy - vxyz[:, 1] * xyz[:, 2] * xyz[:, 1] / rxy + vxyz[:, 2] * rxy) / (\n r ** 2) / 4.74057\n return torch.stack((mul, mub), dim=-1)\n" ]
[ [ "torch.sum", "torch.stack", "torch.cos", "torch.asin", "torch.as_tensor", "torch.linspace", "torch.zeros_like", "torch.no_grad", "torch.sin", "torch.nn.ModuleList", "torch.meshgrid", "torch.isfinite", "torch.atan2" ] ]
HennyJie/BrainGB
[ "96cf6711e2f2e6fa48b699ce3c0d6e318955c4de" ]
[ "src/dataset/transforms.py" ]
[ "import torch\nfrom node2vec import Node2Vec as Node2Vec_\nfrom .brain_data import BrainData\nfrom torch_geometric.data import Data\nfrom networkx.convert_matrix import from_numpy_matrix\nfrom .utils import binning, LDP\nimport networkx as nx\nfrom .base_transform import BaseTransform\nfrom numpy import linalg as LA\nimport numpy as np\n\n\nclass FromSVTransform(BaseTransform):\n def __init__(self, sv_transform):\n super(FromSVTransform, self).__init__()\n self.sv_transform = sv_transform\n\n def __call__(self, data):\n keys = list(filter(lambda x: x.startswith('edge_index'), data.keys))\n for key in keys:\n if key.startswith('edge_index'):\n postfix = key[10:]\n edge_index = data[f'edge_index{postfix}']\n edge_attr = data[f'edge_attr{postfix}']\n svdata = Data(edge_index=edge_index, edge_attr=edge_attr, num_nodes=data.num_nodes)\n svdata_transformed = self.sv_transform(svdata)\n data[f'x{postfix}'] = svdata_transformed.x\n data[f'edge_index{postfix}'] = svdata_transformed.edge_index\n data[f'edge_attr{postfix}'] = svdata_transformed.edge_attr\n return data\n\n def __str__(self):\n return self.sv_transform.__class__.__name__\n\n\nclass Identity(BaseTransform):\n def __call__(self, data: BrainData):\n \"\"\"\n Returns a diagonal matrix with ones on the diagonal.\n :param data: BrainData\n :return: torch.Tensor\n \"\"\"\n data.x = torch.diag(torch.ones(data.num_nodes))\n return data\n\n\nclass Degree(BaseTransform):\n def __call__(self, data: BrainData):\n \"\"\"\n Returns a diagonal matrix with the degree of each node on the diagonal.\n :param data: BrainData\n :return: torch.Tensor\n \"\"\"\n adj = torch.sparse_coo_tensor(data.edge_index, data.edge_attr, [data.num_nodes, data.num_nodes])\n adj = adj.to_dense()\n data.x = torch.Tensor(adj.sum(dim=1, keepdim=True)).float()\n return data\n\n def __str__(self):\n return 'Degree'\n\n\nclass LDPTransform(BaseTransform):\n def __call__(self, data: BrainData):\n \"\"\"\n Returns node feature with LDP transform.\n :param data: BrainData\n :return: torch.Tensor\n \"\"\"\n adj = torch.sparse_coo_tensor(data.edge_index, data.edge_attr, [data.num_nodes, data.num_nodes])\n adj = adj.to_dense()\n data.x = torch.Tensor(\n LDP(nx.from_numpy_array(adj.numpy()))\n ).float()\n return data\n\n def __str__(self):\n return 'LDP'\n\n\nclass DegreeBin(BaseTransform):\n def __call__(self, data: BrainData):\n \"\"\"\n Returns node feature with degree bin transform.\n :param data: BrainData\n :return: torch.Tensor\n \"\"\"\n adj = torch.sparse_coo_tensor(data.edge_index, data.edge_attr, [data.num_nodes, data.num_nodes])\n adj = adj.to_dense()\n return torch.Tensor(binning(adj.sum(dim=1))).float()\n\n def __str__(self):\n return 'Degree_Bin'\n\n\nclass Adj(BaseTransform):\n def __call__(self, data: BrainData):\n \"\"\"\n Returns adjacency matrix.\n :param data: BrainData\n :return: torch.Tensor\n \"\"\"\n adj = torch.sparse_coo_tensor(data.edge_index, data.edge_attr, [data.num_nodes, data.num_nodes])\n adj = adj.to_dense()\n data.x = adj\n return data\n\n def __str__(self):\n return 'Adj'\n\n\nclass Eigenvector(BaseTransform):\n def __call__(self, data: BrainData):\n \"\"\"\n Returns node feature with eigenvector.\n :param data: BrainData\n :return: torch.Tensor\n \"\"\"\n adj = torch.sparse_coo_tensor(data.edge_index, data.edge_attr, [data.num_nodes, data.num_nodes])\n adj = adj.to_dense()\n w, v = LA.eig(adj.numpy())\n # indices = np.argsort(w)[::-1]\n v = v.transpose()\n data.x = torch.Tensor(v).float()\n return data\n\n\nclass EigenNorm(BaseTransform):\n def 
__call__(self, data: BrainData):\n \"\"\"\n Returns node feature with eigen norm.\n :param data: BrainData\n :return: torch.Tensor\n \"\"\"\n adj = torch.sparse_coo_tensor(data.edge_index, data.edge_attr, [data.num_nodes, data.num_nodes])\n adj = adj.to_dense()\n sum_of_rows = adj.sum(dim=1)\n adj /= sum_of_rows\n adj = torch.nan_to_num(adj)\n w, v = LA.eig(adj.numpy())\n # indices = np.argsort(w)[::-1]\n v = v.transpose()\n data.x = torch.Tensor(v).float()\n return data\n\n\nclass Node2Vec(BaseTransform):\n def __init__(self, feature_dim=32, walk_length=5, num_walks=200, num_workers=4,\n window=10, min_count=1, batch_words=4):\n super(Node2Vec, self).__init__()\n self.feature_dim = feature_dim\n self.walk_length = walk_length\n self.num_walks = num_walks\n self.num_workers = num_workers\n self.window = window\n self.min_count = min_count\n self.batch_words = batch_words\n\n def __call__(self, data):\n \"\"\"\n Returns node feature with node2vec transform.\n :param data: BrainData\n :return: torch.Tensor\n \"\"\"\n adj = torch.sparse_coo_tensor(data.edge_index, data.edge_attr, [data.num_nodes, data.num_nodes])\n adj = adj.to_dense()\n if (adj < 0).int().sum() > 0:\n # split the adjacency matrix into two (negative and positive) parts\n pos_adj = adj.clone()\n pos_adj[adj < 0] = 0\n neg_adj = adj.clone()\n neg_adj[adj > 0] = 0\n neg_adj = -neg_adj\n adjs = [pos_adj, neg_adj]\n else:\n adjs = [adj]\n\n xs = []\n for adj in adjs:\n x = torch.zeros((data.num_nodes, self.feature_dim))\n graph = from_numpy_matrix(adj.numpy())\n node2vec = Node2Vec_(graph, dimensions=self.feature_dim, walk_length=self.walk_length,\n num_walks=self.num_walks, workers=self.num_workers)\n model = node2vec.fit(window=self.window, min_count=self.min_count,\n batch_words=self.batch_words)\n for i in range(data.num_nodes):\n x[i] = torch.Tensor(model.wv[f'{i}'].copy())\n xs.append(x)\n data.x = torch.cat(xs, dim=-1)\n return data\n\n def __str__(self):\n return 'Node2Vec'\n" ]
[ [ "torch.ones", "torch.sparse_coo_tensor", "torch.zeros", "torch.cat", "torch.Tensor", "torch.nan_to_num" ] ]
oahziur/probability
[ "ca14fa8924749593fd21e2b6389551f964527eec", "11645be43d2845da65a4fbafde4cfa95780280c0" ]
[ "tensorflow_probability/python/bijectors/tanh.py", "tensorflow_probability/python/mcmc/transformed_kernel_test.py" ]
[ "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tanh bijector.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow_probability.python.bijectors import bijector\n\n\n__all__ = [\n \"Tanh\",\n]\n\n\nclass Tanh(bijector.Bijector):\n \"\"\"Bijector that computes `Y = tanh(X)`, therefore `Y in (-1, 1)`.\n\n This can be achieved by an affine transform of the Sigmoid bijector, i.e.,\n it is equivalent to\n ```\n tfb.Chain([tfb.Affine(shift=-1, scale=2.),\n tfb.Sigmoid(),\n tfb.Affine(scale=2.)])\n ```\n\n However, using the `Tanh` bijector directly is slightly faster and more\n numerically stable.\n \"\"\"\n\n def __init__(self, validate_args=False, name=\"tanh\"):\n super(Tanh, self).__init__(\n forward_min_event_ndims=0,\n validate_args=validate_args,\n name=name)\n\n def _forward(self, x):\n return tf.nn.tanh(x)\n\n def _inverse(self, y):\n return tf.atanh(y)\n\n def _inverse_log_det_jacobian(self, y):\n return -tf.log1p(-tf.square(y))\n\n def _forward_log_det_jacobian(self, x):\n # This formula is mathematically equivalent to\n # `tf.log1p(-tf.square(tf.tanh(x)))`, however this code is more numerically\n # stable.\n\n # Derivation:\n # log(1 - tanh(x)^2)\n # = log(sech(x)^2)\n # = 2 * log(sech(x))\n # = 2 * log(2e^-x / (e^-2x + 1))\n # = 2 * (log(2) - x - log(e^-2x + 1))\n # = 2 * (log(2) - x - softplus(-2x))\n return 2. * (np.log(2.) - x - tf.nn.softplus(-2. 
* x))\n", "# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests for `TransformedTransitionKernel` `TransitionKernel`.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\n# Dependency imports\nimport numpy as np\n\nimport tensorflow as tf\nimport tensorflow_probability as tfp\n\n\ntfd = tfp.distributions\ntfb = tfp.bijectors\n\n\nFakeInnerKernelResults = collections.namedtuple(\n 'FakeInnerKernelResults', [])\n\n\nclass FakeInnerKernel(tfp.mcmc.TransitionKernel):\n \"\"\"Fake Transition Kernel.\"\"\"\n\n def __init__(self, target_log_prob_fn):\n self._parameters = dict(target_log_prob_fn=target_log_prob_fn)\n\n @property\n def parameters(self):\n return self._parameters\n\n @property\n def is_calibrated(self):\n return True\n\n def one_step(self, current_state, previous_kernel_results):\n pass\n\n def bootstrap_results(self, init_state):\n return FakeInnerKernelResults()\n\n\nclass TransformedTransitionKernelTest(tf.test.TestCase):\n\n def setUp(self):\n self.dtype = np.float32\n\n def test_support_works_correctly_with_HMC(self):\n num_results = 2000\n with self.cached_session(graph=tf.Graph()) as sess:\n target = tfd.Beta(\n concentration1=self.dtype(1.),\n concentration0=self.dtype(10.))\n transformed_hmc = tfp.mcmc.TransformedTransitionKernel(\n inner_kernel=tfp.mcmc.HamiltonianMonteCarlo(\n target_log_prob_fn=target.log_prob,\n step_size=1.64,\n num_leapfrog_steps=2,\n seed=55),\n bijector=tfb.Sigmoid())\n # Recall, tfp.mcmc.sample_chain calls\n # transformed_hmc.bootstrap_results too.\n states, kernel_results = tfp.mcmc.sample_chain(\n num_results=num_results,\n # The initial state is used by inner_kernel.bootstrap_results.\n # Note the input is *after* bijector.forward.\n current_state=self.dtype(0.25),\n kernel=transformed_hmc,\n num_burnin_steps=200,\n num_steps_between_results=1,\n parallel_iterations=1)\n self.assertEqual(num_results, tf.dimension_value(states.shape[0]))\n sample_mean = tf.reduce_mean(states, axis=0)\n sample_var = tf.reduce_mean(\n tf.squared_difference(states, sample_mean),\n axis=0)\n [\n sample_mean_,\n sample_var_,\n is_accepted_,\n true_mean_,\n true_var_,\n ] = sess.run([\n sample_mean,\n sample_var,\n kernel_results.inner_results.is_accepted,\n target.mean(),\n target.variance(),\n ])\n self.assertAllClose(true_mean_, sample_mean_,\n atol=0.06, rtol=0.)\n self.assertAllClose(true_var_, sample_var_,\n atol=0.01, rtol=0.1)\n self.assertNear(0.6, is_accepted_.mean(), err=0.05)\n\n def test_support_works_correctly_with_MALA(self):\n num_results = 2000\n with self.cached_session(graph=tf.Graph()) as sess:\n target = tfd.Beta(\n concentration1=self.dtype(1.),\n concentration0=self.dtype(10.))\n transformed_mala = tfp.mcmc.TransformedTransitionKernel(\n inner_kernel=tfp.mcmc.MetropolisAdjustedLangevinAlgorithm(\n 
target_log_prob_fn=target.log_prob,\n step_size=1.,\n seed=55),\n bijector=tfb.Sigmoid())\n # Recall, tfp.mcmc.sample_chain calls\n # transformed_hmc.bootstrap_results too.\n states, _ = tfp.mcmc.sample_chain(\n num_results=num_results,\n # The initial state is used by inner_kernel.bootstrap_results.\n # Note the input is *after* bijector.forward.\n current_state=self.dtype(0.25),\n kernel=transformed_mala,\n num_burnin_steps=200,\n num_steps_between_results=1,\n parallel_iterations=1)\n self.assertEqual(num_results, tf.dimension_value(states.shape[0]))\n sample_mean = tf.reduce_mean(states, axis=0)\n sample_var = tf.reduce_mean(\n tf.squared_difference(states, sample_mean),\n axis=0)\n [\n sample_mean_,\n sample_var_,\n true_mean_,\n true_var_,\n ] = sess.run([\n sample_mean,\n sample_var,\n target.mean(),\n target.variance(),\n ])\n self.assertAllClose(true_mean_, sample_mean_,\n atol=0.06, rtol=0.)\n self.assertAllClose(true_var_, sample_var_,\n atol=0.01, rtol=0.1)\n\n def test_support_works_correctly_with_RWM(self):\n num_results = 2000\n with self.cached_session(graph=tf.Graph()) as sess:\n target = tfd.Beta(\n concentration1=self.dtype(1.),\n concentration0=self.dtype(10.))\n transformed_rwm = tfp.mcmc.TransformedTransitionKernel(\n inner_kernel=tfp.mcmc.RandomWalkMetropolis(\n target_log_prob_fn=target.log_prob,\n new_state_fn=tfp.mcmc.random_walk_normal_fn(scale=1.5),\n seed=55),\n bijector=tfb.Sigmoid())\n # Recall, tfp.mcmc.sample_chain calls\n # transformed_hmc.bootstrap_results too.\n states, _ = tfp.mcmc.sample_chain(\n num_results=num_results,\n # The initial state is used by inner_kernel.bootstrap_results.\n # Note the input is *after* bijector.forward.\n current_state=self.dtype(0.25),\n kernel=transformed_rwm,\n num_burnin_steps=200,\n num_steps_between_results=1,\n parallel_iterations=1)\n self.assertEqual(num_results, tf.dimension_value(states.shape[0]))\n sample_mean = tf.reduce_mean(states, axis=0)\n sample_var = tf.reduce_mean(\n tf.squared_difference(states, sample_mean),\n axis=0)\n [\n sample_mean_,\n sample_var_,\n true_mean_,\n true_var_,\n ] = sess.run([\n sample_mean,\n sample_var,\n target.mean(),\n target.variance(),\n ])\n self.assertAllClose(true_mean_, sample_mean_,\n atol=0.06, rtol=0.)\n self.assertAllClose(true_var_, sample_var_,\n atol=0.01, rtol=0.1)\n\n def test_end_to_end_works_correctly(self):\n true_mean = self.dtype([0, 0])\n true_cov = self.dtype([[1, 0.5],\n [0.5, 1]])\n num_results = 2000\n counter = collections.Counter()\n with self.cached_session(graph=tf.Graph()) as sess:\n def target_log_prob(x, y):\n counter['target_calls'] += 1\n # Corresponds to unnormalized MVN.\n # z = matmul(inv(chol(true_cov)), [x, y] - true_mean)\n z = tf.stack([x, y], axis=-1) - true_mean\n z = tf.squeeze(\n tf.linalg.triangular_solve(\n np.linalg.cholesky(true_cov),\n z[..., tf.newaxis]),\n axis=-1)\n return -0.5 * tf.reduce_sum(z**2., axis=-1)\n\n transformed_hmc = tfp.mcmc.TransformedTransitionKernel(\n inner_kernel=tfp.mcmc.HamiltonianMonteCarlo(\n target_log_prob_fn=target_log_prob,\n # Affine scaling means we have to change the step_size\n # in order to get 60% acceptance, as was done in mcmc/hmc_test.py.\n step_size=[1.23 / 0.75, 1.23 / 0.5],\n num_leapfrog_steps=2,\n seed=54),\n bijector=[\n tfb.AffineScalar(scale=0.75),\n tfb.AffineScalar(scale=0.5),\n ])\n # Recall, tfp.mcmc.sample_chain calls\n # transformed_hmc.bootstrap_results too.\n states, kernel_results = tfp.mcmc.sample_chain(\n num_results=num_results,\n # The initial state is used by 
inner_kernel.bootstrap_results.\n # Note the input is *after* `bijector.forward`.\n current_state=[self.dtype(-2), self.dtype(2)],\n kernel=transformed_hmc,\n num_burnin_steps=200,\n num_steps_between_results=1,\n parallel_iterations=1)\n self.assertAllEqual(dict(target_calls=2), counter)\n states = tf.stack(states, axis=-1)\n self.assertEqual(num_results, tf.dimension_value(states.shape[0]))\n sample_mean = tf.reduce_mean(states, axis=0)\n x = states - sample_mean\n sample_cov = tf.matmul(x, x, transpose_a=True) / self.dtype(num_results)\n [sample_mean_, sample_cov_, is_accepted_] = sess.run([\n sample_mean, sample_cov, kernel_results.inner_results.is_accepted])\n self.assertNear(0.6, is_accepted_.mean(), err=0.05)\n self.assertAllClose(true_mean, sample_mean_,\n atol=0.06, rtol=0.)\n self.assertAllClose(true_cov, sample_cov_,\n atol=0., rtol=0.1)\n\n def test_bootstrap_requires_xor_args(self):\n def fake_target_log_prob(x):\n return -x**2 / 2.\n\n transformed_fake = tfp.mcmc.TransformedTransitionKernel(\n inner_kernel=FakeInnerKernel(target_log_prob_fn=fake_target_log_prob),\n bijector=tfb.Exp())\n with self.assertRaisesWithPredicateMatch(\n ValueError, r'Must specify exactly one'):\n transformed_fake.bootstrap_results()\n with self.assertRaisesWithPredicateMatch(\n ValueError, r'Must specify exactly one'):\n transformed_fake.bootstrap_results(\n init_state=2., transformed_init_state=np.log(2.))\n\n def test_bootstrap_correctly_untransforms(self):\n def fake_target_log_prob(x):\n return -x**2 / 2.\n\n transformed_fake = tfp.mcmc.TransformedTransitionKernel(\n inner_kernel=FakeInnerKernel(target_log_prob_fn=fake_target_log_prob),\n bijector=tfb.Exp())\n with self.cached_session(graph=tf.Graph()) as sess:\n [\n automatic_pkr,\n manual_pkr,\n ] = sess.run([\n transformed_fake.bootstrap_results(2.),\n transformed_fake.bootstrap_results(transformed_init_state=[4., 5.]),\n ])\n self.assertNear(np.log(2.), automatic_pkr.transformed_state, err=1e-6)\n self.assertAllClose(\n [4., 5.], manual_pkr.transformed_state, atol=0., rtol=1e-6)\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "tensorflow.nn.tanh", "numpy.log", "tensorflow.atanh", "tensorflow.square", "tensorflow.nn.softplus" ], [ "tensorflow.stack", "tensorflow.dimension_value", "tensorflow.squared_difference", "tensorflow.reduce_mean", "tensorflow.matmul", "tensorflow.Graph", "numpy.linalg.cholesky", "numpy.log", "tensorflow.reduce_sum", "tensorflow.test.main" ] ]
iamlmn/monte_carlo_analysis
[ "45f7af2b439f80bce429a94257a1167c9d5f4a2c" ]
[ "scenario analysis/portfolio_evaluation.py" ]
[ "import yfinance\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\n\ndef _simulate_returns(historical_returns,forecast_days):\n return historical_returns.sample(n = forecast_days, \n replace = True).reset_index(drop = True)\n\n\ndef simulate_modified_returns(\n historical_returns,\n forecast_days,\n correct_mean_by):\n h = historical_returns.copy()\n new_series = h + correct_mean_by\n return new_series.sample(n=forecast_days, \n replace = True).reset_index(drop=True)\n\n\ndef simulate_portfolio(historical_returns,composition,forecast_days):\n result = 0\n for t in tqdm(composition):\n name,weight = t[0],t[1]\n s = _simulate_returns(historical_returns['return_%s' % (name)], forecast_days)\n result = result + s * weight\n \n return(result)\n\n\ndef simulate_modified_portfolio(\n historical_returns,\n composition,\n forecast_days):\n \n result = 0\n \n for t in composition:\n name,weight,correction = t[0],t[1],t[2]\n s = simulate_modified_returns(\n historical_returns['return_%s' % (name)], \n forecast_days,correction\n )\n \n result = result + s * weight\n return(result)\n\n\n\ndef simulation(historical_returns,composition,forecast_days,n_iterations):\n simulated_portfolios = None\n\n for i in range(n_iterations):\n sim = simulate_modified_portfolio(historical_returns,composition,forecast_days)\n\n sim_port = pd.DataFrame({'returns_%d' % (i) : sim})\n\n if simulated_portfolios is None:\n simulated_portfolios = sim_port\n else:\n simulated_portfolios = simulated_portfolios.join(sim_port)\n \n return simulated_portfolios\n\nif __name__ == '__main__':\n portfolio_composition = [('MSFT',0.5),('AAPL',0.2),('GOOG',0.3)]\n returns = pd.DataFrame({})\n\n # create returns portfolio dataframe\n\n \n for t in portfolio_composition:\n name = t[0]\n ticker = yfinance.Ticker(name)\n data = ticker.history(interval=\"1d\",start=\"2010-01-01\",end=\"2019-12-31\")\n data['return_%s' % (name)] = data['Close'].pct_change(1)\n returns = returns.join(data[['return_%s' % (name)]],how=\"outer\").dropna()\n\n # Monte Carlo simulation of a portfolio\n\n \n # simulate_portfolio(returns,portfolio_composition,10)\n # This may be enough for portfolio simulation, but we want something more, that is the what-if analysis.\n\n # print(\"The historical average returns are : \\n\", returns.mean(axis=0))\n \n '''\n If we perform portfolio simulation as shown before, \n we are simply saying that the future returns are a random sample \n of the past returns. We already know this isn’t completely true. \n Moreover, maybe we are performing scenario analysis because \n we want to know what happens if certain conditions will occur. \n For example, what happens if the average daily return of each stock \n is lower than its historical value?If we perform portfolio \n simulation as shown before, we are simply saying that the future returns \n are a random sample of the past returns. We already know this \n isn’t completely true. Moreover, maybe we are performing scenario analysis \n because we want to know what happens if certain conditions will occur. \n For example, what happens if the average daily return of each \n stock is lower than its historical value?\n '''\n\n print('Let’s try to simulate what happens if the average \\\n returns drop by -0.0001 for MSFT, -0.001 for AAPL and -0.0005 for GOOG. 
\\\n We must subtract these quantities from each stock and then simulate the \\\n future portfolios with the new, modified data.')\n\n\n\n # We’ll add these corrections directly to the portfolio_composition list (they are the third component of each tuple):\n\n new_portfolio_composition = [\n ('MSFT', 0.5,-0.0001), \n ('AAPL', 0.2,-0.001), \n ('GOOG', 0.3,-0.0005)\n]\n\n # Simulations and results\n\n forecast_days = 20\n n_iterations = 200\n\n simulated_portfolios = simulation(returns,\n new_portfolio_composition,forecast_days,n_iterations)\n\n\n\n # Taken the daily returns of a portfolio, we can build the return after N days with the compound interest formula:\n\n percentile_5th = simulated_portfolios.cumsum().apply(lambda x : np.percentile(x,5),axis=1)\n percentile_95th = simulated_portfolios.cumsum().apply(lambda x : np.percentile(x,95),axis=1)\n average_port = simulated_portfolios.cumsum().apply(lambda x : np.mean(x),axis=1)\n print(percentile_5th.tail(1))\n print(percentile_95th.tail(1))\n print(average_port.tail(1))\n\n # Confidence interval for future portfolios\n x = range(forecast_days)\n\n plt.rcParams['figure.figsize'] = [10, 10]\n\n plt.plot(x,average_port,label=\"Average portfolio\")\n plt.xlabel(\"Day\")\n plt.ylabel(\"Portfolio return\")\n\n\n plt.fill_between(x, percentile_5th, percentile_95th,alpha=0.2)\n plt.grid()\n plt.legend()\n\n plt.show()\n\n\n # Probability of beating the portfolio target\n\n target_return = 0.02\n target_prob_port = simulated_portfolios.cumsum().apply(lambda x : np.mean(x > target_return),axis=1)\n\n print(\"Probabilityof beating the portfolio target {} \".format(target_return),target_prob_port.tail(1))\n\n\n # The size of the error bars is calculated with the standard error formula:\n err_bars = np.sqrt(\n target_prob_port * (1-target_prob_port) / n_iterations\n )\n x = range(forecast_days)\n plt.rcParams['figure.figsize'] = [10, 10]\n plt.bar(x,target_prob_port,yerr = err_bars)\n plt.xlabel(\"Day\")\n plt.ylabel(\"Probability of return >= %.2f\" % (target_return))\n plt.grid()\n plt.show()\n\n\n\n # Sharpe ratio histogram\n '''\n performance metric of a portfolio\n '''\n\n sharpe_indices = simulated_portfolios.apply(lambda x : np.mean(x)/np.std(x))\n plt.hist(sharpe_indices,bins=\"rice\")\n plt.xlabel(\"Sharpe ratio\")\n plt.show()\n print(\"Sharpe ratio mean value\",np.mean(sharpe_indices))" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.grid", "pandas.DataFrame", "numpy.std", "numpy.percentile", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.hist", "numpy.sqrt", "matplotlib.pyplot.plot", "matplotlib.pyplot.fill_between", "matplotlib.pyplot.xlabel", "numpy.mean", "matplotlib.pyplot.bar" ] ]
rtg0795/tfx
[ "63c31b719896eef645df3850d0e6b946e44cd059", "63c31b719896eef645df3850d0e6b946e44cd059", "63c31b719896eef645df3850d0e6b946e44cd059" ]
[ "tfx/orchestration/portable/python_executor_operator_test.py", "tfx/orchestration/kubeflow/kubeflow_dag_runner_test.py", "tfx/utils/name_utils_test.py" ]
[ "# Copyright 2020 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for tfx.orchestration.portable.python_executor_operator.\"\"\"\n\nimport os\nfrom typing import Any, Dict, List\n\nimport tensorflow as tf\nfrom tfx import types\nfrom tfx.dsl.components.base import base_executor\nfrom tfx.dsl.io import fileio\nfrom tfx.orchestration.portable import data_types\nfrom tfx.orchestration.portable import outputs_utils\nfrom tfx.orchestration.portable import python_executor_operator\nfrom tfx.proto.orchestration import executable_spec_pb2\nfrom tfx.proto.orchestration import execution_result_pb2\nfrom tfx.proto.orchestration import pipeline_pb2\nfrom tfx.types import standard_artifacts\nfrom tfx.utils import test_case_utils\n\nfrom google.protobuf import text_format\n\n\nclass InprocessExecutor(base_executor.BaseExecutor):\n \"\"\"A Fake in-process executor what returns execution result.\"\"\"\n\n def Do(\n self, input_dict: Dict[str, List[types.Artifact]],\n output_dict: Dict[str, List[types.Artifact]],\n exec_properties: Dict[str, Any]) -> execution_result_pb2.ExecutorOutput:\n executor_output = execution_result_pb2.ExecutorOutput()\n outputs_utils.populate_output_artifact(executor_output, output_dict)\n outputs_utils.populate_exec_properties(executor_output, exec_properties)\n return executor_output\n\n\nclass NotInprocessExecutor(base_executor.BaseExecutor):\n \"\"\"A Fake not-in-process executor what writes execution result to executor_output_uri.\"\"\"\n\n def Do(self, input_dict: Dict[str, List[types.Artifact]],\n output_dict: Dict[str, List[types.Artifact]],\n exec_properties: Dict[str, Any]) -> None:\n executor_output = execution_result_pb2.ExecutorOutput()\n outputs_utils.populate_output_artifact(executor_output, output_dict)\n outputs_utils.populate_exec_properties(executor_output, exec_properties)\n with fileio.open(self._context.executor_output_uri, 'wb') as f:\n f.write(executor_output.SerializeToString())\n\n\nclass InplaceUpdateExecutor(base_executor.BaseExecutor):\n \"\"\"A Fake executor that uses the executor Context to compute its output.\"\"\"\n\n def Do(self, input_dict: Dict[str, List[types.Artifact]],\n output_dict: Dict[str, List[types.Artifact]],\n exec_properties: Dict[str, Any]) -> None:\n model = output_dict['output_key'][0]\n model.name = '{0}.{1}.my_model'.format(\n self._context.pipeline_info.id,\n self._context.pipeline_node.node_info.id)\n\n\nclass PythonExecutorOperatorTest(test_case_utils.TfxTest):\n\n def _get_execution_info(self, input_dict, output_dict, exec_properties):\n pipeline_node = pipeline_pb2.PipelineNode(node_info={'id': 'MyPythonNode'})\n pipeline_info = pipeline_pb2.PipelineInfo(id='MyPipeline')\n stateful_working_dir = os.path.join(self.tmp_dir, 'stateful_working_dir')\n executor_output_uri = os.path.join(self.tmp_dir, 'executor_output')\n return data_types.ExecutionInfo(\n execution_id=1,\n input_dict=input_dict,\n output_dict=output_dict,\n exec_properties=exec_properties,\n 
stateful_working_dir=stateful_working_dir,\n execution_output_uri=executor_output_uri,\n pipeline_node=pipeline_node,\n pipeline_info=pipeline_info,\n pipeline_run_id=99)\n\n def testRunExecutor_with_InprocessExecutor(self):\n executor_sepc = text_format.Parse(\n \"\"\"\n class_path: \"tfx.orchestration.portable.python_executor_operator_test.InprocessExecutor\"\n \"\"\", executable_spec_pb2.PythonClassExecutableSpec())\n operator = python_executor_operator.PythonExecutorOperator(executor_sepc)\n input_dict = {'input_key': [standard_artifacts.Examples()]}\n output_dict = {'output_key': [standard_artifacts.Model()]}\n exec_properties = {'key': 'value'}\n executor_output = operator.run_executor(\n self._get_execution_info(input_dict, output_dict, exec_properties))\n self.assertProtoPartiallyEquals(\n \"\"\"\n execution_properties {\n key: \"key\"\n value {\n string_value: \"value\"\n }\n }\n output_artifacts {\n key: \"output_key\"\n value {\n artifacts {\n }\n }\n }\"\"\", executor_output)\n\n def testRunExecutor_with_NotInprocessExecutor(self):\n executor_sepc = text_format.Parse(\n \"\"\"\n class_path: \"tfx.orchestration.portable.python_executor_operator_test.NotInprocessExecutor\"\n \"\"\", executable_spec_pb2.PythonClassExecutableSpec())\n operator = python_executor_operator.PythonExecutorOperator(executor_sepc)\n input_dict = {'input_key': [standard_artifacts.Examples()]}\n output_dict = {'output_key': [standard_artifacts.Model()]}\n exec_properties = {'key': 'value'}\n executor_output = operator.run_executor(\n self._get_execution_info(input_dict, output_dict, exec_properties))\n self.assertProtoPartiallyEquals(\n \"\"\"\n execution_properties {\n key: \"key\"\n value {\n string_value: \"value\"\n }\n }\n output_artifacts {\n key: \"output_key\"\n value {\n artifacts {\n }\n }\n }\"\"\", executor_output)\n\n def testRunExecutor_with_InplaceUpdateExecutor(self):\n executor_sepc = text_format.Parse(\n \"\"\"\n class_path: \"tfx.orchestration.portable.python_executor_operator_test.InplaceUpdateExecutor\"\n \"\"\", executable_spec_pb2.PythonClassExecutableSpec())\n operator = python_executor_operator.PythonExecutorOperator(executor_sepc)\n input_dict = {'input_key': [standard_artifacts.Examples()]}\n output_dict = {'output_key': [standard_artifacts.Model()]}\n exec_properties = {\n 'string': 'value',\n 'int': 1,\n 'float': 0.0,\n # This should not happen on production and will be\n # dropped.\n 'proto': execution_result_pb2.ExecutorOutput()\n }\n executor_output = operator.run_executor(\n self._get_execution_info(input_dict, output_dict, exec_properties))\n self.assertProtoPartiallyEquals(\n \"\"\"\n execution_properties {\n key: \"float\"\n value {\n double_value: 0.0\n }\n }\n execution_properties {\n key: \"int\"\n value {\n int_value: 1\n }\n }\n execution_properties {\n key: \"string\"\n value {\n string_value: \"value\"\n }\n }\n output_artifacts {\n key: \"output_key\"\n value {\n artifacts {\n custom_properties {\n key: \"name\"\n value {\n string_value: \"MyPipeline.MyPythonNode.my_model\"\n }\n }\n name: \"MyPipeline.MyPythonNode.my_model\"\n }\n }\n }\"\"\", executor_output)\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under 
the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for tfx.orchestration.kubeflow.kubeflow_dag_runner.\"\"\"\n\nimport json\nimport os\nimport tarfile\nfrom typing import List\n\nfrom kfp import onprem\nimport tensorflow as tf\nfrom tfx.components.statistics_gen import component as statistics_gen_component\nfrom tfx.dsl.component.experimental import executor_specs\nfrom tfx.dsl.component.experimental.annotations import Parameter\nfrom tfx.dsl.component.experimental.decorators import component\nfrom tfx.dsl.components.base import base_component\nfrom tfx.dsl.io import fileio\nfrom tfx.extensions.google_cloud_big_query.example_gen import component as big_query_example_gen_component\nfrom tfx.orchestration import data_types\nfrom tfx.orchestration import pipeline as tfx_pipeline\nfrom tfx.orchestration.kubeflow import kubeflow_dag_runner\nfrom tfx.orchestration.kubeflow.decorators import FinalStatusStr\nfrom tfx.proto import example_gen_pb2\nfrom tfx.types import component_spec\nfrom tfx.utils import telemetry_utils\nfrom tfx.utils import test_case_utils\nimport yaml\n\nfrom ml_metadata.proto import metadata_store_pb2\n\n\n@component\ndef _say_hi(status: Parameter[str]):\n print(status)\n\n\n# 2-step pipeline under test.\ndef _two_step_pipeline() -> tfx_pipeline.Pipeline:\n default_input_config = json.dumps({\n 'splits': [{\n 'name': 'single_split',\n 'pattern': 'SELECT * FROM default-table'\n }]\n })\n input_config = data_types.RuntimeParameter(\n name='input_config', ptype=str, default=default_input_config)\n example_gen = big_query_example_gen_component.BigQueryExampleGen(\n input_config=input_config, output_config=example_gen_pb2.Output())\n statistics_gen = statistics_gen_component.StatisticsGen(\n examples=example_gen.outputs['examples'])\n return tfx_pipeline.Pipeline(\n pipeline_name='two_step_pipeline',\n pipeline_root='pipeline_root',\n metadata_connection_config=metadata_store_pb2.ConnectionConfig(),\n components=[example_gen, statistics_gen],\n )\n\n\nclass _DummySpec(component_spec.ComponentSpec):\n INPUTS = {}\n OUTPUTS = {}\n PARAMETERS = {}\n\n\nclass _DummyComponent(base_component.BaseComponent):\n SPEC_CLASS = _DummySpec\n EXECUTOR_SPEC = executor_specs.TemplatedExecutorContainerSpec(\n image='dummy:latest', command=['ls'])\n\n def __init__(self):\n super().__init__(_DummySpec())\n\n\ndef _container_component_pipeline() -> tfx_pipeline.Pipeline:\n return tfx_pipeline.Pipeline(\n pipeline_name='container_component_pipeline',\n pipeline_root='pipeline_root',\n metadata_connection_config=metadata_store_pb2.ConnectionConfig(),\n components=[_DummyComponent()],\n )\n\n\nclass KubeflowDagRunnerTest(test_case_utils.TfxTest):\n\n def setUp(self):\n super().setUp()\n self._source_data_dir = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), 'testdata')\n self.enter_context(test_case_utils.change_working_dir(self.tmp_dir))\n\n def _compare_tfx_ir_against_testdata(self, args: List[str], golden_file: str):\n index_of_tfx_ir_flag = args.index('--tfx_ir')\n self.assertAllGreater(len(args), index_of_tfx_ir_flag)\n real_tfx_ir = json.loads(args[index_of_tfx_ir_flag + 1])\n real_tfx_ir_str = json.dumps(real_tfx_ir, sort_keys=True)\n with open(os.path.join(self._source_data_dir,\n golden_file)) as tfx_ir_json_file:\n formatted_tfx_ir = json.dumps(json.load(tfx_ir_json_file), sort_keys=True)\n 
self.assertEqual(real_tfx_ir_str, formatted_tfx_ir)\n\n def testTwoStepPipeline(self):\n \"\"\"Sanity-checks the construction and dependencies for a 2-step pipeline.\"\"\"\n kubeflow_dag_runner.KubeflowDagRunner().run(_two_step_pipeline())\n file_path = os.path.join(self.tmp_dir, 'two_step_pipeline.tar.gz')\n self.assertTrue(fileio.exists(file_path))\n\n with tarfile.TarFile.open(file_path).extractfile(\n 'pipeline.yaml') as pipeline_file:\n self.assertIsNotNone(pipeline_file)\n pipeline = yaml.safe_load(pipeline_file)\n\n containers = [\n c for c in pipeline['spec']['templates'] if 'container' in c\n ]\n self.assertEqual(2, len(containers))\n\n big_query_container = [\n c for c in containers if c['name'] == 'bigqueryexamplegen'\n ]\n self.assertEqual(1, len(big_query_container))\n self.assertEqual([\n 'python',\n '-m',\n 'tfx.orchestration.kubeflow.container_entrypoint',\n ], big_query_container[0]['container']['command'])\n self.assertIn('--tfx_ir', big_query_container[0]['container']['args'])\n self.assertIn('--node_id', big_query_container[0]['container']['args'])\n self._compare_tfx_ir_against_testdata(\n big_query_container[0]['container']['args'],\n 'two_step_pipeline_post_dehydrate_ir.json')\n\n statistics_gen_container = [\n c for c in containers if c['name'] == 'statisticsgen'\n ]\n self.assertEqual(1, len(statistics_gen_container))\n\n # Ensure the pod labels are correctly appended.\n metadata = [\n c['metadata'] for c in pipeline['spec']['templates'] if 'dag' not in c\n ]\n for m in metadata:\n self.assertEqual('tfx', m['labels'][telemetry_utils.LABEL_KFP_SDK_ENV])\n\n # Ensure dependencies between components are captured.\n dag = [c for c in pipeline['spec']['templates'] if 'dag' in c]\n self.assertEqual(1, len(dag))\n\n self.assertEqual(\n {\n 'tasks': [{\n 'name': 'bigqueryexamplegen',\n 'template': 'bigqueryexamplegen',\n 'arguments': {\n 'parameters': [{\n 'name': 'input_config',\n 'value': '{{inputs.parameters.input_config}}'\n }, {\n 'name': 'pipeline-root',\n 'value': '{{inputs.parameters.pipeline-root}}'\n }]\n }\n }, {\n 'name': 'statisticsgen',\n 'template': 'statisticsgen',\n 'arguments': {\n 'parameters': [{\n 'name': 'pipeline-root',\n 'value': '{{inputs.parameters.pipeline-root}}'\n }]\n },\n 'dependencies': ['bigqueryexamplegen'],\n }]\n }, dag[0]['dag'])\n\n def testDefaultPipelineOperatorFuncs(self):\n kubeflow_dag_runner.KubeflowDagRunner().run(_two_step_pipeline())\n file_path = 'two_step_pipeline.tar.gz'\n self.assertTrue(fileio.exists(file_path))\n\n with tarfile.TarFile.open(file_path).extractfile(\n 'pipeline.yaml') as pipeline_file:\n self.assertIsNotNone(pipeline_file)\n pipeline = yaml.safe_load(pipeline_file)\n\n containers = [\n c for c in pipeline['spec']['templates'] if 'container' in c\n ]\n self.assertEqual(2, len(containers))\n\n def testMountGcpServiceAccount(self):\n kubeflow_dag_runner.KubeflowDagRunner(\n config=kubeflow_dag_runner.KubeflowDagRunnerConfig(\n pipeline_operator_funcs=kubeflow_dag_runner\n .get_default_pipeline_operator_funcs(use_gcp_sa=True))).run(\n _two_step_pipeline())\n file_path = 'two_step_pipeline.tar.gz'\n self.assertTrue(fileio.exists(file_path))\n\n with tarfile.TarFile.open(file_path).extractfile(\n 'pipeline.yaml') as pipeline_file:\n self.assertIsNotNone(pipeline_file)\n pipeline = yaml.safe_load(pipeline_file)\n\n containers = [\n c for c in pipeline['spec']['templates'] if 'container' in c\n ]\n self.assertEqual(2, len(containers))\n\n # Check that each container has default GCP credentials.\n\n container_0 = 
containers[0]\n env = [\n env for env in container_0['container']['env']\n if env['name'] == 'GOOGLE_APPLICATION_CREDENTIALS'\n ]\n self.assertEqual(1, len(env))\n self.assertEqual('/secret/gcp-credentials/user-gcp-sa.json',\n env[0]['value'])\n\n container_1 = containers[0]\n env = [\n env for env in container_1['container']['env']\n if env['name'] == 'GOOGLE_APPLICATION_CREDENTIALS'\n ]\n self.assertEqual(1, len(env))\n self.assertEqual('/secret/gcp-credentials/user-gcp-sa.json',\n env[0]['value'])\n\n def testVolumeMountingPipelineOperatorFuncs(self):\n mount_volume_op = onprem.mount_pvc('my-persistent-volume-claim',\n 'my-volume-name',\n '/mnt/volume-mount-path')\n config = kubeflow_dag_runner.KubeflowDagRunnerConfig(\n pipeline_operator_funcs=[mount_volume_op])\n\n kubeflow_dag_runner.KubeflowDagRunner(config=config).run(\n _two_step_pipeline())\n file_path = 'two_step_pipeline.tar.gz'\n self.assertTrue(fileio.exists(file_path))\n\n with tarfile.TarFile.open(file_path).extractfile(\n 'pipeline.yaml') as pipeline_file:\n self.assertIsNotNone(pipeline_file)\n pipeline = yaml.safe_load(pipeline_file)\n\n container_templates = [\n c for c in pipeline['spec']['templates'] if 'container' in c\n ]\n self.assertEqual(2, len(container_templates))\n\n volumes = [{\n 'name': 'my-volume-name',\n 'persistentVolumeClaim': {\n 'claimName': 'my-persistent-volume-claim'\n }\n }]\n\n # Check that the PVC is specified for kfp<=0.1.31.1.\n if 'volumes' in pipeline['spec']:\n self.assertEqual(volumes, pipeline['spec']['volumes'])\n\n for template in container_templates:\n # Check that each container has the volume mounted.\n self.assertEqual([{\n 'name': 'my-volume-name',\n 'mountPath': '/mnt/volume-mount-path'\n }], template['container']['volumeMounts'])\n\n # Check that each template has the PVC specified for kfp>=0.1.31.2.\n if 'volumes' in template:\n self.assertEqual(volumes, template['volumes'])\n\n def testContainerComponent(self):\n kubeflow_dag_runner.KubeflowDagRunner().run(_container_component_pipeline())\n file_path = os.path.join(self.tmp_dir,\n 'container_component_pipeline.tar.gz')\n self.assertTrue(fileio.exists(file_path))\n\n with tarfile.TarFile.open(file_path).extractfile(\n 'pipeline.yaml') as pipeline_file:\n self.assertIsNotNone(pipeline_file)\n pipeline = yaml.safe_load(pipeline_file)\n containers = [\n c for c in pipeline['spec']['templates'] if 'container' in c\n ]\n self.assertLen(containers, 1)\n component_args = containers[0]['container']['args']\n self.assertIn('--node_id', component_args)\n\n def testExitHandler(self):\n dag_runner = kubeflow_dag_runner.KubeflowDagRunner()\n dag_runner.set_exit_handler(_say_hi(status=FinalStatusStr()))\n pipeline = _container_component_pipeline()\n pipeline.enable_cache = True\n dag_runner.run(pipeline)\n file_path = os.path.join(self.tmp_dir,\n 'container_component_pipeline.tar.gz')\n self.assertTrue(fileio.exists(file_path))\n\n with tarfile.TarFile.open(file_path).extractfile(\n 'pipeline.yaml') as pipeline_file:\n self.assertIsNotNone(pipeline_file)\n pipeline = yaml.safe_load(pipeline_file)\n self.assertIn('onExit', pipeline['spec'])\n containers = [\n c for c in pipeline['spec']['templates'] if 'container' in c\n ]\n self.assertLen(containers, 2)\n exit_component_args = ' '.join(containers[1]['container']['args'])\n self.assertIn('{{workflow.status}}', exit_component_args)\n self.assertNotIn('enableCache', exit_component_args)\n first_component_args = ' '.join(containers[0]['container']['args'])\n 
self.assertNotIn('{{workflow.status}}', first_component_args)\n self.assertIn('enableCache', first_component_args)\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright 2022 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for tfx.utils.name_utils.\"\"\"\n\nimport types\n\nimport tensorflow as tf\n\nfrom tfx.utils import name_utils\n\n\nclass Foo:\n class Bar:\n pass\n\n\ndef fun():\n pass\n\nVALUE = 42\n\n\nclass ClassUtilsTest(tf.test.TestCase):\n\n def testGetFullName_GoodExamples(self):\n self.assertEqual(name_utils.get_full_name(str), 'builtins.str')\n self.assertEqual(name_utils.get_full_name(Foo), f'{__name__}.Foo')\n self.assertEqual(name_utils.get_full_name(Foo.Bar), f'{__name__}.Foo.Bar')\n self.assertEqual(name_utils.get_full_name(fun), f'{__name__}.fun')\n\n def testGetFullName_BadExamples(self):\n with self.assertRaisesRegex(ValueError, 'does not have a name'):\n name_utils.get_full_name(VALUE)\n\n with self.assertRaisesRegex(ValueError, 'does not have a qualified name'):\n class DynamicClass:\n pass\n name_utils.get_full_name(DynamicClass)\n\n with self.assertRaisesRegex(ValueError, 'is not importable'):\n dynamic_class = types.new_class('DynamicClass')\n name_utils.get_full_name(dynamic_class)\n\n def testGetClass_GoodExamples(self):\n self.assertIs(name_utils.resolve_full_name('builtins.str'), str)\n self.assertIs(name_utils.resolve_full_name(f'{__name__}.Foo'), Foo)\n self.assertIs(name_utils.resolve_full_name(f'{__name__}.Foo.Bar'), Foo.Bar)\n self.assertIs(name_utils.resolve_full_name(f'{__name__}.fun'), fun)\n\n def testGetClass_BadExamples(self):\n with self.assertRaisesRegex(ValueError, 'not a valid name.'):\n name_utils.resolve_full_name(42)\n\n with self.assertRaisesRegex(ValueError, 'not a valid name.'):\n name_utils.resolve_full_name('foo^ax.1234')\n\n with self.assertRaisesRegex(ValueError, 'Cannot find'):\n name_utils.resolve_full_name('non_existing_module_name.meh.FakeClass')\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "tensorflow.test.main" ], [ "tensorflow.test.main" ], [ "tensorflow.test.main" ] ]
DamienIrving/ocean-analysis
[ "23a6dbf616fb84e6e158e32534ffd394e0df2e3e" ]
[ "visualisation/drift_paper/plot_ohc_drift.py" ]
[ "\"\"\"\nFilename: plot_ohc_drift.py\nAuthor: Damien Irving, [email protected]\nDescription: Create a bar chart showing drift in ocean heat content\n and its thermal and barystatic components \n\n\"\"\"\n\n# Import general Python modules\n\nimport sys\nimport os\nimport re\nimport pdb\nimport argparse\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nimport cmdline_provenance as cmdprov\n\ncwd = os.getcwd()\nrepo_dir = '/'\nfor directory in cwd.split('/')[1:]:\n repo_dir = os.path.join(repo_dir, directory)\n if directory == 'ocean-analysis':\n break\n\nimport matplotlib as mpl\nmpl.rcParams['axes.labelsize'] = 'large'\nmpl.rcParams['axes.titlesize'] = 'x-large'\nmpl.rcParams['xtick.labelsize'] = 'medium'\nmpl.rcParams['ytick.labelsize'] = 'large'\nmpl.rcParams['legend.fontsize'] = 'large'\n\n\n# Define functions \n\ndef get_quartiles(df, column_name, df_project, units):\n \"\"\"Get the ensemble quartiles\"\"\"\n\n assert len(df) == len(df_project)\n\n quartiles = ['# ' + column_name + ' quartiles']\n for project in ['cmip6', 'cmip5']:\n df_subset = df[df_project == project]\n \n upper_quartile = df_subset[column_name].abs().quantile(0.75)\n median = df_subset[column_name].abs().quantile(0.5)\n lower_quartile = df_subset[column_name].abs().quantile(0.25)\n \n upper_quartile_text = \"%s upper quartile: %f %s\" %(project, upper_quartile, units)\n median_text = \"%s median: %f %s\" %(project, median, units)\n lower_quartile_text = \"%s lower quartile: %f %s\" %(project, lower_quartile, units)\n \n quartiles.append(upper_quartile_text)\n quartiles.append(median_text)\n quartiles.append(lower_quartile_text)\n\n return quartiles\n\n\ndef main(inargs):\n \"\"\"Run the program.\"\"\"\n\n df = pd.read_csv(inargs.infile)\n df.set_index(df['model'], drop=True, inplace=True)\n #df.set_index(df['model'] + ' (' + df['run'] + ')', drop=True, inplace=True)\n x = np.arange(df.shape[0])\n ncmip5 = df['project'].value_counts()['cmip5']\n\n df_ohc = df[['OHC (J yr-1)', 'thermal OHC (J yr-1)', 'barystatic OHC (J yr-1)']]\n\n sec_in_year = 365.25 * 24 * 60 * 60\n earth_surface_area = 5.1e14\n df_ohc = (df_ohc / sec_in_year) / earth_surface_area\n df_ohc = df_ohc.rename(columns={\"OHC (J yr-1)\": \"change in OHC ($dH/dt$)\",\n \"thermal OHC (J yr-1)\": \"change in OHC temperature component ($dH_T/dt$)\",\n \"barystatic OHC (J yr-1)\": \"change in OHC barystatic component ($dH_m/dt$)\"})\n\n df_ohc.plot.bar(figsize=(18,6), color=['#272727', 'tab:red', 'tab:blue'], width=0.9, zorder=2)\n plt.axhspan(0.4, 1.0, color='0.95', zorder=1)\n plt.axvline(x=ncmip5 - 0.5, color='0.5', linewidth=2.0)\n units = 'equivalent planetary energy imbalance (W m$^{-2}$)'\n plt.ylabel(units)\n plt.axvline(x=x[0]-0.5, color='0.5', linewidth=0.1)\n for val in x:\n plt.axvline(x=val+0.5, color='0.5', linewidth=0.1)\n \n quartiles = get_quartiles(df_ohc, \"change in OHC ($dH/dt$)\", df['project'], units)\n\n plt.savefig(inargs.outfile, bbox_inches='tight', dpi=400)\n log_file = re.sub('.png', '.met', inargs.outfile)\n log_text = cmdprov.new_log(git_repo=repo_dir, extra_notes=quartiles)\n cmdprov.write_log(log_file, log_text)\n\n\nif __name__ == '__main__':\n\n extra_info =\"\"\" \nauthor:\n Damien Irving, [email protected]\n\n\"\"\"\n\n description = 'Create a bar chart showing drift in ocean heat content'\n parser = argparse.ArgumentParser(description=description,\n epilog=extra_info, \n argument_default=argparse.SUPPRESS,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n\n 
parser.add_argument(\"infile\", type=str, help=\"Input file name\")\n parser.add_argument(\"outfile\", type=str, help=\"Output file name\")\n\n args = parser.parse_args() \n main(args)\n" ]
[ [ "matplotlib.pyplot.axvline", "pandas.read_csv", "matplotlib.pyplot.savefig", "numpy.arange", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.axhspan" ] ]
vipavlovic/pyprobml
[ "59a2edc682d0163955db5e2f27491ad772b60141", "59a2edc682d0163955db5e2f27491ad772b60141", "59a2edc682d0163955db5e2f27491ad772b60141" ]
[ "scripts/emnist_viz_tf.py", "scripts/spectral_clustering_demo.py", "scripts/logreg_iris_bayes_2d_pymc3.py" ]
[ "\n\n\nimport superimport\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pyprobml_utils as pml\n\nimport tensorflow as tf\nimport tensorflow_datasets as tfds\n\nnp.random.seed(0)\n\nds, info = tfds.load('emnist', split='test', shuffle_files=False, with_info=True) # horribly slow\nprint(info)\n\n\nplt.figure(figsize=(10, 10))\ni = 0\nfor example in ds:\n image = example[\"image\"]\n label = example[\"label\"]\n plt.subplot(5, 5, i+1)\n plt.xticks([])\n plt.yticks([])\n plt.grid(False)\n plt.imshow(image)\n plt.title(label)\n i += 1\n if i >= 25: break\n\npml.savefig(\"emnist-data.pdf\")\nplt.show()", "import superimport\n\nimport itertools\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.linalg import eigh\nfrom sklearn.cluster import KMeans\nfrom sklearn.metrics.pairwise import rbf_kernel\nimport pyprobml_utils as pml\n\nplt.style.use('classic')\n\ndef spectral_clustering_demo():\n np.random.seed(0)\n num_clusters = 2\n for data_type, data in (('circle', sample_circle(num_clusters)),\n ('spiral', sample_spiral())):\n kmeans = KMeans(n_clusters=num_clusters, random_state=0)\n kmeans.fit(data)\n assignments = kmeans.predict(data)\n plot_data(data, assignments, 'k-means clustering', data_type)\n\n sigma = 0.1\n gamma = 1 / (2 * sigma ** 2)\n W = rbf_kernel(data, gamma=gamma)\n d = np.sum(W, 1, keepdims=True)\n sqrt_d = np.sqrt(d)\n\n normalized_W = (W / sqrt_d) / sqrt_d.T\n paranoid_assert(W, normalized_W, False)\n\n # We select the largest eigen values of normalized_W, rather\n # than the smallest eigenvalues of I - normalized_W. The two\n # problems are equivalent. The eigen values can be converted\n # between the two problems via `1 - eigen_values`. The eigen\n # vectors are the same between both problems.\n eigen_values, eigen_vectors = eigh(normalized_W,\n # Get only the top num_clusters eigenvalues\n eigvals=(data.shape[0] - num_clusters, data.shape[0]-1))\n eigen_vectors = eigen_vectors / np.linalg.norm(eigen_vectors, axis=1, keepdims=True)\n\n kmeans.fit(eigen_vectors)\n assignments = kmeans.predict(eigen_vectors)\n plot_data(data, assignments, 'spectral clustering', data_type)\n\n plt.show()\n\ndef paranoid_assert(W, normalized_W, enable):\n if not enable:\n return\n D = np.diag(np.sum(W, 1))\n L = D - W\n D_inv_sqrt = np.diag(1 / np.diag(np.sqrt(D)))\n np.testing.assert_almost_equal(np.sum(L, 1), 0, err_msg=\"Rows of Laplacian must sum to 0.\")\n np.testing.assert_allclose(normalized_W, D_inv_sqrt * W * D_inv_sqrt, rtol=0, atol=1)\n\ndef sample_circle(num_clusters):\n points_per_cluster = 500\n bandwidth = 0.1\n\n data = np.zeros((num_clusters * points_per_cluster, 2))\n for k, n in itertools.product(range(num_clusters), range(points_per_cluster)):\n theta = 2 * np.pi * np.random.uniform()\n rho = k + 1 + np.random.randn() * bandwidth\n x, y = pol2cart(theta, rho)\n idx = k * points_per_cluster + n\n data[idx, 0] = x\n data[idx, 1] = y\n data = data.reshape((num_clusters * points_per_cluster, 2))\n return data\n\ndef pol2cart(theta, rho):\n x = rho * np.cos(theta)\n y = rho * np.sin(theta)\n return(x, y)\n\ndef sample_spiral():\n # Only 2 clusters in this case. 
This is hard-coded.\n points_per_cluster = 500\n bandwidth = 0.1\n\n data = np.empty((points_per_cluster, 2))\n\n w = np.arange(1, points_per_cluster + 1).astype(np.float32) / points_per_cluster\n data[:,0] = (4 * w + 1) * np.cos(2*np.pi * w) + np.random.randn(points_per_cluster) * bandwidth\n data[:,1] = (4 * w + 1) * np.sin(2*np.pi * w) + np.random.randn(points_per_cluster) * bandwidth\n data = np.vstack((data, -data))\n\n return data\n\ndef plot_data(data, assignments, title, data_type):\n fig = plt.figure()\n ax = fig.add_subplot(1,1,1)\n ax.plot(data[assignments == 0, 0], data[assignments == 0, 1], 'o', color='r')\n ax.plot(data[assignments == 1, 0], data[assignments == 1, 1], 'o', color='b')\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.axis('square')\n ax.grid(True)\n ax.set_title(title)\n plt.tight_layout()\n pml.savefig(f\"{data_type}_{title.replace(' ', '_')}.pdf\")\n\nif __name__ == '__main__':\n spectral_clustering_demo()\n", "# Bayesian Binary logistic regression in 2d for iris flwoers\n\n# Code is based on \n# https://github.com/aloctavodia/BAP/blob/master/code/Chp4/04_Generalizing_linear_models.ipynb\n\nimport superimport\n\nimport pymc3 as pm\nimport numpy as np\nimport pandas as pd\nimport theano.tensor as tt\n#import seaborn as sns\nimport scipy.stats as stats\nfrom scipy.special import expit as logistic\nimport matplotlib.pyplot as plt\nimport arviz as az\nfrom sklearn.datasets import load_iris\nimport pyprobml_utils as pml\n\niris = load_iris()\nX = iris.data \ny = iris.target\n\n# Convert to pandas dataframe \ndf_iris = pd.DataFrame(data=iris.data, \n columns=['sepal_length', 'sepal_width', \n 'petal_length', 'petal_width'])\ndf_iris['species'] = pd.Series(iris.target_names[y], dtype='category')\n\n\ndf = df_iris.query(\"species == ('setosa', 'versicolor')\") \n\n# We reduce the sample size from 50 to 25 per class,\n# or to 5 + 45 in the unbalanced setting.\n# The latter will increase posterior uncertainty\nunbalanced = False # True\nif unbalanced:\n df = df[45:95]\nelse:\n df = df[25:75]\nassert(len(df)==50)\n\ny_1 = pd.Categorical(df['species']).codes \nx_n = ['sepal_length', 'sepal_width'] \nx_1 = df[x_n].values\n\n\nwith pm.Model() as model_1: \n α = pm.Normal('α', mu=0, sd=10) \n β = pm.Normal('β', mu=0, sd=2, shape=len(x_n)) \n \n μ = α + pm.math.dot(x_1, β) \n θ = pm.Deterministic('θ', 1 / (1 + pm.math.exp(-μ))) \n bd = pm.Deterministic('bd', -α/β[1] - β[0]/β[1] * x_1[:,0])\n \n yl = pm.Bernoulli('yl', p=θ, observed=y_1) \n \n trace_1 = pm.sample(2000, cores=1, chains=2)\n \nvarnames = ['α', 'β'] \n#az.plot_forest(trace_1, var_names=varnames);\n\nidx = np.argsort(x_1[:,0]) \nbd = trace_1['bd'].mean(0)[idx] \n\nplt.figure()\nplt.scatter(x_1[:,0], x_1[:,1], c=[f'C{x}' for x in y_1]) \nplt.plot(x_1[:,0][idx], bd, color='k'); \n\naz.plot_hdi(x_1[:,0], trace_1['bd'], color='k')\n \nplt.xlabel(x_n[0]) \nplt.ylabel(x_n[1])\n\nplt.tight_layout()\nif unbalanced:\n pml.savefig('logreg_iris_bayes_2d_unbalanced.pdf', dpi=300)\nelse:\n pml.savefig('logreg_iris_bayes_2d.pdf', dpi=300)\n \nplt.show()" ]
[ [ "matplotlib.pyplot.xticks", "matplotlib.pyplot.figure", "matplotlib.pyplot.grid", "numpy.random.seed", "matplotlib.pyplot.title", "matplotlib.pyplot.subplot", "matplotlib.pyplot.show", "matplotlib.pyplot.imshow", "matplotlib.pyplot.yticks" ], [ "numpy.sum", "matplotlib.pyplot.tight_layout", "numpy.random.seed", "sklearn.cluster.KMeans", "numpy.vstack", "matplotlib.pyplot.style.use", "matplotlib.pyplot.figure", "numpy.cos", "scipy.linalg.eigh", "sklearn.metrics.pairwise.rbf_kernel", "numpy.random.uniform", "numpy.zeros", "numpy.arange", "numpy.linalg.norm", "numpy.empty", "numpy.random.randn", "matplotlib.pyplot.show", "numpy.testing.assert_allclose", "numpy.sqrt", "numpy.sin" ], [ "pandas.Series", "matplotlib.pyplot.figure", "matplotlib.pyplot.tight_layout", "pandas.DataFrame", "numpy.argsort", "pandas.Categorical", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.scatter", "sklearn.datasets.load_iris" ] ]
MatthiasDR96/industrial_robotics_simulator
[ "9039e7a581ce97c583c73294e9937664de90530b" ]
[ "src/Interpolator.py" ]
[ "import numpy as np\nfrom sympy import *\n\n\ndef interpolate_cubic(p1, p2, k_traj, t):\n '''\n Computes a smooth cubic polynomail between 2 N-dimensional points\n Input:\n p1: Nx1 numpy array the first point\n p2: Nx1 numpy array the second point\n dp1: Nx1 numpy array of the required velocities at the first point\n dp2: Nx1 numpy array of the required velocities at the second point\n T: Scalar which denotes the time needed to traverse the polynomal from point 1 to point 2\n f: Scalar which denotes the frequency of sampling\n Returns:\n traj: (N+1) x (Txf) matrix with all interpolated position points for each axis + timesteps\n dtraj: (N+1) x (Txf) matrix with all interpolated velocities for each axis + timesteps\n ddtraj: (N+1) x (Txf) matrix with all interpolated accelerations for each axis + timesteps\n '''\n\n assert type(p1) == np.ndarray and type(p2) == np.ndarray\n assert type(k_traj) == int and (type(t) == float or type(t) == int)\n\n traj_list = []\n dtraj_list = []\n ddtraj_list = []\n dddtraj_list = []\n s, ds, dds, ddds = get_normalized_third_degree_polynomial(k_traj)\n for i in range(len(p1)):\n traj_ = [((p2[i] - p1[i]) * s[j] + p1[i]) for j in range(len(s))]\n dtraj_ = np.divide([((p2[i] - p1[i]) * ds[j]) for j in range(len(ds))], t)\n ddtraj_ = np.divide([((p2[i] - p1[i]) * dds[j]) for j in range(len(dds))], t ** 2)\n dddtraj_ = np.divide([((p2[i] - p1[i]) * ddds[j]) for j in range(len(ddds))], t ** 3)\n traj_list.append(traj_)\n dtraj_list.append(dtraj_)\n ddtraj_list.append(ddtraj_)\n dddtraj_list.append(dddtraj_)\n\n tv = np.linspace(0, t, k_traj)\n traj_list.append(tv)\n dtraj_list.append(tv)\n ddtraj_list.append(tv)\n dddtraj_list.append(tv)\n traj = np.asarray(traj_list)\n dtraj = np.asarray(dtraj_list)\n ddtraj = np.asarray(ddtraj_list)\n dddtraj = np.asarray(dddtraj_list)\n\n return traj, dtraj, ddtraj, dddtraj\n\n\ndef interpolate_quintic(p1, p2, k_traj, t):\n assert type(p1) == np.ndarray and type(p2) == np.ndarray\n assert type(k_traj) == int and (type(t) == float or type(t) == int)\n\n traj_list = []\n dtraj_list = []\n ddtraj_list = []\n dddtraj_list = []\n s, ds, dds, ddds = get_normalized_fifth_degree_polynomial(k_traj)\n for i in range(len(p1)):\n traj_ = [((p2[i] - p1[i]) * s[j] + p1[i]) for j in range(len(s))]\n dtraj_ = np.divide([((p2[i] - p1[i]) * ds[j]) for j in range(len(ds))], t)\n ddtraj_ = np.divide([((p2[i] - p1[i]) * dds[j]) for j in range(len(dds))], t ** 2)\n dddtraj_ = np.divide([((p2[i] - p1[i]) * ddds[j]) for j in range(len(ddds))], t ** 3)\n traj_list.append(traj_)\n dtraj_list.append(dtraj_)\n ddtraj_list.append(ddtraj_)\n dddtraj_list.append(dddtraj_)\n\n tv = np.linspace(0, t, k_traj)\n traj_list.append(tv)\n dtraj_list.append(tv)\n ddtraj_list.append(tv)\n dddtraj_list.append(tv)\n traj = np.asarray(traj_list)\n dtraj = np.asarray(dtraj_list)\n ddtraj = np.asarray(ddtraj_list)\n dddtraj = np.asarray(dddtraj_list)\n\n return traj, dtraj, ddtraj, dddtraj\n\n\ndef interpolate_septic(p1, p2, k_traj, t):\n assert type(p1) == np.ndarray and type(p2) == np.ndarray\n assert type(k_traj) == int and (type(t) == float or type(t) == int)\n\n traj_list = []\n dtraj_list = []\n ddtraj_list = []\n dddtraj_list = []\n s, ds, dds, ddds = get_normalized_seventh_degree_polynomial(k_traj)\n for i in range(len(p1)):\n traj_ = [((p2[i] - p1[i]) * s[j] + p1[i]) for j in range(len(s))]\n dtraj_ = np.divide([((p2[i] - p1[i]) * ds[j]) for j in range(len(ds))], t)\n ddtraj_ = np.divide([((p2[i] - p1[i]) * dds[j]) for j in range(len(dds))], t ** 2)\n dddtraj_ = 
np.divide([((p2[i] - p1[i]) * ddds[j]) for j in range(len(ddds))], t ** 3)\n traj_list.append(traj_)\n dtraj_list.append(dtraj_)\n ddtraj_list.append(ddtraj_)\n dddtraj_list.append(dddtraj_)\n\n tv = np.linspace(0, t, k_traj)\n traj_list.append(tv)\n dtraj_list.append(tv)\n ddtraj_list.append(tv)\n dddtraj_list.append(tv)\n traj = np.asarray(traj_list)\n dtraj = np.asarray(dtraj_list)\n ddtraj = np.asarray(ddtraj_list)\n dddtraj = np.asarray(dddtraj_list)\n\n return traj, dtraj, ddtraj, dddtraj\n\n\ndef interpolate_nonic(p1, p2, k_traj, t):\n assert type(p1) == np.ndarray and type(p2) == np.ndarray\n assert type(k_traj) == int and (type(t) == float or type(t) == int)\n\n traj_list = []\n dtraj_list = []\n ddtraj_list = []\n dddtraj_list = []\n s, ds, dds, ddds = get_normalized_ninth_degree_polynomial(k_traj)\n for i in range(len(p1)):\n traj_ = [((p2[i] - p1[i]) * s[j] + p1[i]) for j in range(len(s))]\n dtraj_ = np.divide([((p2[i] - p1[i]) * ds[j]) for j in range(len(ds))], t)\n ddtraj_ = np.divide([((p2[i] - p1[i]) * dds[j]) for j in range(len(dds))], t ** 2)\n dddtraj_ = np.divide([((p2[i] - p1[i]) * ddds[j]) for j in range(len(ddds))], t ** 3)\n traj_list.append(traj_)\n dtraj_list.append(dtraj_)\n ddtraj_list.append(ddtraj_)\n dddtraj_list.append(dddtraj_)\n\n tv = np.linspace(0, t, k_traj)\n traj_list.append(tv)\n dtraj_list.append(tv)\n ddtraj_list.append(tv)\n dddtraj_list.append(tv)\n traj = np.asarray(traj_list)\n dtraj = np.asarray(dtraj_list)\n ddtraj = np.asarray(ddtraj_list)\n dddtraj = np.asarray(dddtraj_list)\n\n return traj, dtraj, ddtraj, dddtraj\n\n\ndef interpolate_trapezoid(p1, p2, k_traj, t):\n assert type(p1) == np.ndarray and type(p2) == np.ndarray\n assert type(k_traj) == int and (type(t) == float or type(t) == int)\n\n traj_list = []\n dtraj_list = []\n ddtraj_list = []\n dddtraj_list = []\n s, ds, dds, ddds = get_normalized_trapezoid_polynomial(k_traj)\n for i in range(len(p1)):\n traj_ = [((p2[i] - p1[i]) * s[j] + p1[i]) for j in range(len(s))]\n dtraj_ = np.divide([((p2[i] - p1[i]) * ds[j]) for j in range(len(ds))], t)\n ddtraj_ = np.divide([((p2[i] - p1[i]) * dds[j]) for j in range(len(dds))], t ** 2)\n dddtraj_ = np.divide([((p2[i] - p1[i]) * ddds[j]) for j in range(len(ddds))], t ** 3)\n traj_list.append(traj_)\n dtraj_list.append(dtraj_)\n ddtraj_list.append(ddtraj_)\n dddtraj_list.append(dddtraj_)\n\n tv = np.linspace(0, t, k_traj)\n traj_list.append(tv)\n dtraj_list.append(tv)\n ddtraj_list.append(tv)\n dddtraj_list.append(tv)\n traj = np.asarray(traj_list)\n dtraj = np.asarray(dtraj_list)\n ddtraj = np.asarray(ddtraj_list)\n dddtraj = np.asarray(dddtraj_list)\n\n return traj, dtraj, ddtraj, dddtraj\n\n\ndef interpolate_minimum_jerk_derivative(p1, p2, k_traj, t):\n assert type(p1) == np.ndarray and type(p2) == np.ndarray\n assert type(k_traj) == int and (type(t) == float or type(t) == int)\n\n traj_list = []\n dtraj_list = []\n ddtraj_list = []\n dddtraj_list = []\n s, ds, dds, ddds = get_normalized_minimum_jerk_derivative_polynomial(k_traj)\n for i in range(len(p1)):\n traj_ = [((p2[i] - p1[i]) * s[j] + p1[i]) for j in range(len(s))]\n dtraj_ = np.divide([((p2[i] - p1[i]) * ds[j]) for j in range(len(ds))], t)\n ddtraj_ = np.divide([((p2[i] - p1[i]) * dds[j]) for j in range(len(dds))], t ** 2)\n dddtraj_ = np.divide([((p2[i] - p1[i]) * ddds[j]) for j in range(len(ddds))], t ** 3)\n traj_list.append(traj_)\n dtraj_list.append(dtraj_)\n ddtraj_list.append(ddtraj_)\n dddtraj_list.append(dddtraj_)\n\n tv = np.linspace(0, t, k_traj)\n traj_list.append(tv)\n 
dtraj_list.append(tv)\n ddtraj_list.append(tv)\n dddtraj_list.append(tv)\n traj = np.asarray(traj_list)\n dtraj = np.asarray(dtraj_list)\n ddtraj = np.asarray(ddtraj_list)\n dddtraj = np.asarray(dddtraj_list)\n\n return traj, dtraj, ddtraj, dddtraj\n\n\ndef get_normalized_first_degree_polynomial(k_traj):\n tau = np.linspace(0, 1, k_traj)\n stau = np.linspace(0, 1, k_traj)\n dstau_dtau = np.linspace(0, 0, k_traj)\n ddstau_ddtau = np.linspace(0, 0, k_traj)\n dddstau_dddtau = np.linspace(0, 0, k_traj)\n\n for i in range(k_traj):\n t = tau[i]\n stau[i] = t\n dstau_dtau[i] = 1\n ddstau_ddtau[i] = 0\n dddstau_dddtau[i] = 0\n\n return stau, dstau_dtau, ddstau_ddtau, dddstau_dddtau\n\n\ndef get_normalized_third_degree_polynomial(k_traj):\n tau = np.linspace(0, 1, k_traj)\n stau = np.linspace(0, 1, k_traj)\n dstau_dtau = np.linspace(0, 0, k_traj)\n ddstau_ddtau = np.linspace(0, 0, k_traj)\n dddstau_dddtau = np.linspace(0, 0, k_traj)\n\n for i in range(k_traj):\n t = tau[i]\n stau[i] = -2 * (t ** 3) + 3 * (t ** 2)\n dstau_dtau[i] = -6 * (t ** 2) + 6 * t\n ddstau_ddtau[i] = -12 * t + 6\n dddstau_dddtau[i] = -12\n\n return stau, dstau_dtau, ddstau_ddtau, dddstau_dddtau\n\n\ndef get_normalized_fifth_degree_polynomial(k_traj):\n tau = np.linspace(0, 1, k_traj)\n stau = np.linspace(0, 1, k_traj)\n dstau_dtau = np.linspace(0, 0, k_traj)\n ddstau_ddtau = np.linspace(0, 0, k_traj)\n dddstau_dddtau = np.linspace(0, 0, k_traj)\n\n for i in range(k_traj):\n t = tau[i]\n stau[i] = 6 * (t ** 5) - 15 * (t ** 4) + 10 * (t ** 3)\n dstau_dtau[i] = 30 * (t ** 4) - 60 * (t ** 3) + 30 * (t ** 2)\n ddstau_ddtau[i] = 120 * (t ** 3) - 180 * (t ** 2) + 60 * t\n dddstau_dddtau[i] = 360 * (t ** 2) - 360 * t + 60\n\n return stau, dstau_dtau, ddstau_ddtau, dddstau_dddtau\n\n\ndef get_normalized_seventh_degree_polynomial(k_traj):\n tau = np.linspace(0, 1, k_traj)\n stau = np.linspace(0, 1, k_traj)\n dstau_dtau = np.linspace(0, 0, k_traj)\n ddstau_ddtau = np.linspace(0, 0, k_traj)\n dddstau_dddtau = np.linspace(0, 0, k_traj)\n\n for i in range(k_traj):\n t = tau[i]\n stau[i] = -20 * (t ** 7) + 70 * (t ** 6) - 84 * (t ** 5) + 35 * (t ** 4)\n dstau_dtau[i] = -140 * (t ** 6) + 420 * (t ** 5) - 420 * (t ** 4) + 140 * (t ** 3)\n ddstau_ddtau[i] = -840 * (t ** 5) + 2100 * (t ** 4) - 1680 * (t ** 3) + 420 * (t ** 2)\n dddstau_dddtau[i] = -4200 * (t ** 4) + 8400 * (t ** 3) - 5040 * (t ** 2) + 840 * t\n\n return stau, dstau_dtau, ddstau_ddtau, dddstau_dddtau\n\n\ndef get_normalized_ninth_degree_polynomial(k_traj):\n tau = np.linspace(0, 1, k_traj)\n stau = np.linspace(0, 1, k_traj)\n dstau_dtau = np.linspace(0, 0, k_traj)\n ddstau_ddtau = np.linspace(0, 0, k_traj)\n dddstau_dddtau = np.linspace(0, 0, k_traj)\n\n for i in range(1, k_traj):\n t = tau[i]\n stau[i] = 70 * (t ** 9) - 315 * (t ** 8) + 540 * (t ** 7) - 420 * (t ** 6) + 126 * (t ** 5)\n dstau_dtau[i] = 630 * (t ** 8) - 2520 * (t ** 7) + 3780 * (t ** 6) - 2520 * (t ** 5) + 630 * (t ** 4)\n ddstau_ddtau[i] = 5040 * (t ** 7) - 17640 * (t ** 6) + 22680 * (t ** 5) - 12600 * (t ** 4) + 2520 * (t ** 3)\n dddstau_dddtau[i] = 35280 * (t ** 6) - 105840 * (t ** 5) + 113400 * (t ** 4) - 50400 * (t ** 3) + 7560 * (\n t ** 2)\n\n return stau, dstau_dtau, ddstau_ddtau, dddstau_dddtau\n\n\ndef get_normalized_trapezoid_polynomial(k_traj):\n t_acc = 1 / 10.\n t_ct = 1 - 2 * t_acc\n v_m = 1.0 / (t_acc + t_ct)\n x = t_acc\n\n tau = np.linspace(0, 1, k_traj)\n stau = np.linspace(0, 1, k_traj)\n dstau_dtau = np.linspace(0, 0, k_traj)\n ddstau_ddtau = np.linspace(0, 0, k_traj)\n dddstau_dddtau = 
np.linspace(0, 0, k_traj)\n\n for i in range(k_traj):\n t = tau[i]\n if 0 <= t <= x:\n res = 0.5 * v_m * (t ** 2) / t_acc\n vel = v_m * t / t_acc\n elif x < t <= 1 - x:\n res = 0.5 * v_m * (t_acc ** 2) / t_acc + v_m * (t - t_acc)\n vel = v_m\n elif t > 1 - x:\n res = 0.5 * v_m * (t_acc ** 2) / t_acc + v_m * t_ct + v_m * (t - t_acc - t_ct) - 0.5 * v_m / t_acc * (\n t - t_acc - t_ct) ** 2\n vel = v_m - v_m / t_acc * (t - t_acc - t_ct)\n else:\n res = None\n vel = None\n stau[i] = res\n dstau_dtau[i] = vel\n\n for i in range(tau.size - 2):\n dstau_dtau[i] = (stau[i + 1] - stau[i]) / (tau[i + 1] - tau[i])\n\n for i in range(tau.size - 2):\n ddstau_ddtau[i] = (dstau_dtau[i + 1] - dstau_dtau[i]) / (tau[i + 1] - tau[i])\n\n for i in range(tau.size - 2):\n dddstau_dddtau[i] = (ddstau_ddtau[i + 1] - ddstau_ddtau[i]) / (tau[i + 1] - tau[i])\n\n return stau, dstau_dtau, ddstau_ddtau, dddstau_dddtau\n\n\ndef get_normalized_minimum_jerk_derivative_polynomial(k_traj):\n x = (1 - np.sqrt(0.5)) / 2\n\n tau = np.linspace(0, 1, k_traj)\n stau = np.linspace(0, 1, k_traj)\n dstau_dtau = np.linspace(0, 0, k_traj)\n ddstau_ddtau = np.linspace(0, 0, k_traj)\n dddstau_dddtau = np.linspace(0, 0, k_traj)\n\n res = None\n for i in range(k_traj - 1):\n t = tau[i]\n if 0 <= t <= x:\n res = 16 * (t ** 4)\n elif x < t <= 0.5:\n res = -16 * (t ** 4) + 128 * x * (t ** 3) - 192 * (x ** 2) * (t ** 2) + 128 * (x ** 3) * t - 32 * (x ** 4)\n elif 0.5 < t <= 1 - x:\n res = 1 + 16 * ((1 - t) ** 4) - 128 * x * ((1 - t) ** 3) + 192 * (x ** 2) * ((1 - t) ** 2) - 128 * (\n x ** 3) * (1 - t) + 32 * (x ** 4)\n elif 1 - x < t <= 1:\n res = 1 - 16 * (1 - t) ** 4\n stau[i] = res\n\n for i in range(tau.size - 2):\n dstau_dtau[i] = (stau[i + 1] - stau[i]) / (tau[i + 1] - tau[i])\n\n for i in range(tau.size - 2):\n ddstau_ddtau[i] = (dstau_dtau[i + 1] - dstau_dtau[i]) / (tau[i + 1] - tau[i])\n\n for i in range(tau.size - 2):\n dddstau_dddtau[i] = (ddstau_ddtau[i + 1] - ddstau_ddtau[i]) / (tau[i + 1] - tau[i])\n\n return stau, dstau_dtau, ddstau_ddtau, dddstau_dddtau\n\n\ndef get_normalized_cubic_polynomial_coefficients():\n # Kinematic equations for a cubic polynomial\n x0 = [1, 0, 0, 0]\n xt = [1, 1, pow(1, 2), pow(1, 3)]\n v0 = [0, 1, 0, 0]\n vt = [0, 1, 2 * 1, 3 * pow(1, 2)]\n\n # Solve polynomial coefficients\n a = np.array([x0, xt, v0, vt], dtype='float')\n b = np.array([[0], [1], [0], [0]], dtype='float')\n polynomial = np.linalg.solve(a, b)\n return polynomial\n\n\ndef get_normalized_quintic_polynomial_coefficients():\n # Kinematic equations for a cubic polynomial\n x0 = [1, 0, 0, 0, 0, 0]\n xt = [1, 1, pow(1, 2), pow(1, 3), pow(1, 4), pow(1, 5)]\n v0 = [0, 1, 0, 0, 0, 0]\n vt = [0, 1, 2 * 1, 3 * pow(1, 2), 4 * pow(1, 3), 5 * pow(1, 4)]\n a0 = [0, 0, 2, 0, 0, 0]\n at = [0, 0, 2, 6 * 1, 12 * pow(1, 2), 20 * pow(1, 3)]\n\n # Solve polynomial coefficients\n a = np.array([x0, xt, v0, vt, a0, at], dtype='float')\n b = np.array([[0], [1], [0], [0], [0], [0]], dtype='float')\n polynomial = np.linalg.solve(a, b)\n return polynomial\n\n\ndef get_normalized_septic_polynomial_coefficients():\n # Kinematic equations for a cubic polynomial\n x0 = [1, 0, 0, 0, 0, 0, 0, 0]\n xt = [1, 1, pow(1, 2), pow(1, 3), pow(1, 4), pow(1, 5), pow(1, 6), pow(1, 7)]\n v0 = [0, 1, 0, 0, 0, 0, 0, 0]\n vt = [0, 1, 2 * 1, 3 * pow(1, 2), 4 * pow(1, 3), 5 * pow(1, 4), 6 * pow(1, 5), 7 * pow(1, 6)]\n a0 = [0, 0, 2, 0, 0, 0, 0, 0]\n at = [0, 0, 2, 6 * 1, 12 * pow(1, 2), 20 * pow(1, 3), 30 * pow(1, 4), 42 * pow(1, 5)]\n j0 = [0, 0, 0, 6, 0, 0, 0, 0]\n jt = [0, 0, 0, 6, 24 
* 1, 60 * pow(1, 2), 120 * pow(1, 3), 210 * pow(1, 4)]\n\n # Solve polynomial coefficients\n a = np.array([x0, xt, v0, vt, a0, at, j0, jt], dtype='float')\n b = np.array([[0], [1], [0], [0], [0], [0], [0], [0]], dtype='float')\n polynomial = np.linalg.solve(a, b)\n return polynomial\n\n\ndef get_normalized_nonic_polynomial_coefficients():\n # Kinematic equations for a cubic polynomial\n x0 = [1, 0, 0, 0, 0, 0]\n xt = [1, 1, pow(1, 2), pow(1, 3), pow(1, 4), pow(1, 5)]\n v0 = [0, 1, 0, 0, 0, 0]\n vt = [0, 1, 2 * 1, 3 * pow(1, 2), 4 * pow(1, 3), 5 * pow(1, 4)]\n a0 = [0, 0, 2, 0, 0, 0]\n at = [0, 0, 2, 6 * 1, 12 * pow(1, 2), 20 * pow(1, 3)]\n j0 = [0, 0, 0, 6, 0, 0, 0, 0]\n jt = [0, 0, 0, 6, 24 * 1, 60 * pow(1, 2), 120 * pow(1, 3), 210 * pow(1, 4)]\n\n # Solve polynomial coefficients\n a = np.array([x0, xt, v0, vt, a0, at, j0, jt], dtype='float')\n b = np.array([[0], [1], [0], [0], [0], [0], [0], [0]], dtype='float')\n polynomial = np.linalg.solve(a, b)\n return polynomial\n\n\ndef interpolate_quint_2(p1, p2, dp1, dp2, ddp1, ddp2, k_traj, T):\n '''\n Computes a smooth quintic polynomial between 2 N-dimensional points\n Input:\n p1: Nx1 numpy array the first point\n p2: Nx1 numpy array the second point\n dp1: Nx1 numpy array of the required velocities at the first point\n dp2: Nx1 numpy array of the required velocities at the second point\n ddp1: Nx1 numpy array of the required accelerations the first point\n ddp2: Nx1 numpy array of the required accelerations the second point\n T: Scalar which denotes the time needed to traverse the polynomal from point 1 to point 2\n f: Scalar which denotes the frequency of sampling\n Returns:\n traj: (N+1) x (Txf) matrix with all interpolated position points for each axis + timesteps\n dtraj: (N+1) x (Txf) matrix with all interpolated velocities for each axis + timesteps\n ddtraj: (N+1) x (Txf) matrix with all interpolated accelerations for each axis + timesteps\n '''\n\n assert type(p1) == np.ndarray and type(p2) == np.ndarray\n assert type(dp1) == np.ndarray and type(dp2) == np.ndarray\n assert type(ddp1) == np.ndarray and type(ddp2) == np.ndarray\n assert type(k_traj) == int and (type(T) == float or type(T) == int)\n\n # Kinematic equations for a quintic polynomial\n x0 = [1, 0, 0, 0, 0, 0]\n xT = [1, T, pow(T, 2), pow(T, 3), pow(T, 4), pow(T, 5)]\n v0 = [0, 1, 0, 0, 0, 0]\n vT = [0, 1, 2 * T, 3 * pow(T, 2), 4 * pow(T, 3), 5 * pow(T, 4)]\n a0 = [0, 0, 2, 0, 0, 0]\n aT = [0, 0, 2, 6 * T, 12 * pow(T, 2), 20 * pow(T, 3)]\n\n # Kinematic matrix\n A = np.array([x0, xT, v0, vT, a0, aT], dtype='float')\n\n # Interpolate\n traj_list = []\n dtraj_list = []\n ddtraj_list = []\n t = Symbol('t')\n tv = np.linspace(0, T, k_traj)\n for i in range(len(p1)):\n B = np.array([[p1[i]], [p2[i]], [dp1[i]], [dp2[i]], [ddp1[i]], [ddp2[i]]], dtype='float')\n x = np.linalg.solve(A, B)\n traj = x[0, 0] + x[1, 0] * t + x[2, 0] * pow(t, 2) + x[3, 0] * pow(t, 3) + x[4, 0] * pow(t, 4) + x[\n 5, 0] * pow(t, 5)\n dtraj = x[1, 0] + 2 * x[2, 0] * t + 3 * x[3, 0] * pow(t, 2) + 4 * x[4, 0] * pow(t, 3) + 5 * x[\n 5, 0] * pow(t, 4)\n ddtraj = 2 * x[2, 0] + 6 * x[3, 0] * t + 12 * x[4, 0] * pow(t, 2) + 20 * x[5, 0] * pow(t, 3)\n traj_ = [traj.subs(t, tv_) for tv_ in tv]\n dtraj_ = [dtraj.subs(t, tv_) for tv_ in tv]\n ddtraj_ = [ddtraj.subs(t, tv_) for tv_ in tv]\n traj_list.append(traj_)\n dtraj_list.append(dtraj_)\n ddtraj_list.append(ddtraj_)\n\n traj_list.append(tv)\n dtraj_list.append(tv)\n ddtraj_list.append(tv)\n traj = np.asarray(traj_list)\n dtraj = np.asarray(dtraj_list)\n ddtraj = 
np.asarray(ddtraj_list)\n\n return traj, dtraj, ddtraj\n\n\ndef interpolate_cubic_2(p1, p2, k_traj, T, dp1=np.zeros((6, 1)), dp2=np.zeros((6, 1))):\n '''\n Computes a smooth cubic polynomal between 2 N-dimensional points\n Input:\n p1: Nx1 numpy array the first point\n p2: Nx1 numpy array the second point\n dp1: Nx1 numpy array of the required velocities at the first point\n dp2: Nx1 numpy array of the required velocities at the second point\n T: Scalar which denotes the time needed to traverse the polynomal from point 1 to point 2\n f: Scalar which denotes the frequency of sampling\n Returns:\n traj: (N+1) x (Txf) matrix with all interpolated position points for each axis + timesteps\n dtraj: (N+1) x (Txf) matrix with all interpolated velocities for each axis + timesteps\n ddtraj: (N+1) x (Txf) matrix with all interpolated accelerations for each axis + timesteps\n '''\n\n assert type(p1) == np.ndarray and type(p2) == np.ndarray\n assert type(dp1) == np.ndarray and type(dp2) == np.ndarray\n assert type(k_traj) == int and (type(T) == float or type(T) == int)\n\n # Kinematic equations for a cubic polynomial\n x0 = [1, 0, 0, 0]\n xT = [1, T, pow(T, 2), pow(T, 3)]\n v0 = [0, 1, 0, 0]\n vT = [0, 1, 2 * T, 3 * pow(T, 2)]\n\n # Kinematic matrix\n A = np.array([x0, xT, v0, vT], dtype='float')\n\n traj_list = []\n dtraj_list = []\n ddtraj_list = []\n t = Symbol('t')\n tv = np.linspace(0, T, k_traj)\n for i in range(len(p1)):\n B = np.array([[p1[i]], [p2[i]], [dp1[i]], [dp2[i]]], dtype='float')\n x = np.linalg.solve(A, B)\n traj = x[0, 0] + x[1, 0] * t + x[2, 0] * pow(t, 2) + x[3, 0] * pow(t, 3)\n dtraj = x[1, 0] + 2 * x[2, 0] * t + 3 * x[3, 0] * pow(t, 2)\n ddtraj = 2 * x[2, 0] + 6 * x[3, 0] * t\n traj_ = [traj.subs(t, tv_) for tv_ in tv]\n dtraj_ = [dtraj.subs(t, tv_) for tv_ in tv]\n ddtraj_ = [ddtraj.subs(t, tv_) for tv_ in tv]\n traj_list.append(traj_)\n dtraj_list.append(dtraj_)\n ddtraj_list.append(ddtraj_)\n traj_list.append(tv)\n dtraj_list.append(tv)\n ddtraj_list.append(tv)\n traj = np.array(traj_list)\n dtraj = np.array(dtraj_list)\n ddtraj = np.array(ddtraj_list)\n\n return traj, dtraj, ddtraj\n\n\ndef interpolate_viapoints(p, v1, vn, k_traj, t):\n '''\n Computes a smooth cubic polynomal between M N-dimensional points\n Input:\n p: MxN numpy array containing all points\n v1: Nx1 numpy array of the required velocities at the first point\n vn: Nx1 numpy array of the required velocities at the last point\n t: Mx1 numpy array of the timesteps at which the points should be reached\n f: Scalar which denotes the frequency of sampling\n Returns:\n traj: (N+1) x (Txf) matrix with all interpolated position points for each axis + timesteps\n dtraj: (N+1) x (Txf) matrix with all interpolated velocities for each axis + timesteps\n ddtraj: (N+1) x (Txf) matrix with all interpolated accelerations for each axis + timesteps\n '''\n\n assert type(p) == np.ndarray and type(k_traj) == int\n\n # Compute time interval matrix\n h = list(np.zeros((len(t) - 1, 1)))\n for i in range(len(t) - 1):\n h[i] = t[i + 1] - t[i]\n\n # Compute A(h) matrix\n A = np.zeros((len(h) - 1, len(h) - 1))\n for i in range(len(h) - 1):\n for j in range(len(h) - 1):\n if i == j:\n A[i][j] = 2 * (h[i] + h[i + 1])\n if i == j + 1:\n A[i][j] = h[i + 1]\n if j == i + 1:\n A[i][j] = h[i]\n\n # Compute known B(p0,p1,h,v1,vn) matrix\n B = np.zeros((len(h) - 1, len(p[0])))\n for i in range(len(h) - 1):\n B[i] = (3 / (h[i] * h[i + 1])) * (\n pow(h[i], 2) * (np.subtract(p[i + 2], p[i + 1])) + pow(h[i + 1], 2) * (np.subtract(p[i + 1], p[i])))\n 
B[0] = B[0] - np.dot(h[1], v1)\n    B[-1] = B[-1] - np.dot(h[-2], vn)\n\n    # Solve for all unknown velocities of intermediate knots\n    x = np.linalg.solve(A, B)\n    vel = [v1.copy()]\n    [vel.append(x[i]) for i in range(len(x))]\n    vel.append(vn.copy())\n\n    # Compute N-1 polynomials using computed velocities\n    traj = [[0], [0], [0], [0], [0], [0], [0]]\n    dtraj = [[0], [0], [0], [0], [0], [0], [0]]\n    ddtraj = [[0], [0], [0], [0], [0], [0], [0]]\n    for i in range(len(p) - 1):\n        traj_, dtraj_, ddtraj_ = interpolate_cubic_2(p[i], p[i + 1], k_traj, float(h[i]), vel[i], vel[i + 1])\n        for j in range(len(traj) - 1):\n            traj[j].extend(traj_[j])\n            dtraj[j].extend(dtraj_[j])\n            ddtraj[j].extend(ddtraj_[j])\n        traj[-1].extend(traj_[-1] + traj[-1][-1])\n        dtraj[-1].extend(dtraj_[-1] + dtraj[-1][-1])\n        ddtraj[-1].extend(ddtraj_[-1] + ddtraj[-1][-1])\n    # Trim the leading placeholder column from each array (the originals mistakenly\n    # deleted from `traj` three times, so dtraj/ddtraj duplicated the positions).\n    traj = np.asarray(np.delete(traj, 0, 1))\n    dtraj = np.asarray(np.delete(dtraj, 0, 1))\n    ddtraj = np.asarray(np.delete(ddtraj, 0, 1))\n\n    return traj, dtraj, ddtraj\n" ]
[ [ "numpy.sqrt", "numpy.linalg.solve", "numpy.zeros", "numpy.subtract", "numpy.asarray", "numpy.delete", "numpy.array", "numpy.dot", "numpy.linspace" ] ]
AwhLorraine/mshoot
[ "d6981fa37c55da0457ac0371f9850743858a3543", "d6981fa37c55da0457ac0371f9850743858a3543" ]
[ "test/test_mpc.py", "examples/tutorial/tutorial.py" ]
[ "import unittest\nimport os\n\nimport numpy as np\nimport pandas as pd\nfrom scipy.signal import StateSpace\nimport matplotlib.pyplot as plt\n\nimport mshoot\n\n\ndef cfun(xdf, ydf):\n \"\"\"\n :param ydf: DataFrame, model states\n :param ydf: DataFrame, model outputs\n :return: float\n \"\"\"\n qout = ydf['qout'].values\n c = np.sum(qout ** 2) / qout.size\n return c\n\n\nclass TestMPC(unittest.TestCase):\n\n def setUp(self):\n fmupath = os.path.join('resources', 'fmus', 'R1C1', 'R1C1.fmu')\n parameters = {'C': 1e6, 'R': 0.01}\n self.model = mshoot.SimFMU(\n fmupath,\n outputs=['qout', 'Tr'],\n states=['heatCapacitor.T'],\n parameters=parameters,\n verbose=False)\n\n def tearDown(self):\n pass\n\n def test_mpc(self):\n # Inputs\n t = np.arange(0, 3600 * 10, 3600)\n inp = pd.DataFrame(index=pd.Index(t, name='time'), columns=['q', 'Tout'])\n inp['q'] = np.full(t.size, 0)\n inp['Tout'] = np.full(t.size, 273.15)\n\n # Bounds\n ubounds = [(0., 4000.)]\n xbounds = [(293.15, 296.15)]\n\n # Initial state\n x0 = [293.65]\n\n # Optimization\n mpc = mshoot.MPCEmulation(emumod=self.model, cfun=cfun)\n\n u, xctr, xemu, yemu, uhist = mpc.optimize(\n model=self.model,\n inp_ctr=inp.copy(),\n inp_emu=inp.copy(),\n free=['q'],\n ubounds=ubounds,\n xbounds=xbounds,\n x0=x0,\n ynominal=[4000., 293.15],\n step=1,\n horizon=3\n )\n\n # ax = u.plot(title='u')\n # ax.set_ylim(0, 4000)\n # ax = xemu.plot(title='xemu')\n # ax.set_ylim(292.15, 296.15)\n # plt.show()\n\n # Assert the solution is correct\n self.assertLess(abs(xemu['heatCapacitor.T'].iloc[-1] - 293.15), 0.3) # Ideally, should be even closer\n\n # Validate emulation with optimized control\n inp['q'] = u['q']\n yvld, xvld = self.model.simulate(inp, x0)\n\n # self.assertTrue(((yvld - yemu).abs() < 1e-3).all().all()) # Might not be true for FMUs *\n self.assertTrue(((xvld - xemu).abs() < 1e-3).all().all()) # Might not be true for FMUs *\n # * FMU results might be shifted in time by one time step.\n # The reason is unknown, but FMU- or pyFMI-specific.\n\n def test_mpc_inp_clb(self):\n # Inputs\n t = np.arange(0, 3600 * 10, 3600)\n inp = pd.DataFrame(index=pd.Index(t, name='time'), columns=['q', 'Tout'])\n inp['q'] = np.full(t.size, 0)\n inp['Tout'] = np.full(t.size, 273.15)\n\n # Bounds\n ubounds = [(0., 4000.)]\n xbounds = [(293.15, 296.15)]\n\n # Initial state\n x0 = [293.65]\n\n # Input callback function\n def inp_clb(index):\n return inp.loc[index]\n\n # Optimization\n mpc = mshoot.MPCEmulation(emumod=self.model, cfun=cfun)\n\n u, xctr, xemu, yemu, uhist = mpc.optimize(\n model=self.model,\n inp_ctr=None,\n inp_clb=inp_clb,\n inp_emu=inp.copy(),\n free=['q'],\n ubounds=ubounds,\n xbounds=xbounds,\n x0=x0,\n ynominal=[4000., 293.15],\n step=1,\n horizon=3\n )\n\n # Assert the solution is correct\n self.assertLess(abs(xemu['heatCapacitor.T'].iloc[-1] - 293.15), 0.3) # Ideally, should be even closer\n\n # Validate emulation with optimized control\n inp['q'] = u['q']\n yvld, xvld = self.model.simulate(inp, x0)\n\n # self.assertTrue(((yvld - yemu).abs() < 1e-3).all().all()) # Might not be true for FMUs *\n self.assertTrue(((xvld - xemu).abs() < 1e-3).all().all()) # Might not be true for FMUs *\n # * FMU results might be shifted in time by one time step.\n # The reason is unknown, but FMU- or pyFMI-specific.\n\n # def test_2_inputs(self):\n # \"\"\"THE SOLVER HAS PROBLEMS WITH GETTING THE RIGHT SOLUTION. 
(?)\"\"\"\n # # Inputs\n # t = np.arange(0, 3600 * 10, 3600)\n # inp = pd.DataFrame(index=pd.Index(t, name='time'), columns=['q', 'Tout'])\n # inp['q'] = np.full(t.size, 0)\n # inp['Tout'] = np.full(t.size, 273.15)\n\n # # Bounds\n # ubounds = [(0., 10000.), (272.15, 275.)] # <-- Solver should try to yield Tout = 275\n # xbounds = [(293.15, 296.15)]\n\n # # Initial state\n # x0 = [293.65]\n\n # # Optimization\n # mpc = mshoot.MPCEmulation(emumod=self.model, cfun=cfun)\n\n # u, xctr, xemu, yemu, uhist = mpc.optimize(\n # model=self.model,\n # inp=inp,\n # free=['q', 'Tout'],\n # ubounds=ubounds,\n # xbounds=xbounds,\n # x0=x0,\n # unominal=[4000., 273.15],\n # ynominal=[4000., 293.15],\n # step=1,\n # horizon=4\n # )\n\n # ax = u.plot(title='u', subplots=True)\n # ax = xemu.plot(title='xemu')\n # plt.show()\n\n # # Assert the solution is correct\n # self.assertLess(abs(xemu['heatCapacitor.T'].iloc[-1] - 293.15), 0.01)\n\n # # Validate emulation with optimized control\n # inp['q'] = u['q']\n # yvld, xvld = self.model.simulate(inp, x0)\n\n # # self.assertTrue((yvld - yemu < 1e-3).all().all()) # Might not be true for FMUs *\n # # self.assertTrue((xvld - xemu < 1e-3).all().all()) # Might not be true for FMUs *\n # # * FMU results might be shifted in time by one time step.\n # # The reason is unknown, but FMU- or pyFMI-specific.\n\n\nif __name__ == '__main__':\n unittest.main()\n\n\n", "import os\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport mshoot\n\nfmupath = os.path.join('examples', 'tutorial', 'modelica', 'R3C3.fmu')\n\n# 1) Emulation model\nmodel_emu = mshoot.SimFMU(\n fmupath,\n outputs=['y', 'u1_y'],\n states=['heatCapacitor1.T', 'heatCapacitor2.T', 'heatCapacitor3.T'],\n parameters={'C1': 75000, 'C2': 100000, 'C3': 50000, 'R1': 0.01, 'R2': 0.01, 'R3': 0.01})\n\n# 2) Control model\nmodel_ctr = mshoot.SimFMU(\n fmupath,\n outputs=['y', 'u1_y'],\n states=['heatCapacitor1.T', 'heatCapacitor2.T', 'heatCapacitor3.T'],\n parameters={'C1': 75000, 'C2': 100000, 'C3': 50000, 'R1': 0.01, 'R2': 0.01, 'R3': 0.01})\n\n# 3) Cost function\ndef cfun(xdf, ydf):\n cost = (ydf['u1_y'] ** 2).sum()\n return cost\n\n# 4) Define inputs (48 hours emulation, 1h step)\nt = np.arange(0, 48. * 3600., 3600.)\nu1 = np.zeros(48)\nu2 = np.sin(t / 86400. * 2. * np.pi) * 1000. + np.random.rand(48) * 1000. - 500. # Noisy sinusoid\n\ninp = pd.DataFrame(index = pd.Index(t, name = 'time'))\ninp['u1'] = u1\ninp['u2'] = u2\n\ninp.plot()\nplt.show()\n\n# 5) Define bounds\nTlo = np.where((t > 86400 / 2) & (t < 86400 * 1.5), 273.15 + 23, 273.15 + 17)\nThi = 273.15 + 25\n\n# 6) Instantiate MPCEmulation\nmpc = mshoot.MPCEmulation(model_emu, cfun)\n\n# 7) Optimize\nu, xctr, xemu, yemu, u_hist = mpc.optimize(\n model = model_ctr,\n inp_ctr = inp,\n inp_emu = inp,\n free = ['u1'],\n ubounds = [(-1000, 1000)],\n xbounds = [(273.15, 333.15), (273.15, 333.15), (Tlo, Thi)],\n x0 = [293.15, 293.15, 293.15],\n maxiter = 20,\n ynominal = [293.15, 1000.],\n step = 1,\n horizon = 3\n)\n\n# 8) Plot some results\nax1 = xemu.plot()\nax1.plot(xemu.index, Tlo, color = 'black')\nax1.plot(xemu.index, np.full(48, Thi), color = 'black')\n\nax2 = u.plot()\nax2.plot(u.index, u2, color = 'red')\n\nplt.show()\n" ]
[ [ "numpy.arange", "numpy.sum", "pandas.Index", "numpy.full" ], [ "numpy.zeros", "pandas.Index", "numpy.arange", "matplotlib.pyplot.show", "numpy.random.rand", "numpy.sin", "numpy.where", "numpy.full" ] ]
edges-collab/edges-cal
[ "9b7b28f71e1aa5347f901af38ef3bc0d28766e21" ]
[ "src/edges_cal/cal_coefficients.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nThe main user-facing module of ``edges-cal``.\n\nThis module contains wrappers around lower-level functions in other modules, providing\na one-stop interface for everything related to calibration.\n\"\"\"\nfrom __future__ import annotations\n\nimport attr\nimport h5py\nimport numpy as np\nimport tempfile\nimport warnings\nimport yaml\nfrom abc import ABCMeta, abstractmethod\nfrom astropy.convolution import Gaussian1DKernel, convolve\nfrom copy import copy\nfrom edges_io import io\nfrom edges_io.logging import logger\nfrom functools import lru_cache\nfrom hashlib import md5\nfrom matplotlib import pyplot as plt\nfrom pathlib import Path\nfrom scipy.interpolate import InterpolatedUnivariateSpline as Spline\nfrom typing import Any, Callable, Dict, List, Optional, Tuple, Union\n\nfrom . import DATA_PATH\nfrom . import modelling as mdl\nfrom . import receiver_calibration_func as rcf\nfrom . import reflection_coefficient as rc\nfrom . import s11_correction as s11\nfrom . import tools\nfrom . import types as tp\nfrom . import xrfi\nfrom .cached_property import cached_property\nfrom .tools import EdgesFrequencyRange, FrequencyRange\n\n\nclass S1P:\n def __init__(\n self,\n s1p: tp.PathLike | io.S1P,\n f_low: float | None = None,\n f_high: float | None = None,\n switchval: int | None = None,\n ):\n \"\"\"\n An object representing the measurements of a VNA.\n\n The measurements are read in via a .s1p file\n\n Parameters\n ----------\n s1p : str, Path or :class:`io.S1P`\n The path to a valid .s1p file containing VNA measurements, or an S1P\n object of such a type.\n f_low, f_high : float\n The minimum/maximum frequency to keep.\n switchval : int\n The standard value of the switch for the component.\n \"\"\"\n try:\n s1p = Path(s1p)\n self.s1p = io.S1P(s1p)\n except TypeError:\n if isinstance(s1p, io.S1P):\n self.s1p = s1p\n else:\n raise TypeError(\n \"s1p must be a path to an s1p file, or an io.S1P object\"\n )\n\n self.load_name = self.s1p.kind\n self.repeat_num = self.s1p.repeat_num\n\n spec = self.s1p.s11\n f = self.s1p.freq\n\n self.freq = FrequencyRange(f, f_low, f_high)\n self.s11 = spec[self.freq.mask]\n self._switchval = switchval\n\n @cached_property\n def switchval(self):\n \"\"\"The standard value of the switch for the component.\"\"\"\n if self._switchval is not None:\n return self._switchval * np.ones_like(self.freq.freq)\n else:\n return None\n\n\n# For backwards compatibility\nVNA = S1P\n\n\nclass _S11Base(metaclass=ABCMeta):\n default_nterms = {\n \"ambient\": 37,\n \"hot_load\": 37,\n \"open\": 105,\n \"short\": 105,\n \"AntSim2\": 55,\n \"AntSim3\": 55,\n \"AntSim4\": 55,\n \"lna\": 37,\n }\n\n def __init__(\n self,\n *,\n load_s11: Union[io._S11SubDir, io.ReceiverReading],\n f_low: Optional[float] = None,\n f_high: Optional[float] = None,\n n_terms: Optional[int] = None,\n model_type: tp.Modelable = \"fourier\",\n ):\n \"\"\"\n A class representing relevant switch corrections for a load.\n\n Parameters\n ----------\n load_s11 : :class:`io._S11SubDir`\n An instance of the basic ``io`` S11 folder.\n f_low : float\n Minimum frequency to use. Default is all frequencies.\n f_high : float\n Maximum frequency to use. Default is all frequencies.\n resistance : float\n The resistance of the switch (in Ohms).\n n_terms : int\n The number of terms to use in fitting a model to the S11 (used to both\n smooth and interpolate the data). 
Must be odd.\n \"\"\"\n self.load_s11 = load_s11\n self.base_path = self.load_s11.path\n\n try:\n self.load_name = getattr(self.load_s11, \"load_name\")\n except AttributeError:\n self.load_name = None\n\n self.run_num = self.load_s11.run_num\n\n switchvals = {\"open\": 1, \"short\": -1, \"match\": 0}\n\n for name in self.load_s11.STANDARD_NAMES:\n setattr(\n self,\n name.lower(),\n S1P(\n s1p=self.load_s11.children[name.lower()],\n f_low=f_low,\n f_high=f_high,\n switchval=switchvals.get(name.lower()),\n ),\n )\n\n # Expose one of the frequency objects\n self.freq = self.open.freq\n self._nterms = int(n_terms) if n_terms is not None else None\n self.model_type = model_type\n\n @cached_property\n def n_terms(self):\n \"\"\"Number of terms to use (by default) in modelling the S11.\n\n Raises\n ------\n ValueError\n If n_terms is even.\n \"\"\"\n res = self._nterms or self.default_nterms.get(self.load_name, None)\n if not (isinstance(res, int) and res % 2):\n raise ValueError(\n f\"n_terms must be odd for S11 models. For {self.load_name} got \"\n f\"n_terms={res}.\"\n )\n return res\n\n @classmethod\n @abstractmethod\n def from_path(cls, **kwargs):\n pass # pragma: no cover\n\n @cached_property\n @abstractmethod\n def measured_load_s11_raw(self):\n pass # pragma: no cover\n\n @cached_property\n def corrected_load_s11(self) -> np.ndarray:\n \"\"\"The measured S11 of the load, corrected for internal switch.\"\"\"\n return self.measured_load_s11_raw\n\n @lru_cache()\n def get_corrected_s11_model(\n self,\n n_terms: int | None = None,\n model_type: tp.Modelable | None = None,\n ):\n \"\"\"Generate a callable model for the S11 correction.\n\n This should closely match :method:`s11_correction`.\n\n Parameters\n ----------\n n_terms : int\n Number of terms used in the fourier-based model. 
Not necessary if\n `load_name` is specified in the class.\n\n Returns\n -------\n callable :\n A function of one argument, f, which should be a frequency in the same units\n as `self.freq.freq`.\n\n Raises\n ------\n ValueError\n If n_terms is not an integer, or not odd.\n \"\"\"\n n_terms = n_terms or self.n_terms\n model_type = mdl.get_mdl(model_type or self.model_type)\n model = model_type(\n n_terms=n_terms,\n transform=mdl.UnitTransform(range=[self.freq.min, self.freq.max]),\n )\n emodel = model.at(x=self.freq.freq)\n\n cmodel = mdl.ComplexMagPhaseModel(mag=emodel, phs=emodel)\n\n s11_correction = self.corrected_load_s11\n\n return cmodel.fit(ydata=s11_correction)\n\n @cached_property\n def s11_model(self) -> callable:\n \"\"\"The S11 model.\"\"\"\n return self.get_corrected_s11_model()\n\n def plot_residuals(\n self,\n fig=None,\n ax=None,\n color_abs=\"C0\",\n color_diff=\"g\",\n label=None,\n title=None,\n decade_ticks=True,\n ylabels=True,\n ) -> plt.Figure:\n \"\"\"\n Make a plot of the residuals of the S11 model and the correction data.\n\n Residuals obtained via :func:`get_corrected_s11_model`\n\n Returns\n -------\n fig :\n Matplotlib Figure handle.\n \"\"\"\n if fig is None or ax is None or len(ax) != 4:\n fig, ax = plt.subplots(\n 4, 1, sharex=True, gridspec_kw={\"hspace\": 0.05}, facecolor=\"w\"\n )\n\n if decade_ticks:\n for axx in ax:\n axx.xaxis.set_ticks(\n [50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160, 170, 180],\n minor=[],\n )\n axx.grid(True)\n ax[-1].set_xlabel(\"Frequency [MHz]\")\n\n corr = self.corrected_load_s11\n model = self.s11_model(self.freq.freq)\n\n ax[0].plot(\n self.freq.freq, 20 * np.log10(np.abs(model)), color=color_abs, label=label\n )\n if ylabels:\n ax[0].set_ylabel(r\"$|S_{11}|$\")\n\n ax[1].plot(self.freq.freq, np.abs(model) - np.abs(corr), color_diff)\n if ylabels:\n ax[1].set_ylabel(r\"$\\Delta |S_{11}|$\")\n\n ax[2].plot(\n self.freq.freq, np.unwrap(np.angle(model)) * 180 / np.pi, color=color_abs\n )\n if ylabels:\n ax[2].set_ylabel(r\"$\\angle S_{11}$\")\n\n ax[3].plot(\n self.freq.freq,\n np.unwrap(np.angle(model)) - np.unwrap(np.angle(corr)),\n color_diff,\n )\n if ylabels:\n ax[3].set_ylabel(r\"$\\Delta \\angle S_{11}$\")\n\n if title is None:\n title = f\"{self.load_name} Reflection Coefficient Models\"\n\n if title:\n fig.suptitle(f\"{self.load_name} Reflection Coefficient Models\", fontsize=14)\n if label:\n ax[0].legend()\n\n return fig\n\n\nclass LoadS11(_S11Base):\n def __init__(self, *, internal_switch: s11.InternalSwitch, **kwargs):\n \"\"\"S11 for a lab calibration load.\n\n Parameters\n ----------\n internal_switch : :class:`s11.InternalSwitch`\n The internal switch state corresponding to the load.\n\n Other Parameters\n ----------------\n Passed through to :class:`_S11Base`.\n \"\"\"\n assert isinstance(internal_switch, s11.InternalSwitch)\n self.internal_switch = internal_switch\n super().__init__(**kwargs)\n\n @classmethod\n def from_path(\n cls,\n load_name: str,\n path: tp.PathLike,\n run_num_load: int = 1,\n run_num_switch: int = 1,\n repeat_num_load: int = None,\n repeat_num_switch: int = None,\n resistance: float = 50.166,\n model_internal_switch: mdl.Model = attr.NOTHING,\n **kwargs,\n ):\n \"\"\"\n Create a new object from a given path and load name.\n\n Parameters\n ----------\n load_name : str\n The name of the load to create.\n path : str or Path\n The path to the overall calibration observation.\n run_num_load : int\n The run to use (default is last run available).\n run_num_switch : int\n The run to use 
for the switch S11 (default is last run available).\n kwargs\n All other arguments are passed through to the constructor of\n :class:`LoadS11`.\n\n Returns\n -------\n s11 : :class:`LoadS11`\n The S11 of the load.\n \"\"\"\n antsim = load_name.startswith(\"AntSim\")\n path = Path(path)\n\n if not antsim:\n load_name = io.LOAD_ALIASES[load_name]\n\n s11_load_dir = (io.AntSimS11 if antsim else io.LoadS11)(\n path / \"S11\" / f\"{load_name}{run_num_load:02}\", repeat_num=repeat_num_load\n )\n\n internal_switch = s11.InternalSwitch(\n data=io.SwitchingState(\n path / \"S11\" / f\"SwitchingState{run_num_switch:02}\",\n repeat_num=repeat_num_switch,\n ),\n resistance=resistance,\n model=model_internal_switch,\n )\n return cls(load_s11=s11_load_dir, internal_switch=internal_switch, **kwargs)\n\n @cached_property\n def measured_load_s11_raw(self):\n \"\"\"The measured S11 of the load, calculated from raw internal standards.\"\"\"\n return rc.de_embed(\n self.open.switchval,\n self.short.switchval,\n self.match.switchval,\n self.open.s11,\n self.short.s11,\n self.match.s11,\n self.external.s11,\n )[0]\n\n @cached_property\n def corrected_load_s11(self) -> np.ndarray:\n \"\"\"The measured S11 of the load, corrected for the internal switch.\"\"\"\n return rc.gamma_de_embed(\n self.internal_switch.s11_model(self.freq.freq),\n self.internal_switch.s12_model(self.freq.freq),\n self.internal_switch.s22_model(self.freq.freq),\n self.measured_load_s11_raw,\n )\n\n\nclass LNA(_S11Base):\n def __init__(\n self, load_s11: io.ReceiverReading, resistance: float = 50.009, **kwargs\n ):\n \"\"\"A special case of :class:`SwitchCorrection` for the LNA.\n\n Parameters\n ----------\n load_s11 : :class:`io.ReceiverReading`\n The Receiver Reading S11 measurements.\n resistance : float\n The resistance of the receiver.\n kwargs :\n All other arguments passed to :class:`SwitchCorrection`.\n \"\"\"\n super().__init__(load_s11=load_s11, **kwargs)\n self.resistance = resistance\n self.load_name = \"lna\"\n self.repeat_num = self.load_s11.repeat_num\n\n @classmethod\n def from_path(\n cls,\n path: Union[str, Path],\n repeat_num: Optional[int] = None,\n run_num: int = 1,\n **kwargs,\n ):\n \"\"\"\n Create an instance from a given path.\n\n Parameters\n ----------\n path : str or Path\n Path to overall Calibration Observation.\n run_num_load : int\n The run to use for the LNA (default latest available).\n run_num_switch : int\n The run to use for the switching state (default lastest available).\n kwargs\n All other arguments passed through to :class:`SwitchCorrection`.\n\n Returns\n -------\n lna : :class:`LNA`\n The LNA object.\n \"\"\"\n path = Path(path)\n load_s11 = io.ReceiverReading(\n path=path / \"S11\" / f\"ReceiverReading{run_num:02}\",\n repeat_num=repeat_num,\n fix=False,\n )\n\n return cls(load_s11=load_s11, **kwargs)\n\n @cached_property\n def external(self):\n \"\"\"VNA S11 measurements for the load.\"\"\"\n return S1P(\n self.load_s11.children[\"receiverreading\"],\n f_low=self.freq.freq.min(),\n f_high=self.freq.freq.max(),\n )\n\n @cached_property\n def measured_load_s11_raw(self):\n \"\"\"Measured S11 of of the LNA.\"\"\"\n # Models of standards\n oa, sa, la = rc.agilent_85033E(\n self.freq.freq, self.resistance, match_delay=True\n )\n\n # Correction at switch\n return rc.de_embed(\n oa, sa, la, self.open.s11, self.short.s11, self.match.s11, self.external.s11\n )[0]\n\n\nclass LoadSpectrum:\n def __init__(\n self,\n spec_obj: List[io.Spectrum],\n resistance_obj: io.Resistance,\n switch_correction: 
Optional[LoadS11] = None,\n f_low: float = 40.0,\n f_high: Optional[float] = None,\n ignore_times_percent: float = 5.0,\n rfi_removal: str = \"1D2D\",\n rfi_kernel_width_time: int = 16,\n rfi_kernel_width_freq: int = 16,\n rfi_threshold: float = 6,\n cache_dir: Optional[Union[str, Path]] = None,\n t_load: float = 300.0,\n t_load_ns: float = 400.0,\n ):\n \"\"\"A class representing a measured spectrum from some Load.\n\n Parameters\n ----------\n spec_obj : :class:`io.Spectrum`\n The base Spectrum object defining the on-disk spectra.\n resistance_obj : :class:`io.Resistance`\n The base Resistance object defining the on-disk resistance measurements.\n switch_correction : :class:`SwitchCorrection`\n A `SwitchCorrection` for this particular load. If not given, will be\n constructed automatically.\n f_low : float\n Minimum frequency to keep.\n f_high : float\n Maximum frequency to keep.\n ignore_times_percent : float\n Must be between 0 and 100. Number of time-samples in a file to reject\n from the start of the file.\n rfi_removal : str\n Either '1D', '2D' or '1D2D'. If given, will perform median and mean-filtered\n xRFI over either the\n 2D waterfall, or integrated 1D spectrum. The latter is usually reasonable\n for calibration sources, while the former is good for field data. \"1D2D\"\n is a hybrid approach in which the variance per-frequency is determined\n from the 2D data, but filtering occurs only over frequency.\n rfi_kernel_width_time : int\n The kernel width for the detrending of data for\n RFI removal in the time dimension (only used if `rfi_removal` is \"2D\").\n rfi_kernel_width_freq : int\n The kernel width for the detrending of data for\n RFI removal in the frequency dimension.\n rfi_threshold : float\n The threshold (in equivalent standard deviation units) above which to\n flag data as RFI.\n cache_dir : str or Path\n An alternative directory in which to load/save cached reduced files. By\n default, the same as the path to the .mat files. 
If you don't have\n write permission there, it may be useful to use an alternative path.\n t_load\n Fiducial guess for the temperature of the internal load.\n t_load_ns\n Fiducial guess for the temperature of the internal load + noise source.\n \"\"\"\n self.spec_obj = spec_obj\n self.resistance_obj = resistance_obj\n\n self.load_name = self.spec_obj[0].load_name\n assert (\n self.load_name == self.resistance_obj.load_name\n ), \"spec and resistance load_name must be the same\"\n\n self.spec_files = (spec_obj.path for spec_obj in self.spec_obj)\n self.resistance_file = self.resistance_obj.path\n\n self.run_num = self.spec_obj[0].run_num\n\n self.cache_dir = Path(cache_dir or \".\")\n\n self.rfi_kernel_width_time = rfi_kernel_width_time\n self.rfi_kernel_width_freq = rfi_kernel_width_freq\n self.rfi_threshold = rfi_threshold\n\n assert rfi_removal in [\n \"1D\",\n \"2D\",\n \"1D2D\",\n False,\n None,\n ], \"rfi_removal must be either '1D', '2D', '1D2D, or False/None\"\n\n self.rfi_removal = rfi_removal\n\n self.switch_correction = switch_correction\n\n self.ignore_times_percent = ignore_times_percent\n self.freq = EdgesFrequencyRange(f_low=f_low, f_high=f_high)\n self.t_load = t_load\n self.t_load_ns = t_load_ns\n\n @classmethod\n def from_load_name(\n cls,\n load_name: str,\n direc: Union[str, Path],\n run_num: Optional[int] = None,\n filetype: Optional[str] = None,\n **kwargs,\n ):\n \"\"\"Instantiate the class from a given load name and directory.\n\n Parameters\n ----------\n load_name : str\n The load name (one of 'ambient', 'hot_load', 'open' or 'short').\n direc : str or Path\n The top-level calibration observation directory.\n run_num : int\n The run number to use for the spectra.\n filetype : str\n The filetype to look for (acq or h5).\n kwargs :\n All other arguments to :class:`LoadSpectrum`.\n\n Returns\n -------\n :class:`LoadSpectrum`.\n \"\"\"\n direc = Path(direc)\n\n spec = io.Spectrum.from_load(\n load=load_name, direc=direc / \"Spectra\", run_num=run_num, filetype=filetype\n )\n res = io.Resistance.from_load(\n load=load_name,\n direc=direc / \"Resistance\",\n run_num=run_num,\n filetype=filetype,\n )\n return cls(spec_obj=spec, resistance_obj=res, **kwargs)\n\n @cached_property\n def averaged_Q(self) -> np.ndarray:\n \"\"\"Ratio of powers averaged over time.\n\n Notes\n -----\n The formula is\n\n .. 
math:: Q = (P_source - P_load)/(P_noise - P_load)\n \"\"\"\n # TODO: should also get weights!\n spec = self._ave_and_var_spec[0][\"Q\"]\n\n if self.rfi_removal == \"1D\":\n flags, _ = xrfi.xrfi_medfilt(\n spec, threshold=self.rfi_threshold, kf=self.rfi_kernel_width_freq\n )\n spec[flags] = np.nan\n return spec\n\n @property\n def variance_Q(self) -> np.ndarray:\n \"\"\"Variance of Q across time (see averaged_Q).\"\"\"\n return self._ave_and_var_spec[1][\"Q\"]\n\n @property\n def averaged_spectrum(self) -> np.ndarray:\n \"\"\"T* = T_noise * Q + T_load.\"\"\"\n return self.averaged_Q * self.t_load_ns + self.t_load\n\n @property\n def variance_spectrum(self) -> np.ndarray:\n \"\"\"Variance of uncalibrated spectrum across time (see averaged_spectrum).\"\"\"\n return self.variance_Q * self.t_load_ns ** 2\n\n @property\n def ancillary(self) -> dict:\n \"\"\"Ancillary measurement data.\"\"\"\n return [d.data[\"meta\"] for d in self.spec_obj]\n\n @property\n def averaged_p0(self) -> np.ndarray:\n \"\"\"Power of the load, averaged over time.\"\"\"\n return self._ave_and_var_spec[0][\"p0\"]\n\n @property\n def averaged_p1(self) -> np.ndarray:\n \"\"\"Power of the noise-source, averaged over time.\"\"\"\n return self._ave_and_var_spec[0][\"p1\"]\n\n @property\n def averaged_p2(self) -> np.ndarray:\n \"\"\"Power of the load plus noise-source, averaged over time.\"\"\"\n return self._ave_and_var_spec[0][\"p2\"]\n\n @property\n def variance_p0(self) -> np.ndarray:\n \"\"\"Variance of the load, averaged over time.\"\"\"\n return self._ave_and_var_spec[1][\"p0\"]\n\n @property\n def variance_p1(self) -> np.ndarray:\n \"\"\"Variance of the noise-source, averaged over time.\"\"\"\n return self._ave_and_var_spec[1][\"p1\"]\n\n @property\n def variance_p2(self) -> np.ndarray:\n \"\"\"Variance of the load plus noise-source, averaged over time.\"\"\"\n return self._ave_and_var_spec[1][\"p2\"]\n\n @property\n def n_integrations(self) -> int:\n \"\"\"The number of integrations recorded for the spectrum (after ignoring).\"\"\"\n return self._ave_and_var_spec[2]\n\n def _get_integrated_filename(self):\n \"\"\"Determine a unique filename for the reduced data of this instance.\"\"\"\n params = (\n self.rfi_threshold,\n self.rfi_kernel_width_time,\n self.rfi_kernel_width_freq,\n self.rfi_removal,\n self.ignore_times_percent,\n self.freq.min,\n self.freq.max,\n self.t_load,\n self.t_load_ns,\n tuple(path.name for path in self.spec_files),\n )\n hsh = md5(str(params).encode()).hexdigest()\n\n return self.cache_dir / f\"{self.load_name}_{hsh}.h5\"\n\n @cached_property\n def _ave_and_var_spec(self) -> Tuple[Dict, Dict, int]:\n \"\"\"Get the mean and variance of the spectra.\"\"\"\n fname = self._get_integrated_filename()\n\n kinds = [\"p0\", \"p1\", \"p2\", \"Q\"]\n if fname.exists():\n logger.info(\n f\"Reading in previously-created integrated {self.load_name} spectra...\"\n )\n means = {}\n variances = {}\n with h5py.File(fname, \"r\") as fl:\n for kind in kinds:\n means[kind] = fl[kind + \"_mean\"][...]\n variances[kind] = fl[kind + \"_var\"][...]\n n_integrations = fl.attrs.get(\"n_integrations\", 0)\n return means, variances, n_integrations\n\n logger.info(f\"Reducing {self.load_name} spectra...\")\n spectra = self.get_spectra()\n\n means = {}\n variances = {}\n\n for key, spec in spectra.items():\n # Weird thing where there are zeros in the spectra.\n spec[spec == 0] = np.nan\n\n mean = np.nanmean(spec, axis=1)\n var = np.nanvar(spec, axis=1)\n n_intg = spec.shape[1]\n\n if self.rfi_removal == \"1D2D\":\n nsample = 
np.sum(~np.isnan(spec), axis=1)\n varfilt = xrfi.flagged_filter(\n var, size=2 * self.rfi_kernel_width_freq + 1\n )\n resid = mean - xrfi.flagged_filter(\n mean, size=2 * self.rfi_kernel_width_freq + 1\n )\n flags = np.logical_or(\n resid > self.rfi_threshold * np.sqrt(varfilt / nsample),\n var - varfilt\n > self.rfi_threshold * np.sqrt(2 * varfilt ** 2 / (nsample - 1)),\n )\n\n mean[flags] = np.nan\n var[flags] = np.nan\n\n means[key] = mean\n variances[key] = var\n\n if not self.cache_dir.exists():\n self.cache_dir.mkdir()\n\n with h5py.File(fname, \"w\") as fl:\n logger.info(f\"Saving reduced spectra to cache at {fname}\")\n for kind in kinds:\n fl[kind + \"_mean\"] = means[kind]\n fl[kind + \"_var\"] = variances[kind]\n fl.attrs[\"n_integrations\"] = n_intg\n\n return means, variances, n_intg\n\n def get_spectra(self) -> dict:\n \"\"\"Read all spectra and remove RFI.\n\n Returns\n -------\n dict :\n A dictionary with keys being different powers (p1, p2, p3, Q), and values\n being ndarrays.\n \"\"\"\n spec = self._read_spectrum()\n\n if self.rfi_removal == \"2D\":\n for key, val in spec.items():\n # Need to set nans and zeros to inf so that median/mean detrending\n # can work.\n val[np.isnan(val)] = np.inf\n\n if key != \"Q\":\n val[val == 0] = np.inf\n\n flags, _ = xrfi.xrfi_medfilt(\n val,\n threshold=self.rfi_threshold,\n kt=self.rfi_kernel_width_time,\n kf=self.rfi_kernel_width_freq,\n )\n val[flags] = np.nan\n spec[key] = val\n return spec\n\n def _read_spectrum(self) -> dict:\n \"\"\"\n Read the contents of the spectrum files into memory.\n\n Removes a starting percentage of times, and masks out certain frequencies.\n\n Returns\n -------\n dict :\n A dictionary of the contents of the file. Usually p0, p1, p2 (un-normalised\n powers of source, load, and load+noise respectively), and ant_temp (the\n uncalibrated, but normalised antenna temperature).\n \"\"\"\n data = [spec_obj.data for spec_obj in self.spec_obj]\n\n n_times = sum(len(d[\"time_ancillary\"][\"times\"]) for d in data)\n out = {\n \"p0\": np.empty((len(self.freq.freq), n_times)),\n \"p1\": np.empty((len(self.freq.freq), n_times)),\n \"p2\": np.empty((len(self.freq.freq), n_times)),\n \"Q\": np.empty((len(self.freq.freq), n_times)),\n }\n\n index_start_spectra = int((self.ignore_times_percent / 100) * n_times)\n for key, val in out.items():\n nn = 0\n for d in data:\n n = len(d[\"time_ancillary\"][\"times\"])\n val[:, nn : (nn + n)] = d[\"spectra\"][key][self.freq.mask]\n nn += n\n\n out[key] = val[:, index_start_spectra:]\n\n return out\n\n @cached_property\n def thermistor(self) -> np.ndarray:\n \"\"\"The thermistor readings.\"\"\"\n ary = self.resistance_obj.read()[0]\n\n return ary[int((self.ignore_times_percent / 100) * len(ary)) :]\n\n @cached_property\n def thermistor_temp(self):\n \"\"\"The associated thermistor temperature in K.\"\"\"\n return rcf.temperature_thermistor(self.thermistor[\"load_resistance\"])\n\n @cached_property\n def temp_ave(self):\n \"\"\"Average thermistor temperature (over time and frequency).\"\"\"\n return np.nanmean(self.thermistor_temp)\n\n def write(self, path=None):\n \"\"\"\n Write a HDF5 file containing the contents of the LoadSpectrum.\n\n Parameters\n ----------\n path : str\n Directory into which to save the file, or full path to file.\n If a directory, filename will be <load_name>_averaged_spectrum.h5.\n Default is current directory.\n \"\"\"\n path = Path(path or \".\")\n\n # Allow to pass in a directory name *or* full path.\n if path.is_dir():\n path /= 
f\"{self.load_name}_averaged_spectrum.h5\"\n\n with h5py.File(path, \"w\") as fl:\n fl.attrs[\"load_name\"] = self.load_name\n fl[\"freq\"] = self.freq.freq\n fl[\"averaged_raw_spectrum\"] = self.averaged_spectrum\n fl[\"temperature\"] = self.thermistor_temp\n\n def plot(\n self, thermistor=False, fig=None, ax=None, xlabel=True, ylabel=True, **kwargs\n ):\n \"\"\"\n Make a plot of the averaged uncalibrated spectrum associated with this load.\n\n Parameters\n ----------\n thermistor : bool\n Whether to plot the thermistor temperature on the same axis.\n fig : Figure\n Optionally, pass a matplotlib figure handle which will be used to plot.\n ax : Axis\n Optional, pass a matplotlib Axis handle which will be added to.\n xlabel : bool\n Whether to make an x-axis label.\n ylabel : bool\n Whether to plot the y-axis label\n kwargs :\n All other arguments are passed to `plt.subplots()`.\n \"\"\"\n if fig is None:\n fig, ax = plt.subplots(\n 1, 1, facecolor=kwargs.pop(\"facecolor\", \"white\"), **kwargs\n )\n\n if thermistor:\n ax.plot(self.freq.freq, self.thermistor_temp)\n if ylabel:\n ax.set_ylabel(\"Temperature [K]\")\n else:\n ax.plot(self.freq.freq, self.averaged_spectrum)\n if ylabel:\n ax.set_ylabel(\"$T^*$ [K]\")\n\n ax.grid(True)\n if xlabel:\n ax.set_xlabel(\"Frequency [MHz]\")\n\n\nclass HotLoadCorrection:\n\n _kinds = {\"s11\": 0, \"s12\": 1, \"s22\": 2}\n\n def __init__(\n self,\n path: Union[str, Path] = \":semi_rigid_s_parameters_WITH_HEADER.txt\",\n f_low: Optional[float] = None,\n f_high: Optional[float] = None,\n n_terms: int = 21,\n ):\n \"\"\"\n Corrections for the hot load.\n\n Measurements required to define the HotLoad temperature, from Monsalve et al.\n (2017), Eq. 8+9.\n\n Parameters\n ----------\n path : str or Path, optional\n Path to a file containing measurements of the semi-rigid cable reflection\n parameters. 
A preceding colon (:) indicates to prefix with DATA_PATH.\n The default file was measured in 2015, but there is also a file included\n that can be used from 2017: \":semi_rigid_s_parameters_2017.txt\".\n f_low, f_high : float\n Lowest/highest frequency to retain from measurements.\n \"\"\"\n # Get the path to the S11 file.\n if not isinstance(path, Path):\n path = DATA_PATH / path[1:] if path[0] == \":\" else Path(path)\n self.path = path\n data = np.genfromtxt(self.path)\n\n f = data[:, 0]\n self.freq = FrequencyRange(f, f_low, f_high)\n\n if data.shape[1] == 7: # Original file from 2015\n self.data = data[self.freq.mask, 1::2] + 1j * data[self.freq.mask, 2::2]\n elif data.shape[1] == 6: # File from 2017\n self.data = np.array(\n [\n data[self.freq.mask, 1] + 1j * data[self.freq.mask, 2],\n data[self.freq.mask, 3],\n data[self.freq.mask, 4] + 1j * data[self.freq.mask, 5],\n ]\n ).T\n else:\n raise IOError(\"Semi-Rigid Cable file has wrong data format.\")\n\n self.n_terms = int(n_terms)\n\n def _get_model_kind(self, kind):\n model = mdl.Polynomial(\n n_terms=self.n_terms,\n transform=mdl.UnitTransform(range=(self.freq.min, self.freq.max)),\n )\n model = mdl.ComplexMagPhaseModel(mag=model, phs=model)\n return model.fit(xdata=self.freq.freq, ydata=self.data[:, self._kinds[kind]])\n\n @cached_property\n def s11_model(self):\n \"\"\"The reflection coefficient.\"\"\"\n return self._get_model_kind(\"s11\")\n\n @cached_property\n def s12_model(self):\n \"\"\"The transmission coefficient.\"\"\"\n return self._get_model_kind(\"s12\")\n\n @cached_property\n def s22_model(self):\n \"\"\"The reflection coefficient from the other side.\"\"\"\n return self._get_model_kind(\"s22\")\n\n def power_gain(self, freq: np.ndarray, hot_load_s11: LoadS11) -> np.ndarray:\n \"\"\"\n Calculate the power gain.\n\n Parameters\n ----------\n freq : np.ndarray\n The frequencies.\n hot_load_s11 : :class:`LoadS11`\n The S11 of the hot load.\n\n Returns\n -------\n gain : np.ndarray\n The power gain as a function of frequency.\n \"\"\"\n assert isinstance(\n hot_load_s11, LoadS11\n ), \"hot_load_s11 must be a switch correction\"\n assert (\n hot_load_s11.load_name == \"hot_load\"\n ), \"hot_load_s11 must be a hot_load s11\"\n\n return self.get_power_gain(\n {\n \"s11\": self.s11_model(freq),\n \"s12s21\": self.s12_model(freq),\n \"s22\": self.s22_model(freq),\n },\n hot_load_s11.s11_model(freq),\n )\n\n @staticmethod\n def get_power_gain(\n semi_rigid_sparams: dict, hot_load_s11: np.ndarray\n ) -> np.ndarray:\n \"\"\"Define Eq. 
9 from M17.\n\n Parameters\n ----------\n semi_rigid_sparams : dict\n A dictionary of reflection coefficient measurements as a function of\n frequency for the semi-rigid cable.\n hot_load_s11 : array-like\n The S11 measurement of the hot_load.\n\n Returns\n -------\n gain : np.ndarray\n The power gain.\n \"\"\"\n rht = rc.gamma_de_embed(\n semi_rigid_sparams[\"s11\"],\n semi_rigid_sparams[\"s12s21\"],\n semi_rigid_sparams[\"s22\"],\n hot_load_s11,\n )\n\n return (\n np.abs(semi_rigid_sparams[\"s12s21\"])\n * (1 - np.abs(rht) ** 2)\n / (\n (np.abs(1 - semi_rigid_sparams[\"s11\"] * rht)) ** 2\n * (1 - np.abs(hot_load_s11) ** 2)\n )\n )\n\n\nclass Load:\n def __init__(\n self,\n spectrum: LoadSpectrum,\n reflections: LoadS11,\n hot_load_correction: Optional[HotLoadCorrection] = None,\n ambient: Optional[LoadSpectrum] = None,\n ):\n \"\"\"Wrapper class containing all relevant information for a given load.\n\n Parameters\n ----------\n spectrum : :class:`LoadSpectrum`\n The spectrum for this particular load.\n reflections : :class:`SwitchCorrection`\n The S11 measurements for this particular load.\n hot_load_correction : :class:`HotLoadCorrection`\n If this is a hot load, provide a hot load correction.\n ambient : :class:`LoadSpectrum`\n If this is a hot load, need to provide an ambient spectrum to correct it.\n \"\"\"\n assert isinstance(spectrum, LoadSpectrum), \"spectrum must be a LoadSpectrum\"\n assert isinstance(reflections, LoadS11), \"spectrum must be a SwitchCorrection\"\n assert spectrum.load_name == reflections.load_name\n\n self.spectrum = spectrum\n self.reflections = reflections\n self.load_name = spectrum.load_name\n self.t_load = self.spectrum.t_load\n self.t_load_ns = self.spectrum.t_load_ns\n\n if self.load_name == \"hot_load\":\n self._correction = hot_load_correction\n self._ambient = ambient\n\n @classmethod\n def from_path(\n cls,\n path: Union[str, Path],\n load_name: str,\n f_low: Optional[float] = None,\n f_high: Optional[float] = None,\n reflection_kwargs: Optional[dict] = None,\n spec_kwargs: Optional[dict] = None,\n ):\n \"\"\"\n Define a full :class:`Load` from a path and name.\n\n Parameters\n ----------\n path : str or Path\n Path to the top-level calibration observation.\n load_name : str\n Name of a load to define.\n f_low, f_high : float\n Min/max frequencies to keep in measurements.\n reflection_kwargs : dict\n Extra arguments to pass through to :class:`SwitchCorrection`.\n spec_kwargs : dict\n Extra arguments to pass through to :class:`LoadSpectrum`.\n\n Returns\n -------\n load : :class:`Load`\n The load object, containing all info about spectra and S11's for that load.\n \"\"\"\n if not spec_kwargs:\n spec_kwargs = {}\n if not reflection_kwargs:\n reflection_kwargs = {}\n\n spec = LoadSpectrum.from_load_name(\n load_name,\n path,\n f_low=f_low,\n f_high=f_high,\n **spec_kwargs,\n )\n\n refl = LoadS11.from_path(\n load_name,\n path,\n f_low=f_low,\n f_high=f_high,\n **reflection_kwargs,\n )\n\n return cls(spec, refl)\n\n @property\n def s11_model(self):\n \"\"\"The S11 model.\"\"\"\n return self.reflections.s11_model\n\n @cached_property\n def temp_ave(self):\n \"\"\"The average temperature of the thermistor (over frequency and time).\"\"\"\n if self.load_name != \"hot_load\":\n return self.spectrum.temp_ave\n\n gain = self._correction.power_gain(self.freq.freq, self.reflections)\n # temperature\n return gain * self.spectrum.temp_ave + (1 - gain) * self._ambient.temp_ave\n\n @property\n def averaged_Q(self):\n \"\"\"Averaged power ratio.\"\"\"\n return 
self.spectrum.averaged_Q\n\n @property\n def averaged_spectrum(self):\n \"\"\"Averaged uncalibrated temperature.\"\"\"\n return self.spectrum.averaged_spectrum\n\n @property\n def freq(self):\n \"\"\"A :class:`FrequencyRange` object corresponding to this measurement.\"\"\"\n return self.spectrum.freq\n\n\nclass CalibrationObservation:\n _sources = (\"ambient\", \"hot_load\", \"open\", \"short\")\n\n def __init__(\n self,\n path: Union[str, Path],\n semi_rigid_path: Union[str, Path] = \":semi_rigid_s_parameters_WITH_HEADER.txt\",\n f_low: Optional[float] = 40,\n f_high: Optional[float] = None,\n run_num: Union[None, int, dict] = None,\n repeat_num: Union[None, int, dict] = None,\n resistance_f: Optional[float] = None,\n cterms: int = 5,\n wterms: int = 7,\n load_kwargs: Optional[dict] = None,\n s11_kwargs: Optional[dict] = None,\n load_spectra: Optional[dict] = None,\n load_s11s: Optional[dict] = None,\n compile_from_def: bool = True,\n include_previous: bool = False,\n internal_switch_kwargs: Optional[Dict[str, Any]] = None,\n ):\n \"\"\"\n A composite object representing a full Calibration Observation.\n\n This includes spectra of all calibrators, and methods to find the calibration\n parameters. It strictly follows Monsalve et al. (2017) in its formalism.\n While by default the class uses the calibrator sources (\"ambient\", \"hot_load\",\n \"open\", \"short\"), it can be modified to take other sources by setting\n ``CalibrationObservation._sources`` to a new tuple of strings.\n\n Parameters\n ----------\n path : str or Path\n Path to the directory containing all relevant measurements. It is assumed\n that in this directory is an `S11`, `Resistance` and `Spectra` directory.\n semi_rigid_path : str or Path, optional\n Path to a file containing S11 measurements for the semi rigid cable. Used to\n correct the hot load S11. Found automatically if not given.\n ambient_temp : int\n Ambient temperature (C) at which measurements were taken.\n f_low : float\n Minimum frequency to keep for all loads (and their S11's). If for some\n reason different frequency bounds are desired per-load, one can pass in\n full load objects through ``load_spectra``.\n f_high : float\n Maximum frequency to keep for all loads (and their S11's). If for some\n reason different frequency bounds are desired per-load, one can pass in\n full load objects through ``load_spectra``.\n run_num : int or dict\n Which run number to use for the calibrators. Default is to use the last run\n for each. Passing an int will attempt to use that run for each source. Pass\n a dict mapping sources to numbers to use different combinations.\n repeat_num : int or dict\n Which repeat number to use for the calibrators. Default is to use the last\n repeat for each. Passing an int will attempt to use that repeat for each\n source. Pass a dict mapping sources to numbers to use different\n combinations.\n resistance_f : float\n Female resistance (Ohms). Used for the LNA S11.\n cterms : int\n The number of terms to use for the polynomial fits to the calibration\n functions.\n wterms : int\n The number of terms to use for the polynomial fits to the noise-wave\n calibration functions.\n load_kwargs : dict\n Keyword arguments used to instantiate the calibrator :class:`LoadSpectrum`\n objects. See its documentation for relevant parameters. Parameters specified\n here are used for _all_ calibrator sources.\n s11_kwargs : dict\n Keyword arguments used to instantiate the calibrator :class:`LoadS11`\n objects. See its documentation for relevant parameters. 
Parameters specified\n here are used for _all_ calibrator sources.\n load_spectra : dict\n A dictionary mapping load names of calibration sources (eg. ambient, short)\n to either :class:`LoadSpectrum` instances or dictionaries of keywords to\n instantiate those objects. Useful for individually specifying\n properties of each load separately. Values in these dictionaries (if\n supplied) over-ride those given in ``load_kwargs`` (but values in\n ``load_kwargs`` are still used if not over-ridden).\n load_s11s : dict\n A dictionary mapping load names of calibration sources (eg. ambient, short)\n to :class:`LoadS11` instances or dictionaries of keywords to instantiate\n those objects. Useful for individually specifying properties of each load\n separately. Values in these dictionaries (if supplied) over-ride those\n given in ``s11_kwargs`` (but values in ``s11_kwargs`` are still used if not\n over-ridden).\n compile_from_def : bool\n Whether to attempt compiling a virtual observation from a\n ``definition.yaml`` inside the observation directory. This is the default\n behaviour, but can be turned off to enforce that the current directory\n should be used directly.\n include_previous : bool\n Whether to include the previous observation by default to supplement this\n one if required files are missing.\n\n Examples\n --------\n This will setup an observation with all default options applied:\n\n >>> path = '/CalibrationObservations/Receiver01_25C_2019_11_26_040_to_200MHz'\n >>> calobs = CalibrationObservation(path)\n\n To specify some options for constructing the various calibrator load spectra:\n\n >>> calobs = CalibrationObservation(\n >>> path,\n >>> load_kwargs={\"cache_dir\":\".\", \"ignore_times_percent\": 50}\n >>> )\n\n But if we typically wanted 50% of times ignored, but in one special case we'd\n like 80%:\n\n >>> calobs = CalibrationObservation(\n >>> path,\n >>> load_kwargs={\"cache_dir\":\".\", \"ignore_times_percent\": 50},\n >>> load_spectra={\"short\": {\"ignore_times_percent\": 80}}\n >>> )\n\n \"\"\"\n load_spectra = load_spectra or {}\n load_s11s = load_s11s or {}\n load_kwargs = load_kwargs or {}\n s11_kwargs = s11_kwargs or {}\n internal_switch_kwargs = internal_switch_kwargs or {}\n\n assert all(name in self._sources for name in load_spectra)\n assert all(name in self._sources + (\"lna\",) for name in load_s11s)\n\n self.io = io.CalibrationObservation(\n path,\n run_num=run_num,\n repeat_num=repeat_num,\n fix=False,\n compile_from_def=compile_from_def,\n include_previous=include_previous,\n )\n self.compiled_from_def = compile_from_def\n self.previous_included = include_previous\n\n self.path = Path(self.io.path)\n\n hot_load_correction = HotLoadCorrection(semi_rigid_path, f_low, f_high)\n\n self.internal_switch = s11.InternalSwitch(\n data=self.io.s11.switching_state,\n resistance=self.io.definition[\"measurements\"][\"resistance_m\"][\n self.io.s11.switching_state.run_num\n ],\n **internal_switch_kwargs,\n )\n\n self._loads = {}\n for source in self._sources:\n load = load_spectra.get(source, {})\n\n if isinstance(load, dict):\n load = LoadSpectrum(\n spec_obj=getattr(self.io.spectra, source),\n resistance_obj=getattr(self.io.resistance, source),\n f_low=f_low,\n f_high=f_high,\n **{**load_kwargs, **load},\n )\n\n # Ensure that we finally have a LoadSpectrum\n if not isinstance(load, LoadSpectrum):\n raise TypeError(\"load_spectra must be a dict of LoadSpectrum or dicts.\")\n\n refl = load_s11s.get(source, {})\n\n if isinstance(refl, dict):\n refl = LoadS11(\n 
load_s11=getattr(self.io.s11, source),\n internal_switch=self.internal_switch,\n f_low=f_low,\n f_high=f_high,\n **{**s11_kwargs, **refl},\n )\n\n if source == \"hot_load\":\n self._loads[source] = Load(\n load,\n refl,\n hot_load_correction=hot_load_correction,\n ambient=self._loads[\"ambient\"].spectrum,\n )\n else:\n self._loads[source] = Load(load, refl)\n\n for name, load in self._loads.items():\n setattr(self, name, load)\n\n refl = load_s11s.get(\"lna\", {})\n\n self.lna = LNA(\n load_s11=self.io.s11.receiver_reading,\n f_low=f_low,\n f_high=f_high,\n resistance=resistance_f\n or self.io.definition[\"measurements\"][\"resistance_f\"][\n self.io.s11.receiver_reading.run_num\n ],\n **{**s11_kwargs, **refl},\n )\n\n # We must use the most restricted frequency range available from all available\n # sources as well as the LNA.\n fmin = max(\n sum(\n (\n [load.spectrum.freq.min, load.reflections.freq.min]\n for load in self._loads.values()\n ),\n [],\n )\n + [self.lna.freq.min]\n )\n\n fmax = min(\n sum(\n (\n [load.spectrum.freq.max, load.reflections.freq.max]\n for load in self._loads.values()\n ),\n [],\n )\n + [self.lna.freq.max]\n )\n\n if fmax <= fmin:\n raise ValueError(\n \"The inputs loads and S11s have non-overlapping frequency ranges!\"\n )\n\n self.freq = EdgesFrequencyRange(f_low=fmin, f_high=fmax)\n\n # Now make everything actually consistent in its frequency range.\n for load in self._loads.values():\n load.spectrum.freq = self.freq\n\n self.cterms = cterms\n self.wterms = wterms\n self.t_load = self.ambient.t_load\n self.t_load_ns = self.ambient.t_load_ns\n\n @property\n def load_names(self) -> Tuple[str]:\n \"\"\"Names of the loads.\"\"\"\n return tuple(self._loads.keys())\n\n def new_load(\n self,\n load_name: str,\n run_num: int = 1,\n reflection_kwargs: Optional[dict] = None,\n spec_kwargs: Optional[dict] = None,\n ):\n \"\"\"Create a new load with the given load name.\n\n Uses files inside the current observation.\n\n Parameters\n ----------\n load_name : str\n The name of the load ('ambient', 'hot_load', 'open', 'short').\n run_num_spec : dict or int\n Run number to use for the spectrum.\n run_num_load : dict or int\n Run number to use for the load's S11.\n reflection_kwargs : dict\n Keyword arguments to construct the :class:`SwitchCorrection`.\n spec_kwargs : dict\n Keyword arguments to construct the :class:`LoadSpectrum`.\n \"\"\"\n reflection_kwargs = reflection_kwargs or {}\n spec_kwargs = spec_kwargs or {}\n\n # Fill up kwargs with keywords from this instance\n if \"resistance\" not in reflection_kwargs:\n reflection_kwargs[\n \"resistance\"\n ] = self.open.reflections.internal_switch.resistance\n\n for key in [\n \"ignore_times_percent\",\n \"rfi_removal\",\n \"rfi_kernel_width_freq\",\n \"rfi_kernel_width_time\",\n \"rfi_threshold\",\n \"cache_dir\",\n \"t_load\",\n \"t_load_ns\",\n ]:\n if key not in spec_kwargs:\n spec_kwargs[key] = getattr(self.open.spectrum, key)\n\n reflection_kwargs[\"run_num_load\"] = run_num\n reflection_kwargs[\"repeat_num_switch\"] = self.io.s11.switching_state.repeat_num\n reflection_kwargs[\"run_num_switch\"] = self.io.s11.switching_state.run_num\n spec_kwargs[\"run_num\"] = run_num\n\n return Load.from_path(\n path=self.io.path,\n load_name=load_name,\n f_low=self.freq.min,\n f_high=self.freq.max,\n reflection_kwargs=reflection_kwargs,\n spec_kwargs=spec_kwargs,\n )\n\n def plot_raw_spectra(self, fig=None, ax=None) -> plt.Figure:\n \"\"\"\n Plot raw uncalibrated spectra for all calibrator sources.\n\n Parameters\n ----------\n fig 
: :class:`plt.Figure`\n A matplotlib figure on which to make the plot. By default creates a new one.\n ax : :class:`plt.Axes`\n A matplotlib Axes on which to make the plot. By default creates a new one.\n\n Returns\n -------\n fig : :class:`plt.Figure`\n The figure on which the plot was made.\n \"\"\"\n if fig is None and ax is None:\n fig, ax = plt.subplots(\n len(self._sources), 1, sharex=True, gridspec_kw={\"hspace\": 0.05}\n )\n\n for i, (name, load) in enumerate(self._loads.items()):\n load.spectrum.plot(\n fig=fig, ax=ax[i], xlabel=(i == (len(self._sources) - 1))\n )\n ax[i].set_title(name)\n\n return fig\n\n def plot_s11_models(self, **kwargs):\n \"\"\"\n Plot residuals of S11 models for all sources.\n\n Returns\n -------\n dict:\n Each entry has a key of the source name, and the value is a matplotlib fig.\n \"\"\"\n out = {\n name: source.reflections.plot_residuals(**kwargs)\n for name, source in self._loads.items()\n }\n out.update({\"lna\": self.lna.plot_residuals(**kwargs)})\n return out\n\n @cached_property\n def s11_correction_models(self):\n \"\"\"Dictionary of S11 correction models, one for each source.\"\"\"\n try:\n return dict(self._injected_source_s11s)\n except (TypeError, AttributeError):\n return {\n name: source.s11_model(self.freq.freq)\n for name, source in self._loads.items()\n }\n\n @cached_property\n def source_thermistor_temps(self) -> Dict[str, Union[float, np.ndarray]]:\n \"\"\"Dictionary of input source thermistor temperatures.\"\"\"\n if (\n hasattr(self, \"_injected_source_temps\")\n and self._injected_source_temps is not None\n ):\n return self._injected_source_temps\n\n return {k: source.temp_ave for k, source in self._loads.items()}\n\n @cached_property\n def _calibration_coefficients(self):\n \"\"\"The calibration polynomials, evaluated at `freq.freq`.\"\"\"\n if (\n hasattr(self, \"_injected_averaged_spectra\")\n and self._injected_averaged_spectra is not None\n ):\n ave_spec = self._injected_averaged_spectra\n else:\n ave_spec = {\n k: source.averaged_spectrum for k, source in self._loads.items()\n }\n scale, off, Tu, TC, TS = rcf.get_calibration_quantities_iterative(\n self.freq.freq_recentred,\n temp_raw=ave_spec,\n gamma_rec=self.lna_s11,\n gamma_ant=self.s11_correction_models,\n temp_ant=self.source_thermistor_temps,\n cterms=self.cterms,\n wterms=self.wterms,\n temp_amb_internal=self.t_load,\n )\n return scale, off, Tu, TC, TS\n\n @cached_property\n def C1_poly(self): # noqa: N802\n \"\"\"`np.poly1d` object describing the Scaling calibration coefficient C1.\n\n The polynomial is defined to act on normalized frequencies such that `freq.min`\n and `freq.max` map to -1 and 1 respectively. Use :func:`~C1` as a direct\n function on frequency.\n \"\"\"\n return self._calibration_coefficients[0]\n\n @cached_property\n def C2_poly(self): # noqa: N802\n \"\"\"`np.poly1d` object describing the offset calibration coefficient C2.\n\n The polynomial is defined to act on normalized frequencies such that `freq.min`\n and `freq.max` map to -1 and 1 respectively. Use :func:`~C2` as a direct\n function on frequency.\n \"\"\"\n return self._calibration_coefficients[1]\n\n @cached_property\n def Tunc_poly(self): # noqa: N802\n \"\"\"`np.poly1d` object describing the uncorrelated noise-wave parameter, Tunc.\n\n The polynomial is defined to act on normalized frequencies such that `freq.min`\n and `freq.max` map to -1 and 1 respectively. 
Use :func:`~Tunc` as a direct\n function on frequency.\n \"\"\"\n return self._calibration_coefficients[2]\n\n @cached_property\n def Tcos_poly(self): # noqa: N802\n \"\"\"`np.poly1d` object describing the cosine noise-wave parameter, Tcos.\n\n The polynomial is defined to act on normalized frequencies such that `freq.min`\n and `freq.max` map to -1 and 1 respectively. Use :func:`~Tcos` as a direct\n function on frequency.\n \"\"\"\n return self._calibration_coefficients[3]\n\n @cached_property\n def Tsin_poly(self): # noqa: N802\n \"\"\"`np.poly1d` object describing the sine noise-wave parameter, Tsin.\n\n The polynomial is defined to act on normalized frequencies such that `freq.min`\n and `freq.max` map to -1 and 1 respectively. Use :func:`~Tsin` as a direct\n function on frequency.\n \"\"\"\n return self._calibration_coefficients[4]\n\n def C1(self, f: Optional[Union[float, np.ndarray]] = None): # noqa: N802\n \"\"\"\n Scaling calibration parameter.\n\n Parameters\n ----------\n f : array-like\n The frequencies at which to evaluate C1. By default, the frequencies of this\n instance.\n \"\"\"\n if hasattr(self, \"_injected_c1\") and self._injected_c1 is not None:\n return np.array(self._injected_c1)\n fnorm = self.freq.freq_recentred if f is None else self.freq.normalize(f)\n return self.C1_poly(fnorm)\n\n def C2(self, f: Optional[Union[float, np.ndarray]] = None): # noqa: N802\n \"\"\"\n Offset calibration parameter.\n\n Parameters\n ----------\n f : array-like\n The frequencies at which to evaluate C2. By default, the frequencies of this\n instance.\n \"\"\"\n if hasattr(self, \"_injected_c2\") and self._injected_c2 is not None:\n return np.array(self._injected_c2)\n fnorm = self.freq.freq_recentred if f is None else self.freq.normalize(f)\n return self.C2_poly(fnorm)\n\n def Tunc(self, f: Optional[Union[float, np.ndarray]] = None): # noqa: N802\n \"\"\"\n Uncorrelated noise-wave parameter.\n\n Parameters\n ----------\n f : array-like\n The frequencies at which to evaluate Tunc. By default, the frequencies of\n thisinstance.\n \"\"\"\n if hasattr(self, \"_injected_t_unc\") and self._injected_t_unc is not None:\n return np.array(self._injected_t_unc)\n fnorm = self.freq.freq_recentred if f is None else self.freq.normalize(f)\n return self.Tunc_poly(fnorm)\n\n def Tcos(self, f: Optional[Union[float, np.ndarray]] = None): # noqa: N802\n \"\"\"\n Cosine noise-wave parameter.\n\n Parameters\n ----------\n f : array-like\n The frequencies at which to evaluate Tcos. By default, the frequencies of\n this instance.\n \"\"\"\n if hasattr(self, \"_injected_t_cos\") and self._injected_t_cos is not None:\n return np.array(self._injected_t_cos)\n fnorm = self.freq.freq_recentred if f is None else self.freq.normalize(f)\n return self.Tcos_poly(fnorm)\n\n def Tsin(self, f: Optional[Union[float, np.ndarray]] = None): # noqa: N802\n \"\"\"\n Sine noise-wave parameter.\n\n Parameters\n ----------\n f : array-like\n The frequencies at which to evaluate Tsin. 
By default, the frequencies of\n this instance.\n \"\"\"\n if hasattr(self, \"_injected_t_sin\") and self._injected_t_sin is not None:\n return np.array(self._injected_t_sin)\n fnorm = self.freq.freq_recentred if f is None else self.freq.normalize(f)\n return self.Tsin_poly(fnorm)\n\n @cached_property\n def lna_s11(self):\n \"\"\"The corrected S11 of the LNA evaluated at the data frequencies.\"\"\"\n if hasattr(self, \"_injected_lna_s11\") and self._injected_lna_s11 is not None:\n return self._injected_lna_s11\n else:\n return self.lna.s11_model(self.freq.freq)\n\n def get_linear_coefficients(self, load: Union[Load, str]):\n \"\"\"\n Calibration coefficients a,b such that T = aT* + b (derived from Eq. 7).\n\n Parameters\n ----------\n load : str or :class:`Load`\n The load for which to get the linear coefficients.\n \"\"\"\n if isinstance(load, str):\n load_s11 = self.s11_correction_models[load]\n elif load.load_name in self.s11_correction_models:\n load_s11 = self.s11_correction_models[load.load_name]\n else:\n load_s11 = load.s11_model(self.freq.freq)\n\n return rcf.get_linear_coefficients(\n load_s11,\n self.lna_s11,\n self.C1(self.freq.freq),\n self.C2(self.freq.freq),\n self.Tunc(self.freq.freq),\n self.Tcos(self.freq.freq),\n self.Tsin(self.freq.freq),\n t_load=self.t_load,\n )\n\n def calibrate(self, load: Union[Load, str], q=None, temp=None):\n \"\"\"\n Calibrate the temperature of a given load.\n\n Parameters\n ----------\n load : :class:`Load` or str\n The load to calibrate.\n\n Returns\n -------\n array : calibrated antenna temperature in K, len(f).\n \"\"\"\n load = self._load_str_to_load(load)\n a, b = self.get_linear_coefficients(load)\n\n if q is not None:\n temp = self.t_load_ns * q + self.t_load\n elif temp is None:\n temp = load.averaged_spectrum\n\n return a * temp + b\n\n def _load_str_to_load(self, load: Union[Load, str]):\n if isinstance(load, str):\n try:\n load = self._loads[load]\n except AttributeError:\n raise AttributeError(\n \"load must be a Load object or a string (one of \"\n \"{ambient,hot_load,open,short})\"\n )\n else:\n assert isinstance(\n load, Load\n ), \"load must be a Load instance, got the {} {}\".format(load, type(Load))\n return load\n\n def decalibrate(\n self, temp: np.ndarray, load: Union[Load, str], freq: np.ndarray = None\n ):\n \"\"\"\n Decalibrate a temperature spectrum, yielding uncalibrated T*.\n\n Parameters\n ----------\n temp : array_like\n A temperature spectrum, with the same length as `freq.freq`.\n load : str or :class:`Load`\n The load to calibrate.\n freq : array-like\n The frequencies at which to decalibrate. By default, the frequencies of the\n instance.\n\n Returns\n -------\n array_like : T*, the normalised uncalibrated temperature.\n \"\"\"\n if freq is None:\n freq = self.freq.freq\n\n if freq.min() < self.freq.freq.min():\n warnings.warn(\n \"The minimum frequency is outside the calibrated range \"\n f\"({self.freq.freq.min()} - {self.freq.freq.max()} MHz)\"\n )\n\n if freq.min() > self.freq.freq.max():\n warnings.warn(\"The maximum frequency is outside the calibrated range \")\n\n a, b = self.get_linear_coefficients(load)\n return (temp - b) / a\n\n def get_K(\n self, freq: np.ndarray | None = None\n ) -> Dict[str, Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]]:\n \"\"\"Get the source-S11-dependent factors of Monsalve (2017) Eq. 
7.\"\"\"\n if freq is None:\n freq = self.freq.freq\n gamma_ants = self.s11_correction_models\n else:\n gamma_ants = {\n name: source.s11_model(freq) for name, source in self._loads.items()\n }\n\n lna_s11 = self.lna.s11_model(freq)\n return {\n name: rcf.get_K(gamma_rec=lna_s11, gamma_ant=gamma_ant)\n for name, gamma_ant in gamma_ants.items()\n }\n\n def plot_calibrated_temp(\n self,\n load: Union[Load, str],\n bins: int = 2,\n fig=None,\n ax=None,\n xlabel=True,\n ylabel=True,\n ):\n \"\"\"\n Make a plot of calibrated temperature for a given source.\n\n Parameters\n ----------\n load : :class:`~LoadSpectrum` instance\n Source to plot.\n bins : int\n Number of bins to smooth over (std of Gaussian kernel)\n fig : Figure\n Optionally provide a matplotlib figure to add to.\n ax : Axis\n Optionally provide a matplotlib Axis to add to.\n xlabel : bool\n Whether to write the x-axis label\n ylabel : bool\n Whether to write the y-axis label\n\n Returns\n -------\n fig :\n The matplotlib figure that was created.\n \"\"\"\n load = self._load_str_to_load(load)\n\n if fig is None and ax is None:\n fig, ax = plt.subplots(1, 1, facecolor=\"w\")\n\n # binning\n temp_calibrated = self.calibrate(load)\n\n if bins > 0:\n freq_ave_cal = convolve(\n temp_calibrated, Gaussian1DKernel(stddev=bins), boundary=\"extend\"\n )\n else:\n freq_ave_cal = temp_calibrated\n freq_ave_cal[np.isinf(freq_ave_cal)] = np.nan\n\n rms = np.sqrt(np.mean((freq_ave_cal - np.mean(freq_ave_cal)) ** 2))\n\n ax.plot(\n self.freq.freq,\n freq_ave_cal,\n label=f\"Calibrated {load.spectrum.load_name} [RMS = {rms:.3f}]\",\n )\n\n temp_ave = self.source_thermistor_temps.get(load.load_name, load.temp_ave)\n\n if not hasattr(temp_ave, \"__len__\"):\n ax.axhline(temp_ave, color=\"C2\", label=\"Average thermistor temp\")\n else:\n ax.plot(\n self.freq.freq,\n temp_ave,\n color=\"C2\",\n label=\"Average thermistor temp\",\n )\n\n ax.set_ylim([np.nanmin(freq_ave_cal), np.nanmax(freq_ave_cal)])\n if xlabel:\n ax.set_xlabel(\"Frequency [MHz]\")\n\n if ylabel:\n ax.set_ylabel(\"Temperature [K]\")\n\n plt.ticklabel_format(useOffset=False)\n ax.grid()\n ax.legend()\n\n return plt.gcf()\n\n def get_load_residuals(self):\n \"\"\"Get residuals of the calibrated temperature for a each load.\"\"\"\n out = {}\n for source in self._sources:\n load = self._load_str_to_load(source)\n cal = self.calibrate(load)\n true = self.source_thermistor_temps[source]\n out[source] = cal - true\n return out\n\n def get_rms(self, smooth: int = 4):\n \"\"\"Return a dict of RMS values for each source.\n\n Parameters\n ----------\n smooth : int\n The number of bins over which to smooth residuals before taking the RMS.\n \"\"\"\n resids = self.get_load_residuals()\n out = {}\n for name, res in resids.items():\n if smooth > 1:\n res = convolve(res, Gaussian1DKernel(stddev=smooth), boundary=\"extend\")\n out[name] = np.sqrt(np.nanmean(res ** 2))\n return out\n\n def plot_calibrated_temps(self, bins=64, fig=None, ax=None):\n \"\"\"\n Plot all calibrated temperatures in a single figure.\n\n Parameters\n ----------\n bins : int\n Number of bins in the smoothed spectrum\n\n Returns\n -------\n fig :\n Matplotlib figure that was created.\n \"\"\"\n if fig is None or ax is None or len(ax) != len(self._sources):\n fig, ax = plt.subplots(\n len(self._sources),\n 1,\n sharex=True,\n gridspec_kw={\"hspace\": 0.05},\n figsize=(10, 12),\n )\n\n for i, source in enumerate(self._sources):\n self.plot_calibrated_temp(\n source,\n bins=bins,\n fig=fig,\n ax=ax[i],\n xlabel=i == 
(len(self._sources) - 1),\n )\n\n fig.suptitle(\"Calibrated Temperatures for Calibration Sources\", fontsize=15)\n return fig\n\n def write_coefficients(self, path: Optional[str] = None):\n \"\"\"\n Save a text file with the derived calibration co-efficients.\n\n Parameters\n ----------\n path : str\n Directory in which to write the file. The filename starts with\n `All_cal-params` and includes parameters of the class in the filename.\n By default, current directory.\n \"\"\"\n path = Path(path or \".\")\n\n if path.is_dir():\n path /= (\n f\"calibration_parameters_fmin{self.freq.freq.min()}_\"\n f\"fmax{self.freq.freq.max()}_C{self.cterms}_W{self.wterms}.txt\"\n )\n\n np.savetxt(\n path,\n [\n self.freq.freq,\n self.C1(),\n self.C2(),\n self.Tunc(),\n self.Tcos(),\n self.Tsin(),\n ],\n )\n\n def plot_coefficients(self, fig=None, ax=None):\n \"\"\"\n Make a plot of the calibration models, C1, C2, Tunc, Tcos and Tsin.\n\n Parameters\n ----------\n fig : Figure\n Optionally pass a matplotlib figure to add to.\n ax : Axis\n Optionally pass a matplotlib axis to pass to. Must have 5 axes.\n \"\"\"\n if fig is None or ax is None:\n fig, ax = plt.subplots(\n 5, 1, facecolor=\"w\", gridspec_kw={\"hspace\": 0.05}, figsize=(10, 9)\n )\n\n labels = [\n \"Scale ($C_1$)\",\n \"Offset ($C_2$) [K]\",\n r\"$T_{\\rm unc}$ [K]\",\n r\"$T_{\\rm cos}$ [K]\",\n r\"$T_{\\rm sin}$ [K]\",\n ]\n for i, (kind, label) in enumerate(\n zip([\"C1\", \"C2\", \"Tunc\", \"Tcos\", \"Tsin\"], labels)\n ):\n ax[i].plot(self.freq.freq, getattr(self, kind)())\n ax[i].set_ylabel(label, fontsize=13)\n ax[i].grid()\n plt.ticklabel_format(useOffset=False)\n\n if i == 4:\n ax[i].set_xlabel(\"Frequency [MHz]\", fontsize=13)\n\n fig.suptitle(\"Calibration Parameters\", fontsize=15)\n return fig\n\n def invalidate_cache(self):\n \"\"\"Invalidate all cached attributes so they must be recalculated.\"\"\"\n if not hasattr(self, \"_cached_\"):\n return\n\n for cache in self._cached_:\n del self.__dict__[cache]\n\n def update(self, **kwargs):\n \"\"\"Update the class in-place, invalidating the cache as well.\n\n Parameters\n ----------\n kwargs :\n All parameters to be updated.\n \"\"\"\n self.invalidate_cache()\n for k, v in kwargs.items():\n setattr(self, k, v)\n\n def write(self, filename: Union[str, Path]):\n \"\"\"\n Write all information required to calibrate a new spectrum to file.\n\n Parameters\n ----------\n filename : path\n The filename to write to.\n \"\"\"\n with h5py.File(filename, \"w\") as fl:\n # Write attributes\n fl.attrs[\"path\"] = str(self.io.original_path)\n fl.attrs[\"cterms\"] = self.cterms\n fl.attrs[\"wterms\"] = self.wterms\n fl.attrs[\"switch_path\"] = str(self.internal_switch.data.path)\n fl.attrs[\"switch_repeat_num\"] = self.internal_switch.data.repeat_num\n fl.attrs[\"switch_resistance\"] = self.internal_switch.resistance\n fl.attrs[\"switch_nterms\"] = self.internal_switch.n_terms[0]\n fl.attrs[\"switch_model\"] = str(self.internal_switch.model)\n fl.attrs[\"t_load\"] = self.open.spectrum.t_load\n fl.attrs[\"t_load_ns\"] = self.open.spectrum.t_load_ns\n\n fl[\"C1\"] = self.C1_poly.coefficients\n fl[\"C2\"] = self.C2_poly.coefficients\n fl[\"Tunc\"] = self.Tunc_poly.coefficients\n fl[\"Tcos\"] = self.Tcos_poly.coefficients\n fl[\"Tsin\"] = self.Tsin_poly.coefficients\n fl[\"frequencies\"] = self.freq.freq\n fl[\"lna_s11_real\"] = self.lna.s11_model(self.freq.freq).real\n fl[\"lna_s11_imag\"] = self.lna.s11_model(self.freq.freq).imag\n\n fl[\"internal_switch_s11_real\"] = np.real(\n 
self.internal_switch.s11_model(self.freq.freq)\n )\n fl[\"internal_switch_s11_imag\"] = np.imag(\n self.internal_switch.s11_model(self.freq.freq)\n )\n fl[\"internal_switch_s12_real\"] = np.real(\n self.internal_switch.s12_model(self.freq.freq)\n )\n fl[\"internal_switch_s12_imag\"] = np.imag(\n self.internal_switch.s12_model(self.freq.freq)\n )\n fl[\"internal_switch_s22_real\"] = np.real(\n self.internal_switch.s22_model(self.freq.freq)\n )\n fl[\"internal_switch_s22_imag\"] = np.imag(\n self.internal_switch.s22_model(self.freq.freq)\n )\n\n load_grp = fl.create_group(\"loads\")\n\n for name, load in self._loads.items():\n grp = load_grp.create_group(name)\n grp.attrs[\"s11_model\"] = yaml.dump(load.s11_model)\n grp[\"averaged_Q\"] = load.spectrum.averaged_Q\n grp[\"variance_Q\"] = load.spectrum.variance_Q\n grp[\"temp_ave\"] = load.temp_ave\n grp.attrs[\"n_integrations\"] = load.spectrum.n_integrations\n\n def to_calfile(self):\n \"\"\"Directly create a :class:`Calibration` object without writing to file.\"\"\"\n return Calibration.from_calobs(self)\n\n def inject(\n self,\n lna_s11: np.ndarray = None,\n source_s11s: Dict[str, np.ndarray] = None,\n c1: np.ndarray = None,\n c2: np.ndarray = None,\n t_unc: np.ndarray = None,\n t_cos: np.ndarray = None,\n t_sin: np.ndarray = None,\n averaged_spectra: Dict[str, np.ndarray] = None,\n thermistor_temp_ave: Dict[str, np.ndarray] = None,\n ) -> CalibrationObservation:\n \"\"\"Make a new :class:`CalibrationObservation` based on this, with injections.\n\n Parameters\n ----------\n lna_s11\n The LNA S11 as a function of frequency to inject.\n source_s11s\n Dictionary of ``{source: S11}`` for each source to inject.\n c1\n Scaling parameter as a function of frequency to inject.\n c2 : [type], optional\n Offset parameter to inject as a function of frequency.\n t_unc\n Uncorrelated temperature to inject (as function of frequency)\n t_cos\n Correlated temperature to inject (as function of frequency)\n t_sin\n Correlated temperature to inject (as function of frequency)\n averaged_spectra\n Dictionary of ``{source: spectrum}`` for each source to inject.\n\n Returns\n -------\n :class:`CalibrationObservation`\n A new observation object with the injected models.\n \"\"\"\n new = copy(self)\n new.invalidate_cache()\n new._injected_lna_s11 = lna_s11\n new._injected_source_s11s = source_s11s\n new._injected_c1 = c1\n new._injected_c2 = c2\n new._injected_t_unc = t_unc\n new._injected_t_cos = t_cos\n new._injected_t_sin = t_sin\n new._injected_averaged_spectra = averaged_spectra\n new._injected_source_temps = thermistor_temp_ave\n\n return new\n\n\[email protected]\nclass _LittleS11:\n s11_model: Callable = attr.ib()\n\n\[email protected]\nclass _LittleSpectrum:\n averaged_Q: np.ndarray = attr.ib()\n variance_Q: np.ndarray = attr.ib()\n n_integrations: int = attr.ib()\n\n\[email protected]\nclass _LittleLoad:\n reflections: _LittleS11 = attr.ib()\n spectrum: _LittleSpectrum = attr.ib()\n temp_ave: np.ndarray = attr.ib()\n\n\nclass Calibration:\n def __init__(self, filename: Union[str, Path]):\n \"\"\"\n A class defining an interface to a HDF5 file containing calibration information.\n\n Parameters\n ----------\n filename : str or Path\n The path to the calibration file.\n \"\"\"\n self.calfile = Path(filename)\n\n with h5py.File(filename, \"r\") as fl:\n self.calobs_path = fl.attrs[\"path\"]\n self.cterms = int(fl.attrs[\"cterms\"])\n self.wterms = int(fl.attrs[\"wterms\"])\n self.t_load = fl.attrs.get(\"t_load\", 300)\n self.t_load_ns = 
fl.attrs.get(\"t_load_ns\", 400)\n\n self.C1_poly = np.poly1d(fl[\"C1\"][...])\n self.C2_poly = np.poly1d(fl[\"C2\"][...])\n self.Tcos_poly = np.poly1d(fl[\"Tcos\"][...])\n self.Tsin_poly = np.poly1d(fl[\"Tsin\"][...])\n self.Tunc_poly = np.poly1d(fl[\"Tunc\"][...])\n\n self.freq = FrequencyRange(fl[\"frequencies\"][...])\n\n self._loads = {}\n if \"loads\" in fl:\n lg = fl[\"loads\"]\n\n self.load_names = list(lg.keys())\n\n for name, grp in lg.items():\n self._loads[name] = _LittleLoad(\n reflections=_LittleS11(\n s11_model=yaml.load(\n grp.attrs[\"s11_model\"], Loader=yaml.FullLoader\n ).at(x=self.freq.freq)\n ),\n spectrum=_LittleSpectrum(\n averaged_Q=grp[\"averaged_Q\"][...],\n variance_Q=grp[\"variance_Q\"][...],\n n_integrations=grp.attrs[\"n_integrations\"],\n ),\n temp_ave=grp[\"temp_ave\"][...],\n )\n\n self._lna_s11_rl = Spline(self.freq.freq, fl[\"lna_s11_real\"][...])\n self._lna_s11_im = Spline(self.freq.freq, fl[\"lna_s11_imag\"][...])\n\n self._intsw_s11_rl = Spline(\n self.freq.freq, fl[\"internal_switch_s11_real\"][...]\n )\n self._intsw_s11_im = Spline(\n self.freq.freq, fl[\"internal_switch_s11_imag\"][...]\n )\n self._intsw_s12_rl = Spline(\n self.freq.freq, fl[\"internal_switch_s12_real\"][...]\n )\n self._intsw_s12_im = Spline(\n self.freq.freq, fl[\"internal_switch_s12_imag\"][...]\n )\n self._intsw_s22_rl = Spline(\n self.freq.freq, fl[\"internal_switch_s22_real\"][...]\n )\n self._intsw_s22_im = Spline(\n self.freq.freq, fl[\"internal_switch_s22_imag\"][...]\n )\n\n @classmethod\n def from_calobs(cls, calobs: CalibrationObservation) -> Calibration:\n \"\"\"Generate a :class:`Calibration` from an in-memory observation.\"\"\"\n tmp = tempfile.mktemp()\n calobs.write(tmp)\n return cls(tmp)\n\n def lna_s11(self, freq=None):\n \"\"\"Get the LNA S11 at given frequencies.\"\"\"\n if freq is None:\n freq = self.freq.freq\n return self._lna_s11_rl(freq) + 1j * self._lna_s11_im(freq)\n\n def internal_switch_s11(self, freq=None):\n \"\"\"Get the S11 of the internal switch at given frequencies.\"\"\"\n if freq is None:\n freq = self.freq.freq\n return self._intsw_s11_rl(freq) + 1j * self._intsw_s11_im(freq)\n\n def internal_switch_s12(self, freq=None):\n \"\"\"Get the S12 of the internal switch at given frequencies.\"\"\"\n if freq is None:\n freq = self.freq.freq\n return self._intsw_s12_rl(freq) + 1j * self._intsw_s12_im(freq)\n\n def internal_switch_s22(self, freq=None):\n \"\"\"Get the S22 of the internal switch at given frequencies.\"\"\"\n if freq is None:\n freq = self.freq.freq\n return self._intsw_s22_rl(freq) + 1j * self._intsw_s22_im(freq)\n\n def C1(self, freq=None):\n \"\"\"Evaluate the Scale polynomial at given frequencies.\"\"\"\n if freq is None:\n freq = self.freq.freq\n return self.C1_poly(self.freq.normalize(freq))\n\n def C2(self, freq=None):\n \"\"\"Evaluate the Offset polynomial at given frequencies.\"\"\"\n if freq is None:\n freq = self.freq.freq\n return self.C2_poly(self.freq.normalize(freq))\n\n def Tcos(self, freq=None):\n \"\"\"Evaluate the cos temperature polynomial at given frequencies.\"\"\"\n if freq is None:\n freq = self.freq.freq\n return self.Tcos_poly(self.freq.normalize(freq))\n\n def Tsin(self, freq=None):\n \"\"\"Evaluate the sin temperature polynomial at given frequencies.\"\"\"\n if freq is None:\n freq = self.freq.freq\n return self.Tsin_poly(self.freq.normalize(freq))\n\n def Tunc(self, freq=None):\n \"\"\"Evaluate the uncorrelated temperature polynomial at given frequencies.\"\"\"\n if freq is None:\n freq = self.freq.freq\n 
return self.Tunc_poly(self.freq.normalize(freq))\n\n def _linear_coefficients(self, freq, ant_s11):\n return rcf.get_linear_coefficients(\n ant_s11,\n self.lna_s11(freq),\n self.C1(freq),\n self.C2(freq),\n self.Tunc(freq),\n self.Tcos(freq),\n self.Tsin(freq),\n self.t_load,\n )\n\n def calibrate_temp(self, freq: np.ndarray, temp: np.ndarray, ant_s11: np.ndarray):\n \"\"\"\n Calibrate given uncalibrated spectrum.\n\n Parameters\n ----------\n freq : np.ndarray\n The frequencies at which to calibrate\n temp : np.ndarray\n The temperatures to calibrate (in K).\n ant_s11 : np.ndarray\n The antenna S11 for the load.\n\n Returns\n -------\n temp : np.ndarray\n The calibrated temperature.\n \"\"\"\n a, b = self._linear_coefficients(freq, ant_s11)\n return temp * a + b\n\n def decalibrate_temp(self, freq, temp, ant_s11):\n \"\"\"\n De-calibrate given calibrated spectrum.\n\n Parameters\n ----------\n freq : np.ndarray\n The frequencies at which to calibrate\n temp : np.ndarray\n The temperatures to calibrate (in K).\n ant_s11 : np.ndarray\n The antenna S11 for the load.\n\n Returns\n -------\n temp : np.ndarray\n The calibrated temperature.\n\n Notes\n -----\n Using this and then :method:`calibrate_temp` immediately should be an identity\n operation.\n \"\"\"\n a, b = self._linear_coefficients(freq, ant_s11)\n return (temp - b) / a\n\n def calibrate_Q(\n self, freq: np.ndarray, q: np.ndarray, ant_s11: np.ndarray\n ) -> np.ndarray:\n \"\"\"\n Calibrate given power ratio spectrum.\n\n Parameters\n ----------\n freq : np.ndarray\n The frequencies at which to calibrate\n q : np.ndarray\n The power ratio to calibrate.\n ant_s11 : np.ndarray\n The antenna S11 for the load.\n\n Returns\n -------\n temp : np.ndarray\n The calibrated temperature.\n \"\"\"\n uncal_temp = self.t_load_ns * q + self.t_load\n\n return self.calibrate_temp(freq, uncal_temp, ant_s11)\n\n\ndef perform_term_sweep(\n calobs: CalibrationObservation,\n delta_rms_thresh: float = 0,\n max_cterms: int = 15,\n max_wterms: int = 15,\n explore_run_nums: bool = False,\n explore_repeat_nums: bool = False,\n direc=\".\",\n verbose=False,\n) -> CalibrationObservation:\n \"\"\"For a given calibration definition, perform a sweep over number of terms.\n\n There are options to save _every_ calibration solution, or just the \"best\" one.\n\n Parameters\n ----------\n calobs: :class:`CalibrationObservation` instance\n The definition calibration class. The `cterms` and `wterms` in this instance\n should define the *lowest* values of the parameters to sweep over.\n delta_rms_thresh : float\n The threshold in change in RMS between one set of parameters and the next that\n will define where to cut off. If zero, will run all sets of parameters up to\n the maximum terms specified.\n max_cterms : int\n The maximum number of cterms to trial.\n max_wterms : int\n The maximum number of wterms to trial.\n explore_run_nums : bool\n Whether to iterate over S11 run numbers to find the best residuals.\n explore_repeat_nums : bool\n Whether to iterate over S11 repeat numbers to find the best residuals.\n direc : str\n Directory to write resultant :class:`Calibration` file to.\n verbose : bool\n Whether to write out the RMS values derived throughout the sweep.\n\n Notes\n -----\n When exploring run/repeat nums, run nums are kept constant within a load (i.e. the\n match/short/open etc. 
all have either run_num=1 or run_num=2 for the same load.\n This is physically motivated.\n \"\"\"\n cterms = range(calobs.cterms, max_cterms)\n wterms = range(calobs.wterms, max_wterms)\n\n winner = np.zeros(len(cterms), dtype=int)\n\n s11_keys = [\"switching_state\", \"receiver_reading\"] + list(io.LOAD_ALIASES.keys())\n if explore_repeat_nums:\n # Note that we don't explore run_nums for spectra/resistance, because it's rare\n # to have those, and they'll only exist if one got completely botched (and that\n # should be set by the user).\n rep_num = {\n k: range(1, getattr(calobs.io.s11, k).max_repeat_num + 1) for k in s11_keys\n }\n else:\n rep_num = {k: [getattr(calobs.io.s11, k).repeat_num] for k in s11_keys}\n\n rep_num = tools.dct_of_list_to_list_of_dct(rep_num)\n\n if explore_run_nums:\n run_num = {\n \"switching_state\": range(\n 1, calobs.io.s11.get_highest_run_num(\"SwitchingState\") + 1\n ),\n \"receiver_reading\": range(\n 1, calobs.io.s11.get_highest_run_num(\"ReceiverReading\") + 1\n ),\n }\n else:\n run_num = {\n \"switching_state\": [calobs.io.s11.switching_state.run_num],\n \"receiver_reading\": [calobs.io.s11.receiver_reading.run_num],\n }\n\n run_num = tools.dct_of_list_to_list_of_dct(run_num)\n\n best_rms = np.inf\n for this_rep_num in rep_num:\n for this_run_num in run_num:\n\n tmp_run_num = copy(calobs.io.run_num)\n tmp_run_num.update(this_run_num)\n\n # Change the base io.CalObs because it will change with rep/run.\n calobs.io = io.CalibrationObservation(\n path=calobs.io.path,\n run_num=tmp_run_num,\n repeat_num=this_rep_num,\n fix=False,\n compile_from_def=calobs.compiled_from_def,\n include_previous=calobs.previous_included,\n )\n\n calobs.lna = LNA(\n calobs.io.s11.receiver_reading,\n f_low=calobs.freq.min,\n f_high=calobs.freq.max,\n resistance=calobs.lna.resistance,\n )\n\n # If we're changing anything else, we need to change each load.\n for name, load in calobs._loads.items():\n load.reflections = LoadS11.from_path(\n load_name=name,\n path=calobs.io.path,\n repeat_num_load=this_rep_num[name],\n run_num_switch=this_run_num[\"switching_state\"],\n repeat_num_switch=this_rep_num[\"switching_state\"],\n )\n\n if verbose:\n print(\n f\"SWEEPING SwSt={calobs.io.s11.switching_state.repeat_num}, \"\n f\"RcvRd={calobs.io.s11.receiver_reading.repeat_num} \"\n f\"[Sw={calobs.io.s11.switching_state.run_num}, \"\n f\"RR={calobs.io.s11.receiver_reading.run_num}, \"\n f\"open={calobs.io.s11.open.run_num}, \"\n f\"short={calobs.io.s11.short.run_num}, \"\n f\"ambient={calobs.io.s11.ambient.run_num}, \"\n f\"hot={calobs.io.s11.hot_load.run_num}]\"\n )\n print(\"-\" * 30)\n\n rms = np.zeros((len(cterms), len(wterms)))\n for i, c in enumerate(cterms):\n for j, w in enumerate(wterms):\n calobs.update(cterms=c, wterms=w)\n res = calobs.get_load_residuals()\n dof = sum(len(r) for r in res.values()) - c - w\n\n rms[i, j] = np.sqrt(\n sum(np.nansum(np.square(x)) for x in res.values()) / dof\n )\n\n if verbose:\n print(f\"Nc = {c:02}, Nw = {w:02}; RMS/dof = {rms[i, j]:1.3e}\")\n\n # If we've decreased by more than the threshold, this wterms becomes\n # the new winner (for this number of cterms)\n if j > 0 and rms[i, j] >= rms[i, j - 1] - delta_rms_thresh:\n winner[i] = j - 1\n break\n\n if (\n i > 0\n and rms[i, winner[i]]\n >= rms[i - 1, winner[i - 1]] - delta_rms_thresh\n ):\n break\n\n if verbose:\n print(\n f\"Best parameters found for Nc={cterms[i-1]}, \"\n f\"Nw={wterms[winner[i-1]]}, \"\n f\"with RMS = {rms[i-1, winner[i-1]]}.\"\n )\n print()\n\n if rms[i - 1, winner[i - 1]] < 
best_rms:\n best_run_combo = (\n calobs.io.run_num,\n calobs.io.s11.receiver_reading.repeat_num,\n calobs.io.s11.switching_state.repeat_num,\n )\n best_cterms = cterms[i - 1]\n best_wterms = wterms[winner[i - 1]]\n\n if verbose and (explore_repeat_nums or explore_run_nums):\n print(\"The very best parameters were found were for:\")\n print(f\"\\tSwitchingState Repeat = {best_run_combo[2]}\")\n print(f\"\\tReceiverReading Repeat = {best_run_combo[1]}\")\n print(f\"\\tRun Numbers = {best_run_combo[0]}\")\n print(f\"\\t# C-terms = {best_cterms}\")\n print(f\"\\t# W-terms = {best_wterms}\")\n\n calobs.update(cterms=best_cterms, wterms=best_wterms)\n calobs.io = io.CalibrationObservation(\n path=calobs.io.path,\n run_num=best_run_combo[0],\n repeat_num={\n \"switching_state\": best_run_combo[2],\n \"receiver_reading\": best_run_combo[1],\n },\n fix=False,\n compile_from_def=calobs.compiled_from_def,\n include_previous=calobs.previous_included,\n )\n\n calobs.lna = LNA(\n calobs.io.s11.receiver_reading,\n f_low=calobs.freq.min,\n f_high=calobs.freq.max,\n resistance=calobs.lna.resistance,\n )\n\n if direc is not None:\n direc = Path(direc)\n if not direc.exists():\n direc.mkdir(parents=True)\n\n pth = Path(calobs.path).parent.name\n\n pth = str(pth) + f\"_c{calobs.cterms}_w{calobs.wterms}.h5\"\n calobs.write(direc / pth)\n\n return calobs\n" ]
[ [ "numpy.ones_like", "numpy.nanvar", "numpy.nanmean", "scipy.interpolate.InterpolatedUnivariateSpline", "matplotlib.pyplot.gcf", "numpy.abs", "numpy.isnan", "numpy.mean", "matplotlib.pyplot.ticklabel_format", "matplotlib.pyplot.subplots", "numpy.poly1d", "numpy.square", "numpy.array", "numpy.nanmax", "numpy.isinf", "numpy.nanmin", "numpy.angle", "numpy.sqrt", "numpy.genfromtxt" ] ]
aivision2020/OctSceneScan
[ "3b22ecb4f701270f457a7c2d2702f758b8d584cf" ]
[ "test_module.py" ]
[ "from pathlib import Path\nimport copy\nimport time\nimport torch.optim as optim\nimport numpy as np\nimport torch\nfrom torch.autograd import Variable\nfrom model import *\nfrom data_utils import *\nimport torch.nn as nn\nfrom loguru import logger\n\nfeature_dim = 8\nblock_size = 16\npad=2\nn_conv=3\nthresh=0.5\ndebug = False\n\ndef test_bottom_io():\n tsdf = [torch.from_numpy(np.random.rand(1, 1, block_size+2*pad+2*n_conv,\n block_size+2*pad+2*n_conv,\n block_size+2*pad+2*n_conv)).float().to(device)]\n prev = {(0, 0, 0): torch.from_numpy(np.random.rand(1, feature_dim,\n block_size//2+2*pad, block_size//2+2*pad, block_size//2+2*pad)\n ).float().to(device)}\n mod = BottomLevel(feature_dim, block_size=block_size)\n if device == 'cuda':\n mod.cuda()\n out = mod(tsdf, prev)\n assert type(out) == list\n assert len(out) == 1\n out = out[0]\n assert len(out) == 1\n for X in out.keys():\n assert out[X].shape == (1, 2, block_size, block_size, block_size), out[X].shape\n\n\ndef test_convtrans():\n conv1 = nn.ConvTranspose3d(10, 10, kernel_size=4, stride=2, output_padding=0, padding=0, bias=False)\n dat = torch.ones(1, 10, block_size, block_size, block_size)\n y = conv1(dat)\n assert y.shape[-1] == block_size*2+2 , (y.shape, dat.shape)\n\n pad = nn.ReplicationPad3d(1)\n conv1 = nn.ConvTranspose3d(1, 1, kernel_size=3, stride=2,\n output_padding=1, padding=1, bias=False)\n dat = Variable(torch.ones(1, 1, 4, 4, 4))\n y = conv1(dat)\n assert y.shape[-1] == 8, y.shape\n\n\ndef test_data():\n data = TsdfGenerator(64)\n vis = visdom.Visdom()\n gt, tsdf_in = data.__getitem__(0)\n assert np.abs(tsdf_in).max() < 33\n\n\ndef test_ellipsoid():\n arr = ellipsoid(10, 10, 10, levelset=True)*10 # the output is ~normalized. multiple by 10\n assert arr.shape == (23, 23, 23), arr.shape\n dist = np.sqrt(11**2*3)-10\n assert np.abs(arr[0, 0, 0]) > dist, (arr[0, 0, 0], dist)\n print(arr[0, 0, 0], dist)\n\n a, b, c = 10, 15, 25\n arr = ellipsoid(a, b, c, levelset=True)\n # if we move 1 voxel in space the sdf should also not change by more than 1\n # compare to 1.01 for numeric reasons\n assert np.all(np.abs(np.diff(arr, axis=0)) <= 1.01), np.abs(np.diff(arr, axis=0)).max()\n assert np.all(np.abs(np.diff(arr, axis=1)) <= 1.01)\n assert np.all(np.abs(np.diff(arr, axis=2)) <= 1.01)\n\n\ndef test_criteria_trivial():\n data = TsdfGenerator(block_size, sigma=0.)\n gt, tsdf_in = data.__getitem_split__()\n gt = gt[None, :] # add dim for batch\n assert np.abs(tsdf_in).max() < 33\n gt_label = np.zeros_like(gt)\n gt_label[gt >= 0] = 1\n gt_label = torch.from_numpy(gt_label.astype(int))\n criteria = OctreeCrossEntropyLoss(gt_label, block_size)\n assert len(criteria.gt_octree) == 1\n mock_out = np.concatenate((tsdf_in[None,:]<0, tsdf_in[None,:]>=0),\n axis=1).astype(float)\n mock_out=1000*(mock_out-0.5)\n mock_out = [{(0,0,0):torch.from_numpy(mock_out).float()}]\n loss = criteria(mock_out)\n assert loss.dim()==0\n assert loss < 0.01, loss\n\ndef test_gt():\n pass\n #get gt, \n #get gt_octree\n #retnder gt\n #render gt_octree\n\ndef test_criteria(levels=2):\n res=2**(levels-1)*block_size\n data = TsdfGenerator(res, sigma=0.9)\n gt, tsdf_in = data.__getitem_split__()\n gt = gt[None, :] # add dim for batch\n assert np.abs(tsdf_in).max() < res\n #labels should be symetric\n def count_label(gt, label, level=1):\n gt_label = np.zeros_like(gt)\n gt_label[gt >= 0] = 1\n gt_label = torch.from_numpy(gt_label.astype(int))\n criteria = OctreeCrossEntropyLoss(gt_label, block_size)\n gt=criteria.gt_octree[level]\n return 
np.count_nonzero(np.array(list(gt.values()))==label)\n\n n_outside = count_label(gt, OUTSIDE)\n n_inside = count_label(gt, INSIDE)\n n_mixed = count_label(gt, MIXED)\n assert n_outside+n_inside+n_mixed==(2**(levels-2))**3\n rev_inside = count_label(-gt, OUTSIDE)\n assert n_inside==rev_inside, (n_inside, rev_inside)\n\n\n gt_label = np.zeros_like(gt)\n gt_label[gt >= 0] = 1\n gt_label = torch.from_numpy(gt_label.astype(int))\n criteria = OctreeCrossEntropyLoss(gt_label, block_size)\n assert len(criteria.gt_octree) == levels\n assert len(criteria.gt_octree[0]) == (2**(levels-1))**3, len(criteria.gt_octree[0])\n assert len(criteria.gt_octree[-1]) == 1, len(criteria.gt_octree[-1])\n for l, level in enumerate(criteria.gt_octree):\n for k, v in level.items():\n assert v.dim() > 0, (l, k, v)\n\n\ndef test_basic_debug():\n T = torch.zeros(1,1,36,36,36)\n outplane = 16\n mod = nn.Conv3d(1, outplane, kernel_size=3, stride=1,\n padding=0, bias=False)\n T = mod(T)\n mod = nn.BatchNorm3d(outplane)\n T = mod(T)\n mod = nn.ReLU(inplace=True)\n T = mod(T)\n mod = nn.Conv3d(outplane, outplane, kernel_size=3, stride=1, \n padding=0, bias=False)\n T = mod(T)\n mod = nn.BatchNorm3d(outplane)\n T = mod(T)\n assert T.shape == (1,16,32,32,32)\n\n\ndef test_simple_net_single_data():\n data = TsdfGenerator(block_size, sigma=0.9)\n vis = visdom.Visdom()\n gt, tsdf_in = data.__getitem__(0)\n gt = gt[None, :] # add dim for batch\n assert np.abs(tsdf_in).max() < block_size\n gt_label = np.zeros_like(gt)\n gt_label[gt >= 0] = 1\n gt_label = torch.from_numpy(gt_label.astype(int)).to(device)\n rep_pad = nn.ReplicationPad3d(pad+n_conv)\n tsdf = [rep_pad(torch.from_numpy(copy.copy(tsdf_in)[None, :]).float().to(device))]\n #prev = {(0, 0, 0): torch.rand(1, feature_dim, block_size//2, block_size//2,\n # block_size//2).float().to(device)}\n prev = {(0, 0, 0): torch.from_numpy(np.random.rand(1, feature_dim,\n block_size//2+2*pad, block_size//2+2*pad, block_size//2+2*pad)\n ).float().to(device)}\n #assert tsdf[0].shape == (1, 1, block_size, block_size, block_size)\n assert gt_label.shape == (1, block_size, block_size, block_size)\n criteria = OctreeCrossEntropyLoss(gt_label, block_size)\n mod = BottomLevel(feature_dim, block_size)\n if device=='cuda':\n mod.cuda()\n criteria.cuda()\n optimizer = optim.Adam(mod.parameters(), lr=0.001) # , momentum=0.9)\n for it in range(1, 100):\n out = mod(tsdf, prev)\n assert len(out) == 1\n assert out[0][(0,0,0)].shape[1] == 2, out.shape\n loss = criteria(out)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n if (it+1) % 10 == 0:\n sdf_ = octree_to_sdf(out, block_size)\n print('level ', np.count_nonzero(sdf_ == 1))\n err = plotVoxelVisdom(gt[0], sdf_, tsdf_in[0], vis)\n assert np.abs(tsdf_in).max() < 33\n print(err)\n\n print(it, loss)\n assert err < 2\n\n\ndef test_bottom_layer( block_size = 32):\n dataset = TsdfGenerator(block_size, n_elips=1, sigma=0.9, epoch_size=1000)\n train_loader = torch.utils.data.DataLoader(dataset, batch_size=1,\n num_workers=4)\n\n vis = visdom.Visdom()\n mod = BottomLevel(feature_dim, block_size)\n if device=='cuda':\n mod.cuda()\n optimizer = optim.SGD(mod.parameters(), lr=0.0001, momentum=0.9)\n m = nn.ReplicationPad3d(mod.pad+mod.n_conv)\n prev = {(0, 0, 0): torch.rand(1, feature_dim,\n block_size//2+2*pad, block_size//2+2*pad, block_size//2+2*pad\n ).float().to(device)}\n gt_label = None\n for it, (gt, tsdf_in) in enumerate(train_loader):\n assert np.abs(tsdf_in).max() < 33\n assert gt.max() > 1 and gt.min() < -1\n gt_label = 
torch.ones_like(gt)*INSIDE\n gt_label[gt >= 0] = OUTSIDE\n gt_label = gt_label.long().to(device)\n tsdf = [m(tsdf_in).float().to(device)]\n for T in prev.values():\n assert torch.all(torch.isfinite(T))\n for T in tsdf:\n assert torch.all(torch.isfinite(T))\n out = mod(tsdf, prev)\n assert out[0][(0,0,0)].max()>out[0][(0,0,0)].min()\n for oct in out:\n if not np.all([torch.all(torch.isfinite(o)) for o in oct.values()]):\n import ipdb; ipdb.set_trace()\n criteria = OctreeCrossEntropyLoss(gt_label, block_size)\n if device=='cuda':\n criteria.cuda()\n loss = criteria(out)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n print(it, loss)\n if it>1 and it%100 == 0:\n sdf_ = octree_to_sdf(out, block_size)\n err = plotVoxelVisdom(gt[0].numpy(), sdf_, tsdf_in[0][0].numpy(), vis)\n print(it, err)\n assert err < 2, err\n\n\ndef test_2tier_net_single_data():\n res = block_size*2\n dataset = TsdfGenerator(res, n_elips=3, sigma=0.9, epoch_size=100)\n\n vis = visdom.Visdom()\n mod = TopLevel(feature_dim, BottomLevel(feature_dim, block_size), block_size=block_size)\n if device == 'cuda':\n mod.cuda()\n\n optimizer = optim.Adam(mod.parameters(), lr=0.01)#, momentum=0.9)\n gt, tsdf_in = dataset.__getitem__(0)\n assert np.abs(tsdf_in).max() < 33\n assert gt.max() > 1 and gt.min() < -1\n gt = torch.from_numpy(gt[None, :])\n gt_label = torch.zeros_like(gt)\n gt_label[gt >= 0] = 1\n gt_label = gt_label.long().to(device)\n criteria = OctreeCrossEntropyLoss(gt_label, block_size)\n if device == 'cuda':\n criteria.cuda()\n tsdf = torch.from_numpy(copy.copy(tsdf_in)[None, :]).float().to(device)\n for it in range(1000):\n out = mod(tsdf)\n assert len(out) == 2\n for l in out[1:]:\n for v in l.values():\n # only level 0 can have a full bloc\n assert v.shape[-1] < block_size, (v.shape)\n loss = criteria(out)\n assert len(out) == 2\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n print(it, loss)\n if (it+1) % 10 == 0:\n #mod.eval()\n sdf_ = octree_to_sdf(out, block_size)\n err = plotVoxelVisdom(gt[0].numpy(), sdf_, tsdf_in[0], vis)\n #mod.train()\n print(it, err)\n assert err < 2,err\n\n\ndef test_4tier_data(block_size=block_size):\n res=block_size*(2**3)\n dataset = TsdfGenerator(res, n_elips=3, sigma=0.9, epoch_size=1000)\n gt, tsdf = dataset.__getitem__(0)\n\n mod = BottomLevel(feature_dim, block_size)\n for i in range(2): #add 2 mid layers\n print('adding mid layer')\n mod = MidLevel(feature_dim, feature_dim, mod, block_size,\n thresh=thresh, budget=4)\n mod = TopLevel(feature_dim, mod, block_size=block_size)\n out = mod(torch.from_numpy(tsdf[None,:]).float())\n\n\n\ndef test_2tier_net(res=64, block_size=block_size):\n dataset = TsdfGenerator(res, n_elips=1, sigma=0.9, epoch_size=10000, debug=False)\n train_loader = torch.utils.data.DataLoader(dataset, batch_size=1,\n num_workers=2)\n\n vis = visdom.Visdom()\n Force = False\n if not Force and Path('model_2tier.pth').exists():\n mod = torch.load('model_2tier.pth')\n else:\n layers = []\n layers.append(BottomLevel(feature_dim, block_size))\n while block_size*2**len(layers) <= res/2:\n print('adding mid layer', len(layers))\n layers.append(MidLevel(feature_dim, feature_dim, layers[-1],\n block_size, thresh=0.5, budget=4))\n mod = TopLevel(feature_dim, layers[-1], block_size=block_size)\n if device == 'cuda':\n mod.cuda()\n optimizer = optim.SGD(mod.parameters(), lr=0.0001, momentum=0.95)\n for it, (gt, tsdf_in) in enumerate(train_loader):\n assert np.abs(tsdf_in).max() < res\n assert gt.max() > 1 and gt.min() < -1\n gt_label = 
torch.zeros_like(gt, device=device)\n gt_label[gt >= 0] = 1\n gt_label = gt_label.long().to(device)\n criteria = OctreeCrossEntropyLoss(gt_label, block_size)\n if device == 'cuda':\n criteria.cuda()\n #tsdf = tsdf_in.float().cuda()\n t_start = time.time()\n tsdf = tsdf_in.float().to(device)\n pred = mod(tsdf)\n forward_t = time.time()-t_start\n t = time.time()\n loss = criteria(pred)\n loss_t = time.time()-t\n t = time.time()\n optimizer.zero_grad()\n loss.backward()\n back_t = time.time()-t\n t = time.time()\n optimizer.step()\n step_t = time.time()-t\n t = time.time()\n print(it, loss.data)\n print('valuated ', [len(o) for o in pred])\n print('GT voxels ', np.count_nonzero([o.numel()>3 for o in criteria.gt_octree]))\n print('timing:{total:.3f}. forward {forward_t:.3f}, loss {loss_t:.3f}, back {back_t:.3f}, step {step_t:.3f}'.format(\n total=t-t_start, forward_t=forward_t, loss_t=loss_t, back_t=back_t, step_t=step_t))\n if (it+1) % 100 == 0:\n mod.eval()\n out = mod(tsdf)\n loss = criteria(out)\n for i in range(len(out)):\n resample = (2**i)\n print('Eval: level %d, %d/%d evaluated' % (i, len(out[i]),\n (res/block_size/resample)**3))\n sdf_ = octree_to_sdf(out, block_size)\n err = plotVoxelVisdom(gt[0].numpy(), sdf_, tsdf_in[0][0].numpy(), vis)\n if loss.data<1:\n import ipdb; ipdb.set_trace()\n mod.train()\n print(it, err)\n torch.save(mod, 'model_2tier.pth')\n if err < 2 :\n break\n #assert err < 2\n\ndef create_model(block_size, feature_dim, res):\n layers = []\n layers.append(BottomLevel(feature_dim, block_size))\n while block_size*2**len(layers) <= res/2:\n print('adding mid layer', len(layers))\n layers.append(MidLevel(feature_dim, feature_dim, layers[-1],\n block_size, thresh=0.1))\n mod = TopLevel(feature_dim, layers[-1], block_size=block_size)\n return mod\n\n\ndef test_simple_split(res=64, block_size=block_size):\n dataset = TsdfGenerator(res, n_elips=3, sigma=0.9, epoch_size=1000, debug=True)\n vis = visdom.Visdom()\n\n mod = torch.load('model.pth')\n if device == 'cuda':\n mod.cuda()\n mod.eval()\n gt, tsdf_in = dataset.__getitem_split__()\n gt = torch.from_numpy(gt[None, :])\n tsdf_in = torch.from_numpy(tsdf_in[None, :])\n\n gt_label = torch.zeros_like(gt, device=device)\n gt_label[gt >= 0] = 1\n gt_label = gt_label.long().to(device)\n criteria = OctreeCrossEntropyLoss(gt_label, block_size)\n if device == 'cuda':\n criteria.cuda()\n tsdf = tsdf_in.float().to(device)\n pred = mod(tsdf)\n loss = criteria(pred)\n print(loss.data)\n print('evaluated ', [len(o) for o in pred])\n\n for X in pred[0]:\n X_ = tuple(np.array(X)//2)\n print (X, pred[1][X_])\n assert pred[1][X_][0,2]>0.5\n sdf_ = octree_to_sdf(pred, block_size)\n err = plotVoxelVisdom(gt[0].numpy(), sdf_, tsdf_in[0][0].numpy(), vis)\n import ipdb; ipdb.set_trace()\n for X,v in criteria.gt_octree[0].items():\n if v.numel()>1:\n assert X[2]==1 #that's how we built the space\n\n\ndef test_split_subtree(padding=0):\n feat = torch.rand(1, feature_dim, block_size+2*padding,\n block_size+2*padding,\n block_size+2*padding\n ).float()\n split = split_tree(feat,padding=padding)\n assert len(split) == 8, len(split)\n assert torch.all(split[(0, 0, 0)][0, :, padding, padding, padding] ==\n feat[0, :, padding, padding, padding])\n assert torch.all(split[(1, 0, 0)][0, :, padding, padding, padding] ==\n feat[0, :, block_size//2+padding, padding, padding])\n split[(1, 0, 0)][0, 0, padding, padding, padding] = 12.13\n #this is no longer true, I don't know how to do this inplace\n #assert feat[0, 0, block_size//2, 0, 0] == 12.13\n\ndef 
test_split_subtree_with_padding():\n padding=2\n feat = torch.rand(1, feature_dim, block_size, block_size,\n block_size).float()\n split = split_tree(feat, padding=2)\n assert len(split) == 8, len(split)\n octant = split[(0,0,0)]\n assert torch.all(octant[0, :padding, 0, 0, 0] == 0)\n assert torch.all(octant[0, -padding:, 0, 0, 0] == 0)\n assert octant.shape[-3:]==feat.shape[-3:]//2+padding*2\n assert torch.all(octant[0, padding:-padding, 0, 0, 0] == feat[0, :, 0, 0, 0])\n assert torch.all(octant[0, padding:-padding, 0, 0, 0] == feat[0, :, 0, 0, 0])\n assert torch.all(split[(1, 0, 0)][0, :, padding, padding, padding] ==\n feat[0, :, block_size//2, 0, 0])\n split[(1, 0, 0)][0, 0, 0, 0, 0] = 12.13\n assert feat[0, 0, block_size//2+padding, 0, 0] == 12.13\n\nif __name__ == '__main__':\n import sys\n logger.remove()\n logger.add(sys.stderr , format=\"{time} {level} {message}\", level=\"INFO\")\n\n #test_4tier_data()\n #test_criteria_trivial()\n #test_criteria()\n #test_criteria(4)\n #test_data()\n #test_ellipsoid()\n #test_convtrans()\n #test_split_subtree()\n #test_split_subtree(padding=2)\n #test_basic_debug()\n #test_bottom_io()\n #test_simple_net_single_data()\n #test_bottom_layer()\n # TODO why does this not converge? interesting\n #test_2tier_net_single_data()\n #test_2tier_net(res=32, block_size=block_size)\n test_2tier_net(res=64, block_size=block_size)\n test_simple_split(res=64, block_size=block_size)\n import ipdb; ipdb.set_trace()\n test_2tier_net(res=128, block_size=block_size)\n" ]
[ [ "torch.utils.data.DataLoader", "numpy.diff", "torch.rand", "torch.nn.BatchNorm3d", "torch.save", "numpy.abs", "torch.nn.ConvTranspose3d", "torch.all", "torch.from_numpy", "numpy.random.rand", "torch.isfinite", "torch.ones_like", "torch.nn.ReplicationPad3d", "torch.ones", "torch.load", "torch.zeros", "numpy.count_nonzero", "numpy.array", "numpy.zeros_like", "torch.zeros_like", "numpy.sqrt", "numpy.concatenate", "torch.nn.ReLU", "torch.nn.Conv3d" ] ]
soumitrasamanta/FragGenie
[ "9ce493d88e3479a286ce88dc0c5b199ea7c7e441" ]
[ "fragment.py" ]
[ "\"\"\"\n-----------------------------------------------------------------------------\nAUTHOR: Soumitra Samanta ([email protected])\n-----------------------------------------------------------------------------\n\"\"\"\n\nimport subprocess\nimport os\nimport numpy as np\nfrom datetime import datetime\nimport pandas as pd\n\nfrom rdkit import Chem\nfrom rdkit.Chem import Descriptors\n\n__all__ = [\n 'FragGenie'\n]\n\nclass FragGenie():\n \n def __init__(self, dir_fraggenie=''):\n \n self.dir_fraggenie = dir_fraggenie\n \n def to_numpy(self, array_str, sep=','):\n\n return np.fromstring(array_str[1:-1], sep=sep)\n\n def create_folder(self, folder_name):\n if len(folder_name):\n if not os.path.isdir(folder_name):\n os.makedirs(folder_name)\n\n return folder_name\n\n def mol_prop_mass(self, smiles):\n \"\"\"\n Molecular mass\n \"\"\"\n\n return [Descriptors.ExactMolWt(Chem.MolFromSmiles(sm)) for sm in smiles]\n \n def smiles2fraggenie_csv(\n self, \n input_path='', \n input_filename='test_input.csv', \n smiles_col='smiles',\n output_path='', \n output_filename='',\n num_bonds_to_break=3, \n min_fragment_mass=50,\n max_smiles_len=250, \n max_num_smiles=1000000000, \n flag_display='true',\n masses_option='METFRAG_MZ'\n ):\n \"\"\"Calculate FragGenie from csv file\"\"\"\n\n if(len(output_path)==0):\n output_path = input_path\n if(len(output_filename)==0):\n output_filename = ''.join([\n 'fraggenie_', datetime.today().strftime('%d%m%Y%H%M%S'), \n '_', str(np.random.random(1)[0])[2:], \n '_nbonds_', str(num_bonds_to_break), \n '_frgms_', str(min_fragment_mass), \n '_smlen_', str(max_smiles_len),\n '_', input_filename\n ])\n bash_cmd = ''.join([\n 'bash ', self.dir_fraggenie, \n 'fragment.sh ', \n input_path, \n input_filename, \n ' ', output_path, \n output_filename, \n ' ', smiles_col, \n ' ', str(num_bonds_to_break), \n ' ', str(min_fragment_mass), \n ' ', str(max_smiles_len), \n ' ', str(max_num_smiles), \n ' ', flag_display, \n ' ', masses_option\n ])\n\n subprocess.call(bash_cmd, shell=True)\n\n return output_path, output_filename, bash_cmd\n\n def smiles2fraggenie(\n self, \n smiles, \n num_bonds_to_break=3, \n min_fragment_mass=50, \n max_smiles_len=250, \n max_num_smiles=1000000000, \n flag_display='true',\n masses_option='METFRAG_MZ',\n input_path='dump/', \n input_filename='', \n massspec_sep=',', \n fill_non_break_mol=1, \n flag_del_temp_file=1,\n verbose=0\n ):\n \"\"\"Calculate FragGenie from smiles\"\"\"\n\n input_path = self.create_folder(input_path)\n if len(input_filename)==0:\n input_filename = ''.join(['smiles_', datetime.today().strftime('%d%m%Y%H%M%S'), \n '_', str(np.random.random(1)[0])[2:], \n '.csv'\n ])\n\n pd.DataFrame.from_dict({'smiles':smiles}).to_csv(''.join([input_path, input_filename]), index=False)\n\n output_path, output_filename, bash_cmd = self.smiles2fraggenie_csv(\n input_path=input_path, \n input_filename=input_filename, \n num_bonds_to_break=num_bonds_to_break, \n min_fragment_mass=min_fragment_mass, \n max_smiles_len=max_smiles_len,\n max_num_smiles=max_num_smiles,\n flag_display=flag_display, \n masses_option=masses_option\n )\n\n\n df_smiles = pd.read_csv(output_path+output_filename)\n\n # handle very small molecules which is unable to break into fraggenie (fill with mol mass) or unbreakable molecules\n if fill_non_break_mol:\n fraggenie = [None]*len(smiles)\n fraggenie_smiles = df_smiles['smiles'].tolist()\n count1 = 0\n count2 = 0\n for i, sm in enumerate(smiles):\n try:\n fraggenie[i] = 
self.to_numpy(df_smiles[masses_option][fraggenie_smiles.index(sm)], sep=massspec_sep)\n if len(fraggenie[i])==0:\n if verbose:\n print('Unable to break molecules: {}-{}' .format(i, smiles[i]))\n fraggenie[i] = np.asarray([self.mol_prop_mass([smiles[i]])[0]])\n count1 += 1\n except:\n if verbose:\n print('Unable to break molecules: {}-{}' .format(i, smiles[i]))\n fraggenie[i] = np.asarray([self.mol_prop_mass([smiles[i]])[0]])\n count2 += 1\n print('Total number of unbreakable molecules: {} (empty-{}, not all-{})' .format(count1+count2, count1, count2))\n else:\n fraggenie = df_smiles[masses_option].apply(self.to_numpy, sep=massspec_sep).tolist()\n \n\n if flag_del_temp_file:\n filename = ''.join([input_path, input_filename])\n if os.path.isfile(filename):\n if verbose:\n print('Removing \"{}\"' .format(filename))\n os.remove(filename)\n filename = ''.join([output_path, output_filename])\n if os.path.isfile(filename):\n if verbose:\n print('Removing \"{}\"' .format(filename))\n os.remove(filename)\n\n\n return fraggenie\n \nif __name__ == '__main__':\n \n fraggenie = FragGenie()\n \n output_path, output_filename, bash_cmd = fraggenie.smiles2fraggenie_csv(output_filename='fraggenie_test_input.csv')\n \n \n smiles = ['Cn1cnc2n(C)c(=O)n(C)c(=O)c12', \n 'BrC1CCCCc1CC', \n 'C#1C#CC1', \n 'C#1C#CCcCCCc1', \n 'C#1CCCCCCC=1', \n 'C#1CCcNccccccccc1', \n 'Cn1cnc2n(C)c(=O)n(C)c(=O)c12']\n \n fragment = fraggenie.smiles2fraggenie(smiles, fill_non_break_mol=1)\n \n for i in range(len(smiles)):\n print('smiles: {}\\nfragment: {}' .format(smiles[i], fragment[i]))\n \n " ]
[ [ "pandas.read_csv", "numpy.fromstring", "pandas.DataFrame.from_dict", "numpy.random.random" ] ]
BioGeek/annotated_deep_learning_paper_implementations
[ "e2516cc3063cdfdf11cda05f22a10082297aa33e" ]
[ "labml_nn/normalization/group_norm/experiment.py" ]
[ "\"\"\"\n---\ntitle: CIFAR10 Experiment to try Group Normalization\nsummary: >\n This trains is a simple convolutional neural network that uses group normalization\n to classify CIFAR10 images.\n---\n\n# CIFAR10 Experiment for Group Normalization\n\"\"\"\n\nimport torch.nn as nn\n\nfrom labml import experiment\nfrom labml.configs import option\nfrom labml_helpers.module import Module\nfrom labml_nn.experiments.cifar10 import CIFAR10Configs\nfrom labml_nn.normalization.group_norm import GroupNorm\n\n\nclass Model(Module):\n \"\"\"\n ### VGG model for CIFAR-10 classification\n \"\"\"\n\n def __init__(self, groups: int = 32):\n super().__init__()\n layers = []\n # RGB channels\n in_channels = 3\n # Number of channels in each layer in each block\n for block in [[64, 64], [128, 128], [256, 256, 256], [512, 512, 512], [512, 512, 512]]:\n # Convolution, Normalization and Activation layers\n for channels in block:\n layers += [nn.Conv2d(in_channels, channels, kernel_size=3, padding=1),\n GroupNorm(groups, channels),\n nn.ReLU(inplace=True)]\n in_channels = channels\n # Max pooling at end of each block\n layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n\n # Create a sequential model with the layers\n self.layers = nn.Sequential(*layers)\n # Final logits layer\n self.fc = nn.Linear(512, 10)\n\n def forward(self, x):\n # The VGG layers\n x = self.layers(x)\n # Reshape for classification layer\n x = x.view(x.shape[0], -1)\n # Final linear layer\n return self.fc(x)\n\n\nclass Configs(CIFAR10Configs):\n # Number of groups\n groups: int = 16\n\n\n@option(Configs.model)\ndef model(c: Configs):\n \"\"\"\n ### Create model\n \"\"\"\n return Model(c.groups).to(c.device)\n\n\ndef main():\n # Create experiment\n experiment.create(name='cifar10', comment='group norm')\n # Create configurations\n conf = Configs()\n # Load configurations\n experiment.configs(conf, {\n 'optimizer.optimizer': 'Adam',\n 'optimizer.learning_rate': 2.5e-4,\n })\n # Start the experiment and run the training loop\n with experiment.start():\n conf.run()\n\n\n#\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.nn.MaxPool2d", "torch.nn.Linear", "torch.nn.Conv2d", "torch.nn.Sequential", "torch.nn.ReLU" ] ]
kdeweese/DualRandomizedKaczmarz
[ "3d339e893fe1dcb91677f3240047801ca3c43162" ]
[ "drkcode/python/kktmat.py" ]
[ "#!/usr/bin/env python\n# kktmat.py -- KKT matrix from Laplacian matrix\n#\n# Copyright (C) <2016> <Kevin Deweese>\n# All rights reserved.\n#\n# This software may be modified and distributed under the terms\n# of the BSD license. See the LICENSE file for details.\n\nimport scipy\n\ndef kktmat(L):\n mat=scipy.sparse.coo_matrix(scipy.sparse.tril(L,-1))\n row=mat.row\n m=len(row)\n n=L.shape[0]\n col=mat.col\n val=mat.data\n \n #R=scipy.sparse.diags(-1/val,0)\n R=scipy.array(-1/val)\n i=scipy.concatenate([scipy.arange(0,m),scipy.arange(0,m)])\n j=scipy.concatenate([row,col])\n data=scipy.concatenate([scipy.ones(m),-scipy.ones(m)])\n B=scipy.sparse.coo_matrix((data,(i,j)))\n return {'R':R,'B':B} \n" ]
[ [ "scipy.concatenate", "scipy.ones", "scipy.sparse.coo_matrix", "scipy.sparse.tril", "scipy.arange", "scipy.array" ] ]
serre-lab/brownUnconference
[ "c51758f0bf695648832448c5c166e2a8dea14268" ]
[ "scripts/embeddings.py" ]
[ "import argparse\nimport csv\n\nimport torch\nimport transformers\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser(description=\"MiniConf Portal Command Line\")\n\n parser.add_argument(\"papers\", default=False, help=\"papers file to parse\")\n return parser.parse_args()\n\n\nif __name__ == \"__main__\":\n args = parse_arguments()\n tokenizer = transformers.AutoTokenizer.from_pretrained(\"deepset/sentence_bert\")\n\n model = transformers.AutoModel.from_pretrained(\"deepset/sentence_bert\")\n model.eval()\n\n with open(args.papers, \"r\",encoding='utf-8') as f:\n abstracts = list(csv.DictReader(f))\n all_abstracts = torch.zeros(len(abstracts), 768)\n with torch.no_grad():\n for i, row in enumerate(abstracts):\n\n input_ids = torch.tensor([tokenizer.encode(row[\"abstract\"])][:512])\n all_hidden_states, _ = model(input_ids)[-2:]\n all_abstracts[i] = all_hidden_states.mean(0).mean(0)\n print(i)\n print(row['author'])\n torch.save(all_abstracts, \"embeddings.torch\")\n" ]
[ [ "torch.save", "torch.no_grad" ] ]
KeAWang/wilds
[ "3b808a84bd477d7877b77675eec2953128a87033" ]
[ "examples/algorithms/groupDRO.py" ]
[ "import torch\nfrom algorithms.single_model_algorithm import SingleModelAlgorithm\nfrom models.initializer import initialize_model\n\nclass GroupDRO(SingleModelAlgorithm):\n \"\"\"\n Group distributionally robust optimization.\n\n Original paper:\n @inproceedings{sagawa2019distributionally,\n title={Distributionally robust neural networks for group shifts: On the importance of regularization for worst-case generalization},\n author={Sagawa, Shiori and Koh, Pang Wei and Hashimoto, Tatsunori B and Liang, Percy},\n booktitle={International Conference on Learning Representations},\n year={2019}\n } \n \"\"\"\n def __init__(self, config, d_out, grouper, loss, metric, n_train_steps, is_group_in_train):\n # check config\n assert config.uniform_over_groups\n # initialize model\n model = initialize_model(config, d_out).to(config.device)\n # initialize module\n super().__init__(\n config=config,\n model=model,\n grouper=grouper,\n loss=loss,\n metric=metric,\n n_train_steps=n_train_steps,\n )\n # additional logging\n self.logged_fields.append('group_weight')\n # step size\n self.group_weights_step_size = config.group_dro_step_size\n # initialize adversarial weights\n self.group_weights = torch.zeros(grouper.n_groups)\n self.group_weights[is_group_in_train] = 1\n self.group_weights = self.group_weights/self.group_weights.sum()\n self.group_weights = self.group_weights.to(self.device)\n\n def process_batch(self, batch):\n \"\"\"\n A helper function for update() and evaluate() that processes the batch\n Args:\n - batch (tuple of Tensors): a batch of data yielded by data loaders\n Output:\n - results (dictionary): information about the batch\n - g (Tensor)\n - y_true (Tensor)\n - metadata (Tensor)\n - loss (Tensor)\n - metrics (Tensor)\n all Tensors are of size (batch_size,)\n \"\"\"\n results = super().process_batch(batch)\n results['group_weight'] = self.group_weights\n return results\n\n def objective(self, results):\n \"\"\"\n Takes an output of SingleModelAlgorithm.process_batch() and computes the\n optimized objective. For group DRO, the objective is the weighted average\n of losses, where groups have weights groupDRO.group_weights.\n Args:\n - results (dictionary): output of SingleModelAlgorithm.process_batch()\n Output:\n - objective (Tensor): optimized objective; size (1,).\n \"\"\"\n group_losses, _, _ = self.loss.compute_group_wise(\n results['y_pred'],\n results['y_true'],\n results['g'],\n self.grouper.n_groups,\n return_dict=False)\n return group_losses @ self.group_weights\n\n def _update(self, results):\n \"\"\"\n Process the batch, update the log, and update the model, group weights, and scheduler.\n Args:\n - batch (tuple of Tensors): a batch of data yielded by data loaders\n Output:\n - results (dictionary): information about the batch, such as:\n - g (Tensor)\n - y_true (Tensor)\n - metadata (Tensor)\n - loss (Tensor)\n - metrics (Tensor)\n - objective (float)\n \"\"\"\n # compute group losses\n group_losses, _, _ = self.loss.compute_group_wise(\n results['y_pred'],\n results['y_true'],\n results['g'],\n self.grouper.n_groups,\n return_dict=False)\n # update group weights\n self.group_weights = self.group_weights * torch.exp(self.group_weights_step_size*group_losses.data)\n self.group_weights = (self.group_weights/(self.group_weights.sum()))\n # save updated group weights\n results['group_weight'] = self.group_weights\n # update model\n super()._update(results)\n" ]
[ [ "torch.zeros", "torch.exp" ] ]
IewNixIl/graduation_project_under
[ "67d0345208511bb06c35c3453227b2fa4ebef4a3" ]
[ "DATA/Labeling.py" ]
[ "import numpy\nfrom matplotlib import pyplot\nimport gdal\nfrom skimage import io,exposure\nfrom skimage.segmentation import slic,mark_boundaries\nimport os\nfrom PIL import Image\nimport shelve\nimport sys\nsys.path.append('..')\nfrom Config import config\n\n\n\n\ndef seg(path,n_segments=500, compactness=20):\n i=io.imread(path)[:,:,[3,2,1,7]]\n img=i[:,:,:3]\n img=(img-img.min())/(img.max()-img.min())\n img=img*255\n img=img.astype(numpy.uint8)\n\n img=exposure.adjust_gamma(img,0.5)\n segment=slic(img,n_segments=n_segments, compactness=compactness,enforce_connectivity=True)\n out=mark_boundaries(img,segment,color=[0,0,0.2])\n \n #img=exposure.adjust_gamma(img,0.5)\n #out=exposure.adjust_gamma(out,0.5)\n \n wdi=(i[:,:,3]-i[:,:,1])/(i[:,:,3]+i[:,:,1])\n \n wdi=(wdi/wdi.max())*255\n \n return segment,out,img,wdi\n \n\ndef getname(path,namelist):\n if namelist[0]==0:\n season='ROIs1158_spring'\n elif namelist[0]==1:\n season='ROIs1868_summer'\n elif namelist[0]==2:\n season='ROIs1970_fall'\n elif namelist[0]==3:\n season='ROIs2017_winter'\n \n path_s2=path+'\\\\'+season+'\\\\s2_'+str(namelist[1])+'\\\\'+season+'_s2_'+str(namelist[1])+'_p'+str(namelist[2])+'.tif'\n \n return path_s2\n \ndef transform(name):\n if 'spring' in name:\n season=0\n elif 'summer' in name:\n season=1\n elif 'fall' in name:\n season=2\n elif 'winter' in name:\n season=3\n \n l=[]\n l.append(season)\n l.append(int(name.split('_')[3]))\n l.append(int(name.split('_')[4].split('.')[0][1:]))\n \n return l\n \n\nclass UI:\n def __init__(self,mode='normal',init=0):\n '''mode = normal 正常\n mode=review 仅仅显示已经标记的 \n '''\n self.mode=mode\n self.path_label=config.path_labels\n if self.mode=='normal':\n with shelve.open(config.path_devision) as f:\n self.imglist=f['test']\n else:\n self.imglist=os.listdir(config.path_labels)\n\n self.n=init\n \n \n self.ifpress=False\n self.ifloadlabel=False\n \n fig=pyplot.figure()\n fig.canvas.mpl_disconnect(fig.canvas.manager.key_press_handler_id)\n fig.canvas.mpl_connect('key_press_event',self.on_key_press)\n fig.canvas.mpl_connect('button_press_event',self.on_button_press)\n fig.canvas.mpl_connect('motion_notify_event',self.on_button_move)\n fig.canvas.mpl_connect('button_release_event',self.on_button_release)\n \n self.fig=fig\n self.ax1=fig.add_subplot(3,2,1)\n self.ax2=fig.add_subplot(3,2,3)\n self.ax4=fig.add_subplot(3,2,5)\n self.ax3=fig.add_subplot(1,2,2)\n pyplot.get_current_fig_manager().window.state('zoomed')\n #self.ax2=fig.add_subplot(1,2,2)\n \n \n \n self.valuelist=[]\n self.label=numpy.zeros((256,256))\n self.ifloadlabel=True\n self.draw()\n \n \n \n pyplot.show()\n \n def on_key_press(self,event):\n if event.key=='a' or event.key=='left':\n self.n-=1\n print(self.n)\n self.valuelist=[]\n self.label=numpy.zeros(self.segment.shape)\n self.ifloadlabel=True\n self.draw()\n\n if event.key=='d' or event.key=='right':\n if self.n+1>=len(self.imglist):\n return\n self.n+=1\n print(self.n)\n self.valuelist=[]\n self.label=numpy.zeros(self.segment.shape)\n self.ifloadlabel=True\n self.draw()\n\n if event.key=='e' or event.key=='enter':\n self.save_label()\n \n if event.key=='Q':\n f=numpy.unique(self.segment).tolist()\n for i in f:\n if i not in self.valuelist:\n self.valuelist.append(i)\n for i in range(len(self.valuelist)):\n if i==0:\n flag=(self.segment==self.valuelist[i])\n else:\n flag=flag+(self.segment==self.valuelist[i])\n self.label=numpy.where(flag,1.0,0)\n \n self.draw()\n \n \n def on_button_press(self,event):\n \n try:\n r=int(event.ydata)\n c=int(event.xdata)\n except 
TypeError:\n return\n value=self.segment[r,c]\n if event.button==1:\n if value not in self.valuelist:\n self.ifpress=True\n self.valuelist.append(value)\n elif event.button==3:\n if value in self.valuelist:\n self.ifpress=True\n self.valuelist.remove(value)\n \n \n def on_button_move(self,event):\n if not self.ifpress:\n return\n \n try:\n r=int(event.ydata)\n c=int(event.xdata)\n except TypeError:\n return\n value=self.segment[r,c]\n if event.button==1:\n if value not in self.valuelist:\n self.valuelist.append(value)\n elif event.button==3:\n if value in self.valuelist:\n self.valuelist.remove(value)\n \n def on_button_release(self,event):\n if not self.ifpress:\n return\n self.ifpress=False\n for i in range(len(self.valuelist)):\n if i==0:\n flag=(self.segment==self.valuelist[i])\n else:\n flag=flag+(self.segment==self.valuelist[i])\n self.label=numpy.where(flag,1,0).astype(int)\n self.draw()\n \n \n def draw(self):\n \n if self.mode=='normal':\n segment,out,img,wdi=seg(getname(config.path,self.imglist[self.n]))\n else:\n \n segment,out,img,wdi=seg(getname(config.path,transform(self.imglist[self.n])))\n self.segment=segment\n if self.ifloadlabel:\n self.read_label()\n self.ifloadlabel=False\n #self.ax1.imshow(out)\n t=numpy.where(self.label==1,0.5,out[:,:,2])\n out[:,:,2]=t\n self.ax1.cla()\n self.ax2.cla()\n self.ax3.cla()\n self.ax4.cla()\n self.ax1.imshow(img)\n self.ax2.imshow(wdi,cmap='gray')\n self.ax3.imshow(out)\n self.ax4.imshow(self.label,cmap='gray')\n \n d=os.listdir(config.path_labels)\n self.ax3.set_title(str(len(d))+'/'+str(self.n+1))\n self.fig.canvas.draw_idle()\n \n def save_label(self):\n \n \n \n label=self.label*255\n label=label.astype(numpy.uint8)\n label=Image.fromarray(label)\n if self.mode=='normal':\n name=getname(config.path,self.imglist[self.n]).split('\\\\')[-1]\n name=name.split('_')\n name[2]='label'\n name='_'.join(name)\n else:\n name=self.imglist[self.n]\n label.save(self.path_label+'\\\\'+name)\n \n def read_label(self):\n \n dirlist=os.listdir(self.path_label)\n if self.mode=='normal':\n name=getname(config.path,self.imglist[self.n]).split('\\\\')[-1]\n name=name.split('_')\n name[2]='label'\n name='_'.join(name)\n else:\n name=self.imglist[self.n]\n if name in dirlist:\n self.label=numpy.array(Image.open(self.path_label+'\\\\'+name))/255\n self.label=self.label.astype(int)\n self.valuelist=list(numpy.unique(numpy.where(self.label==1,self.segment,-2)))\n self.valuelist.remove(-2)\n \n \ndef statistic():\n d=os.listdir(config.path_labels)\n n=numpy.array([0,0,0,0])\n for i in d:\n if 'spring' in i:\n n[0]=n[0]+1\n if 'summer' in i:\n n[1]=n[1]+1\n if 'fall' in i:\n n[2]=n[2]+1\n if 'winter' in i:\n n[3]=n[3]+1\n \n print(n)\n n=n/len(d)\n print(n) \n\nif __name__=='__main__':\n test=UI(mode='normal',init=100)\n #statistic()\n " ]
[ [ "numpy.zeros", "matplotlib.pyplot.figure", "matplotlib.pyplot.get_current_fig_manager", "matplotlib.pyplot.show", "numpy.array", "numpy.where", "numpy.unique" ] ]
chenzhengda/tensorflow
[ "8debb698097670458b5f21d728bc6f734a7b5a53" ]
[ "tensorflow/python/ipu/keras/layers/recomputation.py" ]
[ "# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"\nRecomputation IPU Keras layers\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\"\"\"\n\nfrom tensorflow.python.keras.engine.base_layer import Layer\nfrom tensorflow.python.ipu.ops import pipelining_ops\n\n\nclass RecomputationCheckpoint(Layer):\n \"\"\"\n Layer for checkpointing values in a computational pipeline stage.\n When recomputation is enabled, these values will not be recomputed and they\n will be stored in memory instead.\n\n This layer can reduce memory liveness peaks when using recomputation if\n there are too many activations which need to be recomputed before the\n backpropagation operations can be executed.\n\n This layer should be used with the\n `RecomputationMode.RecomputeAndBackpropagateInterleaved` pipelining\n recomputation mode.\n\n Note that this layer has no effect when used with the\n `RecomputationMode.RecomputeThenBackpropagate` pipelining\n recomputation mode.\n \"\"\"\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n def call(self, inputs, **kwargs):\n \"\"\"\n Checkpoint the input tensors.\n\n Args:\n inputs: A tensor or a structure of tensors which should be checkpointed.\n\n Returns:\n A tensor or a structure of tensors which matches shape and type of\n `inputs`.\n \"\"\"\n return pipelining_ops.recomputation_checkpoint(inputs, name=self.name)\n\n def get_config(self):\n return {}\n" ]
[ [ "tensorflow.python.ipu.ops.pipelining_ops.recomputation_checkpoint" ] ]
leander-dsouza/Gazebo
[ "4e4c92115c9132b096f9b5a7fc9a9c0f5ed9e598" ]
[ "scripts/Tennis Ball Detection/ball_detection_taskphase.py" ]
[ "#!/usr/bin/env python3\nimport rospy\nimport cv2\nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge\nimport numpy as np\n\n\n\nkernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5, 5))\nkernel1= cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3, 3))\n\naratio = 1.0\n\ndef nothing(x):\n pass\n\n\n# *********************************************************************************************************************\ndef adjust_gamma(image, gamma=1.0):\n if gamma == 0:\n gamma = 0.01\n\n invGamma = 1.0 / gamma\n table = np.array([((i / 255.0) ** invGamma) * 255\n for i in np.arange(0, 256)]).astype(\"uint8\")\n\n return cv2.LUT(image, table)\n\n\n# *********************************************************************************************************************\n\nimg1= np.zeros((300, 512, 3), np.uint8)\ncv2.namedWindow('GAMMA')\n\ncv2.createTrackbar('g', 'GAMMA', 1, 10, nothing)\n\ndef callback(data):\n global aratio\n br = CvBridge()\n frame1 = br.imgmsg_to_cv2(data)\n frame1 = cv2.cvtColor(frame1, cv2.COLOR_RGB2BGR)\n frame = frame1\n gamma = (cv2.getTrackbarPos('g', 'GAMMA')) * 0.1\n cv2.imshow('GAMMA', img1)\n frame = adjust_gamma(frame, gamma=gamma)\n\n cv2.putText(frame, \"g={}\".format(gamma), (10, 30),\n cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 3)\n\n #cv2.imshow(\"camera\", frame)\n hsv = frame\n hsv = cv2.cvtColor(hsv, cv2.COLOR_BGR2HSV) #RGB reading\n hsv = cv2.GaussianBlur(hsv, (5, 5), 0)\n\n # define range of yellow color in HSV\n lower_yellow = np.array([29, 86, 6])\n upper_yellow = np.array([64, 255, 255])\n\n # Threshold the HSV image to get only blue colors\n mask = cv2.inRange(hsv, lower_yellow, upper_yellow)\n\n mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel1)\n mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel1)\n\n mask = cv2.erode(mask, kernel, iterations=2)\n mask = cv2.dilate(mask, kernel1, iterations=13)\n\n # Bitwise-AND mask and original image\n res = cv2.bitwise_and(frame, frame, mask=mask)\n\n # BOUNDING RECTANGLE .............................................................................................\n\n conts, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n conts = np.array(conts)\n\n if len(conts) > 0:\n\n for i, contour in enumerate(conts):\n rect = cv2.minAreaRect(contour)\n box = cv2.boxPoints(rect)\n box = np.int0(box)\n aratio = (rect[1][0] / rect[1][1])\n if (aratio > 0.9) and (aratio < 1.1):\n cv2.drawContours(frame, [box], 0, (0, 0, 255), 2)\n\n #print(\"Aspect Ratio\", aratio)\n\n # HOUGH CIRCLES........................................................................................................\n\n gray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)\n\n circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, 200, param1=255, param2=20, minRadius=0, maxRadius=0)\n # # print circles\n\n # ensure at least some circles were found\n if circles is not None:\n # convert the (x, y) coordinates and radius of the circles to integers\n circles = np.round(circles[0, :]).astype(\"int\")\n # loop over the (x, y) coordinates and radius of the circles\n for (x, y, r) in circles:\n # draw the circle in the output image, then draw a rectangle in the image\n # corresponding to the center of the circle\n\n if (aratio > 0.9) and (aratio < 1.1):\n cv2.circle(res, (x, y), r, (0, 255, 0), 4)\n cv2.rectangle(res, (x - 5, y - 5), (x + 5, y + 5), (0, 128, 255), -1)\n cv2.putText(frame, \"BALL DETECTED\", (430, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.8,\n (255, 0, 0),\n 3)\n\n # 
DISPLAY................................................................................................................\n\n cv2.putText(frame1, \"ORIGINAL FRAME\", (10, 460), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 3)\n cv2.putText(frame, \"OUTPUT FRAME\", (10, 460), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 3)\n\n cv2.putText(res, \"RESULTANT\", (10, 460), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 3)\n\n mask = cv2.cvtColor(mask,cv2.COLOR_GRAY2BGR)\n horizontal1 = np.hstack([frame1,frame])\n horizontal2 = np.hstack((mask,res))\n vertical = np.vstack((horizontal1,horizontal2))\n\n '''cv2.imshow('GAMMA CORRECTED', frame)\n cv2.imshow('MASK', mask)\n cv2.imshow('RESULT', res)\n cv2.imshow('ORIGINAL FRAME', frame1)'''\n\n cv2.putText(vertical, \"MASK\", (10, 940), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 3)\n cv2.imshow('RESULT', vertical)\n\n # .....................................................................................................................\n k = cv2.waitKey(5) & 0xFF\n if k == 27:\n quit()\n\n\n\ndef listener():\n rospy.init_node('listener', anonymous=True,disable_signals=True)\n\n rospy.Subscriber('/d435/camera/color/image_raw', Image, callback)\n\n rospy.spin()\n cv2.destroyAllWindows()\n\nif __name__ == '__main__':\n listener()\n\n" ]
[ [ "numpy.int0", "numpy.vstack", "numpy.zeros", "numpy.arange", "numpy.hstack", "numpy.array", "numpy.round" ] ]
rcourivaud/video-to-pose3D
[ "b908014fe2c531c075c11cee72bb798120f970c2" ]
[ "joints_detectors/Alphapose/yolo/video_demo_half.py" ]
[ "from __future__ import division\nimport time\nimport torch \nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport numpy as np\nimport cv2 \nfrom .util import *\nfrom .darknet import Darknet\nfrom .preprocess import prep_image, inp_to_image, letterbox_image\nimport pandas as pd\nimport random \nimport pickle as pkl\nimport argparse\n\n\ndef get_test_input(input_dim, CUDA):\n img = cv2.imread(\"dog-cycle-car.png\")\n img = cv2.resize(img, (input_dim, input_dim)) \n img_ = img[:,:,::-1].transpose((2,0,1))\n img_ = img_[np.newaxis,:,:,:]/255.0\n img_ = torch.from_numpy(img_).float()\n img_ = Variable(img_)\n \n if CUDA:\n img_ = img_.cuda()\n \n return img_\n\ndef prep_image(img, inp_dim):\n \"\"\"\n Prepare image for inputting to the neural network. \n \n Returns a Variable \n \"\"\"\n\n orig_im = img\n dim = orig_im.shape[1], orig_im.shape[0]\n img = (letterbox_image(orig_im, (inp_dim, inp_dim)))\n img_ = img[:,:,::-1].transpose((2,0,1)).copy()\n img_ = torch.from_numpy(img_).float().div(255.0).unsqueeze(0)\n return img_, orig_im, dim\n\ndef write(x, img):\n c1 = tuple(x[1:3].int())\n c2 = tuple(x[3:5].int())\n cls = int(x[-1])\n label = \"{0}\".format(classes[cls])\n color = random.choice(colors)\n cv2.rectangle(img, c1, c2,color, 1)\n t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 1 , 1)[0]\n c2 = c1[0] + t_size[0] + 3, c1[1] + t_size[1] + 4\n cv2.rectangle(img, c1, c2,color, -1)\n cv2.putText(img, label, (c1[0], c1[1] + t_size[1] + 4), cv2.FONT_HERSHEY_PLAIN, 1, [225,255,255], 1);\n return img\n\ndef arg_parse():\n \"\"\"\n Parse arguements to the detect module\n \n \"\"\"\n \n \n parser = argparse.ArgumentParser(description='YOLO v2 Video Detection Module')\n \n parser.add_argument(\"--video\", dest = 'video', help = \n \"Video to run detection upon\",\n default = \"video.avi\", type = str)\n parser.add_argument(\"--dataset\", dest = \"dataset\", help = \"Dataset on which the network has been trained\", default = \"pascal\")\n parser.add_argument(\"--confidence\", dest = \"confidence\", help = \"Object Confidence to filter predictions\", default = 0.5)\n parser.add_argument(\"--nms_thresh\", dest = \"nms_thresh\", help = \"NMS Threshhold\", default = 0.4)\n parser.add_argument(\"--cfg\", dest = 'cfgfile', help = \n \"Config file\",\n default = \"cfg/yolov3-spp.cfg\", type = str)\n parser.add_argument(\"--weights\", dest = 'weightsfile', help = \n \"weightsfile\",\n default = \"yolov3-spp.weights\", type = str)\n parser.add_argument(\"--reso\", dest = 'reso', help = \n \"Input resolution of the network. Increase to increase accuracy. 
Decrease to increase speed\",\n default = \"416\", type = str)\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n args = arg_parse()\n confidence = float(args.confidence)\n nms_thesh = float(args.nms_thresh)\n start = 0\n\n CUDA = torch.cuda.is_available()\n\n \n\n CUDA = torch.cuda.is_available()\n num_classes = 80 \n bbox_attrs = 5 + num_classes\n \n print(\"Loading network.....\")\n model = Darknet(args.cfgfile)\n model.load_weights(args.weightsfile)\n print(\"Network successfully loaded\")\n \n model.net_info[\"height\"] = args.reso\n inp_dim = int(model.net_info[\"height\"])\n assert inp_dim % 32 == 0 \n assert inp_dim > 32\n\n \n if CUDA:\n model.cuda().half()\n \n model(get_test_input(inp_dim, CUDA), CUDA)\n\n model.eval()\n \n videofile = 'video.avi'\n \n cap = cv2.VideoCapture(videofile)\n \n assert cap.isOpened(), 'Cannot capture source'\n \n frames = 0\n start = time.time() \n while cap.isOpened():\n \n ret, frame = cap.read()\n if ret:\n \n\n img, orig_im, dim = prep_image(frame, inp_dim)\n \n im_dim = torch.FloatTensor(dim).repeat(1,2) \n \n \n if CUDA:\n img = img.cuda().half()\n im_dim = im_dim.half().cuda()\n write_results = write_results_half\n predict_transform = predict_transform_half\n \n \n output = model(Variable(img, volatile = True), CUDA)\n output = write_results(output, confidence, num_classes, nms = True, nms_conf = nms_thesh)\n\n \n if type(output) == int:\n frames += 1\n print(\"FPS of the video is {:5.2f}\".format( frames / (time.time() - start)))\n cv2.imshow(\"frame\", orig_im)\n key = cv2.waitKey(1)\n if key & 0xFF == ord('q'):\n break\n continue\n\n \n im_dim = im_dim.repeat(output.size(0), 1)\n scaling_factor = torch.min(inp_dim/im_dim,1)[0].view(-1,1)\n \n output[:,[1,3]] -= (inp_dim - scaling_factor*im_dim[:,0].view(-1,1))/2\n output[:,[2,4]] -= (inp_dim - scaling_factor*im_dim[:,1].view(-1,1))/2\n \n output[:,1:5] /= scaling_factor\n \n for i in range(output.shape[0]):\n output[i, [1,3]] = torch.clamp(output[i, [1,3]], 0.0, im_dim[i,0])\n output[i, [2,4]] = torch.clamp(output[i, [2,4]], 0.0, im_dim[i,1])\n \n \n classes = load_classes('data/coco.names')\n colors = pkl.load(open(\"pallete\", \"rb\"))\n \n list(map(lambda x: write(x, orig_im), output))\n \n \n cv2.imshow(\"frame\", orig_im)\n key = cv2.waitKey(1)\n if key & 0xFF == ord('q'):\n break\n frames += 1\n print(\"FPS of the video is {:5.2f}\".format( frames / (time.time() - start)))\n\n \n else:\n break\n \n\n \n \n\n" ]
[ [ "torch.FloatTensor", "torch.min", "torch.autograd.Variable", "torch.cuda.is_available", "torch.from_numpy", "torch.clamp" ] ]
tinapiao/Software-IC-Automation
[ "74b23cd94aa6e4658b110e93b5deb635e014f3a6" ]
[ "laygo/generators/serdes/des_layout_generator_woM5.py" ]
[ "#!/usr/bin/python\n########################################################################################################################\n#\n# Copyright (c) 2014, Regents of the University of California\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the\n# following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following\n# disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the\n# following disclaimer in the documentation and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES,\n# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n########################################################################################################################\n\n\"\"\"DES library\n\"\"\"\nimport laygo\nimport numpy as np\n#from logic_layout_generator import *\nfrom math import log\nimport yaml\nimport os\n#import logging;logging.basicConfig(level=logging.DEBUG)\n\ndef generate_boundary(laygen, objectname_pfix, placement_grid,\n devname_bottom, devname_top, devname_left, devname_right,\n shape_bottom=None, shape_top=None, shape_left=None, shape_right=None,\n transform_bottom=None, transform_top=None, transform_left=None, transform_right=None,\n origin=np.array([0, 0])):\n #generate a boundary structure to resolve boundary design rules\n pg = placement_grid\n #parameters\n if shape_bottom == None:\n shape_bottom = [np.array([1, 1]) for d in devname_bottom]\n if shape_top == None:\n shape_top = [np.array([1, 1]) for d in devname_top]\n if shape_left == None:\n shape_left = [np.array([1, 1]) for d in devname_left]\n if shape_right == None:\n shape_right = [np.array([1, 1]) for d in devname_right]\n if transform_bottom == None:\n transform_bottom = ['R0' for d in devname_bottom]\n if transform_top == None:\n transform_top = ['R0' for d in devname_top]\n if transform_left == None:\n transform_left = ['R0' for d in devname_left]\n if transform_right == None:\n transform_right = ['R0' for d in devname_right]\n\n #bottom\n dev_bottom=[]\n dev_bottom.append(laygen.place(\"I\" + objectname_pfix + 'BNDBTM0', devname_bottom[0], pg, xy=origin,\n shape=shape_bottom[0], transform=transform_bottom[0]))\n for i, d in enumerate(devname_bottom[1:]):\n dev_bottom.append(laygen.relplace(name = \"I\" + objectname_pfix + 'BNDBTM'+str(i+1), templatename = d, gridname = pg, refinstname = dev_bottom[-1].name,\n shape=shape_bottom[i+1], transform=transform_bottom[i+1]))\n dev_left=[]\n dev_left.append(laygen.relplace(name = \"I\" + objectname_pfix + 'BNDLFT0', templatename = devname_left[0], gridname = pg, refinstname = dev_bottom[0].name, direction='top',\n 
shape=shape_left[0], transform=transform_left[0]))\n for i, d in enumerate(devname_left[1:]):\n dev_left.append(laygen.relplace(name = \"I\" + objectname_pfix + 'BNDLFT'+str(i+1), templatename = d, gridname = pg, refinstname = dev_left[-1].name, direction='top',\n shape=shape_left[i+1], transform=transform_left[i+1]))\n dev_right=[]\n dev_right.append(laygen.relplace(name = \"I\" + objectname_pfix + 'BNDRHT0', templatename = devname_right[0], gridname = pg, refinstname = dev_bottom[-1].name, direction='top',\n shape=shape_right[0], transform=transform_right[0]))\n for i, d in enumerate(devname_right[1:]):\n dev_right.append(laygen.relplace(name = \"I\" + objectname_pfix + 'BNDRHT'+str(i+1), templatename = d, gridname = pg, refinstname = dev_right[-1].name, direction='top',\n shape=shape_right[i+1], transform=transform_right[i+1]))\n dev_top=[]\n dev_top.append(laygen.relplace(name = \"I\" + objectname_pfix + 'BNDTOP0', templatename = devname_top[0], gridname = pg, refinstname = dev_left[-1].name, direction='top',\n shape=shape_top[0], transform=transform_top[0]))\n for i, d in enumerate(devname_top[1:]):\n dev_top.append(laygen.relplace(name = \"I\" + objectname_pfix + 'BNDTOP'+str(i+1), templatename = d, gridname = pg, refinstname = dev_top[-1].name,\n shape=shape_top[i+1], transform=transform_top[i+1]))\n dev_right=[]\n return [dev_bottom, dev_top, dev_left, dev_right]\n\ndef generate_deserializer(laygen, objectname_pfix, templib_logic, placement_grid, routing_grid_m2m3,\n routing_grid_m4m5, num_des=8, num_flop=1, m_des_dff=1, origin=np.array([0, 0])):\n \"\"\"generate deserializer \"\"\"\n pg = placement_grid\n\n rg_m2m3 = routing_grid_m2m3\n rg_m4m5 = routing_grid_m4m5\n\n tap_name='tap'\n #ff_name = 'dff_1x'\n #ff_rst_name = 'dff_strsth_1x'\n ff_name = 'dff_'+str(m_des_dff)+'x'\n ff_rst_name = 'dff_strsth_'+str(m_des_dff)+'x'\n\n #Calculate layout size\n x0=num_flop * (2*laygen.templates.get_template(ff_name, templib_logic).xy[1][0] + laygen.templates.get_template(ff_rst_name, templib_logic).xy[1][0]) \\\n + 2*laygen.templates.get_template(tap_name, templib_logic).xy[1][0]\n num_row=int((num_des/num_flop + 0.99))+1\n #boundaries\n m_bnd = int(x0 / laygen.templates.get_template('boundary_bottom').xy[1][0])\n devname_bnd_left = []\n devname_bnd_right = []\n transform_bnd_left = []\n transform_bnd_right = []\n for i in range(num_row):\n if i%2==0:\n devname_bnd_left += ['nmos4_fast_left', 'pmos4_fast_left']\n devname_bnd_right += ['nmos4_fast_right', 'pmos4_fast_right']\n transform_bnd_left += ['R0', 'MX']\n transform_bnd_right += ['R0', 'MX']\n else:\n devname_bnd_left += ['pmos4_fast_left', 'nmos4_fast_left']\n devname_bnd_right += ['pmos4_fast_right', 'nmos4_fast_right']\n transform_bnd_left += ['R0', 'MX']\n transform_bnd_right += ['R0', 'MX']\n [bnd_bottom, bnd_top, bnd_left, bnd_right] = generate_boundary(laygen, objectname_pfix='BND0',\n placement_grid=pg,\n devname_bottom=['boundary_bottomleft',\n 'boundary_bottom',\n 'boundary_bottomright'],\n shape_bottom=[np.array([1, 1]), np.array([m_bnd, 1]),\n np.array([1, 1])],\n devname_top=['boundary_topleft', 'boundary_top',\n 'boundary_topright'],\n shape_top=[np.array([1, 1]), np.array([m_bnd, 1]),\n np.array([1, 1])],\n devname_left=devname_bnd_left,\n transform_left=transform_bnd_left,\n devname_right=devname_bnd_right,\n transform_right=transform_bnd_right,\n origin=np.array([0, 0]))\n #Calculate origins for placement\n tap_origin = origin + laygen.get_xy(obj = bnd_bottom[0], gridname = pg) \\\n + laygen.get_xy(obj = 
bnd_bottom[0].template, gridname = pg)\n array_origin = origin + laygen.get_xy(obj = bnd_bottom[0], gridname = pg) \\\n + laygen.get_xy(obj = bnd_bottom[0].template, gridname = pg) \\\n + np.array([laygen.get_xy(obj=laygen.get_template(name = tap_name, libname = templib_logic), gridname = pg)[0], 0])\n tapr_origin = tap_origin + m_bnd*np.array([laygen.get_xy(obj=laygen.get_template(name = 'boundary_bottom'), gridname = pg)[0], 0]) \\\n - np.array([laygen.get_xy(obj=laygen.get_template(name = tap_name, libname = templib_logic), gridname = pg)[0], 0])\n FF0_origin = array_origin + np.array([0, laygen.get_xy(obj=laygen.get_template(name = 'inv_1x', libname = templib_logic), gridname = pg)[1]]) + \\\n np.array([0, laygen.get_xy(obj=laygen.get_template(name = ff_name, libname = templib_logic), gridname = pg)[1]])\n # placement\n iffout=[]\n iffin=[]\n iffdiv=[]\n iclkbuf=[]\n idivbuf=[]\n isp1x=[]\n itapl=[]\n itapr=[]\n tf='R0'\n if num_flop == 1: #Layout height reduction factor, no reduction\n for i in range(num_row):\n if i%2==0: tf='R0'\n else: tf='MX'\n if i==0: #Row for clock buffers \n itapl.append(laygen.place(name = \"I\" + objectname_pfix + 'TAPL0', templatename = tap_name,\n gridname = pg, xy=tap_origin, transform=tf, shape=np.array([1,1]), template_libname = templib_logic))\n itapr.append(laygen.place(name = \"I\" + objectname_pfix + 'TAPR0', templatename = tap_name,\n gridname = pg, xy=tapr_origin, transform=tf, shape=np.array([1,1]), template_libname = templib_logic))\n idivbuf.append(laygen.place(name = \"I\" + objectname_pfix + 'DIVBUF32x', templatename = 'inv_32x',\n gridname = pg, xy=array_origin, transform=tf, shape=np.array([1,1]), template_libname = templib_logic))\n idivbuf.append(laygen.relplace(name = \"I\" + objectname_pfix + 'DIVBUF8x', templatename = 'inv_8x',\n gridname = pg, refinstname = idivbuf[-1].name, transform=tf, shape=np.array([1,1]),\n template_libname=templib_logic))\n idivbuf.append(laygen.relplace(name = \"I\" + objectname_pfix + 'DIVBUF2x', templatename = 'inv_2x',\n gridname = pg, refinstname = idivbuf[-1].name, transform=tf, shape=np.array([1,1]),\n template_libname=templib_logic))\n idivbuf.append(laygen.relplace(name = \"I\" + objectname_pfix + 'DIVBUF1x', templatename = 'inv_1x',\n gridname = pg, refinstname = idivbuf[-1].name, transform=tf, shape=np.array([1,1]),\n template_libname=templib_logic))\n iclkbuf.append(laygen.relplace(name = \"I\" + objectname_pfix + 'CLKBUF1x', templatename = 'inv_1x',\n gridname = pg, refinstname = idivbuf[3].name, transform=tf, shape=np.array([1,1]), xy=np.array([0,0]),\n template_libname=templib_logic))\n iclkbuf.append(laygen.relplace(name = \"I\" + objectname_pfix + 'CLKBUF2x', templatename = 'inv_2x',\n gridname = pg, refinstname = iclkbuf[-1].name, transform=tf, shape=np.array([1,1]),\n template_libname=templib_logic))\n iclkbuf.append(laygen.relplace(name = \"I\" + objectname_pfix + 'CLKBUF8x', templatename = 'inv_8x',\n gridname = pg, refinstname = iclkbuf[-1].name, transform=tf, shape=np.array([1,1]),\n template_libname=templib_logic))\n iclkbuf.append(laygen.relplace(name = \"I\" + objectname_pfix + 'CLKBUF32x', templatename = 'inv_32x',\n gridname = pg, refinstname = iclkbuf[-1].name, transform=tf, shape=np.array([1,1]),\n template_libname=templib_logic))\n else:\n itapl.append(laygen.relplace(name = \"I\" + objectname_pfix + 'TAPL'+str(i), templatename = tap_name,\n gridname = pg, refinstname = itapl[-1].name, transform=tf, shape=np.array([1,1]),\n direction = 'top', template_libname=templib_logic))\n 
itapr.append(laygen.relplace(name = \"I\" + objectname_pfix + 'TAPR'+str(i), templatename = tap_name,\n gridname = pg, refinstname = itapr[-1].name, transform=tf, shape=np.array([1,1]),\n direction = 'top', template_libname=templib_logic))\n if i==1: #Reference FF: FFOUT1\n iffout.append(laygen.place(name = \"I\" + objectname_pfix + 'FFOUT1', templatename = ff_name,\n gridname = pg, xy=FF0_origin, transform=tf, shape=np.array([1,1]), template_libname = templib_logic))\n else:\n iffout.append(laygen.relplace(name = \"I\" + objectname_pfix + 'FFOUT'+str(i), templatename = ff_name,\n gridname = pg, refinstname = iffout[-1].name, transform=tf, shape=np.array([1,1]),\n direction = 'top', template_libname=templib_logic))\n refi = iffout[-1].name\n iffin.append(laygen.relplace(name = \"I\" + objectname_pfix + 'FFIN'+str(i), templatename = ff_name,\n gridname = pg, refinstname = refi, transform=tf, shape=np.array([1,1]),\n template_libname=templib_logic))\n refi2 = iffin[-1].name\n iffdiv.append(laygen.relplace(name = \"I\" + objectname_pfix + 'FFDIV'+str(i), templatename = ff_rst_name,\n gridname = pg, refinstname = refi2, transform=tf, shape=np.array([1,1]),\n template_libname=templib_logic))\n if num_flop == 2: #Layout height reduced by half\n for i in range(num_row):\n if i%2==0: tf='R0'\n else: tf='MX'\n if i==0: #Low for clock buffers \n itapl.append(laygen.place(name = \"I\" + objectname_pfix + 'TAPL0', templatename = tap_name,\n gridname = pg, xy=tap_origin, transform=tf, shape=np.array([1,1]), template_libname = templib_logic))\n itapr.append(laygen.place(name = \"I\" + objectname_pfix + 'TAPR0', templatename = tap_name,\n gridname = pg, xy=tapr_origin, transform=tf, shape=np.array([1,1]), template_libname = templib_logic))\n idivbuf.append(laygen.place(name = \"I\" + objectname_pfix + 'DIVBUF32x', templatename = 'inv_32x',\n gridname = pg, xy=array_origin, transform=tf, shape=np.array([1,1]), template_libname = templib_logic))\n idivbuf.append(laygen.relplace(name = \"I\" + objectname_pfix + 'DIVBUF8x', templatename = 'inv_8x',\n gridname = pg, refinstname = idivbuf[-1].name, transform=tf, shape=np.array([1,1]),\n template_libname=templib_logic))\n idivbuf.append(laygen.relplace(name = \"I\" + objectname_pfix + 'DIVBUF2x', templatename = 'inv_2x',\n gridname = pg, refinstname = idivbuf[-1].name, transform=tf, shape=np.array([1,1]),\n template_libname=templib_logic))\n idivbuf.append(laygen.relplace(name = \"I\" + objectname_pfix + 'DIVBUF1x', templatename = 'inv_1x',\n gridname = pg, refinstname = idivbuf[-1].name, transform=tf, shape=np.array([1,1]),\n template_libname=templib_logic))\n iclkbuf.append(laygen.relplace(name = \"I\" + objectname_pfix + 'CLKBUF1x', templatename = 'inv_1x',\n gridname = pg, refinstname = idivbuf[3].name, transform=tf, shape=np.array([1,1]), xy=np.array([0,0]),\n template_libname=templib_logic))\n iclkbuf.append(laygen.relplace(name = \"I\" + objectname_pfix + 'CLKBUF2x', templatename = 'inv_2x',\n gridname = pg, refinstname = iclkbuf[-1].name, transform=tf, shape=np.array([1,1]),\n template_libname=templib_logic))\n iclkbuf.append(laygen.relplace(name = \"I\" + objectname_pfix + 'CLKBUF8x', templatename = 'inv_8x',\n gridname = pg, refinstname = iclkbuf[-1].name, transform=tf, shape=np.array([1,1]),\n template_libname=templib_logic))\n iclkbuf.append(laygen.relplace(name = \"I\" + objectname_pfix + 'CLKBUF32x', templatename = 'inv_32x',\n gridname = pg, refinstname = iclkbuf[-1].name, transform=tf, shape=np.array([1,1]),\n template_libname=templib_logic))\n 
else:\n itapl.append(laygen.relplace(name = \"I\" + objectname_pfix + 'TAPL'+str(i), templatename = tap_name,\n gridname = pg, refinstname = itapl[-1].name, transform=tf, shape=np.array([1,1]),\n direction = 'top', template_libname=templib_logic))\n itapr.append(laygen.relplace(name = \"I\" + objectname_pfix + 'TAPR'+str(i), templatename = tap_name,\n gridname = pg, refinstname = itapr[-1].name, transform=tf, shape=np.array([1,1]),\n direction = 'top', template_libname=templib_logic))\n if i==1: #Reference FF: FFOUT1 and FFOUT2\n iffout.append(laygen.place(name = \"I\" + objectname_pfix + 'FFOUT1', templatename = ff_name,\n gridname = pg, xy=FF0_origin, transform=tf, shape=np.array([1,1]), template_libname = templib_logic))\n iffout.append(laygen.relplace(name = \"I\" + objectname_pfix + 'FFOUT2', templatename = ff_name,\n gridname = pg, refinstname = iffout[0].name, transform=tf, shape=np.array([1,1]),\n direction = 'right', template_libname=templib_logic))\n elif i==(num_row-1): #The last low depending on num_des: even or odd\n iffout.append(laygen.relplace(name = \"I\" + objectname_pfix + 'FFOUT'+str(2*i-1), templatename = ff_name,\n gridname = pg, refinstname = iffout[-2].name, transform=tf, shape=np.array([1,1]),\n direction = 'top', template_libname=templib_logic))\n if num_des%2==0: #If not, space should be placed rather than FF\n iffout.append(laygen.relplace(name = \"I\" + objectname_pfix + 'FFOUT'+str(2*i), templatename = ff_name,\n gridname = pg, refinstname = iffout[-1].name, transform=tf, shape=np.array([1,1]),\n direction = 'right', template_libname=templib_logic))\n else: #FFOUTs will be the reference for FFIN and FFDIV\n iffout.append(laygen.relplace(name = \"I\" + objectname_pfix + 'FFOUT'+str(2*i-1), templatename = ff_name,\n gridname = pg, refinstname = iffout[-2].name, transform=tf, shape=np.array([1,1]),\n direction = 'top', template_libname=templib_logic))\n iffout.append(laygen.relplace(name = \"I\" + objectname_pfix + 'FFOUT'+str(2*i), templatename = ff_name,\n gridname = pg, refinstname = iffout[-1].name, transform=tf, shape=np.array([1,1]),\n direction = 'right', template_libname=templib_logic))\n for j in range(num_des): #Relplace of FFIN and the left side of FFDIV\n if iffout[j].transform=='MX': tf='MX'\n else: tf='R0'\n iffin.append(laygen.relplace(name = \"I\" + objectname_pfix + 'FFIN'+str(j+1), templatename = ff_name,\n gridname = pg, refinstname = iffout[j].name, transform=tf, shape=np.array([1,1]),\n xy=np.array([laygen.get_xy(obj=laygen.get_template(name = ff_name, libname = templib_logic), gridname = pg)[0], 0]), template_libname=templib_logic))\n if j%2==0:\n iffdiv.append(laygen.relplace(name = \"I\" + objectname_pfix + 'FFDIV'+str(int(j/2+1)), templatename = ff_rst_name,\n gridname = pg, refinstname = iffin[j].name, transform=tf, shape=np.array([1,1]),\n xy=np.array([laygen.get_xy(obj=laygen.get_template(name = ff_name, libname = templib_logic), gridname = pg)[0], 0]), template_libname=templib_logic))\n for i in range(num_row, num_des+1): #Right side of FFDIV\n if num_des%2==1:\n if i%2==0: tf='R0'\n else: tf='MX'\n if num_des%2==0:\n if i%2==0: tf='MX'\n else: tf='R0'\n if i==num_row: #Even: relplaced by top FFDIV, odd: relplaced by second FFDIV from top\n iffdiv.append(laygen.relplace(name = \"I\" + objectname_pfix + 'FFDIV'+str(i), templatename = ff_rst_name,\n gridname = pg, refinstname = iffdiv[int(num_des/2)-1].name, transform=tf, shape=np.array([1,1]),\n direction = 'right', template_libname=templib_logic))\n else:\n 
iffdiv.append(laygen.relplace(name = \"I\" + objectname_pfix + 'FFDIV'+str(i), templatename = ff_rst_name,\n gridname = pg, refinstname = iffdiv[-1].name, transform=tf, shape=np.array([1,1]),\n direction = 'bottom', template_libname=templib_logic))\n\n #Space placement at the first row\n space_name = 'space_1x'\n space4x_name = 'space_4x'\n space_width = laygen.get_xy(obj=laygen.get_template(name = space_name, libname = templib_logic), gridname = pg)[0]\n space4_width = laygen.get_xy(obj=laygen.get_template(name = space4x_name, libname = templib_logic), gridname = pg)[0]\n inv_width=[]\n for i in (1,2,8,32):\n inv_width.append(laygen.get_xy(obj=laygen.get_template(name = 'inv_' + str(i) + 'x', libname = templib_logic), gridname = pg)[0])\n blank_width = tapr_origin[0] - array_origin[0] - 2 * (inv_width[0]+inv_width[1]+inv_width[2]+inv_width[3])\n m_space4 = int(blank_width / space4_width)\n m_space1 = int((blank_width-m_space4*space4_width)/space_width)\n ispace4=laygen.relplace(name = \"I\" + objectname_pfix + 'SPACE4', templatename = space4x_name,\n gridname = pg, refinstname = iclkbuf[3].name, transform='R0', shape=np.array([m_space4-1,1]),\n template_libname=templib_logic)\n ispace1=laygen.relplace(name = \"I\" + objectname_pfix + 'SPACE1', templatename = space_name,\n gridname = pg, refinstname = ispace4.name, transform='R0', shape=np.array([m_space1+4,1]),\n template_libname=templib_logic)\n #Space placement at the last row for odd num_des\n m_ff_space = int(laygen.get_xy(obj=laygen.get_template(name = ff_name, libname = templib_logic), gridname = pg)[0] / space_width)\n m_ffrst_space = int(laygen.get_xy(obj=laygen.get_template(name = ff_rst_name, libname = templib_logic), gridname = pg)[0] / space_width)\n if (num_des%2)==1:\n if num_flop==2:\n ispace_out=laygen.relplace(name = \"I\" + objectname_pfix + 'SPACEOUT', templatename = space_name,\n gridname = pg, refinstname = iffout[num_des-1].name, transform=iffout[num_des-1].transform, shape=np.array([m_ff_space,1]),\n template_libname=templib_logic)\n ispace_in=laygen.relplace(name = \"I\" + objectname_pfix + 'SPACEIN', templatename = space_name,\n gridname = pg, refinstname = iffin[num_des-1].name, transform=iffin[num_des-1].transform, shape=np.array([m_ff_space,1]),\n template_libname=templib_logic)\n ispace_div=laygen.relplace(name = \"I\" + objectname_pfix + 'SPACEDIV', templatename = space_name,\n gridname = pg, refinstname = iffdiv[int(num_des/2)].name, transform=iffdiv[int(num_des/2)].transform, shape=np.array([m_ffrst_space,1]),\n template_libname=templib_logic)\n \n #Internal Pins\n ffin_in_xy=[]\n ffin_in_xy45=[]\n ffin_out_xy=[]\n ffout_in_xy=[]\n ffout_out_xy=[]\n ffdiv_in_xy=[]\n ffdiv_in_xy45=[]\n ffdiv_out_xy=[]\n ffdiv_rst_xy=[]\n ffdiv_st_xy=[]\n for i in range(num_des):\n ffin_in_xy.append(laygen.get_inst_pin_xy(iffin[i].name, 'I', rg_m3m4))\n ffin_out_xy.append(laygen.get_inst_pin_xy(iffin[i].name, 'O', rg_m3m4))\n ffout_in_xy.append(laygen.get_inst_pin_xy(iffout[i].name, 'I', rg_m3m4))\n ffout_out_xy.append(laygen.get_inst_pin_xy(iffout[i].name, 'O', rg_m3m4))\n ffdiv_in_xy.append(laygen.get_inst_pin_xy(iffdiv[i].name, 'I', rg_m3m4))\n ffdiv_out_xy.append(laygen.get_inst_pin_xy(iffdiv[i].name, 'O', rg_m3m4))\n ffdiv_rst_xy.append(laygen.get_inst_pin_xy(iffdiv[i].name, 'RST', rg_m3m4))\n ffdiv_st_xy.append(laygen.get_inst_pin_xy(iffdiv[i].name, 'ST', rg_m3m4))\n ffin_in_xy45.append(laygen.get_inst_pin_xy(iffin[i].name, 'I', rg_m4m5))\n ffdiv_in_xy45.append(laygen.get_inst_pin_xy(iffdiv[i].name, 'I', rg_m4m5))\n # 
Route\n for i in range(num_des):\n if num_flop==1: #Routing offset selection for rows in R0 and MX\n if iffin[i].transform=='MX': offset=1\n if iffin[i].transform=='R0': offset=4\n if iffdiv[i].transform=='MX': offset_div=1\n if iffdiv[i].transform=='R0': offset_div=3\n if num_flop==2: #Offset_div would be different because of different placement\n if i in range(int((num_des+1)/2)):\n if iffin[i].transform=='MX': \n if i%2==1:\n offset=1\n else:\n offset=8\n if iffin[i].transform=='R0': offset=3+i%2\n if iffdiv[i].transform=='MX': offset_div=1\n if iffdiv[i].transform=='R0': offset_div=3\n else:\n if iffin[i].transform=='MX':\n if i%2==1:\n offset=1\n else:\n offset=8\n if iffin[i].transform=='R0': offset=3+i%2\n if iffdiv[i].transform=='MX': offset_div=10\n if iffdiv[i].transform=='R0': offset_div=13\n if i in range(num_des-1):\n [rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], #in-to-in\n ffin_out_xy[i][0], ffin_in_xy[i+1][0], ffin_out_xy[i][1][1]+7-offset, rg_m3m4) \n [rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], #div-to-div \n ffdiv_out_xy[i][0], ffdiv_in_xy[i+1][0]-np.array([0,0]), ffdiv_out_xy[i][1][1]+7-offset_div, rg_m3m4)\n #[rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], \n # ffdiv_in_xy[i+1][0], ffdiv_in_xy[i+1][0]-np.array([0,0]), ffdiv_in_xy[i+1][0][1], rg_m3m4)\n [rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], #in-to-out\n ffin_out_xy[i][0], ffout_in_xy[i][0], ffin_out_xy[i][1][1]+7-offset, rg_m3m4)\n if m_des_dff==1:\n [rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], #div feedback\n ffdiv_out_xy[num_des-1][0], ffdiv_in_xy[0][0]+np.array([-2,0]), ffdiv_out_xy[num_des-1][1][1]+7-offset_div,\n rg_m3m4, layerv1=laygen.layers['metal'][3], gridname1=rg_m3m4)\n [rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], #M3-to-M5\n ffdiv_in_xy[0][0], ffdiv_in_xy[0][1]+np.array([-2,0]), ffdiv_in_xy[0][0][1], rg_m3m4, layerv1=laygen.layers['metal'][3], gridname1=rg_m3m4)\n else:\n [rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], #div feedback\n ffdiv_out_xy[num_des-1][0], ffdiv_in_xy[0][0]+np.array([-2,0]), ffdiv_out_xy[num_des-1][1][1]+7-offset_div, \n rg_m3m4, layerv1=laygen.layers['metal'][3], gridname1=rg_m3m4)\n [rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], #M3-to-M5\n ffdiv_in_xy[0][0], ffdiv_in_xy[0][1]+np.array([-2,0]), ffdiv_in_xy[0][0][1], rg_m3m4, layerv1=laygen.layers['metal'][3], gridname1=rg_m3m4)\n #CLK Buffer\n for i in range(3):\n [rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4],\n laygen.get_inst_pin_xy(iclkbuf[i].name, 'O', rg_m3m4)[0], laygen.get_inst_pin_xy(iclkbuf[i + 1].name, 'I', rg_m3m4)[0],\n laygen.get_inst_pin_xy(iclkbuf[i].name, 'O', rg_m3m4)[0][1] + i % 2, rg_m3m4)\n [rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4],\n laygen.get_inst_pin_xy(idivbuf[3 - i].name, 'O', rg_m3m4)[0], laygen.get_inst_pin_xy(idivbuf[2 - i].name, 'I', rg_m3m4)[0],\n laygen.get_inst_pin_xy(idivbuf[3 - i].name, 'O', rg_m3m4)[0][1] + i % 2, rg_m3m4)\n\n #DIVCLK Route\n [rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4],\n laygen.get_inst_pin_xy(idivbuf[3].name, 'I', rg_m3m4)[0], laygen.get_inst_pin_xy(iffdiv[0].name, 'I', rg_m3m4)[0],\n 
laygen.get_inst_pin_xy(idivbuf[3].name, 'I', rg_m3m4)[0][1] + 3, rg_m3m4)\n for i in range(num_des):\n [rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4],\n laygen.get_inst_pin_xy(idivbuf[0].name, 'O', rg_m3m4)[0], laygen.get_inst_pin_xy(iffout[i].name, 'CLK', rg_m3m4)[0],\n laygen.get_inst_pin_xy(idivbuf[0].name, 'O', rg_m3m4)[0][1] + 5, rg_m3m4)\n [rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4],\n laygen.get_inst_pin_xy(iclkbuf[3].name, 'O', rg_m3m4)[0], laygen.get_inst_pin_xy(iffin[i].name, 'CLK', rg_m3m4)[0],\n laygen.get_inst_pin_xy(iclkbuf[3].name, 'O', rg_m3m4)[0][1] + 6, rg_m3m4)\n [rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4],\n laygen.get_inst_pin_xy(iclkbuf[3].name, 'O', rg_m3m4)[0], laygen.get_inst_pin_xy(iffdiv[i].name, 'CLK', rg_m3m4)[0],\n laygen.get_inst_pin_xy(iclkbuf[3].name, 'O', rg_m3m4)[0][1] + 6, rg_m3m4)\n #RST Route\n for i in range(num_des):\n if i in range(int((num_des+1)/2)): #First half of FFDIVs\n if not i==int((num_des+1)/2)-1:\n rrst=laygen.route(None, laygen.layers['metal'][3], xy0=ffdiv_rst_xy[i][0], xy1=ffdiv_rst_xy[i+1][0], gridname0=rg_m3m4)\n rst=laygen.route(None, laygen.layers['metal'][3], xy0=ffdiv_st_xy[i][0], xy1=ffdiv_st_xy[i+1][0], gridname0=rg_m3m4)\n #[rrstv, rrsth] = laygen.route_vh(laygen.layers['metal'][3], laygen.layers['metal'][4], \n # ffdiv_rst_xy[i][0], ffdiv_rst_xy[i+1][0], rg_m3m4)\n #[rstv, rsth] = laygen.route_vh(laygen.layers['metal'][3], laygen.layers['metal'][4], \n # ffdiv_st_xy[i][0], ffdiv_st_xy[i+1][0], rg_m3m4)\n else:\n [rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], \n ffdiv_rst_xy[i][0], ffdiv_st_xy[i+1][0], ffdiv_rst_xy[i][1][1]+5, rg_m3m4)\n else: #Second half of FFDIVs\n if not i==num_des-1:\n rst=laygen.route(None, laygen.layers['metal'][3], xy0=ffdiv_st_xy[i][0], xy1=ffdiv_st_xy[i+1][0], gridname0=rg_m3m4)\n rrst=laygen.route(None, laygen.layers['metal'][3], xy0=ffdiv_rst_xy[i][0], xy1=ffdiv_rst_xy[i+1][0], gridname0=rg_m3m4)\n #[rrstv, rrsth] = laygen.route_vh(laygen.layers['metal'][3], laygen.layers['metal'][4], \n # ffdiv_rst_xy[i][0], ffdiv_rst_xy[i+1][0], rg_m3m4)\n #[rstv, rsth] = laygen.route_vh(laygen.layers['metal'][3], laygen.layers['metal'][4], \n # ffdiv_st_xy[i][0], ffdiv_st_xy[i+1][0], rg_m3m4)\n [rh0, rv0] = laygen.route_hv(laygen.layers['metal'][2], laygen.layers['metal'][3],\n laygen.get_inst_pin_xy(iffdiv[0].name, 'VSS', rg_m2m3)[0], laygen.get_inst_pin_xy(iffdiv[0].name, 'ST', rg_m2m3)[0], rg_m2m3)\n [rh0, rv0] = laygen.route_hv(laygen.layers['metal'][2], laygen.layers['metal'][3],\n laygen.get_inst_pin_xy(iffdiv[num_des - 1].name, 'VSS', rg_m2m3)[0], laygen.get_inst_pin_xy(iffdiv[num_des - 1].name, 'RST', rg_m2m3)[0], rg_m2m3)\n \n #Pin\n clkin_xy=laygen.get_inst_pin_xy(iclkbuf[0].name, 'I', rg_m3m4)\n rclkin=laygen.route(None, laygen.layers['metal'][3], xy0=clkin_xy[0], xy1=np.array([clkin_xy[0][0],0]), gridname0=rg_m3m4)\n laygen.boundary_pin_from_rect(rclkin, rg_m3m4, \"clk\", laygen.layers['pin'][3], size=0, direction='left')\n divin_xy=laygen.get_inst_pin_xy(idivbuf[len(divbuf_list)-1].name, 'I', rg_m3m4)\n rdivin=laygen.route(None, laygen.layers['metal'][3], xy0=divin_xy[0], xy1=np.array([divin_xy[0][0],0]), gridname0=rg_m3m4)\n laygen.boundary_pin_from_rect(rdivin, rg_m3m4, \"div<0>\", laygen.layers['pin'][3], size=0, direction='left')\n din_xy34=laygen.get_inst_pin_xy(iffin[0].name, 'I', rg_m3m4)\n 
din_xy45=laygen.get_inst_pin_xy(iffin[0].name, 'I', rg_m4m5)\n [rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], \n din_xy34[0], np.array([din_xy34[0][0]-1,0]), din_xy34[0][1], \n rg_m3m4, layerv1=laygen.layers['metal'][3], gridname1=rg_m3m4)\n rdummy = laygen.route(None, laygen.layers['metal'][4], xy0=din_xy34[0], xy1=din_xy34[0]+np.array([-4,0]), gridname0=rg_m3m4)\n laygen.boundary_pin_from_rect(rv1, rg_m3m4, \"in\", laygen.layers['pin'][3], size=4, direction='bottom')\n for i in range(num_des):\n datao_xy = laygen.get_inst_pin_xy(iffout[i].name, 'O', rg_m3m4)\n laygen.pin(name='dout<'+str(i)+'>', layer=laygen.layers['pin'][3], xy=datao_xy, gridname=rg_m3m4)\n clkdiv_xy = laygen.get_inst_pin_xy(iffout[-1].name, 'CLK', rg_m3m4)\n laygen.pin(name='clk_div', layer=laygen.layers['pin'][3], xy=clkdiv_xy, gridname=rg_m3m4)\n rst_xy34=laygen.get_inst_pin_xy(iffdiv[0].name, 'RST', rg_m3m4)\n rst_xy45=laygen.get_inst_pin_xy(iffdiv[0].name, 'RST', rg_m4m5)\n [rv0, rh0, rv1] = laygen.route_vhv(laygen.layers['metal'][3], laygen.layers['metal'][4], \n rst_xy34[0], np.array([rst_xy34[0][0]-2,0]), rst_xy34[0][1], \n rg_m3m4, layerv1=laygen.layers['metal'][3], gridname1=rg_m3m4)\n rdummy = laygen.route(None, laygen.layers['metal'][4], xy0=rst_xy34[0], xy1=rst_xy34[0]+np.array([-4,0]), gridname0=rg_m3m4)\n laygen.boundary_pin_from_rect(rv1, rg_m3m4, \"RST\", laygen.layers['pin'][3], size=4, direction='bottom')\n\n # power pin\n pwr_dim=laygen.get_xy(obj =itapl[-1].template, gridname=rg_m2m3)\n rvdd = []\n rvss = []\n if num_row%2==0: rp1='VSS'\n else: rp1='VDD'\n print(int(pwr_dim[0]/2))\n for i in range(0, int(pwr_dim[0]/2)):\n rvdd.append(laygen.route(None, laygen.layers['metal'][3], xy0=np.array([2*i, 0]), xy1=np.array([2*i, 0]), gridname0=rg_m2m3,\n refinstname0=itapl[0].name, refpinname0='VSS', refinstindex0=np.array([0, 0]),\n refinstname1=itapl[-1].name, refpinname1=rp1, refinstindex1=np.array([0, 0])))\n rvss.append(laygen.route(None, laygen.layers['metal'][3], xy0=np.array([2*i+1, 0]), xy1=np.array([2*i+1, 0]), gridname0=rg_m2m3,\n refinstname0=itapl[0].name, refpinname0='VSS', refinstindex0=np.array([0, 0]),\n refinstname1=itapl[-1].name, refpinname1=rp1, refinstindex1=np.array([0, 0])))\n laygen.pin(name = 'VDD'+str(2*i-2), layer = laygen.layers['pin'][3], refobj = rvdd[-1], gridname=rg_m2m3, netname='VDD')\n laygen.pin(name = 'VSS'+str(2*i-2), layer = laygen.layers['pin'][3], refobj = rvss[-1], gridname=rg_m2m3, netname='VSS')\n rvdd.append(laygen.route(None, laygen.layers['metal'][3], xy0=np.array([2*i+2+1, 0]), xy1=np.array([2*i+2+1, 0]), gridname0=rg_m2m3,\n refinstname0=itapr[0].name, refpinname0='VSS', refinstindex0=np.array([0, 0]),\n refinstname1=itapr[-1].name, refpinname1=rp1, refinstindex1=np.array([0, 0])))\n rvss.append(laygen.route(None, laygen.layers['metal'][3], xy0=np.array([2*i+2, 0]), xy1=np.array([2*i+2, 0]), gridname0=rg_m2m3,\n refinstname0=itapr[0].name, refpinname0='VSS', refinstindex0=np.array([0, 0]),\n refinstname1=itapr[-1].name, refpinname1=rp1, refinstindex1=np.array([0, 0])))\n laygen.pin(name = 'VDD'+str(2*i-1), layer = laygen.layers['pin'][3], refobj = rvdd[-1], gridname=rg_m2m3, netname='VDD')\n laygen.pin(name = 'VSS'+str(2*i-1), layer = laygen.layers['pin'][3], refobj = rvss[-1], gridname=rg_m2m3, netname='VSS')\n \n for i in range(num_row):\n for j in range(0, int(pwr_dim[0]/2)):\n rvdd.append(laygen.route(None, laygen.layers['metal'][3], xy0=np.array([2*j, 0]), xy1=np.array([2*j, 0]), gridname0=rg_m2m3,\n 
refinstname0=itapl[i].name, refpinname0='VDD', refinstindex0=np.array([0, 0]), via0=[[0, 0]],\n refinstname1=itapl[i].name, refpinname1='VSS', refinstindex1=np.array([0, 0])))\n rvss.append(laygen.route(None, laygen.layers['metal'][3], xy0=np.array([2*j+1, 0]), xy1=np.array([2*j+1, 0]), gridname0=rg_m2m3,\n refinstname0=itapl[i].name, refpinname0='VDD', refinstindex0=np.array([0, 0]),\n refinstname1=itapl[i].name, refpinname1='VSS', refinstindex1=np.array([0, 0]), via1=[[0, 0]]))\n rvdd.append(laygen.route(None, laygen.layers['metal'][3], xy0=np.array([2*j+2+1, 0]), xy1=np.array([2*j+2+1, 0]), gridname0=rg_m2m3,\n refinstname0=itapr[i].name, refpinname0='VDD', refinstindex0=np.array([0, 0]), via0=[[0, 0]],\n refinstname1=itapr[i].name, refpinname1='VSS', refinstindex1=np.array([0, 0])))\n rvss.append(laygen.route(None, laygen.layers['metal'][3], xy0=np.array([2*j+2, 0]), xy1=np.array([2*j+2, 0]), gridname0=rg_m2m3,\n refinstname0=itapr[i].name, refpinname0='VDD', refinstindex0=np.array([0, 0]),\n refinstname1=itapr[i].name, refpinname1='VSS', refinstindex1=np.array([0, 0]), via1=[[0, 0]]))\n \nif __name__ == '__main__':\n laygen = laygo.GridLayoutGenerator(config_file=\"laygo_config.yaml\")\n\n import imp\n try:\n imp.find_module('bag')\n laygen.use_phantom = False\n except ImportError:\n laygen.use_phantom = True\n\n tech=laygen.tech\n utemplib = tech+'_microtemplates_dense'\n logictemplib = tech+'_logic_templates'\n laygen.load_template(filename=tech+'_microtemplates_dense_templates.yaml', libname=utemplib)\n laygen.load_grid(filename=tech+'_microtemplates_dense_grids.yaml', libname=utemplib)\n laygen.load_template(filename=logictemplib+'.yaml', libname=logictemplib)\n laygen.templates.sel_library(utemplib)\n laygen.grids.sel_library(utemplib)\n\n #library load or generation\n workinglib = 'serdes_generated'\n laygen.add_library(workinglib)\n laygen.sel_library(workinglib)\n if os.path.exists(workinglib+'.yaml'): #generated layout file exists\n laygen.load_template(filename=workinglib+'.yaml', libname=workinglib)\n laygen.templates.sel_library(utemplib)\n\n #grid\n pg = 'placement_basic' #placement grid\n rg_m1m2 = 'route_M1_M2_cmos'\n rg_m1m2_thick = 'route_M1_M2_thick'\n rg_m2m3 = 'route_M2_M3_cmos'\n rg_m3m4 = 'route_M3_M4_basic'\n rg_m4m5 = 'route_M4_M5_basic'\n rg_m5m6 = 'route_M5_M6_basic'\n rg_m1m2_pin = 'route_M1_M2_basic'\n rg_m2m3_pin = 'route_M2_M3_basic'\n\n\n #display\n #laygen.display()\n #laygen.templates.display()\n #laygen.save_template(filename=workinglib+'_templates.yaml', libname=workinglib)\n\n mycell_list = []\n \n #load from preset\n load_from_file=True\n yamlfile_spec=\"serdes_spec.yaml\"\n yamlfile_size=\"serdes_size.yaml\"\n if load_from_file==True:\n with open(yamlfile_spec, 'r') as stream:\n specdict = yaml.load(stream)\n with open(yamlfile_size, 'r') as stream:\n sizedict = yaml.load(stream)\n cell_name='des_1to'+str(specdict['num_des'])\n num_des=specdict['num_des']\n num_flop=specdict['num_flop']\n m_des_dff=sizedict['m_des_dff']\n clkbuf_list=sizedict['des_clkbuf_list']\n divbuf_list=sizedict['des_divbuf_list']\n\n print(cell_name+\" generating\")\n mycell_list.append(cell_name)\n laygen.add_cell(cell_name)\n laygen.sel_cell(cell_name)\n generate_deserializer(laygen, objectname_pfix='DES', templib_logic=logictemplib, \n placement_grid=pg, routing_grid_m2m3=rg_m2m3, routing_grid_m4m5=rg_m4m5, num_des=num_des,\n num_flop=num_flop, m_des_dff=m_des_dff, origin=np.array([0, 0]))\n laygen.add_template_from_cell()\n\n 
laygen.save_template(filename=workinglib+'.yaml', libname=workinglib)\n #bag export, if bag does not exist, gds export\n import imp\n try:\n imp.find_module('bag')\n import bag\n prj = bag.BagProject()\n for mycell in mycell_list:\n laygen.sel_cell(mycell)\n laygen.export_BAG(prj, array_delimiter=['[', ']'])\n except ImportError:\n laygen.export_GDS('output.gds', cellname=mycell_list, layermapfile=tech+\".layermap\") # change layermapfile\n" ]
[ [ "numpy.array" ] ]
cypher-me/HAS-Qualifier-Challenges
[ "bb795303716155dad4a930880a58fecb5d9b50c5" ]
[ "centroids/challenge/ImageGen.py" ]
[ "from scipy import signal\nfrom scipy import misc\nfrom scipy import stats as st\nimport numpy as np\n\nW = 128\nL = 128\nBody_Width = 3\nBorder = Body_Width+1\nPoints = 10\nNoise_Max = 10\nBody_Separation = 15\nBody_Scale = 30\nOvScale = 3\n\n\ndef gkern(kernlen=21, nsig=3):\n ''' 2D Gaussian Kernel. '''\n x = np.linspace(-nsig, nsig, kernlen+1)\n kern1d = np.diff(st.norm.cdf(x))\n kern2d = np.outer(kern1d, kern1d)\n return kern2d/kern2d.sum()\n\ndef genBackground():\n return np.random.rand(W,L)*(Noise_Max)\n\ndef genStarCoords():\n while True:\n star_cords = np.random.rand(Points,3) # N x [x,y,m]\n star_cords = star_cords * np.array([[ W-2*Border , L-2*Border , Body_Scale ]]) \n star_cords = star_cords + np.ones((Points,3)) * np.array([[ Border, Border, Body_Separation ]])\n bad = False\n for ii in range(0, Points-1):\n x0, y0, m0 = star_cords[ii,:]\n for jj in range(ii+1, Points):\n x1, y1, m1 = star_cords[jj,:]\n if np.abs(x0 - x1) < 4*Border and np.abs(y0 - y1) < 4*Border:\n '''\n x = np.random.random() * (W-2*Border) + Border\n y = np.random.random() * (W-2*Border) + Border\n star_cords[jj,0] = x\n star_cords[jj,1] = y\n '''\n \n bad = True\n break\n \n if np.abs(m0 - m1) < 5:\n star_cords[jj,2] = m1 + 5\n if not bad:\n break\n return star_cords\n\ndef starGauss(OvScale):\n gausKern = gkern(Body_Width*OvScale, Body_Width/(OvScale/3))\n gausKern = gausKern * (Body_Scale/np.max(np.max(gausKern)))\n return gausKern\n\ndef genImage(star_cords):\n # Overscale it \n spots_O = np.zeros((W*OvScale, L*OvScale))\n \n for (x,y,m) in star_cords:\n x = OvScale * (x+0.5)\n y = OvScale * (y+0.5) \n x_0, y_0 = map(int, np.floor([x,y]))\n x_1, y_1 = map(int, np.ceil([x,y]))\n spots_O[x_0:x_1, y_0:y_1] = m\n\n gausKern = starGauss(OvScale)\n spots_B = signal.convolve2d(spots_O, gausKern, boundary='symm', mode='same')\n\n spots = np.zeros((W,L))\n for (x,y,m) in star_cords:\n x = int(x)\n y = int(y)\n x0 = max(0, x-Body_Width-1)\n x1 = min(W, x+Body_Width+1)\n y0 = max(0, y-Body_Width-1)\n y1 = min(L, y+Body_Width+1)\n for ii in range(x0,x1+1):\n for jj in range(y0, y1+1):\n spots[ii,jj] = np.mean(spots_B[ii*OvScale:(ii+1)*OvScale, jj*OvScale:(jj+1)*OvScale])\n \n final = np.trunc( np.clip(genBackground() + spots, 0, 255) )\n return final\n" ]
[ [ "numpy.ones", "numpy.ceil", "numpy.zeros", "numpy.mean", "numpy.floor", "numpy.abs", "numpy.max", "scipy.signal.convolve2d", "numpy.random.rand", "numpy.array", "numpy.linspace", "numpy.outer", "scipy.stats.norm.cdf" ] ]
xuanxu/py-pde
[ "de33d938aea8680eff872ae1b64569895662a248" ]
[ "pde/trackers/trackers.py" ]
[ "\"\"\"\nModule defining classes for tracking results from simulations.\n\nThe trackers defined in this module are:\n\n.. autosummary::\n :nosignatures:\n\n CallbackTracker\n ProgressTracker\n PrintTracker\n PlotTracker\n DataTracker\n SteadyStateTracker\n RuntimeTracker\n ConsistencyTracker\n MaterialConservationTracker\n\n.. codeauthor:: David Zwicker <[email protected]>\n\"\"\"\n\nfrom datetime import timedelta\nimport inspect\nimport sys\nimport time\nfrom typing import Callable, Optional, Union, IO, List, Any # @UnusedImport\n\nimport numpy as np\n\nfrom .base import TrackerBase, InfoDict, FinishedSimulation, Real\nfrom .intervals import IntervalData, RealtimeIntervals\nfrom ..fields.base import FieldBase\nfrom ..fields import FieldCollection\nfrom ..tools.parse_duration import parse_duration\nfrom ..tools.misc import get_progress_bar_class\n\n\n\nclass CallbackTracker(TrackerBase):\n \"\"\" Tracker that calls a function periodically \"\"\"\n \n def __init__(self, func: Callable,\n interval: IntervalData = 1):\n \"\"\" \n Args:\n func: The function to call periodically. The function signature\n should be `(state)` or `(state, time)`, where `state` contains\n the current state as an instance of\n :class:`~pde.fields.FieldBase` and `time` is a\n float value indicating the current time. Note that only a view\n of the state is supplied, implying that a copy needs to be made\n if the data should be stored.\n interval: |Arg_tracker_interval|\n \"\"\"\n super().__init__(interval=interval)\n self._callback = func\n self._num_args = len(inspect.signature(func).parameters)\n if not 0 < self._num_args < 3:\n raise ValueError('`func` must be a function accepting one or two '\n f'arguments, not {self._num_args}') \n \n \n def handle(self, field: FieldBase, t: float) -> None:\n \"\"\" handle data supplied to this tracker\n \n Args:\n field (:class:`~pde.fields.FieldBase`):\n The current state of the simulation\n t (float): The associated time\n \"\"\"\n if self._num_args == 1:\n self._callback(field)\n else:\n self._callback(field, t)\n\n\n\nclass ProgressTracker(TrackerBase):\n \"\"\" Tracker that shows the progress of the simulation \"\"\"\n \n name = 'progress'\n\n \n def __init__(self, interval: IntervalData = None,\n ndigits: int = 5, leave: bool = True):\n \"\"\"\n Args:\n interval: |Arg_tracker_interval|\n The default value `None` updates the progress bar approximately\n every (real) second.\n ndigits (int): The number of digits after the decimal point that are\n shown maximally.\n leave (bool): Whether to leave the progress bar after the simulation\n has finished (default: True)\n \"\"\" \n if interval is None:\n # print every second by default\n interval = RealtimeIntervals(duration=1)\n \n super().__init__(interval=interval)\n self.ndigits = ndigits\n self.leave = leave\n \n\n def initialize(self, field: FieldBase, info: InfoDict = None) -> float:\n \"\"\" initialize the tracker with information about the simulation\n \n Args:\n field (:class:`~pde.fields.FieldBase`):\n An example of the data that will be analyzed by the tracker\n info (dict):\n Extra information from the simulation \n \n Returns:\n float: The first time the tracker needs to handle data\n \"\"\"\n result = super().initialize(field, info)\n \n # get solver information\n controller_info = {} if info is None else info.get('controller', {})\n \n # initialize the progress bar\n pb_cls = get_progress_bar_class()\n self.progress_bar = pb_cls(total=controller_info.get('t_end'),\n initial=controller_info.get('t_start', 0),\n 
leave=self.leave)\n self.progress_bar.set_description('Initializing')\n\n return result\n \n \n def handle(self, field: FieldBase, t: float) -> None:\n \"\"\" handle data supplied to this tracker\n \n Args:\n field (:class:`~pde.fields.FieldBase`):\n The current state of the simulation\n t (float): The associated time\n \"\"\"\n # show an update\n if self.progress_bar.total:\n t_new = min(t, self.progress_bar.total)\n else:\n t_new = t\n self.progress_bar.n = round(t_new, self.ndigits)\n self.progress_bar.set_description('')\n \n \n def finalize(self, info: InfoDict = None) -> None:\n \"\"\" finalize the tracker, supplying additional information\n\n Args:\n info (dict):\n Extra information from the simulation \n \"\"\"\n super().finalize(info)\n self.progress_bar.set_description('')\n\n # limit progress bar to 100%\n controller_info = {} if info is None else info.get('controller', {}) \n t_final = controller_info.get('t_final', -np.inf)\n t_end = controller_info.get('t_end', -np.inf)\n if t_final >= t_end and self.progress_bar.total:\n self.progress_bar.n = self.progress_bar.total\n self.progress_bar.refresh()\n \n if (controller_info.get('successful', False) and self.leave and\n hasattr(self.progress_bar, 'sp')):\n # show progress bar in green if simulation was successful. We\n # need to overwrite the default behavior (and disable the\n # progress bar) since reaching steady state means the simulation\n # was successful even though it did not reach t_final\n try:\n self.progress_bar.sp(bar_style='success')\n except TypeError:\n self.progress_bar.close()\n else:\n self.disable = True\n else:\n self.progress_bar.close()\n \n \n def __del__(self):\n if hasattr(self, 'progress_bar') and not self.progress_bar.disable:\n self.progress_bar.close()\n\n\n\nclass PrintTracker(TrackerBase):\n \"\"\" Tracker that prints data to a stream (default: stdout) \"\"\"\n \n name = 'print'\n \n \n def __init__(self, interval: IntervalData = 1,\n stream: IO[str] = sys.stdout):\n \"\"\"\n \n Args:\n interval: |Arg_tracker_interval|\n stream: The stream used for printing\n \"\"\"\n super().__init__(interval=interval)\n self.stream = stream\n \n \n def handle(self, field: FieldBase, t: float) -> None:\n \"\"\" handle data supplied to this tracker\n \n Args:\n field (:class:`~pde.fields.FieldBase`):\n The current state of the simulation\n t (float): The associated time\n \"\"\"\n data = f\"c={field.data.mean():.3g}±{field.data.std():.3g}\"\n \n self.stream.write(f\"t={t:g}, {data}\\n\")\n self.stream.flush()\n\n\n\nclass PlotTracker(TrackerBase):\n \"\"\" Tracker that plots data on screen, to files, or writes a movie \"\"\"\n \n name = 'plot'\n \n def __init__(self, interval: IntervalData = 1,\n output_file: Optional[str] = None,\n output_folder: Optional[str] = None,\n movie_file: Optional[str] = None,\n quantities=None,\n show: bool = True):\n \"\"\"\n Args:\n interval: |Arg_tracker_interval|\n output_file (str, optional):\n Specifies a single image file, which is updated periodically, so\n that the progress can be monitored (e.g. on a compute cluster)\n output_folder (str, optional):\n Specifies a folder to which all images are written. The files\n will have names with increasing numbers.\n movie_file (str, optional):\n Specifies a filename to which a movie of all the frames is\n written after the simulation.\n quantities:\n |Args_plot_quantities|\n show (bool, optional):\n Determines whether the plot is shown while the simulation is\n running. 
If `False`, the files are created in the background.\n \"\"\"\n super().__init__(interval=interval)\n self.output_file = output_file\n self.output_folder = output_folder\n self.quantities = quantities\n self.show = show\n \n if movie_file is not None or output_folder is not None:\n from ..visualization.movies import Movie\n movie = Movie(filename=movie_file, image_folder=output_folder)\n self.movie: Optional[Movie] = movie\n self.movie._start() # initialize movie\n else:\n self.movie = None\n \n \n def initialize(self, field: FieldBase, info: InfoDict = None) -> float:\n \"\"\" initialize the tracker with information about the simulation\n \n Args:\n field (:class:`~pde.fields.FieldBase`):\n An example of the data that will be analyzed by the tracker\n info (dict):\n Extra information from the simulation\n \n Returns:\n float: The first time the tracker needs to handle data\n \"\"\"\n from ..visualization.plotting import ScalarFieldPlot\n self.plot = ScalarFieldPlot(field, quantities=self.quantities,\n show=self.show)\n \n return super().initialize(field, info=info)\n \n \n def handle(self, field: FieldBase, t: float) -> None:\n \"\"\" handle data supplied to this tracker\n \n Args:\n field (:class:`~pde.fields.FieldBase`):\n The current state of the simulation\n t (float): The associated time\n \"\"\"\n self.plot.show_data(field, title=f'Time {t:g}')\n if self.output_file:\n self.plot.fig.savefig(self.output_file)\n if self.movie:\n self.movie.add_figure(self.plot.fig)\n \n\n def finalize(self, info: InfoDict = None) -> None:\n \"\"\" finalize the tracker, supplying additional information\n\n Args:\n info (dict):\n Extra information from the simulation \n \"\"\"\n super().finalize(info)\n if self.movie:\n if self.movie.filename:\n # write out movie file if requested\n self._logger.info(f'Writing movie to {self.movie.filename}...')\n self.movie.save()\n # finalize movie (e.g. delete temporary files)\n self.movie._end()\n if not self.show:\n del self.plot\n \n \n \nclass DataTracker(CallbackTracker):\n \"\"\" Tracker that stores custom data obtained by calling a function\n \n Attributes:\n times (list):\n The time points at which the data is stored\n data (list):\n The actually stored data, which is a list of the objects returned by\n the callback function. \n \"\"\"\n \n def __init__(self, func: Callable,\n interval: IntervalData = 1):\n \"\"\" \n Args:\n func: The function to call periodically. The function signature\n should be `(state)` or `(state, time)`, where `state` contains\n the current state as an instance of\n :class:`~pde.fields.FieldBase` and `time` is a\n float value indicating the current time. 
Note that only a view\n of the state is supplied, implying that a copy needs to be made\n if the data should be stored.\n interval: |Arg_tracker_interval|\n \"\"\"\n super().__init__(func=func, interval=interval)\n self.times: List[float] = []\n self.data: List[Any] = []\n \n \n def handle(self, field: FieldBase, t: float) -> None:\n \"\"\" handle data supplied to this tracker\n \n Args:\n field (:class:`~pde.fields.FieldBase`):\n The current state of the simulation\n t (float): The associated time\n \"\"\"\n self.times.append(t)\n if self._num_args == 1:\n self.data.append(self._callback(field))\n else:\n self.data.append(self._callback(field, t))\n \n \n @property\n def dataframe(self):\n \"\"\" pandas.DataFrame: the data as a pandas DataFrame \"\"\"\n import pandas as pd\n df = pd.DataFrame(self.data)\n # insert the times and use them as an index\n df.insert(0, 'time', self.times)\n return df\n \n \n \nclass SteadyStateTracker(TrackerBase):\n \"\"\" Tracker that interrupts the simulation once steady state is reached\n \n Steady state is obtained when the state does not change anymore. This is the\n case when the derivative is close to zero.\n \"\"\"\n\n name = 'steady_state'\n\n\n def __init__(self, interval: IntervalData = None,\n atol: float = 1e-8,\n rtol: float = 1e-5):\n \"\"\"\n Args:\n interval: |Arg_tracker_interval|\n The default value `None` checks for the steady state\n approximately every (real) second.\n atol (float): Absolute tolerance that must be reached to abort the\n simulation\n rtol (float): Relative tolerance that must be reached to abort the\n simulation\n \"\"\" \n if interval is None:\n interval = RealtimeIntervals(duration=1)\n super().__init__(interval=interval)\n self.atol = atol \n self.rtol = rtol\n self._last_data = None\n \n \n def handle(self, field: FieldBase, t: float) -> None:\n \"\"\" handle the data of `field` for a give `time` \"\"\"\n if self._last_data is not None:\n # scale with dt to make test independent of dt\n atol = self.atol * self.interval.dt\n rtol = self.rtol * self.interval.dt\n if np.allclose(self._last_data, field.data,\n rtol=rtol, atol=atol, equal_nan=True):\n raise FinishedSimulation('Reached stationary state')\n \n self._last_data = field.data.copy() # store data from last timestep\n \n\n\nclass RuntimeTracker(TrackerBase):\n \"\"\" Tracker that interrupts the simulation once a duration has passed \"\"\"\n\n\n def __init__(self, max_runtime: Union[Real, str],\n interval: IntervalData = 1): \n \"\"\"\n Args:\n max_runtime (float or str):\n The maximal runtime of the simulation. If the runtime is\n exceeded, the simulation is interrupted. 
Values can be either\n given as a number (interpreted as seconds) or as a string, which\n is then parsed using the function\n :func:`~pde.tools.parse_duration.parse_duration`.\n interval: |Arg_tracker_interval|\n \"\"\"\n super().__init__(interval=interval)\n \n try:\n self.max_runtime = float(max_runtime)\n except ValueError:\n td = parse_duration(str(max_runtime))\n self.max_runtime = td.total_seconds()\n\n\n def initialize(self, field: FieldBase, info: InfoDict = None) -> float:\n \"\"\" \n Args:\n field (:class:`~pde.fields.FieldBase`):\n An example of the data that will be analyzed by the tracker\n info (dict):\n Extra information from the simulation \n \n Returns:\n float: The first time the tracker needs to handle data\n \"\"\"\n self.max_time = time.time() + self.max_runtime\n return super().initialize(field, info)\n \n \n def handle(self, field: FieldBase, t: float) -> None:\n \"\"\" handle the data of `field` for a give `time` \"\"\"\n if time.time() > self.max_time:\n dt = timedelta(seconds=self.max_runtime)\n raise FinishedSimulation(f'Reached maximal runtime of {str(dt)}')\n\n \n \nclass ConsistencyTracker(TrackerBase):\n \"\"\" Tracker that interrupts the simulation when the state is not finite \"\"\" \n\n name = 'consistency'\n \n \n def __init__(self, interval: IntervalData = None):\n \"\"\"\n Args:\n interval: |Arg_tracker_interval| The default value `None` checks for\n consistency approximately every (real) second.\n \"\"\" \n if interval is None:\n interval = RealtimeIntervals(duration=1)\n super().__init__(interval=interval)\n \n \n def handle(self, field: FieldBase, t: float) -> None:\n \"\"\" handle the data of `field` for a give `time` \"\"\"\n if not np.all(np.isfinite(field.data)):\n raise StopIteration('Field was not finite')\n \n self._last = field.data.copy() # store data from last timestep\n \n\n\nclass MaterialConservationTracker(TrackerBase):\n \"\"\" Ensure that the amount of material is conserved \"\"\"\n\n name = 'material_conservation'\n\n\n def __init__(self, interval: IntervalData = 1,\n atol: float = 1e-4,\n rtol: float = 1e-4):\n \"\"\"\n Args:\n interval: |Arg_tracker_interval|\n atol (float): Absolute tolerance for amount deviations\n rtol (float): Relative tolerance for amount deviations\n \"\"\"\n super().__init__(interval=interval)\n self.atol = atol \n self.rtol = rtol\n \n \n def initialize(self, field: FieldBase, info: InfoDict = None) -> float:\n \"\"\" \n Args:\n field (:class:`~pde.fields.base.FieldBase`):\n An example of the data that will be analyzed by the tracker\n info (dict):\n Extra information from the simulation \n \n Returns:\n float: The first time the tracker needs to handle data\n \"\"\"\n if isinstance(field, FieldCollection):\n self._reference = np.array([f.magnitude for f in field])\n else:\n self._reference = field.magnitude # type: ignore\n \n return super().initialize(field, info)\n \n \n def handle(self, field: FieldBase, t: float) -> None:\n \"\"\" handle the data of `field` for a give `time` \"\"\"\n if isinstance(field, FieldCollection):\n mags = np.array([f.magnitude for f in field])\n else:\n mags = field.magnitude # type: ignore\n \n c = np.isclose(mags, self._reference, rtol=self.rtol, atol=self.atol)\n if not np.all(c):\n if isinstance(field, FieldCollection):\n msg = f'Material of field {np.flatnonzero(~c)} is not conserved'\n else:\n msg = f'Material is not conserved'\n raise StopIteration(msg)\n \n \n__all__ = ['CallbackTracker', 'ProgressTracker', 'PrintTracker', 'PlotTracker',\n 'DataTracker', 
'SteadyStateTracker', 'RuntimeTracker',\n 'ConsistencyTracker', 'MaterialConservationTracker']\n" ]
[ [ "numpy.allclose", "pandas.DataFrame", "numpy.isclose", "numpy.all", "numpy.array", "numpy.flatnonzero", "numpy.isfinite" ] ]
NunoEdgarGFlowHub/pgmpy
[ "ac0ecc8f5bdd14999c386c6b00a3ce77407b83ce" ]
[ "pgmpy/readwrite/XMLBIF.py" ]
[ "#!/usr/bin/env python\n\ntry:\n from lxml import etree\nexcept ImportError:\n try:\n import xml.etree.ElementTree as etree\n except ImportError:\n #try:\n # import xml.etree.cElementTree as etree\n # commented out because xml.etree.cElementTree is giving errors with dictionary attributes\n print(\"Failed to import ElementTree from any known place\")\n \nimport numpy as np\n\nfrom pgmpy.models import BayesianModel\nfrom pgmpy.factors import TabularCPD, State\nfrom pgmpy.extern.six.moves import map, range\n\n\nclass XMLBIFReader(object):\n \"\"\"\n Base class for reading network file in XMLBIF format.\n \"\"\"\n def __init__(self, path=None, string=None):\n \"\"\"\n Initialisation of XMLBIFReader object.\n\n Parameters\n ----------\n path : file or str\n File of XMLBIF data\n string : str\n String of XMLBIF data\n\n Examples\n --------\n # xmlbif_test.xml is the file present in\n # http://www.cs.cmu.edu/~fgcozman/Research/InterchangeFormat/\n >>> reader = XMLBIFReader(\"xmlbif_test.xml\")\n \"\"\"\n if path:\n self.network = etree.ElementTree(file=path).getroot().find('NETWORK')\n elif string:\n self.network = etree.fromstring(string).find('NETWORK')\n else:\n raise ValueError(\"Must specify either path or string\")\n self.network_name = self.network.find('NAME').text\n self.variables = self.get_variables()\n self.variable_parents = self.get_parents()\n self.edge_list = self.get_edges()\n self.variable_states = self.get_states()\n self.variable_CPD = self.get_cpd()\n self.variable_property = self.get_property()\n\n def get_variables(self):\n \"\"\"\n Returns list of variables of the network\n\n Examples\n --------\n >>> reader = XMLBIF.XMLBIFReader(\"xmlbif_test.xml\")\n >>> reader.get_variables()\n ['light-on', 'bowel-problem', 'dog-out', 'hear-bark', 'family-out']\n \"\"\"\n variables = [variable.find('NAME').text for variable in self.network.findall('VARIABLE')]\n return variables\n\n def get_edges(self):\n \"\"\"\n Returns the edges of the network\n\n Examples\n --------\n >>> reader = XMLBIF.XMLBIFReader(\"xmlbif_test.xml\")\n >>> reader.get_edges()\n [['family-out', 'light-on'],\n ['family-out', 'dog-out'],\n ['bowel-problem', 'dog-out'],\n ['dog-out', 'hear-bark']]\n \"\"\"\n edge_list = [[value, key] for key in self.variable_parents\n for value in self.variable_parents[key]]\n return edge_list\n\n def get_states(self):\n \"\"\"\n Returns the states of variables present in the network\n\n Examples\n --------\n >>> reader = XMLBIF.XMLBIFReader(\"xmlbif_test.xml\")\n >>> reader.get_states()\n {'bowel-problem': ['true', 'false'],\n 'dog-out': ['true', 'false'],\n 'family-out': ['true', 'false'],\n 'hear-bark': ['true', 'false'],\n 'light-on': ['true', 'false']}\n \"\"\"\n variable_states = {variable.find('NAME').text: [outcome.text for outcome in variable.findall('OUTCOME')]\n for variable in self.network.findall('VARIABLE')}\n return variable_states\n\n def get_parents(self):\n \"\"\"\n Returns the parents of the variables present in the network\n\n Examples\n --------\n >>> reader = XMLBIF.XMLBIFReader(\"xmlbif_test.xml\")\n >>> reader.get_parents()\n {'bowel-problem': [],\n 'dog-out': ['family-out', 'bowel-problem'],\n 'family-out': [],\n 'hear-bark': ['dog-out'],\n 'light-on': ['family-out']}\n \"\"\"\n variable_parents = {definition.find('FOR').text: [edge.text for edge in definition.findall('GIVEN')][::-1]\n for definition in self.network.findall('DEFINITION')}\n return variable_parents\n\n def get_cpd(self):\n \"\"\"\n Returns the CPD of the variables present in the network\n\n 
Examples\n --------\n >>> reader = XMLBIF.XMLBIFReader(\"xmlbif_test.xml\")\n >>> reader.get_cpd()\n {'bowel-problem': array([[ 0.01],\n [ 0.99]]),\n 'dog-out': array([[ 0.99, 0.01, 0.97, 0.03],\n [ 0.9 , 0.1 , 0.3 , 0.7 ]]),\n 'family-out': array([[ 0.15],\n [ 0.85]]),\n 'hear-bark': array([[ 0.7 , 0.3 ],\n [ 0.01, 0.99]]),\n 'light-on': array([[ 0.6 , 0.4 ],\n [ 0.05, 0.95]])}\n \"\"\"\n variable_CPD = {definition.find('FOR').text: list(map(float, table.text.split()))\n for definition in self.network.findall('DEFINITION')\n for table in definition.findall('TABLE')}\n for variable in variable_CPD:\n arr = np.array(variable_CPD[variable])\n arr = arr.reshape((len(self.variable_states[variable]),\n arr.size//len(self.variable_states[variable])))\n variable_CPD[variable] = arr\n return variable_CPD\n\n def get_property(self):\n \"\"\"\n Returns the property of the variable\n\n Examples\n --------\n >>> reader = XMLBIF.XMLBIFReader(\"xmlbif_test.xml\")\n >>> reader.get_property()\n {'bowel-problem': ['position = (190, 69)'],\n 'dog-out': ['position = (155, 165)'],\n 'family-out': ['position = (112, 69)'],\n 'hear-bark': ['position = (154, 241)'],\n 'light-on': ['position = (73, 165)']}\n \"\"\"\n variable_property = {variable.find('NAME').text: [property.text for property in variable.findall('PROPERTY')]\n for variable in self.network.findall('VARIABLE')}\n return variable_property\n\n def get_model(self):\n model = BayesianModel(self.get_edges())\n model.name = self.network_name\n\n tabular_cpds = []\n for var, values in self.variable_CPD.items():\n cpd = TabularCPD(var, len(self.variable_states[var]), values,\n evidence=self.variable_parents[var],\n evidence_card=[len(self.variable_states[evidence_var])\n for evidence_var in self.variable_parents[var]])\n tabular_cpds.append(cpd)\n\n model.add_cpds(*tabular_cpds)\n\n for node, properties in self.variable_property.items():\n for prop in properties:\n prop_name, prop_value = map(lambda t: t.strip(), prop.split('='))\n model.node[node][prop_name] = prop_value\n\n return model\n\n\nclass XMLBIFWriter(object):\n \"\"\"\n Base class for writing XMLBIF network file format.\n \"\"\"\n def __init__(self, model, encoding='utf-8', prettyprint=True):\n \"\"\"\n Initialise a XMLBIFWriter object.\n\n Parameters\n ----------\n model: BayesianModel Instance\n Model to write\n encoding: str (optional)\n Encoding for text data\n prettyprint: Bool(optional)\n Indentation in output XML if true\n\n Examples\n --------\n >>> writer = XMLBIFWriter(model)\n \"\"\"\n if not isinstance(model, BayesianModel):\n raise TypeError(\"model must an instance of BayesianModel\")\n self.model = model\n\n self.encoding = encoding\n self.prettyprint = prettyprint\n\n self.xml = etree.Element(\"BIF\", attrib={'version': '0.3'})\n self.network = etree.SubElement(self.xml, 'NETWORK')\n if self.model.name:\n etree.SubElement(self.network, 'NAME').text = self.model.name\n\n self.variables = self.get_variables()\n self.states = self.get_states()\n self.properties = self.get_properties()\n self.definition = self.get_definition()\n self.tables = self.get_cpd()\n\n def __str__(self):\n \"\"\"\n Return the XML as string.\n \"\"\"\n if self.prettyprint:\n self.indent(self.xml)\n return etree.tostring(self.xml, encoding=self.encoding)\n\n def indent(self, elem, level=0):\n \"\"\"\n Inplace prettyprint formatter.\n \"\"\"\n i = \"\\n\" + level*\" \"\n if len(elem):\n if not elem.text or not elem.text.strip():\n elem.text = i + \" \"\n if not elem.tail or not elem.tail.strip():\n elem.tail = 
i\n for elem in elem:\n self.indent(elem, level+1)\n if not elem.tail or not elem.tail.strip():\n elem.tail = i\n else:\n if level and (not elem.tail or not elem.tail.strip()):\n elem.tail = i\n\n def get_variables(self):\n \"\"\"\n Add variables to XMLBIF\n\n Return\n ------\n dict: dict of type {variable: variable tags}\n\n Examples\n --------\n >>> writer = XMLBIFWriter(model)\n >>> writer.get_variables()\n {'bowel-problem': <Element VARIABLE at 0x7fe28607dd88>,\n 'family-out': <Element VARIABLE at 0x7fe28607de08>,\n 'hear-bark': <Element VARIABLE at 0x7fe28607de48>,\n 'dog-out': <Element VARIABLE at 0x7fe28607ddc8>,\n 'light-on': <Element VARIABLE at 0x7fe28607de88>}\n \"\"\"\n variables = self.model.nodes()\n variable_tag = {}\n for var in sorted(variables):\n variable_tag[var] = etree.SubElement(self.network, \"VARIABLE\", attrib={'TYPE': 'nature'})\n etree.SubElement(variable_tag[var], \"NAME\").text = var\n return variable_tag\n\n def get_states(self):\n \"\"\"\n Add outcome to variables of XMLBIF\n\n Return\n ------\n dict: dict of type {variable: outcome tags}\n\n Examples\n --------\n >>> writer = XMLBIFWriter(model)\n >>> writer.get_states()\n {'dog-out': [<Element OUTCOME at 0x7ffbabfcdec8>, <Element OUTCOME at 0x7ffbabfcdf08>],\n 'family-out': [<Element OUTCOME at 0x7ffbabfd4108>, <Element OUTCOME at 0x7ffbabfd4148>],\n 'bowel-problem': [<Element OUTCOME at 0x7ffbabfd4088>, <Element OUTCOME at 0x7ffbabfd40c8>],\n 'hear-bark': [<Element OUTCOME at 0x7ffbabfcdf48>, <Element OUTCOME at 0x7ffbabfcdf88>],\n 'light-on': [<Element OUTCOME at 0x7ffbabfcdfc8>, <Element OUTCOME at 0x7ffbabfd4048>]}\n \"\"\"\n outcome_tag = {}\n cpds = self.model.get_cpds()\n for cpd in cpds:\n var = cpd.variable\n outcome_tag[var] = []\n for state in [State(var, state) for state in range(cpd.get_cardinality([var])[var])]:\n # for state in [cpd.variables[var]:\n state_tag = etree.SubElement(self.variables[var], \"OUTCOME\")\n state_tag.text = str(state.state)\n outcome_tag[var].append(state_tag)\n return outcome_tag\n\n def get_properties(self):\n \"\"\"\n Add property to variables in XMLBIF\n\n Return\n ------\n dict: dict of type {variable: property tag}\n\n Examples\n --------\n >>> writer = XMLBIFWriter(model)\n >>> writer.get_property()\n {'light-on': <Element PROPERTY at 0x7f7a2ffac1c8>,\n 'family-out': <Element PROPERTY at 0x7f7a2ffac148>,\n 'hear-bark': <Element PROPERTY at 0x7f7a2ffac188>,\n 'bowel-problem': <Element PROPERTY at 0x7f7a2ffac0c8>,\n 'dog-out': <Element PROPERTY at 0x7f7a2ffac108>}\n \"\"\"\n variables = self.model.nodes()\n property_tag = {}\n for var in sorted(variables):\n properties = self.model.node[var]\n property_tag[var] = etree.SubElement(self.variables[var], \"PROPERTY\")\n for prop, val in properties.items():\n property_tag[var].text = str(prop) + \" = \" + str(val)\n return property_tag\n\n def get_definition(self):\n \"\"\"\n Add Definition to XMLBIF\n\n Return\n ------\n dict: dict of type {variable: definition tag}\n\n Examples\n --------\n >>> writer = XMLBIFWriter(model)\n >>> writer.get_definition()\n {'hear-bark': <Element DEFINITION at 0x7f1d48977408>,\n 'family-out': <Element DEFINITION at 0x7f1d489773c8>,\n 'dog-out': <Element DEFINITION at 0x7f1d48977388>,\n 'bowel-problem': <Element DEFINITION at 0x7f1d48977348>,\n 'light-on': <Element DEFINITION at 0x7f1d48977448>}\n \"\"\"\n cpds = self.model.get_cpds()\n cpds.sort(key=lambda x: x.variable)\n definition_tag = {}\n for cpd in cpds:\n definition_tag[cpd.variable] = etree.SubElement(self.network, 
\"DEFINITION\")\n etree.SubElement(definition_tag[cpd.variable], \"FOR\").text = cpd.variable\n for child in sorted([] if cpd.evidence is None else cpd.evidence):\n etree.SubElement(definition_tag[cpd.variable], \"GIVEN\").text = child\n\n return definition_tag\n\n def get_cpd(self):\n \"\"\"\n Add Table to XMLBIF.\n\n Return\n ---------------\n dict: dict of type {variable: table tag}\n\n Examples\n -------\n >>> writer = XMLBIFWriter(model)\n >>> writer.get_cpd()\n {'dog-out': <Element TABLE at 0x7f240726f3c8>,\n 'light-on': <Element TABLE at 0x7f240726f488>,\n 'bowel-problem': <Element TABLE at 0x7f240726f388>,\n 'family-out': <Element TABLE at 0x7f240726f408>,\n 'hear-bark': <Element TABLE at 0x7f240726f448>}\n \"\"\"\n cpds = self.model.get_cpds()\n definition_tag = self.definition\n table_tag = {}\n for cpd in cpds:\n table_tag[cpd.variable] = etree.SubElement(definition_tag[cpd.variable], \"TABLE\")\n table_tag[cpd.variable].text = ''\n for val in cpd.values.ravel():\n table_tag[cpd.variable].text += str(val) + ' '\n\n return table_tag\n\n def write_xmlbif(self, filename):\n \"\"\"\n Write the xml data into the file.\n\n Parameters\n ----------\n filename: Name of the file.\n\n Examples\n -------\n >>> writer = XMLBIFWriter(model)\n >>> writer.write_xmlbif(test_file)\n \"\"\"\n writer = self.__str__()[:-1].decode('utf-8')\n with open(filename, 'w') as fout:\n fout.write(writer)\n" ]
[ [ "numpy.array" ] ]
peterwauligmann/sparse_mm
[ "344c06c183854f72224c1e88ad2ced2e092d4efb" ]
[ "matmul.py" ]
[ "from typing import Tuple\n\nfrom codegen.ast import *\nfrom codegen.sugar import *\nfrom codegen.forms import *\nfrom codegen.precision import *\n\nimport scripts.old_arm\nimport scripts.max_bn_knl\n\nfrom cursors import *\n\nimport architecture\nimport numpy\n\ndef decompose_pattern(k, n, pattern:Matrix[bool], bk:int, bn:int) -> Tuple[Matrix[int], List[Matrix[bool]]]:\n Bk,Bn = k//bk, n//bn\n patterns = []\n x = 0\n\n n_overhead = n % bn\n k_overhead = k % bk\n\n if n_overhead > 0:\n Bn += 1\n if k_overhead > 0:\n Bk += 1\n\n blocks = Matrix.full(Bk,Bn,-1)\n\n for Bni in range(Bn):\n for Bki in range(Bk):\n if Bni + 1 == Bn and n_overhead > 0 and Bki + 1 == Bk and k_overhead > 0:\n block = pattern[(Bki*bk):((Bki+1)*bk+k_overhead), (Bni*bn):((Bni)*bn+n_overhead)]\n elif Bni + 1 == Bn and n_overhead > 0:\n block = pattern[(Bki*bk):((Bki+1)*bk), (Bni*bn):((Bni)*bn+n_overhead)]\n elif Bki + 1 == Bk and k_overhead > 0:\n block = pattern[(Bki*bk):((Bki+1)*bk+k_overhead), (Bni*bn):((Bni+1)*bn)]\n else:\n block = pattern[(Bki*bk):((Bki+1)*bk), (Bni*bn):((Bni+1)*bn)]\n \n blocks[Bki,Bni] = x\n x += 1\n patterns.append(block)\n\n mtx_overhead = [0] * n\n\n for i in range(n):\n for j in range(k, pattern.rows):\n if pattern[j, i]:\n mtx_overhead[i] += 1\n\n return blocks, patterns, mtx_overhead\n\nclass MatMul:\n def __init__(self,\n m: int, \n n: int, \n k: int, \n lda: int, \n ldb: int, \n ldc: int,\n alpha: str,\n beta: str,\n mtx_filename: str,\n mtx_format: str = 'any',\n output_funcname: str = None,\n output_filename: str = None,\n output_overwrite: bool = False,\n bm: int = None, \n bn: int = None, \n bk: int = None,\n arch: str = 'knl',\n precision: str = 'd',\n prefetching: str = None,\n **kwargs # Accept and ignore args which don't belong\n ) -> None:\n\n self.m = m\n self.n = n\n self.k = k\n\n self.lda = lda\n self.ldb = ldb\n self.ldc = ldc\n\n try:\n self.alpha = float(alpha)\n except:\n self.alpha = 'generic'\n try:\n self.beta = float(beta)\n except:\n self.beta = 'generic'\n\n if arch == 'skx':\n arch = 'knl'\n\n self.arch = arch\n assert precision.lower() in ['s', 'd']\n self.precision = Precision.DOUBLE if precision.lower() == 'd' else Precision.SINGLE\n\n architecture.init()\n architecture.arch = arch\n architecture.Generator = architecture.get_class(\"codegen.architectures.\" + arch + \".generator.Generator\")\n architecture.operands = architecture.get_class(\"codegen.architectures.\" + arch + \".operands\")\n\n self.generator = architecture.Generator(self.precision)\n\n self.v_size = self.generator.get_v_size()\n\n if bk == None:\n bk = 2 if arch == 'knl' else 1\n\n if bm == None or bn == None:\n if arch == 'knl':\n (self.bm, self.bn) = scripts.max_bn_knl.getBlocksize(m, n, bk, self.v_size)\n elif arch == 'arm':\n (self.bm, self.bn) = scripts.old_arm.getBlocksize(m, n, bk, self.v_size)\n else: \n self.bm = bm\n self.bn = bn\n\n self.bk = bk\n\n self.prefetching = prefetching\n\n self.mtx_filename = mtx_filename\n self.mtx_format = mtx_format\n\n self.output_funcname = output_funcname\n self.output_filename = output_filename\n self.output_overwrite = output_overwrite\n\n if ldb == 0:\n pattern = Matrix.load(mtx_filename)\n else:\n mtx = numpy.zeros((k, n))\n for i in range(k):\n for j in range(n):\n mtx[i, j] = 1\n pattern = Matrix(mtx)\n\n blocks,patterns,mtx_overhead = decompose_pattern(self.k, self.n, pattern, self.bk, self.bn)\n\n self.nnz = 0\n self.flop = 0\n\n if ldb == 0:\n for i in range(n):\n for j in range(k):\n if pattern[j,i]:\n self.nnz += 1\n self.flop = 
self.nnz * m * 2\n self.nnz += sum(mtx_overhead)\n else:\n self.nnz = ldb * self.n\n self.flop = m * n * k * 2\n\n prefetchReg = self.generator.init_prefetching(self.prefetching)\n\n assert(self.m % self.v_size == 0)\n\n self.A_regs, self.B_regs, self.C_regs, self.starting_regs, self.alpha_reg, self.beta_reg, self.loop_reg, self.additional_regs = self.generator.make_reg_blocks(self.bm, self.bn, self.bk, self.v_size, self.nnz, self.m, self.n, self.k)\n\n self.A = DenseCursor(\"A\", self.starting_regs[0], self.m, self.k, self.lda, self.bm, self.bk, self.precision.value)\n self.B = BlockCursor(\"B\", self.starting_regs[1], self.k, self.n, self.ldb, self.bk, self.bn, self.precision.value, blocks, patterns,mtx_overhead)\n self.C = DenseCursor(\"C\", self.starting_regs[2], self.m, self.n, self.ldc, self.bm, self.bn, self.precision.value)\n self.C_pf = DenseCursor(\"C_pf\", prefetchReg, self.m, self.n, self.ldc, self.bm, self.bn, self.precision.value) if prefetchReg else None\n\n\n def make_nk_unroll(self):\n\n asm = block(\"Unrolling over bn and bk\")\n A_ptr = CursorLocation()\n B_ptr = self.B.start()\n C_ptr = CursorLocation()\n C_pf_ptr = CursorLocation()\n\n Bn = self.n // self.bn\n Bk = self.k // self.bk\n vm = self.bm // self.v_size\n\n n_overhead = self.n % self.bn\n k_overhead = self.k % self.bk\n\n if n_overhead > 0:\n Bn += 1\n if k_overhead > 0:\n Bk += 1\n\n asm.add(self.generator.make_b_pointers(self.starting_regs[1], self.additional_regs, self.nnz))\n\n for Bni in range(0,Bn):\n \n regs = self.C_regs\n \n if Bni + 1 == Bn and n_overhead > 0:\n regs = self.C_regs[0:vm, 0:n_overhead]\n\n if self.alpha == 1.0 and self.beta != 0.0:\n asm.add(self.generator.move_register_block(self.C, C_ptr, Coords(), regs, self.v_size, self.additional_regs, None, False))\n if self.beta != 1.0:\n for ic in range(regs.shape[1]):\n for ir in range(regs.shape[0]):\n asm.add(mul(regs[ir,ic], self.beta_reg[1], regs[ir,ic]))\n else:\n asm.add(self.generator.make_zero_block(regs, self.additional_regs))\n\n for Bki in range(0,Bk):\n\n to_A = Coords(right=Bki)\n to_B = Coords(right=Bni, down=Bki, absolute=True)\n\n if self.B.has_nonzero_block(B_ptr, to_B):\n asm.add(self.generator.make_microkernel(self.A, self.B, A_ptr, B_ptr, self.A_regs, self.B_regs, regs, self.v_size, self.additional_regs, to_A, to_B))\n\n if self.alpha != 1.0:\n store_block = block(\"\")\n \n for x in range(0, regs.shape[1], self.A_regs.shape[1]):\n A_regs_cut = self.A_regs[0:min(self.A_regs.shape[0], regs.shape[0]), 0:regs.shape[1]-x]\n if self.beta != 0.0:\n store_block.add(self.generator.move_register_block(self.C, C_ptr, Coords(), A_regs_cut, self.v_size, self.additional_regs, None, False, None, self.ldc * x))\n\n\n for ir in range(A_regs_cut.shape[0]):\n for ic in range(A_regs_cut.shape[1]):\n if self.beta != 0.0 and self.beta != 1.0:\n store_block.add(mul(A_regs_cut[ir,ic], self.beta_reg[1], A_regs_cut[ir,ic]))\n if self.beta == 0.0:\n store_block.add(mul(regs[ir, x + ic], self.alpha_reg[1], A_regs_cut[ir, ic], \"C = C + alpha * AB\"))\n else:\n store_block.add(fma(regs[ir, x + ic], self.alpha_reg[1], A_regs_cut[ir, ic], \"C = C + alpha * AB\", False))\n\n store_block.add(self.generator.move_register_block(self.C, C_ptr, Coords(), A_regs_cut, self.v_size, self.additional_regs, None, True, self.prefetching, self.ldc * x))\n asm.add(store_block)\n\n else:\n asm.add(self.generator.move_register_block(self.C, C_ptr, Coords(), regs, self.v_size, self.additional_regs, None, True, self.prefetching))\n\n if (Bni != Bn-1):\n move_C, C_ptr = 
self.C.move(C_ptr, Coords(right=1))\n asm.add(move_C)\n if self.C_pf:\n move_C_pf, C_pf_ptr = self.C_pf.move(C_pf_ptr, Coords(right=1))\n asm.add(move_C_pf)\n\n\n return asm\n\n\n\n def make(self):\n \n A_ptr = CursorLocation()\n C_ptr = CursorLocation()\n C_pf_ptr = CursorLocation()\n\n Bm = self.m // self.bm\n Bn = self.n // self.bn\n Bk = self.k // self.bk\n\n if self.n % self.bn != 0:\n Bn += 1\n\n loopBody = [\n self.make_nk_unroll(),\n self.A.move(A_ptr, Coords(down=1))[0],\n self.C.move(C_ptr, Coords(down=1, right=1-Bn))[0]\n ]\n if self.C_pf:\n loopBody.append(self.C_pf.move(C_pf_ptr, Coords(down=1, right=1-Bn))[0])\n\n asm = block(\"unrolled_{}x{}x{}\".format(self.m,self.n,self.k),\n self.generator.bcst_alpha_beta(self.alpha_reg, self.beta_reg),\n self.generator.make_scaling_offsets(self.additional_regs, self.nnz),\n loop(self.loop_reg, 0, Bm, 1).body(*loopBody)\n )\n\n vm_overhead = (self.m % self.bm) // self.v_size\n\n if vm_overhead > 0:\n self.m = self.m % self.bm\n self.bm = self.m % self.bm\n self.A_regs = self.A_regs[0:self.bm // self.v_size, 0:self.bk]\n self.C_regs = self.C_regs[0:self.bm // self.v_size, 0:self.bn]\n self.A.r = self.m\n asm.add(self.make_nk_unroll())\n\n\n return asm\n" ]
[ [ "numpy.zeros" ] ]