repo_name : string (6 to 130 chars)
hexsha : sequence
file_path : sequence
code : sequence
apis : sequence
possible_versions : list
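Each record below pairs a repository (repo_name, hexsha) with the files it contributes (file_path), their raw sources (code), the library calls detected in them (apis), and a per-library possible_versions map. As a minimal orientation sketch, the snippet below walks over such records; it assumes the dump is stored as JSON Lines, and the file name records.jsonl is hypothetical.

```python
import json

# Hypothetical file name; assumes one JSON object per line carrying the
# fields listed in the schema above.
with open("records.jsonl", "r", encoding="utf-8") as fh:
    for line in fh:
        record = json.loads(line)
        repo = record["repo_name"]  # e.g. "mikehuisman/metadl"
        # file_path, code and apis are parallel lists: one entry per file.
        for path, source, calls in zip(record["file_path"],
                                       record["code"],
                                       record["apis"]):
            print(f"{repo}:{path} -> {len(source)} chars, {len(calls)} API calls")
```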
mikehuisman/metadl
[ "61ece0364b08e67412ab87da4a41425b2e88a562" ]
[ "metadl/core/scoring/scoring.py" ]
[ "\"\"\" Runs the scoring procedure for the challenge.\nIt assumes that there exists a ./model_dir folder containing both the \nsubmission code and the saved learner. \nIt will create a folder named ./scoring_output (default) in which a txt file \nwill contain the average score over 600 episodes. You can change the folder \nname via the score_dir flag.\n\nUsage example executed from the metadl/ repository : \n\npython -m metadl.core.scoring.scoring --meta_test_dir=<path_dataset.meta_test> \n\"\"\" \nimport os \nfrom sys import path\n\nimport scipy.stats\nimport gin\nimport numpy as np \nfrom absl import app\nfrom absl import flags \nfrom absl import logging\nimport tensorflow as tf\n\nfrom metadl.data.dataset import DataGenerator\nfrom metadl.core.ingestion.ingestion import get_gin_path, show_dir\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string('meta_test_dir', \n '/Users/adrian/GitInria/meta-dataset/records/',\n ('Directory of the meta-test dataset. This directory '\n + 'should contain records and a json spec file.'))\n\nflags.DEFINE_string('saved_model_dir',\n './model_dir',\n ('Directory path that contains the participant\\'s code '\n + 'along with the serialized learner from meta-fit.'))\n\nflags.DEFINE_string('score_dir',\n './scoring_output',\n 'Path to the score directory.')\n\nflags.DEFINE_string('evaltype',\n 'test',\n 'Data type on which to perform evaluation. [train, val, test]')\n\ntf.random.set_seed(1234)\ndef NwayKshot_accuracy(predictions, ground_truth, metric):\n \"\"\" N-way, K-shot accuracy which corresponds to the accuracy in a\n multi-classification context with N classes.\n\n Args:\n predictions : tensors, sparse tensors corresponding to the predicted \n labels.\n ground_truth : tensors, sparse tensors corresponding the ground truth \n labels.\n metric : keras.metrics , the metric we use to evaluate the \n classification performance of the meta-learning algorithm. We use \n the SparseCategoricalAccuracy in this challenge.\n\n Retruns:\n score : Float, the resulting performance using the given metric.\n \"\"\"\n ground_truth = tf.expand_dims(ground_truth, axis = 1)\n predictions = tf.expand_dims(predictions, axis = 1)\n logging.debug('Predictions shape : {} - Ground truth shape : {}'.format(\n predictions.shape, ground_truth.shape))\n\n metric.update_state(ground_truth, predictions)\n score = metric.result()\n logging.debug('An episode score: {}'.format(score))\n metric.reset_states()\n return score\n \ndef is_one_hot_vector(x, axis=None, keepdims=False):\n \"\"\"Check if a vector 'x' is one-hot (i.e. one entry is 1 and others 0).\"\"\"\n norm_1 = np.linalg.norm(x, ord=1, axis=axis, keepdims=keepdims)\n norm_inf = np.linalg.norm(x, ord=np.inf, axis=axis, keepdims=keepdims)\n return np.logical_and(norm_1 == 1, norm_inf == 1)\n\ndef write_score(score, conf_int, file_score, duration=-1):\n \"\"\"Write score of the k-th task in the given file_score.\"\"\"\n file_score.write('set1_score: {:.6f}\\n'.format(float(score)))\n file_score.write('conf_int: {:.3f}\\n'.format(float(conf_int)))\n file_score.write('Duration: {:.6f}\\n'.format(float(duration)))\n \ndef extract_elapsed_time(saved_model_dir):\n \"\"\" Extracts elapsed time from the metadata file. 
It corresponds to the \n meta-training time, the duration of the ingestion process.\n \"\"\"\n if not os.path.isdir(saved_model_dir): \n raise ValueError('Saved model directory does not exists.')\n\n if os.path.isfile(os.path.join(saved_model_dir, 'metadata')):\n with open(os.path.join(saved_model_dir, 'metadata'), 'r') as f : \n lines = f.readlines()\n for line in lines : \n splitted_line = line.split(' ')\n for k, word in enumerate(splitted_line): \n if 'elapsed' in splitted_line[k]:\n elapsed_time = float(splitted_line[k+1])\n return elapsed_time\n \n return -1\n\ndef process_task(task):\n \"\"\"We are using the meta-dataset code to generate episodes from a dataset. \n Generated episodes have a specific format. Each is processed such that the \n the support and query sets are ready to be used by the participants. Each\n set is returned as a tf.data.Dataset object.\n The que_labs are kept hidden.\n\n Returns : \n support_dataset : tf.data.Dataset containing the support examples and \n labels.\n query_dataset : tf.data.Dataset containing the query examples\n que_labs : tuple (query_batch_size, 1), the query examples labels \n i.e. the ground truth labels.\n \"\"\"\n sup_set = tf.data.Dataset.from_tensor_slices(\\\n (task[0][1], task[0][0]))\n dim = task[0][4].shape[1]\n arr = np.arange(dim)\n np.random.shuffle(arr) # shuffling arr\n query_labs = task[0][4]\n query_imgs = task[0][3]\n \n query_labs_s = tf.gather(query_labs, arr, axis=1)\n query_imgs_s = tf.gather(query_imgs, arr, axis=1)\n\n que_set = tf.data.Dataset.from_tensor_slices(\n (query_labs_s, query_imgs_s)\n )\n new_ds = tf.data.Dataset.zip((sup_set, que_set))\n for ((supp_labs, supp_img), (que_labs, que_img)) \\\n in new_ds :\n\n logging.debug('Supp labs : {}'.format(supp_labs))\n logging.debug('Query labs : {}'.format(que_labs))\n\n support_set = tf.data.Dataset.from_tensor_slices(\\\n (supp_img, supp_labs))\n query_set = tf.data.Dataset.from_tensor_slices(\\\n (que_img,))\n support_set = support_set.batch(5)\n query_set = query_set.batch(95)\n\n return support_set, query_set, que_labs\n\ndef scoring(argv):\n \"\"\" \n For each task, load and fit the Learner with the support set and evaluate\n the submission performance with the query set. \n A directory 'scoring_output' is created and contains a txt file that \n contains the submission score and duration. 
Note that the former is the \n time elapsed during the ingestion program and hence the meta-fit() \n duration.\n\n The metric considered here is the Sparse Categorical Accuracy for a \n 5 classes image classification problem.\n \"\"\"\n del argv\n saved_model_dir = FLAGS.saved_model_dir\n meta_test_dir = FLAGS.meta_test_dir\n eval_type = FLAGS.evaltype\n \n # Making eval type compatible with DataGenerator specs\n if eval_type == 'train' or eval_type == 'val':\n data_generator_eval_type = 'train'\n elif eval_type == 'test':\n data_generator_eval_type = 'test'\n # Use CodaLab's path `run/input/ref` in parallel with `run/input/res`\n if not os.path.isdir(meta_test_dir): \n meta_test_dir = os.path.join(saved_model_dir, os.pardir, 'ref')\n\n # Evaluation type scenario: if meta_test is specified -> act as normal \n # scoring on meta_test data\n if (eval_type == 'train' or eval_type == 'val') and 'meta_test' in meta_test_dir:\n raise ValueError('Cannot perform train/val evaluation on meta-test data!')\n #if 'meta_test' not in meta_test_dir:\n # if eval_type == 'test':\n # meta_test_dir = os.path.join(meta_test_dir, 'meta_test')\n # else:\n # meta_test_dir = os.path.join(meta_test_dir, 'meta_train')\n\n code_dir = os.path.join(saved_model_dir, 'code_dir')\n score_dir = FLAGS.score_dir\n \n path.append(code_dir)\n from model import MyLearner\n if(os.path.exists(os.path.join(code_dir, 'model.gin'))):\n gin.parse_config_file(os.path.join(code_dir, 'model.gin'))\n\n logging.info('Ingestion done! Starting scoring process ... ')\n logging.info('Creating the meta-test episode generator ... \\n ')\n generator = DataGenerator(path_to_records=meta_test_dir,\n batch_config=None,\n episode_config=[28, 5, 1, 19],\n pool= data_generator_eval_type,\n mode='episode')\n \n if eval_type == 'test':\n meta_test_dataset = generator.meta_test_pipeline\n elif eval_type == 'train':\n meta_test_dataset = generator.meta_train_pipeline\n elif eval_type == 'val':\n meta_test_dataset = generator.meta_valid_pipeline\n else:\n raise ValueError('Wrong eval_type : {}'.format(eval_type))\n\n logging.info('Evaluating performance on episodes ... ')\n\n meta_test_dataset = meta_test_dataset.batch(1)\n meta_test_dataset = meta_test_dataset.prefetch(5)\n learner = MyLearner()\n \n if (not os.path.isdir(score_dir)):\n os.mkdir(score_dir)\n score_file = os.path.join(score_dir, 'scores.txt')\n results = []\n metric = tf.metrics.SparseCategoricalAccuracy()\n nbr_episodes = 600\n\n for k , task in enumerate(meta_test_dataset) :\n support_set, query_set, ground_truth = process_task(task)\n learner.load(saved_model_dir)\n predictor = learner.fit(support_set)\n predictions = predictor.predict(query_set)\n score = NwayKshot_accuracy(predictions, ground_truth, metric)\n results.append(score)\n\n logging.debug('Score on {} : {}'.format(k, score))\n logging.debug('Results : {}'.format(results[:20]))\n if(k > nbr_episodes):\n break\n def mean_confidence_interval(data, confidence=0.95):\n a = 1.0 * np.array(data)\n n = len(a)\n m, se = np.mean(a), scipy.stats.sem(a)\n h = se * scipy.stats.t.ppf((1 + confidence) / 2., n-1)\n return m, h\n\n m, conf_int = mean_confidence_interval(results)\n with open(score_file, 'w') as f :\n write_score(m,\n conf_int,\n f, \n extract_elapsed_time(saved_model_dir))\n\n logging.info(('Scoring done! 
The average score over {} '\n + 'episodes is : {:.3%}').format(nbr_episodes,\n sum(results)/len(results))\n )\n\nif __name__ == '__main__':\n np.random.seed(seed=1234)\n tf.get_logger().setLevel('ERROR')\n app.run(scoring)\n\n\n \n" ]
[ [ "numpy.random.seed", "numpy.arange", "tensorflow.data.Dataset.from_tensor_slices", "numpy.linalg.norm", "tensorflow.expand_dims", "numpy.random.shuffle", "tensorflow.metrics.SparseCategoricalAccuracy", "tensorflow.gather", "tensorflow.data.Dataset.zip", "tensorflow.get_logger", "numpy.mean", "numpy.array", "numpy.logical_and", "tensorflow.random.set_seed" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
EnTimeMent/Group-Behavior-Recognition
[ "d6606e9e7bef836a9ccc5b4ada66933a4770171c" ]
[ "Graph-based/processor/recognition.py" ]
[ "#!/usr/bin/env python\n# pylint: disable=W0201\nimport sys\nimport argparse\nimport yaml\nimport numpy as np\n\n# torch\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\n\n# torchlight\nimport torchlight\nfrom torchlight import str2bool\nfrom torchlight import DictAction\nfrom torchlight import import_class\n\nfrom .processor import Processor\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import accuracy_score\n\n\ndef weights_init(m):\n classname = m.__class__.__name__\n if classname.find('Conv1d') != -1:\n m.weight.data.normal_(0.0, 0.02)\n if m.bias is not None:\n m.bias.data.fill_(0)\n elif classname.find('Conv2d') != -1:\n m.weight.data.normal_(0.0, 0.02)\n if m.bias is not None:\n m.bias.data.fill_(0)\n elif classname.find('BatchNorm') != -1:\n m.weight.data.normal_(1.0, 0.02)\n m.bias.data.fill_(0)\n\n\nclass REC_Processor(Processor):\n \"\"\"\n Processor for Skeleton-based Action Recgnition\n \"\"\"\n\n def load_model(self):\n # print(\"load model\")\n self.model = self.io.load_model(self.arg.model,\n **(self.arg.model_args))\n self.model.apply(weights_init)\n self.loss = nn.CrossEntropyLoss()\n # self.loss = nn.BCEWithLogitsLoss()\n\n def load_optimizer(self):\n if self.arg.optimizer == 'SGD':\n self.optimizer = optim.SGD(\n self.model.parameters(),\n lr=self.arg.base_lr,\n momentum=0.9,\n nesterov=self.arg.nesterov,\n weight_decay=self.arg.weight_decay)\n elif self.arg.optimizer == 'Adam':\n self.optimizer = optim.Adam(\n self.model.parameters(),\n lr=self.arg.base_lr,\n weight_decay=self.arg.weight_decay)\n else:\n raise ValueError()\n\n def adjust_lr(self):\n if self.arg.optimizer == 'SGD' and self.arg.step:\n lr = self.arg.base_lr * (\n 0.1**np.sum(self.meta_info['epoch'] >= np.array(self.arg.step)))\n for param_group in self.optimizer.param_groups:\n param_group['lr'] = lr\n self.lr = lr\n else:\n self.lr = self.arg.base_lr\n\n def show_topk(self, k):\n rank = self.result.argsort()\n hit_top_k = [l in rank[i, -k:] for i, l in enumerate(self.label)]\n accuracy = sum(hit_top_k) * 1.0 / len(hit_top_k)\n self.io.print_log('\\tTop{}: {:.2f}%'.format(k, 100 * accuracy))\n\n def train(self):\n self.model.train()\n self.adjust_lr()\n loader = self.data_loader['train']\n loss_value = []\n result_frag = []\n label_frag = []\n # print(\"train\")\n for data, label in loader:\n\n # get data\n data = data.float().to(self.dev)\n label = label.long().to(self.dev)\n\n # forward\n output = self.model(data)\n result_frag.extend(\n output.data.cpu().numpy().argmax(axis=1))\n label_frag.extend(label.data.cpu().numpy())\n\n # print(output)\n loss = self.loss(output, label)\n # print(label)\n # backward\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # statistics\n self.iter_info['loss'] = loss.data.item()\n self.iter_info['lr'] = '{:.6f}'.format(self.lr)\n loss_value.append(self.iter_info['loss'])\n self.show_iter_info()\n self.meta_info['iter'] += 1\n\n ac = accuracy_score(label_frag, result_frag)\n # print(result_frag)\n # print(label_frag)\n print(\"train acc: {}\".format(ac))\n\n self.epoch_info['mean_loss'] = np.mean(loss_value)\n self.show_epoch_info()\n # self.io.print_timer()\n\n def test(self, evaluation=True):\n\n self.model.eval()\n loader = self.data_loader['test']\n loss_value = []\n result_frag = []\n label_frag = []\n\n for data, label in loader:\n\n # get data\n data = data.float().to(self.dev)\n label = label.long().to(self.dev)\n\n # inference\n with torch.no_grad():\n output = self.model(data)\n 
result_frag.append(output.data.cpu().numpy())\n\n # get loss\n if evaluation:\n loss = self.loss(output, label)\n loss_value.append(loss.item())\n label_frag.append(label.data.cpu().numpy())\n\n self.result = np.concatenate(result_frag)\n # print(self.result)\n if evaluation:\n self.label = np.concatenate(label_frag)\n self.epoch_info['mean_loss'] = np.mean(loss_value)\n self.show_epoch_info()\n\n # show top-k accuracy\n for k in self.arg.show_topk:\n self.show_topk(k)\n top = self.result.argmax(axis=1)\n print(top)\n print(self.label)\n cm = confusion_matrix(self.label, top)\n print(cm)\n\n @staticmethod\n def get_parser(add_help=False):\n\n # parameter priority: command line > config > default\n parent_parser = Processor.get_parser(add_help=False)\n parser = argparse.ArgumentParser(\n add_help=add_help,\n parents=[parent_parser],\n description='Spatial Temporal Graph Convolution Network')\n\n # region arguments yapf: disable\n # evaluation\n parser.add_argument('--show_topk', type=int,\n default=[1], nargs='+', help='which Top K accuracy will be shown')\n # optim\n parser.add_argument('--base_lr', type=float,\n default=0.01, help='initial learning rate')\n parser.add_argument('--step', type=int, default=[], nargs='+',\n help='the epoch where optimizer reduce the learning rate')\n parser.add_argument('--optimizer', default='SGD',\n help='type of optimizer')\n parser.add_argument('--nesterov', type=str2bool,\n default=True, help='use nesterov or not')\n parser.add_argument('--weight_decay', type=float,\n default=0.0001, help='weight decay for optimizer')\n # endregion yapf: enable\n\n return parser\n" ]
[ [ "torch.nn.CrossEntropyLoss", "sklearn.metrics.confusion_matrix", "numpy.concatenate", "numpy.mean", "torch.no_grad", "numpy.array", "sklearn.metrics.accuracy_score" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
paritoshmittal09/pandas
[ "862d2d89b8fe0a93ec8e714315175e2eba1fa6e5" ]
[ "pandas/core/groupby/groupby.py" ]
[ "\"\"\"\nProvide the groupby split-apply-combine paradigm. Define the GroupBy\nclass providing the base-class of operations.\n\nThe SeriesGroupBy and DataFrameGroupBy sub-class\n(defined in pandas.core.groupby.generic)\nexpose these user-facing objects to provide specific functionailty.\n\"\"\"\n\nimport types\nfrom functools import wraps, partial\nimport datetime\nimport collections\nimport warnings\nfrom contextlib import contextmanager\n\nimport numpy as np\n\nfrom pandas._libs import groupby as libgroupby, Timestamp\nfrom pandas.util._validators import validate_kwargs\nfrom pandas.util._decorators import (\n cache_readonly, Substitution, Appender)\n\nfrom pandas import compat\nfrom pandas.compat import zip, range, callable, set_function_name\nfrom pandas.compat.numpy import function as nv\n\nfrom pandas.core.dtypes.common import (\n is_numeric_dtype,\n is_scalar,\n ensure_float)\nfrom pandas.core.dtypes.cast import maybe_downcast_to_dtype\nfrom pandas.core.dtypes.missing import isna, notna\n\nfrom pandas.core.groupby import base\nfrom pandas.core.base import (PandasObject, SelectionMixin, GroupByError,\n DataError, SpecificationError)\nfrom pandas.core.index import Index, MultiIndex\nfrom pandas.core.generic import NDFrame\nfrom pandas.core.frame import DataFrame\nfrom pandas.core.series import Series\nfrom pandas.core.sorting import get_group_index_sorter\nimport pandas.core.common as com\nimport pandas.core.algorithms as algorithms\nfrom pandas.core.config import option_context\n\n_doc_template = \"\"\"\n\n See also\n --------\n pandas.Series.%(name)s\n pandas.DataFrame.%(name)s\n pandas.Panel.%(name)s\n\"\"\"\n\n_apply_docs = dict(\n template=\"\"\"\n Apply function `func` group-wise and combine the results together.\n\n The function passed to `apply` must take a {input} as its first\n argument and return a DataFrame, Series or scalar. `apply` will\n then take care of combining the results back together into a single\n dataframe or series. `apply` is therefore a highly flexible\n grouping method.\n\n While `apply` is a very flexible method, its downside is that\n using it can be quite a bit slower than using more specific methods\n like `agg` or `transform`. Pandas offers a wide range of method that will\n be much faster than using `apply` for their specific purposes, so try to\n use them before reaching for `apply`.\n\n Parameters\n ----------\n func : callable\n A callable that takes a {input} as its first argument, and\n returns a dataframe, a series or a scalar. In addition the\n callable may take positional and keyword arguments.\n args, kwargs : tuple and dict\n Optional positional and keyword arguments to pass to `func`.\n\n Returns\n -------\n applied : Series or DataFrame\n\n Notes\n -----\n In the current implementation `apply` calls `func` twice on the\n first group to decide whether it can take a fast or slow code\n path. 
This can lead to unexpected behavior if `func` has\n side-effects, as they will take effect twice for the first\n group.\n\n Examples\n --------\n {examples}\n\n See also\n --------\n pipe : Apply function to the full GroupBy object instead of to each\n group.\n aggregate : Apply aggregate function to the GroupBy object.\n transform : Apply function column-by-column to the GroupBy object.\n Series.apply : Apply a function to a Series.\n DataFrame.apply : Apply a function to each row or column of a DataFrame.\n \"\"\",\n dataframe_examples=\"\"\"\n >>> df = pd.DataFrame({'A': 'a a b'.split(),\n 'B': [1,2,3],\n 'C': [4,6, 5]})\n >>> g = df.groupby('A')\n\n Notice that ``g`` has two groups, ``a`` and ``b``.\n Calling `apply` in various ways, we can get different grouping results:\n\n Example 1: below the function passed to `apply` takes a DataFrame as\n its argument and returns a DataFrame. `apply` combines the result for\n each group together into a new DataFrame:\n\n >>> g[['B', 'C']].apply(lambda x: x / x.sum())\n B C\n 0 0.333333 0.4\n 1 0.666667 0.6\n 2 1.000000 1.0\n\n Example 2: The function passed to `apply` takes a DataFrame as\n its argument and returns a Series. `apply` combines the result for\n each group together into a new DataFrame:\n\n >>> g[['B', 'C']].apply(lambda x: x.max() - x.min())\n B C\n A\n a 1 2\n b 0 0\n\n Example 3: The function passed to `apply` takes a DataFrame as\n its argument and returns a scalar. `apply` combines the result for\n each group together into a Series, including setting the index as\n appropriate:\n\n >>> g.apply(lambda x: x.C.max() - x.B.min())\n A\n a 5\n b 2\n dtype: int64\n \"\"\",\n series_examples=\"\"\"\n >>> s = pd.Series([0, 1, 2], index='a a b'.split())\n >>> g = s.groupby(s.index)\n\n From ``s`` above we can see that ``g`` has two groups, ``a`` and ``b``.\n Calling `apply` in various ways, we can get different grouping results:\n\n Example 1: The function passed to `apply` takes a Series as\n its argument and returns a Series. `apply` combines the result for\n each group together into a new Series:\n\n >>> g.apply(lambda x: x*2 if x.name == 'b' else x/2)\n 0 0.0\n 1 0.5\n 2 4.0\n dtype: float64\n\n Example 2: The function passed to `apply` takes a Series as\n its argument and returns a scalar. `apply` combines the result for\n each group together into a Series, including setting the index as\n appropriate:\n\n >>> g.apply(lambda x: x.max() - x.min())\n a 1\n b 0\n dtype: int64\n \"\"\")\n\n_pipe_template = \"\"\"\\\nApply a function `func` with arguments to this %(klass)s object and return\nthe function's result.\n\n%(versionadded)s\n\nUse `.pipe` when you want to improve readability by chaining together\nfunctions that expect Series, DataFrames, GroupBy or Resampler objects.\nInstead of writing\n\n>>> h(g(f(df.groupby('group')), arg1=a), arg2=b, arg3=c)\n\nYou can write\n\n>>> (df.groupby('group')\n... .pipe(f)\n... .pipe(g, arg1=a)\n... 
.pipe(h, arg2=b, arg3=c))\n\nwhich is much more readable.\n\nParameters\n----------\nfunc : callable or tuple of (callable, string)\n Function to apply to this %(klass)s object or, alternatively,\n a `(callable, data_keyword)` tuple where `data_keyword` is a\n string indicating the keyword of `callable` that expects the\n %(klass)s object.\nargs : iterable, optional\n positional arguments passed into `func`.\nkwargs : dict, optional\n a dictionary of keyword arguments passed into `func`.\n\nReturns\n-------\nobject : the return type of `func`.\n\nNotes\n-----\nSee more `here\n<http://pandas.pydata.org/pandas-docs/stable/groupby.html#piping-function-calls>`_\n\nExamples\n--------\n%(examples)s\n\nSee Also\n--------\npandas.Series.pipe : Apply a function with arguments to a series\npandas.DataFrame.pipe: Apply a function with arguments to a dataframe\napply : Apply function to each group instead of to the\n full %(klass)s object.\n\"\"\"\n\n_transform_template = \"\"\"\nCall function producing a like-indexed %(klass)s on each group and\nreturn a %(klass)s having the same indexes as the original object\nfilled with the transformed values\n\nParameters\n----------\nf : function\n Function to apply to each group\n\nNotes\n-----\nEach group is endowed the attribute 'name' in case you need to know\nwhich group you are working on.\n\nThe current implementation imposes three requirements on f:\n\n* f must return a value that either has the same shape as the input\n subframe or can be broadcast to the shape of the input subframe.\n For example, f returns a scalar it will be broadcast to have the\n same shape as the input subframe.\n* if this is a DataFrame, f must support application column-by-column\n in the subframe. If f also supports application to the entire subframe,\n then a fast path is used starting from the second chunk.\n* f must not mutate groups. Mutation is not supported and may\n produce unexpected results.\n\nReturns\n-------\n%(klass)s\n\nSee also\n--------\naggregate, transform\n\nExamples\n--------\n\n# Same shape\n>>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',\n... 'foo', 'bar'],\n... 'B' : ['one', 'one', 'two', 'three',\n... 'two', 'two'],\n... 'C' : [1, 5, 5, 2, 5, 5],\n... 
'D' : [2.0, 5., 8., 1., 2., 9.]})\n>>> grouped = df.groupby('A')\n>>> grouped.transform(lambda x: (x - x.mean()) / x.std())\n C D\n0 -1.154701 -0.577350\n1 0.577350 0.000000\n2 0.577350 1.154701\n3 -1.154701 -1.000000\n4 0.577350 -0.577350\n5 0.577350 1.000000\n\n# Broadcastable\n>>> grouped.transform(lambda x: x.max() - x.min())\n C D\n0 4 6.0\n1 3 8.0\n2 4 6.0\n3 3 8.0\n4 4 6.0\n5 3 8.0\n\n\"\"\"\n\n\nclass GroupByPlot(PandasObject):\n \"\"\"\n Class implementing the .plot attribute for groupby objects\n \"\"\"\n\n def __init__(self, groupby):\n self._groupby = groupby\n\n def __call__(self, *args, **kwargs):\n def f(self):\n return self.plot(*args, **kwargs)\n f.__name__ = 'plot'\n return self._groupby.apply(f)\n\n def __getattr__(self, name):\n def attr(*args, **kwargs):\n def f(self):\n return getattr(self.plot, name)(*args, **kwargs)\n return self._groupby.apply(f)\n return attr\n\n\n@contextmanager\ndef _group_selection_context(groupby):\n \"\"\"\n set / reset the _group_selection_context\n \"\"\"\n groupby._set_group_selection()\n yield groupby\n groupby._reset_group_selection()\n\n\nclass _GroupBy(PandasObject, SelectionMixin):\n _group_selection = None\n _apply_whitelist = frozenset([])\n\n def __init__(self, obj, keys=None, axis=0, level=None,\n grouper=None, exclusions=None, selection=None, as_index=True,\n sort=True, group_keys=True, squeeze=False,\n observed=False, **kwargs):\n\n self._selection = selection\n\n if isinstance(obj, NDFrame):\n obj._consolidate_inplace()\n\n self.level = level\n\n if not as_index:\n if not isinstance(obj, DataFrame):\n raise TypeError('as_index=False only valid with DataFrame')\n if axis != 0:\n raise ValueError('as_index=False only valid for axis=0')\n\n self.as_index = as_index\n self.keys = keys\n self.sort = sort\n self.group_keys = group_keys\n self.squeeze = squeeze\n self.observed = observed\n self.mutated = kwargs.pop('mutated', False)\n\n if grouper is None:\n from pandas.core.groupby.grouper import _get_grouper\n grouper, exclusions, obj = _get_grouper(obj, keys,\n axis=axis,\n level=level,\n sort=sort,\n observed=observed,\n mutated=self.mutated)\n\n self.obj = obj\n self.axis = obj._get_axis_number(axis)\n self.grouper = grouper\n self.exclusions = set(exclusions) if exclusions else set()\n\n # we accept no other args\n validate_kwargs('group', kwargs, {})\n\n def __len__(self):\n return len(self.groups)\n\n def __unicode__(self):\n # TODO: Better unicode/repr for GroupBy object\n return object.__repr__(self)\n\n def _assure_grouper(self):\n \"\"\"\n we create the grouper on instantiation\n sub-classes may have a different policy\n \"\"\"\n pass\n\n @property\n def groups(self):\n \"\"\" dict {group name -> group labels} \"\"\"\n self._assure_grouper()\n return self.grouper.groups\n\n @property\n def ngroups(self):\n self._assure_grouper()\n return self.grouper.ngroups\n\n @property\n def indices(self):\n \"\"\" dict {group name -> group indices} \"\"\"\n self._assure_grouper()\n return self.grouper.indices\n\n def _get_indices(self, names):\n \"\"\"\n safe get multiple indices, translate keys for\n datelike to underlying repr\n \"\"\"\n\n def get_converter(s):\n # possibly convert to the actual key types\n # in the indices, could be a Timestamp or a np.datetime64\n if isinstance(s, (Timestamp, datetime.datetime)):\n return lambda key: Timestamp(key)\n elif isinstance(s, np.datetime64):\n return lambda key: Timestamp(key).asm8\n else:\n return lambda key: key\n\n if len(names) == 0:\n return []\n\n if len(self.indices) > 0:\n 
index_sample = next(iter(self.indices))\n else:\n index_sample = None # Dummy sample\n\n name_sample = names[0]\n if isinstance(index_sample, tuple):\n if not isinstance(name_sample, tuple):\n msg = (\"must supply a tuple to get_group with multiple\"\n \" grouping keys\")\n raise ValueError(msg)\n if not len(name_sample) == len(index_sample):\n try:\n # If the original grouper was a tuple\n return [self.indices[name] for name in names]\n except KeyError:\n # turns out it wasn't a tuple\n msg = (\"must supply a a same-length tuple to get_group\"\n \" with multiple grouping keys\")\n raise ValueError(msg)\n\n converters = [get_converter(s) for s in index_sample]\n names = [tuple(f(n) for f, n in zip(converters, name))\n for name in names]\n\n else:\n converter = get_converter(index_sample)\n names = [converter(name) for name in names]\n\n return [self.indices.get(name, []) for name in names]\n\n def _get_index(self, name):\n \"\"\" safe get index, translate keys for datelike to underlying repr \"\"\"\n return self._get_indices([name])[0]\n\n @cache_readonly\n def _selected_obj(self):\n\n if self._selection is None or isinstance(self.obj, Series):\n if self._group_selection is not None:\n return self.obj[self._group_selection]\n return self.obj\n else:\n return self.obj[self._selection]\n\n def _reset_group_selection(self):\n \"\"\"\n Clear group based selection. Used for methods needing to return info on\n each group regardless of whether a group selection was previously set.\n \"\"\"\n if self._group_selection is not None:\n # GH12839 clear cached selection too when changing group selection\n self._group_selection = None\n self._reset_cache('_selected_obj')\n\n def _set_group_selection(self):\n \"\"\"\n Create group based selection. Used when selection is not passed\n directly but instead via a grouper.\n\n NOTE: this should be paired with a call to _reset_group_selection\n \"\"\"\n grp = self.grouper\n if not (self.as_index and\n getattr(grp, 'groupings', None) is not None and\n self.obj.ndim > 1 and\n self._group_selection is None):\n return\n\n ax = self.obj._info_axis\n groupers = [g.name for g in grp.groupings\n if g.level is None and g.in_axis]\n\n if len(groupers):\n # GH12839 clear selected obj cache when group selection changes\n self._group_selection = ax.difference(Index(groupers)).tolist()\n self._reset_cache('_selected_obj')\n\n def _set_result_index_ordered(self, result):\n # set the result index on the passed values object and\n # return the new object, xref 8046\n\n # the values/counts are repeated according to the group index\n # shortcut if we have an already ordered grouper\n if not self.grouper.is_monotonic:\n index = Index(np.concatenate(\n self._get_indices(self.grouper.result_index)))\n result.set_axis(index, axis=self.axis, inplace=True)\n result = result.sort_index(axis=self.axis)\n\n result.set_axis(self.obj._get_axis(self.axis), axis=self.axis,\n inplace=True)\n return result\n\n def _dir_additions(self):\n return self.obj._dir_additions() | self._apply_whitelist\n\n def __getattr__(self, attr):\n if attr in self._internal_names_set:\n return object.__getattribute__(self, attr)\n if attr in self.obj:\n return self[attr]\n if hasattr(self.obj, attr):\n return self._make_wrapper(attr)\n\n raise AttributeError(\"%r object has no attribute %r\" %\n (type(self).__name__, attr))\n\n @Substitution(klass='GroupBy',\n versionadded='.. 
versionadded:: 0.21.0',\n examples=\"\"\"\\\n>>> df = pd.DataFrame({'A': 'a b a b'.split(), 'B': [1, 2, 3, 4]})\n>>> df\n A B\n0 a 1\n1 b 2\n2 a 3\n3 b 4\n\nTo get the difference between each groups maximum and minimum value in one\npass, you can do\n\n>>> df.groupby('A').pipe(lambda x: x.max() - x.min())\n B\nA\na 2\nb 2\"\"\")\n @Appender(_pipe_template)\n def pipe(self, func, *args, **kwargs):\n return com._pipe(self, func, *args, **kwargs)\n\n plot = property(GroupByPlot)\n\n def _make_wrapper(self, name):\n if name not in self._apply_whitelist:\n is_callable = callable(getattr(self._selected_obj, name, None))\n kind = ' callable ' if is_callable else ' '\n msg = (\"Cannot access{0}attribute {1!r} of {2!r} objects, try \"\n \"using the 'apply' method\".format(kind, name,\n type(self).__name__))\n raise AttributeError(msg)\n\n self._set_group_selection()\n\n # need to setup the selection\n # as are not passed directly but in the grouper\n f = getattr(self._selected_obj, name)\n if not isinstance(f, types.MethodType):\n return self.apply(lambda self: getattr(self, name))\n\n f = getattr(type(self._selected_obj), name)\n\n def wrapper(*args, **kwargs):\n # a little trickery for aggregation functions that need an axis\n # argument\n kwargs_with_axis = kwargs.copy()\n if 'axis' not in kwargs_with_axis or \\\n kwargs_with_axis['axis'] is None:\n kwargs_with_axis['axis'] = self.axis\n\n def curried_with_axis(x):\n return f(x, *args, **kwargs_with_axis)\n\n def curried(x):\n return f(x, *args, **kwargs)\n\n # preserve the name so we can detect it when calling plot methods,\n # to avoid duplicates\n curried.__name__ = curried_with_axis.__name__ = name\n\n # special case otherwise extra plots are created when catching the\n # exception below\n if name in base.plotting_methods:\n return self.apply(curried)\n\n try:\n return self.apply(curried_with_axis)\n except Exception:\n try:\n return self.apply(curried)\n except Exception:\n\n # related to : GH3688\n # try item-by-item\n # this can be called recursively, so need to raise\n # ValueError\n # if we don't have this method to indicated to aggregate to\n # mark this column as an error\n try:\n return self._aggregate_item_by_item(name,\n *args, **kwargs)\n except (AttributeError):\n raise ValueError\n\n return wrapper\n\n def get_group(self, name, obj=None):\n \"\"\"\n Constructs NDFrame from group with provided name\n\n Parameters\n ----------\n name : object\n the name of the group to get as a DataFrame\n obj : NDFrame, default None\n the NDFrame to take the DataFrame out of. If\n it is None, the object groupby was called on will\n be used\n\n Returns\n -------\n group : same type as obj\n \"\"\"\n if obj is None:\n obj = self._selected_obj\n\n inds = self._get_index(name)\n if not len(inds):\n raise KeyError(name)\n\n return obj._take(inds, axis=self.axis)\n\n def __iter__(self):\n \"\"\"\n Groupby iterator\n\n Returns\n -------\n Generator yielding sequence of (name, subsetted object)\n for each group\n \"\"\"\n return self.grouper.get_iterator(self.obj, axis=self.axis)\n\n @Appender(_apply_docs['template']\n .format(input=\"dataframe\",\n examples=_apply_docs['dataframe_examples']))\n def apply(self, func, *args, **kwargs):\n\n func = self._is_builtin_func(func)\n\n # this is needed so we don't try and wrap strings. 
If we could\n # resolve functions to their callable functions prior, this\n # wouldn't be needed\n if args or kwargs:\n if callable(func):\n\n @wraps(func)\n def f(g):\n with np.errstate(all='ignore'):\n return func(g, *args, **kwargs)\n else:\n raise ValueError('func must be a callable if args or '\n 'kwargs are supplied')\n else:\n f = func\n\n # ignore SettingWithCopy here in case the user mutates\n with option_context('mode.chained_assignment', None):\n try:\n result = self._python_apply_general(f)\n except Exception:\n\n # gh-20949\n # try again, with .apply acting as a filtering\n # operation, by excluding the grouping column\n # This would normally not be triggered\n # except if the udf is trying an operation that\n # fails on *some* columns, e.g. a numeric operation\n # on a string grouper column\n\n with _group_selection_context(self):\n return self._python_apply_general(f)\n\n return result\n\n def _python_apply_general(self, f):\n keys, values, mutated = self.grouper.apply(f, self._selected_obj,\n self.axis)\n\n return self._wrap_applied_output(\n keys,\n values,\n not_indexed_same=mutated or self.mutated)\n\n def _iterate_slices(self):\n yield self._selection_name, self._selected_obj\n\n def transform(self, func, *args, **kwargs):\n raise com.AbstractMethodError(self)\n\n def _cumcount_array(self, ascending=True):\n \"\"\"\n Parameters\n ----------\n ascending : bool, default True\n If False, number in reverse, from length of group - 1 to 0.\n\n Notes\n -----\n this is currently implementing sort=False\n (though the default is sort=True) for groupby in general\n \"\"\"\n ids, _, ngroups = self.grouper.group_info\n sorter = get_group_index_sorter(ids, ngroups)\n ids, count = ids[sorter], len(ids)\n\n if count == 0:\n return np.empty(0, dtype=np.int64)\n\n run = np.r_[True, ids[:-1] != ids[1:]]\n rep = np.diff(np.r_[np.nonzero(run)[0], count])\n out = (~run).cumsum()\n\n if ascending:\n out -= np.repeat(out[run], rep)\n else:\n out = np.repeat(out[np.r_[run[1:], True]], rep) - out\n\n rev = np.empty(count, dtype=np.intp)\n rev[sorter] = np.arange(count, dtype=np.intp)\n return out[rev].astype(np.int64, copy=False)\n\n def _try_cast(self, result, obj, numeric_only=False):\n \"\"\"\n try to cast the result to our obj original type,\n we may have roundtripped thru object in the mean-time\n\n if numeric_only is True, then only try to cast numerics\n and not datetimelikes\n\n \"\"\"\n if obj.ndim > 1:\n dtype = obj.values.dtype\n else:\n dtype = obj.dtype\n\n if not is_scalar(result):\n if numeric_only and is_numeric_dtype(dtype) or not numeric_only:\n result = maybe_downcast_to_dtype(result, dtype)\n\n return result\n\n def _transform_should_cast(self, func_nm):\n \"\"\"\n Parameters:\n -----------\n func_nm: str\n The name of the aggregation function being performed\n\n Returns:\n --------\n bool\n Whether transform should attempt to cast the result of aggregation\n \"\"\"\n return (self.size().fillna(0) > 0).any() and (\n func_nm not in base.cython_cast_blacklist)\n\n def _cython_transform(self, how, numeric_only=True, **kwargs):\n output = collections.OrderedDict()\n for name, obj in self._iterate_slices():\n is_numeric = is_numeric_dtype(obj.dtype)\n if numeric_only and not is_numeric:\n continue\n\n try:\n result, names = self.grouper.transform(obj.values, how,\n **kwargs)\n except NotImplementedError:\n continue\n except AssertionError as e:\n raise GroupByError(str(e))\n if self._transform_should_cast(how):\n output[name] = self._try_cast(result, obj)\n else:\n output[name] = 
result\n\n if len(output) == 0:\n raise DataError('No numeric types to aggregate')\n\n return self._wrap_transformed_output(output, names)\n\n def _cython_agg_general(self, how, alt=None, numeric_only=True,\n min_count=-1):\n output = {}\n for name, obj in self._iterate_slices():\n is_numeric = is_numeric_dtype(obj.dtype)\n if numeric_only and not is_numeric:\n continue\n\n try:\n result, names = self.grouper.aggregate(obj.values, how,\n min_count=min_count)\n except AssertionError as e:\n raise GroupByError(str(e))\n output[name] = self._try_cast(result, obj)\n\n if len(output) == 0:\n raise DataError('No numeric types to aggregate')\n\n return self._wrap_aggregated_output(output, names)\n\n def _python_agg_general(self, func, *args, **kwargs):\n func = self._is_builtin_func(func)\n f = lambda x: func(x, *args, **kwargs)\n\n # iterate through \"columns\" ex exclusions to populate output dict\n output = {}\n for name, obj in self._iterate_slices():\n try:\n result, counts = self.grouper.agg_series(obj, f)\n output[name] = self._try_cast(result, obj, numeric_only=True)\n except TypeError:\n continue\n\n if len(output) == 0:\n return self._python_apply_general(f)\n\n if self.grouper._filter_empty_groups:\n\n mask = counts.ravel() > 0\n for name, result in compat.iteritems(output):\n\n # since we are masking, make sure that we have a float object\n values = result\n if is_numeric_dtype(values.dtype):\n values = ensure_float(values)\n\n output[name] = self._try_cast(values[mask], result)\n\n return self._wrap_aggregated_output(output)\n\n def _wrap_applied_output(self, *args, **kwargs):\n raise com.AbstractMethodError(self)\n\n def _concat_objects(self, keys, values, not_indexed_same=False):\n from pandas.core.reshape.concat import concat\n\n def reset_identity(values):\n # reset the identities of the components\n # of the values to prevent aliasing\n for v in com._not_none(*values):\n ax = v._get_axis(self.axis)\n ax._reset_identity()\n return values\n\n if not not_indexed_same:\n result = concat(values, axis=self.axis)\n ax = self._selected_obj._get_axis(self.axis)\n\n if isinstance(result, Series):\n result = result.reindex(ax)\n else:\n\n # this is a very unfortunate situation\n # we have a multi-index that is NOT lexsorted\n # and we have a result which is duplicated\n # we can't reindex, so we resort to this\n # GH 14776\n if isinstance(ax, MultiIndex) and not ax.is_unique:\n indexer = algorithms.unique1d(\n result.index.get_indexer_for(ax.values))\n result = result.take(indexer, axis=self.axis)\n else:\n result = result.reindex(ax, axis=self.axis)\n\n elif self.group_keys:\n\n values = reset_identity(values)\n if self.as_index:\n\n # possible MI return case\n group_keys = keys\n group_levels = self.grouper.levels\n group_names = self.grouper.names\n\n result = concat(values, axis=self.axis, keys=group_keys,\n levels=group_levels, names=group_names,\n sort=False)\n else:\n\n # GH5610, returns a MI, with the first level being a\n # range index\n keys = list(range(len(values)))\n result = concat(values, axis=self.axis, keys=keys)\n else:\n values = reset_identity(values)\n result = concat(values, axis=self.axis)\n\n if (isinstance(result, Series) and\n getattr(self, '_selection_name', None) is not None):\n\n result.name = self._selection_name\n\n return result\n\n def _apply_filter(self, indices, dropna):\n if len(indices) == 0:\n indices = np.array([], dtype='int64')\n else:\n indices = np.sort(np.concatenate(indices))\n if dropna:\n filtered = self._selected_obj.take(indices, 
axis=self.axis)\n else:\n mask = np.empty(len(self._selected_obj.index), dtype=bool)\n mask.fill(False)\n mask[indices.astype(int)] = True\n # mask fails to broadcast when passed to where; broadcast manually.\n mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T\n filtered = self._selected_obj.where(mask) # Fill with NaNs.\n return filtered\n\n\nclass GroupBy(_GroupBy):\n\n \"\"\"\n Class for grouping and aggregating relational data. See aggregate,\n transform, and apply functions on this object.\n\n It's easiest to use obj.groupby(...) to use GroupBy, but you can also do:\n\n ::\n\n grouped = groupby(obj, ...)\n\n Parameters\n ----------\n obj : pandas object\n axis : int, default 0\n level : int, default None\n Level of MultiIndex\n groupings : list of Grouping objects\n Most users should ignore this\n exclusions : array-like, optional\n List of columns to exclude\n name : string\n Most users should ignore this\n\n Notes\n -----\n After grouping, see aggregate, apply, and transform functions. Here are\n some other brief notes about usage. When grouping by multiple groups, the\n result index will be a MultiIndex (hierarchical) by default.\n\n Iteration produces (key, group) tuples, i.e. chunking the data by group. So\n you can write code like:\n\n ::\n\n grouped = obj.groupby(keys, axis=axis)\n for key, group in grouped:\n # do something with the data\n\n Function calls on GroupBy, if not specially implemented, \"dispatch\" to the\n grouped data. So if you group a DataFrame and wish to invoke the std()\n method on each group, you can simply do:\n\n ::\n\n df.groupby(mapper).std()\n\n rather than\n\n ::\n\n df.groupby(mapper).aggregate(np.std)\n\n You can pass arguments to these \"wrapped\" functions, too.\n\n See the online documentation for full exposition on these topics and much\n more\n\n Returns\n -------\n **Attributes**\n groups : dict\n {group name -> group labels}\n len(grouped) : int\n Number of groups\n \"\"\"\n def _bool_agg(self, val_test, skipna):\n \"\"\"Shared func to call any / all Cython GroupBy implementations\"\"\"\n\n def objs_to_bool(vals):\n try:\n vals = vals.astype(np.bool)\n except ValueError: # for objects\n vals = np.array([bool(x) for x in vals])\n\n return vals.view(np.uint8)\n\n def result_to_bool(result):\n return result.astype(np.bool, copy=False)\n\n return self._get_cythonized_result('group_any_all', self.grouper,\n aggregate=True,\n cython_dtype=np.uint8,\n needs_values=True,\n needs_mask=True,\n pre_processing=objs_to_bool,\n post_processing=result_to_bool,\n val_test=val_test, skipna=skipna)\n\n @Substitution(name='groupby')\n @Appender(_doc_template)\n def any(self, skipna=True):\n \"\"\"\n Returns True if any value in the group is truthful, else False\n\n Parameters\n ----------\n skipna : bool, default True\n Flag to ignore nan values during truth testing\n \"\"\"\n return self._bool_agg('any', skipna)\n\n @Substitution(name='groupby')\n @Appender(_doc_template)\n def all(self, skipna=True):\n \"\"\"Returns True if all values in the group are truthful, else False\n\n Parameters\n ----------\n skipna : bool, default True\n Flag to ignore nan values during truth testing\n \"\"\"\n return self._bool_agg('all', skipna)\n\n @Substitution(name='groupby')\n @Appender(_doc_template)\n def count(self):\n \"\"\"Compute count of group, excluding missing values\"\"\"\n\n # defined here for API doc\n raise NotImplementedError\n\n @Substitution(name='groupby')\n @Appender(_doc_template)\n def mean(self, *args, **kwargs):\n \"\"\"\n Compute mean of 
groups, excluding missing values.\n\n Returns\n -------\n pandas.Series or pandas.DataFrame\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2],\n ... 'B': [np.nan, 2, 3, 4, 5],\n ... 'C': [1, 2, 1, 1, 2]}, columns=['A', 'B', 'C'])\n\n Groupby one column and return the mean of the remaining columns in\n each group.\n\n >>> df.groupby('A').mean()\n >>>\n B C\n A\n 1 3.0 1.333333\n 2 4.0 1.500000\n\n Groupby two columns and return the mean of the remaining column.\n\n >>> df.groupby(['A', 'B']).mean()\n >>>\n C\n A B\n 1 2.0 2\n 4.0 1\n 2 3.0 1\n 5.0 2\n\n Groupby one column and return the mean of only particular column in\n the group.\n\n >>> df.groupby('A')['B'].mean()\n >>>\n A\n 1 3.0\n 2 4.0\n Name: B, dtype: float64\n \"\"\"\n nv.validate_groupby_func('mean', args, kwargs, ['numeric_only'])\n try:\n return self._cython_agg_general('mean', **kwargs)\n except GroupByError:\n raise\n except Exception: # pragma: no cover\n with _group_selection_context(self):\n f = lambda x: x.mean(axis=self.axis, **kwargs)\n return self._python_agg_general(f)\n\n @Substitution(name='groupby')\n @Appender(_doc_template)\n def median(self, **kwargs):\n \"\"\"\n Compute median of groups, excluding missing values\n\n For multiple groupings, the result index will be a MultiIndex\n \"\"\"\n try:\n return self._cython_agg_general('median', **kwargs)\n except GroupByError:\n raise\n except Exception: # pragma: no cover\n\n def f(x):\n if isinstance(x, np.ndarray):\n x = Series(x)\n return x.median(axis=self.axis, **kwargs)\n with _group_selection_context(self):\n return self._python_agg_general(f)\n\n @Substitution(name='groupby')\n @Appender(_doc_template)\n def std(self, ddof=1, *args, **kwargs):\n \"\"\"\n Compute standard deviation of groups, excluding missing values\n\n For multiple groupings, the result index will be a MultiIndex\n\n Parameters\n ----------\n ddof : integer, default 1\n degrees of freedom\n \"\"\"\n\n # TODO: implement at Cython level?\n nv.validate_groupby_func('std', args, kwargs)\n return np.sqrt(self.var(ddof=ddof, **kwargs))\n\n @Substitution(name='groupby')\n @Appender(_doc_template)\n def var(self, ddof=1, *args, **kwargs):\n \"\"\"\n Compute variance of groups, excluding missing values\n\n For multiple groupings, the result index will be a MultiIndex\n\n Parameters\n ----------\n ddof : integer, default 1\n degrees of freedom\n \"\"\"\n nv.validate_groupby_func('var', args, kwargs)\n if ddof == 1:\n try:\n return self._cython_agg_general('var', **kwargs)\n except Exception:\n f = lambda x: x.var(ddof=ddof, **kwargs)\n with _group_selection_context(self):\n return self._python_agg_general(f)\n else:\n f = lambda x: x.var(ddof=ddof, **kwargs)\n with _group_selection_context(self):\n return self._python_agg_general(f)\n\n @Substitution(name='groupby')\n @Appender(_doc_template)\n def sem(self, ddof=1):\n \"\"\"\n Compute standard error of the mean of groups, excluding missing values\n\n For multiple groupings, the result index will be a MultiIndex\n\n Parameters\n ----------\n ddof : integer, default 1\n degrees of freedom\n \"\"\"\n\n return self.std(ddof=ddof) / np.sqrt(self.count())\n\n @Substitution(name='groupby')\n @Appender(_doc_template)\n def size(self):\n \"\"\"Compute group sizes\"\"\"\n result = self.grouper.size()\n\n if isinstance(self.obj, Series):\n result.name = getattr(self.obj, 'name', None)\n return result\n\n @classmethod\n def _add_numeric_operations(cls):\n \"\"\" add numeric operations to the GroupBy generically \"\"\"\n\n def 
groupby_function(name, alias, npfunc,\n numeric_only=True, _convert=False,\n min_count=-1):\n\n _local_template = \"Compute %(f)s of group values\"\n\n @Substitution(name='groupby', f=name)\n @Appender(_doc_template)\n @Appender(_local_template)\n def f(self, **kwargs):\n if 'numeric_only' not in kwargs:\n kwargs['numeric_only'] = numeric_only\n if 'min_count' not in kwargs:\n kwargs['min_count'] = min_count\n\n self._set_group_selection()\n try:\n return self._cython_agg_general(\n alias, alt=npfunc, **kwargs)\n except AssertionError as e:\n raise SpecificationError(str(e))\n except Exception:\n result = self.aggregate(\n lambda x: npfunc(x, axis=self.axis))\n if _convert:\n result = result._convert(datetime=True)\n return result\n\n set_function_name(f, name, cls)\n\n return f\n\n def first_compat(x, axis=0):\n\n def first(x):\n\n x = np.asarray(x)\n x = x[notna(x)]\n if len(x) == 0:\n return np.nan\n return x[0]\n\n if isinstance(x, DataFrame):\n return x.apply(first, axis=axis)\n else:\n return first(x)\n\n def last_compat(x, axis=0):\n\n def last(x):\n\n x = np.asarray(x)\n x = x[notna(x)]\n if len(x) == 0:\n return np.nan\n return x[-1]\n\n if isinstance(x, DataFrame):\n return x.apply(last, axis=axis)\n else:\n return last(x)\n\n cls.sum = groupby_function('sum', 'add', np.sum, min_count=0)\n cls.prod = groupby_function('prod', 'prod', np.prod, min_count=0)\n cls.min = groupby_function('min', 'min', np.min, numeric_only=False)\n cls.max = groupby_function('max', 'max', np.max, numeric_only=False)\n cls.first = groupby_function('first', 'first', first_compat,\n numeric_only=False)\n cls.last = groupby_function('last', 'last', last_compat,\n numeric_only=False)\n\n @Substitution(name='groupby')\n @Appender(_doc_template)\n def ohlc(self):\n \"\"\"\n Compute sum of values, excluding missing values\n For multiple groupings, the result index will be a MultiIndex\n \"\"\"\n\n return self._apply_to_column_groupbys(\n lambda x: x._cython_agg_general('ohlc'))\n\n @Appender(DataFrame.describe.__doc__)\n def describe(self, **kwargs):\n with _group_selection_context(self):\n result = self.apply(lambda x: x.describe(**kwargs))\n if self.axis == 1:\n return result.T\n return result.unstack()\n\n @Substitution(name='groupby')\n @Appender(_doc_template)\n def resample(self, rule, *args, **kwargs):\n \"\"\"\n Provide resampling when using a TimeGrouper\n Return a new grouper with our resampler appended\n \"\"\"\n from pandas.core.resample import get_resampler_for_grouping\n return get_resampler_for_grouping(self, rule, *args, **kwargs)\n\n @Substitution(name='groupby')\n @Appender(_doc_template)\n def rolling(self, *args, **kwargs):\n \"\"\"\n Return a rolling grouper, providing rolling\n functionality per group\n\n \"\"\"\n from pandas.core.window import RollingGroupby\n return RollingGroupby(self, *args, **kwargs)\n\n @Substitution(name='groupby')\n @Appender(_doc_template)\n def expanding(self, *args, **kwargs):\n \"\"\"\n Return an expanding grouper, providing expanding\n functionality per group\n\n \"\"\"\n from pandas.core.window import ExpandingGroupby\n return ExpandingGroupby(self, *args, **kwargs)\n\n def _fill(self, direction, limit=None):\n \"\"\"Shared function for `pad` and `backfill` to call Cython method\n\n Parameters\n ----------\n direction : {'ffill', 'bfill'}\n Direction passed to underlying Cython function. `bfill` will cause\n values to be filled backwards. 
`ffill` and any other values will\n default to a forward fill\n limit : int, default None\n Maximum number of consecutive values to fill. If `None`, this\n method will convert to -1 prior to passing to Cython\n\n Returns\n -------\n `Series` or `DataFrame` with filled values\n\n See Also\n --------\n pad\n backfill\n \"\"\"\n # Need int value for Cython\n if limit is None:\n limit = -1\n\n return self._get_cythonized_result('group_fillna_indexer',\n self.grouper, needs_mask=True,\n cython_dtype=np.int64,\n result_is_index=True,\n direction=direction, limit=limit)\n\n @Substitution(name='groupby')\n def pad(self, limit=None):\n \"\"\"\n Forward fill the values\n\n Parameters\n ----------\n limit : integer, optional\n limit of how many values to fill\n\n See Also\n --------\n Series.pad\n DataFrame.pad\n Series.fillna\n DataFrame.fillna\n \"\"\"\n return self._fill('ffill', limit=limit)\n ffill = pad\n\n @Substitution(name='groupby')\n def backfill(self, limit=None):\n \"\"\"\n Backward fill the values\n\n Parameters\n ----------\n limit : integer, optional\n limit of how many values to fill\n\n See Also\n --------\n Series.backfill\n DataFrame.backfill\n Series.fillna\n DataFrame.fillna\n \"\"\"\n return self._fill('bfill', limit=limit)\n bfill = backfill\n\n @Substitution(name='groupby')\n @Appender(_doc_template)\n def nth(self, n, dropna=None):\n \"\"\"\n Take the nth row from each group if n is an int, or a subset of rows\n if n is a list of ints.\n\n If dropna, will take the nth non-null row, dropna is either\n Truthy (if a Series) or 'all', 'any' (if a DataFrame);\n this is equivalent to calling dropna(how=dropna) before the\n groupby.\n\n Parameters\n ----------\n n : int or list of ints\n a single nth value for the row or a list of nth values\n dropna : None or str, optional\n apply the specified dropna operation before counting which row is\n the nth row. Needs to be None, 'any' or 'all'\n\n Examples\n --------\n\n >>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2],\n ... 
'B': [np.nan, 2, 3, 4, 5]}, columns=['A', 'B'])\n >>> g = df.groupby('A')\n >>> g.nth(0)\n B\n A\n 1 NaN\n 2 3.0\n >>> g.nth(1)\n B\n A\n 1 2.0\n 2 5.0\n >>> g.nth(-1)\n B\n A\n 1 4.0\n 2 5.0\n >>> g.nth([0, 1])\n B\n A\n 1 NaN\n 1 2.0\n 2 3.0\n 2 5.0\n\n Specifying `dropna` allows count ignoring ``NaN``\n\n >>> g.nth(0, dropna='any')\n B\n A\n 1 2.0\n 2 3.0\n\n NaNs denote group exhausted when using dropna\n\n >>> g.nth(3, dropna='any')\n B\n A\n 1 NaN\n 2 NaN\n\n Specifying `as_index=False` in `groupby` keeps the original index.\n\n >>> df.groupby('A', as_index=False).nth(1)\n A B\n 1 1 2.0\n 4 2 5.0\n \"\"\"\n\n if isinstance(n, int):\n nth_values = [n]\n elif isinstance(n, (set, list, tuple)):\n nth_values = list(set(n))\n if dropna is not None:\n raise ValueError(\n \"dropna option with a list of nth values is not supported\")\n else:\n raise TypeError(\"n needs to be an int or a list/set/tuple of ints\")\n\n nth_values = np.array(nth_values, dtype=np.intp)\n self._set_group_selection()\n\n if not dropna:\n mask = np.in1d(self._cumcount_array(), nth_values) | \\\n np.in1d(self._cumcount_array(ascending=False) + 1, -nth_values)\n\n out = self._selected_obj[mask]\n if not self.as_index:\n return out\n\n ids, _, _ = self.grouper.group_info\n out.index = self.grouper.result_index[ids[mask]]\n\n return out.sort_index() if self.sort else out\n\n if dropna not in ['any', 'all']:\n if isinstance(self._selected_obj, Series) and dropna is True:\n warnings.warn(\"the dropna={dropna} keyword is deprecated,\"\n \"use dropna='all' instead. \"\n \"For a Series groupby, dropna must be \"\n \"either None, 'any' or 'all'.\".format(\n dropna=dropna),\n FutureWarning,\n stacklevel=2)\n dropna = 'all'\n else:\n # Note: when agg-ing picker doesn't raise this,\n # just returns NaN\n raise ValueError(\"For a DataFrame groupby, dropna must be \"\n \"either None, 'any' or 'all', \"\n \"(was passed %s).\" % (dropna),)\n\n # old behaviour, but with all and any support for DataFrames.\n # modified in GH 7559 to have better perf\n max_len = n if n >= 0 else - 1 - n\n dropped = self.obj.dropna(how=dropna, axis=self.axis)\n\n # get a new grouper for our dropped obj\n if self.keys is None and self.level is None:\n\n # we don't have the grouper info available\n # (e.g. we have selected out\n # a column that is not in the current object)\n axis = self.grouper.axis\n grouper = axis[axis.isin(dropped.index)]\n\n else:\n\n # create a grouper with the original parameters, but on the dropped\n # object\n from pandas.core.groupby.grouper import _get_grouper\n grouper, _, _ = _get_grouper(dropped, key=self.keys,\n axis=self.axis, level=self.level,\n sort=self.sort,\n mutated=self.mutated)\n\n grb = dropped.groupby(grouper, as_index=self.as_index, sort=self.sort)\n sizes, result = grb.size(), grb.nth(n)\n mask = (sizes < max_len).values\n\n # set the results which don't meet the criteria\n if len(result) and mask.any():\n result.loc[mask] = np.nan\n\n # reset/reindex to the original groups\n if len(self.obj) == len(dropped) or \\\n len(result) == len(self.grouper.result_index):\n result.index = self.grouper.result_index\n else:\n result = result.reindex(self.grouper.result_index)\n\n return result\n\n @Substitution(name='groupby')\n def ngroup(self, ascending=True):\n \"\"\"\n Number each group from 0 to the number of groups - 1.\n\n This is the enumerative complement of cumcount. 
Note that the\n numbers given to the groups match the order in which the groups\n would be seen when iterating over the groupby object, not the\n order they are first observed.\n\n .. versionadded:: 0.20.2\n\n Parameters\n ----------\n ascending : bool, default True\n If False, number in reverse, from number of group - 1 to 0.\n\n Examples\n --------\n\n >>> df = pd.DataFrame({\"A\": list(\"aaabba\")})\n >>> df\n A\n 0 a\n 1 a\n 2 a\n 3 b\n 4 b\n 5 a\n >>> df.groupby('A').ngroup()\n 0 0\n 1 0\n 2 0\n 3 1\n 4 1\n 5 0\n dtype: int64\n >>> df.groupby('A').ngroup(ascending=False)\n 0 1\n 1 1\n 2 1\n 3 0\n 4 0\n 5 1\n dtype: int64\n >>> df.groupby([\"A\", [1,1,2,3,2,1]]).ngroup()\n 0 0\n 1 0\n 2 1\n 3 3\n 4 2\n 5 0\n dtype: int64\n\n See also\n --------\n .cumcount : Number the rows in each group.\n \"\"\"\n\n with _group_selection_context(self):\n index = self._selected_obj.index\n result = Series(self.grouper.group_info[0], index)\n if not ascending:\n result = self.ngroups - 1 - result\n return result\n\n @Substitution(name='groupby')\n def cumcount(self, ascending=True):\n \"\"\"\n Number each item in each group from 0 to the length of that group - 1.\n\n Essentially this is equivalent to\n\n >>> self.apply(lambda x: pd.Series(np.arange(len(x)), x.index))\n\n Parameters\n ----------\n ascending : bool, default True\n If False, number in reverse, from length of group - 1 to 0.\n\n Examples\n --------\n\n >>> df = pd.DataFrame([['a'], ['a'], ['a'], ['b'], ['b'], ['a']],\n ... columns=['A'])\n >>> df\n A\n 0 a\n 1 a\n 2 a\n 3 b\n 4 b\n 5 a\n >>> df.groupby('A').cumcount()\n 0 0\n 1 1\n 2 2\n 3 0\n 4 1\n 5 3\n dtype: int64\n >>> df.groupby('A').cumcount(ascending=False)\n 0 3\n 1 2\n 2 1\n 3 1\n 4 0\n 5 0\n dtype: int64\n\n See also\n --------\n .ngroup : Number the groups themselves.\n \"\"\"\n\n with _group_selection_context(self):\n index = self._selected_obj.index\n cumcounts = self._cumcount_array(ascending=ascending)\n return Series(cumcounts, index)\n\n @Substitution(name='groupby')\n @Appender(_doc_template)\n def rank(self, method='average', ascending=True, na_option='keep',\n pct=False, axis=0):\n \"\"\"\n Provides the rank of values within each group.\n\n Parameters\n ----------\n method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'\n * average: average rank of group\n * min: lowest rank in group\n * max: highest rank in group\n * first: ranks assigned in order they appear in the array\n * dense: like 'min', but rank always increases by 1 between groups\n ascending : boolean, default True\n False for ranks by high (1) to low (N)\n na_option : {'keep', 'top', 'bottom'}, default 'keep'\n * keep: leave NA values where they are\n * top: smallest rank if ascending\n * bottom: smallest rank if descending\n pct : boolean, default False\n Compute percentage rank of data within each group\n axis : int, default 0\n The axis of the object over which to compute the rank.\n\n Returns\n -----\n DataFrame with ranking of values within each group\n \"\"\"\n if na_option not in {'keep', 'top', 'bottom'}:\n msg = \"na_option must be one of 'keep', 'top', or 'bottom'\"\n raise ValueError(msg)\n return self._cython_transform('rank', numeric_only=False,\n ties_method=method, ascending=ascending,\n na_option=na_option, pct=pct, axis=axis)\n\n @Substitution(name='groupby')\n @Appender(_doc_template)\n def cumprod(self, axis=0, *args, **kwargs):\n \"\"\"Cumulative product for each group\"\"\"\n nv.validate_groupby_func('cumprod', args, kwargs,\n ['numeric_only', 'skipna'])\n if axis != 
0:\n return self.apply(lambda x: x.cumprod(axis=axis, **kwargs))\n\n return self._cython_transform('cumprod', **kwargs)\n\n @Substitution(name='groupby')\n @Appender(_doc_template)\n def cumsum(self, axis=0, *args, **kwargs):\n \"\"\"Cumulative sum for each group\"\"\"\n nv.validate_groupby_func('cumsum', args, kwargs,\n ['numeric_only', 'skipna'])\n if axis != 0:\n return self.apply(lambda x: x.cumsum(axis=axis, **kwargs))\n\n return self._cython_transform('cumsum', **kwargs)\n\n @Substitution(name='groupby')\n @Appender(_doc_template)\n def cummin(self, axis=0, **kwargs):\n \"\"\"Cumulative min for each group\"\"\"\n if axis != 0:\n return self.apply(lambda x: np.minimum.accumulate(x, axis))\n\n return self._cython_transform('cummin', numeric_only=False)\n\n @Substitution(name='groupby')\n @Appender(_doc_template)\n def cummax(self, axis=0, **kwargs):\n \"\"\"Cumulative max for each group\"\"\"\n if axis != 0:\n return self.apply(lambda x: np.maximum.accumulate(x, axis))\n\n return self._cython_transform('cummax', numeric_only=False)\n\n def _get_cythonized_result(self, how, grouper, aggregate=False,\n cython_dtype=None, needs_values=False,\n needs_mask=False, needs_ngroups=False,\n result_is_index=False,\n pre_processing=None, post_processing=None,\n **kwargs):\n \"\"\"Get result for Cythonized functions\n\n Parameters\n ----------\n how : str, Cythonized function name to be called\n grouper : Grouper object containing pertinent group info\n aggregate : bool, default False\n Whether the result should be aggregated to match the number of\n groups\n cython_dtype : default None\n Type of the array that will be modified by the Cython call. If\n `None`, the type will be inferred from the values of each slice\n needs_values : bool, default False\n Whether the values should be a part of the Cython call\n signature\n needs_mask : bool, default False\n Whether boolean mask needs to be part of the Cython call\n signature\n needs_ngroups : bool, default False\n Whether number of groups is part of the Cython call signature\n result_is_index : bool, default False\n Whether the result of the Cython operation is an index of\n values to be retrieved, instead of the actual values themselves\n pre_processing : function, default None\n Function to be applied to `values` prior to passing to Cython\n Raises if `needs_values` is False\n post_processing : function, default None\n Function to be applied to result of Cython function\n **kwargs : dict\n Extra arguments to be passed back to Cython funcs\n\n Returns\n -------\n `Series` or `DataFrame` with filled values\n \"\"\"\n if result_is_index and aggregate:\n raise ValueError(\"'result_is_index' and 'aggregate' cannot both \"\n \"be True!\")\n if post_processing:\n if not callable(pre_processing):\n raise ValueError(\"'post_processing' must be a callable!\")\n if pre_processing:\n if not callable(pre_processing):\n raise ValueError(\"'pre_processing' must be a callable!\")\n if not needs_values:\n raise ValueError(\"Cannot use 'pre_processing' without \"\n \"specifying 'needs_values'!\")\n\n labels, _, ngroups = grouper.group_info\n output = collections.OrderedDict()\n base_func = getattr(libgroupby, how)\n\n for name, obj in self._iterate_slices():\n if aggregate:\n result_sz = ngroups\n else:\n result_sz = len(obj.values)\n\n if not cython_dtype:\n cython_dtype = obj.values.dtype\n\n result = np.zeros(result_sz, dtype=cython_dtype)\n func = partial(base_func, result, labels)\n if needs_values:\n vals = obj.values\n if pre_processing:\n vals = 
pre_processing(vals)\n func = partial(func, vals)\n\n if needs_mask:\n mask = isna(obj.values).view(np.uint8)\n func = partial(func, mask)\n\n if needs_ngroups:\n func = partial(func, ngroups)\n\n func(**kwargs) # Call func to modify indexer values in place\n\n if result_is_index:\n result = algorithms.take_nd(obj.values, result)\n\n if post_processing:\n result = post_processing(result)\n\n output[name] = result\n\n if aggregate:\n return self._wrap_aggregated_output(output)\n else:\n return self._wrap_transformed_output(output)\n\n @Substitution(name='groupby')\n @Appender(_doc_template)\n def shift(self, periods=1, freq=None, axis=0):\n \"\"\"\n Shift each group by periods observations\n\n Parameters\n ----------\n periods : integer, default 1\n number of periods to shift\n freq : frequency string\n axis : axis to shift, default 0\n \"\"\"\n\n if freq is not None or axis != 0:\n return self.apply(lambda x: x.shift(periods, freq, axis))\n\n return self._get_cythonized_result('group_shift_indexer',\n self.grouper, cython_dtype=np.int64,\n needs_ngroups=True,\n result_is_index=True,\n periods=periods)\n\n @Substitution(name='groupby')\n @Appender(_doc_template)\n def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None,\n axis=0):\n \"\"\"Calculate pct_change of each value to previous entry in group\"\"\"\n if freq is not None or axis != 0:\n return self.apply(lambda x: x.pct_change(periods=periods,\n fill_method=fill_method,\n limit=limit, freq=freq,\n axis=axis))\n\n filled = getattr(self, fill_method)(limit=limit).drop(\n self.grouper.names, axis=1)\n shifted = filled.shift(periods=periods, freq=freq)\n\n return (filled / shifted) - 1\n\n @Substitution(name='groupby')\n @Appender(_doc_template)\n def head(self, n=5):\n \"\"\"\n Returns first n rows of each group.\n\n Essentially equivalent to ``.apply(lambda x: x.head(n))``,\n except ignores as_index flag.\n\n Examples\n --------\n\n >>> df = pd.DataFrame([[1, 2], [1, 4], [5, 6]],\n columns=['A', 'B'])\n >>> df.groupby('A', as_index=False).head(1)\n A B\n 0 1 2\n 2 5 6\n >>> df.groupby('A').head(1)\n A B\n 0 1 2\n 2 5 6\n \"\"\"\n self._reset_group_selection()\n mask = self._cumcount_array() < n\n return self._selected_obj[mask].dropna(subset=[self.keys])\n\n @Substitution(name='groupby')\n @Appender(_doc_template)\n def tail(self, n=5):\n \"\"\"\n Returns last n rows of each group\n\n Essentially equivalent to ``.apply(lambda x: x.tail(n))``,\n except ignores as_index flag.\n\n Examples\n --------\n\n >>> df = pd.DataFrame([['a', 1], ['a', 2], ['b', 1], ['b', 2]],\n columns=['A', 'B'])\n >>> df.groupby('A').tail(1)\n A B\n 1 a 2\n 3 b 2\n >>> df.groupby('A').head(1)\n A B\n 0 a 1\n 2 b 1\n \"\"\"\n self._reset_group_selection()\n mask = self._cumcount_array(ascending=False) < n\n return self._selected_obj[mask].dropna(subset=[self.keys])\n\n\nGroupBy._add_numeric_operations()\n\n\n@Appender(GroupBy.__doc__)\ndef groupby(obj, by, **kwds):\n if isinstance(obj, Series):\n from pandas.core.groupby.generic import SeriesGroupBy\n klass = SeriesGroupBy\n elif isinstance(obj, DataFrame):\n from pandas.core.groupby.generic import DataFrameGroupBy\n klass = DataFrameGroupBy\n else: # pragma: no cover\n raise TypeError('invalid type: %s' % type(obj))\n\n return klass(obj, by, **kwds)\n" ]
[ [ "pandas.core.window.ExpandingGroupby", "numpy.asarray", "pandas.core.common.AbstractMethodError", "numpy.minimum.accumulate", "pandas.core.groupby.grouper._get_grouper", "pandas.core.sorting.get_group_index_sorter", "pandas.core.dtypes.missing.notna", "numpy.concatenate", "pandas.compat.iteritems", "pandas.util._decorators.Substitution", "pandas.core.dtypes.common.is_numeric_dtype", "pandas.core.window.RollingGroupby", "pandas.core.series.Series", "numpy.arange", "pandas.compat.callable", "pandas.core.common._not_none", "pandas.core.config.option_context", "pandas.util._validators.validate_kwargs", "pandas.core.common._pipe", "numpy.repeat", "pandas.compat.set_function_name", "numpy.zeros", "pandas.core.algorithms.take_nd", "pandas.util._decorators.Appender", "pandas.core.base.DataError", "numpy.nonzero", "pandas._libs.Timestamp", "pandas.core.resample.get_resampler_for_grouping", "pandas.core.reshape.concat.concat", "numpy.errstate", "numpy.array", "pandas.core.dtypes.common.ensure_float", "pandas.core.dtypes.cast.maybe_downcast_to_dtype", "pandas.core.dtypes.common.is_scalar", "pandas.compat.zip", "pandas.core.dtypes.missing.isna", "numpy.maximum.accumulate", "pandas.compat.numpy.function.validate_groupby_func", "numpy.empty", "pandas.core.index.Index" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "1.4", "1.1", "1.5", "1.2", "0.24", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
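The pandas record above carries the GroupBy fill, nth, cumcount, rank and shift machinery. A minimal usage sketch of those same methods on a toy frame follows; the frame, column names and values are illustrative, and exact NaN and index handling depends on the installed pandas version.

import numpy as np
import pandas as pd

# Toy frame with one grouping key and a value column containing NaNs.
df = pd.DataFrame({'key': ['a', 'a', 'b', 'b', 'a'],
                   'val': [np.nan, 2.0, 3.0, np.nan, 5.0]})
g = df.groupby('key')

print(g.ffill())                        # forward fill within each group (pad/ffill above)
print(g.nth(0))                         # first row of each group; NaNs kept unless dropna is set
print(g.cumcount())                     # 0-based position of each row inside its group
print(g['val'].rank(method='average'))  # per-group ranking of the value column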
jspaezp/jspp_imageutils
[ "6376e274a1b0675622a7979c181b9effc125aa09" ]
[ "jspp_imageutils/annotations/convert.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# modified from:\n# https://gist.github.com/rotemtam/88d9a4efae243fc77ed4a0f9917c8f6c\n\nimport os\nimport glob\nimport click\n\nimport pandas as pd\nimport xml.etree.ElementTree as ET\n\n\ndef xml_to_csv(path: str) -> pd.DataFrame:\n xml_list = []\n for xml_file in glob.glob(path):\n tree = ET.parse(xml_file)\n root = tree.getroot()\n for member in root.findall('object'):\n bbx = member.find('bndbox')\n xmin = int(bbx.find('xmin').text)\n ymin = int(bbx.find('ymin').text)\n xmax = int(bbx.find('xmax').text)\n ymax = int(bbx.find('ymax').text)\n label = member.find('name').text\n\n # The columns are organized as the csv required by keras-retinanet\n # https://github.com/fizyr/keras-retinanet#csv-datasets\n # path/to/image.jpg,x1,y1,x2,y2,class_name\n\n value = (root.find('filename').text,\n # int(root.find('size')[0].text),\n # int(root.find('size')[1].text),\n xmin, ymin,\n xmax, ymax,\n label)\n xml_list.append(value)\n column_name = ['filename',\n # 'width',\n # 'height',\n 'xmin',\n 'ymin',\n 'xmax',\n 'ymax',\n 'class']\n xml_df = pd.DataFrame(xml_list, columns=column_name)\n return xml_df\n\n\ndef xml_to_csv_file(infile: str, outfile: str):\n xml_df = xml_to_csv(infile)\n print(xml_df)\n xml_df.to_csv(outfile, index=None)\n\n\[email protected](help='Converts a pascal xml to csv')\ndef cli():\n pass\n\n\[email protected]()\[email protected]('--dir', type=str,\n help='Name of source directory,' +\n ' will convert all xml files in it')\[email protected]('--out_dir', type=str, help='Name of the destination directory')\ndef directory(dir, out_dir):\n files_convert = [x for x in os.listdir(dir) if x.endswith(\"xml\")]\n for xml_file in files_convert:\n base = os.path.basename(xml_file)\n filename = os.path.splitext(base)[0]\n\n out_filename = filename + \".csv\"\n out_path = os.path.join(out_dir, out_filename)\n xml_to_csv_file(os.path.join(dir, xml_file), out_path)\n\n\[email protected]()\[email protected]('--file', type=str, help='File to be converted to csv')\[email protected]('--out', type=str, help='Name of the destination file')\ndef xml(file, out):\n xml_to_csv_file(file, out)\n\n\nif __name__ == '__main__':\n cli()\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
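xml_to_csv in the record above flattens Pascal-VOC annotations into the filename,xmin,ymin,xmax,ymax,class rows expected by keras-retinanet. The sketch below applies the same parsing to an in-memory annotation string so it runs without files on disk; the filename and label are made up.

import xml.etree.ElementTree as ET
import pandas as pd

VOC_XML = """<annotation>
  <filename>cells_001.jpg</filename>
  <object>
    <name>nucleus</name>
    <bndbox><xmin>10</xmin><ymin>20</ymin><xmax>50</xmax><ymax>60</ymax></bndbox>
  </object>
</annotation>"""

root = ET.fromstring(VOC_XML)
rows = []
for member in root.findall('object'):            # one row per bounding box
    bbx = member.find('bndbox')
    rows.append((root.find('filename').text,
                 int(bbx.find('xmin').text), int(bbx.find('ymin').text),
                 int(bbx.find('xmax').text), int(bbx.find('ymax').text),
                 member.find('name').text))

df = pd.DataFrame(rows, columns=['filename', 'xmin', 'ymin', 'xmax', 'ymax', 'class'])
print(df.to_csv(index=False))                    # path/to/image.jpg,x1,y1,x2,y2,class_name

From the command line, the click group defined in the file would be driven with something like python convert.py directory --dir <annotation_dir> --out_dir <csv_dir>, per the directory and xml commands shown above.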
mikkokotola/AdvancedMachineLearning
[ "574e82d4104ac04f1cb9889beb5be7d122bd0d01" ]
[ "Week6/AdvML_Week6_ex2.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# In[8]:\n\n\n## Advanced Course in Machine Learning\n## Week 6\n## Exercise 2 / Random forest\n\nimport numpy as np\nimport scipy\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nfrom numpy import linalg as LA\nfrom sklearn import decomposition\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.metrics import pairwise_distances\nfrom sklearn.manifold import TSNE\nimport math\nimport sys\n\nimport mnist\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.model_selection import cross_val_score\n\nsns.set_style(\"darkgrid\")\n\n\n# In[4]:\n\n\nx_train, t_train, x_test, t_test = mnist.load()\n\n\n# In[48]:\n\n\nx_train = x_train[0:50000,:]\nt_train = t_train[0:50000]\n\n\n# In[49]:\n\n\nprint(x_train.shape)\nprint(t_train.shape)\nprint(x_test.shape)\nprint(t_test.shape)\n\n\n# In[69]:\n\n\nstartTestIx = 0\nendTestIx = 100\n\n\n# clf.classes_\n# clf.feature_importances_\n# print(clf.max_features_)\n# print(clf.n_classes_)\n# print(clf.n_features_)\n# print(clf.n_outputs_)\n# #clf.tree_\n\n# In[70]:\n\n\n# Randomly select the samples and features for the tree\ndef sample(n, k, x_train, t_train):\n idx = np.random.randint(x_train.shape[0], size=n)\n fidx = np.random.randint(x_train.shape[1], size=k)\n x = x_train[idx, :]\n x = x[:, fidx]\n y = t_train[idx]\n return x, y, idx, fidx\n #print(\"Rows: \", idx, \", features \", fidx)\n #print(x.shape)\n #print(y.shape)\n\n\n# In[71]:\n\n\ndef trainTree(x_train, t_train):\n clf = DecisionTreeClassifier(random_state=0)\n clf = clf.fit(x_train, t_train)\n return clf\n\n#cross_val_score(clf, x_train, t_train, cv=10)\n\n\n# In[72]:\n\n\ndef ensureAllClasses(newPred, clf):\n for i in range(10):\n if i not in clf.classes_:\n newPred = np.insert(newPred, i, 0, axis=1)\n return newPred\n\n\n# In[75]:\n\n\n# Main loop\ndef main(M, n, k):\n pred = np.zeros(shape = (endTestIx - startTestIx, 10), dtype = 'float32')\n for m in range(M):\n x, y, idx, fidx = sample(n, k, x_train, t_train)\n clf = trainTree(x, y)\n newPred = clf.predict_proba(x_test[startTestIx:endTestIx,fidx])\n newPred = ensureAllClasses(newPred, clf)\n pred = np.add(pred, newPred)\n\n pred_classes = np.argmax(pred, axis=1)\n\n correct = pred_classes == t_test[startTestIx:endTestIx]\n acc = sum(correct)/len(correct)\n #print(pred_classes)\n #print (acc)\n return acc\n\n\n# In[85]:\n\n\nMmax = 100\nn = 1000\nk = 20\naccs = list()\nfor m in range(1, Mmax):\n accs.append(main(m, n, k))\n\nplt.figure(num=None, figsize=(8, 6), dpi=100, facecolor='w', edgecolor='k')\nsns.lineplot(range(1,Mmax), accs)\nplt.xlabel('Number of trees (M)')\nplt.ylabel('Accuracy of predictions (%)')\nplt.title('Number of trees vs. accuracy, n = {0}, k = {1}'.format(n, k))\nplt.show()\n\n\n# In[80]:\n\n\nM = 100\nn = 1000\nkmax = 200\naccs = list()\nfor k in range(1, kmax, 10):\n accs.append(main(M, n, k))\n\nplt.figure(num=None, figsize=(8, 6), dpi=100, facecolor='w', edgecolor='k')\nsns.lineplot(range(1,kmax,10), accs)\nplt.xlabel('Number of features per tree (k)')\nplt.ylabel('Accuracy of predictions (%)')\nplt.title('Number of features per tree vs. 
accuracy, M = {0}, n = {1}'.format(M, n))\nplt.show()\n\n\n# In[81]:\n\n\nM = 100\nnmax = 5000\nk = 50\naccs = list()\nfor n in range(1, nmax, 100):\n accs.append(main(M, n, k))\n\nplt.figure(num=None, figsize=(8, 6), dpi=100, facecolor='w', edgecolor='k')\nsns.lineplot(range(1, nmax, 100), accs)\nplt.xlabel('Number of samples per tree (n)')\nplt.ylabel('Accuracy of predictions (%)')\nplt.title('Number of samples per tree vs. accuracy, M = {0}, k = {1}'.format(M, k))\nplt.show()\n\n\n# In[84]:\n\n\nM = 100\nn = 1000\nk = 50\n\nrepeats = 50\n\naccs = list()\nfor i in range(50):\n accs.append(main(M, n, k))\n\navAcc = sum(accs)/len(accs)\nprint(avAcc)\n\n" ]
[ [ "matplotlib.pyplot.figure", "numpy.add", "sklearn.tree.DecisionTreeClassifier", "numpy.argmax", "numpy.random.randint", "numpy.insert", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "numpy.zeros", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
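The exercise above grows a random forest by hand: each DecisionTreeClassifier is fit on n random rows and k random feature columns, and class probabilities are summed over M trees. Below is a condensed, self-contained version of that loop; MNIST is replaced by make_classification so it runs anywhere, and the values of M, n and k are illustrative. Aligning the summed probabilities through clf.classes_ plays the same role as ensureAllClasses.

import numpy as np
from sklearn.datasets import make_classification
from sklearn.tree import DecisionTreeClassifier

rng = np.random.default_rng(0)
X, y = make_classification(n_samples=2000, n_features=40, n_informative=10,
                           n_classes=3, random_state=0)
X_train, y_train, X_test, y_test = X[:1500], y[:1500], X[1500:], y[1500:]

M, n, k = 25, 500, 15                                 # trees, rows per tree, features per tree
votes = np.zeros((len(X_test), len(np.unique(y))))

for _ in range(M):
    idx = rng.integers(0, len(X_train), size=n)       # random row sample for this tree
    fidx = rng.integers(0, X_train.shape[1], size=k)  # random feature subset for this tree
    clf = DecisionTreeClassifier().fit(X_train[np.ix_(idx, fidx)], y_train[idx])
    proba = clf.predict_proba(X_test[:, fidx])
    votes[:, clf.classes_] += proba                   # align columns with the classes this tree saw

print('ensemble accuracy:', (votes.argmax(axis=1) == y_test).mean())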
yamaguchi1024/MeshCNN
[ "197530eab2aa4c2419511c1854dcbc662377f340" ]
[ "models/layers/mesh_pool.py" ]
[ "import torch\nimport torch.nn as nn\nfrom threading import Thread\nfrom models.layers.mesh_union import MeshUnion\nimport numpy as np\nfrom heapq import heappop, heapify\n\n\nclass MeshPool(nn.Module):\n \n def __init__(self, target, multi_thread=False):\n super(MeshPool, self).__init__()\n self.__out_target = target\n self.__multi_thread = multi_thread\n self.__fe = None\n self.__updated_fe = None\n self.__meshes = None\n self.__merge_edges = [-1, -1]\n\n def __call__(self, fe, meshes):\n return self.forward(fe, meshes)\n\n def forward(self, fe, meshes):\n self.__updated_fe = [[] for _ in range(len(meshes))]\n pool_threads = []\n self.__fe = fe\n self.__meshes = meshes\n # iterate over batch\n for mesh_index in range(len(meshes)):\n if self.__multi_thread:\n pool_threads.append(Thread(target=self.__pool_main, args=(mesh_index,)))\n pool_threads[-1].start()\n else:\n self.__pool_main(mesh_index)\n if self.__multi_thread:\n for mesh_index in range(len(meshes)):\n pool_threads[mesh_index].join()\n out_features = torch.cat(self.__updated_fe).view(len(meshes), -1, self.__out_target)\n return out_features\n\n def __pool_main(self, mesh_index):\n mesh = self.__meshes[mesh_index]\n queue = self.__build_queue(self.__fe[mesh_index, :, :mesh.edges_count], mesh.edges_count)\n # recycle = []\n # last_queue_len = len(queue)\n last_count = mesh.edges_count + 1\n mask = np.ones(mesh.edges_count, dtype=np.uint8)\n edge_groups = MeshUnion(mesh.edges_count, self.__fe.device)\n while mesh.edges_count > self.__out_target:\n value, edge_id = heappop(queue)\n edge_id = int(edge_id)\n if mask[edge_id]:\n self.__pool_edge(mesh, edge_id, mask, edge_groups)\n mesh.clean(mask, edge_groups)\n fe = edge_groups.rebuild_features(self.__fe[mesh_index], mask, self.__out_target)\n self.__updated_fe[mesh_index] = fe\n\n def __pool_edge(self, mesh, edge_id, mask, edge_groups):\n if self.has_boundaries(mesh, edge_id):\n return False\n elif self.__clean_side(mesh, edge_id, mask, edge_groups, 0)\\\n and self.__clean_side(mesh, edge_id, mask, edge_groups, 2) \\\n and self.__is_one_ring_valid(mesh, edge_id):\n self.__merge_edges[0] = self.__pool_side(mesh, edge_id, mask, edge_groups, 0)\n self.__merge_edges[1] = self.__pool_side(mesh, edge_id, mask, edge_groups, 2)\n mesh.merge_vertices(edge_id)\n mask[edge_id] = False\n MeshPool.__remove_group(mesh, edge_groups, edge_id)\n mesh.edges_count -= 1\n return True\n else:\n return False\n\n def __clean_side(self, mesh, edge_id, mask, edge_groups, side):\n if mesh.edges_count <= self.__out_target:\n return False\n invalid_edges = MeshPool.__get_invalids(mesh, edge_id, edge_groups, side)\n while len(invalid_edges) != 0 and mesh.edges_count > self.__out_target:\n self.__remove_triplete(mesh, mask, edge_groups, invalid_edges)\n if mesh.edges_count <= self.__out_target:\n return False\n if self.has_boundaries(mesh, edge_id):\n return False\n invalid_edges = self.__get_invalids(mesh, edge_id, edge_groups, side)\n return True\n\n @staticmethod\n def has_boundaries(mesh, edge_id):\n for edge in mesh.gemm_edges[edge_id]:\n if edge == -1 or -1 in mesh.gemm_edges[edge]:\n return True\n return False\n\n\n @staticmethod\n def __is_one_ring_valid(mesh, edge_id):\n v_a = set(mesh.edges[mesh.ve[mesh.edges[edge_id, 0]]].reshape(-1))\n v_b = set(mesh.edges[mesh.ve[mesh.edges[edge_id, 1]]].reshape(-1))\n shared = v_a & v_b - set(mesh.edges[edge_id])\n return len(shared) == 2\n\n def __pool_side(self, mesh, edge_id, mask, edge_groups, side):\n info = MeshPool.__get_face_info(mesh, edge_id, side)\n 
key_a, key_b, side_a, side_b, _, other_side_b, _, other_keys_b = info\n self.__redirect_edges(mesh, key_a, side_a - side_a % 2, other_keys_b[0], mesh.sides[key_b, other_side_b])\n self.__redirect_edges(mesh, key_a, side_a - side_a % 2 + 1, other_keys_b[1], mesh.sides[key_b, other_side_b + 1])\n MeshPool.__union_groups(mesh, edge_groups, key_b, key_a)\n MeshPool.__union_groups(mesh, edge_groups, edge_id, key_a)\n mask[key_b] = False\n MeshPool.__remove_group(mesh, edge_groups, key_b)\n mesh.remove_edge(key_b)\n mesh.edges_count -= 1\n return key_a\n\n @staticmethod\n def __get_invalids(mesh, edge_id, edge_groups, side):\n info = MeshPool.__get_face_info(mesh, edge_id, side)\n key_a, key_b, side_a, side_b, other_side_a, other_side_b, other_keys_a, other_keys_b = info\n shared_items = MeshPool.__get_shared_items(other_keys_a, other_keys_b)\n if len(shared_items) == 0:\n return []\n else:\n assert (len(shared_items) == 2)\n middle_edge = other_keys_a[shared_items[0]]\n update_key_a = other_keys_a[1 - shared_items[0]]\n update_key_b = other_keys_b[1 - shared_items[1]]\n update_side_a = mesh.sides[key_a, other_side_a + 1 - shared_items[0]]\n update_side_b = mesh.sides[key_b, other_side_b + 1 - shared_items[1]]\n MeshPool.__redirect_edges(mesh, edge_id, side, update_key_a, update_side_a)\n MeshPool.__redirect_edges(mesh, edge_id, side + 1, update_key_b, update_side_b)\n MeshPool.__redirect_edges(mesh, update_key_a, MeshPool.__get_other_side(update_side_a), update_key_b, MeshPool.__get_other_side(update_side_b))\n MeshPool.__union_groups(mesh, edge_groups, key_a, edge_id)\n MeshPool.__union_groups(mesh, edge_groups, key_b, edge_id)\n MeshPool.__union_groups(mesh, edge_groups, key_a, update_key_a)\n MeshPool.__union_groups(mesh, edge_groups, middle_edge, update_key_a)\n MeshPool.__union_groups(mesh, edge_groups, key_b, update_key_b)\n MeshPool.__union_groups(mesh, edge_groups, middle_edge, update_key_b)\n return [key_a, key_b, middle_edge]\n\n @staticmethod\n def __redirect_edges(mesh, edge_a_key, side_a, edge_b_key, side_b):\n mesh.gemm_edges[edge_a_key, side_a] = edge_b_key\n mesh.gemm_edges[edge_b_key, side_b] = edge_a_key\n mesh.sides[edge_a_key, side_a] = side_b\n mesh.sides[edge_b_key, side_b] = side_a\n\n @staticmethod\n def __get_shared_items(list_a, list_b):\n shared_items = []\n for i in range(len(list_a)):\n for j in range(len(list_b)):\n if list_a[i] == list_b[j]:\n shared_items.extend([i, j])\n return shared_items\n\n @staticmethod\n def __get_other_side(side):\n return side + 1 - 2 * (side % 2)\n\n @staticmethod\n def __get_face_info(mesh, edge_id, side):\n key_a = mesh.gemm_edges[edge_id, side]\n key_b = mesh.gemm_edges[edge_id, side + 1]\n side_a = mesh.sides[edge_id, side]\n side_b = mesh.sides[edge_id, side + 1]\n other_side_a = (side_a - (side_a % 2) + 2) % 4\n other_side_b = (side_b - (side_b % 2) + 2) % 4\n other_keys_a = [mesh.gemm_edges[key_a, other_side_a], mesh.gemm_edges[key_a, other_side_a + 1]]\n other_keys_b = [mesh.gemm_edges[key_b, other_side_b], mesh.gemm_edges[key_b, other_side_b + 1]]\n return key_a, key_b, side_a, side_b, other_side_a, other_side_b, other_keys_a, other_keys_b\n\n @staticmethod\n def __remove_triplete(mesh, mask, edge_groups, invalid_edges):\n vertex = set(mesh.edges[invalid_edges[0]])\n for edge_key in invalid_edges:\n vertex &= set(mesh.edges[edge_key])\n mask[edge_key] = False\n MeshPool.__remove_group(mesh, edge_groups, edge_key)\n mesh.edges_count -= 3\n vertex = list(vertex)\n assert(len(vertex) == 1)\n mesh.remove_vertex(vertex[0])\n\n def 
__build_queue(self, features, edges_count):\n # delete edges with smallest norm\n squared_magnitude = torch.sum(features * features, 0)\n if squared_magnitude.shape[-1] != 1:\n squared_magnitude = squared_magnitude.unsqueeze(-1)\n edge_ids = torch.arange(edges_count, device=squared_magnitude.device, dtype=torch.float32).unsqueeze(-1)\n heap = torch.cat((squared_magnitude, edge_ids), dim=-1).tolist()\n heapify(heap)\n return heap\n\n @staticmethod\n def __union_groups(mesh, edge_groups, source, target):\n edge_groups.union(source, target)\n mesh.union_groups(source, target)\n\n @staticmethod\n def __remove_group(mesh, edge_groups, index):\n edge_groups.remove_group(index)\n mesh.remove_group(index)\n\n" ]
[ [ "torch.arange", "torch.sum", "torch.cat", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
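MeshPool in the record above collapses mesh edges in order of increasing feature magnitude. The isolated sketch below reproduces just the __build_queue step: edges are keyed by the squared L2 norm of their feature column and the smallest is popped first with heapq. Tensor sizes here are made up.

from heapq import heapify, heappop
import torch

edges_count, feat_dim = 8, 5
features = torch.randn(feat_dim, edges_count)        # (channels, edges), like fe[mesh_index]

squared_magnitude = torch.sum(features * features, 0).unsqueeze(-1)      # (edges, 1)
edge_ids = torch.arange(edges_count, dtype=torch.float32).unsqueeze(-1)  # (edges, 1)
heap = torch.cat((squared_magnitude, edge_ids), dim=-1).tolist()
heapify(heap)                                        # min-heap keyed on the squared norm

value, edge_id = heappop(heap)
print(f'collapse candidate: edge {int(edge_id)} with squared norm {value:.4f}')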
ldylab/deep_learning_with_pytorch
[ "c86a2e24ee94ade1a78b66f10eb69b6e1fdd4463" ]
[ "pytorch_basic_template/model/model_entry.py" ]
[ "# from model.base.fcn import CustomFcn\n# from model.best.fcn import DeepLabv3Fcn\n# from model.better.fcn import Resnet101Fcn\n# from model.sota.fcn import LightFcn\nfrom model.alexnet.alexnet_model import AlexNet\nfrom model.lenet5.lenet_5_model import LeNet5\nfrom model.vggnet.vggnet16 import VGG16\nfrom model.densenet.densenet_model import DenseNet121\nfrom model.resnet.resnet34_model import resnet34\nfrom model.resnet.resnet101_model import resnet101, resnet50\nfrom model.cotnet.cotnet_model import cotnet50\nimport torch.nn as nn\n\n\ndef select_model(args):\n type2model = {\n 'alexnet_fcn': AlexNet(args),\n 'lenet5_fcn': LeNet5(args),\n 'vggnet16_fcn': VGG16(args),\n 'densenet121_fcn': DenseNet121(num_classes=args.classes_num, grayscale=False),\n 'resnet34_fcn': resnet34(num_classes=args.classes_num),\n 'resnet101_fcn': resnet101(num_classes=args.classes_num),\n 'resnet50_fcn': resnet50(num_classes=args.classes_num),\n 'cotnet50_fcn': cotnet50(num_classes=args.classes_num)\n }\n model = type2model[args.model_type]\n return model\n\n\ndef equip_multi_gpu(model, args):\n model = nn.DataParallel(model, device_ids=args.gpus)\n return model\n" ]
[ [ "torch.nn.DataParallel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
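select_model in the record above dispatches on a model_type string through a plain dict; as written, the dict instantiates every architecture before one is picked. The stand-alone sketch below keeps the same dispatch but stores constructors, so only the requested network is built. TinyMLP and the registry key are placeholders, not models from that repository.

from argparse import Namespace
import torch.nn as nn

class TinyMLP(nn.Module):
    def __init__(self, classes_num):
        super().__init__()
        self.net = nn.Sequential(nn.Flatten(), nn.Linear(32 * 32 * 3, classes_num))
    def forward(self, x):
        return self.net(x)

# Constructors, not instances, so unused architectures are never allocated.
MODEL_REGISTRY = {
    'tiny_mlp': lambda args: TinyMLP(args.classes_num),
}

def select_model(args):
    return MODEL_REGISTRY[args.model_type](args)

print(select_model(Namespace(model_type='tiny_mlp', classes_num=10)))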
gohanlon/nlp
[ "a5cd2303187239799ae0b1597a7c16eb99a97108" ]
[ "examples/sentence_similarity/gensen_train.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\n\"\"\"\nThe GenSen training process follows the steps:\n1. Create or load the dataset vocabulary\n2. Train on the training dataset for each batch epoch (batch size = 48 updates)\n3. Evaluate on the validation dataset for every 10 epoches\n4. Find the local minimum point on validation loss\n5. Save the best model and stop the training process\n\nAzureML provides AI Compute to train the model and track the performance.\nThis training process is based on GPU only.\n\n\"\"\"\nimport argparse\nimport json\nimport logging\nimport os\nimport time\n\nimport horovod.torch as hvd\nimport mlflow\nimport numpy as np\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.nn as nn\nimport torch.nn.functional as f\nimport torch.optim as optim\n\nfrom utils_nlp.models.gensen.multi_task_model import MultitaskModel\nfrom utils_nlp.models.gensen.utils import (\n BufferedDataIterator,\n NLIIterator,\n compute_validation_loss,\n)\n\ncudnn.benchmark = True\nlogger = logging.getLogger(__name__)\n\nhvd.init()\nif torch.cuda.is_available():\n # Horovod: pin GPU to local rank.\n torch.cuda.set_device(hvd.local_rank())\n\n\ndef metric_average(value, name):\n \"\"\"\n Sync the validation loss with nodes.\n :param value:\n :param name:\n :return:\n \"\"\"\n tensor = torch.tensor(value)\n avg_tensor = hvd.allreduce(tensor, name=name)\n return avg_tensor.item()\n\n\ndef setup_horovod(model, learning_rate):\n \"\"\" Setup for Horovod usage.\n\n Args:\n model(MultitaskModel): The MultitaskModel object.\n learning_rate(float): Learning rate for the model.\n\n Returns: hvd.DistributedOptimizer: Optimizer to use for computing\n gradients and applying updates.\n\n \"\"\"\n # Horovod: scale learning rate by the number of GPUs.\n optimizer = optim.Adam(model.parameters(), lr=learning_rate * hvd.size())\n\n # Horovod: broadcast parameters & optimizer state.\n hvd.broadcast_parameters(model.state_dict(), root_rank=0)\n hvd.broadcast_optimizer_state(optimizer, root_rank=0)\n\n # Horovod: (optional) compression algorithm.\n compression = hvd.Compression.fp16\n\n # Horovod: wrap optimizer with DistributedOptimizer.\n optimizer = hvd.DistributedOptimizer(\n optimizer,\n named_parameters=model.named_parameters(),\n compression=compression,\n )\n\n return optimizer\n\n\ndef setup_logging(config):\n logging.basicConfig(\n level=logging.INFO,\n format=\"%(asctime)s - %(levelname)s - %(message)s\",\n filename=\"log/%s\" % (config[\"data\"][\"task\"]),\n filemode=\"w\",\n )\n\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n formatter = logging.Formatter(\"%(asctime)s - %(levelname)s - %(message)s\")\n console.setFormatter(formatter)\n logging.getLogger(\"\").addHandler(console)\n\n\ndef log_config(config):\n logging.info(\"Model Parameters : \")\n logging.info(\"Task : %s \" % (config[\"data\"][\"task\"]))\n logging.info(\n \"Source Word Embedding Dim : %s\" % (config[\"model\"][\"dim_word_src\"])\n )\n logging.info(\n \"Target Word Embedding Dim : %s\" % (config[\"model\"][\"dim_word_trg\"])\n )\n logging.info(\"Source RNN Hidden Dim : %s\" % (config[\"model\"][\"dim_src\"]))\n logging.info(\"Target RNN Hidden Dim : %s\" % (config[\"model\"][\"dim_trg\"]))\n logging.info(\n \"Source RNN Bidirectional : %s\" % (config[\"model\"][\"bidirectional\"])\n )\n logging.info(\"Batch Size : %d \" % (config[\"training\"][\"batch_size\"]))\n logging.info(\"Optimizer : %s 
\" % (config[\"training\"][\"optimizer\"]))\n logging.info(\"Learning Rate : %f \" % (config[\"training\"][\"lrate\"]))\n\n\ndef evaluate(\n config,\n train_iterator,\n model,\n loss_criterion,\n monitor_epoch,\n min_val_loss,\n min_val_loss_epoch,\n save_dir,\n starting_time,\n model_state,\n max_epoch,\n):\n \"\"\" Function to validate the model.\n\n Args:\n max_epoch(int): Limit training to specified number of epochs.\n model_state(dict): Saved model weights.\n config(dict): Config object.\n train_iterator(BufferedDataIterator): BufferedDataIterator object.\n model(MultitaskModel): The MultitaskModel object.\n loss_criterion(nn.CrossEntropyLoss): Cross entropy loss.\n monitor_epoch(int): Current epoch count.\n min_val_loss(float): Minimum validation loss\n min_val_loss_epoch(int): Epoch where the minimum validation\n loss was seen.\n save_dir(str): Directory path to save the model dictionary.\n starting_time(time.Time): Starting time of the training.\n\n Returns:\n bool: Whether to continue training or not.\n \"\"\"\n\n break_flag = 0\n\n for task_idx, task in enumerate(train_iterator.tasknames):\n if \"skipthought\" in task:\n continue\n validation_loss = compute_validation_loss(\n config,\n model,\n train_iterator,\n loss_criterion,\n task_idx,\n lowercase=True,\n )\n validation_loss = metric_average(validation_loss, \"val_loss\")\n logging.info(\"%s Validation Loss : %.3f\" % (task, validation_loss))\n\n # Horovod: print output only on first rank.\n if hvd.rank() == 0:\n # log the best val accuracy to AML run\n logging.info(\n \"Best Validation Loss: {}\".format(np.float(validation_loss))\n )\n\n # If the validation loss is small enough, and it starts to go up.\n # Should stop training.\n # Small is defined by the number of epochs it lasts.\n if validation_loss < min_val_loss:\n min_val_loss = validation_loss\n min_val_loss_epoch = monitor_epoch\n model_state = model.state_dict()\n\n logging.info(\n \"Monitor epoch: %d Validation Loss: %.3f Min Validation Epoch: \"\n \"%d Loss : %.3f \"\n % (\n monitor_epoch,\n validation_loss,\n min_val_loss_epoch,\n min_val_loss,\n )\n )\n if (monitor_epoch - min_val_loss_epoch) > config[\"training\"][\n \"stop_patience\"\n ] or (max_epoch is not None and monitor_epoch >= max_epoch):\n logging.info(\"Saving model ...\")\n # Save the name with validation loss.\n torch.save(\n model_state,\n open(os.path.join(save_dir, \"best_model.model\"), \"wb\"),\n )\n # Let the training end.\n break_flag = 1\n break\n if break_flag == 1:\n logging.info(\"##### Training stopped at ##### %f\" % min_val_loss)\n logging.info(\n \"##### Training Time ##### %f seconds\"\n % (time.time() - starting_time)\n )\n return True, min_val_loss_epoch, min_val_loss, model_state\n else:\n return False, min_val_loss_epoch, min_val_loss, model_state\n\n\ndef evaluate_nli(nli_iterator, model, batch_size, n_gpus):\n \"\"\"\n\n Args:\n nli_iterator(NLIIterator): NLIIterator object.\n model(MultitaskModel): Multitask model object.\n batch_size(int): Batch size.\n n_gpus(int): Number of gpus\n\n \"\"\"\n n_correct = 0.0\n n_wrong = 0.0\n for j in range(0, len(nli_iterator.dev_lines), batch_size * n_gpus):\n minibatch = nli_iterator.get_parallel_minibatch(\n j, batch_size * n_gpus, \"dev\"\n )\n class_logits = model(\n minibatch, -1, return_hidden=False, paired_trg=None\n )\n class_preds = (\n f.softmax(class_logits).data.cpu().numpy().argmax(axis=-1)\n )\n labels = minibatch[\"labels\"].data.cpu().numpy()\n for pred, label in zip(class_preds, labels):\n if pred == label:\n n_correct += 
1.0\n else:\n n_wrong += 1.0\n logging.info(\"NLI Dev Acc : %.5f\" % (n_correct / (n_correct + n_wrong)))\n n_correct = 0.0\n n_wrong = 0.0\n for j in range(0, len(nli_iterator.test_lines), batch_size * n_gpus):\n minibatch = nli_iterator.get_parallel_minibatch(\n j, batch_size * n_gpus, \"test\"\n )\n class_logits = model(\n minibatch, -1, return_hidden=False, paired_trg=None\n )\n class_preds = (\n f.softmax(class_logits).data.cpu().numpy().argmax(axis=-1)\n )\n labels = minibatch[\"labels\"].data.cpu().numpy()\n for pred, label in zip(class_preds, labels):\n if pred == label:\n n_correct += 1.0\n else:\n n_wrong += 1.0\n logging.info(\"NLI Test Acc : %.5f\" % (n_correct / (n_correct + n_wrong)))\n logging.info(\"******************************************************\")\n\n\ndef train(config, data_folder, learning_rate=0.0001, max_epoch=None):\n \"\"\" Train the Gensen model.\n\n Args:\n max_epoch(int): Limit training to specified number of epochs.\n config(dict): Loaded json file as a python object.\n data_folder(str): Path to the folder containing the data.\n learning_rate(float): Learning rate for the model.\n \"\"\"\n owd = os.getcwd()\n os.chdir(data_folder)\n\n try:\n with mlflow.start_run():\n save_dir = config[\"data\"][\"save_dir\"]\n if not os.path.exists(\"./log\"):\n os.makedirs(\"./log\")\n\n os.makedirs(save_dir, exist_ok=True)\n\n setup_logging(config)\n\n batch_size = config[\"training\"][\"batch_size\"]\n src_vocab_size = config[\"model\"][\"n_words_src\"]\n trg_vocab_size = config[\"model\"][\"n_words_trg\"]\n max_len_src = config[\"data\"][\"max_src_length\"]\n max_len_trg = config[\"data\"][\"max_trg_length\"]\n model_state = {}\n\n train_src = [item[\"train_src\"] for item in config[\"data\"][\"paths\"]]\n train_trg = [item[\"train_trg\"] for item in config[\"data\"][\"paths\"]]\n tasknames = [item[\"taskname\"] for item in config[\"data\"][\"paths\"]]\n\n # Keep track of indicies to train forward and backward jointly\n if (\n \"skipthought_next\" in tasknames\n and \"skipthought_previous\" in tasknames\n ):\n skipthought_idx = tasknames.index(\"skipthought_next\")\n skipthought_backward_idx = tasknames.index(\n \"skipthought_previous\"\n )\n paired_tasks = {\n skipthought_idx: skipthought_backward_idx,\n skipthought_backward_idx: skipthought_idx,\n }\n else:\n paired_tasks = None\n skipthought_idx = None\n skipthought_backward_idx = None\n\n train_iterator = BufferedDataIterator(\n train_src,\n train_trg,\n src_vocab_size,\n trg_vocab_size,\n tasknames,\n save_dir,\n buffer_size=1e6,\n lowercase=True,\n seed=(hvd.rank() + 1) * 12345,\n )\n\n nli_iterator = NLIIterator(\n train=config[\"data\"][\"nli_train\"],\n dev=config[\"data\"][\"nli_dev\"],\n test=config[\"data\"][\"nli_test\"],\n vocab_size=-1,\n vocab=os.path.join(save_dir, \"src_vocab.pkl\"),\n seed=(hvd.rank() + 1) * 12345,\n )\n\n src_vocab_size = len(train_iterator.src[0][\"word2id\"])\n trg_vocab_size = len(train_iterator.trg[0][\"word2id\"])\n\n # Logging set up.\n logging.info(\"Finished creating iterator ...\")\n log_config(config)\n logging.info(\n \"Found %d words in source : \"\n % (len(train_iterator.src[0][\"id2word\"]))\n )\n for idx, taskname in enumerate(tasknames):\n logging.info(\n \"Found %d target words in task %s \"\n % (len(train_iterator.trg[idx][\"id2word\"]), taskname)\n )\n logging.info(\"Found %d words in src \" % src_vocab_size)\n logging.info(\"Found %d words in trg \" % trg_vocab_size)\n\n weight_mask = torch.ones(trg_vocab_size).cuda()\n 
weight_mask[train_iterator.trg[0][\"word2id\"][\"<pad>\"]] = 0\n loss_criterion = nn.CrossEntropyLoss(weight=weight_mask).cuda()\n nli_criterion = nn.CrossEntropyLoss().cuda()\n\n model = MultitaskModel(\n src_emb_dim=config[\"model\"][\"dim_word_src\"],\n trg_emb_dim=config[\"model\"][\"dim_word_trg\"],\n src_vocab_size=src_vocab_size,\n trg_vocab_size=trg_vocab_size,\n src_hidden_dim=config[\"model\"][\"dim_src\"],\n trg_hidden_dim=config[\"model\"][\"dim_trg\"],\n bidirectional=config[\"model\"][\"bidirectional\"],\n pad_token_src=train_iterator.src[0][\"word2id\"][\"<pad>\"],\n pad_token_trg=train_iterator.trg[0][\"word2id\"][\"<pad>\"],\n nlayers_src=config[\"model\"][\"n_layers_src\"],\n dropout=config[\"model\"][\"dropout\"],\n num_tasks=len(train_iterator.src),\n paired_tasks=paired_tasks,\n ).cuda()\n\n optimizer = setup_horovod(model, learning_rate=learning_rate)\n logging.info(model)\n\n n_gpus = config[\"training\"][\"n_gpus\"]\n model = torch.nn.DataParallel(model, device_ids=range(n_gpus))\n\n task_losses = [[] for _ in tasknames]\n task_idxs = [0 for _ in tasknames]\n nli_losses = []\n updates = 0\n nli_ctr = 0\n nli_epoch = 0\n monitor_epoch = 0\n nli_mbatch_ctr = 0\n mbatch_times = []\n min_val_loss = 10000000\n min_val_loss_epoch = -1\n rng_num_tasks = (\n len(tasknames) - 1 if paired_tasks else len(tasknames)\n )\n logging.info(\"OS Environ: \\n {} \\n\\n\".format(os.environ))\n mlflow.log_param(\"learning_rate\", learning_rate)\n logging.info(\"Commencing Training ...\")\n start = time.time()\n while True:\n batch_start_time = time.time()\n # Train NLI once every 10 minibatches of other tasks\n if nli_ctr % 10 == 0:\n minibatch = nli_iterator.get_parallel_minibatch(\n nli_mbatch_ctr, batch_size * n_gpus\n )\n optimizer.zero_grad()\n class_logits = model(\n minibatch, -1, return_hidden=False, paired_trg=None\n )\n\n loss = nli_criterion(\n class_logits.contiguous().view(\n -1, class_logits.size(1)\n ),\n minibatch[\"labels\"].contiguous().view(-1),\n )\n\n # nli_losses.append(loss.data[0])\n nli_losses.append(loss.item())\n loss.backward()\n torch.nn.utils.clip_grad_norm(model.parameters(), 1.0)\n optimizer.step()\n\n nli_mbatch_ctr += batch_size * n_gpus\n if nli_mbatch_ctr >= len(nli_iterator.train_lines):\n nli_mbatch_ctr = 0\n nli_epoch += 1\n else:\n # Sample a random task\n task_idx = np.random.randint(low=0, high=rng_num_tasks)\n\n # Get a minibatch corresponding to the sampled task\n minibatch = train_iterator.get_parallel_minibatch(\n task_idx,\n task_idxs[task_idx],\n batch_size * n_gpus,\n max_len_src,\n max_len_trg,\n )\n\n \"\"\"Increment pointer into task and if current buffer is\n exhausted, fetch new buffer. 
\"\"\"\n task_idxs[task_idx] += batch_size * n_gpus\n if task_idxs[task_idx] >= train_iterator.buffer_size:\n train_iterator.fetch_buffer(task_idx)\n task_idxs[task_idx] = 0\n\n if task_idx == skipthought_idx:\n minibatch_back = train_iterator.get_parallel_minibatch(\n skipthought_backward_idx,\n task_idxs[skipthought_backward_idx],\n batch_size * n_gpus,\n max_len_src,\n max_len_trg,\n )\n task_idxs[skipthought_backward_idx] += (\n batch_size * n_gpus\n )\n if (\n task_idxs[skipthought_backward_idx]\n >= train_iterator.buffer_size\n ):\n train_iterator.fetch_buffer(\n skipthought_backward_idx\n )\n task_idxs[skipthought_backward_idx] = 0\n\n optimizer.zero_grad()\n decoder_logit, decoder_logit_2 = model(\n minibatch,\n task_idx,\n paired_trg=minibatch_back[\"input_trg\"],\n )\n\n loss_f = loss_criterion(\n decoder_logit.contiguous().view(\n -1, decoder_logit.size(2)\n ),\n minibatch[\"output_trg\"].contiguous().view(-1),\n )\n\n loss_b = loss_criterion(\n decoder_logit_2.contiguous().view(\n -1, decoder_logit_2.size(2)\n ),\n minibatch_back[\"output_trg\"].contiguous().view(-1),\n )\n\n task_losses[task_idx].append(loss_f.data[0])\n task_losses[skipthought_backward_idx].append(\n loss_b.data[0]\n )\n loss = loss_f + loss_b\n\n else:\n optimizer.zero_grad()\n decoder_logit = model(minibatch, task_idx)\n\n loss = loss_criterion(\n decoder_logit.contiguous().view(\n -1, decoder_logit.size(2)\n ),\n minibatch[\"output_trg\"].contiguous().view(-1),\n )\n\n task_losses[task_idx].append(loss.item())\n\n loss.backward()\n # For distributed optimizer need to sync before gradient\n # clipping.\n optimizer.synchronize()\n\n torch.nn.utils.clip_grad_norm(model.parameters(), 1.0)\n optimizer.step()\n\n end = time.time()\n mbatch_times.append(end - batch_start_time)\n\n # Validations\n if (\n updates % config[\"management\"][\"monitor_loss\"] == 0\n and updates != 0\n ):\n monitor_epoch += 1\n for idx, task in enumerate(tasknames):\n logging.info(\n \"Seq2Seq Examples Processed : %d %s Loss : %.5f Num %s \"\n \"minibatches : %d\"\n % (\n updates,\n task,\n np.mean(task_losses[idx]),\n task,\n len(task_losses[idx]),\n )\n )\n mlflow.log_metric(\n \"validation_loss\",\n np.mean(task_losses[idx]),\n step=monitor_epoch,\n )\n\n logging.info(\n \"Round: %d NLI Epoch : %d NLI Examples Processed : %d NLI \"\n \"Loss : %.5f \"\n % (\n nli_ctr,\n nli_epoch,\n nli_mbatch_ctr,\n np.mean(nli_losses),\n )\n )\n mlflow.log_metric(\n \"nli_loss\", np.mean(nli_losses), step=nli_epoch\n )\n\n logging.info(\n \"Average time per minibatch : %.5f\"\n % (np.mean(mbatch_times))\n )\n mlflow.log_metric(\n \"minibatch_avg_duration\", np.mean(mbatch_times)\n )\n\n task_losses = [[] for _ in tasknames]\n mbatch_times = []\n nli_losses = []\n\n # For validate and break if done.\n logging.info(\"############################\")\n logging.info(\"##### Evaluating model #####\")\n logging.info(\"############################\")\n training_complete, min_val_loss_epoch, min_val_loss, model_state = evaluate(\n config=config,\n train_iterator=train_iterator,\n model=model,\n loss_criterion=loss_criterion,\n monitor_epoch=monitor_epoch,\n min_val_loss=min_val_loss,\n min_val_loss_epoch=min_val_loss_epoch,\n save_dir=save_dir,\n starting_time=start,\n model_state=model_state,\n max_epoch=max_epoch,\n )\n if training_complete:\n mlflow.log_metric(\"min_val_loss\", float(min_val_loss))\n mlflow.log_metric(\"learning_rate\", learning_rate)\n break\n\n logging.info(\"Evaluating on NLI\")\n evaluate_nli(\n nli_iterator=nli_iterator,\n 
model=model,\n n_gpus=n_gpus,\n batch_size=batch_size,\n )\n\n updates += batch_size * n_gpus\n nli_ctr += 1\n logging.info(\"Updates: %d\" % updates)\n finally:\n os.chdir(owd)\n\n\ndef read_config(json_file):\n \"\"\"Read JSON config.\"\"\"\n json_object = json.load(open(json_file, \"r\", encoding=\"utf-8\"))\n return json_object\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--config\", help=\"path to json config\", required=True)\n parser.add_argument(\"--data_folder\", type=str, help=\"data folder\")\n # Add learning rate to tune model.\n parser.add_argument(\n \"--learning_rate\", type=float, default=0.0001, help=\"learning rate\"\n )\n parser.add_argument(\n \"--max_epoch\",\n type=int,\n default=None,\n help=\"Limit training to specified number of epochs.\",\n )\n\n args = parser.parse_args()\n data_path = args.data_folder\n lr = args.learning_rate\n\n config_file_path = args.config\n max_epoch = args.max_epoch\n config_obj = read_config(config_file_path)\n train(config_obj, data_path, lr, max_epoch)\n" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.nn.functional.softmax", "torch.ones", "torch.tensor", "numpy.mean", "torch.cuda.is_available", "numpy.float", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
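The GenSen training script above distributes work with Horovod (setup_horovod, metric_average). The toy sketch below isolates that wiring on a small linear layer so the pattern is visible without the GenSen data pipeline; it assumes horovod.torch is installed and would be launched with horovodrun, and the model, sizes and learning rate are placeholders.

import torch
import torch.nn as nn
import horovod.torch as hvd

hvd.init()
if torch.cuda.is_available():
    torch.cuda.set_device(hvd.local_rank())          # pin each worker to its own GPU

model = nn.Linear(16, 4)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4 * hvd.size())  # scale lr by world size

# Start every worker from identical weights and optimizer state.
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
hvd.broadcast_optimizer_state(optimizer, root_rank=0)

# Average gradients across workers on every step, with fp16 compression.
optimizer = hvd.DistributedOptimizer(optimizer,
                                     named_parameters=model.named_parameters(),
                                     compression=hvd.Compression.fp16)

local_loss = torch.tensor(0.5 * (hvd.rank() + 1))      # pretend per-worker validation loss
avg_loss = hvd.allreduce(local_loss, name='val_loss')  # same role as metric_average
print(f'rank {hvd.rank()}: averaged loss {avg_loss.item():.3f}')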
vanttec/vanttec_usv
[ "5c7b45a61728404b4c957028eac7bc361f1b2077" ]
[ "rb_missions/scripts/acoustic_docking.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n'''\n----------------------------------------------------------\n @file: acoustic_docking.py\n @date: Wed Jun 3, 2020\n @author: Alejandro Gonzalez Garcia\n @e-mail: [email protected]\n @brief: Motion planning. ROS node to follow an acoustic\n signal for autonomous docking.\n @version: 1.0\n Open source\n---------------------------------------------------------\n'''\n\nimport math\nimport time\nimport os\n\nimport numpy as np\nimport rospy\nfrom geometry_msgs.msg import Pose, Pose2D, PoseArray\nfrom std_msgs.msg import Int32, Float32MultiArray, Float64, String\nfrom visualization_msgs.msg import Marker, MarkerArray\n\nclass AcousticDocking:\n def __init__(self):\n self.ned_x = 0\n self.ned_y = 0\n self.yaw = 0\n\n self.activated = True\n\n self.distance = 0\n self.signal_angle = 0\n self.x1 = 0\n self.y1 = 0\n self.x2 = 0\n self.y2 = 0\n self.x_body_origin = 0\n self.y_body_origin = 0\n self.correction_distance = 2\n\n # ROS Subscribers\n rospy.Subscriber(\"/vectornav/ins_2d/NED_pose\", Pose2D, self.ins_pose_callback)\n rospy.Subscriber(\"/usv_perception/hydrophones/acoustic_signal\", Float64, self.signal_callback)\n rospy.Subscriber(\"/usv_perception/lidar_detector/dock\", PoseArray, self.dock_callback)\n\n # ROS Publishers\n self.path_pub = rospy.Publisher(\"/mission/waypoints\", Float32MultiArray, queue_size=10)\n self.status_pub = rospy.Publisher(\"/mission/status\", Int32, queue_size=10)\n self.test = rospy.Publisher(\"/mission/state\", Int32, queue_size=10)\n\n def ins_pose_callback(self,pose):\n self.ned_x = pose.x\n self.ned_y = pose.y\n self.yaw = pose.theta\n\n def signal_callback(self, signal):\n self.signal_angle = signal.data\n\n def dock_callback(self, dock):\n self.x1 = dock.poses[0].position.x\n self.y1 = dock.poses[0].position.y\n self.x2 = dock.poses[1].position.x\n self.y2 = dock.poses[1].position.y\n\n def calculate_distance_to_dock(self):\n '''\n @name: calculate_distance_to_dock\n @brief: Calculates the distance between the USV and the dock. \n @param: --\n @return: --\n '''\n xc = min([self.x1,self.x2]) + abs(self.x1 - self.x2)/2\n yc = min([self.y1,self.y2]) + abs(self.y1 - self.y2)/2\n\n self.distance = math.pow(xc*xc + yc*yc, 0.5)\n\n def dock(self):\n '''\n @name: dock\n @brief: Calculates the intersection point between the USV and the pinger\n location at the dock. Returns two waypoints as desired positions. The first\n waypoint is perpendicularly in front of the pinger to straighten the path.\n the second waypoint is the location of the pinger in the dock, for docking. 
\n @param: --\n @return: --\n '''\n\n if self.y1 < self.y2:\n yl = self.y1\n xl = self.x1\n yr = self.y2\n xr = self.x2\n else:\n yl = self.y2\n xl = self.x2\n yr = self.y1\n xr = self.x1\n\n yd = yl - yr\n xd = xl - xr\n\n alpha = math.atan2(yd,xd) + math.pi/2\n if (abs(alpha) > (math.pi)):\n alpha = (alpha/abs(alpha))*(abs(alpha) - 2*math.pi)\n\n x_beta, y_beta = self.aux_to_body(1,0,self.signal_angle,self.x_body_origin,self.y_body_origin)\n\n common_denominator = (xl - xr)*(self.y_body_origin - y_beta) - (yl - yr)*(self.x_body_origin - x_beta)\n x_pinger = ((xl*yr-yl*xr)*(self.x_body_origin-x_beta)-(xl-xr)*(self.x_body_origin*y_beta-self.y_body_origin*x_beta)) / common_denominator\n y_pinger = ((xl*yr-yl*xr)*(self.y_body_origin-y_beta)-(yl-yr)*(self.x_body_origin*y_beta-self.y_body_origin*x_beta)) / common_denominator\n\n x_aux, y_aux = self.aux_to_body(-self.correction_distance,0,alpha,x_pinger,y_pinger)\n\n path_array = Float32MultiArray()\n path_array.layout.data_offset = 5\n path_array.data = [x_aux, y_aux, x_pinger, y_pinger, 2]\n\n self.desired(path_array)\n\n def aux_to_body(self, aux_x2, aux_y2, alpha, body_x1, body_y1):\n '''\n @name: aux_to_body\n @brief: Coordinate transformation between auxiliary and body reference frames.\n @param: aux_x2: target x coordinate in aux reference frame\n aux_y2: target y coordinate in aux reference frame\n alpha: angle between aux and body reference frames\n body_x1: aux x coordinate in body reference frame\n body_y1: aux y coordinate in body reference frame\n @return: body_x2: target x coordinate in body reference frame\n body_y2: target y coordinate in body reference frame\n '''\n p = np.array([[aux_x2],[aux_y2]])\n J = self.rotation_matrix(alpha)\n n = J.dot(p)\n body_x2 = n[0] + body_x1\n body_y2 = n[1] + body_y1\n return (body_x2, body_y2)\n\n def rotation_matrix(self, angle):\n '''\n @name: rotation_matrix\n @brief: Transformation matrix template.\n @param: angle: angle of rotation\n @return: J: transformation matrix\n '''\n J = np.array([[math.cos(angle), -1*math.sin(angle)],\n [math.sin(angle), math.cos(angle)]])\n return (J)\n\n def desired(self, path):\n \tself.path_pub.publish(path)\n\ndef main():\n rospy.init_node(\"acoustic_docking\", anonymous=False)\n rate = rospy.Rate(20)\n acousticDocking = AcousticDocking()\n last_detection = []\n while not rospy.is_shutdown() and acousticDocking.activated:\n acousticDocking.calculate_distance_to_dock()\n if (acousticDocking.distance >= 5):\n acousticDocking.dock()\n else:\n acousticDocking.status_pub.publish(1)\n\n rate.sleep()\n rospy.spin()\n\nif __name__ == \"__main__\":\n try:\n main()\n except rospy.ROSInterruptException:\n pass\n\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
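acoustic_docking.py above moves waypoints between reference frames with a 2D rotation plus translation (rotation_matrix, aux_to_body). A self-contained numeric check of that transform follows; the angle and origin are arbitrary test values.

import math
import numpy as np

def rotation_matrix(angle):
    return np.array([[math.cos(angle), -math.sin(angle)],
                     [math.sin(angle),  math.cos(angle)]])

def aux_to_body(aux_x, aux_y, alpha, origin_x, origin_y):
    # Rotate the point into the body frame, then shift by the frame origin.
    n = rotation_matrix(alpha) @ np.array([aux_x, aux_y])
    return n[0] + origin_x, n[1] + origin_y

# A point 1 m straight ahead in a frame rotated 90 degrees lands 1 m to the
# side in body coordinates, offset by the frame origin.
bx, by = aux_to_body(1.0, 0.0, math.pi / 2, 2.0, 3.0)
print(round(bx, 6), round(by, 6))                    # -> 2.0 4.0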
wiseodd/lula
[ "a52b27c118ed136a62d8d7d1a898067d5ac685fb", "a52b27c118ed136a62d8d7d1a898067d5ac685fb" ]
[ "lula/util.py", "eval_CIFAR10C.py" ]
[ "import numpy as np\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\n\nclass MaskedLinear(nn.Module):\n\n def __init__(self, base_layer, m_in, m_out):\n \"\"\"\n The standard nn.Linear layer, but with gradient masking to enforce the LULA construction.\n \"\"\"\n super(MaskedLinear, self).__init__()\n\n # Extend the weight matrix\n W_base = base_layer.weight.data.clone() # (n_out, n_in)\n n_out, n_in = W_base.shape\n\n W = torch.randn(n_out+m_out, n_in+m_in)\n W[0:n_out, 0:n_in] = W_base.clone()\n W[0:n_out, n_in:] = 0 # Upper-right quadrant\n\n self.weight = nn.Parameter(W)\n\n # Extend the bias vector\n if base_layer.bias is not None:\n b_base = base_layer.bias.data.clone()\n\n b = torch.randn(n_out+m_out)\n b[:n_out] = b_base.clone()\n\n self.bias = nn.Parameter(b)\n else:\n self.bias = None\n\n # Apply gradient mask to the weight and bias\n self.mask_w = torch.zeros(n_out+m_out, n_in+m_in)\n self.mask_w[n_out:, :] = 1 # Lower half\n\n self.mask_b = torch.zeros(n_out+m_out)\n self.mask_b[n_out:] = 1\n\n self.switch_grad_mask(True)\n\n # For safekeeping\n self.W_base, self.b_base = W_base, b_base\n self.n_out, self.n_in = n_out, n_in\n self.m_out, self.m_in = m_out, m_in\n\n def forward(self, x):\n return F.linear(x, self.weight, self.bias)\n\n def switch_grad_mask(self, on=True):\n if on:\n self.grad_handle_w = self.weight.register_hook(lambda grad: grad.mul_(self.mask_w))\n self.grad_handle_b = self.bias.register_hook(lambda grad: grad.mul_(self.mask_b))\n else:\n self.grad_handle_w.remove()\n self.grad_handle_b.remove()\n\n def to_gpu(self):\n self.mask_w = self.mask_w.cuda()\n self.mask_b = self.mask_b.cuda()\n\n def to_unmasked(self):\n lin = nn.Linear(self.n_in+self.m_in, self.n_out+self.m_out)\n lin.weight = self.weight\n lin.bias = self.bias\n return lin\n\n def extra_repr(self):\n return 'in_features={}, out_features={}, bias={}'.format(\n self.n_in+self.m_in, self.n_out+self.m_out, self.bias is not None\n )\n\n\n\nclass MaskedConv2d(nn.Module):\n\n def __init__(self, base_layer, m_in, m_out):\n \"\"\"\n The standard nn.Conv2d layer, but with gradient masking to enforce the LULA construction.\n \"\"\"\n super(MaskedConv2d, self).__init__()\n\n self.kernel_size = base_layer.kernel_size\n self.stride = base_layer.stride\n self.padding = base_layer.padding\n self.dilation = base_layer.dilation\n self.groups = base_layer.groups\n\n # Extend the weight matrix\n W_base = base_layer.weight.data.clone() # (n_out, n_in, k, k)\n n_out, n_in, k, _ = W_base.shape # Num of channels\n\n W = torch.randn(n_out+m_out, n_in+m_in, k, k)\n W[0:n_out, 0:n_in, :, :] = W_base.clone()\n W[0:n_out, n_in:, :, :] = 0 # Upper-right quadrant\n\n self.weight = nn.Parameter(W)\n\n # Extend the bias vector\n if base_layer.bias is not None:\n b_base = base_layer.bias.data.clone()\n\n b = torch.randn(n_out+m_out)\n b[:n_out] = b_base.clone()\n\n self.bias = nn.Parameter(b)\n else:\n self.bias = None\n\n # Apply gradient mask to the weight and bias\n self.mask_w = torch.zeros(n_out+m_out, n_in+m_in, k, k)\n self.mask_w[n_out:, :, :, :] = 1 # Lower half\n\n self.mask_b = torch.zeros(n_out+m_out)\n self.mask_b[n_out:] = 1\n\n self.switch_grad_mask(True)\n\n # For safekeeping\n self.W_base, self.b_base = W_base, b_base\n self.n_out, self.n_in = n_out, n_in\n self.m_out, self.m_in = m_out, m_in\n\n def forward(self, x):\n return F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)\n\n def switch_grad_mask(self, on=True):\n if on:\n self.grad_handle_w 
= self.weight.register_hook(lambda grad: grad.mul_(self.mask_w))\n self.grad_handle_b = self.bias.register_hook(lambda grad: grad.mul_(self.mask_b))\n else:\n self.grad_handle_w.remove()\n self.grad_handle_b.remove()\n\n def to_gpu(self):\n self.mask_w = self.mask_w.cuda()\n self.mask_b = self.mask_b.cuda()\n\n def to_unmasked(self):\n conv = nn.Conv2d(self.n_in+self.m_in, self.n_out+self.m_out, self.kernel_size, self.stride, self.padding, self.dilation, self.groups)\n conv.weight = self.weight\n conv.bias = self.bias\n return conv\n\n def extra_repr(self):\n return 'in_channels={}, out_channels={}, bias={}'.format(\n self.n_in+self.m_in, self.n_out+self.m_out, self.bias is not None\n )\n", "import warnings\nwarnings.filterwarnings('ignore')\nimport torch\nimport numpy as np\nfrom models import wrn\nfrom laplace import kfla\nimport laplace.util as lutil\nimport util.evaluation as evalutil\nimport util.dataloaders as dl\nimport util.misc\nfrom math import *\nfrom tqdm import tqdm, trange\nimport argparse\nimport os, sys\nfrom tqdm import tqdm, trange\nfrom collections import defaultdict\nimport reluq\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--ood_dset', default='imagenet', choices=['imagenet', 'uniform', 'smooth'])\nargs = parser.parse_args()\n\ntorch.manual_seed(9999)\nnp.random.seed(9999)\n\npath = f'./pretrained_models'\n\ntrain_loader = dl.CIFAR10(train=True, augm_flag=False)\nval_loader, test_loader = dl.CIFAR10(train=False, val_size=2000)\nprint(len(train_loader.dataset), len(val_loader.dataset), len(test_loader.dataset))\n\nnum_classes = 10\ndata_shape = [3, 32, 32]\n\nmethod_types = ['MAP', 'DE', 'LA', 'LULA']\nmethod_strs = ['MAP', 'DE', 'LA', 'LA-LULA']\ndistortion_types = dl.CorruptedCIFAR10Dataset.distortions\nseverity_levels = range(1, 6) # 1 ... 
5\n\ntab_acc = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))\ntab_mmc = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))\ntab_ece = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))\ntab_brier = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))\ntab_loglik = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))\n\n\ndef load_model(type='MAP'):\n def create_model():\n return wrn.WideResNet(16, 4, num_classes).cuda()\n\n if type == 'DE':\n K = 5\n model = [create_model() for _ in range(K)]\n state_dicts = torch.load(f'./pretrained_models/CIFAR10_wrn_de.pt')\n for k in range(K):\n model[k].load_state_dict(state_dicts[k])\n model[k].eval()\n else:\n model = create_model()\n model.load_state_dict(torch.load(f'./pretrained_models/CIFAR10_wrn_plain.pt'))\n model.eval()\n\n # Additionally, load these for LULA\n if type == 'LULA':\n lula_params = torch.load(f'./pretrained_models/kfla/CIFAR10_wrn_lula_{args.ood_dset}.pt')\n\n if args.ood_dset == 'best':\n state_dict, n_units, noise = lula_params\n print(f'LULA uses this OOD dataset: {noise}')\n else:\n state_dict, n_units = lula_params\n\n model = lula.model.LULAModel_LastLayer(model, n_units).cuda()\n model.to_gpu()\n model.load_state_dict(state_dict)\n model.disable_grad_mask()\n model.unmask()\n model.eval()\n\n if type in ['LA', 'LULA']:\n var0 = torch.tensor(1/(5e-4*len(train_loader.dataset))).float().cuda()\n model = kfla.KFLA(model)\n model.get_hessian(train_loader)\n model.estimate_variance(var0)\n\n return model\n\n\ndef predict_(test_loader, model, model_name, params=None):\n assert model_name in method_types\n\n if model_name in ['LA', 'LULA']:\n py = lutil.predict(test_loader, model, n_samples=20)\n elif model_name == 'DE':\n py = evalutil.predict_ensemble(test_loader, model)\n else: # MAP\n py = evalutil.predict(test_loader, model)\n\n return py.cpu().numpy()\n\n\ndef evaluate(model_name):\n assert model_name in method_types\n\n model = load_model(model_name)\n params = None\n\n if model_name == 'LULA':\n model_str = 'LA-LULA'\n else:\n model_str = model_name\n\n print(f'Processing for {model_str}')\n\n # For all distortions, for all severity\n for d in tqdm(distortion_types, leave=False):\n for s in tqdm(severity_levels, leave=False):\n shift_loader = dl.CorruptedCIFAR10(d, s)\n py_shift = predict_(shift_loader, model, model_name, params=params)\n targets = torch.cat([y for x, y in shift_loader], dim=0).numpy()\n\n tab_acc[model_str][d][str(s)].append(evalutil.get_acc(py_shift, targets))\n tab_mmc[model_str][d][str(s)].append(evalutil.get_mmc(py_shift))\n tab_ece[model_str][d][str(s)].append(evalutil.get_calib(py_shift, targets)[0])\n tab_brier[model_str][d][str(s)].append(evalutil.get_brier(py_shift, targets))\n tab_loglik[model_str][d][str(s)].append(evalutil.get_loglik(py_shift, targets))\n\n\nevaluate('MAP')\nevaluate('DE')\nevaluate('LA')\nevaluate('LULA')\n\n\n# Save results\ndir_name = f'results/CIFAR10C/'\ndir_name += f'{args.ood_dset}'\n\nif not os.path.exists(dir_name):\n os.makedirs(dir_name)\n\nnp.save(f'{dir_name}/mmcs', util.misc.ddict2dict(tab_mmc))\nnp.save(f'{dir_name}/accs', util.misc.ddict2dict(tab_acc))\nnp.save(f'{dir_name}/eces', util.misc.ddict2dict(tab_ece))\nnp.save(f'{dir_name}/briers', util.misc.ddict2dict(tab_brier))\nnp.save(f'{dir_name}/logliks', util.misc.ddict2dict(tab_loglik))\n" ]
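The `MaskedLinear` and `MaskedConv2d` layers in the code above extend a pretrained layer with extra LULA units and use gradient hooks so that training only updates the appended block while the copied base weights stay frozen. A minimal usage sketch of that behaviour, assuming the `MaskedLinear` class defined above is available in scope (the toy layer sizes and the SGD optimizer below are illustrative, not taken from the repository):

import torch
from torch import nn

base = nn.Linear(4, 3)                        # toy "pretrained" base layer
layer = MaskedLinear(base, m_in=2, m_out=2)   # append 2 extra input/output units

x = torch.randn(8, 4 + 2)                     # batch with the extended input width
loss = layer(x).pow(2).mean()
loss.backward()                               # hooks zero the gradient of the base block

opt = torch.optim.SGD(layer.parameters(), lr=0.1)
opt.step()

# The upper-left block of the extended weight matrix still equals the base weights.
assert torch.allclose(layer.weight[:3, :4], base.weight)

Because the registered hooks multiply incoming gradients by `mask_w`/`mask_b`, the optimizer step above can only move the rows that correspond to the newly added units.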
[ [ "torch.nn.Parameter", "torch.zeros", "torch.randn", "torch.nn.functional.conv2d", "torch.nn.Conv2d", "torch.nn.Linear", "torch.nn.functional.linear" ], [ "torch.manual_seed", "torch.cat", "numpy.random.seed", "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jfigui/pyrad
[ "7811d593bb09a7f8a621c0e8ae3f32c2b85a0254", "7811d593bb09a7f8a621c0e8ae3f32c2b85a0254", "7811d593bb09a7f8a621c0e8ae3f32c2b85a0254" ]
[ "src/pyrad_proc/pyrad/EGG-INFO/scripts/rewrite_monitoring.py", "src/pyrad_proc/pyrad/proc/process_aux.py", "src/pyrad_proc/scripts/common_colocated_gates.py" ]
[ "#!/home/daniel/anaconda3/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n================================================\nrewrite_monitoring\n================================================\n\nThis program rewrites a monitoring time series files into the correct\ntime order\n\n\"\"\"\n\n# Author: fvj\n# License: BSD 3 clause\n\nimport datetime\nimport atexit\nimport numpy as np\nimport os\n\nfrom pyrad.io import read_monitoring_ts, write_monitoring_ts\nfrom pyrad.graph import plot_monitoring_ts\nfrom pyrad.io import generate_field_name_str, get_fieldname_pyart\n\n\nprint(__doc__)\n\n\ndef main():\n \"\"\"\n \"\"\"\n\n input_base = (\n '/store/msrad/radar/pyrad_products/')\n output_base = (\n '/store/msrad/radar/pyrad_products/')\n rad_vec = ['D']\n var_vec = ['PhiDP0', 'RhoHV_rain', 'ZDR_prec', 'ZDR_snow', 'dBZ_bias']\n year_vec = [datetime.datetime(2018, 1, 1)]\n\n plot_data = True\n\n print(\"====== Monitoring rewriting started: %s\" %\n datetime.datetime.utcnow().strftime(\"%Y-%m-%d %H:%M:%S\"))\n atexit.register(_print_end_msg,\n \"====== Monitoring rewriting finished: \")\n\n for i, rad in enumerate(rad_vec):\n print('Processing Radar '+rad)\n for j, var in enumerate(var_vec):\n if var == 'dBZ':\n basedir = 'rad4alp_gc_PH'+rad\n dsdir = 'monitoring_clt_Zh'\n mon_type = 'GC_MONITORING'\n quantiles = [50., 95., 99.]\n elif var == 'dBZv':\n basedir = 'rad4alp_gc_PH'+rad\n dsdir = 'monitoring_clt_Zv'\n mon_type = 'GC_MONITORING'\n quantiles = [50., 95., 99.]\n elif var == 'RhoHV_rain':\n basedir = 'rad4alp_dataquality_PL'+rad\n dsdir = 'monitoring_RhoHV'\n mon_type = 'MONITORING'\n quantiles = [65., 80., 95.]\n elif var == 'PhiDP0':\n basedir = 'rad4alp_dataquality_PL'+rad\n dsdir = 'monitoring_PhiDP0'\n mon_type = 'MONITORING'\n quantiles = [25., 50., 75.]\n elif var == 'ZDR_prec':\n basedir = 'rad4alp_dataquality_PL'+rad\n dsdir = 'monitoring_ZDR'\n mon_type = 'MONITORING'\n quantiles = [25., 50., 75.]\n elif var == 'ZDR_snow':\n basedir = 'rad4alp_dataquality_PL'+rad\n dsdir = 'monitoring_ZDR_snow'\n mon_type = 'MONITORING'\n quantiles = [25., 50., 75.]\n elif var == 'dBZ_bias':\n basedir = 'rad4alp_dataquality_PL'+rad\n dsdir = 'monitoring_Zh_bias'\n mon_type = 'MONITORING'\n quantiles = [25., 50., 75.]\n\n input_path = input_base+basedir+'/'+dsdir+'/VOL_TS/'\n output_path = output_base+basedir+'/'+dsdir+'/VOL_TS/'\n if not os.path.isdir(output_path):\n os.makedirs(output_path)\n\n print('- Processing Variable '+var)\n for k, year in enumerate(year_vec):\n print('-- Processing Year '+year.strftime('%Y'))\n fname_input = (\n input_path+year.strftime('%Y')+'_'+rad +\n '_ts_'+mon_type+'_'+var+'.csv')\n fname_output = (\n output_path+year.strftime('%Y')+'_'+rad +\n '_ts_'+mon_type+'_'+var+'.csv')\n figfname = [\n output_path+year.strftime('%Y')+'_'+rad +\n '_ts_'+mon_type+'_'+var+'.png']\n\n date, np_t_vec, cquant_vec, lquant_vec, hquant_vec = (\n read_monitoring_ts(fname_input, sort_by_date=True))\n\n if date is None:\n continue\n\n val_vec = np.ma.asarray(\n [lquant_vec, cquant_vec, hquant_vec]).T\n fname = write_monitoring_ts(\n date, np_t_vec, val_vec, quantiles, var,\n fname_output, rewrite=True)\n\n print('written file '+fname)\n\n if not plot_data:\n continue\n\n titldate = (date[0].strftime('%Y%m%d')+'-' +\n date[-1].strftime('%Y%m%d'))\n titl = rad+' Monitoring '+titldate\n\n labely = generate_field_name_str(var)\n\n if var == 'dBZ':\n if rad == 'A':\n ref_value = 49.5\n vmin = 44.5\n vmax = 54.5\n np_min = 100000\n elif rad == 'D':\n ref_value = 48.5\n vmin = 43.5\n vmax = 
53.5\n np_min = 20000\n elif rad == 'L':\n ref_value = 67.\n vmin = 62.\n vmax = 72.\n np_min = 100000\n elif rad == 'P':\n ref_value = 69.\n vmin = 64.\n vmax = 74.\n np_min = 100000\n elif rad == 'W':\n ref_value = 27.5\n vmin = 22.5\n vmax = 32.5\n np_min = 100000\n elif var == 'dBZv':\n if rad == 'A':\n ref_value = 51.5\n vmin = 46.5\n vmax = 56.5\n np_min = 100000\n elif rad == 'D':\n ref_value = 50.5\n vmin = 45.5\n vmax = 55.5\n np_min = 20000\n elif rad == 'L':\n ref_value = 69.5\n vmin = 64.5\n vmax = 74.5\n np_min = 100000\n elif rad == 'P':\n ref_value = 68.5\n vmin = 63.5\n vmax = 73.5\n np_min = 100000\n elif rad == 'W':\n ref_value = 26.5\n vmin = 21.5\n vmax = 31.5\n np_min = 100000\n elif var == 'RhoHV_rain':\n ref_value = 0.99\n vmin = 0.95\n vmax = 1.01\n np_min = 5000\n elif var == 'PhiDP0':\n ref_value = 0.\n vmin = -20.\n vmax = 20.\n np_min = 500000\n elif var == 'ZDR_prec':\n ref_value = 0.2\n vmin = -2.\n vmax = 2.\n np_min = 5000\n elif var == 'ZDR_snow':\n ref_value = 0.2\n vmin = -2.\n vmax = 2.\n np_min = 5000\n elif var == 'dBZ_bias':\n ref_value = 0.\n vmin = -30.\n vmax = 30.\n np_min = 100\n\n fname = plot_monitoring_ts(\n date, np_t_vec, cquant_vec, lquant_vec, hquant_vec,\n get_fieldname_pyart(var), figfname,\n ref_value=ref_value, vmin=vmin, vmax=vmax, np_min=np_min,\n labelx='Time UTC', labely=labely, titl=titl)\n print('plotted file '+' '.join(fname))\n\n\ndef _print_end_msg(text):\n \"\"\"\n prints end message\n\n Parameters\n ----------\n text : str\n the text to be printed\n\n Returns\n -------\n Nothing\n\n \"\"\"\n print(text + datetime.datetime.utcnow().strftime(\"%Y-%m-%d %H:%M:%S\"))\n\n\n# ---------------------------------------------------------\n# Start main:\n# ---------------------------------------------------------\nif __name__ == \"__main__\":\n main()\n", "\"\"\"\npyrad.proc.process_aux\n======================\n\nAuxiliary functions. Functions to determine the process type, pass raw data to\nthe product generation functions, save radar data and extract data at\ndetermined points or regions of interest.\n\n.. autosummary::\n :toctree: generated/\n\n get_process_func\n process_raw\n process_save_radar\n process_fixed_rng\n process_fixed_rng_span\n process_roi\n process_azimuthal_average\n process_radar_resampling\n _get_values_antenna_pattern\n _create_target_radar\n\n\"\"\"\n\nfrom time import time\n\nfrom copy import deepcopy\nfrom warnings import warn\nimport numpy as np\nfrom scipy.spatial import cKDTree\n\nimport pyart\nfrom pyart.util import cross_section_rhi\nfrom pyart.config import get_metadata\nfrom pyart.core import Radar\n\nfrom ..io.io_aux import get_datatype_fields, get_fieldname_pyart\nfrom ..io.read_data_sensor import read_trt_traj_data\nfrom ..io.read_data_other import read_antenna_pattern\nfrom ..io.read_data_cosmo import _put_radar_in_swiss_coord\nfrom ..util.radar_utils import belongs_roi_indices, get_target_elevations\nfrom ..util.radar_utils import find_neighbour_gates, compute_directional_stats\nfrom ..util.radar_utils import get_fixed_rng_data\nfrom ..util.stat_utils import quantiles_weighted\nfrom ..proc.process_traj import _get_gates_antenna_pattern\n\n\ndef get_process_func(dataset_type, dsname):\n \"\"\"\n Maps the dataset type into its processing function and data set format\n associated.\n\n Parameters\n ----------\n dataset_type : str\n The following is a list of data set types ordered by type of output\n dataset with the function they call. 
For details of what they do check\n the function documentation:\n 'VOL' format output:\n 'ATTENUATION': process_attenuation\n 'AZI_AVG': process_azimuthal_average\n 'BIAS_CORRECTION': process_correct_bias\n 'BIRDS_ID': process_birds_id\n 'BIRD_DENSITY': process_bird_density\n 'CCOR': process_ccor\n 'CDF': process_cdf\n 'CDR': process_cdr\n 'CLT_TO_SAN': process_clt_to_echo_id\n 'COSMO': process_cosmo\n 'COSMO_LOOKUP': process_cosmo_lookup_table\n 'DEM': process_dem\n 'DEALIAS_FOURDD': process_dealias_fourdd\n 'DEALIAS_REGION': process_dealias_region_based\n 'DEALIAS_UNWRAP': process_dealias_unwrap_phase\n 'DOPPLER_VELOCITY': process_Doppler_velocity\n 'DOPPLER_VELOCITY_IQ': process_Doppler_velocity_iq\n 'DOPPLER_WIDTH': process_Doppler_width\n 'DOPPLER_WIDTH_IQ': process_Doppler_width_iq\n 'ECHO_FILTER': process_echo_filter\n 'FIELDS_DIFF': process_fields_diff\n 'FIXED_RNG': process_fixed_rng\n 'FIXED_RNG_SPAN': process_fixed_rng_span\n 'GECSX' : process_gecsx\n 'hydroMF_to_hydro': process_hydro_mf_to_hydro\n 'HYDROCLASS': process_hydroclass\n 'HZT': process_hzt\n 'HZT_LOOKUP': process_hzt_lookup_table\n 'ISO0_GRIB': process_iso0_grib\n 'ISO0_MF': process_iso0_mf\n 'KDP_LEASTSQUARE_1W': process_kdp_leastsquare_single_window\n 'KDP_LEASTSQUARE_2W': process_kdp_leastsquare_double_window\n 'L': process_l\n 'MEAN_PHASE_IQ': process_mean_phase_iq\n 'NCVOL': process_save_radar\n 'NOISE_POWER': process_noise_power\n 'OUTLIER_FILTER': process_outlier_filter\n 'PhiDP': process_differential_phase\n 'PHIDP0_CORRECTION': process_correct_phidp0\n 'PHIDP0_ESTIMATE': process_estimate_phidp0\n 'PhiDP_IQ': process_differential_phase_iq\n 'PHIDP_KDP_KALMAN': process_phidp_kdp_Kalman\n 'PHIDP_KDP_LP': process_phidp_kdp_lp\n 'PHIDP_KDP_VULPIANI': process_phidp_kdp_Vulpiani\n 'PHIDP_SMOOTH_1W': process_smooth_phidp_single_window\n 'PHIDP_SMOOTH_2W': process_smooth_phidp_double_window\n 'POL_VARIABLES': process_pol_variables\n 'POL_VARIABLES_IQ': process_pol_variables_iq\n 'PWR': process_signal_power\n 'RADAR_RESAMPLING': process_radar_resampling\n 'RADIAL_NOISE_HS': process_radial_noise_hs\n 'RADIAL_NOISE_IVIC': process_radial_noise_ivic\n 'RADIAL_VELOCITY': process_radial_velocity\n 'RAINRATE': process_rainrate\n 'RAW': process_raw\n 'REFLECTIVITY': process_reflectivity\n 'REFLECTIVITY_IQ': process_reflectivity_iq\n 'RCS': process_rcs\n 'RCS_PR': process_rcs_pr\n 'RhoHV': process_rhohv\n 'RhoHV_IQ': process_rhohv_iq\n 'RHOHV_CORRECTION': process_correct_noise_rhohv\n 'RHOHV_RAIN': process_rhohv_rain\n 'ROI': process_roi\n 'SAN': process_echo_id\n 'SELFCONSISTENCY_BIAS': process_selfconsistency_bias\n 'SELFCONSISTENCY_BIAS2': process_selfconsistency_bias2\n 'SELFCONSISTENCY_KDP_PHIDP': process_selfconsistency_kdp_phidp\n 'SNR': process_snr\n 'SNR_FILTER': process_filter_snr\n 'ST1_IQ': process_st1_iq\n 'ST2_IQ': process_st2_iq\n 'TRAJ_TRT' : process_traj_trt\n 'TRAJ_TRT_CONTOUR' : process_traj_trt_contour\n 'TURBULENCE': process_turbulence\n 'VAD': process_vad\n 'VEL_FILTER': process_filter_vel_diff\n 'VIS': process_visibility\n 'VIS_FILTER': process_filter_visibility\n 'VOL_REFL': process_vol_refl\n 'WBN': process_wbn_iq\n 'WIND_VEL': process_wind_vel\n 'WINDSHEAR': process_windshear\n 'ZDR': process_differential_reflectivity\n 'ZDR_IQ': process_differential_reflectivity_iq\n 'ZDR_PREC': process_zdr_precip\n 'ZDR_SNOW': process_zdr_snow\n 'SPECTRA' format output:\n 'FFT': process_fft\n 'FILTER_0DOPPLER': process_filter_0Doppler\n 'FILTER_SPECTRA_NOISE': process_filter_spectra_noise\n 'IFFT': process_ifft\n 
'RAW_IQ': process_raw_iq\n 'RAW_SPECTRA': process_raw_spectra\n 'SPECTRA_ANGULAR_AVERAGE': process_spectra_ang_avg\n 'SPECTRA_POINT': process_spectra_point\n 'SPECTRAL_NOISE': process_spectral_noise\n 'SPECTRAL_PHASE': process_spectral_phase\n 'SPECTRAL_POWER': process_spectral_power\n 'SPECTRAL_REFLECTIVITY': process_spectral_reflectivity\n 'sPhiDP': process_spectral_differential_phase\n 'sRhoHV': process_spectral_RhoHV\n 'SRHOHV_FILTER': process_filter_srhohv\n 'sZDR': process_spectral_differential_reflectivity\n 'COLOCATED_GATES' format output:\n 'COLOCATED_GATES': process_colocated_gates\n 'COSMO_COORD' format output:\n 'COSMO_COORD': process_cosmo_coord\n 'HZT_COORD': process_hzt_coord\n 'COSMO2RADAR' format output:\n 'COSMO2RADAR': process_cosmo_to_radar\n 'GRID' format output:\n 'RAW_GRID': process_raw_grid\n 'GECSX' : process_gecsx\n 'GRID': process_grid\n 'GRID_FIELDS_DIFF': process_grid_fields_diff\n 'GRID_MASK': process_grid_mask\n 'GRID_TEXTURE': process_grid_texture\n 'NORMALIZE_LUMINOSITY': process_normalize_luminosity\n 'PIXEL_FILTER': process_pixel_filter\n 'GRID_TIMEAVG' format output:\n 'GRID_TIME_STATS': process_grid_time_stats\n 'GRID_TIME_STATS2': process_grid_time_stats2\n 'INTERCOMP' format output:\n 'INTERCOMP': process_intercomp\n 'INTERCOMP_FIELDS': process_intercomp_fields\n 'INTERCOMP_TIME_AVG': process_intercomp_time_avg\n 'ML' format output:\n 'ML_DETECTION': process_melting_layer\n 'MONITORING' format output:\n 'GC_MONITORING': process_gc_monitoring\n 'MONITORING': process_monitoring\n 'OCCURRENCE' format output:\n 'OCCURRENCE': process_occurrence\n 'OCCURRENCE_PERIOD': process_occurrence_period\n 'TIMEAVG_STD': process_time_avg_std\n 'QVP' format output:\n 'EVP': process_evp\n 'QVP': process_qvp\n 'rQVP': process_rqvp\n 'SVP': process_svp\n 'TIME_HEIGHT': process_time_height\n 'TIME_ALONG_COORD': process_ts_along_coord\n 'SPARSE_GRID' format output:\n 'ZDR_COLUMN': process_zdr_column\n 'SUN_HITS' format output:\n 'SUN_HITS': process_sun_hits\n 'SUNSCAN' format output:\n 'SUNSCAN': process_sunscan\n 'TIMEAVG' format output:\n 'FLAG_TIME_AVG': process_time_avg_flag\n 'TIME_AVG': process_time_avg\n 'WEIGHTED_TIME_AVG': process_weighted_time_avg\n 'TIME_STATS': process_time_stats\n 'TIME_STATS2': process_time_stats2\n 'RAIN_ACCU': process_rainfall_accumulation\n 'TIMESERIES' format output:\n 'GRID_POINT_MEASUREMENT': process_grid_point\n 'POINT_MEASUREMENT': 'process_point_measurement'\n 'TRAJ_ANTENNA_PATTERN': process_traj_antenna_pattern\n 'TRAJ_ATPLANE': process_traj_atplane\n 'TRAJ_LIGHTNING': process_traj_lightning\n 'TRAJ_ONLY' format output:\n 'TRAJ': process_trajectory\n dsname : str\n Name of dataset\n\n Returns\n -------\n func_name : str or processing function\n pyrad function used to process the data set type\n dsformat : str\n data set format, i.e.: 'VOL', etc.\n\n \"\"\"\n\n dsformat = 'VOL'\n if dataset_type == 'RAW':\n func_name = process_raw\n elif dataset_type == 'AZI_AVG':\n func_name = process_azimuthal_average\n elif dataset_type == 'RADAR_RESAMPLING':\n func_name = 'process_radar_resampling'\n elif dataset_type == 'CCOR':\n func_name = 'process_ccor'\n elif dataset_type == 'GECSX':\n func_name = 'process_gecsx'\n dsformat = ['GRID','VOL']\n elif dataset_type == 'GRID':\n func_name = 'process_grid'\n dsformat = 'GRID'\n elif dataset_type == 'RAW_GRID':\n func_name = 'process_raw_grid'\n dsformat = 'GRID'\n elif dataset_type == 'GRID_FIELDS_DIFF':\n func_name = 'process_grid_fields_diff'\n dsformat = 'GRID'\n elif dataset_type == 
'GRID_MASK':\n func_name = 'process_grid_mask'\n dsformat = 'GRID'\n elif dataset_type == 'GRID_TEXTURE':\n func_name = 'process_grid_texture'\n dsformat = 'GRID'\n elif dataset_type == 'NORMALIZE_LUMINOSITY':\n func_name = 'process_normalize_luminosity'\n dsformat = 'GRID'\n elif dataset_type == 'PIXEL_FILTER':\n func_name = 'process_pixel_filter'\n dsformat = 'GRID'\n elif dataset_type == 'RAW_SPECTRA':\n func_name = 'process_raw_spectra'\n dsformat = 'SPECTRA'\n elif dataset_type == 'SPECTRA_POINT':\n func_name = 'process_spectra_point'\n dsformat = 'SPECTRA'\n elif dataset_type == 'IFFT':\n func_name = 'process_ifft'\n dsformat = 'SPECTRA'\n elif dataset_type == 'SPECTRAL_POWER':\n func_name = 'process_spectral_power'\n dsformat = 'SPECTRA'\n elif dataset_type == 'SPECTRAL_NOISE':\n func_name = 'process_spectral_noise'\n dsformat = 'SPECTRA'\n elif dataset_type == 'SPECTRAL_PHASE':\n func_name = 'process_spectral_phase'\n dsformat = 'SPECTRA'\n elif dataset_type == 'SPECTRAL_REFLECTIVITY':\n func_name = 'process_spectral_reflectivity'\n dsformat = 'SPECTRA'\n elif dataset_type == 'sZDR':\n func_name = 'process_spectral_differential_reflectivity'\n dsformat = 'SPECTRA'\n elif dataset_type == 'sPhiDP':\n func_name = 'process_spectral_differential_phase'\n dsformat = 'SPECTRA'\n elif dataset_type == 'sRhoHV':\n func_name = 'process_spectral_rhohv'\n dsformat = 'SPECTRA'\n elif dataset_type == 'FILTER_SPECTRA_NOISE':\n func_name = 'process_filter_spectra_noise'\n dsformat = 'SPECTRA'\n elif dataset_type == 'FILTER_0DOPPLER':\n func_name = 'process_filter_0Doppler'\n dsformat = 'SPECTRA'\n elif dataset_type == 'SRHOHV_FILTER':\n func_name = 'process_filter_srhohv'\n dsformat = 'SPECTRA'\n elif dataset_type == 'SPECTRA_ANGULAR_AVERAGE':\n func_name = 'process_spectra_ang_avg'\n dsformat = 'SPECTRA'\n elif dataset_type == 'FFT':\n func_name = 'process_fft'\n dsformat = 'SPECTRA'\n elif dataset_type == 'RAW_IQ':\n func_name = 'process_raw_iq'\n dsformat = 'SPECTRA'\n elif dataset_type == 'QVP':\n func_name = 'process_qvp'\n dsformat = 'QVP'\n elif dataset_type == 'rQVP':\n func_name = 'process_rqvp'\n dsformat = 'QVP'\n elif dataset_type == 'SVP':\n func_name = 'process_svp'\n dsformat = 'QVP'\n elif dataset_type == 'EVP':\n func_name = 'process_evp'\n dsformat = 'QVP'\n elif dataset_type == 'TIME_HEIGHT':\n func_name = 'process_time_height'\n dsformat = 'QVP'\n elif dataset_type == 'TIME_ALONG_COORD':\n func_name = 'process_ts_along_coord'\n dsformat = 'QVP'\n elif dataset_type == 'CDF':\n func_name = 'process_cdf'\n elif dataset_type == 'NCVOL':\n func_name = process_save_radar\n elif dataset_type == 'PWR':\n func_name = 'process_signal_power'\n elif dataset_type == 'RCS_PR':\n func_name = 'process_rcs_pr'\n elif dataset_type == 'RCS':\n func_name = 'process_rcs'\n elif dataset_type == 'SNR':\n func_name = 'process_snr'\n elif dataset_type == 'RADIAL_NOISE_HS':\n func_name = 'process_radial_noise_hs'\n elif dataset_type == 'RADIAL_NOISE_IVIC':\n func_name = 'process_radial_noise_ivic'\n elif dataset_type == 'VOL_REFL':\n func_name = 'process_vol_refl'\n elif dataset_type == 'BIRD_DENSITY':\n func_name = 'process_bird_density'\n elif dataset_type == 'RHOHV_CORRECTION':\n func_name = 'process_correct_noise_rhohv'\n elif dataset_type == 'BIAS_CORRECTION':\n func_name = 'process_correct_bias'\n elif dataset_type == 'L':\n func_name = 'process_l'\n elif dataset_type == 'CDR':\n func_name = 'process_cdr'\n elif dataset_type == 'SAN':\n func_name = 'process_echo_id'\n elif dataset_type == 
'BIRDS_ID':\n func_name = 'process_birds_id'\n elif dataset_type == 'CLT_TO_SAN':\n func_name = 'process_clt_to_echo_id'\n elif dataset_type == 'hydroMF_to_hydro':\n func_name = 'process_hydro_mf_to_hydro'\n elif dataset_type == 'ECHO_FILTER':\n func_name = 'process_echo_filter'\n elif dataset_type == 'ZDR_COLUMN':\n func_name = 'process_zdr_column'\n dsformat = 'SPARSE_GRID'\n elif dataset_type == 'SNR_FILTER':\n func_name = 'process_filter_snr'\n elif dataset_type == 'VEL_FILTER':\n func_name = 'process_filter_vel_diff'\n elif dataset_type == 'VIS_FILTER':\n func_name = 'process_filter_visibility'\n elif dataset_type == 'VIS':\n func_name = 'process_visibility'\n elif dataset_type == 'OUTLIER_FILTER':\n func_name = 'process_outlier_filter'\n elif dataset_type == 'PHIDP0_CORRECTION':\n func_name = 'process_correct_phidp0'\n elif dataset_type == 'PHIDP_SMOOTH_1W':\n func_name = 'process_smooth_phidp_single_window'\n elif dataset_type == 'PHIDP_SMOOTH_2W':\n func_name = 'process_smooth_phidp_double_window'\n elif dataset_type == 'PHIDP_KDP_VULPIANI':\n func_name = 'process_phidp_kdp_Vulpiani'\n elif dataset_type == 'PHIDP_KDP_KALMAN':\n func_name = 'process_phidp_kdp_Kalman'\n elif dataset_type == 'PHIDP_KDP_MAESAKA':\n func_name = 'process_phidp_kdp_Maesaka'\n elif dataset_type == 'PHIDP_KDP_LP':\n func_name = 'process_phidp_kdp_lp'\n elif dataset_type == 'KDP_LEASTSQUARE_1W':\n func_name = 'process_kdp_leastsquare_single_window'\n elif dataset_type == 'KDP_LEASTSQUARE_2W':\n func_name = 'process_kdp_leastsquare_double_window'\n elif dataset_type == 'ATTENUATION':\n func_name = 'process_attenuation'\n elif dataset_type == 'RAINRATE':\n func_name = 'process_rainrate'\n elif dataset_type == 'RAIN_ACCU':\n func_name = 'process_rainfall_accumulation'\n dsformat = 'TIMEAVG'\n elif dataset_type == 'TURBULENCE':\n func_name = 'process_turbulence'\n elif dataset_type == 'DEALIAS_FOURDD':\n func_name = 'process_dealias_fourdd'\n elif dataset_type == 'DEALIAS_REGION':\n func_name = 'process_dealias_region_based'\n elif dataset_type == 'DEALIAS_UNWRAP':\n func_name = 'process_dealias_unwrap_phase'\n elif dataset_type == 'RADIAL_VELOCITY':\n func_name = 'process_radial_velocity'\n elif dataset_type == 'WIND_VEL':\n func_name = 'process_wind_vel'\n elif dataset_type == 'VAD':\n func_name = 'process_vad'\n elif dataset_type == 'WINDSHEAR':\n func_name = 'process_windshear'\n elif dataset_type == 'HYDROCLASS':\n func_name = 'process_hydroclass'\n elif dataset_type == 'ML_DETECTION':\n func_name = 'process_melting_layer'\n dsformat = 'ML'\n elif dataset_type == 'PHIDP0_ESTIMATE':\n func_name = 'process_estimate_phidp0'\n elif dataset_type == 'RHOHV_RAIN':\n func_name = 'process_rhohv_rain'\n elif dataset_type == 'ZDR_PREC':\n func_name = 'process_zdr_precip'\n elif dataset_type == 'ZDR_SNOW':\n func_name = 'process_zdr_snow'\n elif dataset_type == 'POL_VARIABLES':\n func_name = 'process_pol_variables'\n elif dataset_type == 'NOISE_POWER':\n func_name = 'process_noise_power'\n elif dataset_type == 'REFLECTIVITY':\n func_name = 'process_reflectivity'\n elif dataset_type == 'ZDR':\n func_name = 'process_differential_reflectivity'\n elif dataset_type == 'PhiDP':\n func_name = 'process_differential_phase'\n elif dataset_type == 'RhoHV':\n func_name = 'process_rhohv'\n elif dataset_type == 'DOPPLER_VELOCITY':\n func_name = 'process_Doppler_velocity'\n elif dataset_type == 'DOPPLER_WIDTH':\n func_name = 'process_Doppler_width'\n elif dataset_type == 'POL_VARIABLES_IQ':\n func_name = 'process_pol_variables_iq'\n 
elif dataset_type == 'REFLECTIVITY_IQ':\n func_name = 'process_reflectivity_iq'\n elif dataset_type == 'ZDR_IQ':\n func_name = 'process_differential_reflectivity_iq'\n elif dataset_type == 'PhiDP_IQ':\n func_name = 'process_differential_phase_iq'\n elif dataset_type == 'RhoHV_IQ':\n func_name = 'process_rhohv_iq'\n elif dataset_type == 'DOPPLER_VELOCITY_IQ':\n func_name = 'process_Doppler_velocity_iq'\n elif dataset_type == 'DOPPLER_WIDTH_IQ':\n func_name = 'process_Doppler_width_iq'\n elif dataset_type == 'MEAN_PHASE_IQ':\n func_name = 'process_mean_phase_iq'\n elif dataset_type == 'ST1_IQ':\n func_name = 'process_st1_iq'\n elif dataset_type == 'ST2_IQ':\n func_name = 'process_st2_iq'\n elif dataset_type == 'WBN_IQ':\n func_name = 'process_wbn_iq'\n elif dataset_type == 'SELFCONSISTENCY_KDP_PHIDP':\n func_name = 'process_selfconsistency_kdp_phidp'\n elif dataset_type == 'SELFCONSISTENCY_BIAS':\n func_name = 'process_selfconsistency_bias'\n elif dataset_type == 'SELFCONSISTENCY_BIAS2':\n func_name = 'process_selfconsistency_bias2'\n elif dataset_type == 'COSMO':\n func_name = 'process_cosmo'\n elif dataset_type == 'COSMO_LOOKUP':\n func_name = 'process_cosmo_lookup_table'\n elif dataset_type == 'COSMO_COORD':\n func_name = 'process_cosmo_coord'\n dsformat = 'COSMO_COORD'\n elif dataset_type == 'HZT_COORD':\n func_name = 'process_hzt_coord'\n dsformat = 'COSMO_COORD'\n elif dataset_type == 'COSMO2RADAR':\n func_name = 'process_cosmo_to_radar'\n dsformat = 'COSMO2RADAR'\n elif dataset_type == 'HZT':\n func_name = 'process_hzt'\n elif dataset_type == 'ISO0_MF':\n func_name = 'process_iso0_mf'\n elif dataset_type == 'ISO0_GRIB':\n func_name = 'process_iso0_grib'\n elif dataset_type == 'HZT_LOOKUP':\n func_name = 'process_hzt_lookup_table'\n elif dataset_type == 'DEM':\n func_name = 'process_dem'\n elif dataset_type == 'TIME_AVG':\n func_name = 'process_time_avg'\n dsformat = 'TIMEAVG'\n elif dataset_type == 'WEIGHTED_TIME_AVG':\n func_name = 'process_weighted_time_avg'\n dsformat = 'TIMEAVG'\n elif dataset_type == 'FLAG_TIME_AVG':\n func_name = 'process_time_avg_flag'\n dsformat = 'TIMEAVG'\n elif dataset_type == 'TIME_STATS':\n func_name = 'process_time_stats'\n dsformat = 'TIMEAVG'\n elif dataset_type == 'TIME_STATS2':\n func_name = 'process_time_stats2'\n dsformat = 'TIMEAVG'\n elif dataset_type == 'GRID_TIME_STATS':\n func_name = 'process_grid_time_stats'\n dsformat = 'GRID_TIMEAVG'\n elif dataset_type == 'GRID_TIME_STATS2':\n func_name = 'process_grid_time_stats2'\n dsformat = 'GRID_TIMEAVG'\n elif dataset_type == 'COLOCATED_GATES':\n func_name = 'process_colocated_gates'\n dsformat = 'COLOCATED_GATES'\n elif dataset_type == 'INTERCOMP':\n func_name = 'process_intercomp'\n dsformat = 'INTERCOMP'\n elif dataset_type == 'INTERCOMP_FIELDS':\n func_name = 'process_intercomp_fields'\n dsformat = 'INTERCOMP'\n elif dataset_type == 'INTERCOMP_TIME_AVG':\n func_name = 'process_intercomp_time_avg'\n dsformat = 'INTERCOMP'\n elif dataset_type == 'FIELDS_DIFF':\n func_name = 'process_fields_diff'\n elif dataset_type == 'MONITORING':\n func_name = 'process_monitoring'\n dsformat = 'MONITORING'\n elif dataset_type == 'GC_MONITORING':\n func_name = 'process_gc_monitoring'\n dsformat = 'MONITORING'\n elif dataset_type == 'OCCURRENCE':\n func_name = 'process_occurrence'\n dsformat = 'OCCURRENCE'\n elif dataset_type == 'TIMEAVG_STD':\n func_name = 'process_time_avg_std'\n dsformat = 'OCCURRENCE'\n elif dataset_type == 'OCCURRENCE_PERIOD':\n func_name = 'process_occurrence_period'\n dsformat = 
'OCCURRENCE'\n elif dataset_type == 'SUN_HITS':\n func_name = 'process_sun_hits'\n dsformat = 'SUN_HITS'\n elif dataset_type == 'SUNSCAN':\n func_name = 'process_sunscan'\n dsformat = 'SUN_HITS'\n elif dataset_type == 'POINT_MEASUREMENT':\n func_name = 'process_point_measurement'\n dsformat = 'TIMESERIES'\n elif dataset_type == 'GRID_POINT_MEASUREMENT':\n func_name = 'process_grid_point'\n dsformat = 'TIMESERIES'\n elif dataset_type == 'ROI':\n func_name = process_roi\n elif dataset_type == 'TRAJ':\n func_name = 'process_trajectory'\n dsformat = 'TRAJ_ONLY'\n elif dataset_type == 'TRAJ_ATPLANE':\n func_name = 'process_traj_atplane'\n dsformat = 'TIMESERIES'\n elif dataset_type == 'TRAJ_ANTENNA_PATTERN':\n func_name = 'process_traj_antenna_pattern'\n dsformat = 'TIMESERIES'\n elif dataset_type == 'TRAJ_LIGHTNING':\n func_name = 'process_traj_lightning'\n dsformat = 'TIMESERIES'\n elif dataset_type == 'TRAJ_TRT':\n func_name = 'process_traj_trt'\n elif dataset_type == 'TRAJ_TRT_CONTOUR':\n func_name = 'process_traj_trt_contour'\n elif dataset_type == 'FIXED_RNG':\n func_name = process_fixed_rng\n elif dataset_type == 'FIXED_RNG_SPAN':\n func_name = process_fixed_rng_span\n else:\n raise ValueError(\"ERROR: Unknown dataset type '%s' of dataset '%s'\"\n % (dataset_type, dsname))\n\n return func_name, dsformat\n\n\ndef process_raw(procstatus, dscfg, radar_list=None):\n \"\"\"\n Dummy function that returns the initial input data set\n\n Parameters\n ----------\n procstatus : int\n Processing status: 0 initializing, 1 processing volume,\n 2 post-processing\n dscfg : dictionary of dictionaries\n data set configuration\n radar_list : list of Radar objects\n Optional. list of radar objects\n\n Returns\n -------\n new_dataset : dict\n dictionary containing the output\n ind_rad : int\n radar index\n\n \"\"\"\n\n if procstatus != 1:\n return None, None\n\n for datatypedescr in dscfg['datatype']:\n radarnr, _, _, _, _ = get_datatype_fields(datatypedescr)\n break\n ind_rad = int(radarnr[5:8])-1\n if (radar_list is None) or (radar_list[ind_rad] is None):\n warn('ERROR: No valid radar')\n return None, None\n new_dataset = {'radar_out': deepcopy(radar_list[ind_rad])}\n\n return new_dataset, ind_rad\n\n\ndef process_save_radar(procstatus, dscfg, radar_list=None):\n \"\"\"\n Dummy function that allows to save the entire radar object\n\n Parameters\n ----------\n procstatus : int\n Processing status: 0 initializing, 1 processing volume,\n 2 post-processing\n dscfg : dictionary of dictionaries\n data set configuration\n radar_list : list of Radar objects\n Optional. list of radar objects\n\n Returns\n -------\n new_dataset : dict\n dictionary containing the output\n ind_rad : int\n radar index\n\n \"\"\"\n\n if procstatus != 1:\n return None, None\n\n for datatypedescr in dscfg['datatype']:\n radarnr, _, _, _, _ = get_datatype_fields(datatypedescr)\n break\n ind_rad = int(radarnr[5:8])-1\n if (radar_list is None) or (radar_list[ind_rad] is None):\n warn('ERROR: No valid radar')\n return None, None\n new_dataset = {'radar_out': deepcopy(radar_list[ind_rad])}\n\n return new_dataset, ind_rad\n\n\ndef process_fixed_rng(procstatus, dscfg, radar_list=None):\n \"\"\"\n Obtains radar data at a fixed range\n\n Parameters\n ----------\n procstatus : int\n Processing status: 0 initializing, 1 processing volume,\n 2 post-processing\n dscfg : dictionary of dictionaries\n data set configuration. Accepted Configuration Keywords::\n\n datatype : list of strings. Dataset keyword\n The fields we want to extract\n rng : float. 
Dataset keyword\n The fixed range [m]\n RngTol : float. Dataset keyword\n The tolerance between the nominal range and the radar range\n ele_min, ele_max, azi_min, azi_max : floats. Dataset keyword\n The azimuth and elevation limits of the data [deg]\n\n radar_list : list of Radar objects\n Optional. list of radar objects\n\n Returns\n -------\n new_dataset : dict\n dictionary containing the data and metadata at the point of interest\n ind_rad : int\n radar index\n\n \"\"\"\n if procstatus != 1:\n return None, None\n\n field_names = []\n for datatypedescr in dscfg['datatype']:\n radarnr, _, datatype, _, _ = get_datatype_fields(\n datatypedescr)\n field_names.append(get_fieldname_pyart(datatype))\n ind_rad = int(radarnr[5:8])-1\n\n if (radar_list is None) or (radar_list[ind_rad] is None):\n warn('ERROR: No valid radar')\n return None, None\n radar = radar_list[ind_rad]\n\n # user defined parameters\n rng_tol = dscfg.get('RngTol', 50.)\n ele_min = dscfg.get('ele_min', None)\n ele_max = dscfg.get('ele_max', None)\n azi_min = dscfg.get('azi_min', None)\n azi_max = dscfg.get('azi_max', None)\n\n radar_aux = get_fixed_rng_data(\n radar, field_names, dscfg['rng'], rng_tol=rng_tol, ele_min=ele_min,\n ele_max=ele_max, azi_min=azi_min, azi_max=azi_max)\n\n if radar_aux is None:\n return None\n\n new_dataset = {'radar_out': radar_aux}\n\n return new_dataset, ind_rad\n\n\ndef process_fixed_rng_span(procstatus, dscfg, radar_list=None):\n \"\"\"\n For each azimuth-elevation gets the data within a fixed range span\n and computes a user-defined statistic: mean, min, max, mode, median\n\n Parameters\n ----------\n procstatus : int\n Processing status: 0 initializing, 1 processing volume,\n 2 post-processing\n dscfg : dictionary of dictionaries\n data set configuration. Accepted Configuration Keywords::\n\n datatype : list of strings. Dataset keyword\n The fields we want to extract\n rmin, rmax : float. Dataset keyword\n The range limits [m]\n ele_min, ele_max, azi_min, azi_max : floats. Dataset keyword\n The azimuth and elevation limits of the data [deg]\n\n radar_list : list of Radar objects\n Optional. 
list of radar objects\n\n Returns\n -------\n new_dataset : dict\n dictionary containing the data and metadata at the point of interest\n ind_rad : int\n radar index\n\n \"\"\"\n if procstatus != 1:\n return None, None\n\n field_names = []\n for datatypedescr in dscfg['datatype']:\n radarnr, _, datatype, _, _ = get_datatype_fields(\n datatypedescr)\n field_names.append(get_fieldname_pyart(datatype))\n ind_rad = int(radarnr[5:8])-1\n\n if (radar_list is None) or (radar_list[ind_rad] is None):\n warn('ERROR: No valid radar')\n return None, None\n radar = radar_list[ind_rad]\n\n # user defined parameters\n rmin = dscfg.get('rmin', None)\n rmax = dscfg.get('rmax', None)\n ele_min = dscfg.get('ele_min', None)\n ele_max = dscfg.get('ele_max', None)\n azi_min = dscfg.get('azi_min', None)\n azi_max = dscfg.get('azi_max', None)\n\n radar_aux = pyart.util.cut_radar(\n radar, field_names, rng_min=rmin, rng_max=rmax, ele_min=ele_min,\n ele_max=ele_max, azi_min=azi_min, azi_max=azi_max)\n\n if radar_aux is None:\n return None\n\n new_dataset = {'radar_out': radar_aux}\n\n return new_dataset, ind_rad\n\n\ndef process_roi(procstatus, dscfg, radar_list=None):\n \"\"\"\n Obtains the radar data at a region of interest defined by a TRT file or\n by the user.\n\n Parameters\n ----------\n procstatus : int\n Processing status: 0 initializing, 1 processing volume,\n 2 post-processing\n dscfg : dictionary of dictionaries\n data set configuration. Accepted Configuration Keywords::\n\n datatype : string. Dataset keyword\n The data type where we want to extract the point measurement\n trtfile : str. Dataset keyword\n TRT file from which to extract the region of interest\n lon_roi, lat_roi : float array. Dataset keyword\n latitude and longitude positions defining a region of interest\n alt_min, alt_max : float. Dataset keyword\n Minimum and maximum altitude of the region of interest. Can be\n None\n\n\n radar_list : list of Radar objects\n Optional. 
list of radar objects\n\n Returns\n -------\n new_dataset : dict\n dictionary containing the data and metadata at the point of interest\n ind_rad : int\n radar index\n\n \"\"\"\n if procstatus != 1:\n return None, None\n\n field_names_aux = []\n for datatypedescr in dscfg['datatype']:\n radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)\n field_names_aux.append(get_fieldname_pyart(datatype))\n\n ind_rad = int(radarnr[5:8])-1\n if (radar_list is None) or (radar_list[ind_rad] is None):\n warn('ERROR: No valid radar')\n return None, None\n radar = radar_list[ind_rad]\n\n # keep only fields present in radar object\n field_names = []\n nfields_available = 0\n for field_name in field_names_aux:\n if field_name not in radar.fields:\n warn('Field name '+field_name+' not available in radar object')\n continue\n field_names.append(field_name)\n nfields_available += 1\n\n if nfields_available == 0:\n warn(\"Fields not available in radar data\")\n return None, None\n\n if 'trtfile' in dscfg:\n (_, yyyymmddHHMM, lon, lat, _, _, _, _, _, _, _, _, _, _, _, _, _, _,\n _, _, _, _, _, _, _, _, _, cell_contour) = read_trt_traj_data(\n dscfg['trtfile'])\n\n time_tol = dscfg.get('TimeTol', 100.)\n dt = np.empty(yyyymmddHHMM.size, dtype=float)\n for i, time_traj in enumerate(yyyymmddHHMM):\n dt[i] = np.abs((dscfg['timeinfo'] - time_traj).total_seconds())\n if dt.min() > time_tol:\n warn('No TRT data for radar volume time')\n return None, None\n\n ind = np.argmin(dt)\n lon_roi = cell_contour[ind]['lon']\n lat_roi = cell_contour[ind]['lat']\n else:\n lon_roi = dscfg.get('lon_roi', None)\n lat_roi = dscfg.get('lat_roi', None)\n\n if lon_roi is None or lat_roi is None:\n warn('Undefined ROI')\n return None, None\n\n alt_min = dscfg.get('alt_min', None)\n alt_max = dscfg.get('alt_max', None)\n\n roi_dict = {\n 'lon': lon_roi,\n 'lat': lat_roi,\n 'alt_min': alt_min,\n 'alt_max': alt_max}\n\n # extract the data within the ROI boundaries\n inds_ray, inds_rng = np.indices(np.shape(radar.gate_longitude['data']))\n\n mask = np.logical_and(\n np.logical_and(\n radar.gate_latitude['data'] >= roi_dict['lat'].min(),\n radar.gate_latitude['data'] <= roi_dict['lat'].max()),\n np.logical_and(\n radar.gate_longitude['data'] >= roi_dict['lon'].min(),\n radar.gate_longitude['data'] <= roi_dict['lon'].max()))\n\n if alt_min is not None:\n mask[radar.gate_altitude['data'] < alt_min] = 0\n if alt_max is not None:\n mask[radar.gate_altitude['data'] > alt_max] = 0\n\n if np.all(mask == 0):\n warn('No values within ROI')\n return None, None\n\n inds_ray = inds_ray[mask]\n inds_rng = inds_rng[mask]\n\n # extract the data inside the ROI\n lat = radar.gate_latitude['data'][mask]\n lon = radar.gate_longitude['data'][mask]\n inds, is_roi = belongs_roi_indices(lat, lon, roi_dict)\n\n if is_roi == 'None':\n warn('No values within ROI')\n return None, None\n\n inds_ray = inds_ray[inds]\n inds_rng = inds_rng[inds]\n\n lat = lat[inds].T\n lon = lon[inds].T\n alt = radar.gate_altitude['data'][inds_ray, inds_rng].T\n\n # prepare new radar object output\n new_dataset = {'radar_out': deepcopy(radar)}\n\n new_dataset['radar_out'].range['data'] = radar.range['data'][inds_rng]\n new_dataset['radar_out'].ngates = inds_rng.size\n new_dataset['radar_out'].time['data'] = np.asarray(\n [new_dataset['radar_out'].time['data'][0]])\n new_dataset['radar_out'].scan_type = 'roi'\n new_dataset['radar_out'].sweep_mode['data'] = np.array(['roi'])\n new_dataset['radar_out'].sweep_start_ray_index['data'] = np.array(\n [0], dtype='int32')\n 
new_dataset['radar_out'].fixed_angle['data'] = np.array(\n [], dtype='float64')\n new_dataset['radar_out'].sweep_number['data'] = np.array(\n [0], dtype='int32')\n new_dataset['radar_out'].nsweeps = 1\n\n if radar.rays_are_indexed is not None:\n new_dataset['radar_out'].rays_are_indexed['data'] = np.array(\n [radar.rays_are_indexed['data'][0]])\n if radar.ray_angle_res is not None:\n new_dataset['radar_out'].ray_angle_res['data'] = np.array(\n [radar.ray_angle_res['data'][0]])\n\n new_dataset['radar_out'].sweep_end_ray_index['data'] = np.array(\n [1], dtype='int32')\n new_dataset['radar_out'].rays_per_sweep = np.array([1], dtype='int32')\n new_dataset['radar_out'].azimuth['data'] = np.array([], dtype='float64')\n new_dataset['radar_out'].elevation['data'] = np.array([], dtype='float64')\n new_dataset['radar_out'].nrays = 1\n\n new_dataset['radar_out'].gate_longitude['data'] = lon\n new_dataset['radar_out'].gate_latitude['data'] = lat\n new_dataset['radar_out'].gate_altitude['data'] = alt\n\n new_dataset['radar_out'].gate_x['data'] = (\n radar.gate_x['data'][inds_ray, inds_rng].T)\n new_dataset['radar_out'].gate_y['data'] = (\n radar.gate_y['data'][inds_ray, inds_rng].T)\n new_dataset['radar_out'].gate_z['data'] = (\n radar.gate_z['data'][inds_ray, inds_rng].T)\n\n new_dataset['radar_out'].fields = dict()\n for field_name in field_names:\n field_dict = deepcopy(radar.fields[field_name])\n field_dict['data'] = (\n radar.fields[field_name]['data'][inds_ray, inds_rng].T)\n new_dataset['radar_out'].add_field(field_name, field_dict)\n\n return new_dataset['radar_out'], ind_rad\n\n\ndef process_azimuthal_average(procstatus, dscfg, radar_list=None):\n \"\"\"\n Averages radar data in azimuth obtaining and RHI as a result\n\n Parameters\n ----------\n procstatus : int\n Processing status: 0 initializing, 1 processing volume,\n 2 post-processing\n dscfg : dictionary of dictionaries\n data set configuration. Accepted Configuration Keywords::\n\n datatype : string. Dataset keyword\n The data type where we want to extract the point measurement\n angle : float or None. Dataset keyword\n The center angle to average. If not set or set to -1 all\n available azimuth angles will be used\n delta_azi : float. Dataset keyword\n The angle span to average. If not set or set to -1 all the\n available azimuth angles will be used\n avg_type : str. Dataset keyword\n Average type. Can be mean or median\n nvalid_min : int. Dataset keyword\n the (minimum) radius of the region of interest in m. Default half\n the largest resolution\n\n radar_list : list of Radar objects\n Optional. 
list of radar objects\n\n Returns\n -------\n new_dataset : dict\n dictionary containing the gridded data\n ind_rad : int\n radar index\n\n \"\"\"\n if procstatus != 1:\n return None, None\n\n field_names_aux = []\n for datatypedescr in dscfg['datatype']:\n radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)\n field_names_aux.append(get_fieldname_pyart(datatype))\n\n ind_rad = int(radarnr[5:8])-1\n if (radar_list is None) or (radar_list[ind_rad] is None):\n warn('ERROR: No valid radar')\n return None, None\n radar = radar_list[ind_rad]\n\n # keep only fields present in radar object\n field_names = []\n nfields_available = 0\n for field_name in field_names_aux:\n if field_name not in radar.fields:\n warn('Field name '+field_name+' not available in radar object')\n continue\n field_names.append(field_name)\n nfields_available += 1\n\n if nfields_available == 0:\n warn(\"Fields not available in radar data\")\n return None, None\n\n # default parameters\n angle = dscfg.get('angle', None)\n delta_azi = dscfg.get('delta_azi', None)\n avg_type = dscfg.get('avg_type', 'mean')\n nvalid_min = dscfg.get('nvalid_min', 1)\n if avg_type not in ('mean', 'median'):\n warn('Unsuported statistics '+avg_type)\n return None, None\n\n if delta_azi == -1:\n delta_azi = None\n if angle == -1:\n angle = None\n\n radar_aux = deepcopy(radar)\n # transform radar into ppi over the required elevation\n if radar_aux.scan_type == 'rhi':\n target_elevations, el_tol = get_target_elevations(radar_aux)\n radar_ppi = cross_section_rhi(\n radar_aux, target_elevations, el_tol=el_tol)\n elif radar_aux.scan_type == 'ppi':\n radar_ppi = radar_aux\n else:\n warn('Error: unsupported scan type.')\n return None, None\n\n # range, metadata, radar position are the same as the original\n # time\n radar_rhi = deepcopy(radar)\n radar_rhi.fields = dict()\n radar_rhi.scan_type = 'rhi'\n radar_rhi.sweep_number['data'] = np.array([0])\n radar_rhi.sweep_mode['data'] = np.array(['rhi'])\n radar_rhi.fixed_angle['data'] = np.array([0])\n radar_rhi.sweep_start_ray_index['data'] = np.array([0])\n radar_rhi.sweep_end_ray_index['data'] = np.array([radar_ppi.nsweeps-1])\n radar_rhi.rays_per_sweep['data'] = np.array([radar_ppi.nsweeps])\n radar_rhi.azimuth['data'] = np.ones(radar_ppi.nsweeps)\n radar_rhi.elevation['data'] = radar_ppi.fixed_angle['data']\n radar_rhi.nrays = radar_ppi.fixed_angle['data'].size\n radar_rhi.nsweeps = 1\n radar_rhi.rays_are_indexed = None\n radar_rhi.ray_angle_res = None\n\n # average radar data\n if angle is None:\n fixed_angle = np.zeros(radar_ppi.nsweeps)\n\n fields_dict = dict()\n for field_name in field_names:\n fields_dict.update(\n {field_name: get_metadata(field_name)})\n fields_dict[field_name]['data'] = np.ma.masked_all(\n (radar_ppi.nsweeps, radar_ppi.ngates))\n\n for sweep in range(radar_ppi.nsweeps):\n radar_aux = deepcopy(radar_ppi)\n radar_aux = radar_aux.extract_sweeps([sweep])\n\n # find neighbouring gates to be selected\n inds_ray, inds_rng = find_neighbour_gates(\n radar_aux, angle, None, delta_azi=delta_azi, delta_rng=None)\n\n if angle is None:\n fixed_angle[sweep] = np.median(radar_aux.azimuth['data'][inds_ray])\n\n # keep only data we are interested in\n for field_name in field_names:\n field_aux = radar_aux.fields[field_name]['data'][:, inds_rng]\n field_aux = field_aux[inds_ray, :]\n\n vals, _ = compute_directional_stats(\n field_aux, avg_type=avg_type, nvalid_min=nvalid_min, axis=0)\n\n fields_dict[field_name]['data'][sweep, :] = vals\n\n if angle is None:\n radar_rhi.fixed_angle['data'] 
= np.array([np.mean(fixed_angle)])\n else:\n radar_rhi.fixed_angle['data'] = np.array([angle])\n radar_rhi.azimuth['data'] *= radar_rhi.fixed_angle['data'][0]\n\n for field_name in field_names:\n radar_rhi.add_field(field_name, fields_dict[field_name])\n\n # prepare for exit\n new_dataset = {'radar_out': radar_rhi}\n\n return new_dataset, ind_rad\n\n\ndef process_radar_resampling(procstatus, dscfg, radar_list=None):\n \"\"\"\n Resamples the radar data to mimic another radar with different geometry\n and antenna pattern\n\n Parameters\n ----------\n procstatus : int\n Processing status: 0 initializing, 1 processing volume,\n 2 post-processing\n dscfg : dictionary of dictionaries\n datatype : list of string. Dataset keyword\n The input data types\n antennaType : str. Dataset keyword\n Type of antenna of the radar we want to get the view from. Can\n be AZIMUTH, ELEVATION, LOWBEAM, HIGHBEAM\n par_azimuth_antenna : dict. Global keyword\n Dictionary containing the parameters of the PAR azimuth antenna,\n i.e. name of the file with the antenna elevation pattern and fixed\n antenna angle\n par_elevation_antenna : dict. Global keyword\n Dictionary containing the parameters of the PAR elevation antenna,\n i.e. name of the file with the antenna azimuth pattern and fixed\n antenna angle\n asr_lowbeam_antenna : dict. Global keyword\n Dictionary containing the parameters of the ASR low beam antenna,\n i.e. name of the file with the antenna elevation pattern and fixed\n antenna angle\n asr_highbeam_antenna : dict. Global keyword\n Dictionary containing the parameters of the ASR high beam antenna,\n i.e. name of the file with the antenna elevation pattern and fixed\n antenna angle\n target_radar_pos : dict. Global keyword\n Dictionary containing the latitude, longitude and altitude of\n the radar we want to get the view from. If not specifying it will\n assume the radar is collocated\n change_antenna_pattern : Bool. Dataset keyword\n If true the target radar has a different antenna pattern than the\n observations radar. Default True\n rhi_resolution : Bool. Dataset keyword\n Resolution of the synthetic RHI used to compute the data as viewed\n from the synthetic radar [deg]. Default 0.5\n max_altitude : float. Dataset keyword\n Max altitude of the data to use when computing the view from the\n synthetic radar [m MSL]. Default 12000.\n latlon_tol : float. Dataset keyword\n The tolerance in latitude and longitude to determine which\n synthetic radar gates are co-located with real radar gates [deg].\n Default 0.04\n alt_tol : float. Dataset keyword\n The tolerance in altitude to determine which synthetic\n radar gates are co-located with real radar gates [m]. Default 1000.\n distance_upper_bound : float. Dataset keyword\n The maximum distance where to look for a neighbour when\n determining which synthetic radar gates are co-located with real\n radar gates [m]. Default 1000.\n use_cKDTree : Bool. Dataset keyword\n Which function to use to find co-located real radar gates with the\n synthetic radar. If True a function using cKDTree from\n scipy.spatial is used. This function uses parameter\n distance_upper_bound. If False a native implementation is used\n that takes as parameters latlon_tol and alt_tol. Default True.\n pattern_thres : float. Dataset keyword\n The minimum of the sum of the weights given to each value in order\n to consider the weighted quantile valid. It is related to the\n number of valid data points\n data_is_log : dict. 
Dataset keyword\n Dictionary specifying for each field if it is in log (True) or\n linear units (False). Default False\n use_nans : dict. Dataset keyword\n Dictionary specifying whether the nans have to be used in the\n computation of the statistics for each field. Default False\n nan_value : dict. Dataset keyword\n Dictionary with the value to use to substitute the NaN values when\n computing the statistics of each field. Default 0\n moving_angle_min, moving_angle_max: float. Dataset keyword\n The minimum and maximum azimuth angle (deg) of the target radar.\n Default 0, 360.\n ray_res: float\n Ray resolution (deg). Default 1 deg.\n rng_min, rng_max:\n The minimum and maximum range of the target radar (m).\n Default 0, 100000\n rng_res : float\n The target radar range resolution (m). Default 100.\n radar_list : list of Radar objects\n Optional. list of radar objects\n\n Returns\n -------\n new_dataset : dict\n dictionary containing the new radar\n ind_rad : int\n radar index\n \"\"\"\n\n if procstatus != 1:\n return None, None\n\n # Process\n field_names_aux = []\n datatypes = []\n for datatypedescr in dscfg['datatype']:\n radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)\n field_names_aux.append(get_fieldname_pyart(datatype))\n datatypes.append(datatype)\n\n ind_rad = int(radarnr[5:8])-1\n if ((radar_list is None) or (radar_list[ind_rad] is None)):\n warn('ERROR: No valid radar found')\n return None, None\n radar = deepcopy(radar_list[ind_rad])\n\n field_names = []\n for field_name in field_names_aux:\n if field_name not in radar.fields:\n warn('Field '+field_name+' not in observations radar object')\n continue\n field_names.append(field_name)\n\n if not dscfg['initialized']:\n if 'antennaType' not in dscfg:\n raise Exception(\"ERROR: Undefined 'antennaType' for dataset '%s'\"\n % dscfg['dsname'])\n if 'configpath' not in dscfg:\n raise Exception(\"ERROR: Undefined 'configpath' for dataset '%s'\"\n % dscfg['dsname'])\n if 'target_radar_pos' not in dscfg:\n radar_antenna_atsameplace = True\n warn('No target radar position specified. 
' +\n 'The radars are assumed co-located')\n else:\n radar_antenna_atsameplace = False\n\n if dscfg['antennaType'] == 'AZIMUTH':\n is_azimuth_antenna = True\n info = 'parAzAnt'\n if 'par_azimuth_antenna' not in dscfg:\n raise Exception(\"ERROR: Undefined 'par_azimuth_antenna' for\"\n \" dataset '%s'\" % dscfg['dsname'])\n\n patternfile = dscfg['configpath'] + 'antenna/' \\\n + dscfg['par_azimuth_antenna']['elPatternFile']\n fixed_angle_val = dscfg['par_azimuth_antenna']['fixed_angle']\n\n elif dscfg['antennaType'] == 'ELEVATION':\n is_azimuth_antenna = False\n info = 'parElAnt'\n if 'par_elevation_antenna' not in dscfg:\n raise Exception(\"ERROR: Undefined 'par_elevation_antenna' for\"\n \" dataset '%s'\" % dscfg['dsname'])\n\n patternfile = dscfg['configpath'] + 'antenna/' \\\n + dscfg['par_elevation_antenna']['azPatternFile']\n fixed_angle_val = dscfg['par_elevation_antenna']['fixed_angle']\n\n elif dscfg['antennaType'] == 'LOWBEAM':\n is_azimuth_antenna = True\n info = 'asrLowBeamAnt'\n if 'asr_lowbeam_antenna' not in dscfg:\n raise Exception(\"ERROR: Undefined 'asr_lowbeam_antenna' for\"\n \" dataset '%s'\" % dscfg['dsname'])\n\n patternfile = dscfg['configpath'] + 'antenna/' \\\n + dscfg['asr_lowbeam_antenna']['elPatternFile']\n fixed_angle_val = dscfg['asr_lowbeam_antenna']['fixed_angle']\n\n elif dscfg['antennaType'] == 'HIGHBEAM':\n is_azimuth_antenna = True\n info = 'asrHighBeamAnt'\n if 'asr_highbeam_antenna' not in dscfg:\n raise Exception(\"ERROR: Undefined 'asr_highbeam_antenna' for\"\n \" dataset '%s'\" % dscfg['dsname'])\n\n patternfile = dscfg['configpath'] + 'antenna/' \\\n + dscfg['asr_highbeam_antenna']['elPatternFile']\n patternfile_low = dscfg['configpath'] + 'antenna/' \\\n + dscfg['asr_lowbeam_antenna']['elPatternFile']\n fixed_angle_val = dscfg['asr_highbeam_antenna']['fixed_angle']\n else:\n raise Exception(\"ERROR: Unexpected antenna type '%s' for dataset\"\n \" '%s'\" % (dscfg['antennaType'], dscfg['dsname']))\n\n if isinstance(fixed_angle_val, float):\n fixed_angle_val = [fixed_angle_val]\n\n change_antenna_pattern = dscfg.get('change_antenna_pattern', True)\n\n # Read dataset config parameters:\n weight_threshold = dscfg.get('pattern_thres', 0.)\n\n # Config parameters for processing when the weather radar and the\n # antenna are not at the same place:\n rhi_resolution = dscfg.get('rhi_resolution', 0.5) # [deg]\n max_altitude = dscfg.get('max_altitude', 12000.) # [m]\n latlon_tol = dscfg.get('latlon_tol', 0.04) # [deg]\n alt_tol = dscfg.get('alt_tol', 1000.) # [m]\n distance_upper_bound = dscfg.get('distance_upper_bound', 1000.)\n use_cKDTree = dscfg.get('use_cKDTree', True)\n quants = np.array(dscfg.get(\n 'quants', [0.05, 0.1, 0.2, 0.5, 0.8, 0.9, 0.95]))\n\n target_radar = _create_target_radar(\n radar, dscfg, fixed_angle_val, info, field_names,\n change_antenna_pattern=change_antenna_pattern,\n quantiles=100*quants)\n\n # Get antenna pattern and make weight vector\n try:\n if info == 'asrHighBeamAnt':\n antpattern = read_antenna_pattern(\n patternfile, linear=True, twoway=False)\n antpattern_low = read_antenna_pattern(\n patternfile_low, linear=True, twoway=False)\n antpattern['attenuation'] *= antpattern_low['attenuation']\n else:\n antpattern = read_antenna_pattern(patternfile, linear=True,\n twoway=True)\n except Exception as ee:\n warn(str(ee))\n raise\n\n pattern_angles = antpattern['angle'] + fixed_angle_val[0]\n if not is_azimuth_antenna:\n pattern_angles[pattern_angles < 0] += 360.\n pattern_angles[pattern_angles >= 360.] 
-= 360.\n\n if radar_antenna_atsameplace:\n if is_azimuth_antenna:\n scan_angles = np.sort(np.unique(\n radar.elevation['data'].round(decimals=1)))\n else:\n scan_angles = np.sort(np.unique(\n radar.azimuth['data'].round(decimals=1)))\n else:\n scan_angles = np.arange(0, 90, rhi_resolution, dtype=float)\n\n weightvec = np.empty(scan_angles.size, dtype=float)\n for kk in range(scan_angles.size):\n ind = np.argmin(np.abs(pattern_angles - scan_angles[kk]))\n weightvec[kk] = antpattern['attenuation'][ind]\n\n data_is_log = dict()\n use_nans = dict()\n nan_value = dict()\n for datatype, field_name in zip(datatypes, field_names):\n data_is_log.update({field_name: False})\n if 'data_is_log' in dscfg:\n if datatype in dscfg['data_is_log']:\n data_is_log[field_name] = (\n dscfg['data_is_log'][datatype] != 0)\n else:\n warn('Units type for data type '+datatype +\n ' not specified. Assumed linear')\n\n use_nans.update({field_name: False})\n if 'use_nans' in dscfg:\n if datatype in dscfg['use_nans']:\n use_nans[field_name] = (\n dscfg['use_nans'][datatype] != 0)\n else:\n warn('Use of nans not specified for data type '+datatype +\n ' not specified. Assumed not used')\n\n nan_value.update({field_name: 0.})\n if 'nan_value' in dscfg:\n if datatype in dscfg['nan_value']:\n nan_value[field_name] = dscfg['nan_value'][datatype]\n else:\n warn('NaN value not specified for data type '+datatype +\n ' not specified. Assumed 0')\n\n # Persistent data structure\n trdict = dict({\n 'target_radar': target_radar,\n 'is_azimuth_antenna': is_azimuth_antenna,\n 'info': info,\n 'scan_angles': scan_angles,\n 'radar_antenna_atsameplace': radar_antenna_atsameplace,\n 'weightvec': weightvec,\n 'quantiles': quants,\n 'use_nans': use_nans,\n 'nan_value': nan_value,\n 'weight_threshold': weight_threshold,\n 'max_altitude': max_altitude,\n 'latlon_tol': latlon_tol,\n 'alt_tol': alt_tol,\n 'distance_upper_bound': distance_upper_bound,\n 'use_cKDTree': use_cKDTree,\n 'data_is_log': data_is_log,\n 'change_antenna_pattern': change_antenna_pattern})\n\n dscfg['global_data'] = trdict\n dscfg['initialized'] = True\n # end init\n else:\n # init already done\n trdict = dscfg['global_data']\n\n # update time of target radar\n trdict['target_radar'].time = deepcopy(radar.time)\n time_data = np.sort(trdict['target_radar'].time['data'])\n time_res = time_data[1]-time_data[0]\n trdict['target_radar'].time['data'] = np.arange(\n trdict['target_radar'].nrays)*time_res\n\n # reset field values\n for field_name in trdict['target_radar'].fields.keys():\n if 'npoints' in field_name:\n trdict['target_radar'].fields[field_name]['data'] = (\n np.ma.zeros(\n (trdict['target_radar'].nrays,\n trdict['target_radar'].ngates), dtype=np.int32))\n continue\n trdict['target_radar'].fields[field_name]['data'] = (\n np.ma.masked_all(\n (trdict['target_radar'].nrays,\n trdict['target_radar'].ngates)))\n\n target_radar = _get_values_antenna_pattern(radar, trdict, field_names)\n\n if target_radar is None:\n return None, None\n\n new_dataset = {'radar_out': target_radar}\n\n return new_dataset, ind_rad\n\n\ndef _get_values_antenna_pattern(radar, tadict, field_names):\n \"\"\"\n Get the values of a synthetic radar\n\n Parameters\n ----------\n radar : radar object\n The radar volume with the data\n tadict : dict\n A dictionary containing parameters useful for radar re-sampling\n field_names : list of str\n list of names of the radar field\n\n Returns\n -------\n target_radar : radar object\n The synthetic radar\n\n \"\"\"\n is_azimuth_antenna = 
tadict['is_azimuth_antenna']\n scan_angles = tadict['scan_angles']\n radar_antenna_atsameplace = tadict['radar_antenna_atsameplace']\n nan_value = tadict['nan_value']\n use_nans = tadict['use_nans']\n weight_threshold = tadict['weight_threshold']\n target_radar = tadict['target_radar']\n max_altitude = tadict['max_altitude']\n latlon_tol = tadict['latlon_tol']\n alt_tol = tadict['alt_tol']\n distance_upper_bound = tadict['distance_upper_bound']\n use_cKDTree = tadict['use_cKDTree']\n data_is_log = tadict['data_is_log']\n change_antenna_pattern = tadict['change_antenna_pattern']\n\n # find closest radar gate to target\n x_radar, y_radar, z_radar = _put_radar_in_swiss_coord(radar)\n x_target, y_target, z_target = _put_radar_in_swiss_coord(target_radar)\n\n tree = cKDTree(\n np.transpose(\n (x_radar.flatten(), y_radar.flatten(), z_radar.flatten())),\n compact_nodes=False, balanced_tree=False)\n _, ind_vec = tree.query(np.transpose(\n (x_target.flatten(), y_target.flatten(), z_target.flatten())), k=1)\n\n # temporary solution to get right time:\n target_radar.time['data'][:] = radar.time['data'][0]\n\n if not change_antenna_pattern:\n for field_name in field_names:\n if field_name not in radar.fields:\n warn('Field '+field_name+' not in observations radar object')\n continue\n\n values = radar.fields[field_name]['data'].flatten()\n target_radar.fields[field_name]['data'][:] = (\n values[ind_vec].reshape(\n target_radar.nrays, target_radar.ngates))\n\n return target_radar\n\n # Find closest azimuth and elevation ray to target radar\n rad_ind_rays, rad_ind_rngs = np.unravel_index(\n ind_vec, (radar.nrays, radar.ngates))\n for sample, (rad_ind_ray, rad_ind_rng) in enumerate(\n zip(rad_ind_rays, rad_ind_rngs)):\n\n # measure time\n tstart = time()\n\n trad_ind_ray, trad_ind_rng = np.unravel_index(\n sample, (target_radar.nrays, target_radar.ngates))\n\n if radar_antenna_atsameplace:\n # ==============================================================\n # Radar and scanning antenna are at the SAME place\n # ==============================================================\n\n # ==============================================================\n # Get sample at bin\n if is_azimuth_antenna:\n angles = radar.azimuth['data']\n angles_scan = radar.elevation['data']\n ray_angle = radar.azimuth['data'][rad_ind_ray]\n else:\n angles = radar.elevation['data']\n angles_scan = radar.azimuth['data']\n ray_angle = radar.elevation['data'][rad_ind_ray]\n\n d_angle = np.abs(angles - ray_angle)\n ray_inds = np.where(d_angle < 0.09)[0]\n angles_sortind = np.argsort(angles_scan[ray_inds])\n\n ray_inds = ray_inds[angles_sortind]\n angles_sorted = angles_scan[ray_inds]\n\n # Set default values\n if ((scan_angles.size != angles_sorted.size) or\n (np.max(np.abs(scan_angles - angles_sorted)) > 0.1)):\n warn(\"Scan angle mismatch!\")\n continue\n\n w_vec = tadict['weightvec']\n for field_name in field_names:\n if field_name not in radar.fields:\n warn(\"Datatype '%s' not available in radar data\" %\n field_name)\n continue\n values = radar.fields[field_name]['data'][\n ray_inds, rad_ind_rng]\n if use_nans[field_name]:\n values_ma = np.ma.getmaskarray(values)\n values[values_ma] = nan_value[field_name]\n\n try:\n (avg, qvals, nvals_valid) = quantiles_weighted(\n values,\n weight_vector=deepcopy(w_vec),\n quantiles=tadict['quantiles'],\n weight_threshold=weight_threshold,\n data_is_log=data_is_log[field_name])\n except Exception as ee:\n warn(str(ee))\n continue\n\n if avg is None:\n continue\n\n # average field\n 
target_radar.fields['avg_'+field_name]['data'][\n trad_ind_ray, trad_ind_rng] = avg\n \n \n\n # npoints field\n target_radar.fields['npoints_'+field_name]['data'][\n trad_ind_ray, trad_ind_rng] = nvals_valid\n \n \n\n # quantile fields\n for quant, val in zip(tadict['quantiles'], qvals):\n if val is None:\n continue\n quant_field = (\n 'quant'+'{:02d}'.format(int(100*quant))+'_' +\n field_name)\n target_radar.fields[quant_field]['data'][\n trad_ind_ray, trad_ind_rng] = val\n \n \n else:\n # ================================================================\n # Radar and scanning antenna are NOT at the same place\n # ================================================================\n ray_inds, rng_inds, w_inds = _get_gates_antenna_pattern(\n radar, target_radar,\n target_radar.azimuth['data'][trad_ind_ray],\n target_radar.range['data'][trad_ind_rng],\n target_radar.time['data'][trad_ind_ray], scan_angles,\n alt_tol=alt_tol, latlon_tol=latlon_tol,\n max_altitude=max_altitude,\n distance_upper_bound=distance_upper_bound,\n use_cKDTree=use_cKDTree)\n\n w_vec = tadict['weightvec'][w_inds]\n for field_name in field_names:\n if field_name not in radar.fields:\n warn(\"Datatype '%s' not available in radar data\" %\n field_name)\n continue\n values = radar.fields[field_name]['data'][ray_inds, rng_inds]\n if use_nans[field_name]:\n values_ma = np.ma.getmaskarray(values)\n values[values_ma] = nan_value[field_name]\n\n try:\n (avg, qvals, nvals_valid) = quantiles_weighted(\n values,\n weight_vector=deepcopy(w_vec),\n quantiles=tadict['quantiles'],\n weight_threshold=weight_threshold,\n data_is_log=data_is_log[field_name])\n except Exception as ee:\n warn(str(ee))\n continue\n\n if avg is None:\n continue\n\n # average field\n target_radar.fields['avg_'+field_name]['data'][\n trad_ind_ray, trad_ind_rng] = avg\n \n \n\n # npoints field\n target_radar.fields['npoints_'+field_name]['data'][\n trad_ind_ray, trad_ind_rng] = nvals_valid\n \n \n \n\n # quantile fields\n for quant, val in zip(tadict['quantiles'], qvals):\n if val is None:\n continue\n quant_field = (\n 'quant'+'{:02d}'.format(int(100*quant))+'_' +\n field_name)\n target_radar.fields[quant_field]['data'][\n trad_ind_ray, trad_ind_rng] = val\n\n tend = time()\n\n print(\n 'original radar indices (azi, rng): '+str(rad_ind_ray)+', ' +\n str(rad_ind_rng) +\n ' target radar indices (azi, rng): '+str(trad_ind_ray)+', ' +\n str(trad_ind_rng) +\n ' Samples done: '+str(sample)+'/'+str(rad_ind_rngs.size) +\n ' Time used: '+str(tend-tstart),\n end=\"\\r\", flush=True)\n\n return target_radar\n\n\ndef _create_target_radar(radar, dscfg, fixed_angle_val, info, field_names,\n change_antenna_pattern=False, quantiles=[50]):\n \"\"\"\n Creates the target radar\n\n Parameters\n ----------\n radar : radar object\n the radar object containing the observed data\n dscfg : dict\n dict with the configuration\n fixed_angle_val : array of floats\n array containing the fixed angles\n info : str\n String with info on the type of antenna\n field_names : list of str\n the list of field names that the target radar will contain\n change_antenna_pattern : bool\n Whether the antenna pattern of the target radar is different from the\n observations radar\n quantiles : list of floats\n the quantiles to be computed if the target radar has a different\n antenna pattern\n\n Returns\n -------\n target_radar : radar object\n The target radar\n\n \"\"\"\n # Parameters to create the new radar\n moving_angle_min = dscfg.get('moving_angle_min', 0.)\n moving_angle_max = 
dscfg.get('moving_angle_max', 359.)\n ray_res = dscfg.get('ray_res', 1.)\n rng_min = dscfg.get('rng_min', 0.)\n rng_max = dscfg.get('rng_max', 100000.)\n rng_res = dscfg.get('rng_res', 100.)\n\n # metadata needed\n _time = get_metadata('time')\n _range = get_metadata('range')\n sweep_number = get_metadata('sweep_number')\n sweep_mode = get_metadata('sweep_mode')\n fixed_angle = get_metadata('fixed_angle')\n sweep_start_ray_index = get_metadata('sweep_start_ray_index')\n sweep_end_ray_index = get_metadata('sweep_end_ray_index')\n azimuth = get_metadata('azimuth')\n elevation = get_metadata('elevation')\n metadata = dict()\n\n latitude = deepcopy(radar.latitude)\n longitude = deepcopy(radar.longitude)\n altitude = deepcopy(radar.altitude)\n if 'target_radar_pos' in dscfg:\n latitude['data'] = np.array(\n [dscfg['target_radar_pos']['latitude']], dtype=np.float)\n longitude['data'] = np.array(\n [dscfg['target_radar_pos']['longitude']], dtype=np.float)\n altitude['data'] = np.array(\n [dscfg['target_radar_pos']['altitude']], dtype=np.float)\n\n _range['data'] = np.arange(rng_min, rng_max+rng_res, rng_res)\n ngates = _range['data'].size\n\n fixed_angle['data'] = np.array(fixed_angle_val)\n nsweeps = fixed_angle['data'].size\n if info in ('parElAnt', 'asrLowBeamAnt', 'asrHighBeamAnt'):\n scan_type = 'ppi'\n sweep_mode['data'] = np.array(nsweeps*['azimuth_surveillance'])\n else:\n scan_type = 'rhi'\n sweep_mode['data'] = np.array(nsweeps*['elevation_surveillance'])\n\n moving_angle = np.arange(\n moving_angle_min, moving_angle_max+ray_res, ray_res)\n\n nrays = moving_angle.size*nsweeps\n sweep_start_ray_index['data'] = np.empty((nsweeps), dtype=np.int32)\n sweep_end_ray_index['data'] = np.empty((nsweeps), dtype=np.int32)\n sweep_number['data'] = np.arange(nsweeps)\n for sweep in range(nsweeps):\n sweep_start_ray_index['data'][sweep] = sweep*nrays\n sweep_end_ray_index['data'][sweep] = (sweep+1)*(nrays-1)\n\n elevation['data'] = np.empty((nrays), dtype=float)\n azimuth['data'] = np.empty((nrays), dtype=float)\n if scan_type == 'ppi':\n for sweep, (start_ray, end_ray) in enumerate(zip(\n sweep_start_ray_index['data'],\n sweep_end_ray_index['data'])):\n azimuth['data'][start_ray:end_ray+1] = moving_angle\n elevation['data'][start_ray:end_ray+1] = (\n fixed_angle['data'][sweep])\n else:\n for sweep, (start_ray, end_ray) in enumerate(zip(\n sweep_start_ray_index['data'],\n sweep_end_ray_index['data'])):\n elevation['data'][start_ray:end_ray+1] = moving_angle\n azimuth['data'][start_ray:end_ray+1] = fixed_angle['data'][sweep]\n\n _time = deepcopy(radar.time)\n time_data = np.sort(_time['data'])\n time_res = time_data[1]-time_data[0]\n _time['data'] = np.arange(nrays)*time_res\n\n fields = dict()\n for field_name in field_names:\n if not change_antenna_pattern:\n fields.update({field_name: get_metadata(field_name)})\n fields[field_name]['data'] = np.ma.masked_all((nrays, ngates))\n continue\n\n # average field\n field_name_aux = 'avg_'+field_name\n fields.update({field_name_aux: get_metadata(field_name_aux)})\n fields[field_name_aux]['data'] = np.ma.masked_all((nrays, ngates))\n\n # npoints field\n field_name_aux = 'npoints_'+field_name\n fields.update({field_name_aux: get_metadata(field_name_aux)})\n fields[field_name_aux]['data'] = np.ma.zeros(\n (nrays, ngates), dtype=np.int32)\n\n # quantile fields\n for quant in quantiles:\n field_name_aux = (\n 'quant'+'{:02d}'.format(int(quant))+'_'+field_name)\n fields.update({field_name_aux: get_metadata(field_name_aux)})\n fields[field_name_aux]['data'] = 
np.ma.masked_all((nrays, ngates))\n\n target_radar = Radar(\n _time, _range, fields, metadata, scan_type, latitude, longitude,\n altitude, sweep_number, sweep_mode, fixed_angle,\n sweep_start_ray_index, sweep_end_ray_index, azimuth, elevation)\n\n return target_radar\n", "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n================================================\ncommon_colocated_gates\n================================================\n\nThis program reads colocated gates files from two radars\nand creates a new file with the gates that are common to\nboth radars\n\n\"\"\"\n\n# Author: fvj\n# License: BSD 3 clause\n\nimport datetime\nimport atexit\nimport numpy as np\nimport pandas as pd\nfrom copy import deepcopy\n\nfrom pyrad.io import read_colocated_gates, write_colocated_gates\n\nprint(__doc__)\n\n\ndef main():\n \"\"\"\n \"\"\"\n\n file_path = (\n '/srn/analysis/pyrad_products/rad4alp_intercomp/colocated_gates/')\n rad1_vec = ['A', 'A', 'A', 'A', 'D', 'D', 'D', 'L', 'L', 'P']\n rad2_vec = ['D', 'L', 'P', 'W', 'L', 'P', 'W', 'P', 'W', 'W']\n\n print(\"====== common colocated gates started: %s\" %\n datetime.datetime.utcnow().strftime(\"%Y-%m-%d %H:%M:%S\"))\n atexit.register(_print_end_msg,\n \"====== common colocated gates finished: \")\n\n for i, rad1 in enumerate(rad1_vec):\n rad2 = rad2_vec[i]\n\n print('Radars: '+rad1+' '+rad2)\n fname1 = (file_path+'PL'+rad1+'_'+'PL'+rad2 +\n '/info_COLOCATED_GATES_PL'+rad1+'_PL'+rad2+'.csv')\n fname2 = (file_path+'PL'+rad2+'_'+'PL'+rad1 +\n '/info_COLOCATED_GATES_PL'+rad2+'_PL'+rad1+'.csv')\n\n (rad1_ray_ind, rad1_rng_ind, rad1_ele, rad1_azi, rad1_rng,\n rad2_ray_ind, rad2_rng_ind, rad2_ele, rad2_azi, rad2_rng) = (\n read_colocated_gates(fname1))\n\n print('Number of gates rad1-rad2 ', np.shape(rad1_ray_ind))\n\n (rad2_ray_ind_aux, rad2_rng_ind_aux, rad2_ele_aux, rad2_azi_aux,\n rad2_rng_aux, rad1_ray_ind_aux, rad1_rng_ind_aux, rad1_ele_aux,\n rad1_azi_aux, rad1_rng_aux) = read_colocated_gates(fname2)\n\n print('Number of gates rad2-rad1 ', np.shape(rad2_ray_ind_aux))\n\n # make a pool of data\n rad1_ray_ind = np.ma.concatenate((rad1_ray_ind, rad1_ray_ind_aux))\n rad1_rng_ind = np.ma.concatenate((rad1_rng_ind, rad1_rng_ind_aux))\n rad1_ele = np.ma.concatenate((rad1_ele, rad1_ele_aux))\n rad1_azi = np.ma.concatenate((rad1_azi, rad1_azi_aux))\n rad1_rng = np.ma.concatenate((rad1_rng, rad1_rng_aux))\n rad2_ray_ind = np.ma.concatenate((rad2_ray_ind, rad2_ray_ind_aux))\n rad2_rng_ind = np.ma.concatenate((rad2_rng_ind, rad2_rng_ind_aux))\n rad2_ele = np.ma.concatenate((rad2_ele, rad2_ele_aux))\n rad2_azi = np.ma.concatenate((rad2_azi, rad2_azi_aux))\n rad2_rng = np.ma.concatenate((rad2_rng, rad2_rng_aux))\n\n print('Total number of gates ', np.shape(rad1_ray_ind))\n\n # create dictionary and put it in pandas framework\n coloc_dict = {\n 'rad1_ray_ind': rad1_ray_ind,\n 'rad1_rng_ind': rad1_rng_ind,\n 'rad1_ele': rad1_ele,\n 'rad1_azi': rad1_azi,\n 'rad1_rng': rad1_rng,\n 'rad2_ray_ind': rad2_ray_ind,\n 'rad2_rng_ind': rad2_rng_ind,\n 'rad2_ele': rad2_ele,\n 'rad2_azi': rad2_azi,\n 'rad2_rng': rad2_rng}\n df = pd.DataFrame(data=coloc_dict)\n\n # keep only duplicated data\n df_common = df[df.duplicated(keep=False)].drop_duplicates()\n common_dict = df_common.to_dict(orient='list')\n\n print('Number of common gates', df_common.shape)\n print('rad1 elev min/max', np.min(common_dict['rad1_ele']),\n np.max(common_dict['rad1_ele']))\n print('rad2 elev min/max', np.min(common_dict['rad2_ele']),\n np.max(common_dict['rad2_ele']))\n\n # write 
resultant output\n fname1_out = (\n file_path+'PL'+rad1+'_'+'PL'+rad2 +\n '/info_common_COLOCATED_GATES_PL'+rad1+'_PL'+rad2+'.csv')\n fname2_out = (\n file_path+'PL'+rad2+'_'+'PL'+rad1 +\n '/info_common_COLOCATED_GATES_PL'+rad2+'_PL'+rad1+'.csv')\n\n rad1_dict = {\n 'rad1_ray_ind': np.asarray(common_dict['rad1_ray_ind']),\n 'rad1_rng_ind': np.asarray(common_dict['rad1_rng_ind']),\n 'rad1_ele': np.asarray(common_dict['rad1_ele']),\n 'rad1_azi': np.asarray(common_dict['rad1_azi']),\n 'rad1_rng': np.asarray(common_dict['rad1_rng']),\n 'rad2_ray_ind': np.asarray(common_dict['rad2_ray_ind']),\n 'rad2_rng_ind': np.asarray(common_dict['rad2_rng_ind']),\n 'rad2_ele': np.asarray(common_dict['rad2_ele']),\n 'rad2_azi': np.asarray(common_dict['rad2_azi']),\n 'rad2_rng': np.asarray(common_dict['rad2_rng'])}\n\n rad2_dict = {\n 'rad1_ray_ind': np.asarray(common_dict['rad2_ray_ind']),\n 'rad1_rng_ind': np.asarray(common_dict['rad2_rng_ind']),\n 'rad1_ele': np.asarray(common_dict['rad2_ele']),\n 'rad1_azi': np.asarray(common_dict['rad2_azi']),\n 'rad1_rng': np.asarray(common_dict['rad2_rng']),\n 'rad2_ray_ind': np.asarray(common_dict['rad1_ray_ind']),\n 'rad2_rng_ind': np.asarray(common_dict['rad1_rng_ind']),\n 'rad2_ele': np.asarray(common_dict['rad1_ele']),\n 'rad2_azi': np.asarray(common_dict['rad1_azi']),\n 'rad2_rng': np.asarray(common_dict['rad1_rng'])}\n\n write_colocated_gates(rad1_dict, fname1_out)\n write_colocated_gates(rad2_dict, fname2_out)\n\n\ndef _print_end_msg(text):\n \"\"\"\n prints end message\n\n Parameters\n ----------\n text : str\n the text to be printed\n\n Returns\n -------\n Nothing\n\n \"\"\"\n print(text + datetime.datetime.utcnow().strftime(\"%Y-%m-%d %H:%M:%S\"))\n\n\n# ---------------------------------------------------------\n# Start main:\n# ---------------------------------------------------------\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.ma.asarray" ], [ "numpy.asarray", "numpy.all", "numpy.argmin", "numpy.mean", "numpy.where", "numpy.ma.getmaskarray", "numpy.arange", "numpy.ma.zeros", "numpy.zeros", "numpy.unravel_index", "numpy.median", "numpy.argsort", "numpy.ma.masked_all", "numpy.array", "numpy.abs", "numpy.sort", "numpy.ones", "numpy.shape", "numpy.empty" ], [ "numpy.min", "numpy.asarray", "pandas.DataFrame", "numpy.max", "numpy.ma.concatenate", "numpy.shape" ] ]
[ { "matplotlib": [], "numpy": [ "1.10", "1.12", "1.11", "1.19", "1.13", "1.16", "1.9", "1.18", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
pistoia/qiskit-aqua
[ "c7900ffdabc1499145739bfab29a392709bee1a0", "c7900ffdabc1499145739bfab29a392709bee1a0" ]
[ "test/test_mct.py", "qiskit/aqua/translators/ising/tsp.py" ]
[ "# -*- coding: utf-8 -*-\n\n# Copyright 2018 IBM.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\nimport unittest\nimport itertools\nimport numpy as np\n\nfrom parameterized import parameterized\nfrom qiskit import QuantumCircuit, QuantumRegister\nfrom qiskit import execute as q_execute\nfrom qiskit.quantum_info import state_fidelity\n\nfrom qiskit.aqua import get_aer_backend\nfrom test.common import QiskitAquaTestCase\n\nnum_controls = [i + 1 for i in range(7)]\nmodes = ['basic', 'advanced', 'noancilla']\n\n\nclass TestMCT(QiskitAquaTestCase):\n @parameterized.expand(\n itertools.product(num_controls, modes)\n )\n def test_mct(self, num_controls, mode):\n c = QuantumRegister(num_controls, name='c')\n o = QuantumRegister(1, name='o')\n subsets = [tuple(range(i)) for i in range(num_controls + 1)]\n for subset in subsets:\n qc = QuantumCircuit(o, c)\n if mode == 'basic':\n if num_controls <= 2:\n num_ancillae = 0\n else:\n num_ancillae = num_controls - 2\n elif mode == 'noancilla':\n num_ancillae = 0\n else:\n if num_controls <= 4:\n num_ancillae = 0\n else:\n num_ancillae = 1\n if num_ancillae > 0:\n a = QuantumRegister(num_ancillae, name='a')\n qc.add_register(a)\n for idx in subset:\n qc.x(c[idx])\n qc.mct(\n [c[i] for i in range(num_controls)],\n o[0],\n [a[i] for i in range(num_ancillae)],\n mode=mode\n )\n for idx in subset:\n qc.x(c[idx])\n\n vec = np.asarray(q_execute(qc, get_aer_backend(\n 'statevector_simulator')).result().get_statevector(qc, decimals=16))\n vec_o = [0, 1] if len(subset) == num_controls else [1, 0]\n f = state_fidelity(vec, np.array(vec_o + [0] * (2 ** (num_controls + num_ancillae + 1) - 2)))\n self.assertAlmostEqual(f, 1)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "# -*- coding: utf-8 -*-\n\n# Copyright 2018 IBM.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\n\"\"\" Convert symmetric TSP instances into Pauli list\nDeal with TSPLIB format. 
It supports only EUC_2D edge weight type.\nSee https://wwwproxy.iwr.uni-heidelberg.de/groups/comopt/software/TSPLIB95/\nand http://elib.zib.de/pub/mp-testdata/tsp/tsplib/tsp/index.html\nDesign the tsp object `w` as a two-dimensional np.array\ne.g., w[i, j] = x means that the length of a edge between i and j is x\nNote that the weights are symmetric, i.e., w[j, i] = x always holds.\n\"\"\"\n\nimport logging\nfrom collections import OrderedDict, namedtuple\n\nimport numpy as np\nimport numpy.random as rand\nfrom qiskit.quantum_info import Pauli\n\nfrom qiskit.aqua import Operator\n\nlogger = logging.getLogger(__name__)\n\n\"\"\"Instance data of TSP\"\"\"\nTspData = namedtuple('TspData', 'name dim coord w')\n\n\ndef calc_distance(coord, name='tmp'):\n assert coord.shape[1] == 2\n dim = coord.shape[0]\n w = np.zeros((dim, dim))\n for i in range(dim):\n for j in range(i + 1, dim):\n delta = coord[i] - coord[j]\n w[i, j] = np.rint(np.hypot(delta[0], delta[1]))\n w += w.T\n return TspData(name=name, dim=dim, coord=coord, w=w)\n\n\ndef random_tsp(n, low=0, high=100, savefile=None, seed=None, name='tmp'):\n \"\"\"Generate a random instance for TSP.\n\n Args:\n n (int): number of nodes.\n low (float): lower bound of coordinate.\n high (float): uppper bound of coordinate.\n savefile (str or None): name of file where to save graph.\n seed (int or None): random seed - if None, will not initialize.\n name (str): name of an instance\n\n Returns:\n TspData: instance data.\n\n \"\"\"\n assert n > 0\n if seed:\n rand.seed(seed)\n coord = rand.uniform(low, high, (n, 2))\n ins = calc_distance(coord, name)\n if savefile:\n with open(savefile, 'w') as outfile:\n outfile.write('NAME : {}\\n'.format(ins.name))\n outfile.write('COMMENT : random data\\n')\n outfile.write('TYPE : TSP\\n')\n outfile.write('DIMENSION : {}\\n'.format(ins.dim))\n outfile.write('EDGE_WEIGHT_TYPE : EUC_2D\\n')\n outfile.write('NODE_COORD_SECTION\\n')\n for i in range(ins.dim):\n x = ins.coord[i]\n outfile.write('{} {:.4f} {:.4f}\\n'.format(i + 1, x[0], x[1]))\n return ins\n\n\ndef parse_tsplib_format(filename):\n \"\"\"Read graph in TSPLIB format from file.\n\n Args:\n filename (str): name of the file.\n\n Returns:\n TspData: instance data.\n\n \"\"\"\n name = ''\n coord = None\n with open(filename) as infile:\n coord_section = False\n for line in infile:\n if line.startswith('NAME'):\n name = line.split(':')[1]\n name.strip()\n elif line.startswith('TYPE'):\n typ = line.split(':')[1]\n typ.strip()\n if typ != 'TSP':\n logger.warning('This supports only \"TSP\" type. Actual: {}'.format(typ))\n elif line.startswith('DIMENSION'):\n dim = int(line.split(':')[1])\n coord = np.zeros((dim, 2))\n elif line.startswith('EDGE_WEIGHT_TYPE'):\n typ = line.split(':')[1]\n typ.strip()\n if typ != 'EUC_2D':\n logger.warning('This supports only \"EUC_2D\" edge weight. 
Actual: {}'.format(typ))\n elif line.startswith('NODE_COORD_SECTION'):\n coord_section = True\n elif coord_section:\n v = line.split()\n index = int(v[0]) - 1\n coord[index][0] = float(v[1])\n coord[index][1] = float(v[2])\n return calc_distance(coord, name)\n\n\ndef get_tsp_qubitops(ins, penalty=1e5):\n \"\"\"Generate Hamiltonian for TSP of a graph.\n\n Args:\n ins (TspData) : TSP data including coordinates and distances.\n penalty (float) : Penalty coefficient for the constraints\n\n Returns:\n operator.Operator, float: operator for the Hamiltonian and a\n constant shift for the obj function.\n\n \"\"\"\n num_nodes = ins.dim\n num_qubits = num_nodes ** 2\n zero = np.zeros(num_qubits, dtype=np.bool)\n pauli_list = []\n shift = 0\n for i in range(num_nodes):\n for j in range(num_nodes):\n if i == j:\n continue\n for p in range(num_nodes):\n q = (p + 1) % num_nodes\n shift += ins.w[i, j] / 4\n\n zp = np.zeros(num_qubits, dtype=np.bool)\n zp[i * num_nodes + p] = True\n pauli_list.append([-ins.w[i, j] / 4, Pauli(zp, zero)])\n\n zp = np.zeros(num_qubits, dtype=np.bool)\n zp[j * num_nodes + q] = True\n pauli_list.append([-ins.w[i, j] / 4, Pauli(zp, zero)])\n\n zp = np.zeros(num_qubits, dtype=np.bool)\n zp[i * num_nodes + p] = True\n zp[j * num_nodes + q] = True\n pauli_list.append([ins.w[i, j] / 4, Pauli(zp, zero)])\n\n for i in range(num_nodes):\n for p in range(num_nodes):\n zp = np.zeros(num_qubits, dtype=np.bool)\n zp[i * num_nodes + p] = True\n pauli_list.append([penalty, Pauli(zp, zero)])\n shift += -penalty\n\n for p in range(num_nodes):\n for i in range(num_nodes):\n for j in range(i):\n shift += penalty / 2\n\n zp = np.zeros(num_qubits, dtype=np.bool)\n zp[i * num_nodes + p] = True\n pauli_list.append([-penalty / 2, Pauli(zp, zero)])\n\n zp = np.zeros(num_qubits, dtype=np.bool)\n zp[j * num_nodes + p] = True\n pauli_list.append([-penalty / 2, Pauli(zp, zero)])\n\n zp = np.zeros(num_qubits, dtype=np.bool)\n zp[i * num_nodes + p] = True\n zp[j * num_nodes + p] = True\n pauli_list.append([penalty / 2, Pauli(zp, zero)])\n\n for i in range(num_nodes):\n for p in range(num_nodes):\n for q in range(p):\n shift += penalty / 2\n\n zp = np.zeros(num_qubits, dtype=np.bool)\n zp[i * num_nodes + p] = True\n pauli_list.append([-penalty / 2, Pauli(zp, zero)])\n\n zp = np.zeros(num_qubits, dtype=np.bool)\n zp[i * num_nodes + q] = True\n pauli_list.append([-penalty / 2, Pauli(zp, zero)])\n\n zp = np.zeros(num_qubits, dtype=np.bool)\n zp[i * num_nodes + p] = True\n zp[i * num_nodes + q] = True\n pauli_list.append([penalty / 2, Pauli(zp, zero)])\n shift += 2 * penalty * num_nodes\n return Operator(paulis=pauli_list), shift\n\n\ndef tsp_value(z, w):\n \"\"\"Compute the TSP value of a solution.\n\n Args:\n z (list[int]): list of cities.\n w (numpy.ndarray): adjacency matrix.\n\n Returns:\n float: value of the cut.\n \"\"\"\n ret = 0.0\n for i in range(len(z) - 1):\n ret += w[z[i], z[i + 1]]\n ret += w[z[-1], z[0]]\n return ret\n\n\ndef tsp_feasible(x):\n \"\"\"Check whether a solution is feasible or not.\n\n Args:\n x (numpy.ndarray) : binary string as numpy array.\n\n Returns:\n bool: feasible or not.\n \"\"\"\n n = int(np.sqrt(len(x)))\n y = np.zeros((n, n))\n for i in range(n):\n for p in range(n):\n y[i, p] = x[i * n + p]\n for i in range(n):\n if sum(y[i, p] for p in range(n)) != 1:\n return False\n for p in range(n):\n if sum(y[i, p] for i in range(n)) != 1:\n return False\n return True\n\n\ndef get_tsp_solution(x):\n \"\"\"Get graph solution from binary string.\n\n Args:\n x (numpy.ndarray) : binary 
string as numpy array.\n\n Returns:\n list[int]: sequence of cities to traverse.\n \"\"\"\n n = int(np.sqrt(len(x)))\n z = []\n for p in range(n):\n for i in range(n):\n if x[i * n + p] >= 0.999:\n assert len(z) == p\n z.append(i)\n return z\n\n\ndef sample_most_likely(state_vector):\n \"\"\"Compute the most likely binary string from state vector.\n\n Args:\n state_vector (numpy.ndarray or dict): state vector or counts.\n\n Returns:\n numpy.ndarray: binary string as numpy.ndarray of ints.\n \"\"\"\n if isinstance(state_vector, dict) or isinstance(state_vector, OrderedDict):\n # get the binary string with the largest count\n binary_string = sorted(state_vector.items(), key=lambda kv: kv[1])[-1][0]\n x = np.asarray([int(y) for y in reversed(list(binary_string))])\n return x\n else:\n n = int(np.log2(state_vector.shape[0]))\n k = np.argmax(np.abs(state_vector))\n x = np.zeros(n)\n for i in range(n):\n x[i] = k % 2\n k >>= 1\n return x\n" ]
[ [ "numpy.array" ], [ "numpy.log2", "numpy.abs", "numpy.random.seed", "numpy.random.uniform", "numpy.zeros", "numpy.hypot" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mogorman/openpilot-1
[ "1d19166992149a7dea3536644d67e9e0e2e385fd" ]
[ "selfdrive/controls/lib/longitudinal_planner.py" ]
[ "#!/usr/bin/env python3\nimport math\nimport numpy as np\nfrom common.numpy_fast import interp\nfrom common.cached_params import CachedParams\n\nimport cereal.messaging as messaging\nfrom common.realtime import DT_MDL\nfrom selfdrive.modeld.constants import T_IDXS\nfrom selfdrive.config import Conversions as CV\nfrom selfdrive.controls.lib.longcontrol import LongCtrlState\nfrom selfdrive.controls.lib.longitudinal_mpc_lib.long_mpc import LongitudinalMpc\nfrom selfdrive.controls.lib.longitudinal_mpc_lib.long_mpc import T_IDXS as T_IDXS_MPC\nfrom selfdrive.controls.lib.drive_helpers import V_CRUISE_MAX, CONTROL_N\nfrom selfdrive.swaglog import cloudlog\n\nLON_MPC_STEP = 0.2 # first step is 0.2s\nAWARENESS_DECEL = -0.2 # car smoothly decel at .2m/s^2 when user is distracted\nA_CRUISE_MIN = -1.2\nA_CRUISE_MAX_VALS = [1.2, 1.2, 0.8, 0.6]\nA_CRUISE_MAX_BP = [0., 15., 25., 40.]\n\n# Lookup table for turns\n_A_TOTAL_MAX_V = [1.7, 3.2]\n_A_TOTAL_MAX_BP = [20., 40.]\n\n\ndef get_max_accel(v_ego):\n return interp(v_ego, A_CRUISE_MAX_BP, A_CRUISE_MAX_VALS)\n\n\ndef limit_accel_in_turns(v_ego, angle_steers, a_target, CP):\n \"\"\"\n This function returns a limited long acceleration allowed, depending on the existing lateral acceleration\n this should avoid accelerating when losing the target in turns\n \"\"\"\n\n a_total_max = interp(v_ego, _A_TOTAL_MAX_BP, _A_TOTAL_MAX_V)\n a_y = v_ego**2 * angle_steers * CV.DEG_TO_RAD / (CP.steerRatio * CP.wheelbase)\n a_x_allowed = math.sqrt(max(a_total_max**2 - a_y**2, 0.))\n\n return [a_target[0], min(a_target[1], a_x_allowed)]\n\n\nclass Planner():\n def __init__(self, CP, init_v=0.0, init_a=0.0):\n self.CP = CP\n self.mpc = LongitudinalMpc()\n\n self.fcw = False\n\n self.cachedParams = CachedParams()\n\n self.v_desired = init_v\n self.a_desired = init_a\n self.alpha = np.exp(-DT_MDL/2.0)\n\n self.v_desired_trajectory = np.zeros(CONTROL_N)\n self.a_desired_trajectory = np.zeros(CONTROL_N)\n self.j_desired_trajectory = np.zeros(CONTROL_N)\n\n\n def update(self, sm, CP, lateral_planner):\n v_ego = sm['carState'].vEgo\n a_ego = sm['carState'].aEgo\n\n v_cruise_kph = sm['controlsState'].vCruise\n v_cruise_kph = min(v_cruise_kph, V_CRUISE_MAX)\n v_cruise = v_cruise_kph * CV.KPH_TO_MS\n\n long_control_state = sm['controlsState'].longControlState\n force_slow_decel = sm['controlsState'].forceDecel\n\n enabled = (long_control_state == LongCtrlState.pid) or (long_control_state == LongCtrlState.stopping)\n if not enabled or sm['carState'].gasPressed:\n self.v_desired = v_ego\n self.a_desired = a_ego\n\n # Prevent divergence, smooth in current v_ego\n self.v_desired = self.alpha * self.v_desired + (1 - self.alpha) * v_ego\n self.v_desired = max(0.0, self.v_desired)\n\n accel_limits = [A_CRUISE_MIN, get_max_accel(v_ego)]\n if not self.cachedParams.get('jvePilot.settings.slowInCurves', 5000) == \"1\":\n accel_limits = limit_accel_in_turns(v_ego, sm['carState'].steeringAngleDeg, accel_limits, self.CP)\n\n if force_slow_decel:\n # if required so, force a smooth deceleration\n accel_limits[1] = min(accel_limits[1], AWARENESS_DECEL)\n accel_limits[0] = min(accel_limits[0], accel_limits[1])\n # clip limits, cannot init MPC outside of bounds\n accel_limits[0] = min(accel_limits[0], self.a_desired + 0.05)\n accel_limits[1] = max(accel_limits[1], self.a_desired - 0.05)\n self.mpc.set_accel_limits(accel_limits[0], accel_limits[1])\n self.mpc.set_cur_state(self.v_desired, self.a_desired)\n self.mpc.update(sm['carState'], sm['radarState'], v_cruise)\n self.v_desired_trajectory = 
np.interp(T_IDXS[:CONTROL_N], T_IDXS_MPC, self.mpc.v_solution)\n self.a_desired_trajectory = np.interp(T_IDXS[:CONTROL_N], T_IDXS_MPC, self.mpc.a_solution)\n self.j_desired_trajectory = np.interp(T_IDXS[:CONTROL_N], T_IDXS_MPC[:-1], self.mpc.j_solution)\n\n #TODO counter is only needed because radar is glitchy, remove once radar is gone\n self.fcw = self.mpc.crash_cnt > 5\n if self.fcw:\n cloudlog.info(\"FCW triggered\")\n\n # Interpolate 0.05 seconds and save as starting point for next iteration\n a_prev = self.a_desired\n self.a_desired = float(interp(DT_MDL, T_IDXS[:CONTROL_N], self.a_desired_trajectory))\n self.v_desired = self.v_desired + DT_MDL * (self.a_desired + a_prev)/2.0\n\n if lateral_planner.lateralPlan and self.cachedParams.get('jvePilot.settings.slowInCurves', 5000) == \"1\":\n curvs = list(lateral_planner.lateralPlan.curvatures)\n if len(curvs):\n # find the largest curvature in the solution and use that.\n curv = abs(curvs[-1])\n if curv != 0:\n self.v_desired = float(min(self.v_desired, self.limit_speed_in_curv(sm, curv)))\n\n def publish(self, sm, pm):\n plan_send = messaging.new_message('longitudinalPlan')\n\n plan_send.valid = sm.all_alive_and_valid(service_list=['carState', 'controlsState'])\n\n longitudinalPlan = plan_send.longitudinalPlan\n longitudinalPlan.modelMonoTime = sm.logMonoTime['modelV2']\n longitudinalPlan.processingDelay = (plan_send.logMonoTime / 1e9) - sm.logMonoTime['modelV2']\n\n longitudinalPlan.speeds = [float(x) for x in self.v_desired_trajectory]\n longitudinalPlan.accels = [float(x) for x in self.a_desired_trajectory]\n longitudinalPlan.jerks = [float(x) for x in self.j_desired_trajectory]\n\n longitudinalPlan.hasLead = sm['radarState'].leadOne.status\n longitudinalPlan.longitudinalPlanSource = self.mpc.source\n longitudinalPlan.fcw = self.fcw\n\n pm.send('longitudinalPlan', plan_send)\n\n def limit_speed_in_curv(self, sm, curv):\n v_ego = sm['carState'].vEgo\n a_y_max = 2.975 - v_ego * 0.0375 # ~1.85 @ 75mph, ~2.6 @ 25mph\n\n # drop off\n drop_off = self.cachedParams.get_float('jvePilot.settings.slowInCurves.speedDropOff', 5000)\n if drop_off != 2 and a_y_max > 0:\n a_y_max = np.sqrt(a_y_max) ** drop_off\n\n v_curvature = np.sqrt(a_y_max / np.clip(curv, 1e-4, None))\n model_speed = np.min(v_curvature)\n return model_speed * self.cachedParams.get_float('jvePilot.settings.slowInCurves.speedRatio', 5000)" ]
[ [ "numpy.sqrt", "numpy.min", "numpy.clip", "numpy.interp", "numpy.exp", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
PlaidCloud/public-utilities
[ "1031cb87580bbe110f56455925e483a0ae177fe1" ]
[ "plaidcloud/utilities/tests/test_remote_dimension.py" ]
[ "#!/usr/bin/env python\n# coding=utf-8\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport filecmp\nimport os\nimport unittest\nfrom unittest import TestCase\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.testing import assert_frame_equal\n\nfrom plaidcloud.utilities.connect import create_connection\nfrom plaidcloud.utilities.remote.dimension import Dimensions\nfrom plaidcloud.utilities.remote.dimension import MAIN\nfrom plaidcloud.utilities.remote.dimension import ROOT\n\n__author__ = 'Dave Parsons'\n__copyright__ = 'Copyright 2010-2020, Tartan Solutions, Inc'\n__credits__ = ['Dave Parsons']\n__license__ = 'Proprietary'\n__maintainer__ = 'Dave Parsons'\n__email__ = '[email protected]'\n\n# Folders for comparison\nBASELINE = './dim_baseline/'\nFOLDER = './dim_current/'\n\n\nconn = create_connection(verify_ssl=False)\n\n\nclass TestDimension(TestCase):\n \"\"\"Test Redis Dimension code\"\"\"\n\n def assertFileEqual(self, file1, file2, **kwargs):\n return self.assertTrue(filecmp.cmp(file1, file2, shallow=False))\n\n def assertFrameEqual(self, df1, df2, **kwargs):\n return assert_frame_equal(df1, df2, check_names=True, check_like=True, **kwargs)\n\n def setUp(self):\n if not os.path.exists(BASELINE):\n os.makedirs(BASELINE)\n self.periods = 'periods_rpc_test'\n self.dims = Dimensions(conn=conn)\n self.dim = self.dims.get_dimension(name=self.periods, replace=False)\n return\n\n def test_001_load_hierarchy_main(self):\n df_main = pd.DataFrame(\n [\n [ROOT, 'Year'],\n ['Year', 'Q1'],\n ['Year', 'Q2'],\n ['Year', 'Q3'],\n ['Year', 'Q4'],\n ['Q1', 'January'],\n ['Q1', 'February'],\n ['Q1', 'March'],\n ['Q2', 'April'],\n ['Q2', 'May'],\n ['Q2', 'June'],\n ['Q3', 'July'],\n ['Q3', 'August'],\n ['Q3', 'September'],\n ['Q4', 'October'],\n ['Q4', 'November'],\n ['Q4', 'December'],\n ],\n columns=['ParentName', 'ChildName']\n )\n\n # Clear down the dimension and reload\n self.dim.clear()\n\n # main hierarchy\n df_results = self.dim.load_hierarchy_from_dataframe(df_main, 'ParentName', 'ChildName')\n df_results.to_csv(f'{FOLDER}df_main_load.csv', index=False)\n\n # Create a backup file to allow reloading in tests\n data = self.dims.backup(self.periods)\n with open(f'{FOLDER}periods.yaml', 'w') as file:\n file.write(data)\n\n self.assertFileEqual(f'{FOLDER}df_main_load.csv', f'{BASELINE}df_main_load.csv')\n return\n\n def test_002_save_hierarchy_main(self):\n # main hierarchy\n df = self.dim.save_hierarchy_to_dataframe(MAIN)\n df.drop(labels='index', axis=1, inplace=True)\n df.to_csv(f'{FOLDER}df_main_hierarchy.csv', index=False)\n self.assertFileEqual(f'{FOLDER}df_main_hierarchy.csv', f'{BASELINE}df_main_hierarchy.csv')\n return\n\n def test_003_load_hierarchy_halves(self):\n df_halves = pd.DataFrame(\n [\n [ROOT, 'H1', '~', 'halves'],\n [ROOT, 'H2', '~', 'halves'],\n ['H1', 'Q1', '+', 'halves'],\n ['H1', 'Q2', '+', 'halves'],\n ['H2', 'Q3', '+', 'halves'],\n ['H2', 'Q4', '+', 'halves'],\n ],\n columns=['ParentName', 'ChildName', 'ConsolidationType', 'Hierarchy']\n )\n\n # halves hierarchy\n df_results = self.dim.load_hierarchy_from_dataframe(df_halves, 'ParentName', 'ChildName',\n 'ConsolidationType', hierarchy='Hierarchy')\n df_results.to_csv(f'{FOLDER}df_halves_load.csv', index=False)\n self.assertFileEqual(f'{FOLDER}df_halves_load.csv', f'{BASELINE}df_halves_load.csv')\n return\n\n def test_004_save_hierarchy_halves(self):\n # halves hierarchy\n df = self.dim.save_hierarchy_to_dataframe('halves')\n 
df.drop(labels='index', axis=1, inplace=True)\n df.to_csv(f'{FOLDER}df_halves_hierarchy.csv', index=False)\n self.assertFileEqual(f'{FOLDER}df_halves_hierarchy.csv', f'{BASELINE}df_halves_hierarchy.csv')\n return\n\n def test_005_load_hierarchy_financial(self):\n df_financial = pd.DataFrame(\n [\n [ROOT, 'YTD', '+', 'financial'],\n [ROOT, 'YTG', '+', 'financial'],\n ['YTD', 'January', '+', 'financial'],\n ['YTD', 'February', '+', 'financial'],\n ['YTD', 'March', '+', 'financial'],\n ['YTD', 'April', '+', 'financial'],\n ['YTG', 'May', '-', 'financial'],\n ['YTG', 'June', '-', 'financial'],\n ['YTG', 'July', '-', 'financial'],\n ['YTG', 'August', '-', 'financial'],\n ['YTG', 'September', '-', 'financial'],\n ['YTG', 'October', '-', 'financial'],\n ['YTG', 'November', '-', 'financial'],\n ['YTG', 'December', '-', 'financial'],\n ],\n columns=['ParentName', 'ChildName', 'ConsolidationType', 'Hierarchy']\n )\n\n # financial hierarchy\n df_results = self.dim.load_hierarchy_from_dataframe(df_financial, 'ParentName', 'ChildName',\n 'ConsolidationType', hierarchy='Hierarchy')\n df_results.to_csv(f'{FOLDER}df_financial_load.csv', index=False)\n self.assertFileEqual(f'{FOLDER}df_financial_load.csv', f'{BASELINE}df_financial_load.csv')\n return\n\n def test_006_save_hierarchy_financial(self):\n # financial hierarchy\n df = self.dim.save_hierarchy_to_dataframe('financial')\n df.drop(labels='index', axis=1, inplace=True)\n df.to_csv(f'{FOLDER}df_financial_hierarchy.csv', index=False)\n self.assertFileEqual(f'{FOLDER}df_financial_hierarchy.csv', f'{BASELINE}df_financial_hierarchy.csv')\n return\n\n def test_007_load_hierarchy_errors(self):\n # This dataframe includes specific errors so check out the results dataframe\n df_test = pd.DataFrame(\n [\n ['', '', '+', 'main'],\n [' ', ' ', '+', 'main'],\n ['Q5', '', '+', 'main'],\n [np.NaN, np.NaN, '+', 'main'],\n [None, None, '+', 'main'],\n ['None', 'None', '+', 'main'],\n ['Q5', 'Q5', '+', 'main'],\n ['Q5', ROOT, '+', 'main'],\n ['Q5', 'Donk:tober', '+', 'main'],\n ['Donk:tober', 'Janusday', '+', 'main'],\n ['Year', 'Q5', '+', 'main'],\n ['Year', 'Q5', '+', 'main'],\n ['Q4', 'Badtober', '+', 'halves'],\n ['Q6', 'Craptober', '+', ''],\n ],\n columns=['ParentName', 'ChildName', 'ConsolidationType', 'Hierarchy']\n )\n\n df_results = self.dim.load_hierarchy_from_dataframe(df_test, 'ParentName', 'ChildName',\n 'ConsolidationType', hierarchy='Hierarchy')\n df_results.to_csv(f'{FOLDER}df_complex_load.csv', index=False)\n self.assertFileEqual(f'{FOLDER}df_complex_load.csv', f'{BASELINE}df_complex_load.csv')\n\n return\n\n def test_008_load_save_aliases(self):\n df_aliases = pd.DataFrame(\n [\n ['Trimestre 1', 'French', 'Q1'],\n ['Trimestre 2', 'French', 'Q2'],\n ['Trimestre 3', 'French', 'Q3'],\n ['Trimestre 4', 'French', 'Q4'],\n ['Janvier', 'French', 'January'],\n ['Fevier', 'French', 'February'],\n ['Mars', 'French', 'March'],\n ['Avril', 'French', 'April'],\n ['Mai', 'French', 'May'],\n ['Juin', 'French', 'June'],\n ['Julliet', 'French', 'July'],\n ['Aout', 'French', 'August'],\n ['Septembre', 'French', 'September'],\n ['Octobre', 'French', 'October'],\n ['Novembre', 'French', 'November'],\n ['Decembre', 'French', 'December'],\n ['Haneri 1', 'Welsh', 'H1'],\n ['Haneri 2', 'Welsh', 'H2'],\n ['Ionawr', 'Welsh', 'January'],\n ['Chwefror', 'Welsh', 'February'],\n ['Mawrth', 'Welsh', 'March'],\n ['Ebrill', 'Welsh', 'April'],\n ['Mai', 'Welsh', 'May'],\n ['Mehefin', 'Welsh', 'June'],\n ['Gorffennaf', 'Welsh', 'July'],\n ['Awst', 'Welsh', 'August'],\n ['Medi', 
'Welsh', 'September'],\n ['Hydref', 'Welsh', 'October'],\n ['Tachwedd', 'Welsh', 'November'],\n ['Rhagfyr', 'Welsh', 'December'],\n ['Январь', 'Russian', 'January'],\n ['Февраль', 'Russian', 'February'],\n ['Март', 'Russian', 'March'],\n ['Апрель', 'Russian', 'April'],\n ['Май', 'Russian', 'May'],\n ['Июнь', 'Russian', 'June'],\n ['Июль', 'Russian', 'July'],\n ['Август', 'Russian', 'August'],\n ['Сентябрь', 'Russian', 'September'],\n ['Октябрь', 'Russian', 'October'],\n ['Ноябрь', 'Russian', 'November'],\n ['Декабрь', 'Russian', 'December'],\n ['일월', 'Korean', 'January'],\n ['이월', 'Korean', 'February'],\n ['삼월', 'Korean', 'March'],\n ['사월', 'Korean', 'April'],\n ['오월', 'Korean', 'May'],\n ['유월', 'Korean', 'June'],\n ['칠월', 'Korean', 'July'],\n ['팔월', 'Korean', 'August'],\n ['구월', 'Korean', 'September'],\n ['시월', 'Korean', 'October'],\n ['십일월', 'Korean', 'November'],\n ['십이월', 'Korean', 'December'],\n ['☃️', 'Emoji', 'January'],\n ['💘', 'Emoji', 'February'],\n ['☘️', 'Emoji', 'March'],\n ['☔', 'Emoji', 'April'],\n ['🌺', 'Emoji', 'May'],\n ['🌞', 'Emoji', 'June'],\n ['🍦', 'Emoji', 'July'],\n ['🏖️', 'Emoji', 'August'],\n ['🍎', 'Emoji', 'September'],\n ['🎃', 'Emoji', 'October'],\n ['🍂', 'Emoji', 'November'],\n ['🎅', 'Emoji', 'December'],\n ],\n columns=['AliasValue', 'AliasName', 'NodeName']\n )\n\n # Aliases\n self.dim.load_aliases_from_dataframe(df_aliases, 'NodeName', 'AliasName', 'AliasValue')\n df = self.dim.save_aliases_to_dataframe(None)\n df.drop(labels='index', axis=1, inplace=True)\n df.sort_values(by=['name', 'node', 'value'], axis=0, inplace=True)\n df.to_csv(f'{FOLDER}df_aliases.csv', index=False)\n self.assertFileEqual(f'{FOLDER}df_aliases.csv', f'{BASELINE}df_aliases.csv')\n return\n\n def test_009_load_save_properties(self):\n df_properties = pd.DataFrame(\n [\n ['Magenta', 'Colour', ROOT],\n ['Purple', 'Colour', 'Year'],\n ['Red', 'Colour', 'Q1'],\n ['Orange', 'Colour', 'Q2'],\n ['Green', 'Colour', 'April'],\n ['Green', 'Colour', 'May'],\n ['Blue', 'Colour', 'July'],\n ['Blue', 'Colour', 'August'],\n ['Blue', 'Colour', 'September'],\n ['White', 'Colour', 'Q4'],\n ['Red', 'Colour', 'October'],\n ['Green', 'Colour', 'November'],\n ['Red', 'Colour', 'December'],\n ['Winter', 'Season', 'Q1'],\n ['Spring', 'Season', 'Q2'],\n ['Summer', 'Season', 'Q3'],\n ['Autumn', 'Season', 'Q4'],\n ],\n columns=['PropertyValue', 'PropertyName', 'NodeName']\n )\n\n # Properties\n self.dim.load_properties_from_dataframe(df_properties, 'NodeName', 'PropertyName', 'PropertyValue')\n df = self.dim.save_properties_to_dataframe(None)\n df.drop(labels='index', axis=1, inplace=True)\n df.sort_values(by=['name', 'node', 'value'], axis=0, inplace=True)\n df.to_csv(f'{FOLDER}df_properties.csv', index=False)\n self.assertFileEqual(f'{FOLDER}df_properties.csv', f'{BASELINE}df_properties.csv')\n return\n\n def test_010_load_save_values(self):\n df_values = pd.DataFrame(\n [\n [-10.0, 'Costs', 'January'],\n [-100.0, 'Costs', 'February'],\n [-1000.0, 'Costs', 'March'],\n [-20.0, 'Costs', 'April'],\n [-200.0, 'Costs', 'May'],\n [-2000.0, 'Costs', 'June'],\n [-30.0, 'Costs', 'July'],\n [-300.0, 'Costs', 'August'],\n [-3000.0, 'Costs', 'September'],\n [-40.0, 'Costs', 'October'],\n [-400.0, 'Costs', 'November'],\n [-4000.0, 'Costs', 'December'],\n [10.0, 'Profit', 'January'],\n [100.0, 'Profit', 'February'],\n [1000.0, 'Profit', 'March'],\n [20.0, 'Profit', 'April'],\n [200.0, 'Profit', 'May'],\n [2000.0, 'Profit', 'June'],\n [30.0, 'Profit', 'July'],\n [300.0, 'Profit', 'August'],\n [3000.0, 'Profit', 
'September'],\n [40.0, 'Profit', 'October'],\n [400.0, 'Profit', 'November'],\n [4000.0, 'Profit', 'December'],\n ],\n columns=['Value', 'ValueName', 'NodeName']\n )\n\n # Values\n self.dim.load_values_from_dataframe(df_values, 'NodeName', 'ValueName', 'Value')\n df = self.dim.save_values_to_dataframe(None)\n df.drop(labels='index', axis=1, inplace=True)\n df.sort_values(by=['name', 'node', 'value'], axis=0, inplace=True)\n df.to_csv(f'{FOLDER}df_values.csv', index=False)\n self.assertFileEqual(f'{FOLDER}df_values.csv', f'{BASELINE}df_values.csv')\n return\n\n def test_011_get_hierarchy_dataframe(self):\n df = self.dim.get_hierarchy_dataframe(hierarchy=MAIN)\n df = df.reindex(columns=sorted(df.columns))\n df.to_csv(f'{FOLDER}df_get_hierarchy_main.csv', index=False)\n self.assertFileEqual(f'{FOLDER}df_get_hierarchy_main.csv', f'{BASELINE}df_get_hierarchy_main.csv')\n return\n\n def test_012_get_aliases_dataframe(self):\n df = self.dim.get_aliases_dataframe()\n df = df.reindex(columns=sorted(df.columns))\n df.sort_values(by=list(df.columns), axis=0, inplace=True)\n df.to_csv(f'{FOLDER}df_get_aliases.csv', index=False)\n self.assertFileEqual(f'{FOLDER}df_get_aliases.csv', f'{BASELINE}df_get_aliases.csv')\n return\n\n def test_013_get_attributes_dataframe(self):\n df = self.dim.get_attributes_dataframe()\n df.drop(labels='index', axis=1, inplace=True)\n df = df.reindex(columns=sorted(df.columns))\n df.sort_values(by=list(df.columns), axis=0, inplace=True)\n df.to_csv(f'{FOLDER}df_get_attributes.csv', index=False)\n self.assertFileEqual(f'{FOLDER}df_get_attributes.csv', f'{BASELINE}df_get_attributes.csv')\n return\n\n def test_014_get_consolidation_dataframe(self):\n df = self.dim.get_consolidation_dataframe('Costs', hierarchy=MAIN)\n df.to_csv(f'{FOLDER}df_get_consolidation_costs_main.csv', index=False)\n self.assertFileEqual(f'{FOLDER}df_get_consolidation_costs_main.csv', f'{BASELINE}df_get_consolidation_costs_main.csv')\n return\n\n def test_015_get_properties_dataframe(self):\n df = self.dim.get_properties_dataframe()\n df.drop(labels='index', axis=1, inplace=True)\n df = df.reindex(columns=sorted(df.columns))\n df.sort_values(by=list(df.columns), axis=0, inplace=True)\n df.to_csv(f'{FOLDER}df_get_properties.csv', index=False)\n self.assertFileEqual(f'{FOLDER}df_get_properties.csv', f'{BASELINE}df_get_properties.csv')\n return\n\n def test_016_get_values_dataframe(self):\n df = self.dim.get_values_dataframe()\n df = df.reindex(columns=sorted(df.columns))\n df.sort_values(by=list(df.columns), axis=0, inplace=True)\n df.to_csv(f'{FOLDER}df_get_values.csv', index=False)\n self.assertFileEqual(f'{FOLDER}df_get_values.csv', f'{BASELINE}df_get_values.csv')\n return\n\n def test_017_get_hierarchy_table(self):\n df = self.dim.hierarchy_table(hierarchy=MAIN)\n df = df.reindex(columns=sorted(df.columns))\n df.sort_values(by=list(df.columns), axis=0, inplace=True)\n df.to_csv(f'{FOLDER}df_get_hierarchy_table_main.csv', index=False)\n self.assertFileEqual(f'{FOLDER}df_get_hierarchy_table_main.csv', f'{BASELINE}df_get_hierarchy_table_main.csv')\n return\n\n def test_018_get_all_leaves(self):\n expected = ['April',\n 'August',\n 'December',\n 'February',\n 'January',\n 'Janusday',\n 'July',\n 'June',\n 'March',\n 'May',\n 'November',\n 'October',\n 'September']\n\n nodes = sorted(self.dim.get_all_leaves(hierarchy=MAIN))\n return self.assertListEqual(expected, nodes)\n\n def test_019_get_all_nodes(self):\n expected = ['!!root!!',\n 'April',\n 'August',\n 'December',\n 'Donk-tober',\n 'February',\n 
'January',\n 'Janusday',\n 'July',\n 'June',\n 'March',\n 'May',\n 'November',\n 'October',\n 'Q1',\n 'Q2',\n 'Q3',\n 'Q4',\n 'Q5',\n 'September',\n 'Year']\n\n nodes = sorted(self.dim.get_all_nodes(hierarchy=MAIN))\n return self.assertListEqual(expected, nodes)\n\n def test_020_get_all_parents(self):\n expected = ['!!root!!', 'Donk-tober', 'Q1', 'Q2', 'Q3', 'Q4', 'Q5', 'Year']\n nodes = sorted(self.dim.get_all_parents(hierarchy=MAIN))\n return self.assertListEqual(expected, nodes)\n\n def test_021_get_ancestors(self):\n expected = [[0, 'February'], [1, 'Q1'], [2, 'Year'], [3, '!!root!!']]\n nodes = self.dim.get_ancestors('February', hierarchy=MAIN)\n return self.assertListEqual(expected, nodes)\n\n def test_022_get_ancestor_at_generation(self):\n expected = 'Year'\n node = self.dim.get_ancestor_at_generation('February', 1, hierarchy=MAIN)\n return self.assertEqual(expected, node)\n\n def test_023_get_ancestor_at_level(self):\n expected = 'Year'\n node = self.dim.get_ancestor_at_level('February', 2, hierarchy=MAIN)\n return self.assertEqual(expected, node)\n\n def test_024_get_bottom(self):\n expected = 'March'\n node = self.dim.get_bottom('Q1', hierarchy=MAIN)\n return self.assertEqual(expected, node)\n\n def test_025_get_top(self):\n expected = 'January'\n node = self.dim.get_top('Q1', hierarchy=MAIN)\n return self.assertEqual(expected, node)\n\n def test_026_get_down(self):\n expected = 'March'\n node = self.dim.get_down('Q1', 'February', hierarchy=MAIN)\n return self.assertEqual(expected, node)\n\n def test_027_get_up(self):\n expected = 'January'\n node = self.dim.get_up('Q1', 'February', hierarchy=MAIN)\n return self.assertEqual(expected, node)\n\n def test_028_get_children(self):\n expected = ['January', 'February', 'March']\n nodes = self.dim.get_children('Q1', hierarchy=MAIN)\n return self.assertListEqual(expected, nodes)\n\n def test_029_get_children_count(self):\n expected = 3\n count = self.dim.get_children_count('Q1', hierarchy=MAIN)\n return self.assertEqual(expected, count)\n\n def test_030_get_generation(self):\n expected = 2\n count = self.dim.get_generation('Q1', hierarchy=MAIN)\n return self.assertEqual(expected, count)\n\n def test_031_get_grandparent(self):\n expected = 'Year'\n node = self.dim.get_grandparent('February', hierarchy=MAIN)\n return self.assertEqual(expected, node)\n\n def test_032_get_leaves(self):\n expected = [[2, 'January'],\n [2, 'February'],\n [2, 'March'],\n [2, 'April'],\n [2, 'May'],\n [2, 'June'],\n [2, 'July'],\n [2, 'August'],\n [2, 'September'],\n [2, 'October'],\n [2, 'November'],\n [2, 'December'],\n [3, 'Janusday']]\n\n nodes = self.dim.get_leaves('Year', hierarchy=MAIN)\n return self.assertEqual(expected, nodes)\n\n def test_033_get_leaves_at_generation(self):\n expected = [[2, 'January'],\n [2, 'February'],\n [2, 'March'],\n [2, 'April'],\n [2, 'May'],\n [2, 'June'],\n [2, 'July'],\n [2, 'August'],\n [2, 'September'],\n [2, 'October'],\n [2, 'November'],\n [2, 'December']]\n\n nodes = self.dim.get_leaves_at_generation('Year', 2, hierarchy=MAIN)\n return self.assertEqual(expected, nodes)\n\n def test_034_get_leaves_at_level(self):\n expected = [[3, 'January'],\n [3, 'February'],\n [3, 'March'],\n [3, 'April'],\n [3, 'May'],\n [3, 'June'],\n [3, 'July'],\n [3, 'August'],\n [3, 'September'],\n [3, 'October'],\n [3, 'November'],\n [3, 'December']]\n\n nodes = self.dim.get_leaves_at_level('February', 0, hierarchy=MAIN)\n return self.assertEqual(expected, nodes)\n\n def test_035_get_parent(self):\n expected = 'Q1'\n nodes = 
self.dim.get_parent('February', hierarchy=MAIN)\n return self.assertEqual(expected, nodes)\n\n def test_036_get_parents(self):\n expected = [['financial', 'halves', 'main'], ['YTD', 'Q1', 'Q1']]\n nodes = self.dim.get_parents('February')\n return self.assertEqual(expected, nodes)\n\n def test_037_get_siblings(self):\n expected = ['January', 'February', 'March']\n nodes = self.dim.get_siblings('February', hierarchy=MAIN)\n return self.assertEqual(expected, nodes)\n\n def test_038_get_difference(self):\n expected = sorted(['Janusday', 'Year', 'Q5', 'Donk-tober'])\n nodes = sorted(self.dim.get_difference(['halves']))\n return self.assertEqual(expected, nodes)\n\n def test_039_get_intersection(self):\n expected = sorted(['!!root!!', 'April', 'August', 'December', 'February', 'January', 'July', 'June', 'March',\n 'May', 'November', 'October', 'Q1', 'Q2', 'Q3', 'Q4', 'September'])\n nodes = sorted(self.dim.get_intersection(['halves']))\n return self.assertEqual(expected, nodes)\n\n def test_040_get_union(self):\n expected = sorted(['!!root!!', 'April', 'August', 'December', 'Donk-tober', 'February', 'H1', 'H2', 'January',\n 'Janusday', 'July', 'June', 'March', 'May', 'November', 'October', 'Q1', 'Q2', 'Q3', 'Q4',\n 'Q5', 'September', 'Year'])\n nodes = sorted(self.dim.get_union(['halves']))\n return self.assertEqual(expected, nodes)\n\n def test_041_add_node_to_alt(self):\n expected = 'H2'\n self.dim.add_node('H2', 'Q5', '+', hierarchy='halves', after='Q4')\n node = self.dim.get_parent('Q5', hierarchy='halves')\n return self.assertEqual(expected, node)\n\n def test_042_move_node_in_alt(self):\n expected = 'H1'\n self.dim.move_node('Q5', 'H1', hierarchy='halves', before='Q2')\n node = self.dim.get_parent('Q5', hierarchy='halves')\n return self.assertEqual(expected, node)\n\n def test_043_rename_node(self):\n expected = 'Q5'\n self.dim.rename_node('Donk-tober', 'Davetober')\n node = self.dim.get_parent('Davetober', hierarchy=MAIN)\n return self.assertEqual(expected, node)\n\n def test_044_delete_node(self):\n self.dim.delete_node('Year', 'Q5', hierarchy=MAIN)\n node = self.dim.node_exists('Q5')\n return self.assertFalse(node)\n\n def test_045_default_alias_dataframe(self):\n self.dim.set_default_aliases(primary='Welsh', secondary='French')\n df = self.dim.get_aliases_dataframe()\n df = df.reindex(columns=sorted(df.columns))\n df.sort_values(by=list(df.columns), axis=0, inplace=True)\n df.to_csv(f'{FOLDER}df_get_default_aliases.csv', index=False)\n self.assertFileEqual(f'{FOLDER}df_get_default_aliases.csv', f'{BASELINE}df_get_default_aliases.csv')\n pass\n\n def tearDown(self):\n self.dim = None\n self.dims = None\n" ]
[ [ "pandas.testing.assert_frame_equal", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
psobot/beam
[ "d9da8a4dc818b01a86d2dce2e78c0d78b47038bb" ]
[ "sdks/python/apache_beam/dataframe/pandas_doctests_test.py" ]
[ "#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nimport unittest\n\nimport pandas as pd\n\nfrom apache_beam.dataframe import doctests\nfrom apache_beam.dataframe.pandas_top_level_functions import _is_top_level_function\n\n\[email protected](sys.platform == 'win32', '[BEAM-10626]')\nclass DoctestTest(unittest.TestCase):\n def test_ndframe_tests(self):\n # IO methods are tested in io_test.py\n skip_writes = {\n f'pandas.core.generic.NDFrame.{name}': ['*']\n for name in dir(pd.core.generic.NDFrame) if name.startswith('to_')\n }\n\n result = doctests.testmod(\n pd.core.generic,\n use_beam=False,\n report=True,\n wont_implement_ok={\n 'pandas.core.generic.NDFrame.first': ['*'],\n 'pandas.core.generic.NDFrame.head': ['*'],\n 'pandas.core.generic.NDFrame.last': ['*'],\n 'pandas.core.generic.NDFrame.shift': ['*'],\n 'pandas.core.generic.NDFrame.tail': ['*'],\n 'pandas.core.generic.NDFrame.take': ['*'],\n 'pandas.core.generic.NDFrame.values': ['*'],\n 'pandas.core.generic.NDFrame.tz_localize': [\n \"s.tz_localize('CET', ambiguous='infer')\",\n # np.array is not a deferred object. 
This use-case is possible\n # with a deferred Series though, which is tested in\n # frames_test.py\n \"s.tz_localize('CET', ambiguous=np.array([True, True, False]))\",\n ],\n 'pandas.core.generic.NDFrame.truncate': [\n # These inputs rely on tail (wont implement, order\n # sensitive) for verification\n \"df.tail()\",\n \"df.loc['2016-01-05':'2016-01-10', :].tail()\",\n ],\n 'pandas.core.generic.NDFrame.replace': [\n \"s.replace([1, 2], method='bfill')\",\n # Relies on method='pad'\n \"s.replace('a', None)\",\n ],\n 'pandas.core.generic.NDFrame.fillna': [\n \"df.fillna(method='ffill')\",\n 'df.fillna(value=values, limit=1)',\n ],\n 'pandas.core.generic.NDFrame.sort_values': ['*'],\n 'pandas.core.generic.NDFrame.mask': [\n 'df.where(m, -df) == np.where(m, df, -df)'\n ],\n 'pandas.core.generic.NDFrame.where': [\n 'df.where(m, -df) == np.where(m, df, -df)'\n ],\n 'pandas.core.generic.NDFrame.interpolate': ['*'],\n },\n not_implemented_ok={\n 'pandas.core.generic.NDFrame.asof': ['*'],\n 'pandas.core.generic.NDFrame.at_time': ['*'],\n 'pandas.core.generic.NDFrame.between_time': ['*'],\n 'pandas.core.generic.NDFrame.describe': ['*'],\n 'pandas.core.generic.NDFrame.ewm': ['*'],\n 'pandas.core.generic.NDFrame.expanding': ['*'],\n 'pandas.core.generic.NDFrame.flags': ['*'],\n 'pandas.core.generic.NDFrame.pct_change': ['*'],\n 'pandas.core.generic.NDFrame.rank': ['*'],\n 'pandas.core.generic.NDFrame.reindex': ['*'],\n 'pandas.core.generic.NDFrame.reindex_like': ['*'],\n 'pandas.core.generic.NDFrame.replace': ['*'],\n 'pandas.core.generic.NDFrame.resample': ['*'],\n 'pandas.core.generic.NDFrame.rolling': ['*'],\n 'pandas.core.generic.NDFrame.sample': ['*'],\n 'pandas.core.generic.NDFrame.set_flags': ['*'],\n 'pandas.core.generic.NDFrame.squeeze': ['*'],\n 'pandas.core.generic.NDFrame.transform': ['*'],\n 'pandas.core.generic.NDFrame.truncate': ['*'],\n 'pandas.core.generic.NDFrame.xs': ['*'],\n # argsort unimplemented\n 'pandas.core.generic.NDFrame.abs': [\n 'df.loc[(df.c - 43).abs().argsort()]',\n ],\n },\n skip={\n # Internal test\n 'pandas.core.generic.NDFrame._set_axis_name': ['*'],\n # Fails to construct test series. asfreq is not implemented anyway.\n 'pandas.core.generic.NDFrame.asfreq': ['*'],\n 'pandas.core.generic.NDFrame.astype': ['*'],\n 'pandas.core.generic.NDFrame.convert_dtypes': ['*'],\n 'pandas.core.generic.NDFrame.copy': ['*'],\n 'pandas.core.generic.NDFrame.droplevel': ['*'],\n 'pandas.core.generic.NDFrame.infer_objects': ['*'],\n 'pandas.core.generic.NDFrame.rank': [\n # Modified dataframe\n 'df'\n ],\n 'pandas.core.generic.NDFrame.rename': [\n # Seems to be an upstream bug. The actual error has a different\n # message:\n # TypeError: Index(...) 
must be called with a collection of\n # some kind, 2 was passed\n # pandas doctests only verify the type of exception\n 'df.rename(2)'\n ],\n # Tests rely on setting index\n 'pandas.core.generic.NDFrame.rename_axis': ['*'],\n # Raises right exception, but testing framework has matching issues.\n 'pandas.core.generic.NDFrame.replace': [\n \"df.replace({'a string': 'new value', True: False}) # raises\"\n ],\n 'pandas.core.generic.NDFrame.squeeze': ['*'],\n\n # NameError\n 'pandas.core.generic.NDFrame.resample': ['df'],\n\n # Skipped so we don't need to install natsort\n 'pandas.core.generic.NDFrame.sort_values': [\n 'from natsort import index_natsorted',\n 'df.sort_values(\\n'\n ' by=\"time\",\\n'\n ' key=lambda x: np.argsort(index_natsorted(df[\"time\"]))\\n'\n ')'\n ],\n **skip_writes\n })\n self.assertEqual(result.failed, 0)\n\n def test_dataframe_tests(self):\n result = doctests.testmod(\n pd.core.frame,\n use_beam=False,\n report=True,\n wont_implement_ok={\n 'pandas.core.frame.DataFrame.T': ['*'],\n 'pandas.core.frame.DataFrame.cummax': ['*'],\n 'pandas.core.frame.DataFrame.cummin': ['*'],\n 'pandas.core.frame.DataFrame.cumsum': ['*'],\n 'pandas.core.frame.DataFrame.cumprod': ['*'],\n 'pandas.core.frame.DataFrame.diff': ['*'],\n 'pandas.core.frame.DataFrame.fillna': [\n \"df.fillna(method='ffill')\",\n 'df.fillna(value=values, limit=1)',\n ],\n 'pandas.core.frame.DataFrame.items': ['*'],\n 'pandas.core.frame.DataFrame.itertuples': ['*'],\n 'pandas.core.frame.DataFrame.iterrows': ['*'],\n 'pandas.core.frame.DataFrame.iteritems': ['*'],\n # default keep is 'first'\n 'pandas.core.frame.DataFrame.nlargest': [\n \"df.nlargest(3, 'population')\",\n \"df.nlargest(3, ['population', 'GDP'])\",\n \"df.nlargest(3, 'population', keep='last')\"\n ],\n 'pandas.core.frame.DataFrame.nsmallest': [\n \"df.nsmallest(3, 'population')\",\n \"df.nsmallest(3, ['population', 'GDP'])\",\n \"df.nsmallest(3, 'population', keep='last')\",\n ],\n 'pandas.core.frame.DataFrame.replace': [\n \"s.replace([1, 2], method='bfill')\",\n # Relies on method='pad'\n \"s.replace('a', None)\",\n ],\n 'pandas.core.frame.DataFrame.to_records': ['*'],\n 'pandas.core.frame.DataFrame.to_dict': ['*'],\n 'pandas.core.frame.DataFrame.to_numpy': ['*'],\n 'pandas.core.frame.DataFrame.to_string': ['*'],\n 'pandas.core.frame.DataFrame.transpose': ['*'],\n 'pandas.core.frame.DataFrame.shape': ['*'],\n 'pandas.core.frame.DataFrame.shift': [\n 'df.shift(periods=3, freq=\"D\")',\n 'df.shift(periods=3, freq=\"infer\")'\n ],\n 'pandas.core.frame.DataFrame.unstack': ['*'],\n 'pandas.core.frame.DataFrame.memory_usage': ['*'],\n 'pandas.core.frame.DataFrame.info': ['*'],\n # Not equal to df.agg('mode', axis='columns', numeric_only=True)\n # because there can be multiple columns if a row has more than one\n # mode\n 'pandas.core.frame.DataFrame.mode': [\n \"df.mode(axis='columns', numeric_only=True)\"\n ],\n 'pandas.core.frame.DataFrame.append': [\n 'df.append(df2, ignore_index=True)',\n \"for i in range(5):\\n\" +\n \" df = df.append({'A': i}, ignore_index=True)\",\n ],\n 'pandas.core.frame.DataFrame.sort_index': ['*'],\n 'pandas.core.frame.DataFrame.sort_values': ['*'],\n 'pandas.core.frame.DataFrame.melt': [\n \"df.melt(id_vars=['A'], value_vars=['B'])\",\n \"df.melt(id_vars=['A'], value_vars=['B', 'C'])\",\n \"df.melt(col_level=0, id_vars=['A'], value_vars=['B'])\",\n \"df.melt(id_vars=[('A', 'D')], value_vars=[('B', 'E')])\",\n \"df.melt(id_vars=['A'], value_vars=['B'],\\n\" +\n \" var_name='myVarname', value_name='myValname')\"\n ]\n },\n 
not_implemented_ok={\n 'pandas.core.frame.DataFrame.transform': ['*'],\n 'pandas.core.frame.DataFrame.reindex': ['*'],\n 'pandas.core.frame.DataFrame.reindex_axis': ['*'],\n\n 'pandas.core.frame.DataFrame.round': [\n 'df.round(decimals)',\n ],\n\n # We should be able to support pivot and pivot_table for categorical\n # columns\n 'pandas.core.frame.DataFrame.pivot': ['*'],\n\n # We can implement this as a zipping operator, but it won't have the\n # same capability. The doctest includes an example that branches on\n # a deferred result.\n 'pandas.core.frame.DataFrame.combine': ['*'],\n\n # Can be implemented as a zipping operator\n 'pandas.core.frame.DataFrame.combine_first': ['*'],\n\n # Difficult to parallelize but should be possible?\n 'pandas.core.frame.DataFrame.dot': [\n # reindex not supported\n 's2 = s.reindex([1, 0, 2, 3])',\n 'df.dot(s2)',\n ],\n\n # Trivially elementwise for axis=columns. Relies on global indexing\n # for axis=rows.\n # Difficult to determine proxy, need to inspect function\n 'pandas.core.frame.DataFrame.apply': ['*'],\n\n # Cross-join not implemented\n 'pandas.core.frame.DataFrame.merge': [\n \"df1.merge(df2, how='cross')\"\n ],\n\n # TODO(BEAM-11711)\n 'pandas.core.frame.DataFrame.set_index': [\n \"df.set_index([s, s**2])\",\n ],\n },\n skip={\n # Throws NotImplementedError when modifying df\n 'pandas.core.frame.DataFrame.transform': ['df'],\n 'pandas.core.frame.DataFrame.axes': [\n # Returns deferred index.\n 'df.axes',\n ],\n 'pandas.core.frame.DataFrame.compare': ['*'],\n 'pandas.core.frame.DataFrame.cov': [\n # Relies on setting entries ahead of time.\n \"df.loc[df.index[:5], 'a'] = np.nan\",\n \"df.loc[df.index[5:10], 'b'] = np.nan\",\n 'df.cov(min_periods=12)',\n ],\n 'pandas.core.frame.DataFrame.drop_duplicates': ['*'],\n 'pandas.core.frame.DataFrame.duplicated': ['*'],\n 'pandas.core.frame.DataFrame.idxmax': ['*'],\n 'pandas.core.frame.DataFrame.idxmin': ['*'],\n 'pandas.core.frame.DataFrame.rename': [\n # Returns deferred index.\n 'df.index',\n 'df.rename(index=str).index',\n ],\n 'pandas.core.frame.DataFrame.set_index': [\n # TODO(BEAM-11711): This could pass in the index as\n # a DeferredIndex, and we should fail it as order-sensitive.\n \"df.set_index([pd.Index([1, 2, 3, 4]), 'year'])\",\n ],\n 'pandas.core.frame.DataFrame.set_axis': ['*'],\n 'pandas.core.frame.DataFrame.to_markdown': ['*'],\n 'pandas.core.frame.DataFrame.to_parquet': ['*'],\n 'pandas.core.frame.DataFrame.value_counts': ['*'],\n\n 'pandas.core.frame.DataFrame.to_records': [\n 'df.index = df.index.rename(\"I\")',\n 'index_dtypes = f\"<S{df.index.str.len().max()}\"', # 1.x\n 'index_dtypes = \"<S{}\".format(df.index.str.len().max())', #0.x\n 'df.to_records(index_dtypes=index_dtypes)',\n ],\n # These tests use the static method pd.pivot_table, which doesn't\n # actually raise NotImplementedError\n 'pandas.core.frame.DataFrame.pivot_table': ['*'],\n # Expected to raise a ValueError, but we raise NotImplementedError\n 'pandas.core.frame.DataFrame.pivot': [\n \"df.pivot(index='foo', columns='bar', values='baz')\"\n ],\n 'pandas.core.frame.DataFrame.append': [\n 'df',\n # pylint: disable=line-too-long\n \"pd.concat([pd.DataFrame([i], columns=['A']) for i in range(5)],\\n\"\n \" ignore_index=True)\"\n ],\n 'pandas.core.frame.DataFrame.eval': ['df'],\n 'pandas.core.frame.DataFrame.melt': [\n \"df.columns = [list('ABC'), list('DEF')]\", \"df\"\n ],\n 'pandas.core.frame.DataFrame.merge': [\n # Order-sensitive index, checked in frames_test.py.\n \"df1.merge(df2, left_on='lkey', 
right_on='rkey')\",\n \"df1.merge(df2, left_on='lkey', right_on='rkey',\\n\"\n \" suffixes=('_left', '_right'))\",\n \"df1.merge(df2, how='left', on='a')\",\n ],\n # Raises right exception, but testing framework has matching issues.\n 'pandas.core.frame.DataFrame.replace': [\n \"df.replace({'a string': 'new value', True: False}) # raises\"\n ],\n 'pandas.core.frame.DataFrame.to_sparse': ['type(df)'],\n\n # Skipped because \"seen_wont_implement\" is reset before getting to\n # these calls, so the NameError they raise is not ignored.\n 'pandas.core.frame.DataFrame.T': [\n 'df1_transposed.dtypes', 'df2_transposed.dtypes'\n ],\n 'pandas.core.frame.DataFrame.transpose': [\n 'df1_transposed.dtypes', 'df2_transposed.dtypes'\n ],\n # Skipped because the relies on iloc to set a cell to NA. Test is\n # replicated in frames_test::DeferredFrameTest::test_applymap.\n 'pandas.core.frame.DataFrame.applymap': [\n 'df_copy.iloc[0, 0] = pd.NA',\n \"df_copy.applymap(lambda x: len(str(x)), na_action='ignore')\",\n ],\n # Skipped so we don't need to install natsort\n 'pandas.core.frame.DataFrame.sort_values': [\n 'from natsort import index_natsorted',\n 'df.sort_values(\\n'\n ' by=\"time\",\\n'\n ' key=lambda x: np.argsort(index_natsorted(df[\"time\"]))\\n'\n ')'\n ],\n # Mode that we don't yet support, documentation added in pandas\n # 1.2.0 (https://github.com/pandas-dev/pandas/issues/35912)\n 'pandas.core.frame.DataFrame.aggregate': [\n \"df.agg(x=('A', max), y=('B', 'min'), z=('C', np.mean))\"\n ],\n })\n self.assertEqual(result.failed, 0)\n\n def test_series_tests(self):\n result = doctests.testmod(\n pd.core.series,\n use_beam=False,\n report=True,\n wont_implement_ok={\n 'pandas.core.series.Series.__array__': ['*'],\n 'pandas.core.series.Series.array': ['*'],\n 'pandas.core.series.Series.cummax': ['*'],\n 'pandas.core.series.Series.cummin': ['*'],\n 'pandas.core.series.Series.cumsum': ['*'],\n 'pandas.core.series.Series.cumprod': ['*'],\n 'pandas.core.series.Series.diff': ['*'],\n 'pandas.core.series.Series.dot': [\n 's.dot(arr)', # non-deferred result\n ],\n 'pandas.core.series.Series.fillna': [\n \"df.fillna(method='ffill')\",\n 'df.fillna(value=values, limit=1)',\n ],\n 'pandas.core.series.Series.items': ['*'],\n 'pandas.core.series.Series.iteritems': ['*'],\n # default keep is 'first'\n 'pandas.core.series.Series.nlargest': [\n \"s.nlargest()\",\n \"s.nlargest(3)\",\n \"s.nlargest(3, keep='last')\",\n ],\n 'pandas.core.series.Series.memory_usage': ['*'],\n 'pandas.core.series.Series.nsmallest': [\n \"s.nsmallest()\",\n \"s.nsmallest(3)\",\n \"s.nsmallest(3, keep='last')\",\n ],\n 'pandas.core.series.Series.pop': ['*'],\n 'pandas.core.series.Series.searchsorted': ['*'],\n 'pandas.core.series.Series.shift': ['*'],\n 'pandas.core.series.Series.take': ['*'],\n 'pandas.core.series.Series.to_dict': ['*'],\n 'pandas.core.series.Series.unique': ['*'],\n 'pandas.core.series.Series.unstack': ['*'],\n 'pandas.core.series.Series.values': ['*'],\n 'pandas.core.series.Series.view': ['*'],\n 'pandas.core.series.Series.append': [\n 's1.append(s2, ignore_index=True)',\n ],\n 'pandas.core.series.Series.sort_index': ['*'],\n 'pandas.core.series.Series.sort_values': ['*'],\n 'pandas.core.series.Series.argmax': ['*'],\n 'pandas.core.series.Series.argmin': ['*'],\n },\n not_implemented_ok={\n 'pandas.core.series.Series.transform': ['*'],\n 'pandas.core.series.Series.groupby': [\n 'ser.groupby([\"a\", \"b\", \"a\", \"b\"]).mean()',\n 'ser.groupby([\"a\", \"b\", \"a\", np.nan]).mean()',\n 'ser.groupby([\"a\", \"b\", \"a\", 
np.nan], dropna=False).mean()',\n # Grouping by a series is not supported\n 'ser.groupby(ser > 100).mean()',\n ],\n 'pandas.core.series.Series.reindex': ['*'],\n },\n skip={\n # error formatting\n 'pandas.core.series.Series.append': [\n 's1.append(s2, verify_integrity=True)',\n ],\n # Throws NotImplementedError when modifying df\n 'pandas.core.series.Series.transform': ['df'],\n 'pandas.core.series.Series.autocorr': ['*'],\n 'pandas.core.series.Series.combine': ['*'],\n 'pandas.core.series.Series.combine_first': ['*'],\n 'pandas.core.series.Series.compare': ['*'],\n 'pandas.core.series.Series.cov': [\n # Differs in LSB on jenkins.\n \"s1.cov(s2)\",\n ],\n 'pandas.core.series.Series.drop_duplicates': ['*'],\n 'pandas.core.series.Series.duplicated': ['*'],\n 'pandas.core.series.Series.explode': ['*'],\n 'pandas.core.series.Series.idxmax': ['*'],\n 'pandas.core.series.Series.idxmin': ['*'],\n 'pandas.core.series.Series.nonzero': ['*'],\n 'pandas.core.series.Series.quantile': ['*'],\n 'pandas.core.series.Series.pop': ['ser'], # testing side effect\n 'pandas.core.series.Series.repeat': ['*'],\n 'pandas.core.series.Series.replace': ['*'],\n 'pandas.core.series.Series.reset_index': ['*'],\n 'pandas.core.series.Series.searchsorted': [\n # This doctest seems to be incorrectly parsed.\n \"x = pd.Categorical(['apple', 'bread', 'bread',\"\n ],\n 'pandas.core.series.Series.set_axis': ['*'],\n 'pandas.core.series.Series.to_csv': ['*'],\n 'pandas.core.series.Series.to_markdown': ['*'],\n 'pandas.core.series.Series.update': ['*'],\n 'pandas.core.series.Series.view': [\n # Inspection after modification.\n 's'\n ],\n })\n self.assertEqual(result.failed, 0)\n\n def test_string_tests(self):\n PD_VERSION = tuple(int(v) for v in pd.__version__.split('.'))\n if PD_VERSION < (1, 2, 0):\n module = pd.core.strings\n else:\n # Definitions were moved to accessor in pandas 1.2.0\n module = pd.core.strings.accessor\n\n module_name = module.__name__\n\n result = doctests.testmod(\n module,\n use_beam=False,\n wont_implement_ok={\n # These methods can accept deferred series objects, but not lists\n f'{module_name}.StringMethods.cat': [\n \"s.str.cat(['A', 'B', 'C', 'D'], sep=',')\",\n \"s.str.cat(['A', 'B', 'C', 'D'], sep=',', na_rep='-')\",\n \"s.str.cat(['A', 'B', 'C', 'D'], na_rep='-')\"\n ],\n f'{module_name}.StringMethods.repeat': [\n 's.str.repeat(repeats=[1, 2, 3])'\n ],\n f'{module_name}.str_repeat': ['s.str.repeat(repeats=[1, 2, 3])'],\n f'{module_name}.StringMethods.get_dummies': ['*'],\n f'{module_name}.str_get_dummies': ['*'],\n },\n skip={\n # count() on Series with a NaN produces mismatched type if we\n # have a NaN-only partition.\n f'{module_name}.StringMethods.count': [\"s.str.count('a')\"],\n f'{module_name}.str_count': [\"s.str.count('a')\"],\n\n # Produce None instead of NaN, see\n # frames_test.py::DeferredFrameTest::test_str_split\n f'{module_name}.StringMethods.rsplit': [\n 's.str.split(expand=True)',\n 's.str.rsplit(\"/\", n=1, expand=True)',\n ],\n f'{module_name}.StringMethods.split': [\n 's.str.split(expand=True)',\n 's.str.rsplit(\"/\", n=1, expand=True)',\n ],\n\n # Bad test strings in pandas 1.1.x\n f'{module_name}.str_replace': [\n \"pd.Series(['foo', 'fuz', np.nan]).str.replace('f', repr)\"\n ],\n f'{module_name}.StringMethods.replace': [\n \"pd.Series(['foo', 'fuz', np.nan]).str.replace('f', repr)\"\n ],\n\n # output has incorrect formatting in 1.2.x\n f'{module_name}.StringMethods.extractall': ['*']\n })\n self.assertEqual(result.failed, 0)\n\n def test_datetime_tests(self):\n # 
TODO(BEAM-10721)\n datetimelike_result = doctests.testmod(\n pd.core.arrays.datetimelike,\n use_beam=False,\n skip={\n 'pandas.core.arrays.datetimelike.AttributesMixin._unbox_scalar': [\n '*'\n ],\n 'pandas.core.arrays.datetimelike.TimelikeOps.ceil': ['*'],\n 'pandas.core.arrays.datetimelike.TimelikeOps.floor': ['*'],\n 'pandas.core.arrays.datetimelike.TimelikeOps.round': ['*'],\n })\n\n datetime_result = doctests.testmod(\n pd.core.arrays.datetimes,\n use_beam=False,\n skip={\n 'pandas.core.arrays.datetimes.DatetimeArray.day': ['*'],\n 'pandas.core.arrays.datetimes.DatetimeArray.hour': ['*'],\n 'pandas.core.arrays.datetimes.DatetimeArray.microsecond': ['*'],\n 'pandas.core.arrays.datetimes.DatetimeArray.minute': ['*'],\n 'pandas.core.arrays.datetimes.DatetimeArray.month': ['*'],\n 'pandas.core.arrays.datetimes.DatetimeArray.nanosecond': ['*'],\n 'pandas.core.arrays.datetimes.DatetimeArray.second': ['*'],\n 'pandas.core.arrays.datetimes.DatetimeArray.year': ['*'],\n 'pandas.core.arrays.datetimes.DatetimeArray.is_leap_year': ['*'],\n 'pandas.core.arrays.datetimes.DatetimeArray.is_month_end': ['*'],\n 'pandas.core.arrays.datetimes.DatetimeArray.is_month_start': ['*'],\n 'pandas.core.arrays.datetimes.DatetimeArray.is_quarter_end': ['*'],\n 'pandas.core.arrays.datetimes.DatetimeArray.is_quarter_start': [\n '*'\n ],\n 'pandas.core.arrays.datetimes.DatetimeArray.is_year_end': ['*'],\n 'pandas.core.arrays.datetimes.DatetimeArray.is_year_start': ['*'],\n 'pandas.core.arrays.datetimes.DatetimeArray.to_period': ['*'],\n 'pandas.core.arrays.datetimes.DatetimeArray.tz_localize': ['*'],\n })\n\n self.assertEqual(datetimelike_result.failed, 0)\n self.assertEqual(datetime_result.failed, 0)\n\n def test_indexing_tests(self):\n result = doctests.testmod(\n pd.core.indexing,\n use_beam=False,\n skip={\n 'pandas.core.indexing._IndexSlice': ['*'],\n 'pandas.core.indexing.IndexingMixin.at': ['*'],\n 'pandas.core.indexing.IndexingMixin.iat': ['*'],\n 'pandas.core.indexing.IndexingMixin.iloc': ['*'],\n 'pandas.core.indexing.IndexingMixin.loc': ['*'],\n 'pandas.core.indexing._AtIndexer': ['*'],\n 'pandas.core.indexing._LocIndexer': ['*'],\n 'pandas.core.indexing._iAtIndexer': ['*'],\n 'pandas.core.indexing._iLocIndexer': ['*'],\n })\n self.assertEqual(result.failed, 0)\n\n def test_groupby_tests(self):\n result = doctests.testmod(\n pd.core.groupby.groupby,\n use_beam=False,\n wont_implement_ok={\n 'pandas.core.groupby.groupby.GroupBy.head': ['*'],\n 'pandas.core.groupby.groupby.GroupBy.tail': ['*'],\n 'pandas.core.groupby.groupby.GroupBy.nth': ['*'],\n 'pandas.core.groupby.groupby.GroupBy.cumcount': ['*'],\n },\n not_implemented_ok={\n 'pandas.core.groupby.groupby.GroupBy.describe': ['*'],\n 'pandas.core.groupby.groupby.GroupBy.ngroup': ['*'],\n 'pandas.core.groupby.groupby.GroupBy.resample': ['*'],\n 'pandas.core.groupby.groupby.GroupBy.sample': ['*'],\n 'pandas.core.groupby.groupby.GroupBy.quantile': ['*'],\n 'pandas.core.groupby.groupby.BaseGroupBy.pipe': ['*'],\n # pipe tests are in a different location in pandas 1.1.x\n 'pandas.core.groupby.groupby._GroupBy.pipe': ['*'],\n 'pandas.core.groupby.groupby.GroupBy.nth': [\n \"df.groupby('A', as_index=False).nth(1)\",\n ],\n },\n skip={\n # Uses iloc to mutate a DataFrame\n 'pandas.core.groupby.groupby.GroupBy.resample': [\n 'df.iloc[2, 0] = 5',\n 'df',\n ],\n # TODO: Raise wont implement for list passed as a grouping column\n # Currently raises unhashable type: list\n 'pandas.core.groupby.groupby.GroupBy.ngroup': [\n 'df.groupby([\"A\", 
[1,1,2,3,2,1]]).ngroup()'\n ],\n })\n self.assertEqual(result.failed, 0)\n\n result = doctests.testmod(\n pd.core.groupby.generic,\n use_beam=False,\n wont_implement_ok={\n # Returns an array by default, not a Series. WontImplement\n # (non-deferred)\n 'pandas.core.groupby.generic.SeriesGroupBy.unique': ['*'],\n # TODO: Is take actually deprecated?\n 'pandas.core.groupby.generic.DataFrameGroupBy.take': ['*'],\n 'pandas.core.groupby.generic.SeriesGroupBy.take': ['*'],\n 'pandas.core.groupby.generic.SeriesGroupBy.nsmallest': [\n \"s.nsmallest(3, keep='last')\",\n \"s.nsmallest(3)\",\n \"s.nsmallest()\",\n ],\n 'pandas.core.groupby.generic.SeriesGroupBy.nlargest': [\n \"s.nlargest(3, keep='last')\",\n \"s.nlargest(3)\",\n \"s.nlargest()\",\n ],\n 'pandas.core.groupby.generic.DataFrameGroupBy.diff': ['*'],\n 'pandas.core.groupby.generic.SeriesGroupBy.diff': ['*'],\n 'pandas.core.groupby.generic.DataFrameGroupBy.hist': ['*'],\n 'pandas.core.groupby.generic.DataFrameGroupBy.fillna': [\n \"df.fillna(method='ffill')\",\n 'df.fillna(value=values, limit=1)',\n ],\n 'pandas.core.groupby.generic.SeriesGroupBy.fillna': [\n \"df.fillna(method='ffill')\",\n 'df.fillna(value=values, limit=1)',\n ],\n },\n not_implemented_ok={\n 'pandas.core.groupby.generic.DataFrameGroupBy.transform': ['*'],\n 'pandas.core.groupby.generic.DataFrameGroupBy.idxmax': ['*'],\n 'pandas.core.groupby.generic.DataFrameGroupBy.idxmin': ['*'],\n 'pandas.core.groupby.generic.DataFrameGroupBy.filter': ['*'],\n 'pandas.core.groupby.generic.DataFrameGroupBy.nunique': ['*'],\n 'pandas.core.groupby.generic.SeriesGroupBy.transform': ['*'],\n 'pandas.core.groupby.generic.SeriesGroupBy.idxmax': ['*'],\n 'pandas.core.groupby.generic.SeriesGroupBy.idxmin': ['*'],\n 'pandas.core.groupby.generic.SeriesGroupBy.filter': ['*'],\n 'pandas.core.groupby.generic.SeriesGroupBy.describe': ['*'],\n },\n skip={\n 'pandas.core.groupby.generic.SeriesGroupBy.cov': [\n # Floating point comparison fails\n 's1.cov(s2)',\n ],\n 'pandas.core.groupby.generic.DataFrameGroupBy.cov': [\n # Mutates input DataFrame with loc\n # TODO: Replicate in frames_test.py\n \"df.loc[df.index[:5], 'a'] = np.nan\",\n \"df.loc[df.index[5:10], 'b'] = np.nan\",\n \"df.cov(min_periods=12)\",\n ],\n # These examples rely on grouping by a list\n 'pandas.core.groupby.generic.SeriesGroupBy.aggregate': ['*'],\n 'pandas.core.groupby.generic.DataFrameGroupBy.aggregate': ['*'],\n })\n self.assertEqual(result.failed, 0)\n\n def test_top_level(self):\n tests = {\n name: func.__doc__\n for (name, func) in pd.__dict__.items()\n if _is_top_level_function(func) and getattr(func, '__doc__', None)\n }\n\n # IO methods are tested in io_test.py\n skip_reads = {name: ['*'] for name in dir(pd) if name.startswith('read_')}\n\n result = doctests.teststrings(\n tests,\n use_beam=False,\n report=True,\n not_implemented_ok={\n 'concat': ['pd.concat([s1, s2], ignore_index=True)'],\n 'crosstab': ['*'],\n 'cut': ['*'],\n 'eval': ['*'],\n 'factorize': ['*'],\n 'get_dummies': ['*'],\n 'infer_freq': ['*'],\n 'lreshape': ['*'],\n 'melt': ['*'],\n 'merge': [\"df1.merge(df2, how='cross')\"],\n 'merge_asof': ['*'],\n 'pivot': ['*'],\n 'pivot_table': ['*'],\n 'qcut': ['*'],\n 'reset_option': ['*'],\n 'set_eng_float_format': ['*'],\n 'set_option': ['*'],\n 'to_numeric': ['*'],\n 'to_timedelta': ['*'],\n 'unique': ['*'],\n 'value_counts': ['*'],\n 'wide_to_long': ['*'],\n },\n wont_implement_ok={\n 'to_datetime': ['s.head()'],\n 'to_pickle': ['*'],\n 'melt': [\n \"pd.melt(df, id_vars=['A'], value_vars=['B'])\",\n 
\"pd.melt(df, id_vars=['A'], value_vars=['B', 'C'])\",\n \"pd.melt(df, col_level=0, id_vars=['A'], value_vars=['B'])\",\n \"pd.melt(df, id_vars=[('A', 'D')], value_vars=[('B', 'E')])\",\n \"pd.melt(df, id_vars=['A'], value_vars=['B'],\\n\" +\n \" var_name='myVarname', value_name='myValname')\"\n ],\n },\n skip={\n # error formatting\n 'concat': ['pd.concat([df5, df6], verify_integrity=True)'],\n # doctest DeprecationWarning\n 'melt': ['df'],\n # Order-sensitive re-indexing.\n 'merge': [\n \"df1.merge(df2, left_on='lkey', right_on='rkey')\",\n \"df1.merge(df2, left_on='lkey', right_on='rkey',\\n\"\n \" suffixes=('_left', '_right'))\",\n \"df1.merge(df2, how='left', on='a')\",\n ],\n # Not an actual test.\n 'option_context': ['*'],\n 'factorize': ['codes', 'uniques'],\n # Bad top-level use of un-imported function.\n 'merge_ordered': [\n 'merge_ordered(df1, df2, fill_method=\"ffill\", left_by=\"group\")'\n ],\n # Expected error.\n 'pivot': [\"df.pivot(index='foo', columns='bar', values='baz')\"],\n # Never written.\n 'to_pickle': ['os.remove(\"./dummy.pkl\")'],\n **skip_reads\n })\n self.assertEqual(result.failed, 0)\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "pandas.__version__.split", "pandas.__dict__.items" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
metacpp/pytorch
[ "1e7a4d6bbe1fac4fb94f6b62f24c6e242db1e952", "1e7a4d6bbe1fac4fb94f6b62f24c6e242db1e952", "1e7a4d6bbe1fac4fb94f6b62f24c6e242db1e952", "1e7a4d6bbe1fac4fb94f6b62f24c6e242db1e952", "1e7a4d6bbe1fac4fb94f6b62f24c6e242db1e952" ]
[ "test/jit/test_misc.py", "torch/nn/intrinsic/qat/modules/linear_fused.py", "torch/ao/quantization/_dbr/quantization_state.py", "torch/utils/data/datapipes/iter/routeddecoder.py", "test/distributed/fsdp/test_fsdp_uneven.py" ]
[ "# Owner(s): [\"oncall: jit\"]\n\nfrom typing import Any, Dict, List, Optional, Tuple\n\nfrom torch.testing._internal.jit_utils import JitTestCase, make_global\nfrom torch.testing import FileCheck\nfrom torch import jit\nfrom jit.test_module_interface import TestModuleInterface # noqa: F401\nimport os\nimport sys\nimport torch\nimport torch.testing._internal.jit_utils\nimport torch.nn as nn\n\n# Make the helper files in test/ importable\npytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\nsys.path.append(pytorch_test_dir)\n\nif __name__ == '__main__':\n raise RuntimeError(\"This test file is not meant to be run directly, use:\\n\\n\"\n \"\\tpython test/test_jit.py TESTNAME\\n\\n\"\n \"instead.\")\n\nclass TestMisc(JitTestCase):\n def test_joined_str(self):\n def func(x):\n hello, test = \"Hello\", \"test\"\n print(f\"{hello + ' ' + test}, I'm a {test}\")\n print(\"format blank\")\n hi = 'hi'\n print(f\"stuff before {hi}\")\n print(f\"{hi} stuff after\")\n return x + 1\n\n x = torch.arange(4., requires_grad=True)\n # TODO: Add support for f-strings in string parser frontend\n # self.checkScript(func, [x], optimize=True, capture_output=True)\n\n with self.capture_stdout() as captured:\n out = func(x)\n\n scripted = torch.jit.script(func)\n with self.capture_stdout() as captured_script:\n out_script = func(x)\n\n self.assertEqual(out, out_script)\n self.assertEqual(captured, captured_script)\n\n def test_kwarg_support(self):\n with self.assertRaisesRegex(torch.jit.frontend.NotSupportedError, \"variable number of arguments\"):\n class M(torch.nn.Module):\n def forward(self, *, n_tokens: int, device_name: str = 2):\n pass\n torch.jit.script(M())\n\n class M(torch.nn.Module):\n def forward(self, *, n_tokens: int, device_name: str):\n return n_tokens, device_name\n\n sm = torch.jit.script(M())\n\n with self.assertRaisesRegex(RuntimeError, \"missing value for argument 'n_tokens'\"):\n sm()\n\n with self.assertRaisesRegex(RuntimeError, \"positional arg\"):\n sm(3, 'hello')\n\n self.assertEqual(sm(n_tokens=3, device_name='hello'), (3, 'hello'))\n\n def test_tuple_subscripted_assign(self):\n with self.assertRaisesRegex(RuntimeError, \"subscripted assignment\"):\n @torch.jit.script\n def foo(a: Tuple[int, int]) -> None:\n a[0] = a[1]\n\n with self.assertRaisesRegex(RuntimeError, \"augmented assignment\"):\n @torch.jit.script\n def bar(a: Tuple[int, int]) -> None:\n a[0] += a[1]\n\n def test_subexpression_List_Future(self):\n\n @torch.jit.script\n def fn(x: List[torch.jit.Future[int]]) -> torch.jit.Future[int]:\n return x[0]\n\n FileCheck().check('Future[int]').check('Future[int]').run(fn.graph)\n\n def test_subexpression_Future_annotate(self):\n @torch.jit.script\n def fn() -> torch.jit.Future[int]:\n x: List[torch.jit.Future[int]] = []\n return x[0]\n\n FileCheck().check(\"Future[int][]\").run(fn.graph)\n\n def test_future_isinstance(self):\n @torch.jit.script\n def fn(x: Any) -> torch.jit.Future[int]:\n assert isinstance(x, jit.Future[int])\n return x\n\n FileCheck().check(\"Future[int]\").run(fn.graph)\n\n def test_str_refine_any(self):\n def forward(x: Any) -> str:\n if isinstance(x, str):\n return x\n return \"foo\"\n forward = torch.jit.script(forward)\n self.assertEqual(forward(1), \"foo\")\n self.assertEqual(forward(\"bar\"), \"bar\")\n\n def test_subexpression_Tuple_int_int_Future(self):\n\n @torch.jit.script\n def fn(x: Tuple[int, int, torch.jit.Future[int]]) -> Tuple[int, torch.jit.Future[int]]:\n return x[0], x[2]\n\n FileCheck().check('(int, int, 
Future[int])').check('(int, Future[int])').run(fn.graph)\n\n def test_subexpression_Dict_int_Future(self):\n\n @torch.jit.script\n def fn(x: Dict[int, torch.jit.Future[int]], y: int) -> torch.jit.Future[int]:\n return x[y]\n\n FileCheck().check('Dict(int, Future(int))').check('Future[int]').run(fn.graph)\n\n def test_subexpression_Optional(self):\n\n @torch.jit.script\n def fn(x: Optional[Dict[int, torch.jit.Future[int]]]) -> Optional[torch.jit.Future[int]]:\n if x is not None:\n return x[0]\n else:\n return None\n\n FileCheck().check('Dict(int, Future(int))?').run(fn.graph)\n\n def test_if_returning_any(self):\n \"\"\"\n Check that an if statement can return different\n types early from each branch when the return\n type of the function is Any.\n \"\"\"\n def if_function(inp: torch.Tensor) -> Any:\n if inp.shape[0] == 1:\n return inp * inp\n else:\n return \"str\"\n\n self.checkScript(if_function, (torch.randn(5),))\n\n def test_export_opnames_interface(self):\n\n @torch.jit.interface\n class OneTwoModule(nn.Module):\n def one(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\n pass\n\n def two(self, x: torch.Tensor) -> torch.Tensor:\n pass\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n pass\n\n class FooMod(nn.Module):\n def one(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\n return x + y\n\n def two(self, x: torch.Tensor) -> torch.Tensor:\n return 2 * x\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n return self.one(self.two(x), x)\n\n class BarMod(nn.Module):\n def one(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\n return x * y\n\n def two(self, x: torch.Tensor) -> torch.Tensor:\n return 2 / x\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n return self.two(self.one(x, x))\n\n make_global(OneTwoModule)\n\n class M(nn.Module):\n sub : OneTwoModule\n\n def __init__(self):\n super(M, self).__init__()\n self.sub = BarMod()\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n return self.sub.forward(x)\n\n def use_module_interface(mod_list: List[OneTwoModule], x: torch.Tensor):\n return mod_list[0].forward(x) + mod_list[1].forward(x)\n\n torch._C._enable_mobile_interface_call_export()\n scripted_M_mod = torch.jit.script(M())\n self.assertTrue(set(['aten::mul.Scalar', 'aten::mul.Tensor', 'aten::reciprocal']).issubset(\n set(torch.jit.export_opnames(scripted_M_mod))))\n\n scripted_M_mod.sub = torch.jit.script(FooMod())\n self.assertTrue(set(['aten::add.Tensor', 'aten::mul.Scalar']).issubset(\n set(torch.jit.export_opnames(scripted_M_mod))))\n\n def test_broadcasting_list(self):\n \"\"\"\n Test BroadcastingList and torch.nn._size_N_t alias\n \"\"\"\n from torch._jit_internal import BroadcastingList2\n from torch.nn.common_types import _size_2_t\n\n def sum_i(x: _size_2_t) -> int:\n return x[0] + x[1]\n\n def sum_f(x: BroadcastingList2[float]) -> float:\n return x[0] + x[1]\n\n self.assertTrue(torch.jit.script(sum_i)(4) == 8)\n self.assertTrue(torch.jit.script(sum_f)(4.5) == 9.)\n", "import torch\nimport torch.nn as nn\nimport torch.nn.intrinsic as nni\nimport torch.nn.functional as F\nfrom torch.nn import init\nfrom torch.nn.parameter import Parameter\n\n\nclass LinearBn1d(nn.modules.linear.Linear, nni._FusedModule):\n r\"\"\"\n A LinearBn1d module is a module fused from Linear and BatchNorm1d, attached\n with FakeQuantize modules for weight, used in quantization aware training.\n\n We combined the interface of :class:`torch.nn.Linear` and\n :class:torch.nn.BatchNorm1d`.\n\n Similar to :class:`torch.nn.Linear`, with FakeQuantize 
modules initialized\n to default.\n\n Attributes:\n freeze_bn:\n weight_fake_quant: fake quant module for weight\n\n \"\"\"\n def __init__(self,\n # Linear args\n in_features, out_features, bias=True,\n # BatchNorm1d args\n # num_features: out_features\n eps=1e-05, momentum=0.1,\n # affine: True\n # track_running_stats: True\n # Args for this module\n freeze_bn=False,\n qconfig=None):\n nn.modules.linear.Linear.__init__(self, in_features, out_features, bias)\n assert qconfig, 'qconfig must be provded for QAT module'\n self.qconfig = qconfig\n self.freeze_bn = freeze_bn if self.training else True\n self.bn = nn.BatchNorm1d(out_features, eps, momentum, True, True)\n self.weight_fake_quant = self.qconfig.weight()\n if bias:\n self.bias = Parameter(torch.empty(out_features))\n else:\n self.register_parameter('bias', None)\n self.reset_bn_parameters()\n\n # this needs to be called after reset_bn_parameters,\n # as they modify the same state\n if self.training:\n if freeze_bn:\n self.freeze_bn_stats()\n else:\n self.update_bn_stats()\n else:\n self.freeze_bn_stats()\n\n def reset_running_stats(self):\n self.bn.reset_running_stats()\n\n def reset_bn_parameters(self):\n self.bn.reset_running_stats()\n init.uniform_(self.bn.weight)\n init.zeros_(self.bn.bias)\n\n def reset_parameters(self):\n super(LinearBn1d, self).reset_parameters()\n\n def update_bn_stats(self):\n self.freeze_bn = False\n self.bn.training = True\n return self\n\n def freeze_bn_stats(self):\n self.freeze_bn = True\n self.bn.training = False\n return self\n\n def forward(self, input):\n assert self.bn.running_var is not None\n\n # Scale the linear weights by BN's running statistics to reduce\n # weight jitter, see https://arxiv.org/pdf/1806.08342.pdf, page 18\n # for motivation.\n #\n # Instead of\n #\n # x1 = F.linear(x0, fq(w), b)\n # x2 = self.bn(x1)\n #\n # We have\n #\n # # scale the weight by previous batch's running statistics\n # scale_factor = bn.w / bn.running_std_from_prev_batch\n # # do the linear transformation without bias\n # x1_scaled = F.linear(x0, fq(w * scale_factor), 0)\n # # reverse the scaling and add original bias\n # x1_orig = x1_scaled / scale_factor + b\n # x2 = self.bn(x1_orig)\n\n running_std = torch.sqrt(self.bn.running_var + self.bn.eps)\n scale_factor = self.bn.weight / running_std\n weight_shape = [1] * len(self.weight.shape)\n weight_shape[0] = -1\n bias_shape = [1] * len(self.weight.shape)\n bias_shape[1] = -1\n scaled_weight = self.weight_fake_quant(self.weight * scale_factor.reshape(weight_shape))\n if self.bias is not None:\n zero_bias = torch.zeros_like(self.bias)\n else:\n zero_bias = torch.zeros(self.out_features, device=scaled_weight.device)\n linear_out = F.linear(input, scaled_weight, zero_bias)\n linear_out_orig = linear_out / scale_factor.reshape(bias_shape)\n if self.bias is not None:\n linear_out_orig = linear_out_orig + self.bias.reshape(bias_shape)\n bn_out = self.bn(linear_out_orig)\n return bn_out\n\n def train(self, mode=True):\n \"\"\"\n Batchnorm's training behavior is using the self.training flag. Prevent\n changing it if BN is frozen. 
This makes sure that calling `model.train()`\n on a model with a frozen BN will behave properly.\n \"\"\"\n self.training = mode\n if not self.freeze_bn:\n for module in self.children():\n module.train(mode)\n return self\n\n @classmethod\n def from_float(cls, mod):\n r\"\"\"Create a qat module from a float module or qparams_dict\n\n Args: `mod' a float module, either produced by torch.ao.quantization\n utilities or directly from user\n \"\"\"\n assert type(mod) == nni.LinearBn1d, 'qat.' + cls.__name__ + \\\n '.from_float only works for ' + nni.LinearBn1d.__name__\n assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined'\n assert mod.qconfig, 'Input float module must have a valid config'\n qconfig = mod.qconfig\n linear, bn = mod[0], mod[1]\n qat_linearbn = cls(linear.in_features, linear.out_features, linear.bias is not None,\n bn.eps, bn.momentum,\n False, qconfig)\n qat_linearbn.weight = linear.weight\n qat_linearbn.bias = linear.bias\n qat_linearbn.bn.weight = bn.weight\n qat_linearbn.bn.bias = bn.bias\n qat_linearbn.bn.running_mean = bn.running_mean\n qat_linearbn.bn.running_var = bn.running_var\n qat_linearbn.bn.num_batches_tracked = bn.num_batches_tracked\n return qat_linearbn\n", "from typing import Callable, List, Tuple, Any, Optional, Dict\n\nimport torch\nimport torch.nn.functional as F\n\nfrom .mappings import (\n conv_ops,\n ops_are_related,\n)\n\nfrom .utils import (\n _raise_obs_not_found_error,\n _raise_obs_op_mismatch,\n op_needs_quantization,\n SeenQOpInfo,\n SeenNonQOpInfo,\n QTensorInfo,\n FuncOutputObsType,\n get_func_output_obs_type,\n converted_func_needs_scale_zp,\n FuncOutputDTypeType,\n get_func_output_dtype_type,\n get_quantized_op,\n get_input_observed_arg_idxs,\n get_packable_tensor_arg_idxs,\n get_param_name,\n get_packable_nontensor_arg_idxs,\n get_packable_arg_idxs,\n get_weight_arg_idx,\n iterate_and_apply,\n get_op_packing_only_uses_module_attributes,\n get_packable_tensor_kwarg_names,\n get_producer_of_seen_q_op_info,\n clone_detach_tensor_without_dispatch,\n get_input_args_quant_dequant_info,\n get_cur_qconfig,\n OpQuantizeabilityType,\n)\n\nfrom .function_fusion import (\n match_fusion_patterns,\n get_seen_q_op_info_of_start_of_fusion,\n get_seen_q_op_info_of_end_of_fusion,\n)\n\nOpConvertInfo = Tuple[\n # quantized equivalent of original op (None means keep original)\n Optional[Callable],\n # arg_quant_infos, each element is (scale, zp) for quantized and None otherwise\n List[Optional[Tuple[float, int]]],\n # arg_dequant_infos, each element is True if this arg needs a dequant\n List[bool],\n # packed param name, if the op has a packed param\n Optional[str],\n # additional kwargs, such as output scale and zero_point\n Dict[str, Any],\n # any_arg_quant_or_dequant_needed, if False then we can skip looking at\n # arg_quant_infos and arg_dequant_infos, for performance\n bool,\n # any_arg_kwarg_modification_needed, if False then we can return original\n # args and kwargs, for performance\n bool,\n]\n\n# TODO(future PR): maybe better name\n# TODO(future PR): add serialization support\nclass AutoQuantizationState(torch.nn.Module):\n \"\"\"\n Contains state necessary to perform auto quantization on the parent\n `nn.Module` instance.\n \"\"\"\n\n idx : int\n\n def __init__(\n self,\n qconfig_dict: Dict[str, Any],\n fqn: str,\n input_dtypes: Any = None,\n output_dtypes: Any = None,\n ):\n super().__init__()\n self.idx = 0\n self.qconfig_dict = qconfig_dict\n self.fqn = fqn\n # this is a ModuleDict in order to properly register observers\n # 
to be within the module hierarchy.\n self.tensor_id_to_observer = torch.nn.ModuleDict()\n\n # TODO(future PR): include kwargs\n # Note: seen quantizeable ops are recorded with an index,\n # because we enforce order of execution. However, seen\n # unquantizeable ops are recorded without an index, because\n # we do not enforce order of execution.\n self.idx_to_seen_q_op_infos: Dict[int, SeenQOpInfo] = {}\n self.seen_nonq_op_infos: List[SeenNonQOpInfo] = []\n\n # qtensor_info objects of tensor outputs of the module, specified\n # in order of iteration through the output type. Non-tensor outputs\n # are represented with `None`.\n self.output_qtensor_infos: List[Optional[QTensorInfo]] = []\n self.input_dtypes = input_dtypes\n self.output_dtypes = output_dtypes\n # key: idx of seen op\n # value: name of packed weight\n # note: this is filled out right before convert\n self.idx_to_packed_weight_name: Dict[int, str] = {}\n self.tensor_id_to_scale_zp: Dict[int, Tuple[torch.Tensor, torch.Tensor]] = {}\n\n # Numeric Suite add_loggers functionality\n # if this flag is True, op outputs will be saved for debugging\n self.log_op_outputs = False\n # data structure to save op outputs for debugging\n # * outer list represents the different model forward call instances\n # * inner list represents the different op forward call instances in a\n # model forward\n # TODO(future PR): handle types which are not torch.Tensor\n # TODO(future PR): use the Logger class and allow user overrides of it\n self.op_outputs: List[List[Tuple[\n int, # global op idx\n Optional[str], # fqn\n Callable, # fp32 op type (TODO future PR: add quantized op type)\n torch.Tensor, # value\n ]]] = []\n # model name to use in logging results\n self.logging_model_name: Optional[str]\n\n self.idx_to_op_convert_info: Dict[int, OpConvertInfo] = {}\n\n # If this is True, module outputs will be checked and converted\n # to the dtype specified by the user. If this is False, module outputs\n # will be returned as is. 
This value can be precalculated and it is set\n # to its final value after tracing.\n self.needs_dtype_transform_on_outputs = True\n\n def get_extra_state(self):\n return {\"tensor_id_to_scale_zp\": self.tensor_id_to_scale_zp}\n\n def set_extra_state(self, state):\n self.tensor_id_to_scale_zp = state[\"tensor_id_to_scale_zp\"]\n for _, seen_q_op_info in self.idx_to_seen_q_op_infos.items():\n self.idx_to_op_convert_info[seen_q_op_info.idx] = \\\n self.calculate_op_convert_info(seen_q_op_info)\n\n def has_at_least_one_seen_q_op_info(self) -> bool:\n return len(self.idx_to_seen_q_op_infos) > 0\n\n def validate_is_at_last_seen_idx(self) -> None:\n is_at_last_seen_idx = (\n len(self.idx_to_seen_q_op_infos) == 0 or\n self.idx == len(self.idx_to_seen_q_op_infos)\n )\n if not is_at_last_seen_idx:\n raise AssertionError(\n f\"Cur idx: {self.idx}, expected idx: {len(self.idx_to_seen_q_op_infos)}\")\n\n def extra_repr(self) -> str:\n s = \"\"\n # idx_to_seen_q_op_infos\n if len(self.idx_to_seen_q_op_infos):\n s += \"(seen_q_op_infos): {\\n\"\n for k, v in self.idx_to_seen_q_op_infos.items():\n s += f\" {k}: {v}\\n\"\n s += \"}\\n\"\n else:\n s += \"(seen_q_op_infos): {}\\n\"\n if len(self.seen_nonq_op_infos):\n s += \"(seen_nonq_op_infos): {\\n\"\n for n in self.seen_nonq_op_infos:\n s += f\" {n}\\n\"\n s += \"}\\n\"\n else:\n s += \"(seen_nonq_op_infos): {}\\n\"\n # output_qtensor_infos\n s += \"(output_qtensor_infos): [\"\n for i in self.output_qtensor_infos:\n s += f\"{i} \"\n s += \"]\\n\"\n # idx_to_packed_weight_name\n if len(self.idx_to_packed_weight_name):\n s += \"(idx_to_packed_weight_name): {\\n\"\n for k, v in self.idx_to_packed_weight_name.items(): # type: ignore[assignment]\n s += f\" {k}: {v}\\n\"\n s += \"}\\n\"\n else:\n s += \"(idx_to_packed_weight_name): {}\\n\"\n if len(self.tensor_id_to_scale_zp):\n s += \"(tensor_id_to_scale_zp): {\\n\"\n for k, v in self.tensor_id_to_scale_zp.items(): # type: ignore[assignment]\n s += f\" {k}: {v}\\n\"\n s += \"}\"\n return s\n\n def _get_cur_seen_q_op_info(self):\n return self.idx_to_seen_q_op_infos[self.idx]\n\n def get_cur_output_inf_dtype(self):\n return self._get_cur_seen_q_op_info().output_tensor_infos[0].inf_dtype\n\n def reset_to_new_call(self):\n \"\"\"\n Resets the internal op counter to start a new top level module call\n \"\"\"\n # torch.nn.Module __setattr__ has overhead,\n # this code is the explicit fast path for `self.idx = 0`\n object.__setattr__(self, 'idx', 0)\n\n if self.log_op_outputs:\n self.op_outputs.append([])\n\n def cur_op_needs_hooks(self, cur_op: Callable) -> bool:\n return op_needs_quantization(cur_op)\n\n def validate_cur_op(self, cur_op: Callable) -> None:\n \"\"\"\n This function is expected to be called before any new function or\n module call which needs hooks. 
It validates that the new function or\n module is of the expected type based on the order of execution.\n \"\"\"\n try:\n seen_q_op_info = self._get_cur_seen_q_op_info()\n expected_op = seen_q_op_info.type\n except IndexError:\n _raise_obs_not_found_error(cur_op)\n if not ops_are_related(cur_op, expected_op, seen_q_op_info.type_is_module):\n _raise_obs_op_mismatch(cur_op, expected_op)\n\n def mark_cur_op_complete(self, cur_op: Callable) -> None:\n \"\"\"\n This function is expected to be called after a function or module\n processing is complete.\n \"\"\"\n # torch.nn.Module __setattr__ has overhead,\n # this code is the explicit fast path for `self.idx += 1`\n object.__setattr__(self, 'idx', self.idx + 1)\n\n def first_call_outputs_prepare_hook(\n self,\n outputs: Any,\n qtensor_id: List[int],\n ) -> Any:\n \"\"\"\n This function is expected to be called on the outputs of a prepared\n module right before they are returned to the parent, during tracing.\n \"\"\"\n outputs = self._first_call_assign_qtensor_infos_to_mod_outputs(\n outputs, qtensor_id)\n return outputs\n\n def outputs_prepare_hook(\n self,\n outputs: Any,\n ) -> Any:\n \"\"\"\n This function is expected to be called on the outputs of a prepared\n module right before they are returned to the parent.\n \"\"\"\n return outputs\n\n def outputs_convert_hook(\n self,\n outputs: Any,\n ) -> Any:\n \"\"\"\n This function is expected to be called on the outputs of a converted\n module right before they are returned to the parent.\n \"\"\"\n outputs = self._maybe_mod_outputs_dtype_transform(outputs)\n return outputs\n\n def get_output_qtensor_infos(self) -> List[Optional[QTensorInfo]]:\n \"\"\"\n Used by the conversion to torch.jit.script.\n \"\"\"\n return self.output_qtensor_infos\n\n def get_output_dtypes(self) -> Any:\n \"\"\"\n Used by the conversion to torch.jit.script.\n \"\"\"\n return self.output_dtypes\n\n def first_call_op_prepare_before_hook(\n self,\n op: Callable,\n args: Tuple[Any, ...],\n kwargs: Dict[str, Any],\n qtensor_id: List[int],\n fqn: str,\n root_module: torch.nn.Module,\n op_quantizeability_type: OpQuantizeabilityType,\n ) -> Tuple[Tuple[Any, ...], Dict[str, Any]]:\n \"\"\"\n This function is expected to be called on args and kwargs of\n `op` directly before `op` is executed, during tracing.\n\n We record the type of `op`\n and the IDs of its tensor inputs. 
Note: we add a placeholder for IDs\n of tensor outputs, the placeholder will be filled out during the\n `op_prepare_after_hook`.\n\n The function returns modified `args` and `kwargs`.\n \"\"\"\n return self._first_call_op_prepare_before_hook_create_subgraphs(\n op, args, kwargs, qtensor_id, fqn, root_module,\n op_quantizeability_type)\n\n def op_prepare_before_hook(\n self,\n op: Callable,\n args: Tuple[Any, ...],\n kwargs: Dict[str, Any],\n ) -> Tuple[Tuple[Any, ...], Dict[str, Any]]:\n \"\"\"\n This function is expected to be called on args and kwargs of\n `op` directly before `op` is executed.\n\n We do the following:\n * pass the inputs through observers, if needed\n\n The function returns modified `args` and `kwargs`.\n \"\"\"\n seen_q_op_info = self._get_cur_seen_q_op_info()\n\n def _maybe_observe(arg, tensor_info):\n tensor_id = tensor_info.id\n # TODO: do not run this twice on input and output\n if str(tensor_id) in self.tensor_id_to_observer:\n observer = self.tensor_id_to_observer[str(tensor_id)]\n return observer(arg)\n else:\n return arg\n\n args = iterate_and_apply(\n args, seen_q_op_info.input_tensor_infos, _maybe_observe)\n\n return args, kwargs\n\n def first_call_op_prepare_after_hook(\n self,\n op: Callable,\n output: Any,\n args: Tuple[Any, ...],\n qtensor_id: List[int],\n op_quantizeability_type: OpQuantizeabilityType,\n ) -> Any:\n \"\"\"\n This function is called after an op call on a prepared model.\n\n * create an observer for the output, if needed, and record it in\n `tensor_id_to_observer`\n * amend the current seen op with the tensor ID of the output\n \"\"\"\n self._first_call_op_prepare_after_hook_adjust_subgraphs(\n op, output, args, qtensor_id, op_quantizeability_type)\n return output\n\n def op_prepare_after_hook(\n self,\n op: Callable,\n output: Any,\n args: Tuple[Any, ...],\n global_op_idx: List[int],\n ) -> Any:\n \"\"\"\n This function is called after an op call on a prepared model.\n\n * observe the output, if needed\n \"\"\"\n seen_q_op_info = self._get_cur_seen_q_op_info()\n\n # if we are in a fusion, we only observe at the end of it\n is_fusion = seen_q_op_info.fusion_info is not None\n is_end_of_fusion = seen_q_op_info.fusion_info is not None and \\\n seen_q_op_info.fusion_info.is_last_element\n\n if is_fusion:\n if is_end_of_fusion:\n # do observe in the end of fusions, according to info\n # of the base op\n seen_q_op_info_start = get_seen_q_op_info_of_start_of_fusion(\n seen_q_op_info, self.idx_to_seen_q_op_infos)\n # use the obs type from beginning of pattern\n func_output_obs_type = get_func_output_obs_type(seen_q_op_info_start)\n if func_output_obs_type != FuncOutputObsType.NONE:\n # use the output tensor ID from the end of pattern\n tensor_id = seen_q_op_info.output_tensor_infos[0].id\n obs = self.tensor_id_to_observer[str(tensor_id)]\n output = obs(output)\n\n else:\n # do not observe in the middle of fusions\n pass\n else:\n # observe without fusions as normal\n func_output_obs_type = get_func_output_obs_type(seen_q_op_info)\n # TODO(future PR): other output types\n if func_output_obs_type != FuncOutputObsType.NONE:\n tensor_id = seen_q_op_info.output_tensor_infos[0].id\n obs = self.tensor_id_to_observer[str(tensor_id)]\n output = obs(output)\n\n if self.log_op_outputs:\n output_clone = clone_detach_tensor_without_dispatch(output)\n self.op_outputs[-1].append(\n (global_op_idx[0], seen_q_op_info.fqn, seen_q_op_info.type, output_clone))\n global_op_idx[0] += 1\n\n return output\n\n def op_convert_before_hook(\n self,\n op: Callable,\n args: 
Tuple[Any, ...],\n kwargs: Dict[str, Any],\n root_module: torch.nn.Module,\n ) -> Tuple[Callable, Tuple[Any, ...], Dict[str, Any]]:\n \"\"\"\n This function is called before an op call in a converted model.\n\n For each arg in `args`, quantizes it if necessary.\n\n Returns potentially modified `op`, potentially modified `args`,\n potentially modified `kwargs`.\n \"\"\"\n # TODO generalize this for more things\n # currently:\n # * can quantize args (via arg_quant_infos)\n # * can add scale and zp (via additional kwargs)\n\n # needed for F.conv2d\n # F.conv2d(input, weight, bias, stride, padding, dilation, groups)\n # to\n # q.conv2d(input, packed_params, scale, zero_point)\n orig_op = op\n maybe_new_op, arg_quant_infos, arg_dequant_infos, packed_param_name, \\\n additional_kwargs, any_arg_quant_or_dequant_needed, \\\n any_arg_kwarg_modification_needed = self.get_op_convert_info(op)\n if maybe_new_op is not None:\n op = maybe_new_op\n if not any_arg_kwarg_modification_needed:\n return op, args, kwargs\n # print(op, arg_quant_infos, packed_param_name, additional_kwargs)\n\n # potentially quantize args, based on arg_quant_infos\n new_args = []\n if any_arg_quant_or_dequant_needed:\n tensor_arg_idx = 0\n # TODO: refactor this to use iterate_and_apply\n if orig_op is torch.cat: # torch.cat variants\n # input tensors\n new_first_arg = []\n for arg in args[0]:\n # TODO: handle non-tensor inputs\n quant_info = arg_quant_infos[tensor_arg_idx]\n dequant_info = arg_dequant_infos[tensor_arg_idx]\n if quant_info is not None:\n scale, zp = quant_info\n arg = torch.quantize_per_tensor(arg, scale, zp, torch.quint8)\n elif dequant_info is True:\n arg = arg.dequantize()\n new_first_arg.append(arg)\n tensor_arg_idx += 1\n new_args = [new_first_arg, *args[1:]]\n else:\n for arg in args:\n # TODO: handle non-tensor inputs\n # TODO: this is not handling non-tensor tuple args (for example,\n # dilation in conv2d) correctly, it just happens to work but\n # needs a fix.\n quant_info = arg_quant_infos[tensor_arg_idx]\n dequant_info = arg_dequant_infos[tensor_arg_idx]\n if quant_info is not None:\n scale, zp = quant_info\n arg = torch.quantize_per_tensor(arg, scale, zp, torch.quint8)\n elif dequant_info is True:\n arg = arg.dequantize()\n new_args.append(arg)\n tensor_arg_idx += 1\n else:\n new_args = [*args]\n\n # if there is a packed param, replace the relevant args\n if packed_param_name is not None:\n new_args_with_packed = []\n packable_arg_idxs = get_packable_arg_idxs(orig_op)\n added_packed = False\n for idx, arg in enumerate(new_args):\n if packable_arg_idxs is not None and idx in packable_arg_idxs:\n if not added_packed:\n packed_param = getattr(root_module, packed_param_name)\n new_args_with_packed.append(packed_param)\n added_packed = True\n else:\n new_args_with_packed.append(arg)\n new_args = new_args_with_packed\n\n # potentially extend kwargs with scale and zero_point\n # TODO move op-specific logic out of here\n if len(additional_kwargs):\n if orig_op not in conv_ops and orig_op != F.linear:\n kwargs.update(**additional_kwargs)\n else:\n seen_q_op_info = self._get_cur_seen_q_op_info()\n if seen_q_op_info.output_tensor_infos[0].inf_dtype == torch.quint8:\n new_args.append(additional_kwargs['scale'])\n new_args.append(additional_kwargs['zero_point'])\n\n # TODO move op-specific logic out of here\n if op is torch.ops.quantized.linear:\n kwargs.pop('bias', None)\n\n return op, tuple(new_args), kwargs\n\n def op_convert_after_hook(\n self,\n op: Callable,\n output,\n global_op_idx: List[int],\n ) -> Any:\n 
\"\"\"\n This function is called aftern an op call in a converted model.\n\n TODO: add dequant, if needed\n \"\"\"\n if self.log_op_outputs:\n output_clone = clone_detach_tensor_without_dispatch(output)\n seen_q_op_info = self._get_cur_seen_q_op_info()\n self.op_outputs[-1].append(\n (global_op_idx[0], seen_q_op_info.fqn, seen_q_op_info.type, output_clone))\n global_op_idx[0] += 1\n\n return output\n\n def get_op_convert_info(\n self,\n op: Callable,\n ) -> OpConvertInfo:\n \"\"\"\n Returns the information needed for convert time modifications to `op`.\n \"\"\"\n return self.idx_to_op_convert_info[self.idx]\n\n def calculate_op_convert_info(\n self,\n seen_q_op_info: SeenQOpInfo,\n ) -> OpConvertInfo:\n \"\"\"\n This precalculates the information which will be returned by\n `get_op_convert_info`.\n \"\"\"\n # calculate new op\n maybe_new_op = get_quantized_op(\n seen_q_op_info, self.idx_to_seen_q_op_infos)\n\n # calculate quant infos\n arg_quant_infos, arg_dequant_infos, any_arg_quant_or_dequant_needed = \\\n get_input_args_quant_dequant_info(\n seen_q_op_info, self.tensor_id_to_scale_zp)\n\n # get packed param name, if applicable\n packed_param_name = self._get_packed_param_name(seen_q_op_info)\n\n # calculate scale and zp for output\n # TODO: instead of always doing this if there is an observer,\n # calculate whether this is needed based on the op and dtypes\n additional_kwargs = {}\n needs_scale_zp = converted_func_needs_scale_zp(seen_q_op_info)\n if needs_scale_zp:\n cur_seen_q_op_info = seen_q_op_info\n\n # if this is a start of a fusion pattern, get the observer\n # from the end of the fusion\n is_start_of_fusion = seen_q_op_info.fusion_info and \\\n seen_q_op_info.fusion_info.is_first_element\n if is_start_of_fusion:\n cur_seen_q_op_info = get_seen_q_op_info_of_end_of_fusion(\n seen_q_op_info, self.idx_to_seen_q_op_infos)\n\n output_tensor_infos = cur_seen_q_op_info.output_tensor_infos\n tensor_id = output_tensor_infos[0].id\n scale, zp = self.tensor_id_to_scale_zp[tensor_id]\n additional_kwargs.update({'scale': scale, 'zero_point': zp})\n\n any_arg_kwarg_modification_needed = bool(\n any_arg_quant_or_dequant_needed or\n packed_param_name is not None or\n len(additional_kwargs)\n ) # the cast to bool is to make mypy recognize this as a bool\n\n return maybe_new_op, arg_quant_infos, arg_dequant_infos, \\\n packed_param_name, additional_kwargs, any_arg_quant_or_dequant_needed, \\\n any_arg_kwarg_modification_needed\n\n def _get_packed_param_name(self, seen_q_op_info: SeenQOpInfo) -> Optional[str]:\n \"\"\"\n If the op in seen_q_op_info has a quantized packed param, returns it.\n Otherwise, returns None.\n \"\"\"\n return self.idx_to_packed_weight_name.get(seen_q_op_info.idx, None)\n\n def _first_call_assign_qtensor_infos_to_mod_outputs_tensor(\n self,\n output: torch.Tensor,\n qtensor_id: List[int],\n ) -> torch.Tensor:\n \"\"\"\n This is a helper function for _first_call_assign_qtensor_infos_to_mod_outputs\n to handle iterables of tensors without code duplication.\n \"\"\"\n if not hasattr(output, '_qtensor_info'):\n # TODO: use actual dtype instead of defaulting to float\n output._qtensor_info = QTensorInfo( # type: ignore[attr-defined]\n qtensor_id[0], output.dtype, torch.float)\n qtensor_id[0] += 1\n self.output_qtensor_infos.append(output._qtensor_info) # type: ignore[attr-defined]\n # TODO(future PR): add an observer if needed\n return output\n\n def _first_call_assign_qtensor_infos_to_mod_outputs(\n self,\n outputs: Any,\n qtensor_id: List[int],\n ) -> Any:\n \"\"\"\n Takes 
`outputs`, which are a set of values about to be returned from\n the current module. If `_qtensor_info` attributes do not already exist\n on any tensors in `outputs`, this function adds them, initializing the\n dtype to `torch.float`. This allows us to reason about module output\n dtypes even if the last op in the module is not quantizeable.\n \"\"\"\n # TODO: handle objects with deeper nested tensors\n if isinstance(outputs, torch.Tensor):\n self._first_call_assign_qtensor_infos_to_mod_outputs_tensor(outputs, qtensor_id)\n elif isinstance(outputs, tuple):\n # TODO: handle other tuple subclasses more generically\n new_outputs = []\n for output in outputs:\n if isinstance(output, torch.Tensor):\n new_outputs.append(self._first_call_assign_qtensor_infos_to_mod_outputs_tensor(\n output, qtensor_id))\n else:\n new_outputs.append(output)\n # hacky check for collections.namedtuple, TODO improve this\n # https://stackoverflow.com/questions/2166818/how-to-check-if-an-object-is-an-instance-of-a-namedtuple\n if hasattr(outputs, '_fields'):\n outputs = outputs.__class__(*new_outputs)\n else:\n outputs = tuple(new_outputs)\n else:\n pass\n return outputs\n\n def set_needs_dtype_transform_on_outputs(self):\n \"\"\"\n Calculates whether a dtype transform on module outputs is needed\n and stores it. This is used to skip the outputs hook if it is not\n needed.\n \"\"\"\n self.needs_dtype_transform_on_outputs = False\n\n if not len(self.output_qtensor_infos):\n # if there are no tensor outputs, there is nothing to transform\n return\n\n qtensor_info = self.output_qtensor_infos[0]\n if self.output_dtypes is not None:\n assert qtensor_info is not None\n # check the output dtype, and do the conversion if needed\n output_dtype = self.output_dtypes[0]\n if qtensor_info.inf_dtype != output_dtype:\n assert output_dtype is torch.float, \\\n 'non-float output dtypes not handled yet'\n self.needs_dtype_transform_on_outputs = True\n\n def _maybe_mod_outputs_dtype_transform(\n self,\n outputs: Any,\n ) -> Any:\n \"\"\"\n Takes `outputs` which are about to be returned from this module\n to the caller. If this module has restrictions on the dtypes of\n tensors it has to return, does the dtype conversion. Otherwise,\n does nothing.\n \"\"\"\n if not self.needs_dtype_transform_on_outputs:\n return outputs\n\n if isinstance(outputs, torch.Tensor):\n qtensor_info = self.output_qtensor_infos[0]\n if self.output_dtypes is not None:\n assert qtensor_info is not None\n # check the output dtype, and do the conversion if needed\n output_dtype = self.output_dtypes[0]\n if qtensor_info.inf_dtype != output_dtype:\n assert output_dtype is torch.float, \\\n 'non-float output dtypes not handled yet'\n outputs = outputs.dequantize()\n else:\n # if no output dtype was specified, do nothing\n pass\n\n return outputs\n\n def _first_call_op_prepare_before_hook_create_subgraphs_tensor(\n self,\n op: Callable,\n arg: Any,\n arg_tensor_infos: List[Optional[QTensorInfo]],\n qtensor_id: List[int],\n ) -> None:\n \"\"\"\n Runs the prepare hook during first_call for individual\n tensors. If the input argument is a tensor, this function is\n called directly. If the input argument is an iterable such\n as a list or a tuple, this function is called on each element of\n the iteratble.\n \"\"\"\n # TODO(next): fix this for torch.cat\n if not isinstance(arg, torch.Tensor):\n arg_tensor_infos.append(None)\n return\n\n # If a tensor does not have an ID, add it. 
This allows\n # us to track inputs shared by multiple quantizeable modules.\n if not hasattr(arg, '_qtensor_info'):\n arg._qtensor_info = QTensorInfo( # type: ignore[attr-defined]\n qtensor_id[0], arg.dtype, arg.dtype)\n qtensor_id[0] += 1\n arg_tensor_infos.append(arg._qtensor_info) # type: ignore[attr-defined]\n\n def _first_call_op_prepare_before_hook_create_subgraphs(\n self,\n op: Callable,\n args: Tuple[Any, ...],\n kwargs: Dict[str, Any],\n qtensor_id: List[int],\n fqn: str,\n root_module: torch.nn.Module,\n op_quantizeability_type: OpQuantizeabilityType,\n ) -> Tuple[Tuple[Any, ...], Dict[str, Any]]:\n \"\"\"\n Given an op, args, kwargs about to be executed, records the subgraph\n of this op in `self`.\n \"\"\"\n arg_tensor_infos: List[Optional[QTensorInfo]] = []\n for arg in args:\n if isinstance(arg, (list, tuple)):\n for inner_arg in arg:\n self._first_call_op_prepare_before_hook_create_subgraphs_tensor(\n op, inner_arg, arg_tensor_infos, qtensor_id)\n else:\n self._first_call_op_prepare_before_hook_create_subgraphs_tensor(\n op, arg, arg_tensor_infos, qtensor_id)\n\n if op_quantizeability_type is OpQuantizeabilityType.NOT_QUANTIZEABLE:\n op_type_is_module = isinstance(op, torch.nn.Module)\n op_type : Callable = type(op) if op_type_is_module else op # type: ignore[assignment]\n self.seen_nonq_op_infos.append(SeenNonQOpInfo(\n op_type, arg_tensor_infos, []))\n return args, kwargs\n\n op_packing_only_uses_module_attributes = \\\n get_op_packing_only_uses_module_attributes(op, args, kwargs, root_module)\n\n packable_tensor_idx_to_name = {}\n packable_nontensor_idx_to_arg = {}\n packable_tensor_kwarg_name_to_name = {}\n if op_packing_only_uses_module_attributes:\n packable_tensor_arg_idxs = get_packable_tensor_arg_idxs(op)\n if packable_tensor_arg_idxs is not None:\n for arg_idx in packable_tensor_arg_idxs:\n if arg_idx >= len(args):\n continue\n arg = args[arg_idx]\n param_name = get_param_name(root_module, arg)\n packable_tensor_idx_to_name[arg_idx] = param_name\n\n packable_nontensor_arg_idxs = get_packable_nontensor_arg_idxs(op)\n if packable_nontensor_arg_idxs is not None:\n for arg_idx in packable_nontensor_arg_idxs:\n packable_nontensor_idx_to_arg[arg_idx] = args[arg_idx]\n\n packable_tensor_kwarg_names = \\\n get_packable_tensor_kwarg_names(op)\n if packable_tensor_kwarg_names is not None:\n for kwarg_name in packable_tensor_kwarg_names:\n if kwarg_name not in kwargs:\n continue\n kwarg = kwargs[kwarg_name]\n kwarg_name_on_module = get_param_name(root_module, kwarg)\n packable_tensor_kwarg_name_to_name[kwarg_name] = \\\n kwarg_name_on_module\n\n if self.idx not in self.idx_to_seen_q_op_infos:\n op_type_is_module = isinstance(op, torch.nn.Module)\n op_type = type(op) if op_type_is_module else op # type: ignore[assignment]\n qconfig = get_cur_qconfig(self.qconfig_dict, fqn, op_type)\n self.idx_to_seen_q_op_infos[self.idx] = SeenQOpInfo(\n self.idx, op_type, op_type_is_module, fqn, arg_tensor_infos, [],\n packable_tensor_idx_to_name, packable_nontensor_idx_to_arg,\n packable_tensor_kwarg_name_to_name,\n op_packing_only_uses_module_attributes, qconfig, None)\n\n return args, kwargs\n\n def _first_call_op_prepare_after_hook_adjust_subgraphs(\n self,\n op: Callable,\n output: Any,\n args: Tuple[Any, ...],\n qtensor_id: List[int],\n op_quantizeability_type: OpQuantizeabilityType,\n ) -> None:\n \"\"\"\n After `op` was just executed, modifies the subgraph recorded\n for this op with the information about the output. 
Note, this\n has to be done in the \"after\" hook because the output of the op\n does not exist in the \"before\" hook.\n \"\"\"\n # TODO(future PR): check if _qtensor_id needs to become an actual\n # attribute of Tensor\n # TODO(future PR): handle non-tensor outputs\n if op_quantizeability_type is OpQuantizeabilityType.QUANTIZEABLE:\n\n seen_q_op_info = self._get_cur_seen_q_op_info()\n func_output_dtype_type = get_func_output_dtype_type(seen_q_op_info)\n if func_output_dtype_type == FuncOutputDTypeType.DTYPE_DEPENDS_ON_QCONFIG:\n if isinstance(op, torch.nn.Module):\n # For now, assume that eager mode convert has attached qconfig\n # objects to any leaf module which needs quantization\n if hasattr(op, 'activation_post_process'):\n dtype_to_use = op.activation_post_process.dtype\n else:\n dtype_to_use = torch.float\n else:\n qconfig = get_cur_qconfig(self.qconfig_dict, seen_q_op_info.fqn, op)\n if qconfig is None:\n dtype_to_use = torch.float\n else:\n dtype_to_use = qconfig.activation().dtype\n\n elif func_output_dtype_type == FuncOutputDTypeType.DTYPE_DEFAULT_BC_UNSUPPORTED_SYNTAX:\n dtype_to_use = torch.float\n else:\n # TODO(future PR): respect qconfig for torch.cat\n if isinstance(args[0], (tuple, list)): # for torch.cat\n unique_arg_dtypes = [\n arg._qtensor_info.inf_dtype for arg in args[0]]\n assert len(set(unique_arg_dtypes)) == 1, \\\n 'an iterable with arguments with different inference ' + \\\n 'dtypes is not supported yet'\n dtype_to_use = args[0][0]._qtensor_info.inf_dtype\n else:\n dtype_to_use = args[0]._qtensor_info.inf_dtype\n\n else:\n dtype_to_use = None # type: ignore[assignment]\n\n def _add_output_qtensor_info(output, dtype_to_use):\n if dtype_to_use is None:\n dtype_to_use = output.dtype\n output._qtensor_info = QTensorInfo(\n qtensor_id[0], output.dtype, dtype_to_use) # type: ignore[arg-type]\n if op_quantizeability_type is OpQuantizeabilityType.QUANTIZEABLE:\n target = self.idx_to_seen_q_op_infos[self.idx].output_tensor_infos\n else:\n target = self.seen_nonq_op_infos[-1].output_tensor_infos\n target.append(output._qtensor_info)\n qtensor_id[0] += 1\n\n if isinstance(output, torch.Tensor):\n _add_output_qtensor_info(output, dtype_to_use)\n elif isinstance(output, tuple):\n for element in output:\n if isinstance(element, torch.Tensor):\n _add_output_qtensor_info(element, dtype_to_use)\n\n def match_fusion_patterns(self):\n match_fusion_patterns(self.idx_to_seen_q_op_infos)\n\n def _maybe_insert_input_observers(self, seen_q_op_info: SeenQOpInfo):\n func_output_dtype_type = get_func_output_dtype_type(seen_q_op_info)\n input_observed_arg_idxs = get_input_observed_arg_idxs(\n seen_q_op_info.type, seen_q_op_info.type_is_module)\n\n if func_output_dtype_type == FuncOutputDTypeType.DTYPE_DEPENDS_ON_QCONFIG:\n for idx, tensor_info in enumerate(seen_q_op_info.input_tensor_infos):\n if tensor_info is None:\n continue\n if input_observed_arg_idxs is not None and \\\n idx not in input_observed_arg_idxs:\n continue\n\n qconfig = get_cur_qconfig(\n self.qconfig_dict, seen_q_op_info.fqn, seen_q_op_info.type)\n if qconfig is None:\n # If qconfig is None, we do not need any input observers\n continue\n\n elif tensor_info.inf_dtype != torch.quint8:\n # TODO(future PR): this assumes current dtype is quint8,\n # this is not always true\n # TODO(future PR): currently this only handles float32 and\n # quint8, we need to extend it to other dtypes\n tensor_id = tensor_info.id # type: ignore[attr-defined]\n weight_arg_idx = get_weight_arg_idx(seen_q_op_info.type)\n obs = qconfig.weight() if 
idx == weight_arg_idx else \\\n qconfig.activation()\n self.tensor_id_to_observer[str(tensor_id)] = obs\n\n def _maybe_insert_output_observers(\n self,\n seen_q_op_info: SeenQOpInfo,\n root_module: torch.nn.Module,\n ):\n if seen_q_op_info.fusion_info is not None:\n if not seen_q_op_info.fusion_info.is_first_element:\n # if we are in a fusion but not at the start, do not insert observer\n return\n else:\n # if we are in a fusion and at the start, insert observer for its end\n # get the output of the end of the fusion\n cur_seen_q_op_info = get_seen_q_op_info_of_end_of_fusion(\n seen_q_op_info, self.idx_to_seen_q_op_infos)\n output_tensor_id = cur_seen_q_op_info.output_tensor_infos[0].id\n else:\n output_tensor_id = seen_q_op_info.output_tensor_infos[0].id\n\n func_output_obs_type = get_func_output_obs_type(seen_q_op_info)\n if func_output_obs_type == FuncOutputObsType.NEW_OBS:\n # TODO(future PR): check qconfig is None\n qconfig = get_cur_qconfig(\n self.qconfig_dict, seen_q_op_info.fqn, seen_q_op_info.type)\n assert qconfig is not None\n self.tensor_id_to_observer[str(output_tensor_id)] = \\\n qconfig.activation()\n elif func_output_obs_type == FuncOutputObsType.REUSES_FIRST_INPUT_OBS:\n assert seen_q_op_info.input_tensor_infos[0] is not None\n first_input_tensor_id = seen_q_op_info.input_tensor_infos[0].id\n\n first_input_obs = None\n if str(first_input_tensor_id) in self.tensor_id_to_observer:\n first_input_obs = \\\n self.tensor_id_to_observer[str(first_input_tensor_id)]\n else:\n # This observer may be in a module (handled by eager\n # convert), in which case it's not in our map. For now,\n # copy it from the module. In the future, we could look\n # into having a soft link.\n # TODO: make this handle more cases\n # TODO: handle module -> add_scalar -> add_scalar\n prev_op = get_producer_of_seen_q_op_info(\n self.idx_to_seen_q_op_infos, seen_q_op_info)\n assert prev_op is not None\n # TODO: the following line needs to only check fqn\n # for modules, not for functions\n fqn_last_part = prev_op.fqn.split('.')[-1]\n if hasattr(root_module, fqn_last_part):\n first_input_mod = getattr(root_module, fqn_last_part)\n else:\n first_input_mod = None\n # Currently, both tracing for module fusion and tracing for\n # quantization go through this code path. When tracing\n # for module fusion, quantizeable modules do not have\n # observers yet. 
For this path to not crash, we create one.\n # When tracing for quantization, this will be ignored.\n # TODO(future PR): refactor to avoid this.\n if first_input_mod and hasattr(first_input_mod, 'activation_post_process'):\n first_input_obs = first_input_mod.activation_post_process\n else:\n # TODO(future PR): check qconfig is None\n qconfig = get_cur_qconfig(\n self.qconfig_dict, seen_q_op_info.fqn, seen_q_op_info.type)\n assert qconfig is not None\n first_input_obs = qconfig.activation()\n\n self.tensor_id_to_observer[str(output_tensor_id)] = first_input_obs\n\n def insert_observers(self, root_module: torch.nn.Module):\n for idx, seen_q_op_info in self.idx_to_seen_q_op_infos.items():\n self._maybe_insert_input_observers(seen_q_op_info)\n self._maybe_insert_output_observers(seen_q_op_info, root_module)\n\n # This is a hack to enable nn.Sequential to properly work with\n # this class.\n # TODO(future): remove the hack\n def forward(self, x):\n raise NotImplementedError('Calling AutoQuantizationState.forward is not supported')\n # return x\n", "from io import BufferedIOBase\nfrom typing import Any, Callable, Iterable, Iterator, Sized, Tuple\n\nfrom torch.utils.data import IterDataPipe, functional_datapipe\nfrom torch.utils.data.datapipes.utils.common import deprecation_warning\nfrom torch.utils.data.datapipes.utils.decoder import (\n Decoder,\n basichandlers as decoder_basichandlers,\n imagehandler as decoder_imagehandler,\n extension_extract_fn\n)\n\n\n@functional_datapipe('routed_decode')\nclass RoutedDecoderIterDataPipe(IterDataPipe[Tuple[str, Any]]):\n r\"\"\"\n Decodes binary streams from input DataPipe, yields pathname and decoded data\n in a tuple (functional name: ``routed_decode``).\n\n Args:\n datapipe: Iterable datapipe that provides pathname and binary stream in tuples\n handlers: Optional user defined decoder handlers. If ``None``, basic and image decoder\n handlers will be set as default. If multiple handles are provided, the priority\n order follows the order of handlers (the first handler has the top priority)\n key_fn: Function for decoder to extract key from pathname to dispatch handlers.\n Default is set to extract file extension from pathname\n\n Note:\n When ``key_fn`` is specified returning anything other than extension, the default\n handler will not work and users need to specify custom handler. 
Custom handler\n could use regex to determine the eligibility to handle data.\n \"\"\"\n\n def __init__(self,\n datapipe: Iterable[Tuple[str, BufferedIOBase]],\n *handlers: Callable,\n key_fn: Callable = extension_extract_fn) -> None:\n super().__init__()\n self.datapipe: Iterable[Tuple[str, BufferedIOBase]] = datapipe\n if not handlers:\n handlers = (decoder_basichandlers, decoder_imagehandler('torch'))\n self.decoder = Decoder(*handlers, key_fn=key_fn)\n deprecation_warning(type(self).__name__)\n\n def add_handler(self, *handler: Callable) -> None:\n self.decoder.add_handler(*handler)\n\n def __iter__(self) -> Iterator[Tuple[str, Any]]:\n for data in self.datapipe:\n pathname = data[0]\n result = self.decoder(data)\n yield (pathname, result[pathname])\n\n def __len__(self) -> int:\n if isinstance(self.datapipe, Sized):\n return len(self.datapipe)\n raise TypeError(\"{} instance doesn't have valid length\".format(type(self).__name__))\n", "# Owner(s): [\"oncall: distributed\"]\n\nimport sys\n\nimport torch\nfrom torch import distributed as dist\nfrom torch.distributed.fsdp import FullyShardedDataParallel as FSDP\nfrom torch.nn import Linear\nfrom torch.optim import SGD\nfrom torch.testing._internal.common_distributed import skip_if_lt_x_gpu\nfrom torch.testing._internal.common_fsdp import (\n FSDPTest,\n)\nfrom torch.testing._internal.common_utils import TEST_WITH_DEV_DBG_ASAN, run_tests\n\n\nif not dist.is_available():\n print(\"Distributed not available, skipping tests\", file=sys.stderr)\n sys.exit(0)\n\nif TEST_WITH_DEV_DBG_ASAN:\n print(\n \"Skip dev-asan as torch + multiprocessing spawn have known issues\",\n file=sys.stderr,\n )\n sys.exit(0)\n\n\nclass TestUnevenParamShard(FSDPTest):\n def _get_ref_results(self, model, input, my_lr):\n with torch.no_grad():\n # Compute one iteration local output.\n weight = model.weight.T.clone().to(self.rank)\n v = torch.Tensor(input[self.rank]).to(self.rank)\n ref_forward_output_my_rank = torch.matmul(v, weight)\n # Compute one iteration global weight update.\n v = torch.Tensor(input[: self.world_size]).to(self.rank)\n grad = v.float().sum(0).repeat(weight.shape[0], 1).div(self.world_size)\n ref_weight_out = weight - grad.T * my_lr\n\n return ref_forward_output_my_rank, ref_weight_out\n\n @skip_if_lt_x_gpu(2)\n def test_one_iteration(self):\n \"\"\"Test FSDP with uneven divide of parameter shards.\"\"\"\n model = Linear(3, 3, bias=False)\n input = torch.rand(8, 3)\n my_lr = 0.1\n\n ref_forward_output_my_rank, ref_weight_out = self._get_ref_results(\n model, input, my_lr\n )\n\n model.to(self.rank)\n model = FSDP(model)\n optim = SGD(model.parameters(), lr=my_lr)\n self.assertTrue(len(input) >= self.world_size)\n in_data = torch.Tensor(input[self.rank]).to(self.rank)\n out = model(in_data)\n out.float().sum().backward()\n optim.step()\n optim.zero_grad()\n\n with model._summon_full_params():\n torch.cuda.synchronize() # TODO: This is here because it was\n # originally part of get_full_params(), debug why it is needed here.\n weight_out = model.module.weight.T.clone()\n self.assertEqual(ref_forward_output_my_rank, out)\n self.assertEqual(ref_weight_out, weight_out)\n\n\nif __name__ == \"__main__\":\n run_tests()\n" ]
[ [ "torch.jit.script", "torch.testing._internal.jit_utils.make_global", "torch.randn", "torch.testing.FileCheck", "torch._C._enable_mobile_interface_call_export", "torch.jit.export_opnames", "torch.arange" ], [ "torch.nn.BatchNorm1d", "torch.nn.init.uniform_", "torch.empty", "torch.zeros", "torch.sqrt", "torch.zeros_like", "torch.nn.modules.linear.Linear.__init__", "torch.nn.init.zeros_", "torch.nn.functional.linear" ], [ "torch.nn.ModuleDict", "torch.quantize_per_tensor" ], [ "torch.utils.data.functional_datapipe", "torch.utils.data.datapipes.utils.decoder.imagehandler", "torch.utils.data.datapipes.utils.decoder.Decoder" ], [ "torch.cuda.synchronize", "torch.distributed.fsdp.FullyShardedDataParallel", "torch.Tensor", "torch.nn.Linear", "torch.matmul", "torch.distributed.is_available", "torch.no_grad", "torch.rand", "torch.testing._internal.common_distributed.skip_if_lt_x_gpu", "torch.testing._internal.common_utils.run_tests" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
LaudateCorpus1/deepmath
[ "b5b721f54de1d5d6a02d78f5da5995237f9995f9", "b5b721f54de1d5d6a02d78f5da5995237f9995f9", "b5b721f54de1d5d6a02d78f5da5995237f9995f9" ]
[ "deepmath/deephol/public/proof_assistant.py", "deepmath/deephol/proof_search_tree.py", "deepmath/guidance/driver_lib.py" ]
[ "\"\"\"A python client interface for ProofAssistantService.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n# Import Type Annotations\nfrom __future__ import print_function\nimport grpc\nimport tensorflow as tf\nfrom deepmath.proof_assistant import proof_assistant_pb2\nfrom deepmath.proof_assistant import proof_assistant_pb2_grpc\n\ntf.flags.DEFINE_string(\n 'proof_assistant_server_address', 'localhost:2000',\n 'address (including port) of the proof assistant server')\n\nFLAGS = tf.flags.FLAGS\n\nGIGABYTE = 1024 * 1024 * 1024\nGRPC_MAX_MESSAGE_LENGTH = GIGABYTE\n\n\nclass ProofAssistant(object):\n \"\"\"Class for intefacing a proof assistant.\"\"\"\n\n def __init__(self):\n self.channel = grpc.insecure_channel(\n FLAGS.proof_assistant_server_address,\n options=[('grpc.max_send_message_length', GRPC_MAX_MESSAGE_LENGTH),\n ('grpc.max_receive_message_length', GRPC_MAX_MESSAGE_LENGTH)])\n self.stub = proof_assistant_pb2_grpc.ProofAssistantServiceStub(self.channel)\n\n def ApplyTactic(self, request: proof_assistant_pb2.ApplyTacticRequest\n ) -> proof_assistant_pb2.ApplyTacticResponse:\n return self.stub.ApplyTactic(request)\n\n def VerifyProof(self, request: proof_assistant_pb2.VerifyProofRequest\n ) -> proof_assistant_pb2.VerifyProofResponse:\n return self.stub.VerifyProof(request)\n\n def RegisterTheorem(self, request: proof_assistant_pb2.RegisterTheoremRequest\n ) -> proof_assistant_pb2.RegisterTheoremResponse:\n return self.stub.RegisterTheorem(request)\n", "\"\"\"Infrastructure to maintain a proof search tree.\n\nThis file contains support for performing and logging proof searches.\n\nProof search tree has the following important features:\n - Can maintain multiple alternative proof branches.\n - Allows efficient subgoal-sharing between different proof branches.\n - Consistent, verified proof-search logging, independent of the\n search implementation.\n\nA search algorithm should iterate on:\n 1. Rank subgoals.\n 1.1 Picking a (open) subgoal from the tree.\n 1.2 Create a new SearchTreeNode for closing the subgoal\n 1.3 Call try_tactics for the SearchTreeNode.\n 1.4 Update the status and other metadata of the nodes whose status\n has changed.\n 1.5 Rerank subgoals whose status might have changed.\n 1.6 Go to 1.1\n 2. 
Produce search log.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n# Import Type Annotations\nfrom __future__ import print_function\n\nimport collections\nimport time\n\nimport tensorflow as tf\nfrom typing import List, Optional, Tuple, Text\nfrom deepmath.deephol.public import proof_assistant\nfrom deepmath.deephol import deephol_pb2\nfrom deepmath.deephol import theorem_fingerprint\nfrom deepmath.proof_assistant import proof_assistant_pb2\nfrom deepmath.public import error\n\n\ndef _extract_tactic_and_parameters(\n tactic_string: Text) -> Tuple[Text, List[deephol_pb2.TacticParameter]]:\n \"\"\"Extract the tactic string and its parameter list from a string.\n\n Args:\n tactic_string: The tactic application string to be passed to ocaml.\n\n Returns:\n A pair of tactic name and tactic parameter list.\n \"\"\"\n if '[' in tactic_string:\n s = tactic_string.replace(']', '').split('[')\n assert len(s) == 2, ('Expected single argument %s' % tactic_string)\n theorems = []\n for param_string in s[1].split(';'):\n ps = param_string.strip()\n if ps:\n t = ps.split()\n assert len(t) == 2, ('Invalid tactic parameter \"%s\"' % ps)\n assert t[0] == 'THM', ('Invalid tactic parameter \"%s\"' % ps)\n theorems.append(proof_assistant_pb2.Theorem(fingerprint=int(t[1])))\n return s[0].strip(), [\n deephol_pb2.TacticParameter(\n parameter_type=deephol_pb2.Tactic.THEOREM_LIST, theorems=theorems)\n ]\n else:\n s = tactic_string.split()\n if len(s) == 1:\n return s[0], []\n else:\n assert len(s) == 3\n assert s[1] == 'THM'\n return s[0], [\n deephol_pb2.TacticParameter(\n parameter_type=deephol_pb2.Tactic.THEOREM,\n theorems=[proof_assistant_pb2.Theorem(fingerprint=int(s[2]))])\n ]\n\n\ndef _theorem_to_string(thm: proof_assistant_pb2.Theorem) -> Text:\n \"\"\"Turn the theorem into a string for map lookup.\n\n Args:\n thm: Theorem to turn to string format.\n\n Returns:\n String joining the hypotheses and the conclusion by '|:|'- separators.\n \"\"\"\n return '|:|'.join([str(hyp) for hyp in thm.hypotheses] +\n [str(thm.conclusion)])\n\n\n# To reference a SubGoal of a tactic application, we need the following two\n# pieces of information:\n# - The TacticApplication that generated this subgoal.\n# - The index of the subgoal in the list of subgoals.\n# Note that we can't use NamedTuple, since TacticApplication can't be\n# referenced due to recursive definitions.\nSubGoalRef = collections.namedtuple('SubGoalRef',\n ['tactic_application', 'subgoal_index'])\n\n\nclass ProofSearchTree(object):\n \"\"\"Container object to represent the whole search tree.\n\n This object maintains:\n - A list of nodes, where the first node corresponds to the root goal.\n (Which should be in the theorem database now, for premise selection\n purposes).\n - A map of theorems to nodes in order to allow subgoal-sharing. 
It is\n unclear if this ever happens.\n - A pointer to the wrapper for the proof assistant.\n - A current index to iterate through the search tree in a BFS manner.\n \"\"\"\n\n def add_node(self, goal: proof_assistant_pb2.Theorem,\n parent: Optional[SubGoalRef]):\n \"\"\"Append a new node to the tree.\"\"\"\n goal_as_string = _theorem_to_string(goal)\n if goal_as_string in self.nodes_map:\n node = self.nodes[self.nodes_map[goal_as_string]]\n # Make sure that we really match everything exactly\n assert len(node.goal.hypotheses) == len(goal.hypotheses)\n for i, hyp in enumerate(goal.hypotheses):\n assert hyp == node.goal.hypotheses[i]\n assert goal.conclusion == node.goal.conclusion\n if parent is not None:\n node.parents.append(parent)\n # If the node was already ignored, remove its ignore flag if\n # there is something we can still do about this node.\n # Now the question remains: How would the prover enqueue all\n # nodes that might be helpful for closing this goal?\n # Note that this code might or might not remove the ignore\n # flag from this node and a lot of its descendants.\n # However, the descendants should have higher index than this\n # node, unless the node is involved in a loop in which case\n # this node can never be closed along that loop.\n node.remove_ignore()\n if not (node.ignore or node.closed or node.failed):\n if self.cur_index is None or self.cur_index > node.index:\n self.cur_index = node.index\n return node\n else:\n index = len(self.nodes)\n self.nodes_map[goal_as_string] = index\n node = ProofSearchNode(self, index, goal, parent)\n self.nodes.append(node)\n return node\n\n def __init__(self, proof_assistant_obj: proof_assistant.ProofAssistant,\n goal: proof_assistant_pb2.Theorem):\n \"\"\"Constructor for a proof search tree.\n\n Args:\n proof_assistant_obj: An interface to the proof assistant.\n goal: The root goal which is also used to limit the premise selection to\n preceding theorems. This is the first theorem in the theorem database\n that is not allowed to be used in the proof. For now, it is mandatory\n that the goal is in the theorem database. Later, we should relax this\n constraint.\n \"\"\"\n self.proof_assistant = proof_assistant_obj\n self.nodes = []\n self.nodes_map = {}\n root = self.add_node(goal, None)\n assert root.index == 0\n self.cur_index = None\n\n def to_proto(self) -> deephol_pb2.ProofLog:\n \"\"\"Serialize the proof search tree as a protobuf.\n\n Returns:\n A deephol_pb2.ProofLog protobuf representing the whole proof search tree.\n \"\"\"\n proof_log = deephol_pb2.ProofLog()\n for node in self.nodes:\n status = deephol_pb2.ProofNode.UNKNOWN\n if node.closed:\n status = deephol_pb2.ProofNode.PROVED\n node_log = proof_log.nodes.add(\n goal=node.goal,\n status=status,\n action_generation_time_millisec=node.action_generation_time_millisec)\n for tapp in node.failed_attempts:\n tapp.add_to_node_proto(node_log)\n for tapp in node.successful_attempts:\n tapp.add_to_node_proto(node_log)\n return proof_log\n\n\nclass TacticApplication(object):\n \"\"\"Result of tactic applications.\"\"\"\n\n def __init__(\n self,\n parent, # : ProofSearchNode,\n successful_attempts: List[int],\n failed_attempts: List[int],\n tree: ProofSearchTree,\n request: proof_assistant_pb2.ApplyTacticRequest,\n score: float):\n \"\"\"Constructor for the result of a tactic application.\n\n This function is a wrapper around a proof assistant's ApplyTactic.\n TacticApplication objects are always stored as elements in the\n tactic_applications field of SearchNode. 
These represents starts of\n proof attempts for particular goals or subgoals.\n\n Args:\n parent: ProofSearchNode to which the tactic was applied to.\n successful_attempts: List of successful tactic applications. If the tactic\n is applied successfully, then this application is added to this list and\n the index will refer to this list. The result field must contain\n deephol_pb2.TacticApplication.SUCCESS in this case.\n failed_attempts: List of failed tactic applications. If tactic could not\n be applied, timed out or did not change the goal, then the application\n is added to this list and the index will refer to this list. The result\n field must be any value different from\n deephol_pb2.TacticApplication.SUCCESS in this case.\n tree: ProofSearchTree to which this application belongs to.\n request: Tactic-application request to be run.\n score: Score produced by the action generator.\n \"\"\"\n self.parent = parent\n # Index of the tactic application in either (successful or failed) list of\n # proof attempts in the ProofSearchNode. Will be filled once it is clear if\n # the application was successful or not.\n self.index = None\n self.result = None\n self.error_message = None\n self.time_spent = None\n # List of ProofSearchNodes corresponding to the subgoals of this tactic.\n self.subgoals = []\n self.tactic = request.tactic\n self.closed = False # True if all subgoals are closed.\n self.failed = False # True if any of the subgoals are failed to close.\n self.score = score\n self.rank = len(failed_attempts) + len(successful_attempts)\n start_time = time.time()\n try:\n response = tree.proof_assistant.ApplyTactic(request)\n elapsed_msecs = int((time.time() - start_time) * 1000.0 + 0.5)\n self.time_spent = elapsed_msecs\n except error.StatusNotOk as exception:\n elapsed_msecs = int((time.time() - start_time) * 1000.0 + 0.5)\n self.time_spent = elapsed_msecs\n tf.logging.info('Tactic application failed: %s with error %s',\n str(self.tactic), exception.message)\n self.result = deephol_pb2.TacticApplication.ERROR\n self.failed = True\n self.error_message = exception.message\n self.index = len(failed_attempts)\n failed_attempts.append(self)\n # Sometimes, rarely, the prover gets into in which it stops\n # communicating and eventually requests hang. 
However we\n # can bail out before that happen and can prevent the whole\n # program to hang for a long time.\n if str(exception).startswith('Communication') and str(exception).endswith(\n 'failed.'):\n raise exception\n return\n if response.HasField('error'):\n tf.logging.info('Tactic application failed: %s, %s', str(request.tactic),\n response.error)\n self.result = deephol_pb2.TacticApplication.ERROR\n self.failed = True\n self.error_message = response.error\n self.index = len(failed_attempts)\n failed_attempts.append(self)\n return\n assert response.HasField('goals')\n new_subgoals = list(response.goals.goals)\n\n def is_same_expr(t1, t2):\n return t1.conclusion == t2.conclusion and t1.hypotheses == t2.hypotheses\n\n if len(new_subgoals) == 1 and is_same_expr(request.goal, new_subgoals[0]):\n tf.logging.info('Tactic %s applied, but did not change subgoals.',\n request.tactic)\n self.result = deephol_pb2.TacticApplication.UNCHANGED\n self.failed = True\n self.index = len(failed_attempts)\n failed_attempts.append(self)\n return\n # We have a successful tactic application.\n assert not self.subgoals\n self.index = len(successful_attempts)\n for i, goal in enumerate(new_subgoals):\n thm = proof_assistant_pb2.Theorem(\n hypotheses=goal.hypotheses,\n conclusion=goal.conclusion,\n pretty_printed=goal.pretty_printed,\n tag=proof_assistant_pb2.Theorem.GOAL)\n subgoal_ref = SubGoalRef(tactic_application=self, subgoal_index=i)\n self.subgoals.append(tree.add_node(thm, subgoal_ref))\n self.result = deephol_pb2.TacticApplication.SUCCESS\n # We don't know if some of the subgoals will fail or not.\n self.failed = False\n tf.logging.info('Tactic %s successfully applied.', self.tactic)\n successful_attempts.append(self)\n if not new_subgoals:\n assert self.update_closed()\n\n def update_closed(self) -> bool:\n \"\"\"Update the \"closed\" property for the TacticApplication.\n\n It returns true if the application was a successful tactic\n application and all of the resulting subgoals are already marked as closed.\n Otherwise it checks all subgoals and marks the application if\n any of them is closed. Note that it is essential to test that\n self.result is SUCCESS, otherwise failed attempt would be marked\n as closed, which would be a grave mistake.\n\n Returns:\n True if the status of the application was success and all\n the subgoals are closed, otherwise false.\n \"\"\"\n if self.result != deephol_pb2.TacticApplication.SUCCESS:\n return False\n if self.closed:\n return True\n for subgoal in self.subgoals:\n if not subgoal.closed:\n return False\n self.closed = True\n # We are marking the associated node closed. Note that this is a recursive\n # call and might update more associated TacticApplications upstream.\n self.parent.mark_closed(self)\n return True\n\n def mark_failed(self):\n \"\"\"Mark this tactic-application failed if any of the subgoals has failed.\n\n Note that having \"failed\" is a soft condition, not a definitive one. Right\n now, the hard-coded behavior is to \"fail\" if no tactic could is applied\n in a way that changes the goal without producing error. However, this\n behavior might be overridden.\n \"\"\"\n if self.failed:\n # Nothing to do, we are already marked as a failure.\n # Make sure that we have not marked this node closed. That would be a\n # contradiction.\n assert not self.closed\n return\n for subgoal in self.subgoals:\n if subgoal.failed:\n # We have found a failing subgoal.\n # Make sure that we have not marked this node closed. 
That would be a\n # contradiction.\n assert not self.closed\n # This tactic application is failed since we can't close it anymore.\n self.failed = True\n # Update the parent node to be failed if all of its tactic applications\n # have failed.\n self.parent.update_failed()\n # We would like to mark all non-failing sibling nodes and their\n # descendants useless if they have not chance of contributing to closing\n # any other goal.\n for sibling in self.subgoals:\n sibling.update_ignore()\n # Don't do duplicated work.\n return\n\n def add_to_node_proto(self, node_proto: deephol_pb2.ProofNode):\n tactic, parameters = _extract_tactic_and_parameters(str(self.tactic))\n node_proto.proofs.add(\n tactic=tactic,\n parameters=parameters,\n subgoals=[sg.goal for sg in self.subgoals],\n result=self.result,\n error_message=self.error_message,\n time_spent=self.time_spent,\n closed=self.closed,\n score=self.score,\n rank=self.rank)\n\n\nclass ProofSearchNode(object):\n \"\"\"Node in the proof tree, corresponding to one goal.\"\"\"\n\n def __init__(self,\n tree: ProofSearchTree,\n index: int,\n goal: proof_assistant_pb2.Theorem,\n parent: Optional[SubGoalRef] = None):\n \"\"\"Constructor for a Node within proof search.\n\n Each node represents a goal or subgoal with one or multiple proof attempts.\n Proof attempts are tactic_applications that can generate one or multiple\n subgoals.\n\n Args:\n tree: The ProofSearchTree object to which this node belongs.\n index: Index of this node in the list of nodes of the search tree.\n goal: Actual goal to be proved.\n parent: The source of this goal. If it is None, then it must be the root\n of the search tree. Otherwise it must be a SubGoalRef referring to the\n tactic application that created the proof search node.\n \"\"\"\n self.tree = tree\n self.goal = goal\n if not self.goal.fingerprint:\n self.goal.fingerprint = theorem_fingerprint.Fingerprint(goal)\n self.index = index\n if parent is not None:\n self.parents = [parent]\n else:\n self.parents = []\n # The list of the successful tactic applications. Note that elements\n # of this list might refer to subtrees that are not or can't be closed\n # successfully.\n self.successful_attempts = []\n # The list of the failed tactic applications.\n self.failed_attempts = []\n # Here, we have three options:\n # - None: we have attempted no tactics yet\n # - False: the tree was expanded at this node, but it is not closed yet\n # - True: We have at least one proof attempt that was successful.\n self.closed = None\n # This is a temporary marker: we say that a node has failed if\n # all of its proof attempts have failed.\n self.failed = None\n # This is a temporary marker: we set a node to be *ignored* if it is useless\n # to close, that is:\n # - it has at least one parent (that is: it is not the root node) and\n # - each of its parent is to set to ignore.\n #\n # A node should be ignored if it is not helpful to close any of its subgoals\n # anymore, bacause its parent is set to ignored due to\n # - its being hopeless to close (i.e. 
failed) or\n # - its being useless to close, since some of the ancestors participates\n # only in tactic applications where at least one of the subgoals has\n # failed.\n #\n # A node will be ignored if either\n # - All of its parents (participating tactic applications) are failed or\n # ignored\n # - One of its sibling has failed.\n # The ignore flag is propagated from failed nodes in the following way:\n # - Once a node is set to failed, then all of its siblings are set to ignore\n # - All descendants of ignored nodes are marked as ignored as long as all\n # their other parents are ignored or failed too.\n #\n # Once a non-failed node becomes descendant of a non-ignored\n # node again (as a shared node), then the ignored flags are removed for\n # all of its non-failed descendants (that have a chance to close).\n self.ignore = False\n # Set to true if initial tactics are applied.\n self.processed = False\n # Action generation happens only once, when node is processed.\n self.action_generation_time_millisec = None\n\n def update_ignore(self):\n \"\"\"Update the ignore flag on the node all descendants if warrented.\"\"\"\n if self.ignore:\n # Don't do double work if the node is already ignored.\n return\n if not self.parents:\n # Never ignore the root node. It might fail, but never gets ignored.\n return\n for parent in self.parents:\n tac_app = parent.tactic_application\n if not (tac_app.failed or tac_app.parent.ignore):\n # This node might be useful for closing this tactic application.\n return\n # This node is useless as closing it will not help the final goal:\n # - The node is not the root node.\n # - Either the tactic application has already failed or\n # - the goal of each parent tactic application is already useless.\n self.ignore = True\n # Now, we need to update all the children in all current tactic applications\n for tac_app in self.successful_attempts:\n for subgoal in tac_app.subgoals:\n # Mark all subgoals in the proof attempts as ignore, since it is\n # useless to close this goal.\n subgoal.update_ignore()\n\n # If a node gets a new non-ignored parent, then we should mark it and\n # all its useful descendants non-ignored, unless all proof attempts\n # of the node have failed.\n def remove_ignore(self):\n \"\"\"Clear the ignore flag on the node if warranted by the circumnstances.\n\n The use case of this function is to re-enable ignored nodes if they\n become interesting again, since they show up as result of tactic\n applications on newly expanded nodes. In this case, we might be forced\n to remove the ignore flag on the node if the node has become interesting\n again.\n \"\"\"\n # We only remove ignore from a node if there is something interesting\n # we can do about this node.\n # If it is failed, then there is nothing we can do\n # If it is closed, then there is nothing to do\n # If it is not yet ignored, then we don't need to mark it not-ignored.\n if self.failed or self.closed or not self.ignore:\n return\n remove_ignore = False\n for parent in self.parents:\n tac_app = parent.tactic_application\n if not (tac_app.failed or tac_app.parent.ignore):\n remove_ignore = True\n break\n if remove_ignore:\n self.ignore = False\n else:\n if self.parents:\n # We are hopless anyways. 
There is no reason to close to\n return\n else:\n # The root should never be set to ignore.\n self.ignore = False\n might_close = False\n for tac_app in self.successfull_attempts:\n if tac_app.closed:\n self.mark_closed(tac_app)\n break\n if not tac_app.failed:\n might_close = True\n if might_close and not self.closed:\n # Recursively remove the ignore flag in all descendants\n # that have a chance to close.\n for tac_app in self.successfull_attempts:\n if not tac_app.failed:\n assert not tac_app.closed\n for subgoal in tac_app.subgoals:\n subgoal.remove_ignore()\n\n def mark_closed(self, tactic_application: TacticApplication):\n \"\"\"Mark the proof search node closed.\n\n This function assumes that we have a fully closed subtree on one of\n the TacticApplications in successful_attempts. Assertion will fail\n if this is not the case.\n\n Args:\n tactic_application: The TacticApplication that has lead to a proof. This\n parameter is only used for verifying that the node is really closed.\n Assertions will fail if that's not the case.\n \"\"\"\n assert self.closed is not None\n if self.closed:\n return\n # Check that the tactic_application belongs to this node.\n assert tactic_application.parent.index == self.index\n # Make sure that all subgoals are really closed.\n for subgoal in tactic_application.subgoals:\n assert subgoal.closed\n self.closed = True\n # Now, we don't want to close this goal again. We ignore it\n # for all further attempts.\n self.ignored = True\n # For all other non-closed tactic application, ignore the children\n # if they don't need to be closed as other subgoals.\n for tac_app in self.successful_attempts:\n if not tac_app.closed:\n for subgoal in tac_app.subgoals:\n # Mark all subgoals in the other proof attempts as ignore, they\n # have become useless for closing this goal.\n subgoal.update_ignore()\n # Note that update_closed does not necessarily close the\n # parent. 
Only, if all of their subgoals are closed.\n # In general, we have a recursive call, that might mark\n # all the closed (sub-)goals closed in the relevant part\n # of the search tree, if they got proved.\n for subgoal_ref in self.parents:\n subgoal_ref.tactic_application.update_closed()\n\n def update_failed(self):\n \"\"\"Update the not to be failed if there is no chance to close it.\"\"\"\n if self.closed:\n # This node can't fail as it is already closed.\n return\n # Mark this node to be failed if all of its tactic applications have failed.\n for tac_app in self.successful_attempts:\n if not tac_app.failed:\n # We have a chance to close some of these subgoals\n return\n self.failed = True\n for subgoal_ref in self.parents:\n subgoal_ref.tactic_application.mark_failed()\n # Mark this node and its descendants to be ignored.\n self.update_ignore()\n\n\ndef check_tree_consistency(tree: ProofSearchTree):\n \"\"\"Checks the consistency of the proof search tree.\n\n Verifies that the cross-reference indices are set correctly.\n It also checks that the \"closed\" flags are propagated correctly through\n the tree.\n\n Args:\n tree: Reference to the proof search tree to be checked.\n \"\"\"\n for i, node in enumerate(tree.nodes):\n assert i == node.index, ('Inconsistent node index %d != %d' %\n (node.index, i))\n assert tree is node.tree, ('Inconsistent tree for node %d' % i)\n if i == 0:\n assert not node.parents, 'Root node with parent'\n else:\n assert node.parents, ('Non-root node %d without parent' % i)\n for parent in node.parents:\n tapp = parent.tactic_application\n assert tapp.subgoals[parent.subgoal_index] is node\n for j, tapp in enumerate(node.failed_attempts):\n assert j == tapp.index, ('Index mismatch %d != %d' % (j, tapp.index))\n assert tapp.result != deephol_pb2.TacticApplication.UNKNOWN\n assert tapp.result != deephol_pb2.TacticApplication.SUCCESS\n assert not tapp.subgoals, ('Failed attempts with subgoals %d %d' % (i, j))\n closed = False\n for j, tapp in enumerate(node.successful_attempts):\n assert j == tapp.index, ('Inconsistent TacticApplication index %d != %d' %\n (j, tapp.index))\n assert tapp.result != deephol_pb2.TacticApplication.UNKNOWN\n assert tapp.result == deephol_pb2.TacticApplication.SUCCESS\n assert not tapp.error_message, ('Successful attempt with error %s %d %d' %\n (tapp.error_message, i, j))\n all_goals_closed = True\n for goal in tapp.subgoals:\n if not goal.closed:\n all_goals_closed = False\n if all_goals_closed:\n assert tapp.closed, ('All subgoals closed for %d:%d but tapp is '\n 'is not closed' % (i, j))\n closed = True\n assert all_goals_closed == tapp.closed, ('Inconsistent closed mark '\n '%d:%d' % (i, j))\n if not node.failed_attempts and not node.successful_attempts:\n closed = None\n assert closed == node.closed, ('Inconsistent closed mark for node %d' % i)\n", "# Copyright 2017 Google Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Clause selection driver utilities for training and eval.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport re\nimport tensorflow as tf\nfrom deepmath.guidance import all_models\nfrom deepmath.guidance import clause_loom\nfrom deepmath.guidance import gen_clause_ops\nfrom deepmath.guidance import inputs\nfrom deepmath.guidance import jagged\nfrom deepmath.guidance import train\nfrom deepmath.util import model_utils\n\nFLAGS = tf.flags.FLAGS\n\n\ndef parse_hparams(hparam_str):\n \"\"\"Parse hyperparameters from the given flag value.\"\"\"\n m = re.search(r'(?:^|,)model=(\\w+)(?:,|$)', hparam_str)\n if not m:\n raise ValueError('No model specified in --hparams=%r' % hparam_str)\n input_hparams = tf.contrib.training.HParams(\n model='', # Name of the model to run\n embedding_size=256, # Word embedding dimension\n )\n hparams = model_utils.merge_hparams(train.default_hparams(),\n all_models.model_hparams(m.group(1)),\n input_hparams)\n hparams.parse(hparam_str)\n return hparams\n\n\ndef mode_batch_size(mode, hparams):\n \"\"\"Returns the batch size for a given mode (train or eval).\n\n Args:\n mode: Either 'train' or 'eval'.\n hparams: Hyperparameters.\n\n Returns:\n Integer batch size.\n\n Raises:\n ValueError: If mode is not 'train' or 'eval'.\n \"\"\"\n if mode == 'train':\n return hparams.batch_size\n elif mode == 'eval':\n return hparams.eval_batch_size\n else:\n raise ValueError('Invalid --mode=%r' % mode)\n\n\ndef fix_logits(kind, logits):\n \"\"\"Fix logits to be scalar (True / False) rather than two class.\n\n Args:\n kind: Either 'sequence' or 'tree'.\n logits: Logits tensor of shape (?, 2) or (?,).\n\n Returns:\n Logits tensor of shape (?,)\n\n Raises:\n ValueError: If logits has an invalid shape.\n \"\"\"\n logits_rank = logits.get_shape().ndims\n if kind == 'sequence' and logits_rank == 2:\n logits.get_shape().merge_with((None, 2))\n logits = logits[:, 0]\n elif logits_rank != 1:\n raise ValueError('logits has bad rank %r' % logits_rank)\n return logits\n\n\ndef full_model(mode, hparams):\n \"\"\"Make a clause search model including input pipeline.\n\n Args:\n mode: Either 'train' or 'eval'.\n hparams: Hyperparameters. 
See default_hparams for details.\n\n Returns:\n logits, labels\n\n Raises:\n ValueError: If the model returns badly shaped tensors.\n \"\"\"\n if hparams.use_averages:\n raise NotImplementedError('Figure out how to eval with Polyak averaging')\n kind, model = all_models.make_model(name=hparams.model, mode=mode,\n hparams=hparams, vocab=FLAGS.vocab)\n batch_size = mode_batch_size(mode, hparams)\n\n if kind == 'sequence':\n # Read\n _, conjectures, clauses, labels = inputs.sequence_example_batch(\n mode=mode, batch_size=batch_size, shuffle=True)\n clauses = tf.reshape(clauses, [2 * batch_size, -1])\n labels = tf.reshape(labels, [2 * batch_size])\n\n # Embed\n vocab_size, _ = inputs.read_vocab(FLAGS.vocab)\n conjectures, clauses = model_utils.shared_embedding_layer(\n (conjectures, clauses), dim=hparams.embedding_size, size=vocab_size)\n\n # Classify\n conjectures = model.conjecture_embedding(conjectures)\n conjectures = tf.reshape(\n tf.tile(tf.reshape(conjectures, [batch_size, 1, -1]), [1, 2, 1]),\n [2 * batch_size, -1])\n clauses = model.axiom_embedding(clauses)\n logits = model.classifier(conjectures, clauses)\n elif kind == 'tree':\n examples = inputs.proto_batch(mode=mode, batch_size=batch_size)\n def weave(**ops):\n return clause_loom.weave_clauses(\n examples=examples, vocab=FLAGS.vocab, **ops)\n\n logits, labels = model(weave)\n elif kind == 'fast':\n examples = inputs.proto_batch(mode=mode, batch_size=batch_size)\n conjecture_sizes, conjecture_flat, clauses, labels = (\n gen_clause_ops.random_clauses_as_fast_clause(\n examples, vocab=FLAGS.vocab))\n conjectures = jagged.Jagged(conjecture_sizes, conjecture_flat)\n logits = model(conjectures, clauses)\n\n # Done!\n return fix_logits(kind, logits), labels\n\n\ndef with_name(value, name):\n \"\"\"Returns a tensor with value value and name name.\n\n Args:\n value: A tensor.\n name: A name.\n\n Returns:\n A tensor with the given value and name.\n\n Raises:\n TypeError: If the input isn't a Tensor or Operation.\n ValueError: If the name is taken.\n \"\"\"\n if isinstance(value, tf.Tensor):\n nop, suffix = tf.identity, ':0'\n elif isinstance(value, tf.Operation):\n nop, suffix = tf.group, ''\n else:\n raise TypeError('Expected Tensor or Operation, got %r' % type(value))\n if value.name != name + suffix:\n value = nop(value, name=name)\n if value.name != name + suffix:\n raise ValueError('Tried to ensure name %r, but got %r' %\n (name + suffix, value.name))\n return value\n\n\ndef inference(hparams):\n \"\"\"Make a clause search graph suitable for inference at proof time.\n\n Each described node has the correct name, for purposes of C++ lookup:\n\n conjecture, clauses: string, shape (?,), placeholders of serialized\n FastClause protos.\n conjecture_embeddings: float32, shape (dim,).\n logits: float32, shape (?,) output logits.\n initialize: Initialization op.\n\n Args:\n hparams: Hyperparameters. 
See default_hparams for details.\n\n Returns:\n The tf.Saver object.\n\n Raises:\n ValueError: If the model kind is not 'tree' or 'sequence'.\n \"\"\"\n if hparams.use_averages:\n raise NotImplementedError('Figure out how to eval with Polyak averaging')\n kind, model = all_models.make_model(name=hparams.model, mode='eval',\n hparams=hparams, vocab=FLAGS.vocab)\n\n # Input placeholders, which will hold FastClause protos.\n conjecture = tf.placeholder(\n name='conjecture', shape=(None,), dtype=tf.string)\n clauses = tf.placeholder(name='clauses', shape=(None,), dtype=tf.string)\n\n def expand(embedding):\n \"\"\"Tile the one conjecture to match clauses.\"\"\"\n embeddings = tf.tile(embedding, tf.stack([tf.size(clauses), 1]))\n embeddings.set_shape([None, embedding.get_shape()[-1]])\n return embeddings\n\n if kind == 'sequence':\n # Embedding weights\n vocab_size, _ = inputs.read_vocab(FLAGS.vocab)\n params = model_utils.embedding_weights(dim=hparams.embedding_size,\n size=vocab_size)\n\n # Embed conjecture\n ids = gen_clause_ops.fast_clauses_as_sequence(\n conjecture, conjunction=True)\n ids = tf.nn.embedding_lookup(params, ids)\n ids = ids[None] # Singleton batch since many clauses are one ~conjecture\n conjecture_embedding = with_name(\n model.conjecture_embedding(ids), name='conjecture_embeddings')\n\n # Embed clauses\n ids = gen_clause_ops.fast_clauses_as_sequence(clauses)\n ids = tf.nn.embedding_lookup(params, ids)\n clause_embeddings = model.axiom_embedding(ids)\n\n # Classify\n logits = model.classifier(expand(conjecture_embedding), clause_embeddings)\n elif kind == 'tree':\n def weave(embed, conjecture_apply, conjecture_not, conjecture_or,\n conjecture_and, clause_apply, clause_not, clause_or, combine):\n \"\"\"Weave conjecture and clauses separately, then combine.\"\"\"\n # Embed conjecture, naming a concatenated version for simplicity\n parts = clause_loom.weave_fast_clauses(\n clauses=conjecture,\n embed=embed,\n apply_=conjecture_apply,\n not_=conjecture_not,\n or_=conjecture_or,\n and_=conjecture_and,\n shuffle=False)\n concat = tf.concat(parts, 1, name='conjecture_embeddings')\n splits = tf.split(\n concat, [p.get_shape()[1].value for p in parts], axis=1)\n splits = [expand(s) for s in splits]\n\n # Embed clauses\n clause_embeddings = clause_loom.weave_fast_clauses(\n clauses=clauses,\n embed=embed,\n apply_=clause_apply,\n not_=clause_not,\n or_=clause_or,\n shuffle=False)\n\n # Combine into logits\n return combine.instantiate_batch(splits + list(clause_embeddings))\n\n logits, = model(weave)\n elif kind == 'fast':\n logits = model(jagged.pack([conjecture]), clauses)\n else:\n raise ValueError('Unknown kind %r' % kind)\n\n # Fix and name logits\n with_name(fix_logits(kind, logits), name='logits')\n\n # Add init op for testing purposes\n with_name(tf.global_variables_initializer(), name='initialize')\n\n # Add saver and init ops (the latter only for test purposes)\n return tf.train.Saver()\n\n\ndef export_inference_meta_graph(hparams, filename=None, as_text=False):\n \"\"\"Export the inference graph to a file.\n\n See `inference` above for details about graph structure.\n\n Args:\n hparams: Hyperparameters. 
See default_hparams for details.\n filename: Optional filename to write the MetaGraphDef to.\n as_text: If true, use ASCII format.\n\n Returns:\n A MetaGraphDef proto.\n \"\"\"\n with tf.Graph().as_default():\n saver = inference(hparams)\n return saver.export_meta_graph(filename=filename, as_text=as_text)\n\n\ndef run_mode(mode, hparams):\n \"\"\"Either train or evaluate a clause search model.\n\n Args:\n mode: Either 'train' or 'test'.\n hparams: Hyperparameters.\n\n Raises:\n ValueError: If mode is not 'train' or 'eval'.\n \"\"\"\n if hparams.seed:\n tf.set_random_seed(hparams.seed)\n model = lambda: full_model(mode, hparams)\n if mode == 'train':\n train.sigmoid_train(model, hparams=hparams, joint_safe=True)\n elif mode == 'eval':\n train.sigmoid_eval(model, hparams=hparams, joint_safe=True)\n else:\n raise ValueError('Invalid --mode=%r' % mode)\n" ]
[ [ "tensorflow.flags.DEFINE_string" ], [ "tensorflow.logging.info" ], [ "tensorflow.Graph", "tensorflow.concat", "tensorflow.reshape", "tensorflow.size", "tensorflow.placeholder", "tensorflow.set_random_seed", "tensorflow.global_variables_initializer", "tensorflow.train.Saver", "tensorflow.nn.embedding_lookup", "tensorflow.contrib.training.HParams" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
ASinanSaglam/atomizer_analysis
[ "8dfc1230b2ad0c691885f8fd7119d6169cd7d1ed" ]
[ "run_validation.py" ]
[ "# %matplotlib notebook\nimport os, re, sys, urllib, requests, base64, IPython, io, pickle, glob\nsys.path.append(\"/home/monoid/Development/fresh_atomizer_checks/atomizer/SBMLparser/test/manual\")\nimport itertools as itt\nimport numpy as np\nimport subprocess as sb\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport roadrunner, h5py\nfrom bs4 import BeautifulSoup as BS\nfrom IPython.display import Image, display\nfrom matplotlib import rcParams\nimport analyzerTools as AT\n\ndef run_test(analyzer, test_no, t_end=1000, atomize=False, db=None, meta=None):\n \n if(analyzer.run_single_test(test_no, t_end=100, atomize=atomize,meta=meta)):\n if meta:\n meta[test_no][\"success\"] = True\n print(\"run successful {}\".format(test_no))\n #if db is not None:\n # # Save results into a DataFrame\n # res = analyzer.all_results[test_no]\n # sbml, bngl, rmsd, valid_per, keys = res[0],res[1],res[2],res[3],res[4] \n # for key in keys:\n # # couldn't get the curation keys\n # if len(key) == 2:\n # skey, bkey = key\n # # got curation keys\n # elif len(key) == 3:\n # skey, bkey, ckey = key\n # else:\n # print(\"couldn't find keys\")\n # IPython.embed()\n # sys.exit()\n # # setting up the database\n # db.at[\"{:010d}\".format(test_no), \"{}_sbml\".format(skey)] = res[0][skey]\n # db.at[\"{:010d}\".format(test_no), \"{}_bngl\".format(bkey)] = res[1][bkey]\n # analyzer.plot_results(test_no, legend=True, save_fig=True)\n# if(analyzer.run_old_test(test_no, t_end=100, atomize=atomize)):\n# print(\"run successful {}\".format(test_no))\n# analyzer.plot_old_results(test_no, legend=False, save_fig=True)\n else:\n if meta:\n meta[test_no][\"success\"] = False\n print(\"run failed {}\".format(test_no))\n\ndef uniquefy_names(keys):\n unique_keys = []\n if len(keys[0]) == 3:\n bkeys_d = {}\n skeys_d = {}\n ckeys_d = {}\n for key in keys:\n bkey, skey, ckey = key\n if bkey in bkeys_d.keys():\n bkey_new = bkey + \"_{}\".format(bkeys_d[bkey])\n bkeys_d[bkey] += 1\n bkey = bkey_new\n else:\n bkeys_d[bkey] = 1\n if skey in skeys_d.keys():\n skey_new = skey + \"_{}\".format(skeys_d[skey])\n skeys_d[skey] += 1\n skey = skey_new\n else:\n skeys_d[skey] = 1\n if ckey in ckeys_d.keys():\n ckey_new = ckey + \"_{}\".format(ckeys_d[ckey])\n ckeys_d[ckey] += 1\n ckey = ckey_new\n else:\n ckeys_d[ckey] = 1\n unique_keys.append( (bkey,skey,ckey) )\n else:\n bkeys_d = {}\n skeys_d = {}\n for key in keys:\n bkey, skey = key\n if bkey in bkeys_d.keys():\n bkey_new = bkey + \"_{}\".format(bkeys_d[bkey])\n bkeys_d[bkey] += 1\n bkey = bkey_new\n else:\n bkeys_d[bkey] = 1\n if skey in skeys_d.keys():\n skey_new = skey + \"_{}\".format(skeys_d[skey])\n skeys_d[skey] += 1\n skey = skey_new\n else:\n skeys_d[skey] = 1\n unique_keys.append( (bkey,skey) )\n return unique_keys\n\ndef update_results(results, h5file):\n for key in results:\n if \"{:010d}\".format(key) in h5file:\n continue\n # create a model group\n res_grp = h5file.create_group(\"{:010d}\".format(key))\n # pull dataframes\n sres, bres, _, _, keys_used = results[key]\n # names\n if len(keys_used) == 0:\n continue\n if len(keys_used[0]) == 2:\n names_to_use = [keys_used[i][1] for i in range(len(keys_used))]\n skeyd = dict([(keys_used[i][1],keys_used[i][0]) for i in range(len(keys_used))])\n bkeyd = dict([(keys_used[i][1],keys_used[i][1]) for i in range(len(keys_used))])\n skn = list(map(lambda x: skeyd[x], names_to_use))\n bkn = list(map(lambda x: bkeyd[x], names_to_use))\n else:\n names_to_use = [keys_used[i][2] for i in 
range(len(keys_used))]\n skeyd = dict([(keys_used[i][2],keys_used[i][0]) for i in range(len(keys_used))])\n bkeyd = dict([(keys_used[i][2],keys_used[i][1]) for i in range(len(keys_used))])\n skn = list(map(lambda x: skeyd[x], names_to_use))\n bkn = list(map(lambda x: bkeyd[x], names_to_use))\n # make structured arrays\n sdtype = np.dtype({\"names\":names_to_use,\n \"formats\": [\"<f8\" for i in range(len(names_to_use))]})\n bdtype = np.dtype({\"names\":names_to_use,\n \"formats\": [\"<f8\" for i in range(len(names_to_use))]})\n # if len(names_to_use) != sres[skn].shape[1]:\n # # we have multiple datasets per name, drop one\n # for iname,name in enumerate(names_to_use):\n # if len(sres[name].shape) > 1:\n # # \n stupl = list(map(tuple, sres[skn].values))\n btupl = list(map(tuple, bres[bkn].values))\n sarr = np.array(stupl, dtype=sdtype)\n barr = np.array(btupl, dtype=bdtype)\n # add the data in, if it exists\n if sarr.shape[0] != 0:\n sg = res_grp.create_dataset(\"sbml_data\", data=sarr)\n if barr.shape[0] != 0:\n bg = res_grp.create_dataset(\"bngl_data\", data=barr)\n print(\"updated results\")\n return True\n\ndef save_meta(meta, fname=\"meta_data.pickle\"):\n if os.path.isfile(fname):\n with open(fname, \"rb\") as f:\n m = pickle.load(f)\n for key in meta:\n m[key] = meta[key]\n with open(fname, \"wb\") as f:\n pickle.dump(m, f)\n else: \n with open(fname, \"wb\") as f:\n pickle.dump(meta, f)\n# All the paths we need\n# The BNG2.pl file for bionetgen runs\nbng_path = \"/home/monoid/apps/BioNetGen-2.5.0/BNG2.pl\"\n# This is the python file that can be called from the command line\nsbml_translator_path = \"/home/monoid/Development/fresh_atomizer_checks/atomizer/SBMLparser/sbmlTranslator.py\"\n# if you give this the ATOMIZER ANALYZER 5000 will import atomizer and run internally \n# translator_package_path = \"/home/monoid/Development/fresh_atomizer_checks/atomizer/SBMLparser\"\ntranslator_package_path = None\n# This is neccesary for atomizer, has default naming conventions and a lot more \n# this path will be sym linked to everywhere you want to run translator under\nconfig_path = \"/home/monoid/Development/fresh_atomizer_checks/atomizer/SBMLparser/config\"\n# the path to the folder that contains 5 zero padded folders for each test\ntests_path = \"/home/monoid/Development/fresh_atomizer_checks/atomizer/SBMLparser/test/curated\"\n# Now we also add COPASI PATH!!_!_\ncopasi_path = \"/home/monoid/apps/copasi/4.27/bin/CopasiSE\"\n# change directory to where we want to run the tests\nos.chdir(\"/home/monoid/Development/fresh_atomizer_checks/atomizer/SBMLparser/test/analyzerTools\")\n# The analyzer setup\nba = AT.BiomodelAnalyzer(bng_path, sbml_translator_path, config_path, tests_path, \n translator_import=translator_package_path, copasi_path=copasi_path)\n\n# Let's re-run everything\ntests = list(range(908,915))\nknown_issues = set([24,25,34,154,155,196,201,589,613,668,669,696,468, # Not implemented\n 643,644,645, # Complex \"i\" is used in function/parameter\n 63,245,248,305,556,575,578,542, # rule named used as parameter\n 342,429,457,547,570,627,637,638, # compartment used as parameter\n 527,562,592,593,596,723,250, # Actually broken, even in Copasi\n 304,324,330,331,341,343,345,349,367,371,374,377,381,533,548,\n 549,551,618,642,670,671,680,682,684,118,252,673,531,532,555,\n 561, # no reactions\n 306,307,308,309,310,311,388,390,391,393,409,\n 428,505,512,528,557,566,567,719,641,71,90,173,\n 253, # assignment rules used in reactions\n 610, # function defs for v16/v17 \n 
558,568,674,722,412,445,302,208,268,51,55,162,180,179,579,\n 691,465,466,238,312,538,603,604,605,215, # Uses time\n 635,636, # Uses not only time but also encoded strings for parameters\n 119, # single reaction, not really suitable for translation\n 47,483,484,486,487, # initial states should result in no reactions, \n 164,165,167,326,375,400,554,577,664,672,693,698,\n 234,237,286,450, # Uses piecewise definitions \n 396,398,507,522,705,\n 499,474, # SBML modeller is careless and uses species that should be params\n 607, # Function not defined properly/links to another function\n 319,206,39,145,353,385,392,463,608,470,472, # non-integer stoichiometry\n 161,182,239, # true multi-compartment model\n 271 # multi-compartment and the modeller has issues\n ])\n\n# Need to figure out, mostly CVODE\nlist_of_fails = set([246,336,378,383,384,387,438,9,107,123,183,192,269,\n 279,292,328,617,678,606, # new ones \n 616, # Legitimate bug, if species name is very simple AND rate constant \n # only depenent on the species concentration AND we end up generating \n # an observable with the same name as species name, then BNGL thinkg \n # we are giving obs name as the rate constant, leading to a bug\n 255, # Circular dependency in funcs?\n 401,402,403, # if func messes with func ordering\n 559, # can't load copasi result\n 64, # Due to website addition? also in too long set\n 232, # BNG takes too long?\n 172,176,177 # doesn't end up translating, takes a long time?\n ])\n\n#too_long = set([64,574,426,70,217,247,503,469,471,473,506,451,595, # WAAAY TOO LONG - debug\n# 332,334, # ATOMIZER BREAKS THESE\n# 217,247,293,426,469 # too long when atomized \n# ])\n\ntoo_long = set([64 ,172,176,177,212,217,235,247,293,385,\n 426,451,457,463,469,470,471,472,473,474,\n 496,497,503,505,506,574,595,835, \n 863, # transl too long\n 232,608, # BNG takes too long\n 63,70, # long but completes?\n 269 # due to long CVODE error\n ])\n\n################# NEW CHECKS ##############\n# A complete new set of checks to see the latest state of the tool as we are \n# writing the manuscript.\nnew_checks = set([64,217,235,496, # too long\n 497,498, # skey ratio index out of range?\n 63, # fairly long but does complete\n 119,465,468, # no data?\n 247,269,469,470,471,472,473,474,\n 503,505,506,595,606,608,835,863 # long, didn't check if completes\n ])\n################# RUN FAILS ###############\nrun_fails = set([9,24,25,34,51,55,107,\n 123,154,155,162,164,165,167,172,176,177,179,180,183,192,\n 201,208,215,232,234,237,238,245,246,248,250,255,268,279,286,292,\n 302,305,312,326,328,332,334,336,353,375,383,384,385,387,396,398,\n 400,401,402,403,412,426,429,438,445,450,451,457,463,466,483,484,\n 486,487,499,507,522,527,531,532,538,542,547,554,555,556,558,559,\n 561,562,574,575,577,578,579,589,592,593,599,600,602,607,610,617,\n 627,635,636,637,638,643,644,645,664,668,669,672,673,674,675,678,\n 687,688,692,693,696,698,705,722,723,730,731,748,749,757,759,760,\n 763,764,766,775,801,802,808,815,824,826,833,837,840,841,849,851,\n 858,859,876,879,880 # run_failed\n ])\n################# EVENTS #################\nw_event = set([1,7,56,77,81,87,88,95,96,97,101,104,109, # models with events\n 111,117,120,121,122,124,125,126,127,128,129,130,131, # models with events\n 132,133,134,135,136,137,139,140,141,142,144,148,149, # models with events\n 152,153,158,186,187,188,189,193,194,195,196,227,235, # models with events\n 241,244,256,265,281,285,287,297,301,316,317,318,327, # models with events\n 337,338,339,340,342,344,404,408,422,436,437,439,479, # models 
with events\n 480,488,493,494,496,497,534,535,536,537,540,541,563, # models with events\n 570,571,597,598,601,612,613,620,621,628,632,634,650, # models with events\n 659,681,695,699,702,706,711,718,727,734,735,736,786, # models with events\n 789,791,794,806,814,816,817,818,820,822,825,829,834, # models with events\n 856,860,862,864,901]) # models with events\n################# END CHECKS ##############\nall_issues = known_issues.union(w_event)\nall_issues = all_issues.union(list_of_fails)\n\n# Load in database\n# dbname = \"validation.h5\"\n# if os.path.isfile(dbname):\n# db = pd.read_hdf(dbname,key=\"validation\")\n# else:\n# db = pd.DataFrame()\n\n# run tests\n# try:\nif os.path.isfile(\"results.h5\"):\n os.remove(\"results.h5\")\n # results_file = h5py.File(\"results.h5\",\"a\")\n results_file = h5py.File(\"results.h5\",\"w\")\nelse:\n results_file = h5py.File(\"results.h5\",\"w\")\n\nmeta_data = {}\n\nfor test_no in tests:\n #if test_no in all_issues:\n # continue\n # if test_no in w_event or test_no in new_checks or test_no in run_fails:\n # if test_no in new_checks or test_no in run_fails:\n # continue\n if test_no in too_long:\n meta_data[test_no] = {\"too_long\":True}\n continue\n if (os.path.isfile(\"/home/monoid/Development/fresh_atomizer_checks/atomizer/SBMLparser/test/curated/BIOMD{0:010d}.xml\".format(test_no))):\n #run_test(ba, test_no, t_end=100, atomize=False, db=db)\n meta_data[test_no] = {\"file\":True, \"too_long\":False}\n run_test(ba, test_no, t_end=100, atomize=True, meta=meta_data)\n update_results(ba.all_results,results_file)\n else: \n meta_data[test_no] = {\"file\":False}\n print(\"number {} doesn't exist\".format(test_no))\n save_meta(meta_data)\n# with open(\"validation.pickle\", 'wb') as f:\n# pickle.dump(ba.all_results, f)\n#except:\n# with open(\"validation.pickle\", 'wb') as f:\n# pickle.dump(ba.all_results, f)\n# db.to_hdf(dbname,\"validation\")\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
SheffieldAI/pykale
[ "1f5cce57a50f7772520a482e8135a391eb0517f5", "1f5cce57a50f7772520a482e8135a391eb0517f5" ]
[ "kale/utils/download.py", "tests/predict/test_losses.py" ]
[ "# ===============================================================================\n# Author: Xianyuan Liu, [email protected]\n# Raivo Koot, [email protected]\n# Haiping Lu, [email protected] or [email protected]\n# ===============================================================================\n\n\"\"\"Data downloading and compressed data extraction functions, Based on\nhttps://github.com/pytorch/vision/blob/master/torchvision/datasets/utils.py\nhttps://github.com/pytorch/pytorch/blob/master/torch/hub.py\n\"\"\"\n\nimport logging\nimport os\nfrom pathlib import Path\n\nfrom torch.hub import download_url_to_file\nfrom torchvision.datasets.utils import download_and_extract_archive, download_file_from_google_drive, extract_archive\n\n\ndef download_file_by_url(url, output_directory, output_file_name, file_format=None):\n \"\"\"Download file/compressed file by url.\n\n Args:\n url (string): URL of the object to download\n output_directory (string, optional): Full path where object will be saved\n Abosolute path recommended. Relative path also works.\n output_file_name (string, optional): File name which object will be saved as\n file_format (string, optional): File format\n For compressed file, support [\"tar.xz\", \"tar\", \"tar.gz\", \"tgz\", \"gz\", \"zip\"]\n\n Example: (Grab the raw link from GitHub. Notice that using \"raw\" in the URL.)\n >>> url = \"https://github.com/pykale/data/raw/main/videos/video_test_data/ADL/annotations/labels_train_test/adl_P_04_train.pkl\"\n >>> download_file_by_url(url, \"data\", \"a.pkl\", \"pkl\")\n\n >>> url = \"https://github.com/pykale/data/raw/main/videos/video_test_data.zip\"\n >>> download_file_by_url(url, \"data\", \"video_test_data.zip\", \"zip\")\n\n \"\"\"\n\n output_directory = Path(output_directory).absolute()\n file = Path(output_directory).joinpath(output_file_name)\n if os.path.exists(file):\n logging.info(\"Skipping Download and Extraction\")\n return\n if not os.path.exists(output_directory):\n os.makedirs(output_directory)\n\n if file_format in [\"tar.xz\", \"tar\", \"tar.gz\", \"tgz\", \"gz\", \"zip\"]:\n logging.info(\"Downloading and extracting {}.\".format(output_file_name))\n download_and_extract_archive(url=url, download_root=output_directory, filename=output_file_name)\n logging.info(\"Datasets downloaded and extracted in {}\".format(file))\n else:\n logging.info(\"Downloading {}.\".format(output_file_name))\n download_url_to_file(url, file)\n logging.info(\"Datasets downloaded in {}\".format(file))\n\n\ndef download_file_gdrive(id, output_directory, output_file_name, file_format=None):\n \"\"\"Download file/compressed file by Google Drive id.\n\n Args:\n id (string): Google Drive file id of the object to download\n output_directory (string, optional): Full path where object will be saved\n Abosolute path recommended. 
Relative path also works.\n output_file_name (string, optional): File name which object will be saved as\n file_format (string, optional): File format\n For compressed file, support [\"tar.xz\", \"tar\", \"tar.gz\", \"tgz\", \"gz\", \"zip\"]\n\n Example:\n >>> gdrive_id = \"1U4D23R8u8MJX9KVKb92bZZX-tbpKWtga\"\n >>> download_file_gdrive(gdrive_id, \"data\", \"demo_datasets.zip\", \"zip\")\n\n >>> gdrive_id = \"1SV7fmAnWj-6AU9X5BGOrvGMoh2Gu9Nih\"\n >>> download_file_gdrive(gdrive_id, \"data\", \"dummy_data.csv\", \"csv\")\n \"\"\"\n\n output_directory = Path(output_directory).absolute()\n file = Path(output_directory).joinpath(output_file_name)\n if os.path.exists(file):\n logging.info(\"Skipping Download and Extraction\")\n return\n os.makedirs(output_directory, exist_ok=True)\n\n logging.info(\"Downloading {}.\".format(output_file_name))\n download_file_from_google_drive(id, output_directory, output_file_name)\n\n if file_format is not None and file_format in [\"tar.xz\", \"tar\", \"tar.gz\", \"tgz\", \"gz\", \"zip\"]:\n logging.info(\"Extracting {}.\".format(output_file_name))\n extract_archive(file.as_posix())\n logging.info(\"Datasets downloaded and extracted in {}\".format(file))\n else:\n logging.info(\"Datasets downloaded in {}\".format(file))\n", "import pytest\nimport torch\n\nfrom kale.predict.losses import multitask_topk_accuracy, topk_accuracy\n\n# Dummy data: [batch_size, num_classes]\n# Dummy ground truth: batch_size\nFIRST_PREDS = torch.tensor(\n (\n [0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1],\n [0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1],\n [0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1],\n [0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1],\n [0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1],\n )\n)\nFIRST_LABELS = torch.tensor((0, 2, 4, 5, 5))\n\nSECOND_PREDS = torch.tensor(\n (\n [0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1],\n [0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1],\n [0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1],\n [0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1],\n [0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1],\n )\n)\nSECOND_LABELS = torch.tensor((0, 0, 4, 4, 5))\n\nMULTI_PREDS = (FIRST_PREDS, SECOND_PREDS)\nMULTI_LABELS = (FIRST_LABELS, SECOND_LABELS)\n\n\ndef test_topk_accuracy():\n # Test topk_accuracy with single-task input\n preds = FIRST_PREDS\n labels = FIRST_LABELS\n k = (1, 3, 5)\n\n top1, top3, top5 = topk_accuracy(preds, labels, k)\n top1_value = top1.double().mean()\n top3_value = top3.double().mean()\n top5_value = top5.double().mean()\n assert top1_value.cpu() == pytest.approx(1 / 5)\n assert top3_value.cpu() == pytest.approx(2 / 5)\n assert top5_value.cpu() == pytest.approx(3 / 5)\n\n\ndef test_multitask_topk_accuracy():\n # Test multitask_topk_accuracy with input for two tasks\n preds = MULTI_PREDS\n labels = MULTI_LABELS\n k = (1, 3, 5)\n\n top1, top3, top5 = multitask_topk_accuracy(preds, labels, k)\n top1_value = top1.double().mean()\n top3_value = top3.double().mean()\n top5_value = top5.double().mean()\n assert top1_value.cpu() == pytest.approx(1 / 5)\n assert top3_value.cpu() == pytest.approx(2 / 5)\n assert top5_value.cpu() == pytest.approx(3 / 5)\n" ]
[ [ "torch.hub.download_url_to_file" ], [ "torch.tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kingjr/jr-tools
[ "8a4c9c42a9e36e224279566945e798869904c4c8" ]
[ "jr/plot/meg.py" ]
[ "import matplotlib.pyplot as plt\nimport numpy as np\nfrom . import pretty_plot\n\n\ndef plot_butterfly(evoked, ax=None, sig=None, color=None, ch_type=None):\n from mne import pick_types\n if ch_type is not None:\n picks = pick_types(evoked.info, ch_type)\n evoked = evoked.copy()\n evoked = evoked.pick_types(ch_type)\n sig = sig[picks, :] if sig is not None else None\n times = evoked.times * 1e3\n data = evoked.data\n ax = plt.gca() if ax is None else ax\n ax.plot(times, data.T, color='k', alpha=.5)\n gfp = np.vstack((data.max(0), data.min(0)))\n if sig is not None:\n sig = np.array(np.sum(sig, axis=0) > 0., dtype=int)\n ax.fill_between(np.hstack((times, times[::-1])),\n np.hstack((sig * gfp[0, :] + (1 - sig) * gfp[1, :],\n gfp[1, ::-1])),\n facecolor=color, edgecolor='none', alpha=.5,\n zorder=len(data) + 1)\n ax.axvline(0, color='k')\n ax.set_xlabel('Times (ms)')\n ax.set_xlim(min(times), max(times))\n xticks = np.arange(np.ceil(min(times)/1e2) * 1e2,\n np.floor(max(times)/1e2) * 1e2 + 1e-10, 100)\n ax.set_xticks(xticks)\n ax.set_xticklabels(['%i' % t if t in [xticks[0], xticks[-1], 0]\n else '' for t in xticks])\n ax.set_yticks([np.min(data), np.max(data)])\n ax.set_ylim(np.min(data), np.max(data))\n ax.set_xlim(np.min(times), np.max(times))\n pretty_plot(ax)\n return ax\n\n\ndef plot_gfp(evoked, ax=None, sig=None, color=None, ch_type='mag'):\n from mne import pick_types\n if ch_type is not None:\n picks = pick_types(evoked.info, ch_type)\n evoked = evoked.copy()\n evoked = evoked.pick_types(ch_type)\n sig = sig[picks, :] if sig is not None else None\n times = evoked.times * 1e3\n gfp = np.std(evoked.data, axis=0)\n ax = plt.gca() if ax is None else ax\n ax.plot(times, gfp, color='k', alpha=.5)\n if sig is not None:\n sig = np.array(np.sum(sig, axis=0) > 0., dtype=int)\n ax.fill_between(np.hstack((times, times[::-1])),\n np.hstack((sig * gfp, np.zeros_like(gfp))),\n facecolor=color, edgecolor='none', alpha=.5)\n ax.axvline(0, color='k')\n ax.set_xlabel('Times (ms)')\n ax.set_xlim(min(times), max(times))\n xticks = np.arange(np.ceil(min(times)/1e2) * 1e2,\n np.floor(max(times)/1e2) * 1e2 + 1e-10, 100)\n ax.set_xticks(xticks)\n ax.set_xticklabels(['%i' % t if t in [xticks[0], xticks[-1], 0]\n else '' for t in xticks])\n ax.set_yticks([np.min(gfp), np.max(gfp)])\n ax.set_ylim(np.min(gfp), np.max(gfp))\n ax.set_xlim(np.min(times), np.max(times))\n pretty_plot(ax)\n return ax\n" ]
[ [ "matplotlib.pyplot.gca", "numpy.hstack", "numpy.min", "numpy.max", "numpy.std", "numpy.zeros_like", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jmsplank/phdhelper
[ "c06dd06669b42dbe4c9e1a6eeec3d0ad3885d2eb" ]
[ "phdhelper/suMMSary/suMMSary.py" ]
[ "import numpy as np\nimport pyspedas\nfrom phdhelper.helpers import title_print\nfrom phdhelper.helpers.CONSTANTS import c, k_B, m_e, m_i, mu_0, q\nfrom pytplot import data_quants\nimport matplotlib.pyplot as plt\nfrom datetime import datetime as dt\nfrom cached_property import cached_property\n\n\nclass EventHandler:\n FPI = None\n FPI_DIST = None\n FSM = None\n FGM = None\n\n trange = None\n probe = None\n\n def load_FGM(self):\n self.FGM = pyspedas.mms.fgm(\n trange=self.trange, probe=self.probe, data_rate=\"brst\"\n )\n\n def load_FSM(self):\n raise NotImplementedError()\n\n def load_FPI_DIST(self):\n self.FPI_DIST = pyspedas.mms.fpi(\n trange=self.trange,\n probe=self.probe,\n data_rate=\"brst\",\n datatype=\"dis-dist\",\n )\n\n def load_FPI(self):\n self.FPI = pyspedas.mms.fpi(\n trange=self.trange, probe=self.probe, data_rate=\"brst\"\n )\n\n @staticmethod\n def get_tplot_data(var_str, sl=None, time=False):\n if not time:\n if sl is None:\n # Get all data\n return data_quants[var_str].values\n else:\n return data_quants[var_str].values[sl]\n else:\n if sl is None:\n # Get all data\n return data_quants[var_str].coords[\"time\"].values\n else:\n return data_quants[var_str].coords[\"time\"].values[sl]\n\n\nclass TimeMMS(EventHandler):\n def __init__(self, kw):\n self.kw = kw\n\n @cached_property\n def timestamp(self):\n return self.get_tplot_data(self.kw, time=True)\n\n @cached_property\n def date_time(self):\n return np.array([dt.utcfromtimestamp(t) for t in self.timestamp])\n\n def date_string(self, fmt=\"%H:%M\"):\n return np.array([dt.strftime(t, fmt) for t in self.date_time])\n\n\nclass Species(EventHandler):\n def __init__(self, kw) -> None:\n self.kw = kw\n\n @cached_property\n def value(self):\n return self.get_tplot_data(self.kw)\n\n @cached_property\n def time(self):\n return TimeMMS(self.kw)\n\n def plot(self):\n plt.plot(self.value)\n\n def __repr__(self):\n return (\n f\"Species({self.kw})\"\n \"Available properties:\"\n \" value\"\n \"Available methods:\"\n \" plot\"\n )\n\n\nclass MultiSpecies:\n def __init__(self, ion_kw: str, electron_kw: str) -> None:\n self.ion_kw = ion_kw\n self.electron_kw = electron_kw\n\n @cached_property\n def ion(self):\n return Species(self.ion_kw)\n\n @cached_property\n def electron(self):\n return Species(self.electron_kw)\n\n\nclass Event(EventHandler):\n def __init__(\n self, trange: str, required_instruments: str, probe: str = \"1\"\n ) -> None:\n self.trange = trange\n self.required_instruments = required_instruments.upper()\n self.probe = probe\n\n if \"FGM\" in required_instruments:\n self.load_FGM()\n if \"FPI\" in required_instruments:\n self.load_FPI()\n if \"FSM\" in required_instruments:\n self.load_FSM()\n if \"FPI_DIST\" in required_instruments:\n self.load_FPI_DIST()\n\n @cached_property\n def B(self):\n return Species(f\"mms{self.probe}_fgm_b_gse_brst_l2\")\n\n @cached_property\n def v(self):\n return MultiSpecies(\n f\"mms{self.probe}_dis_bulkv_gse_brst\",\n f\"mms{self.probe}_des_bulkv_gse_brst\",\n )\n\n @cached_property\n def T(self):\n return MultiSpecies(\n f\"mms{self.probe}_dis_temppara_brst\",\n f\"mms{self.probe}_dis_tempperp_brst\",\n )\n\n @cached_property\n def E(self):\n return MultiSpecies(\n f\"mms{self.probe}_dis_energyspectr_omni_brst\",\n f\"mms{self.probe}_des_energyspectr_omni_brst\",\n )\n\n # @property\n # def v_0(self, species=\"i\"):\n # title_print(\"Calculating background flow speed\")\n # species = self.Species(species)\n # if species.ion:\n # self.v_0_i = np.mean(np.linalg.norm(self.v_i, 
axis=1))\n # if species.elec:\n # self.v_0_e = np.mean(np.linalg.norm(self.v_e, axis=1))\n\n # @property\n # def v_A(self):\n # title_print(\"Calculating Alfven speed\")\n # self.v_A = self.mean_B / np.sqrt(mu_0 * self.number_density_i) / 1e3\n\n # @property\n # def number_density(self, species=\"i\"):\n # species = self.Species(species)\n # if species.ion:\n # self.number_density_i = (\n # self.get_tplot_data(f\"mms{self.probe}_dis_numberdensity_brst\") * 1e6\n # ).mean()\n # if species.elec:\n # self.number_density_e = (\n # self.get_tplot_data(f\"mms{self.probe}_des_numberdensity_brst\") * 1e6\n # ).mean()\n\n # @property\n # def beta(self, species=\"i\"):\n # title_print(\"Calculating plasma betas\")\n # species = self.Species(species)\n # magPress = self.mean_B ** 2 / (2 * mu_0)\n # if species.ion:\n # self.beta_i = (\n # self.number_density_i * k_B * self.T_i[:, 0].mean()\n # ) / magPress\n # if species.elec:\n # self.beta_e = (\n # self.number_density_e * k_B * self.T_e[:, 0].mean()\n # ) / magPress\n\n # @property\n # def rho(self, species=\"i\"):\n # title_print(\"Calculating gyroradius\")\n # species = self.Species(species)\n # if species.ion:\n # i_thermal_velocity = np.sqrt(self.T_i[:, 1].mean() * 2 * q / m_i) / 1e3\n # i_gyrofrequency = q * self.mean_B / m_i\n # self.rho_i = i_thermal_velocity / i_gyrofrequency\n # if species.elec:\n # e_thermal_velocity = np.sqrt(self.T_i[:, 1].mean() * 2 * q / m_e) / 1e3\n # e_gyrofrequency = q * self.mean_B / m_e\n # self.rho_e = e_thermal_velocity / e_gyrofrequency\n\n # @property\n # def p(self, species=\"i\"):\n # title_print(\"Calculating Intertial length\")\n # species = self.Species(species)\n # if species.ion:\n # i_plasma_frequency = 1.32e3 * np.sqrt(self.number_density_i)\n # self.p_i = c / i_plasma_frequency\n # self.p_i /= 1e3\n # if species.elec:\n # e_plasma_frequency = 5.64e4 * np.sqrt(self.number_density_e)\n # self.p_e = c / e_plasma_frequency\n # self.p_e /= 1e3\n\n # @property\n # def time(self, var=\"B\"):\n # title_print(\"Getting time arrays\")\n # var = var.split(\"|\")\n # if \"B\" in var:\n # self.time_B = self.get_tplot_data(\n # f\"mms{self.probe}_fgm_b_gse_brst_l2\", time=True\n # )\n # if \"V\" in var:\n # self.time_V = self.get_tplot_data(\n # f\"mms{self.probe}_dis_bulkv_gse_brst\", time=True\n # )\n # if \"e\" in var:\n # self.time_e = self.get_tplot_data(\n # f\"mms{self.probe}_des_temppara_brst\", time=True\n # )" ]
[ [ "matplotlib.pyplot.plot" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
coderatwork7/AI-algorithms
[ "11e9c012cc2f5fb4493bc1ec6b14ddc9cf0fc2d4" ]
[ "perceptron/perceptron.py" ]
[ "import pandas as pd\n\n# TODO: Set weight1, weight2, and bias\nweight1 = 1.5\nweight2 = 1.5\nbias = -2.0\n\n\n# DON'T CHANGE ANYTHING BELOW\n# Inputs and outputs\ntest_inputs = [(0, 0), (0, 1), (1, 0), (1, 1)]\ncorrect_outputs = [False, False, False, True]\noutputs = []\n\n# Generate and check output\nfor test_input, correct_output in zip(test_inputs, correct_outputs):\n linear_combination = weight1 * test_input[0] + weight2 * test_input[1] + bias\n output = int(linear_combination >= 0)\n is_correct_string = 'Yes' if output == correct_output else 'No'\n outputs.append([test_input[0], test_input[1], linear_combination, output, is_correct_string])\n\n# Print output\nnum_wrong = len([output[4] for output in outputs if output[4] == 'No'])\noutput_frame = pd.DataFrame(outputs, columns=['Input 1', ' Input 2', ' Linear Combination', ' Activation Output', ' Is Correct'])\nif not num_wrong:\n print('Nice! You got it all correct.\\n')\nelse:\n print('You got {} wrong. Keep trying!\\n'.format(num_wrong))\nprint(output_frame.to_string(index=False))" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
muhanzhang/NestedGNN
[ "a5adccf62d397ad7f83bc73be34eba3765df73fa" ]
[ "kernel/graph_sage.py" ]
[ "import torch\nimport torch.nn.functional as F\nfrom torch.nn import Linear\nfrom torch_geometric.nn import SAGEConv, global_mean_pool\n\n\nclass NestedGraphSAGE(torch.nn.Module):\n def __init__(self, dataset, num_layers, hidden, use_z=False, use_rd=False):\n super(NestedGraphSAGE, self).__init__()\n self.use_rd = use_rd\n self.use_z = use_z\n if self.use_rd:\n self.rd_projection = torch.nn.Linear(1, 8)\n if self.use_z:\n self.z_embedding = torch.nn.Embedding(1000, 8)\n input_dim = dataset.num_features\n if self.use_z or self.use_rd:\n input_dim += 8\n\n self.conv1 = SAGEConv(input_dim, hidden)\n self.convs = torch.nn.ModuleList()\n for i in range(num_layers - 1):\n self.convs.append(SAGEConv(hidden, hidden))\n self.lin1 = torch.nn.Linear(num_layers * hidden, hidden)\n self.lin2 = Linear(hidden, dataset.num_classes)\n\n def reset_parameters(self):\n if self.use_rd:\n self.rd_projection.reset_parameters()\n if self.use_z:\n self.z_embedding.reset_parameters()\n self.conv1.reset_parameters()\n for conv in self.convs:\n conv.reset_parameters()\n self.lin1.reset_parameters()\n self.lin2.reset_parameters()\n\n def forward(self, data):\n x, edge_index, batch = data.x, data.edge_index, data.batch\n\n # node label embedding\n z_emb = 0\n if self.use_z and 'z' in data:\n ### computing input node embedding\n z_emb = self.z_embedding(data.z)\n if z_emb.ndim == 3:\n z_emb = z_emb.sum(dim=1)\n \n if self.use_rd and 'rd' in data:\n rd_proj = self.rd_projection(data.rd)\n z_emb += rd_proj\n\n if self.use_rd or self.use_z:\n x = torch.cat([z_emb, x], -1)\n\n x = F.relu(self.conv1(x, edge_index))\n xs = [x]\n for conv in self.convs:\n x = F.relu(conv(x, edge_index))\n xs += [x]\n x = global_mean_pool(torch.cat(xs, dim=1), data.node_to_subgraph)\n x = global_mean_pool(x, data.subgraph_to_graph)\n x = F.relu(self.lin1(x))\n x = F.dropout(x, p=0.5, training=self.training)\n x = self.lin2(x)\n return F.log_softmax(x, dim=-1)\n\n def __repr__(self):\n return self.__class__.__name__\n\n\nclass GraphSAGE(torch.nn.Module):\n def __init__(self, dataset, num_layers, hidden, *args, **kwargs):\n super(GraphSAGE, self).__init__()\n self.conv1 = SAGEConv(dataset.num_features, hidden)\n self.convs = torch.nn.ModuleList()\n for i in range(num_layers - 1):\n self.convs.append(SAGEConv(hidden, hidden))\n self.lin1 = torch.nn.Linear(num_layers * hidden, hidden)\n self.lin2 = Linear(hidden, dataset.num_classes)\n\n def reset_parameters(self):\n self.conv1.reset_parameters()\n for conv in self.convs:\n conv.reset_parameters()\n self.lin1.reset_parameters()\n self.lin2.reset_parameters()\n\n def forward(self, data):\n x, edge_index, batch = data.x, data.edge_index, data.batch\n x = F.relu(self.conv1(x, edge_index))\n xs = [x]\n for conv in self.convs:\n x = F.relu(conv(x, edge_index))\n xs += [x]\n x = global_mean_pool(torch.cat(xs, dim=1), batch)\n x = F.relu(self.lin1(x))\n x = F.dropout(x, p=0.5, training=self.training)\n x = self.lin2(x)\n return F.log_softmax(x, dim=-1)\n\n def __repr__(self):\n return self.__class__.__name__\n\n\nclass GraphSAGEWithoutJK(torch.nn.Module):\n def __init__(self, dataset, num_layers, hidden):\n super(GraphSAGEWithoutJK, self).__init__()\n self.conv1 = SAGEConv(dataset.num_features, hidden)\n self.convs = torch.nn.ModuleList()\n for i in range(num_layers - 1):\n self.convs.append(SAGEConv(hidden, hidden))\n self.lin1 = Linear(hidden, hidden)\n self.lin2 = Linear(hidden, dataset.num_classes)\n\n def reset_parameters(self):\n self.conv1.reset_parameters()\n for conv in self.convs:\n 
conv.reset_parameters()\n self.lin1.reset_parameters()\n self.lin2.reset_parameters()\n\n def forward(self, data):\n x, edge_index, batch = data.x, data.edge_index, data.batch\n x = F.relu(self.conv1(x, edge_index))\n for conv in self.convs:\n x = F.relu(conv(x, edge_index))\n x = global_mean_pool(x, batch)\n x = F.relu(self.lin1(x))\n x = F.dropout(x, p=0.5, training=self.training)\n x = self.lin2(x)\n return F.log_softmax(x, dim=-1)\n\n def __repr__(self):\n return self.__class__.__name__\n" ]
[ [ "torch.nn.functional.log_softmax", "torch.nn.functional.dropout", "torch.cat", "torch.nn.ModuleList", "torch.nn.Embedding", "torch.nn.Linear" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Fryguy/py2rb
[ "0d2fbc5a86b82707a1d83241a21af6b2cc22c0b8" ]
[ "tests/numpy/asarray.py" ]
[ "import numpy as np\n\ndef print_matrix(data):\n data_i = []\n for i in list(data):\n data_j = []\n for j in i:\n data_j.append(int(\"%d\" % j))\n data_i.append(data_j)\n print(data_i)\n\ndef print_array(data):\n datas = []\n for i in data:\n datas.append(float(\"%.3f\" % i))\n print(datas)\n\nx = np.asarray([[1.,2.],[3.,4.]])\nprint_matrix(x)\n\nx = np.asarray([1.,2.])\nprint_array(x)\n\ny = np.asarray([3.,4.])\nprint_array(y)\n\nz = (x + y)[0]\nprint(z)\n" ]
[ [ "numpy.asarray" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
piraaa/VideoDigitalWatermarking
[ "6439881dc88fb7257a3dd9856b185e5c667b89b4" ]
[ "src/msequence.py" ]
[ "#\n# msequence.py\n# Created by pira on 2017/07/28.\n#\n\n#coding: utf-8\nu\"\"\"For M-Sequence.\"\"\"\n\nimport numpy as np\n\ndef generateM(N):\n\tu\"\"\"Create M-Sequence.\n \t@param N : length 2**N-1\n \t@return m : M-Sequence\n\t\"\"\"\n\n\tp = pow(2, N)\n\tm = [0] * (p-1)\n\n\tfor i in np.arange(1,p,2): \n\t\tf = p^i\n\t\ta = p\n\t\t#i = int()\n\t\tfor j in np.arange(N, p):\n\t\t\tif (a&p) == p:\n\t\t\t\ta ^= f\n\t\t\tif a == 1:\n\t\t\t\tbreak\n\t\t\ta <<= 1\n\t\tif j == p-1:\n\t\t\tinit = 1\n\t\t\tlfsr = init & (p-1)\n\t\t\tf >>= 1\n\t\t\tfor k in np.arange(0, p-1):\n\t\t\t\tlfsr = (lfsr>>1)^(-(int)(lfsr&1) & f)\n\t\t\t\tm[k] = (lfsr&1) * 2-1\n\t\t\treturn m\n\n#test\n#m = generateM(3)\n#print(m)" ]
[ [ "numpy.arange" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
gnes-ai/hub
[ "94cff9011ff6447ce1af51c5307813ab6fbbb156" ]
[ "encoder/i3d/i3d_encoder.py" ]
[ "# Tencent is pleased to support the open source community by making GNES available.\n#\n# Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import List\n\nimport numpy as np\n\nfrom gnes.encoder.base import BaseVideoEncoder\nfrom gnes.helper import batching, get_first_available_gpu\n\n\nclass I3dEncoder(BaseVideoEncoder):\n batch_size = 1\n\n def __init__(self, model_dir: str,\n output_layer: str,\n num_classes: int = 400,\n frame_size_x: int = 224,\n frame_size_y: int = 224,\n num_frame_per_clib: int = 16,\n rgb_channels: int = 3,\n on_gpu: bool = False,\n *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.model_dir = model_dir\n self.output_layer = output_layer\n self.num_classes = num_classes\n self.frame_size_x = frame_size_x\n self.frame_size_y = frame_size_y\n self.num_frame_per_clib = num_frame_per_clib\n self.rgb_channels = rgb_channels\n self.on_gpu = on_gpu\n\n def post_init(self):\n import tensorflow as tf\n from i3d_cores.i3d import InceptionI3d\n\n import os\n os.environ['CUDA_VISIBLE_DEVICES'] = str(get_first_available_gpu())\n\n with tf.Graph().as_default():\n self.rgb_images_placeholder = tf.placeholder(dtype=tf.float32, shape=(None,\n self.num_frame_per_clib,\n self.frame_size_x,\n self.frame_size_y,\n self.rgb_channels))\n is_training = False\n\n with tf.variable_scope('RGB'):\n self.feature, _ = InceptionI3d(\n num_classes=self.num_classes,\n spatial_squeeze=True,\n final_endpoint=self.output_layer,\n name='inception_i3d'\n )(self.rgb_images_placeholder, is_training)\n init = tf.global_variables_initializer()\n\n config = tf.ConfigProto(log_device_placement=False)\n if self.on_gpu:\n config.gpu_options.allow_growth = True\n\n self.sess = tf.Session(config=config)\n self.sess.run(init)\n\n checkpoint_file = self.model_dir\n meta_graph_location = self.model_dir + '.meta'\n saver = tf.train.import_meta_graph(meta_graph_location, clear_devices=True)\n saver.restore(self.sess, checkpoint_file)\n\n def encode(self, data: List['np.ndarray'], *args, **kwargs) -> np.ndarray:\n def _padding(data):\n _data = np.array(\n [np.concatenate((d, np.zeros((self.num_frame_per_clib - d.shape[0],\n self.frame_size_x,\n self.frame_size_y,\n self.rgb_channels), dtype=np.float32)), axis=0)\n if d.shape[0] < self.num_frame_per_clib else d[:self.num_frame_per_clib] for d in data])\n return _data\n\n @batching\n def _encode(_, data):\n feature, = self.sess.run([self.feature], feed_dict={self.rgb_images_placeholder: data})\n return np.array(feature).astype(np.float32)\n\n return _encode(self, _padding(data))\n" ]
[ [ "tensorflow.Graph", "tensorflow.placeholder", "tensorflow.train.import_meta_graph", "tensorflow.ConfigProto", "tensorflow.global_variables_initializer", "tensorflow.Session", "tensorflow.variable_scope", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
Mohammedaabdu/pytorch-segmentation
[ "9fdf927d345146247f039042ee37612157e26582" ]
[ "models/ deeplabv3_plus_xception.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Apr 21 15:16:18 2021\r\n\r\n@author: Administrator\r\n\"\"\"\r\n\r\nfrom base import BaseModel\r\nimport torch\r\nimport math\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom torchvision import models\r\nimport torch.utils.model_zoo as model_zoo\r\nfrom utils.helpers import initialize_weights,set_trainable\r\nfrom itertools import chain\r\n'''\r\n'xception_65.pth'URL:https://github.com/zhangtianlun12/deeplabv3-/releases/download/v0.1/xception_65.pth\r\n'''\r\n\r\n\r\n''' \r\n-> ResNet BackBone\r\n'''\r\n\r\nclass ResNet(nn.Module):\r\n def __init__(self, in_channels=3, output_stride=16, backbone='resnet101', pretrained=True):\r\n super(ResNet, self).__init__()\r\n model = getattr(models, backbone)(pretrained)\r\n if not pretrained or in_channels != 3:\r\n self.layer0 = nn.Sequential(\r\n nn.Conv2d(in_channels, 64, 7, stride=2, padding=3, bias=False),\r\n nn.BatchNorm2d(64),\r\n nn.ReLU(inplace=True),\r\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\r\n )\r\n initialize_weights(self.layer0)\r\n else:\r\n self.layer0 = nn.Sequential(*list(model.children())[:4])\r\n \r\n self.layer1 = model.layer1\r\n self.layer2 = model.layer2\r\n self.layer3 = model.layer3\r\n self.layer4 = model.layer4\r\n\r\n if output_stride == 16: s3, s4, d3, d4 = (2, 1, 1, 2)\r\n elif output_stride == 8: s3, s4, d3, d4 = (1, 1, 2, 4)\r\n \r\n if output_stride == 8: \r\n for n, m in self.layer3.named_modules():\r\n if 'conv1' in n and (backbone == 'resnet34' or backbone == 'resnet18'):\r\n m.dilation, m.padding, m.stride = (d3,d3), (d3,d3), (s3,s3)\r\n elif 'conv2' in n:\r\n m.dilation, m.padding, m.stride = (d3,d3), (d3,d3), (s3,s3)\r\n elif 'downsample.0' in n:\r\n m.stride = (s3, s3)\r\n\r\n for n, m in self.layer4.named_modules():\r\n if 'conv1' in n and (backbone == 'resnet34' or backbone == 'resnet18'):\r\n m.dilation, m.padding, m.stride = (d4,d4), (d4,d4), (s4,s4)\r\n elif 'conv2' in n:\r\n m.dilation, m.padding, m.stride = (d4,d4), (d4,d4), (s4,s4)\r\n elif 'downsample.0' in n:\r\n m.stride = (s4, s4)\r\n\r\n def forward(self, x):\r\n x = self.layer0(x)\r\n x = self.layer1(x)\r\n low_level_features = x\r\n x = self.layer2(x)\r\n x = self.layer3(x)\r\n x = self.layer4(x)\r\n\r\n return x, low_level_features\r\n\r\n\r\n\"\"\"\r\nCreated on Fri Sep 13 19:04:23 2019\r\n\r\n@author: shirhe-lyh\r\n\r\n\r\nImplementation of Xception model.\r\nXception: Deep Learning with Depthwise Separable Convolutions, F. Chollect,\r\n arxiv:1610.02357 (https://arxiv.org/abs/1610.02357).\r\n\r\nOfficial tensorflow implementation:\r\n https://github.com/tensorflow/models/blob/master/research/deeplab/core/xception.py\r\n\"\"\"\r\n\r\nimport collections\r\nimport os\r\nimport torch\r\n\r\n\r\n_DEFAULT_MULTI_GRID = [1, 1, 1]\r\n# The cap for torch.clamp\r\n_CLIP_CAP = 6\r\n_BATCH_NORM_PARAMS = {\r\n 'eps': 0.001,\r\n 'momentum': 0.9997,\r\n 'affine': True,\r\n}\r\n\r\n\r\nclass Block(collections.namedtuple('Block', ['scope', 'unit_fn', 'args'])):\r\n \"\"\"A named tuple describing an Xception block.\r\n \r\n Its parts are:\r\n scope: The scope of the block.\r\n unit_fn: The Xception unit function which takes as input a tensor and\r\n returns another tensor with the output of the Xception unit.\r\n args: A list of length equal to the number of units in the block. 
The\r\n list contains one dictionary for each unit in the block to serve \r\n as argument to unit_fn.\r\n \"\"\"\r\n \r\n \r\ndef fixed_padding(inputs, kernel_size, rate=1):\r\n \"\"\"Pads the input along the spatial dimensions independently of input size.\r\n \r\n Args:\r\n inputs: A tensor of size [batch, height_in, width_in, channels].\r\n kernel_size: The kernel to be used in the conv2d or max_pool2d \r\n operation. Should be a positive integer.\r\n rate: An integer, rate for atrous convolution.\r\n \r\n Returns:\r\n padded_inputs: A tensor of size [batch, height_out, width_out, \r\n channels] with the input, either intact (if kernel_size == 1) or \r\n padded (if kernel_size > 1).\r\n \"\"\"\r\n kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)\r\n pad_total = kernel_size_effective - 1\r\n pad_beg = pad_total // 2\r\n pad_end = pad_total - pad_beg\r\n padded_inputs = torch.nn.functional.pad(\r\n inputs, pad=(pad_beg, pad_end, pad_beg, pad_end))\r\n return padded_inputs\r\n\r\n\r\nclass Conv2dSame(torch.nn.Module):\r\n \"\"\"Strided 2-D convolution with 'SAME' padding.\"\"\"\r\n \r\n def __init__(self, in_channels, out_channels, kernel_size, stride, rate=1):\r\n \"\"\"Constructor.\r\n \r\n If stride > 1 and use_explicit_padding is True, then we do explicit\r\n zero-padding, followed by conv2d with 'VALID' padding.\r\n \r\n Args:\r\n in_channels: An integer, the number of input filters.\r\n out_channels: An integer, the number of output filters.\r\n kernel_size: An integer with the kernel_size of the filters.\r\n stride: An integer, the output stride.\r\n rate: An integer, rate for atrous convolution.\r\n \"\"\"\r\n super(Conv2dSame, self).__init__()\r\n self._kernel_size = kernel_size\r\n self._rate = rate\r\n self._without_padding = stride == 1\r\n if self._without_padding:\r\n # Here, we assume that floor(padding) = padding\r\n padding = (kernel_size - 1) * rate // 2\r\n self._conv = torch.nn.Conv2d(in_channels, \r\n out_channels,\r\n kernel_size=kernel_size,\r\n stride=1,\r\n dilation=rate,\r\n padding=padding,\r\n bias=False)\r\n else:\r\n self._conv = torch.nn.Conv2d(in_channels,\r\n out_channels,\r\n kernel_size=kernel_size,\r\n stride=stride,\r\n dilation=rate,\r\n bias=False)\r\n self._batch_norm = torch.nn.BatchNorm2d(out_channels, \r\n **_BATCH_NORM_PARAMS)\r\n self._relu = torch.nn.ReLU(inplace=True)\r\n \r\n def forward(self, x):\r\n \"\"\"\r\n Args:\r\n x: A 4-D tensor with shape [batch, height_in, width_in, channels].\r\n \r\n Returns:\r\n A 4-D tensor of size [batch, height_out, width_out, channels] with \r\n the convolution output.\r\n \"\"\"\r\n if not self._without_padding:\r\n x = fixed_padding(x, self._kernel_size, self._rate)\r\n x = self._conv(x)\r\n x = self._batch_norm(x)\r\n x = self._relu(x)\r\n return x\r\n\r\n\r\nclass SeparableConv2dSame(torch.nn.Module):\r\n \"\"\"Strided 2-D separable convolution with 'SAME' padding.\"\"\"\r\n \r\n def __init__(self, in_channels, out_channels, kernel_size, \r\n depth_multiplier, stride, rate, use_explicit_padding=True, \r\n activation_fn=None, regularize_depthwise=False, **kwargs):\r\n \"\"\"Constructor.\r\n \r\n If stride > 1 and use_explicit_padding is True, then we do explicit\r\n zero-padding, followed by conv2d with 'VALID' padding.\r\n \r\n Args:\r\n in_channels: An integer, the number of input filters.\r\n out_channels: An integer, the number of output filters.\r\n kernel_size: An integer with the kernel_size of the filters.\r\n depth_multiplier: The number of depthwise convolution output\r\n 
channels for each input channel. The total number of depthwise\r\n convolution output channels will be equal to `num_filters_in *\r\n depth_multiplier`.\r\n stride: An integer, the output stride.\r\n rate: An integer, rate for atrous convolution.\r\n use_explicit_padding: If True, use explicit padding to make the\r\n model fully compatible with the open source version, otherwise\r\n use the nattive Pytorch 'SAME' padding.\r\n activation_fn: Activation function.\r\n regularize_depthwise: Whether or not apply L2-norm regularization\r\n on the depthwise convolution weights.\r\n **kwargs: Additional keyword arguments to pass to torch.nn.Conv2d.\r\n \"\"\"\r\n super(SeparableConv2dSame, self).__init__()\r\n self._kernel_size = kernel_size\r\n self._rate = rate\r\n self._without_padding = stride == 1 or not use_explicit_padding\r\n \r\n out_channels_depthwise = in_channels * depth_multiplier\r\n if self._without_padding:\r\n # Separable convolution for padding 'SAME'\r\n # Here, we assume that floor(padding) = padding\r\n padding = (kernel_size - 1) * rate // 2\r\n self._conv_depthwise = torch.nn.Conv2d(in_channels, \r\n out_channels_depthwise,\r\n kernel_size=kernel_size, \r\n stride=stride, \r\n dilation=rate,\r\n groups=in_channels,\r\n padding=padding,\r\n bias=False,\r\n **kwargs)\r\n else:\r\n # Separable convolution for padding 'VALID'\r\n self._conv_depthwise = torch.nn.Conv2d(in_channels,\r\n out_channels_depthwise,\r\n kernel_size=kernel_size, \r\n stride=stride,\r\n dilation=rate,\r\n groups=in_channels,\r\n bias=False,\r\n **kwargs)\r\n self._batch_norm_depthwise = torch.nn.BatchNorm2d(\r\n out_channels_depthwise, **_BATCH_NORM_PARAMS)\r\n self._conv_pointwise = torch.nn.Conv2d(out_channels_depthwise,\r\n out_channels,\r\n kernel_size=1, \r\n stride=1,\r\n bias=False,\r\n **kwargs)\r\n self._batch_norm_pointwise = torch.nn.BatchNorm2d(\r\n out_channels, **_BATCH_NORM_PARAMS)\r\n self._activation_fn = activation_fn\r\n \r\n def forward(self, x):\r\n \"\"\"\r\n Args:\r\n x: A 4-D tensor with shape [batch, height_in, width_in, channels].\r\n \r\n Returns:\r\n A 4-D tensor of size [batch, height_out, width_out, channels] with \r\n the convolution output.\r\n \"\"\"\r\n if not self._without_padding:\r\n x = fixed_padding(x, self._kernel_size, self._rate)\r\n x = self._conv_depthwise(x)\r\n x = self._batch_norm_depthwise(x)\r\n if self._activation_fn is not None:\r\n x = self._activation_fn(x)\r\n x = self._conv_pointwise(x)\r\n x = self._batch_norm_pointwise(x)\r\n if self._activation_fn is not None:\r\n x = self._activation_fn(x)\r\n return x\r\n \r\n\r\nclass XceptionModule(torch.nn.Module):\r\n \"\"\"An Xception module.\r\n \r\n The output of one Xception module is equal to the sum of `residual` and\r\n `shortcut`, where `residual` is the feature computed by three seperable\r\n convolution. The `shortcut` is the feature computed by 1x1 convolution\r\n with or without striding. 
In some cases, the `shortcut` path could be a\r\n simple identity function or none (i.e, no shortcut).\r\n \"\"\"\r\n \r\n def __init__(self, in_channels, depth_list, skip_connection_type, stride, \r\n unit_rate_list, rate=1, activation_fn_in_separable_conv=False, \r\n regularize_depthwise=False, use_bounded_activation=False,\r\n use_explicit_padding=True):\r\n \"\"\"Constructor.\r\n \r\n Args:\r\n in_channels: An integer, the number of input filters.\r\n depth_list: A list of three integers specifying the depth values\r\n of one Xception module.\r\n skip_connection_type: Skip connection type for the residual path.\r\n Only supports 'conv', 'sum', or 'none'.\r\n stride: The block unit's stride. Detemines the amount of \r\n downsampling of the units output compared to its input.\r\n unit_rate_list: A list of three integers, determining the unit \r\n rate for each separable convolution in the Xception module.\r\n rate: An integer, rate for atrous convolution.\r\n activation_fn_in_separable_conv: Includes activation function in\r\n the seperable convolution or not.\r\n regularize_depthwise: Whether or not apply L2-norm regularization\r\n on the depthwise convolution weights.\r\n use_bounded_activation: Whether or not to use bounded activations.\r\n Bounded activations better lend themselves to quantized \r\n inference.\r\n use_explicit_padding: If True, use explicit padding to make the\r\n model fully compatible with the open source version, otherwise\r\n use the nattive Pytorch 'SAME' padding.\r\n \r\n Raises:\r\n ValueError: If depth_list and unit_rate_list do not contain three\r\n integers, or if stride != 1 for the third seperable convolution\r\n operation in the residual path, or unsupported skip connection\r\n type.\r\n \"\"\"\r\n super(XceptionModule, self).__init__()\r\n \r\n if len(depth_list) != 3:\r\n raise ValueError('Expect three elements in `depth_list`.')\r\n if len(unit_rate_list) != 3:\r\n raise ValueError('Expect three elements in `unit_rate_list`.')\r\n if skip_connection_type not in ['conv', 'sum', 'none']:\r\n raise ValueError('Unsupported skip connection type.')\r\n \r\n # Activation function\r\n self._input_activation_fn = None\r\n if activation_fn_in_separable_conv:\r\n activation_fn = (torch.nn.ReLU6(inplace=False) if \r\n use_bounded_activation else \r\n torch.nn.ReLU(inplace=False))\r\n else:\r\n if use_bounded_activation:\r\n # When use_bounded_activation is True, we clip the feature\r\n # values and apply relu6 for activation.\r\n activation_fn = lambda x: torch.clamp(x, -_CLIP_CAP, _CLIP_CAP)\r\n self._input_activation_fn = torch.nn.ReLU6(inplace=False)\r\n else:\r\n # Original network design.\r\n activation_fn = None\r\n self._input_activation_fn = torch.nn.ReLU(inplace=False)\r\n self._use_bounded_activation = use_bounded_activation\r\n self._output_activation_fn = None\r\n if use_bounded_activation:\r\n self._output_activation_fn = torch.nn.ReLU6(inplace=True)\r\n \r\n # Separable conv block.\r\n layers = []\r\n in_channels_ = in_channels\r\n for i in range(3):\r\n if self._input_activation_fn is not None:\r\n layers += [self._input_activation_fn]\r\n layers += [\r\n SeparableConv2dSame(in_channels_,\r\n depth_list[i],\r\n kernel_size=3,\r\n depth_multiplier=1,\r\n regularize_depthwise=regularize_depthwise,\r\n rate=rate*unit_rate_list[i],\r\n stride=stride if i==2 else 1,\r\n activation_fn=activation_fn,\r\n use_explicit_padding=use_explicit_padding)]\r\n in_channels_ = depth_list[i]\r\n self._separable_conv_block = torch.nn.Sequential(*layers)\r\n \r\n # 
Skip connection\r\n self._skip_connection_type = skip_connection_type\r\n if skip_connection_type == 'conv':\r\n self._conv_skip_connection = torch.nn.Conv2d(in_channels,\r\n depth_list[-1],\r\n kernel_size=1,\r\n stride=stride)\r\n self._batch_norm_shortcut = torch.nn.BatchNorm2d(\r\n depth_list[-1], **_BATCH_NORM_PARAMS)\r\n \r\n def forward(self, x):\r\n \"\"\"\r\n Args:\r\n x: A 4-D tensor with shape [batch, height, width, channels].\r\n \r\n Returns:\r\n The Xception module's output.\r\n \"\"\"\r\n residual = self._separable_conv_block(x)\r\n if self._skip_connection_type == 'conv':\r\n shortcut = self._conv_skip_connection(x)\r\n shortcut = self._batch_norm_shortcut(shortcut)\r\n if self._use_bounded_activation:\r\n residual = torch.clamp(residual, -_CLIP_CAP, _CLIP_CAP)\r\n shortcut = torch.clamp(shortcut, -_CLIP_CAP, _CLIP_CAP)\r\n outputs = residual + shortcut\r\n if self._use_bounded_activation:\r\n outputs = self._output_activation_fn(outputs)\r\n elif self._skip_connection_type == 'sum':\r\n if self._use_bounded_activation:\r\n residual = torch.clamp(residual, -_CLIP_CAP, _CLIP_CAP)\r\n x = torch.clamp(x, -_CLIP_CAP, _CLIP_CAP)\r\n outputs = residual + x\r\n if self._use_bounded_activation:\r\n outputs = self._output_activation_fn(outputs)\r\n else:\r\n outputs = residual\r\n return outputs\r\n \r\n \r\nclass StackBlocksDense(torch.nn.Module):\r\n \"\"\"Stacks Xception blocks and controls output feature density.\r\n \r\n This class allows the user to explicitly control the output stride, which\r\n is the ratio of the input to output spatial resolution. This is useful for\r\n dense prediction tasks such as semantic segmentation or object detection.\r\n \r\n Control of the output feature density is implemented by atrous convolution.\r\n \"\"\"\r\n \r\n def __init__(self, blocks, output_stride=None):\r\n \"\"\"Constructor.\r\n \r\n Args:\r\n blocks: A list of length equal to the number of Xception blocks.\r\n Each element is an Xception Block object describing the units\r\n in the block.\r\n output_stride: If None, then the output will be computed at the\r\n nominal network stride. If output_stride is not None, it \r\n specifies the requested ratio of input to output spatial\r\n resolution, which needs to be equal to the product of unit\r\n strides from the start up to some level of Xception. For\r\n example, if the Xception employs units with strides 1, 2, 1,\r\n 3, 4, 1, then valid values for the output_stride are 1, 2, 6,\r\n 24 or None (which is equivalent to output_stride=24).\r\n \r\n Raises:\r\n ValueError: If the target output_stride is not valid.\r\n \"\"\"\r\n super(StackBlocksDense, self).__init__()\r\n \r\n # The current_stride variable keeps track of the effective stride of\r\n # the activations. 
This allows us to invoke atrous convolution whenever\r\n # applying the next residual unit would result in the activations \r\n # having stride larger than the target output_stride.\r\n current_stride = 1\r\n \r\n # The atrous convolution rate parameter.\r\n rate = 1\r\n \r\n layers = []\r\n for block in blocks:\r\n for i, unit in enumerate(block.args):\r\n if output_stride is not None and current_stride > output_stride:\r\n raise ValueError('The target output_stride cannot be '\r\n 'reached.')\r\n # If we have reached the target output_stride, then we need to\r\n # employ atrous convolution with stride=1 and multiply the\r\n # atrous rate by the current unit's stride for use subsequent\r\n # layers.\r\n if output_stride is not None and current_stride == output_stride:\r\n layers += [block.unit_fn(rate=rate, **dict(unit, stride=1))]\r\n rate *= unit.get('stride', 1)\r\n else:\r\n layers += [block.unit_fn(rate=1, **unit)]\r\n current_stride *= unit.get('stride', 1)\r\n \r\n if output_stride is not None and current_stride != output_stride:\r\n raise ValueError('The target ouput_stride cannot be reached.')\r\n \r\n self._blocks = torch.nn.Sequential(*layers)\r\n \r\n def forward(self, x):\r\n \"\"\"\r\n Args:\r\n x: A tensor of shape [batch, height, widht, channels].\r\n \r\n Returns:\r\n Output tensor with stride equal to the specified output_stride.\r\n \"\"\"\r\n x = self._blocks(x)\r\n return x\r\n \r\n \r\nclass Xception(torch.nn.Module):\r\n \"\"\"Generator for Xception models.\r\n \r\n This class generates a family of Xception models. See the xception_*()\r\n methods for specific model instantiations, obtained by selecting different\r\n block instantiations that produce Xception of various depths.\r\n \"\"\"\r\n \r\n def __init__(self, blocks, num_classes=None, global_pool=True, \r\n keep_prob=0.5, output_stride=None, scope=None):\r\n \"\"\"Constructor.\r\n \r\n Args:\r\n blocks: A list of length equal to the number of Xception blocks.\r\n Each element is an Xception Block object describing the units\r\n in the block.\r\n num_classes: Number of predicted classes for classification tasks.\r\n If 0 or None, we return the features before the logit layer.\r\n global_pool: If True, we perform global average pooling before\r\n computing logits. Set to True for image classification, False\r\n for dense prediction.\r\n keep_prob: Keep probability used in the pre-logits dropout layer.\r\n output_stride: If None, the the output will be computed at the \r\n nominal network stride. 
If output_stride is not None, it\r\n specifies the requested ratio of input to output spatial\r\n resolution.\r\n scope: Optional variable_scope.\r\n \r\n Raises:\r\n ValueError: If the target output_stride is not valid.\r\n \"\"\"\r\n super(Xception, self).__init__()\r\n \r\n self._scope = scope\r\n \r\n layers = []\r\n if output_stride is not None:\r\n if output_stride % 2 != 0:\r\n raise ValueError('The output_stride must be a multiple of 2.')\r\n output_stride /= 2\r\n # Root block function operated on inputs\r\n layers += [Conv2dSame(3, 32, 3, stride=2),\r\n Conv2dSame(32, 64, 3, stride=1)]\r\n \r\n # Extract features for entry_flow, middle_flow, and exit_flow\r\n layers += [StackBlocksDense(blocks, output_stride)]\r\n \r\n if global_pool:\r\n # Global average pooling\r\n layers += [torch.nn.AdaptiveAvgPool2d(output_size=(1, 1))]\r\n if num_classes:\r\n layers += [torch.nn.Dropout2d(p=keep_prob, inplace=True),\r\n torch.nn.Conv2d(blocks[-1].args[-1]['depth_list'][-1], \r\n num_classes, 1)]\r\n self._layers = torch.nn.Sequential(*layers)\r\n \r\n def forward(self, x):\r\n \"\"\"\r\n Args:\r\n x: A tensor of shape [batch, height, widht, channels].\r\n \r\n Returns:\r\n Output tensor with stride equal to the specified output_stride.\r\n \"\"\"\r\n output = self._layers(x)\r\n \r\n \r\n x1 = self._layers[0](x)\r\n x2 = self._layers[1](x1)\r\n low_level_features = self._layers[2]._blocks[0](x2)\r\n \r\n #low_level_features = self._layers[2]._blocks[0](x1)\r\n \r\n #print('x1',x1.size())\r\n #print('x2',x2.size()) \r\n #print('low_level_features',low_level_features.size())\r\n '''\r\n if output_stride = None:\r\n output.size() torch.Size([2, 2048, 7, 7])\r\n low_level_features.size() torch.Size([2, 128, 56, 56])\r\n elif output_stride = 16:\r\n output.size() torch.Size([2, 2048, 14, 14])\r\n low_level_features.size() torch.Size([2, 128, 56, 56])\r\n \r\n \r\n '''\r\n \r\n \r\n return output,low_level_features\r\n \r\n @property\r\n def scope(self):\r\n return self._scope\r\n \r\n \r\ndef xception_block(scope,\r\n in_channels,\r\n depth_list,\r\n skip_connection_type,\r\n activation_fn_in_separable_conv,\r\n regularize_depthwise,\r\n num_units,\r\n stride,\r\n unit_rate_list=None):\r\n \"\"\"Helper function for creating a Xception block.\r\n \r\n Args:\r\n scope: The scope of the block.\r\n in_channels: The number of input filters.\r\n depth_list: The depth of the bottleneck layer for each unit.\r\n skip_connection_type: Skip connection type for the residual path. Only\r\n supports 'conv', 'sum', or 'none'.\r\n activation_fn_in_separable_conv: Includes activation function in the\r\n separable convolution or not.\r\n regularize_depthwise: Whether or not apply L2-norm regularization on \r\n the depthwise convolution weights.\r\n num_units: The number of units in the block.\r\n stride: The stride of the block, implemented as a stride in the last\r\n unit. 
All other units have stride=1.\r\n unit_rate_list: A list of three integers, determining the unit rate in\r\n the corresponding xception block.\r\n \r\n Returns:\r\n An xception block.\r\n \"\"\"\r\n if unit_rate_list is None:\r\n unit_rate_list = _DEFAULT_MULTI_GRID\r\n return Block(scope, XceptionModule, [{\r\n 'in_channels': in_channels,\r\n 'depth_list': depth_list,\r\n 'skip_connection_type': skip_connection_type,\r\n 'activation_fn_in_separable_conv': activation_fn_in_separable_conv,\r\n 'regularize_depthwise': regularize_depthwise,\r\n 'stride': stride,\r\n 'unit_rate_list': unit_rate_list,\r\n }] * num_units)\r\n \r\n \r\n\r\ndef Xception41(num_classes=None,\r\n global_pool=True,\r\n keep_prob=0.5,\r\n output_stride=None,\r\n regularize_depthwise=False,\r\n multi_grid=None,\r\n scope='xception_41'):\r\n \"\"\"Xception-41 model.\"\"\"\r\n blocks = [\r\n xception_block('entry_flow/block1',\r\n in_channels=64,\r\n depth_list=[128, 128, 128],\r\n skip_connection_type='conv',\r\n activation_fn_in_separable_conv=False,\r\n regularize_depthwise=regularize_depthwise,\r\n num_units=1,\r\n stride=2),\r\n xception_block('entry_flow/block2',\r\n in_channels=128,\r\n depth_list=[256, 256, 256],\r\n skip_connection_type='conv',\r\n activation_fn_in_separable_conv=False,\r\n regularize_depthwise=regularize_depthwise,\r\n num_units=1,\r\n stride=2),\r\n xception_block('entry_flow/block3',\r\n in_channels=256,\r\n depth_list=[728, 728, 728],\r\n skip_connection_type='conv',\r\n activation_fn_in_separable_conv=False,\r\n regularize_depthwise=regularize_depthwise,\r\n num_units=1,\r\n stride=2),\r\n xception_block('middle_flow/block1',\r\n in_channels=728,\r\n depth_list=[728, 728, 728],\r\n skip_connection_type='sum',\r\n activation_fn_in_separable_conv=False,\r\n regularize_depthwise=regularize_depthwise,\r\n num_units=8,\r\n stride=1),\r\n xception_block('exit_flow/block1',\r\n in_channels=728,\r\n depth_list=[728, 1024, 1024],\r\n skip_connection_type='conv',\r\n activation_fn_in_separable_conv=False,\r\n regularize_depthwise=regularize_depthwise,\r\n num_units=1,\r\n stride=2),\r\n xception_block('exit_flow/block2',\r\n in_channels=1024,\r\n depth_list=[1536, 1536, 2048],\r\n skip_connection_type='none',\r\n activation_fn_in_separable_conv=True,\r\n regularize_depthwise=regularize_depthwise,\r\n num_units=1,\r\n stride=1,\r\n unit_rate_list=multi_grid),\r\n ]\r\n return Xception(blocks=blocks, num_classes=num_classes,\r\n global_pool=global_pool, keep_prob=keep_prob,\r\n output_stride=output_stride, scope=scope)\r\n \r\n \r\ndef xception_41(num_classes=None,\r\n global_pool=True,\r\n keep_prob=0.5,\r\n output_stride=None,\r\n regularize_depthwise=False,\r\n multi_grid=None,\r\n scope='xception_41',\r\n pretrained=True,\r\n checkpoint_path='./pretrained/xception_41.pth'):\r\n \"\"\"Xception-41 model.\"\"\"\r\n xception = Xception41(num_classes=num_classes, global_pool=global_pool, \r\n keep_prob=keep_prob, output_stride=output_stride,\r\n scope=scope)\r\n if pretrained:\r\n _load_state_dict(xception, num_classes, checkpoint_path)\r\n return xception\r\n\r\n\r\ndef Xception65(num_classes=None,\r\n global_pool=True,\r\n keep_prob=0.5,\r\n output_stride=None,\r\n regularize_depthwise=False,\r\n multi_grid=None,\r\n scope='xception_65'):\r\n \"\"\"Xception-65 model.\"\"\"\r\n blocks = [\r\n xception_block('entry_flow/block1',\r\n in_channels=64,\r\n depth_list=[128, 128, 128],\r\n skip_connection_type='conv',\r\n activation_fn_in_separable_conv=False,\r\n 
regularize_depthwise=regularize_depthwise,\r\n num_units=1,\r\n stride=2),\r\n xception_block('entry_flow/block2',\r\n in_channels=128,\r\n depth_list=[256, 256, 256],\r\n skip_connection_type='conv',\r\n activation_fn_in_separable_conv=False,\r\n regularize_depthwise=regularize_depthwise,\r\n num_units=1,\r\n stride=2),\r\n xception_block('entry_flow/block3',\r\n in_channels=256,\r\n depth_list=[728, 728, 728],\r\n skip_connection_type='conv',\r\n activation_fn_in_separable_conv=False,\r\n regularize_depthwise=regularize_depthwise,\r\n num_units=1,\r\n stride=2),\r\n xception_block('middle_flow/block1',\r\n in_channels=728,\r\n depth_list=[728, 728, 728],\r\n skip_connection_type='sum',\r\n activation_fn_in_separable_conv=False,\r\n regularize_depthwise=regularize_depthwise,\r\n num_units=16,\r\n stride=1),\r\n xception_block('exit_flow/block1',\r\n in_channels=728,\r\n depth_list=[728, 1024, 1024],\r\n skip_connection_type='conv',\r\n activation_fn_in_separable_conv=False,\r\n regularize_depthwise=regularize_depthwise,\r\n num_units=1,\r\n stride=2),\r\n xception_block('exit_flow/block2',\r\n in_channels=1024,\r\n depth_list=[1536, 1536, 2048],\r\n skip_connection_type='none',\r\n activation_fn_in_separable_conv=True,\r\n regularize_depthwise=regularize_depthwise,\r\n num_units=1,\r\n stride=1,\r\n unit_rate_list=multi_grid),\r\n ]\r\n return Xception(blocks=blocks, num_classes=num_classes,\r\n global_pool=global_pool, keep_prob=keep_prob,\r\n output_stride=output_stride, scope=scope)\r\n\r\n\r\ndef xception_65(num_classes=None,\r\n global_pool=False,\r\n keep_prob=0.5,\r\n output_stride=None,\r\n regularize_depthwise=False,\r\n multi_grid=None,\r\n scope='xception_65',\r\n pretrained=True,\r\n checkpoint_path='./pretrained/xception_65.pth'):\r\n \"\"\"Xception-65 model.\"\"\"\r\n xception = Xception65(num_classes=num_classes, global_pool=global_pool, \r\n keep_prob=keep_prob, output_stride=output_stride,\r\n scope=scope)\r\n if pretrained:\r\n _load_state_dict(xception, num_classes, checkpoint_path='./pretrained/xception_65.pth')\r\n return xception\r\n\r\n\r\ndef Xception71(num_classes=None,\r\n global_pool=True,\r\n keep_prob=0.5,\r\n output_stride=None,\r\n regularize_depthwise=False,\r\n multi_grid=None,\r\n scope='xception_71'):\r\n \"\"\"Xception-71 model.\"\"\"\r\n blocks = [\r\n xception_block('entry_flow/block1',\r\n in_channels=64,\r\n depth_list=[128, 128, 128],\r\n skip_connection_type='conv',\r\n activation_fn_in_separable_conv=False,\r\n regularize_depthwise=regularize_depthwise,\r\n num_units=1,\r\n stride=2),\r\n xception_block('entry_flow/block2',\r\n in_channels=128,\r\n depth_list=[256, 256, 256],\r\n skip_connection_type='conv',\r\n activation_fn_in_separable_conv=False,\r\n regularize_depthwise=regularize_depthwise,\r\n num_units=1,\r\n stride=1),\r\n xception_block('entry_flow/block3',\r\n in_channels=256,\r\n depth_list=[256, 256, 256],\r\n skip_connection_type='conv',\r\n activation_fn_in_separable_conv=False,\r\n regularize_depthwise=regularize_depthwise,\r\n num_units=1,\r\n stride=2),\r\n xception_block('entry_flow/block4',\r\n in_channels=256,\r\n depth_list=[728, 728, 728],\r\n skip_connection_type='conv',\r\n activation_fn_in_separable_conv=False,\r\n regularize_depthwise=regularize_depthwise,\r\n num_units=1,\r\n stride=1),\r\n xception_block('entry_flow/block5',\r\n in_channels=728,\r\n depth_list=[728, 728, 728],\r\n skip_connection_type='conv',\r\n activation_fn_in_separable_conv=False,\r\n regularize_depthwise=regularize_depthwise,\r\n num_units=1,\r\n 
stride=2),\r\n xception_block('middle_flow/block1',\r\n in_channels=728,\r\n depth_list=[728, 728, 728],\r\n skip_connection_type='sum',\r\n activation_fn_in_separable_conv=False,\r\n regularize_depthwise=regularize_depthwise,\r\n num_units=16,\r\n stride=1),\r\n xception_block('exit_flow/block1',\r\n in_channels=728,\r\n depth_list=[728, 1024, 1024],\r\n skip_connection_type='conv',\r\n activation_fn_in_separable_conv=False,\r\n regularize_depthwise=regularize_depthwise,\r\n num_units=1,\r\n stride=2),\r\n xception_block('exit_flow/block2',\r\n in_channels=1024,\r\n depth_list=[1536, 1536, 2048],\r\n skip_connection_type='none',\r\n activation_fn_in_separable_conv=True,\r\n regularize_depthwise=regularize_depthwise,\r\n num_units=1,\r\n stride=1,\r\n unit_rate_list=multi_grid),\r\n ]\r\n return Xception(blocks=blocks, num_classes=num_classes,\r\n global_pool=global_pool, keep_prob=keep_prob,\r\n output_stride=output_stride, scope=scope)\r\n\r\n\r\ndef xception_71(num_classes=None,\r\n global_pool=True,\r\n keep_prob=0.5,\r\n output_stride=None,\r\n regularize_depthwise=False,\r\n multi_grid=None,\r\n scope='xception_71',\r\n pretrained=True,\r\n checkpoint_path='./pretrained/xception_71.pth'):\r\n \"\"\"Xception-71 model.\"\"\"\r\n xception = Xception71(num_classes=num_classes, global_pool=global_pool, \r\n keep_prob=keep_prob, output_stride=output_stride,\r\n scope=scope)\r\n if pretrained:\r\n _load_state_dict(xception, num_classes, checkpoint_path)\r\n return xception\r\n\r\n\r\ndef _load_state_dict(model, num_classes, checkpoint_path):\r\n \"\"\"Load pretrained weights.\"\"\"\r\n if os.path.exists(checkpoint_path):\r\n state_dict = torch.load(checkpoint_path)\r\n if num_classes is None or num_classes != 1001:\r\n state_dict.pop('_layers.5.weight')\r\n state_dict.pop('_layers.5.bias')\r\n model.load_state_dict(state_dict, strict=False)\r\n print('Load pretrained weights successfully.')\r\n else:\r\n raise ValueError('`checkpoint_path` does not exist.')\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n''' \r\n-> The Atrous Spatial Pyramid Pooling\r\n'''\r\n\r\ndef assp_branch(in_channels, out_channles, kernel_size, dilation):\r\n padding = 0 if kernel_size == 1 else dilation\r\n return nn.Sequential(\r\n nn.Conv2d(in_channels, out_channles, kernel_size, padding=padding, dilation=dilation, bias=False),\r\n nn.BatchNorm2d(out_channles),\r\n nn.ReLU(inplace=True))\r\n\r\nclass ASSP(nn.Module):\r\n def __init__(self, in_channels, output_stride):\r\n super(ASSP, self).__init__()\r\n\r\n assert output_stride in [8, 16], 'Only output strides of 8 or 16 are suported'\r\n if output_stride == 16: dilations = [1, 6, 12, 18]\r\n elif output_stride == 8: dilations = [1, 12, 24, 36]\r\n \r\n self.aspp1 = assp_branch(in_channels, 256, 1, dilation=dilations[0])\r\n self.aspp2 = assp_branch(in_channels, 256, 3, dilation=dilations[1])\r\n self.aspp3 = assp_branch(in_channels, 256, 3, dilation=dilations[2])\r\n self.aspp4 = assp_branch(in_channels, 256, 3, dilation=dilations[3])\r\n\r\n self.avg_pool = nn.Sequential(\r\n nn.AdaptiveAvgPool2d((1, 1)),\r\n nn.Conv2d(in_channels, 256, 1, bias=False),\r\n nn.BatchNorm2d(256),\r\n nn.ReLU(inplace=True))\r\n \r\n self.conv1 = nn.Conv2d(256*5, 256, 1, bias=False)\r\n self.bn1 = nn.BatchNorm2d(256)\r\n self.relu = nn.ReLU(inplace=True)\r\n self.dropout = nn.Dropout(0.5)\r\n\r\n initialize_weights(self)\r\n\r\n def forward(self, x):\r\n x1 = self.aspp1(x)\r\n x2 = self.aspp2(x)\r\n x3 = self.aspp3(x)\r\n x4 = self.aspp4(x)\r\n x5 = F.interpolate(self.avg_pool(x), size=(x.size(2), 
x.size(3)), mode='bilinear', align_corners=True)\r\n\r\n x = self.conv1(torch.cat((x1, x2, x3, x4, x5), dim=1))\r\n x = self.bn1(x)\r\n x = self.dropout(self.relu(x))\r\n\r\n return x\r\n\r\n''' \r\n-> Decoder\r\n'''\r\n\r\nclass Decoder(nn.Module):\r\n def __init__(self, low_level_channels, num_classes):\r\n super(Decoder, self).__init__()\r\n self.conv1 = nn.Conv2d(low_level_channels, 48, 1, bias=False)\r\n self.bn1 = nn.BatchNorm2d(48)\r\n self.relu = nn.ReLU(inplace=True)\r\n\r\n # Table 2, best performance with two 3x3 convs\r\n self.output = nn.Sequential(\r\n nn.Conv2d(48+256, 256, 3, stride=1, padding=1, bias=False),\r\n nn.BatchNorm2d(256),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(256, 256, 3, stride=1, padding=1, bias=False),\r\n nn.BatchNorm2d(256),\r\n nn.ReLU(inplace=True),\r\n nn.Dropout(0.1),\r\n nn.Conv2d(256, num_classes, 1, stride=1),\r\n )\r\n initialize_weights(self)\r\n\r\n def forward(self, x, low_level_features):\r\n low_level_features = self.conv1(low_level_features)\r\n low_level_features = self.relu(self.bn1(low_level_features))\r\n H, W = low_level_features.size(2), low_level_features.size(3)\r\n\r\n x = F.interpolate(x, size=(H, W), mode='bilinear', align_corners=True)\r\n x = self.output(torch.cat((low_level_features, x), dim=1))\r\n return x\r\n\r\n'''\r\n-> Deeplab V3 +\r\n'''\r\n\r\nclass DeepLab(BaseModel):\r\n def __init__(self, num_classes, in_channels=3, backbone='xception', pretrained=True, \r\n output_stride=16, freeze_bn=False,freeze_backbone=False, **_):\r\n \r\n super(DeepLab, self).__init__()\r\n assert ('xception' or 'resnet' in backbone)\r\n if 'resnet' in backbone:\r\n self.backbone = ResNet(in_channels=in_channels, output_stride=output_stride, pretrained=pretrained)\r\n low_level_channels = 256\r\n else:\r\n self.backbone = xception_65(output_stride=output_stride, pretrained=pretrained,global_pool=False,checkpoint_path='./pretrained/xception_65.pth')\r\n low_level_channels = 128\r\n\r\n self.ASSP = ASSP(in_channels=2048, output_stride=output_stride)\r\n self.decoder = Decoder(low_level_channels, num_classes)\r\n\r\n if freeze_bn: self.freeze_bn()\r\n if freeze_backbone: \r\n set_trainable([self.backbone], False)\r\n\r\n def forward(self, x):\r\n H, W = x.size(2), x.size(3)\r\n x, low_level_features = self.backbone(x)\r\n x = self.ASSP(x)\r\n x = self.decoder(x, low_level_features)\r\n x = F.interpolate(x, size=(H, W), mode='bilinear', align_corners=True)\r\n return x\r\n\r\n # Two functions to yield the parameters of the backbone\r\n # & Decoder / ASSP to use differentiable learning rates\r\n # FIXME: in xception, we use the parameters from xception and not aligned xception\r\n # better to have higher lr for this backbone\r\n\r\n def get_backbone_params(self):\r\n return self.backbone.parameters()\r\n\r\n def get_decoder_params(self):\r\n return chain(self.ASSP.parameters(), self.decoder.parameters())\r\n\r\n def freeze_bn(self):\r\n for module in self.modules():\r\n if isinstance(module, nn.BatchNorm2d): module.eval()\r\n \r\n" ]
[ [ "torch.nn.Sequential", "torch.nn.Dropout", "torch.nn.ReLU6", "torch.clamp", "torch.nn.Dropout2d", "torch.load", "torch.cat", "torch.nn.Conv2d", "torch.nn.MaxPool2d", "torch.nn.AdaptiveAvgPool2d", "torch.nn.functional.interpolate", "torch.nn.BatchNorm2d", "torch.nn.ReLU", "torch.nn.functional.pad" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
nguyenvo09/EACL2021
[ "6860c87425619954cacbf5a14ad20befd18ec818" ]
[ "pytorch_transformers/utils_glue.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" BERT classification fine-tuning: utilities to work with GLUE tasks \"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nimport csv\nimport logging\nimport os\nimport sys\nfrom io import open\n\nfrom scipy.stats import pearsonr, spearmanr\nfrom sklearn.metrics import matthews_corrcoef, f1_score\nfrom .tokenization_utils import PreTrainedTokenizer\n\nlogger = logging.getLogger(__name__)\nfrom typing import List\n\nclass InputExample(object):\n \"\"\"A single training/test example for simple sequence classification.\"\"\"\n\n def __init__(self, guid, text_a, text_b=None, label=None, tokenized_text_a: List[str]=None, tokenized_text_b: List[str]=None):\n \"\"\"Constructs a InputExample.\n\n Args:\n guid: Unique id for the example.\n text_a: string. The untokenized text of the first sequence. For single\n sequence tasks, only this sequence must be specified.\n text_b: (Optional) string. The untokenized text of the second sequence.\n Only must be specified for sequence pair tasks.\n label: (Optional) string. The label of the example. 
This should be\n specified for train and dev examples, but not for test examples.\n \"\"\"\n self.guid = guid\n self.text_a = text_a\n self.text_b = text_b\n self.label = label\n\n\nclass InputFeatures(object):\n \"\"\"A single set of features of data.\"\"\"\n\n def __init__(self, input_ids, input_mask, segment_ids, label_id):\n self.input_ids = input_ids\n self.input_mask = input_mask\n self.segment_ids = segment_ids\n self.label_id = label_id\n\n\nclass DataProcessor(object):\n \"\"\"Base class for data converters for sequence classification data sets.\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the train set.\"\"\"\n raise NotImplementedError()\n\n def get_dev_examples(self, data_dir):\n \"\"\"Gets a collection of `InputExample`s for the dev set.\"\"\"\n raise NotImplementedError()\n\n def get_labels(self):\n \"\"\"Gets the list of labels for this data set.\"\"\"\n raise NotImplementedError()\n\n @classmethod\n def _read_tsv(cls, input_file, quotechar=None):\n \"\"\"Reads a tab separated value file.\"\"\"\n with open(input_file, \"r\", encoding=\"utf-8-sig\") as f:\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\n lines = []\n for line in reader:\n if sys.version_info[0] == 2:\n line = list(unicode(cell, 'utf-8') for cell in line)\n lines.append(line)\n return lines\n\n\nclass MrpcProcessor(DataProcessor):\n \"\"\"Processor for the MRPC data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n logger.info(\"LOOKING AT {}\".format(os.path.join(data_dir, \"train.tsv\")))\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, i)\n text_a = line[3]\n text_b = line[4]\n label = line[0]\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples\n\n\nclass SearchProcessor(DataProcessor):\n \"\"\"Processor for the Search data set (BEN version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n logger.info(\"LOOKING AT {}\".format(os.path.join(data_dir, \"Snopes.train.tsv\")))\n return self._create_examples(\n self._read_tsv2(os.path.join(data_dir, \"Snopes.train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir, tokenizer: PreTrainedTokenizer=None):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv2(os.path.join(data_dir, \"Snopes.dev.tsv\")), \"dev\", tokenizer=tokenizer)\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\"]\n\n def _read_tsv2(cls, input_file, quotechar=None, tokenizer:PreTrainedTokenizer=None):\n \"\"\"Reads a tab separated value file.\"\"\"\n with open(input_file, \"r\", encoding=\"utf-8-sig\") as f:\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\n lines = []\n for line in reader:\n if sys.version_info[0] == 2:\n line = list(unicode(cell, 'utf-8') for cell in line)\n lines.append(line)\n return lines\n def _create_examples(self, lines, set_type, tokenizer:PreTrainedTokenizer=None):\n 
\"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n from tqdm import tqdm\n for (i, line) in tqdm(enumerate(lines)):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, i)\n text_a = line[1]\n # tokenized_text_a = tokenizer.tokenize(text_a)\n text_b = line[3]\n # tokenized_text_b = tokenizer.tokenize(text_b)\n label = line[4]\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples\n\nclass MnliProcessor(DataProcessor):\n \"\"\"Processor for the MultiNLI data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev_matched.tsv\")),\n \"dev_matched\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"contradiction\", \"entailment\", \"neutral\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, line[0])\n text_a = line[8]\n text_b = line[9]\n label = line[-1]\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples\n\n\nclass MnliMismatchedProcessor(MnliProcessor):\n \"\"\"Processor for the MultiNLI Mismatched data set (GLUE version).\"\"\"\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev_mismatched.tsv\")),\n \"dev_matched\")\n\n\nclass ColaProcessor(DataProcessor):\n \"\"\"Processor for the CoLA data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n guid = \"%s-%s\" % (set_type, i)\n text_a = line[3]\n label = line[1]\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\n return examples\n\n\nclass Sst2Processor(DataProcessor):\n \"\"\"Processor for the SST-2 data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, i)\n text_a = line[0]\n label = line[1]\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\n return examples\n\n\nclass StsbProcessor(DataProcessor):\n 
\"\"\"Processor for the STS-B data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [None]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, line[0])\n text_a = line[7]\n text_b = line[8]\n label = line[-1]\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples\n\n\nclass QqpProcessor(DataProcessor):\n \"\"\"Processor for the QQP data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, line[0])\n try:\n text_a = line[3]\n text_b = line[4]\n label = line[5]\n except IndexError:\n continue\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples\n\n\nclass QnliProcessor(DataProcessor):\n \"\"\"Processor for the QNLI data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \n \"dev_matched\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"entailment\", \"not_entailment\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, line[0])\n text_a = line[1]\n text_b = line[2]\n label = line[-1]\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples\n\n\nclass RteProcessor(DataProcessor):\n \"\"\"Processor for the RTE data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"entailment\", \"not_entailment\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, line[0])\n 
text_a = line[1]\n text_b = line[2]\n label = line[-1]\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples\n\n\nclass WnliProcessor(DataProcessor):\n \"\"\"Processor for the WNLI data set (GLUE version).\"\"\"\n\n def get_train_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"train.tsv\")), \"train\")\n\n def get_dev_examples(self, data_dir):\n \"\"\"See base class.\"\"\"\n return self._create_examples(\n self._read_tsv(os.path.join(data_dir, \"dev.tsv\")), \"dev\")\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\"]\n\n def _create_examples(self, lines, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (set_type, line[0])\n text_a = line[1]\n text_b = line[2]\n label = line[-1]\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples\n\n\ndef convert_examples_to_features(examples, label_list, max_seq_length,\n tokenizer, output_mode,\n cls_token_at_end=False,\n cls_token='[CLS]',\n cls_token_segment_id=1,\n sep_token='[SEP]',\n sep_token_extra=False,\n pad_on_left=False,\n pad_token=0,\n pad_token_segment_id=0,\n sequence_a_segment_id=0, \n sequence_b_segment_id=1,\n mask_padding_with_zero=True,\n tokenize_text=True):\n \"\"\" Loads a data file into a list of `InputBatch`s\n `cls_token_at_end` define the location of the CLS token:\n - False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]\n - True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]\n `cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet)\n \"\"\"\n\n label_map = {label : i for i, label in enumerate(label_list)}\n from tqdm import tqdm\n features = []\n ex_index = -1\n for example in tqdm(examples):\n ex_index += 1\n if ex_index % 10000 == 0:\n logger.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n\n if tokenize_text: tokens_a = tokenizer.tokenize(example.text_a)\n else: tokens_a = example.text_a.split()\n\n tokens_b = None\n if example.text_b:\n if tokenize_text: tokens_b = tokenizer.tokenize(example.text_b)\n else: tokens_b = example.text_b.split()\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\". \" -4\" for RoBERTa.\n special_tokens_count = 4 if sep_token_extra else 3\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - special_tokens_count)\n else:\n # Account for [CLS] and [SEP] with \"- 2\" and with \"- 3\" for RoBERTa.\n special_tokens_count = 3 if sep_token_extra else 2\n if len(tokens_a) > max_seq_length - special_tokens_count:\n tokens_a = tokens_a[:(max_seq_length - special_tokens_count)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). 
This is not *strictly* necessary\n # since the [SEP] token unambiguously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = tokens_a + [sep_token]\n if sep_token_extra:\n # roberta uses an extra separator b/w pairs of sentences\n tokens += [sep_token]\n segment_ids = [sequence_a_segment_id] * len(tokens)\n\n if tokens_b:\n tokens += tokens_b + [sep_token]\n segment_ids += [sequence_b_segment_id] * (len(tokens_b) + 1)\n\n if cls_token_at_end:\n tokens = tokens + [cls_token]\n segment_ids = segment_ids + [cls_token_segment_id]\n else:\n tokens = [cls_token] + tokens\n segment_ids = [cls_token_segment_id] + segment_ids\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding_length = max_seq_length - len(input_ids)\n if pad_on_left:\n input_ids = ([pad_token] * padding_length) + input_ids\n input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask\n segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids\n else:\n input_ids = input_ids + ([pad_token] * padding_length)\n input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length)\n segment_ids = segment_ids + ([pad_token_segment_id] * padding_length)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n if output_mode == \"classification\":\n label_id = label_map[example.label]\n elif output_mode == \"regression\":\n label_id = float(example.label)\n else:\n raise KeyError(output_mode)\n\n if ex_index < 5:\n logger.info(\"*** Example ***\")\n logger.info(\"guid: %s\" % (example.guid))\n logger.info(\"tokens: %s\" % \" \".join(\n [str(x) for x in tokens]))\n logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n logger.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n logger.info(\"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n logger.info(\"label: %s (id = %d)\" % (example.label, label_id))\n\n features.append(\n InputFeatures(input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id))\n return features\n\n\ndef _truncate_seq_pair(tokens_a, tokens_b, max_length):\n \"\"\"Truncates a sequence pair in place to the maximum length.\"\"\"\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. 
This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()\n\n\ndef simple_accuracy(preds, labels):\n return (preds == labels).mean()\n\n\ndef acc_and_f1(preds, labels):\n acc = simple_accuracy(preds, labels)\n f1 = f1_score(y_true=labels, y_pred=preds)\n return {\n \"acc\": acc,\n \"f1\": f1,\n \"acc_and_f1\": (acc + f1) / 2,\n }\n\n\ndef pearson_and_spearman(preds, labels):\n pearson_corr = pearsonr(preds, labels)[0]\n spearman_corr = spearmanr(preds, labels)[0]\n return {\n \"pearson\": pearson_corr,\n \"spearmanr\": spearman_corr,\n \"corr\": (pearson_corr + spearman_corr) / 2,\n }\n\n\ndef compute_metrics(task_name, preds, labels):\n assert len(preds) == len(labels)\n if task_name == \"cola\":\n return {\"mcc\": matthews_corrcoef(labels, preds)}\n elif task_name == \"sst-2\":\n return {\"acc\": simple_accuracy(preds, labels)}\n elif task_name == \"mrpc\":\n return acc_and_f1(preds, labels)\n elif task_name == \"search\":\n return acc_and_f1(preds, labels)\n elif task_name == \"sts-b\":\n return pearson_and_spearman(preds, labels)\n elif task_name == \"qqp\":\n return acc_and_f1(preds, labels)\n elif task_name == \"mnli\":\n return {\"acc\": simple_accuracy(preds, labels)}\n elif task_name == \"mnli-mm\":\n return {\"acc\": simple_accuracy(preds, labels)}\n elif task_name == \"qnli\":\n return {\"acc\": simple_accuracy(preds, labels)}\n elif task_name == \"rte\":\n return {\"acc\": simple_accuracy(preds, labels)}\n elif task_name == \"wnli\":\n return {\"acc\": simple_accuracy(preds, labels)}\n else:\n raise KeyError(task_name)\n\nprocessors = {\n \"cola\": ColaProcessor,\n \"mnli\": MnliProcessor,\n \"mnli-mm\": MnliMismatchedProcessor,\n \"mrpc\": MrpcProcessor,\n \"search\": SearchProcessor,\n \"sst-2\": Sst2Processor,\n \"sts-b\": StsbProcessor,\n \"qqp\": QqpProcessor,\n \"qnli\": QnliProcessor,\n \"rte\": RteProcessor,\n \"wnli\": WnliProcessor,\n}\n\noutput_modes = {\n \"cola\": \"classification\",\n \"mnli\": \"classification\",\n \"mnli-mm\": \"classification\",\n \"mrpc\": \"classification\",\n \"search\": \"classification\",\n \"sst-2\": \"classification\",\n \"sts-b\": \"regression\",\n \"qqp\": \"classification\",\n \"qnli\": \"classification\",\n \"rte\": \"classification\",\n \"wnli\": \"classification\",\n}\n\nGLUE_TASKS_NUM_LABELS = {\n \"cola\": 2,\n \"mnli\": 3,\n \"mrpc\": 2,\n \"sst-2\": 2,\n \"sts-b\": 1,\n \"qqp\": 2,\n \"qnli\": 2,\n \"rte\": 2,\n \"wnli\": 2,\n}\n" ]
[ [ "scipy.stats.spearmanr", "sklearn.metrics.f1_score", "sklearn.metrics.matthews_corrcoef", "scipy.stats.pearsonr" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] } ]
Dopamine0717/mmdetection
[ "96abfd90cf0e38c5ce398795f949e9328eb85c1b", "40a6fddae20978de98a335cbb45e227db782f72b" ]
[ "mmdet/models/dense_heads/reppoints_head.py", "plt_test.py" ]
[ "# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule\nfrom mmcv.ops import DeformConv2d\n\nfrom mmdet.core import (build_assigner, build_sampler, images_to_levels,\n multi_apply, unmap)\nfrom mmdet.core.anchor.point_generator import MlvlPointGenerator\nfrom mmdet.core.utils import filter_scores_and_topk\nfrom ..builder import HEADS, build_loss\nfrom .anchor_free_head import AnchorFreeHead\n\n\[email protected]_module()\nclass RepPointsHead(AnchorFreeHead):\n \"\"\"RepPoint head.\n\n Args:\n point_feat_channels (int): Number of channels of points features.\n gradient_mul (float): The multiplier to gradients from\n points refinement and recognition.\n point_strides (Iterable): points strides.\n point_base_scale (int): bbox scale for assigning labels.\n loss_cls (dict): Config of classification loss.\n loss_bbox_init (dict): Config of initial points loss.\n loss_bbox_refine (dict): Config of points loss in refinement.\n use_grid_points (bool): If we use bounding box representation, the\n reppoints is represented as grid points on the bounding box.\n center_init (bool): Whether to use center point assignment.\n transform_method (str): The methods to transform RepPoints to bbox.\n init_cfg (dict or list[dict], optional): Initialization config dict.\n \"\"\" # noqa: W605\n\n def __init__(self,\n num_classes,\n in_channels,\n point_feat_channels=256,\n num_points=9,\n gradient_mul=0.1,\n point_strides=[8, 16, 32, 64, 128],\n point_base_scale=4,\n loss_cls=dict(\n type='FocalLoss',\n use_sigmoid=True,\n gamma=2.0,\n alpha=0.25,\n loss_weight=1.0),\n loss_bbox_init=dict(\n type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=0.5),\n loss_bbox_refine=dict(\n type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0),\n use_grid_points=False,\n center_init=True,\n transform_method='moment',\n moment_mul=0.01,\n init_cfg=dict(\n type='Normal',\n layer='Conv2d',\n std=0.01,\n override=dict(\n type='Normal',\n name='reppoints_cls_out',\n std=0.01,\n bias_prob=0.01)),\n **kwargs):\n self.num_points = num_points\n self.point_feat_channels = point_feat_channels\n self.use_grid_points = use_grid_points\n self.center_init = center_init\n\n # we use deform conv to extract points features\n self.dcn_kernel = int(np.sqrt(num_points))\n self.dcn_pad = int((self.dcn_kernel - 1) / 2)\n assert self.dcn_kernel * self.dcn_kernel == num_points, \\\n 'The points number should be a square number.'\n assert self.dcn_kernel % 2 == 1, \\\n 'The points number should be an odd square number.'\n dcn_base = np.arange(-self.dcn_pad,\n self.dcn_pad + 1).astype(np.float64)\n dcn_base_y = np.repeat(dcn_base, self.dcn_kernel)\n dcn_base_x = np.tile(dcn_base, self.dcn_kernel)\n dcn_base_offset = np.stack([dcn_base_y, dcn_base_x], axis=1).reshape(\n (-1))\n self.dcn_base_offset = torch.tensor(dcn_base_offset).view(1, -1, 1, 1)\n\n super().__init__(\n num_classes,\n in_channels,\n loss_cls=loss_cls,\n init_cfg=init_cfg,\n **kwargs)\n\n self.gradient_mul = gradient_mul\n self.point_base_scale = point_base_scale\n self.point_strides = point_strides\n self.prior_generator = MlvlPointGenerator(\n self.point_strides, offset=0.)\n\n self.sampling = loss_cls['type'] not in ['FocalLoss']\n if self.train_cfg:\n self.init_assigner = build_assigner(self.train_cfg.init.assigner)\n self.refine_assigner = build_assigner(\n self.train_cfg.refine.assigner)\n # use PseudoSampler when sampling is False\n if self.sampling and hasattr(self.train_cfg, 'sampler'):\n sampler_cfg 
= self.train_cfg.sampler\n else:\n sampler_cfg = dict(type='PseudoSampler')\n self.sampler = build_sampler(sampler_cfg, context=self)\n self.transform_method = transform_method\n if self.transform_method == 'moment':\n self.moment_transfer = nn.Parameter(\n data=torch.zeros(2), requires_grad=True)\n self.moment_mul = moment_mul\n\n self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)\n if self.use_sigmoid_cls:\n self.cls_out_channels = self.num_classes\n else:\n self.cls_out_channels = self.num_classes + 1\n self.loss_bbox_init = build_loss(loss_bbox_init)\n self.loss_bbox_refine = build_loss(loss_bbox_refine)\n\n def _init_layers(self):\n \"\"\"Initialize layers of the head.\"\"\"\n self.relu = nn.ReLU(inplace=True)\n self.cls_convs = nn.ModuleList()\n self.reg_convs = nn.ModuleList()\n for i in range(self.stacked_convs):\n chn = self.in_channels if i == 0 else self.feat_channels\n self.cls_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.reg_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n pts_out_dim = 4 if self.use_grid_points else 2 * self.num_points\n self.reppoints_cls_conv = DeformConv2d(self.feat_channels,\n self.point_feat_channels,\n self.dcn_kernel, 1,\n self.dcn_pad)\n self.reppoints_cls_out = nn.Conv2d(self.point_feat_channels,\n self.cls_out_channels, 1, 1, 0)\n self.reppoints_pts_init_conv = nn.Conv2d(self.feat_channels,\n self.point_feat_channels, 3,\n 1, 1)\n self.reppoints_pts_init_out = nn.Conv2d(self.point_feat_channels,\n pts_out_dim, 1, 1, 0)\n self.reppoints_pts_refine_conv = DeformConv2d(self.feat_channels,\n self.point_feat_channels,\n self.dcn_kernel, 1,\n self.dcn_pad)\n self.reppoints_pts_refine_out = nn.Conv2d(self.point_feat_channels,\n pts_out_dim, 1, 1, 0)\n\n def points2bbox(self, pts, y_first=True):\n \"\"\"Converting the points set into bounding box.\n\n :param pts: the input points sets (fields), each points\n set (fields) is represented as 2n scalar.\n :param y_first: if y_first=True, the point set is represented as\n [y1, x1, y2, x2 ... yn, xn], otherwise the point set is\n represented as [x1, y1, x2, y2 ... xn, yn].\n :return: each points set is converting to a bbox [x1, y1, x2, y2].\n \"\"\"\n pts_reshape = pts.view(pts.shape[0], -1, 2, *pts.shape[2:])\n pts_y = pts_reshape[:, :, 0, ...] if y_first else pts_reshape[:, :, 1,\n ...]\n pts_x = pts_reshape[:, :, 1, ...] 
if y_first else pts_reshape[:, :, 0,\n ...]\n if self.transform_method == 'minmax':\n bbox_left = pts_x.min(dim=1, keepdim=True)[0]\n bbox_right = pts_x.max(dim=1, keepdim=True)[0]\n bbox_up = pts_y.min(dim=1, keepdim=True)[0]\n bbox_bottom = pts_y.max(dim=1, keepdim=True)[0]\n bbox = torch.cat([bbox_left, bbox_up, bbox_right, bbox_bottom],\n dim=1)\n elif self.transform_method == 'partial_minmax':\n pts_y = pts_y[:, :4, ...]\n pts_x = pts_x[:, :4, ...]\n bbox_left = pts_x.min(dim=1, keepdim=True)[0]\n bbox_right = pts_x.max(dim=1, keepdim=True)[0]\n bbox_up = pts_y.min(dim=1, keepdim=True)[0]\n bbox_bottom = pts_y.max(dim=1, keepdim=True)[0]\n bbox = torch.cat([bbox_left, bbox_up, bbox_right, bbox_bottom],\n dim=1)\n elif self.transform_method == 'moment':\n pts_y_mean = pts_y.mean(dim=1, keepdim=True)\n pts_x_mean = pts_x.mean(dim=1, keepdim=True)\n pts_y_std = torch.std(pts_y - pts_y_mean, dim=1, keepdim=True)\n pts_x_std = torch.std(pts_x - pts_x_mean, dim=1, keepdim=True)\n moment_transfer = (self.moment_transfer * self.moment_mul) + (\n self.moment_transfer.detach() * (1 - self.moment_mul))\n moment_width_transfer = moment_transfer[0]\n moment_height_transfer = moment_transfer[1]\n half_width = pts_x_std * torch.exp(moment_width_transfer)\n half_height = pts_y_std * torch.exp(moment_height_transfer)\n bbox = torch.cat([\n pts_x_mean - half_width, pts_y_mean - half_height,\n pts_x_mean + half_width, pts_y_mean + half_height\n ],\n dim=1)\n else:\n raise NotImplementedError\n return bbox\n\n def gen_grid_from_reg(self, reg, previous_boxes):\n \"\"\"Base on the previous bboxes and regression values, we compute the\n regressed bboxes and generate the grids on the bboxes.\n\n :param reg: the regression value to previous bboxes.\n :param previous_boxes: previous bboxes.\n :return: generate grids on the regressed bboxes.\n \"\"\"\n b, _, h, w = reg.shape\n bxy = (previous_boxes[:, :2, ...] + previous_boxes[:, 2:, ...]) / 2.\n bwh = (previous_boxes[:, 2:, ...] -\n previous_boxes[:, :2, ...]).clamp(min=1e-6)\n grid_topleft = bxy + bwh * reg[:, :2, ...] 
- 0.5 * bwh * torch.exp(\n reg[:, 2:, ...])\n grid_wh = bwh * torch.exp(reg[:, 2:, ...])\n grid_left = grid_topleft[:, [0], ...]\n grid_top = grid_topleft[:, [1], ...]\n grid_width = grid_wh[:, [0], ...]\n grid_height = grid_wh[:, [1], ...]\n intervel = torch.linspace(0., 1., self.dcn_kernel).view(\n 1, self.dcn_kernel, 1, 1).type_as(reg)\n grid_x = grid_left + grid_width * intervel\n grid_x = grid_x.unsqueeze(1).repeat(1, self.dcn_kernel, 1, 1, 1)\n grid_x = grid_x.view(b, -1, h, w)\n grid_y = grid_top + grid_height * intervel\n grid_y = grid_y.unsqueeze(2).repeat(1, 1, self.dcn_kernel, 1, 1)\n grid_y = grid_y.view(b, -1, h, w)\n grid_yx = torch.stack([grid_y, grid_x], dim=2)\n grid_yx = grid_yx.view(b, -1, h, w)\n regressed_bbox = torch.cat([\n grid_left, grid_top, grid_left + grid_width, grid_top + grid_height\n ], 1)\n return grid_yx, regressed_bbox\n\n def forward(self, feats):\n return multi_apply(self.forward_single, feats)\n\n def forward_single(self, x):\n \"\"\"Forward feature map of a single FPN level.\"\"\"\n dcn_base_offset = self.dcn_base_offset.type_as(x)\n # If we use center_init, the initial reppoints is from center points.\n # If we use bounding bbox representation, the initial reppoints is\n # from regular grid placed on a pre-defined bbox.\n if self.use_grid_points or not self.center_init:\n scale = self.point_base_scale / 2\n points_init = dcn_base_offset / dcn_base_offset.max() * scale\n bbox_init = x.new_tensor([-scale, -scale, scale,\n scale]).view(1, 4, 1, 1)\n else:\n points_init = 0\n cls_feat = x\n pts_feat = x\n for cls_conv in self.cls_convs:\n cls_feat = cls_conv(cls_feat)\n for reg_conv in self.reg_convs:\n pts_feat = reg_conv(pts_feat)\n # initialize reppoints\n pts_out_init = self.reppoints_pts_init_out(\n self.relu(self.reppoints_pts_init_conv(pts_feat)))\n if self.use_grid_points:\n pts_out_init, bbox_out_init = self.gen_grid_from_reg(\n pts_out_init, bbox_init.detach())\n else:\n pts_out_init = pts_out_init + points_init\n # refine and classify reppoints\n pts_out_init_grad_mul = (1 - self.gradient_mul) * pts_out_init.detach(\n ) + self.gradient_mul * pts_out_init\n dcn_offset = pts_out_init_grad_mul - dcn_base_offset\n cls_out = self.reppoints_cls_out(\n self.relu(self.reppoints_cls_conv(cls_feat, dcn_offset)))\n pts_out_refine = self.reppoints_pts_refine_out(\n self.relu(self.reppoints_pts_refine_conv(pts_feat, dcn_offset)))\n if self.use_grid_points:\n pts_out_refine, bbox_out_refine = self.gen_grid_from_reg(\n pts_out_refine, bbox_out_init.detach())\n else:\n pts_out_refine = pts_out_refine + pts_out_init.detach()\n\n if self.training:\n return cls_out, pts_out_init, pts_out_refine\n else:\n return cls_out, self.points2bbox(pts_out_refine)\n\n def get_points(self, featmap_sizes, img_metas, device):\n \"\"\"Get points according to feature map sizes.\n\n Args:\n featmap_sizes (list[tuple]): Multi-level feature map sizes.\n img_metas (list[dict]): Image meta info.\n\n Returns:\n tuple: points of each image, valid flags of each image\n \"\"\"\n num_imgs = len(img_metas)\n\n # since feature map sizes of all images are the same, we only compute\n # points center for one time\n multi_level_points = self.prior_generator.grid_priors(\n featmap_sizes, device=device, with_stride=True)\n points_list = [[point.clone() for point in multi_level_points]\n for _ in range(num_imgs)]\n\n # for each image, we compute valid flags of multi level grids\n valid_flag_list = []\n for img_id, img_meta in enumerate(img_metas):\n multi_level_flags = 
self.prior_generator.valid_flags(\n featmap_sizes, img_meta['pad_shape'])\n valid_flag_list.append(multi_level_flags)\n\n return points_list, valid_flag_list\n\n def centers_to_bboxes(self, point_list):\n \"\"\"Get bboxes according to center points.\n\n Only used in :class:`MaxIoUAssigner`.\n \"\"\"\n bbox_list = []\n for i_img, point in enumerate(point_list):\n bbox = []\n for i_lvl in range(len(self.point_strides)):\n scale = self.point_base_scale * self.point_strides[i_lvl] * 0.5\n bbox_shift = torch.Tensor([-scale, -scale, scale,\n scale]).view(1, 4).type_as(point[0])\n bbox_center = torch.cat(\n [point[i_lvl][:, :2], point[i_lvl][:, :2]], dim=1)\n bbox.append(bbox_center + bbox_shift)\n bbox_list.append(bbox)\n return bbox_list\n\n def offset_to_pts(self, center_list, pred_list):\n \"\"\"Change from point offset to point coordinate.\"\"\"\n pts_list = []\n for i_lvl in range(len(self.point_strides)):\n pts_lvl = []\n for i_img in range(len(center_list)):\n pts_center = center_list[i_img][i_lvl][:, :2].repeat(\n 1, self.num_points)\n pts_shift = pred_list[i_lvl][i_img]\n yx_pts_shift = pts_shift.permute(1, 2, 0).view(\n -1, 2 * self.num_points)\n y_pts_shift = yx_pts_shift[..., 0::2]\n x_pts_shift = yx_pts_shift[..., 1::2]\n xy_pts_shift = torch.stack([x_pts_shift, y_pts_shift], -1)\n xy_pts_shift = xy_pts_shift.view(*yx_pts_shift.shape[:-1], -1)\n pts = xy_pts_shift * self.point_strides[i_lvl] + pts_center\n pts_lvl.append(pts)\n pts_lvl = torch.stack(pts_lvl, 0)\n pts_list.append(pts_lvl)\n return pts_list\n\n def _point_target_single(self,\n flat_proposals,\n valid_flags,\n gt_bboxes,\n gt_bboxes_ignore,\n gt_labels,\n stage='init',\n unmap_outputs=True):\n inside_flags = valid_flags\n if not inside_flags.any():\n return (None, ) * 7\n # assign gt and sample proposals\n proposals = flat_proposals[inside_flags, :]\n\n if stage == 'init':\n assigner = self.init_assigner\n pos_weight = self.train_cfg.init.pos_weight\n else:\n assigner = self.refine_assigner\n pos_weight = self.train_cfg.refine.pos_weight\n assign_result = assigner.assign(proposals, gt_bboxes, gt_bboxes_ignore,\n None if self.sampling else gt_labels)\n sampling_result = self.sampler.sample(assign_result, proposals,\n gt_bboxes)\n\n num_valid_proposals = proposals.shape[0]\n bbox_gt = proposals.new_zeros([num_valid_proposals, 4])\n pos_proposals = torch.zeros_like(proposals)\n proposals_weights = proposals.new_zeros([num_valid_proposals, 4])\n labels = proposals.new_full((num_valid_proposals, ),\n self.num_classes,\n dtype=torch.long)\n label_weights = proposals.new_zeros(\n num_valid_proposals, dtype=torch.float)\n\n pos_inds = sampling_result.pos_inds\n neg_inds = sampling_result.neg_inds\n if len(pos_inds) > 0:\n pos_gt_bboxes = sampling_result.pos_gt_bboxes\n bbox_gt[pos_inds, :] = pos_gt_bboxes\n pos_proposals[pos_inds, :] = proposals[pos_inds, :]\n proposals_weights[pos_inds, :] = 1.0\n if gt_labels is None:\n # Only rpn gives gt_labels as None\n # Foreground is the first class\n labels[pos_inds] = 0\n else:\n labels[pos_inds] = gt_labels[\n sampling_result.pos_assigned_gt_inds]\n if pos_weight <= 0:\n label_weights[pos_inds] = 1.0\n else:\n label_weights[pos_inds] = pos_weight\n if len(neg_inds) > 0:\n label_weights[neg_inds] = 1.0\n\n # map up to original set of proposals\n if unmap_outputs:\n num_total_proposals = flat_proposals.size(0)\n labels = unmap(labels, num_total_proposals, inside_flags)\n label_weights = unmap(label_weights, num_total_proposals,\n inside_flags)\n bbox_gt = unmap(bbox_gt, 
num_total_proposals, inside_flags)\n pos_proposals = unmap(pos_proposals, num_total_proposals,\n inside_flags)\n proposals_weights = unmap(proposals_weights, num_total_proposals,\n inside_flags)\n\n return (labels, label_weights, bbox_gt, pos_proposals,\n proposals_weights, pos_inds, neg_inds)\n\n def get_targets(self,\n proposals_list,\n valid_flag_list,\n gt_bboxes_list,\n img_metas,\n gt_bboxes_ignore_list=None,\n gt_labels_list=None,\n stage='init',\n label_channels=1,\n unmap_outputs=True):\n \"\"\"Compute corresponding GT box and classification targets for\n proposals.\n\n Args:\n proposals_list (list[list]): Multi level points/bboxes of each\n image.\n valid_flag_list (list[list]): Multi level valid flags of each\n image.\n gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.\n img_metas (list[dict]): Meta info of each image.\n gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be\n ignored.\n gt_bboxes_list (list[Tensor]): Ground truth labels of each box.\n stage (str): `init` or `refine`. Generate target for init stage or\n refine stage\n label_channels (int): Channel of label.\n unmap_outputs (bool): Whether to map outputs back to the original\n set of anchors.\n\n Returns:\n tuple:\n - labels_list (list[Tensor]): Labels of each level.\n - label_weights_list (list[Tensor]): Label weights of each level. # noqa: E501\n - bbox_gt_list (list[Tensor]): Ground truth bbox of each level.\n - proposal_list (list[Tensor]): Proposals(points/bboxes) of each level. # noqa: E501\n - proposal_weights_list (list[Tensor]): Proposal weights of each level. # noqa: E501\n - num_total_pos (int): Number of positive samples in all images. # noqa: E501\n - num_total_neg (int): Number of negative samples in all images. # noqa: E501\n \"\"\"\n assert stage in ['init', 'refine']\n num_imgs = len(img_metas)\n assert len(proposals_list) == len(valid_flag_list) == num_imgs\n\n # points number of multi levels\n num_level_proposals = [points.size(0) for points in proposals_list[0]]\n\n # concat all level points and flags to a single tensor\n for i in range(num_imgs):\n assert len(proposals_list[i]) == len(valid_flag_list[i])\n proposals_list[i] = torch.cat(proposals_list[i])\n valid_flag_list[i] = torch.cat(valid_flag_list[i])\n\n # compute targets for each image\n if gt_bboxes_ignore_list is None:\n gt_bboxes_ignore_list = [None for _ in range(num_imgs)]\n if gt_labels_list is None:\n gt_labels_list = [None for _ in range(num_imgs)]\n (all_labels, all_label_weights, all_bbox_gt, all_proposals,\n all_proposal_weights, pos_inds_list, neg_inds_list) = multi_apply(\n self._point_target_single,\n proposals_list,\n valid_flag_list,\n gt_bboxes_list,\n gt_bboxes_ignore_list,\n gt_labels_list,\n stage=stage,\n unmap_outputs=unmap_outputs)\n # no valid points\n if any([labels is None for labels in all_labels]):\n return None\n # sampled points of all images\n num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])\n num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])\n labels_list = images_to_levels(all_labels, num_level_proposals)\n label_weights_list = images_to_levels(all_label_weights,\n num_level_proposals)\n bbox_gt_list = images_to_levels(all_bbox_gt, num_level_proposals)\n proposals_list = images_to_levels(all_proposals, num_level_proposals)\n proposal_weights_list = images_to_levels(all_proposal_weights,\n num_level_proposals)\n return (labels_list, label_weights_list, bbox_gt_list, proposals_list,\n proposal_weights_list, num_total_pos, num_total_neg)\n\n 
def loss_single(self, cls_score, pts_pred_init, pts_pred_refine, labels,\n label_weights, bbox_gt_init, bbox_weights_init,\n bbox_gt_refine, bbox_weights_refine, stride,\n num_total_samples_init, num_total_samples_refine):\n # classification loss\n labels = labels.reshape(-1)\n label_weights = label_weights.reshape(-1)\n cls_score = cls_score.permute(0, 2, 3,\n 1).reshape(-1, self.cls_out_channels)\n cls_score = cls_score.contiguous()\n loss_cls = self.loss_cls(\n cls_score,\n labels,\n label_weights,\n avg_factor=num_total_samples_refine)\n\n # points loss\n bbox_gt_init = bbox_gt_init.reshape(-1, 4)\n bbox_weights_init = bbox_weights_init.reshape(-1, 4)\n bbox_pred_init = self.points2bbox(\n pts_pred_init.reshape(-1, 2 * self.num_points), y_first=False)\n bbox_gt_refine = bbox_gt_refine.reshape(-1, 4)\n bbox_weights_refine = bbox_weights_refine.reshape(-1, 4)\n bbox_pred_refine = self.points2bbox(\n pts_pred_refine.reshape(-1, 2 * self.num_points), y_first=False)\n normalize_term = self.point_base_scale * stride\n loss_pts_init = self.loss_bbox_init(\n bbox_pred_init / normalize_term,\n bbox_gt_init / normalize_term,\n bbox_weights_init,\n avg_factor=num_total_samples_init)\n loss_pts_refine = self.loss_bbox_refine(\n bbox_pred_refine / normalize_term,\n bbox_gt_refine / normalize_term,\n bbox_weights_refine,\n avg_factor=num_total_samples_refine)\n return loss_cls, loss_pts_init, loss_pts_refine\n\n def loss(self,\n cls_scores,\n pts_preds_init,\n pts_preds_refine,\n gt_bboxes,\n gt_labels,\n img_metas,\n gt_bboxes_ignore=None):\n featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n device = cls_scores[0].device\n label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1\n\n # target for initial stage\n center_list, valid_flag_list = self.get_points(featmap_sizes,\n img_metas, device)\n pts_coordinate_preds_init = self.offset_to_pts(center_list,\n pts_preds_init)\n if self.train_cfg.init.assigner['type'] == 'PointAssigner':\n # Assign target for center list\n candidate_list = center_list\n else:\n # transform center list to bbox list and\n # assign target for bbox list\n bbox_list = self.centers_to_bboxes(center_list)\n candidate_list = bbox_list\n cls_reg_targets_init = self.get_targets(\n candidate_list,\n valid_flag_list,\n gt_bboxes,\n img_metas,\n gt_bboxes_ignore_list=gt_bboxes_ignore,\n gt_labels_list=gt_labels,\n stage='init',\n label_channels=label_channels)\n (*_, bbox_gt_list_init, candidate_list_init, bbox_weights_list_init,\n num_total_pos_init, num_total_neg_init) = cls_reg_targets_init\n num_total_samples_init = (\n num_total_pos_init +\n num_total_neg_init if self.sampling else num_total_pos_init)\n\n # target for refinement stage\n center_list, valid_flag_list = self.get_points(featmap_sizes,\n img_metas, device)\n pts_coordinate_preds_refine = self.offset_to_pts(\n center_list, pts_preds_refine)\n bbox_list = []\n for i_img, center in enumerate(center_list):\n bbox = []\n for i_lvl in range(len(pts_preds_refine)):\n bbox_preds_init = self.points2bbox(\n pts_preds_init[i_lvl].detach())\n bbox_shift = bbox_preds_init * self.point_strides[i_lvl]\n bbox_center = torch.cat(\n [center[i_lvl][:, :2], center[i_lvl][:, :2]], dim=1)\n bbox.append(bbox_center +\n bbox_shift[i_img].permute(1, 2, 0).reshape(-1, 4))\n bbox_list.append(bbox)\n cls_reg_targets_refine = self.get_targets(\n bbox_list,\n valid_flag_list,\n gt_bboxes,\n img_metas,\n gt_bboxes_ignore_list=gt_bboxes_ignore,\n gt_labels_list=gt_labels,\n stage='refine',\n 
label_channels=label_channels)\n (labels_list, label_weights_list, bbox_gt_list_refine,\n candidate_list_refine, bbox_weights_list_refine, num_total_pos_refine,\n num_total_neg_refine) = cls_reg_targets_refine\n num_total_samples_refine = (\n num_total_pos_refine +\n num_total_neg_refine if self.sampling else num_total_pos_refine)\n\n # compute loss\n losses_cls, losses_pts_init, losses_pts_refine = multi_apply(\n self.loss_single,\n cls_scores,\n pts_coordinate_preds_init,\n pts_coordinate_preds_refine,\n labels_list,\n label_weights_list,\n bbox_gt_list_init,\n bbox_weights_list_init,\n bbox_gt_list_refine,\n bbox_weights_list_refine,\n self.point_strides,\n num_total_samples_init=num_total_samples_init,\n num_total_samples_refine=num_total_samples_refine)\n loss_dict_all = {\n 'loss_cls': losses_cls,\n 'loss_pts_init': losses_pts_init,\n 'loss_pts_refine': losses_pts_refine\n }\n return loss_dict_all\n\n # Same as base_dense_head/_get_bboxes_single except self._bbox_decode\n def _get_bboxes_single(self,\n cls_score_list,\n bbox_pred_list,\n score_factor_list,\n mlvl_priors,\n img_meta,\n cfg,\n rescale=False,\n with_nms=True,\n **kwargs):\n \"\"\"Transform outputs of a single image into bbox predictions.\n\n Args:\n cls_score_list (list[Tensor]): Box scores from all scale\n levels of a single image, each item has shape\n (num_priors * num_classes, H, W).\n bbox_pred_list (list[Tensor]): Box energies / deltas from\n all scale levels of a single image, each item has shape\n (num_priors * 4, H, W).\n score_factor_list (list[Tensor]): Score factor from all scale\n levels of a single image. RepPoints head does not need\n this value.\n mlvl_priors (list[Tensor]): Each element in the list is\n the priors of a single level in feature pyramid, has shape\n (num_priors, 2).\n img_meta (dict): Image meta info.\n cfg (mmcv.Config): Test / postprocessing configuration,\n if None, test_cfg would be used.\n rescale (bool): If True, return boxes in original image space.\n Default: False.\n with_nms (bool): If True, do nms before return boxes.\n Default: True.\n\n Returns:\n tuple[Tensor]: Results of detected bboxes and labels. If with_nms\n is False and mlvl_score_factor is None, return mlvl_bboxes and\n mlvl_scores, else return mlvl_bboxes, mlvl_scores and\n mlvl_score_factor. Usually with_nms is False is used for aug\n test. 
If with_nms is True, then return the following format\n\n - det_bboxes (Tensor): Predicted bboxes with shape \\\n [num_bboxes, 5], where the first 4 columns are bounding \\\n box positions (tl_x, tl_y, br_x, br_y) and the 5-th \\\n column are scores between 0 and 1.\n - det_labels (Tensor): Predicted labels of the corresponding \\\n box with shape [num_bboxes].\n \"\"\"\n cfg = self.test_cfg if cfg is None else cfg\n assert len(cls_score_list) == len(bbox_pred_list)\n img_shape = img_meta['img_shape']\n nms_pre = cfg.get('nms_pre', -1)\n\n mlvl_bboxes = []\n mlvl_scores = []\n mlvl_labels = []\n for level_idx, (cls_score, bbox_pred, priors) in enumerate(\n zip(cls_score_list, bbox_pred_list, mlvl_priors)):\n assert cls_score.size()[-2:] == bbox_pred.size()[-2:]\n bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)\n\n cls_score = cls_score.permute(1, 2,\n 0).reshape(-1, self.cls_out_channels)\n if self.use_sigmoid_cls:\n scores = cls_score.sigmoid()\n else:\n scores = cls_score.softmax(-1)[:, :-1]\n\n # After https://github.com/open-mmlab/mmdetection/pull/6268/,\n # this operation keeps fewer bboxes under the same `nms_pre`.\n # There is no difference in performance for most models. If you\n # find a slight drop in performance, you can set a larger\n # `nms_pre` than before.\n results = filter_scores_and_topk(\n scores, cfg.score_thr, nms_pre,\n dict(bbox_pred=bbox_pred, priors=priors))\n scores, labels, _, filtered_results = results\n\n bbox_pred = filtered_results['bbox_pred']\n priors = filtered_results['priors']\n\n bboxes = self._bbox_decode(priors, bbox_pred,\n self.point_strides[level_idx],\n img_shape)\n\n mlvl_bboxes.append(bboxes)\n mlvl_scores.append(scores)\n mlvl_labels.append(labels)\n\n return self._bbox_post_process(\n mlvl_scores,\n mlvl_labels,\n mlvl_bboxes,\n img_meta['scale_factor'],\n cfg,\n rescale=rescale,\n with_nms=with_nms)\n\n def _bbox_decode(self, points, bbox_pred, stride, max_shape):\n bbox_pos_center = torch.cat([points[:, :2], points[:, :2]], dim=1)\n bboxes = bbox_pred * stride + bbox_pos_center\n x1 = bboxes[:, 0].clamp(min=0, max=max_shape[1])\n y1 = bboxes[:, 1].clamp(min=0, max=max_shape[0])\n x2 = bboxes[:, 2].clamp(min=0, max=max_shape[1])\n y2 = bboxes[:, 3].clamp(min=0, max=max_shape[0])\n decoded_bboxes = torch.stack([x1, y1, x2, y2], dim=-1)\n return decoded_bboxes\n", "import matplotlib.pyplot as plt\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\nax = plt.axes(projection='3d')\nax.scatter(np.random.rand(10),np.random.rand(10),np.random.rand(10))\nplt.show()" ]
[ [ "torch.linspace", "numpy.sqrt", "torch.Tensor", "torch.cat", "torch.zeros", "numpy.arange", "torch.nn.ReLU", "torch.nn.Conv2d", "torch.nn.ModuleList", "numpy.tile", "torch.zeros_like", "numpy.stack", "torch.exp", "torch.tensor", "torch.std", "torch.stack", "numpy.repeat" ], [ "matplotlib.pyplot.axes", "matplotlib.use", "matplotlib.pyplot.show", "numpy.random.rand" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
liquidpizza/gpxo
[ "4f8eb43a4d6b879f51a7e688dfa80b4aa5558889" ]
[ "gpxo/track.py" ]
[ "\"\"\"General tools for gpx data processing based on gpxpy.\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nimport gpxpy\nfrom vincenty import vincenty\nimport mplleaflet\n\nfrom .general import smooth, closest_pt\n\n\n# =============================== Misc. Config ===============================\n\n# short names for plots\n\nshortnames = {'t': 'time',\n 's': 'duration (s)',\n 'd': 'distance (km)',\n 'v': 'velocity (km/h)',\n 'z': 'elevation (m)',\n 'c': 'compass (°)'}\n\n\n# ========================= Misc. private functions ==========================\n\n\n# Function to transform array of timedeltas to seoncds\n_total_seconds = np.vectorize(lambda dt: dt.total_seconds())\n\n\n# ============================ Main class (Track) ============================\n\nclass Track:\n\n def __init__(self, filename, track=0, segment=0):\n\n with open(filename, 'r') as gpx_file:\n gpx = gpxpy.parse(gpx_file)\n\n pts = gpx.tracks[track].segments[segment].points\n\n self.latitude = np.array([pt.latitude for pt in pts])\n self.longitude = np.array([pt.longitude for pt in pts])\n self.elevation = np.array([pt.elevation for pt in pts])\n self.time = np.array([pt.time for pt in pts])\n\n # If some elevation or time data is missing, just set attribute to None\n\n if any(self.time == None):\n self.time = None\n\n if any(self.elevation == None):\n self.elevation = None\n\n @staticmethod\n def _distance(position1, position2):\n \"\"\"Distance between two positions (latitude, longitude).\"\"\"\n return vincenty(position1, position2)\n\n def _resample(self, quantity, reference):\n \"\"\"Resample quantities (velocity, compass) to fall back on reference\n\n Reference is typically time or distance.\"\"\"\n # midpoints correponding to shifted quantity\n midpts = reference[:-1] + (np.diff(reference) / 2)\n # linear interpolation to fall back to initial times\n qty_resampled = np.interp(reference, midpts, quantity)\n return qty_resampled\n\n @property\n def seconds(self):\n if self.time is not None:\n return _total_seconds(self.time - self.time[0])\n\n @property\n def distance(self):\n \"\"\"Travelled distance in kilometers.\"\"\"\n\n ds = [0]\n\n x1s = self.latitude[:-1]\n x2s = self.latitude[1:]\n\n y1s = self.longitude[:-1]\n y2s = self.longitude[1:]\n\n for x1, x2, y1, y2 in zip(x1s, x2s, y1s, y2s):\n dd = self._distance((x1, y1), (x2, y2))\n ds.append(dd)\n\n return np.cumsum(ds)\n\n @property\n def compass(self):\n \"\"\"Compass bearing in decimal degrees (°). 
See gpxo.compass\"\"\"\n lat1, long1 = np.radians((self.latitude[:-1], self.longitude[:-1]))\n lat2, long2 = np.radians((self.latitude[1:], self.longitude[1:]))\n\n d_long = long2 - long1\n\n x = np.sin(d_long) * np.cos(lat2)\n y = np.cos(lat1) * np.sin(lat2) - (np.sin(lat1) * np.cos(lat2) * np.cos(d_long))\n\n # Resample before taking arctan because if not, interpolation fails\n # when the signal fluctuates between 0 and 360° when compass is N\n x_res = self._resample(x, self.distance)\n y_res = self._resample(y, self.distance)\n\n initial_bearing = np.arctan2(x_res, y_res)\n\n # Now we have the initial bearing but np.arctan2 return values\n # from -180° to + 180° which is not what we want for a compass bearing\n # The solution is to normalize the initial bearing as shown below\n initial_bearing = np.degrees(initial_bearing)\n compass_bearing = (initial_bearing + 360) % 360\n\n return compass_bearing\n\n @property\n def velocity(self):\n \"\"\"Instantaneous velocity in km/h.\"\"\"\n if self.time is not None:\n dt = np.diff(self.seconds)\n dd = np.diff(self.distance)\n vs = 3600 * dd / dt\n return self._resample(vs, self.seconds)\n else:\n return None\n\n @property\n def data(self):\n \"\"\"pd.DataFrame with all track data (time, position, velocity etc.)\"\"\"\n\n names = ['latitude (°)', 'longitude (°)', 'distance (km)', 'compass (°)']\n columns = [self.latitude, self.longitude, self.distance, self.compass]\n\n if self.time is not None:\n names += ['time', ' duration (s)', 'velocity (km/h)']\n columns += [self.time, self.seconds, self.velocity]\n\n if self.elevation is not None:\n names.append('elevation (m)')\n columns.append(self.elevation)\n\n data = pd.DataFrame(dict(zip(names, columns)))\n\n if self.time is not None:\n data['time'] = data['time'].dt.tz_localize(None)\n data.set_index('time', inplace=True)\n\n return data\n\n def _shortname_to_column(self, name):\n \"\"\"shorname to column name in self.data.\"\"\"\n try:\n cname = shortnames[name]\n except KeyError:\n raise ValueError(f'Invalid short name: {name}. ')\n\n if cname == 'time':\n column = self.data.index\n else:\n try:\n column = self.data[cname]\n except KeyError:\n raise KeyError(f'{cname} Data unavailable in current track. ')\n\n return {'name': cname, 'column': column}\n\n def plot(self, mode, *args, **kwargs):\n \"\"\"Plot columns of self.data (use pandas DataFrame plot arguments).\n\n Parameters\n ----------\n - mode (str): 2 letters that define short names for x and y axis\n - *args: any additional argument for matplotlib ax.plot()\n - **kwargs: any additional keyword argument for matplotlib ax.plot()\n\n Output\n ------\n - matplotlib axes\n\n Short names\n -----------\n 't': 'time'\n 's': 'duration (s)'\n 'd': 'distance (km)'\n 'v': 'velocity (km/h)'\n 'z': 'elevation (m)'\n 'c': 'compass (°)'\n \"\"\"\n try:\n xname, yname = mode\n except ValueError:\n raise ValueError('Invalid plot mode (should be two letters, e.g. '\n f\"'tv', not {mode}\")\n\n xinfo = self._shortname_to_column(xname)\n xlabel = xinfo['name']\n x = xinfo['column']\n\n yinfo = self._shortname_to_column(yname)\n ylabel = yinfo['name']\n y = yinfo['column']\n\n fig, ax = plt.subplots()\n ax.plot(x, y, *args, **kwargs)\n\n if xlabel == 'time':\n fig.autofmt_xdate()\n\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n\n return ax\n\n def smooth(self, n=5, window='hanning'):\n \"\"\"Smooth position data (and subsequently distance, velocity etc.)\n\n Parameters\n ----------\n - n: size of moving window for smoothing\n - window: type of window (e.g. 
'hanning' or 'flat', see gpxo.smooth())\n \"\"\"\n self.latitude = smooth(self.latitude, n=n, window=window)\n self.longitude = smooth(self.longitude, n=n, window=window)\n self.elevation = smooth(self.elevation, n=n, window=window)\n\n def closest_to(self, pt):\n \"\"\"Find index of point in trajectory that is closest to pt=(lat, long).\"\"\"\n return closest_pt(pt, (self.latitude, self.longitude))\n\n def map(self, map_type='osm', embed=False, ax=None, size=(10, 10),\n plot='plot', **kwargs):\n \"\"\"Plot trajectory on map.\n\n Parameters\n ----------\n - map_type can be e.g. osm, esri_aerial, esri_worldtopo, etc. see:\n https://github.com/jwass/mplleaflet/blob/master/mplleaflet/maptiles.py\n\n - embed: if True, embed plot in Jupyter. If False (default), open in\n browser.\n\n - ax: if not None, use provided matplotlib axes.\n\n - size: when embedded, size of the figure.\n\n - plot: 'plot' or 'scatter'\n\n - **kwargs: any plt.plot or plt.scatter keyword arguments\n \"\"\"\n if ax is None:\n fig, ax = plt.subplots(figsize=size)\n else:\n fig = ax.figure\n\n if plot == 'plot':\n ax.plot(self.longitude, self.latitude, '.-r', **kwargs)\n elif plot == 'scatter':\n ax.scatter(self.longitude, self.latitude, **kwargs)\n else:\n raise ValueError(f'Unrecognized plot type: {plot}')\n\n parameters = {'fig': fig, 'tiles': map_type}\n if embed:\n leaflet = mplleaflet.display(**parameters)\n else:\n leaflet = mplleaflet.show(**parameters)\n\n return leaflet\n" ]
[ [ "numpy.radians", "numpy.degrees", "numpy.cumsum", "matplotlib.pyplot.subplots", "numpy.sin", "numpy.arctan2", "numpy.cos", "numpy.diff", "numpy.interp", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
JayanthRR/ConCURL_NCE
[ "5471b022a571ae61bd891783084512c3a227829b" ]
[ "losses.py" ]
[ "import torch\nimport torch.nn as nn\nimport time\nimport sys\n\nsoftmax = nn.Softmax(dim=1).cuda()\n\n\ndef distributed_sinkhorn(Q, nmb_iters):\n with torch.no_grad():\n sum_Q = torch.sum(Q)\n # dist.all_reduce(sum_Q)\n Q /= sum_Q\n\n u = torch.zeros(Q.shape[0]).cuda(non_blocking=True)\n r = torch.ones(Q.shape[0]).cuda(non_blocking=True) / Q.shape[0]\n c = torch.ones(Q.shape[1]).cuda(non_blocking=True) / ( Q.shape[1])\n\n curr_sum = torch.sum(Q, dim=1)\n # dist.all_reduce(curr_sum)\n\n for it in range(nmb_iters):\n u = curr_sum\n Q *= (r / u).unsqueeze(1)\n Q *= (c / torch.sum(Q, dim=0)).unsqueeze(0)\n curr_sum = torch.sum(Q, dim=1)\n # dist.all_reduce(curr_sum)\n \n return (Q / torch.sum(Q, dim=0, keepdim=True)).t().float()\n\n\ndef getQ(out_queue, epsilon=0.05):\n \n return distributed_sinkhorn(torch.exp(out_queue / epsilon).t(), 3)\n \ndef byol_loss_fn(x, y):\n #x = F.normalize(x, dim=-1, p=2)\n #y = F.normalize(y, dim=-1, p=2)\n return 2 - 2 * (x * y).sum(dim=-1)\n\n\ndef ByolLoss(features_one, features_two):\n online_pred_one = nn.functional.normalize(features_one['online_pred'], dim=1, p=2)\n online_pred_two = nn.functional.normalize(features_two['online_pred'], dim=1, p=2)\n target_proj_one = nn.functional.normalize(features_one['target_proj'], dim=1, p=2)\n target_proj_two = nn.functional.normalize(features_two['target_proj'], dim=1, p=2)\n\n byol_loss = byol_loss_fn(online_pred_one, target_proj_two).mean() + byol_loss_fn(online_pred_two, target_proj_one).mean()\n\n sys.stdout.flush()\n return byol_loss\n\ndef softSubLosses(outOne, outTwo,qOne, qTwo, param=0.1):\n pOne = softmax(outOne/param)\n pTwo = softmax(outTwo/param)\n subloss_1 = - torch.mean(torch.sum(qTwo * torch.log(pOne), dim=1))\n subloss_2 = - torch.mean(torch.sum(qOne * torch.log(pTwo), dim=1))\n return subloss_1, subloss_2\n\n\ndef SoftLoss(outcodes_one, outcodes_two, alpha=1, temperature=0.1, overclustering=False):\n if alpha > 0:\n if overclustering:\n out_one, out_two = outcodes_one['cTz_overcluster'], outcodes_two['cTz_overcluster']\n else:\n out_one, out_two = outcodes_one['cTz'], outcodes_two['cTz']\n #ATTENTION: I have deleted clone operations. Please think about it. 
My decision can be wrong!!!!\n with torch.no_grad():\n q_one = getQ(out_one)\n q_two = getQ(out_two)\n \n subloss_1, subloss_2 = softSubLosses(out_one, out_two, q_one, q_two, temperature)\n\n sys.stdout.flush()\n\n return (subloss_1 + subloss_2)/2.0, q_one, q_two\n else:\n return torch.tensor(0), None, None\n\ndef ConsensusLossForAGivenProjection(out_rand_one, out_rand_two, q_one, q_two, param=0.1):\n p_rand_one = softmax(out_rand_one/ param)\n p_rand_two = softmax(out_rand_two/ param)\n rand_loss_1 = -torch.mean(torch.sum(q_two * torch.log(p_rand_one), dim=1))\n rand_loss_2 = -torch.mean(torch.sum(q_one * torch.log(p_rand_two), dim=1))\n return (-torch.mean(torch.sum(q_two * torch.log(p_rand_one), dim=1)) - torch.mean(torch.sum(q_one * torch.log(p_rand_two), dim=1)))/2\n\n\ndef ConsensusLoss(gamma, outcodes_one, outcodes_two, rand_outs_one, rand_outs_two, q_one, q_two, overclustering=False, temperature=0.1):\n loss = torch.tensor(0).cuda()\n\n if q_one is None or q_two is None:\n # check this when gamma>0 but alpha=0\n if overclustering:\n out_one, out_two = outcodes_one['cTz_overcluster'], outcodes_two['cTz_overcluster']\n else:\n out_one, out_two = outcodes_one['cTz'], outcodes_two['cTz']\n\n q_one = getQ(out_one)\n q_two = getQ(out_two)\n\n if gamma > 0:\n for randind in range(len(rand_outs_one)):\n\n if overclustering:\n temp = ConsensusLossForAGivenProjection(rand_outs_one[randind]['cTz_overcluster'], rand_outs_two[randind]['cTz_overcluster'], q_one, q_two, temperature)\n loss = loss + temp\n else:\n \n temp= ConsensusLossForAGivenProjection(rand_outs_one[randind]['cTz'], rand_outs_two[randind]['cTz'], q_one, q_two, temperature)\n loss = loss + temp\n\n sys.stdout.flush()\n\n return loss/len(rand_outs_one)\n" ]
[ [ "torch.nn.functional.normalize", "torch.nn.Softmax", "torch.ones", "torch.zeros", "torch.sum", "torch.tensor", "torch.exp", "torch.no_grad", "torch.log" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
LucaAngioloni/ProteineSecondaryStructure-CNN
[ "c85571bbcdf17b4a753dce6ed0e4346111ea43a0" ]
[ "Whole Protein Prediction CNN/dataset.py" ]
[ "# MIT License\n#\n# Copyright (c) 2017 Luca Angioloni\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport numpy as np\n\ndataset_path = \"../dataset/cullpdb+profile_6133.npy\"\n# dataset_path = \"../dataset/cullpdb+profile_6133_filtered.npy\"\n\ncb513_path = \"../dataset/cb513+profile_split1.npy\"\n\nsequence_len = 700\ntotal_features = 57\namino_acid_residues = 21\nnum_classes = 8\n\n\ndef get_dataset(path=dataset_path):\n ds = np.load(path)\n ds = np.reshape(ds, (ds.shape[0], sequence_len, total_features))\n ret = np.zeros((ds.shape[0], ds.shape[1], amino_acid_residues + num_classes))\n ret[:, :, 0:amino_acid_residues] = ds[:, :, 35:56]\n ret[:, :, amino_acid_residues:] = ds[:, :, amino_acid_residues + 1:amino_acid_residues+ 1 + num_classes]\n return ret\n\n\ndef get_data_labels(D):\n X = D[:, :, 0:amino_acid_residues]\n Y = D[:, :, amino_acid_residues:amino_acid_residues + num_classes]\n return X, Y\n\n\ndef split_like_paper(Dataset):\n # Dataset subdivision following dataset readme and paper\n Train = Dataset[0:5600, :, :]\n Test = Dataset[5600:5877, :, :]\n Validation = Dataset[5877:, :, :]\n return Train, Test, Validation\n\n\ndef split_with_shuffle(Dataset, seed=None):\n np.random.seed(seed)\n np.random.shuffle(Dataset)\n train_split = int(Dataset.shape[0]*0.8)\n test_val_split = int(Dataset.shape[0]*0.1)\n Train = Dataset[0:train_split, :, :]\n Test = Dataset[train_split:train_split+test_val_split, :, :]\n Validation = Dataset[train_split+test_val_split:, :, :]\n return Train, Test, Validation\n\n\ndef get_cb513():\n CB = get_dataset(cb513_path)\n X, Y = get_data_labels(CB)\n return X, Y\n\nif __name__ == '__main__':\n dataset = get_dataset()\n\n D_train, D_test, D_val = split_with_shuffle(dataset, 100)\n\n X_train, Y_train = get_data_labels(D_train)\n X_test, Y_test = get_data_labels(D_test)\n X_val, Y_val = get_data_labels(D_val)\n\n print(\"Dataset Loaded\")" ]
[ [ "numpy.random.seed", "numpy.reshape", "numpy.random.shuffle", "numpy.load", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
18463105800/ssd.pruning.pytorch
[ "39592ee00e02f28742028a97592beec18d07258c" ]
[ "pruning/prune_resnet_tools.py" ]
[ "'''\r\n This file contains functions for pruning resnet-like model in layer level\r\n 1. prune_resconv_layer (resnet: conv layers)\r\n 2. prune_resnet_lconv_layer (resnet: lconv means identity layer)\r\n 3. prune_rbconv_by_indices (resnet: rbconv means right path's bottom layer)\r\n 4. prune_rbconv_by_number (resnet: used when you prune lconv but next block/layer cannot absorb your effect)\r\n 5. prune_ruconv1_layer (resnet: for resnet normal conv1 layers (i.e. right path's first upper layers))\r\n 6. prune_ruconv2_layer (resnet: for resnet normal conv2 layers (i.e. right path's second upper layers))\r\n\r\n Author: xuhuahuang as intern in YouTu 07/2018\r\n'''\r\nimport torch\r\nfrom torch.autograd import Variable\r\nfrom torchvision import models\r\nimport cv2\r\ncv2.setNumThreads(0) # pytorch issue 1355: possible deadlock in DataLoader\r\n# OpenCL may be enabled by default in OpenCV3;\r\n# disable it because it because it's not thread safe and causes unwanted GPU memory allocations\r\ncv2.ocl.setUseOpenCL(False)\r\nimport sys\r\nimport numpy as np\r\nfrom models.resnet import BasicBlock, Bottleneck\r\n\r\ndef replace_layers(model, i, indexes, layers):\r\n if i in indexes:\r\n # layers and indexes store new layers used to update old layers\r\n return layers[indexes.index(i)]\r\n # if i not in indexes, use old layers\r\n return model[i]\r\n\r\n# helper function\r\n'''\r\n Helper function for updating immediate following layer/block's input channels\r\n Args:\r\n model: model after pruning current layer/block\r\n layer_index: current layer index. Locate the block/layer being pruned filters NOW\r\n filters_to_prune: the output channels indices being pruned\r\n **Note**\r\n Not handle case described by prune_rbconv_by_number()\r\n Not handle case inside prune_ruconv1_layer() and prune_ruconv2_layer() because they are inside same block\r\n'''\r\ndef update_next_layers(model, layer_index, filters_to_prune):\r\n # only need to change in_channels for all following objects based on filters_to_prune\r\n next_conv = None\r\n next_blk = None\r\n next_ds = None # if next one is a block, and this block has downsample path, you need to update both residual and downsample path\r\n offset = 1\r\n # search for the next conv, based on current conv with id = (layer_index, filter_index)\r\n while layer_index + offset < len(model.base._modules.items()):\r\n res = list(model.base._modules.items())[layer_index+offset] # name, module\r\n if isinstance(res[1], torch.nn.modules.conv.Conv2d):\r\n next_name, next_conv = res\r\n next_is_block = False\r\n break\r\n elif isinstance(res[1], (BasicBlock, Bottleneck)):\r\n next_is_block = True\r\n next_blk = res[1]\r\n if res[1].downsample is None:\r\n next_conv = res[1].conv1\r\n next_ds = None\r\n else:\r\n next_conv = res[1].conv1\r\n next_ds = res[1].downsample\r\n break\r\n offset = offset + 1\r\n\r\n if next_conv is None:\r\n print(\"No filter will be prunned for this layer (last layer)\")\r\n return model\r\n if len(filters_to_prune) == 0:\r\n print(\"No filter will be prunned for this layer\")\r\n return model\r\n\r\n cut = len(filters_to_prune)\r\n\r\n # next_conv must exists\r\n next_new_conv = \\\r\n torch.nn.Conv2d(in_channels = next_conv.in_channels - cut,\\\r\n out_channels = next_conv.out_channels, \\\r\n kernel_size = next_conv.kernel_size, \\\r\n stride = next_conv.stride,\r\n padding = next_conv.padding,\r\n dilation = next_conv.dilation,\r\n groups = next_conv.groups,\r\n bias = next_conv.bias is not None)\r\n\r\n old_weights = 
next_conv.weight.data.cpu().numpy()\r\n new_weights = next_new_conv.weight.data.cpu().numpy()\r\n\r\n new_weights = np.delete(old_weights, filters_to_prune, axis = 1)\r\n next_new_conv.weight.data = torch.from_numpy(new_weights).cuda()\r\n\r\n if next_conv.bias is not None:\r\n next_new_conv.bias.data = next_conv.bias.data\r\n\r\n # next_ds exists or not is okay, no matter next_is_block is True or not\r\n if next_ds is not None:\r\n old_conv_in_next_ds = next_ds[0]\r\n new_conv_in_next_new_ds = \\\r\n torch.nn.Conv2d(in_channels = old_conv_in_next_ds.in_channels - cut,\\\r\n out_channels = old_conv_in_next_ds.out_channels, \\\r\n kernel_size = old_conv_in_next_ds.kernel_size, \\\r\n stride = old_conv_in_next_ds.stride,\r\n padding = old_conv_in_next_ds.padding,\r\n dilation = old_conv_in_next_ds.dilation,\r\n groups = old_conv_in_next_ds.groups,\r\n bias = old_conv_in_next_ds.bias is not None)\r\n\r\n old_weights = old_conv_in_next_ds.weight.data.cpu().numpy()\r\n new_weights = new_conv_in_next_new_ds.weight.data.cpu().numpy()\r\n\r\n new_weights = np.delete(old_weights, filters_to_prune, axis = 1)\r\n new_conv_in_next_new_ds.weight.data = torch.from_numpy(new_weights).cuda()\r\n if old_conv_in_next_ds.bias is not None:\r\n new_conv_in_next_new_ds.bias.data = old_conv_in_next_ds.bias.data # bias won't change\r\n\r\n next_new_ds = torch.nn.Sequential(new_conv_in_next_new_ds, next_ds[1]) # BN keeps unchanged\r\n else:\r\n next_new_ds = None\r\n\r\n # next_new_ds and next_new_conv are ready now, create a next_new_block for replace_layers()\r\n if next_is_block: #same as next_blk is not None:\r\n if isinstance(next_blk, BasicBlock):\r\n # rely on conv1 of old block to get in_planes, out_planes, tride\r\n next_new_block = BasicBlock(next_blk.conv1.in_channels - cut, \\\r\n next_blk.conv1.out_channels, next_blk.stride, downsample = next_new_ds)\r\n next_new_block.conv1 = next_new_conv # only update in_channels\r\n next_new_block.bn1 = next_blk.bn1\r\n next_new_block.relu = next_blk.relu\r\n next_new_block.conv2 = next_blk.conv2\r\n next_new_block.bn2 = next_blk.bn2\r\n else:\r\n next_new_block = Bottleneck(next_blk.conv1.in_channels - cut, \\\r\n next_blk.conv1.out_channels, next_blk.stride, downsample = next_new_ds)\r\n next_new_block.conv1 = next_new_conv # only update in_channels\r\n next_new_block.bn1 = next_blk.bn1\r\n next_new_block.conv2 = next_blk.conv2\r\n next_new_block.bn2 = next_blk.bn2\r\n next_new_block.conv3 = next_blk.conv3\r\n next_new_block.bn3 = next_blk.bn3\r\n next_new_block.relu = next_blk.relu\r\n\r\n if not next_is_block:\r\n base = torch.nn.Sequential(\r\n *(replace_layers(model.base, i, [layer_index+offset], \\\r\n [next_new_conv]) for i, _ in enumerate(model.base)))\r\n else:\r\n base = torch.nn.Sequential(\r\n *(replace_layers(model.base, i, [layer_index+offset], \\\r\n [next_new_block]) for i, _ in enumerate(model.base)))\r\n\r\n del model.base # delete and replace with brand new one\r\n model.base = base\r\n print(\"Finished update next layers.\")\r\n\r\n return model\r\n\r\n'''\r\n--------------------------------------------------------------------------------\r\n 1. Prune conv layers in resnet with/without BN (only support layers stored in model.base for now)\r\n Args:\r\n model: model for pruning\r\n layer_index: index the pruned layer's location within model\r\n cut_ratio: the ratio of filters you want to prune from this layer (e.g. 
20% - cut 20% lowest weights layers)\r\n Adapted from: https://github.com/jacobgil/pytorch-pruning\r\n'''\r\ndef prune_resconv_layer(model, layer_index, cut_ratio=0.2, use_bn = True):\r\n _, conv = list(model.base._modules.items())[layer_index]\r\n if use_bn:\r\n _, old_bn = list(model.base._modules.items())[layer_index + 1]\r\n\r\n next_conv = None\r\n offset = 1\r\n # search for the next conv, based on current conv with id = (layer_index, filter_index)\r\n while layer_index + offset < len(model.base._modules.items()):\r\n res = list(model.base._modules.items())[layer_index+offset] # name, module\r\n if isinstance(res[1], torch.nn.modules.conv.Conv2d):\r\n next_name, next_conv = res\r\n break\r\n elif isinstance(res[1], (BasicBlock, Bottleneck)):\r\n next_conv = res[1].conv1\r\n break\r\n offset = offset + 1\r\n\r\n if next_conv is None:\r\n print(\"No filter will be prunned for this layer (last layer)\")\r\n return model\r\n\r\n num_filters = conv.weight.data.size(0) # out_channels x in_channels x 3 x 3\r\n # skip the layer with only one filter left\r\n if num_filters <= 1:\r\n print(\"No filter will be prunned for this layer (num_filters<=1)\")\r\n return model\r\n\r\n cut = int(cut_ratio * num_filters)\r\n\r\n if cut < 1:\r\n print(\"No filter will be prunned for this layer (cut<1)\")\r\n return model\r\n if (num_filters - cut) < 1:\r\n print(\"No filter will be prunned for this layer (no filter left after cutting)\")\r\n return model\r\n\r\n # rank the filters within this layer and store into filter_ranks\r\n abs_wgt = torch.abs(conv.weight.data)\r\n values = \\\r\n torch.sum(abs_wgt, dim = 1, keepdim = True).\\\r\n sum(dim=2, keepdim = True).sum(dim=3, keepdim = True)[:, 0, 0, 0]# .data\r\n # Normalize the sum of weight by the filter dimensions in x 3 x 3\r\n values = values / (abs_wgt.size(1) * abs_wgt.size(2) * abs_wgt.size(3)) # (filter_number for this layer, 1)\r\n\r\n print(\"Ranking filters.. \")\r\n filters_to_prune = np.argsort(values.cpu().numpy())[:cut] # order from smallest to largest\r\n print(\"Filters that will be prunned\", filters_to_prune)\r\n print(\"Pruning filters.. 
\")\r\n\r\n # the updated conv for current conv, with cut output channels being pruned\r\n new_conv = \\\r\n torch.nn.Conv2d(in_channels = conv.in_channels, \\\r\n out_channels = conv.out_channels - cut,\r\n kernel_size = conv.kernel_size, \\\r\n stride = conv.stride,\r\n padding = conv.padding,\r\n dilation = conv.dilation,\r\n groups = conv.groups,\r\n bias = conv.bias is not None) #(out_channels)\r\n\r\n old_weights = conv.weight.data.cpu().numpy() # (out_channels, in_channels, kernel_size[0], kernel_size[1]\r\n new_weights = new_conv.weight.data.cpu().numpy()\r\n\r\n # skip that filter's weight inside old_weights and store others into new_weights\r\n new_weights = np.delete(old_weights, filters_to_prune, axis = 0)\r\n new_conv.weight.data = torch.from_numpy(new_weights).cuda()\r\n\r\n if conv.bias is not None: # no bias for conv layers\r\n bias_numpy = conv.bias.data.cpu().numpy()\r\n\r\n # change size to (out_channels - cut)\r\n bias = np.zeros(shape = (bias_numpy.shape[0] - cut), dtype = np.float32)\r\n bias = np.delete(bias_numpy, filters_to_prune, axis = None)\r\n new_conv.bias.data = torch.from_numpy(bias).cuda()\r\n\r\n # BatchNorm modification\r\n # TODO: Extract this function outside as a separate func.\r\n if use_bn:\r\n new_bn = torch.nn.BatchNorm2d(num_features=new_conv.out_channels, \\\r\n eps=old_bn.eps, momentum=old_bn.momentum, affine=old_bn.affine)\r\n # old_bn.affine == True, need to copy learning gamma and beta to new_bn\r\n # gamma: size = (num_features)\r\n old_weights = old_bn.weight.data.cpu().numpy()\r\n new_weights = new_bn.weight.data.cpu().numpy()\r\n new_weights = np.delete(old_weights, filters_to_prune)\r\n new_bn.weight.data = torch.from_numpy(new_weights).cuda()\r\n\r\n # beta: size = (num_features)\r\n bias_numpy = old_bn.bias.data.cpu().numpy()\r\n # change size to (out_channels - cut)\r\n bias = np.zeros(shape = (bias_numpy.shape[0] - cut), dtype = np.float32)\r\n bias = np.delete(bias_numpy, filters_to_prune)\r\n new_bn.bias.data = torch.from_numpy(bias).cuda()\r\n\r\n if use_bn:\r\n # BatchNorm modification\r\n base = torch.nn.Sequential(\r\n *(replace_layers(model.base, i, [layer_index, layer_index+1], \\\r\n [new_conv, new_bn]) for i, _ in enumerate(model.base)))\r\n del old_bn\r\n else:\r\n # replace current layer and next_conv with new_conv and next_new_conv respectively\r\n base = torch.nn.Sequential(\r\n *(replace_layers(model.base, i, [layer_index], \\\r\n [new_conv]) for i, _ in enumerate(model.base)))\r\n\r\n del model.base # delete and replace with brand new one\r\n del conv\r\n\r\n model.base = base # update current layer\r\n model = update_next_layers(model, layer_index, filters_to_prune) # update following layers\r\n\r\n message = str(100*float(cut) / num_filters) + \"%\"\r\n print(\"Filters prunned\", str(message))\r\n\r\n return model\r\n\r\n'''\r\n--------------------------------------------------------------------------------\r\n 2. 
Prune identity conv layers without/with BN in a resnet block\r\n (*Note: NOT used for normal layer, the 'layer' here must locate inside a block indexed by block_index)\r\n Args:\r\n block_index: a block also named as a 'layer' in torchvision implementation, locate lconv layer\r\n *Note:\r\n The index criteria based on 'one single block' unit, which means 1 index represents 1 BasicBlock/Bottleneck, instead of one layer (3-6 blocks)\r\n Return:\r\n cut_indices: the filters_to_prune in this layer, will be used in function 5.\r\n'''\r\ndef prune_resnet_lconv_layer(model, block_index, cut_ratio=0.2, use_bn = True):\r\n _, blk = list(model.base._modules.items())[block_index]\r\n cut_indices = None\r\n\r\n if not use_bn:\r\n print(\"ResNet without BN is not supported for prunning\")\r\n return cut_indices, model\r\n\r\n # check whether the left path has conv layer for prunning\r\n if blk.downsample == None:\r\n print(\"No filters will be prunned because lconv doesn't exist\")\r\n return cut_indices, model\r\n\r\n if not isinstance(blk, (BasicBlock, Bottleneck)):\r\n print(\"Only support for ResNet with BasicBlock or Bottleneck defined in torchvision\")\r\n return cut_indices, model\r\n\r\n # get old conv and bn on the left\r\n lconv = blk.downsample[0] # nn.Sequential for (lconv, lbn)\r\n lbn = blk.downsample[1]\r\n next_conv = None\r\n offset = 1\r\n # search for the next conv, can be conv1 within next block, or a normal conv layer\r\n while block_index + offset < len(model.base._modules.items()):\r\n res = list(model.base._modules.items())[block_index+offset] # name, module\r\n if isinstance(res[1], torch.nn.modules.conv.Conv2d):\r\n next_name, next_conv = res\r\n break\r\n elif isinstance(res[1], (BasicBlock, Bottleneck)):\r\n next_conv = res[1].conv1\r\n break\r\n offset = offset + 1\r\n\r\n if next_conv is None:\r\n print(\"No filters will be prunned because this is the last block\")\r\n return cut_indices, model\r\n\r\n num_filters = lconv.weight.data.size(0) # out_channels x in_channels x 3 x 3\r\n # skip the layer with only one filter left\r\n if num_filters <= 1:\r\n print(\"No filter will be prunned for this layer (num_filters<=1)\")\r\n return cut_indices, model\r\n\r\n cut = int(cut_ratio * num_filters)\r\n\r\n if cut < 1:\r\n print(\"No filter will be prunned for this layer (cut<1)\")\r\n return cut_indices, model\r\n if (num_filters - cut) < 1:\r\n print(\"No filter will be prunned for this layer (no filter left after cutting)\")\r\n return cut_indices, model\r\n\r\n # rank the filters within this layer and store into filter_ranks\r\n abs_wgt = torch.abs(lconv.weight.data)\r\n values = \\\r\n torch.sum(abs_wgt, dim = 1, keepdim = True).\\\r\n sum(dim=2, keepdim = True).sum(dim=3, keepdim = True)[:, 0, 0, 0]# .data\r\n # Normalize the sum of weight by the filter dimensions in x 3 x 3\r\n values = values / (abs_wgt.size(1) * abs_wgt.size(2) * abs_wgt.size(3)) # (filter_number for this layer, 1)\r\n\r\n print(\"Ranking filters.. \")\r\n filters_to_prune = np.argsort(values.cpu().numpy())[:cut] # order from smallest to largest\r\n print(\"Filters that will be prunned\", filters_to_prune)\r\n print(\"Pruning filters.. 
\")\r\n\r\n # the updated conv for old lconv, with cut output channels being pruned\r\n new_conv = \\\r\n torch.nn.Conv2d(in_channels = lconv.in_channels, \\\r\n out_channels = lconv.out_channels - cut,\r\n kernel_size = lconv.kernel_size, \\\r\n stride = lconv.stride,\r\n padding = lconv.padding,\r\n dilation = lconv.dilation,\r\n groups = lconv.groups,\r\n bias = lconv.bias is not None) #(out_channels)\r\n\r\n old_weights = lconv.weight.data.cpu().numpy() # (out_channels, in_channels, kernel_size[0], kernel_size[1]\r\n new_weights = new_conv.weight.data.cpu().numpy()\r\n\r\n # skip that filter's weight inside old_weights and store others into new_weights\r\n new_weights = np.delete(old_weights, filters_to_prune, axis = 0)\r\n new_conv.weight.data = torch.from_numpy(new_weights).cuda()\r\n\r\n if lconv.bias is not None:\r\n bias_numpy = lconv.bias.data.cpu().numpy()\r\n\r\n # change size to (out_channels - cut)\r\n bias = np.zeros(shape = (bias_numpy.shape[0] - cut), dtype = np.float32)\r\n bias = np.delete(bias_numpy, filters_to_prune, axis = None)\r\n new_conv.bias.data = torch.from_numpy(bias).cuda()\r\n\r\n # new BN layer after new_conv\r\n new_bn = torch.nn.BatchNorm2d(num_features=new_conv.out_channels, \\\r\n eps=lbn.eps, momentum=lbn.momentum, affine=lbn.affine)\r\n # old_bn.affine == True, need to copy learnable gamma and beta to new_bn\r\n # gamma: size = (num_features)\r\n old_weights = lbn.weight.data.cpu().numpy()\r\n new_weights = new_bn.weight.data.cpu().numpy()\r\n new_weights = np.delete(old_weights, filters_to_prune)\r\n new_bn.weight.data = torch.from_numpy(new_weights).cuda()\r\n\r\n # beta: size = (num_features)\r\n bias_numpy = lbn.bias.data.cpu().numpy()\r\n # change size to (out_channels - cut)\r\n bias = np.zeros(shape = (bias_numpy.shape[0] - cut), dtype = np.float32)\r\n bias = np.delete(bias_numpy, filters_to_prune)\r\n new_bn.bias.data = torch.from_numpy(bias).cuda()\r\n\r\n # replace\r\n # update current left conv + left BN layer, have BN by default\r\n new_ds = torch.nn.Sequential(\r\n *(replace_layers(blk.downsample, i, [0, 1], \\\r\n [new_conv, new_bn]) for i, _ in enumerate(blk.downsample)))\r\n\r\n # delete current and replace with a brand new BLOCK\r\n if isinstance(blk, BasicBlock):\r\n # rely on conv1 of old block to get in_planes, out_planes, tride\r\n new_blk = BasicBlock(blk.conv1.in_channels, blk.conv1.out_channels, \\\r\n blk.stride, downsample = new_ds)\r\n # keep all layers in residual path unchanged tempararily\r\n new_blk.conv1 = blk.conv1\r\n new_blk.bn1 = blk.bn1\r\n new_blk.relu = blk.relu\r\n new_blk.conv2 = blk.conv2\r\n new_blk.bn2 = blk.bn2\r\n else:\r\n new_blk = Bottleneck(blk.conv1.in_channels, blk.conv1.out_channels, \\\r\n blk.stride, downsample = new_ds)\r\n # keep all layers in residual path unchanged tempararily\r\n new_blk.conv1 = blk.conv1\r\n new_blk.bn1 = blk.bn1\r\n new_blk.conv2 = blk.conv2\r\n new_blk.bn2 = blk.bn2\r\n new_blk.conv3 = blk.conv3\r\n new_blk.bn3 = blk.bn3\r\n new_blk.relu = blk.relu\r\n\r\n # now new_blk is ready, it can act as a layer and replace old blk with replace_layers()\r\n base = torch.nn.Sequential(\r\n *(replace_layers(model.base, i, [block_index], \\\r\n [new_blk]) for i, _ in enumerate(model.base)))\r\n\r\n # delete and replace with brand new one\r\n del model.base # delete the things pointed by pointer\r\n del blk\r\n\r\n model.base = base # update current layer\r\n model = update_next_layers(model, block_index, filters_to_prune) # update following layers\r\n\r\n cut_indices = 
filters_to_prune\r\n message = str(100*float(cut) / num_filters) + \"%\"\r\n print(\"Filters prunned\", str(message))\r\n\r\n return cut_indices, model\r\n\r\n'''\r\n--------------------------------------------------------------------------------\r\n 3. Prune residual conv layer, the one at the bottom of residual side with/without BN\r\n (*Note: MUST call this after you prune identity path with downsample, the size won't fit because upper functions only update left path)\r\n Args:\r\n block_index: the BasicBlock or Bottleneck Block this layer locates\r\n filters_to_prune: the filters' indices waiting for being pruned\r\n use_bn: use Batch Norm or not\r\n'''\r\ndef prune_rbconv_by_indices(model, block_index, filters_to_prune, use_bn = True):\r\n _, blk = list(model.base._modules.items())[block_index]\r\n\r\n if not use_bn:\r\n print(\"ResNet without BN is not supported for prunning\")\r\n return model\r\n\r\n # check whether the left path has conv layer for prunning\r\n if blk.downsample == None:\r\n print(\"Only support pruning for rbconv after lconv was pruned\")\r\n return model\r\n\r\n if not isinstance(blk, (BasicBlock, Bottleneck)):\r\n print(\"Only support for ResNet with BasicBlock or Bottleneck defined in torchvision\")\r\n return model\r\n\r\n if isinstance(blk, BasicBlock):\r\n # when it is BasicBlock, the rbconv is conv2, and its bn is bn2\r\n conv = blk.conv2\r\n bn = blk.bn2\r\n else:\r\n # when it is Bottleneck, the rbconv is conv3, and its bn is bn3\r\n conv = blk.conv3\r\n bn = blk.bn3\r\n # only need to update itself, no need to care about others such as next_ds/next_conv\r\n new_conv = \\\r\n torch.nn.Conv2d(in_channels = conv.in_channels, \\\r\n out_channels = conv.out_channels - len(filters_to_prune),\r\n kernel_size = conv.kernel_size, \\\r\n stride = conv.stride,\r\n padding = conv.padding,\r\n dilation = conv.dilation,\r\n groups = conv.groups,\r\n bias = conv.bias is not None) #(out_channels)\r\n\r\n old_weights = conv.weight.data.cpu().numpy() # (out_channels, in_channels, kernel_size[0], kernel_size[1]\r\n new_weights = new_conv.weight.data.cpu().numpy()\r\n\r\n # skip that filter's weight inside old_weights and store others into new_weights\r\n new_weights = np.delete(old_weights, filters_to_prune, axis = 0)\r\n new_conv.weight.data = torch.from_numpy(new_weights).cuda()\r\n\r\n if conv.bias is not None:\r\n bias_numpy = conv.bias.data.cpu().numpy()\r\n\r\n # change size to (out_channels - cut)\r\n bias = np.zeros(shape = (bias_numpy.shape[0] - len(filters_to_prune)), dtype = np.float32)\r\n bias = np.delete(bias_numpy, filters_to_prune, axis = None)\r\n new_conv.bias.data = torch.from_numpy(bias).cuda()\r\n\r\n # new BN layer after new_conv\r\n new_bn = torch.nn.BatchNorm2d(num_features=new_conv.out_channels, \\\r\n eps=bn.eps, momentum=bn.momentum, affine=bn.affine)\r\n # old_bn.affine == True, need to copy learnable gamma and beta to new_bn\r\n # gamma: size = (num_features)\r\n old_weights = bn.weight.data.cpu().numpy()\r\n new_weights = new_bn.weight.data.cpu().numpy()\r\n new_weights = np.delete(old_weights, filters_to_prune)\r\n new_bn.weight.data = torch.from_numpy(new_weights).cuda()\r\n\r\n # beta: size = (num_features)\r\n bias_numpy = bn.bias.data.cpu().numpy()\r\n # change size to (out_channels - cut)\r\n bias = np.zeros(shape = (bias_numpy.shape[0] - len(filters_to_prune)), dtype = np.float32)\r\n bias = np.delete(bias_numpy, filters_to_prune)\r\n new_bn.bias.data = torch.from_numpy(bias).cuda()\r\n\r\n if isinstance(blk, BasicBlock):\r\n # replace 
with new block\r\n new_blk = BasicBlock(blk.conv1.in_channels, blk.conv1.out_channels, \\\r\n blk.stride, downsample = blk.downsample)\r\n # keep all layers in residual path unchanged tempararily\r\n new_blk.conv1 = blk.conv1\r\n new_blk.bn1 = blk.bn1\r\n new_blk.relu = blk.relu\r\n new_blk.conv2 = new_conv # update with new conv\r\n new_blk.bn2 = new_bn # update with new bn\r\n else:\r\n # replace with new block\r\n new_blk = Bottleneck(blk.conv1.in_channels, blk.conv1.out_channels, \\\r\n blk.stride, downsample = blk.downsample)\r\n # keep all layers in residual path unchanged tempararily\r\n new_blk.conv1 = blk.conv1\r\n new_blk.bn1 = blk.bn1\r\n new_blk.conv2 = blk.conv2\r\n new_blk.bn2 = blk.bn2\r\n new_blk.conv3 = new_conv\r\n new_blk.bn3 = new_bn\r\n new_blk.relu = blk.relu\r\n\r\n base = torch.nn.Sequential(\r\n *(replace_layers(model.base, i, [block_index], \\\r\n [new_blk]) for i, _ in enumerate(model.base)))\r\n\r\n # delete and replace\r\n del model.base\r\n model.base = base\r\n print(\"Filters prunned for rb layer:\", filters_to_prune)\r\n\r\n return model\r\n\r\n'''\r\n--------------------------------------------------------------------------------\r\n 4. Prune residual conv layer, the one at the bottom of residual side with/without BN, based on its own weights\r\n (*Note: MUST call this when you prune lconv layer,\r\n the immediate following block/conv cannot absorb your effect due to its empty left path)\r\n Args:\r\n block_index: the BasicBlock or Bottleneck Block this layer locates\r\n num_cut: the number of filters waiting for being pruned\r\n use_bn: use Batch Norm or not\r\n'''\r\ndef prune_rbconv_by_number(model, block_index, num_cut, use_bn = True):\r\n _, blk = list(model.base._modules.items())[block_index]\r\n\r\n if not use_bn:\r\n print(\"ResNet without BN is not supported for prunning\")\r\n return model\r\n\r\n if not isinstance(blk, (BasicBlock, Bottleneck)):\r\n print(\"Only support for ResNet with BasicBlock or Bottleneck defined in torchvision\")\r\n return model\r\n\r\n if isinstance(blk, BasicBlock):\r\n # when it is BasicBlock, the rbconv is conv2, and its bn is bn2\r\n conv = blk.conv2\r\n bn = blk.bn2\r\n else:\r\n # when it is Bottleneck, the rbconv is conv3, and its bn is bn3\r\n conv = blk.conv3\r\n bn = blk.bn3\r\n\r\n num_filters = conv.weight.data.size(0) # out_channels x in_channels x 3 x 3\r\n # skip the layer with only one filter left\r\n if num_filters <= 1:\r\n print(\"No filter will be prunned for this layer (num_filters<=1)\")\r\n return model\r\n\r\n if num_cut < 1:\r\n print(\"Error: No filter will be prunned for this layer (cut<1)\")\r\n return model\r\n if (num_filters - num_cut) < 1:\r\n print(\"Error: No filter will be prunned for this layer (no filter left after cutting)\")\r\n return model\r\n\r\n # rank the filters within this layer and store into filter_ranks\r\n abs_wgt = torch.abs(conv.weight.data)\r\n values = \\\r\n torch.sum(abs_wgt, dim = 1, keepdim = True).\\\r\n sum(dim=2, keepdim = True).sum(dim=3, keepdim = True)[:, 0, 0, 0]# .data\r\n # Normalize the sum of weight by the filter dimensions in x 3 x 3\r\n values = values / (abs_wgt.size(1) * abs_wgt.size(2) * abs_wgt.size(3)) # (filter_number for this layer, 1)\r\n\r\n print(\"Ranking filters.. \")\r\n filters_to_prune = np.argsort(values.cpu().numpy())[:num_cut] # order from smallest to largest\r\n print(\"Filters that will be prunned\", filters_to_prune)\r\n print(\"Pruning filters.. 
\")\r\n # only need to update itself, no need to care about others such as next_ds/next_conv\r\n new_conv = \\\r\n torch.nn.Conv2d(in_channels = conv.in_channels, \\\r\n out_channels = conv.out_channels - num_cut,\r\n kernel_size = conv.kernel_size, \\\r\n stride = conv.stride,\r\n padding = conv.padding,\r\n dilation = conv.dilation,\r\n groups = conv.groups,\r\n bias = conv.bias is not None) #(out_channels)\r\n\r\n old_weights = conv.weight.data.cpu().numpy() # (out_channels, in_channels, kernel_size[0], kernel_size[1]\r\n new_weights = new_conv.weight.data.cpu().numpy()\r\n\r\n # skip that filter's weight inside old_weights and store others into new_weights\r\n new_weights = np.delete(old_weights, filters_to_prune, axis = 0)\r\n new_conv.weight.data = torch.from_numpy(new_weights).cuda()\r\n\r\n if conv.bias is not None:\r\n bias_numpy = conv.bias.data.cpu().numpy()\r\n\r\n # change size to (out_channels - cut)\r\n bias = np.zeros(shape = (bias_numpy.shape[0] - num_cut), dtype = np.float32)\r\n bias = np.delete(bias_numpy, filters_to_prune, axis = None)\r\n new_conv.bias.data = torch.from_numpy(bias).cuda()\r\n\r\n # new BN layer after new_conv\r\n new_bn = torch.nn.BatchNorm2d(num_features=new_conv.out_channels, \\\r\n eps=bn.eps, momentum=bn.momentum, affine=bn.affine)\r\n # old_bn.affine == True, need to copy learnable gamma and beta to new_bn\r\n # gamma: size = (num_features)\r\n old_weights = bn.weight.data.cpu().numpy()\r\n new_weights = new_bn.weight.data.cpu().numpy()\r\n new_weights = np.delete(old_weights, filters_to_prune)\r\n new_bn.weight.data = torch.from_numpy(new_weights).cuda()\r\n\r\n # beta: size = (num_features)\r\n bias_numpy = bn.bias.data.cpu().numpy()\r\n # change size to (out_channels - cut)\r\n bias = np.zeros(shape = (bias_numpy.shape[0] - num_cut), dtype = np.float32)\r\n bias = np.delete(bias_numpy, filters_to_prune)\r\n new_bn.bias.data = torch.from_numpy(bias).cuda()\r\n\r\n if isinstance(blk, BasicBlock):\r\n # replace with new block\r\n new_blk = BasicBlock(blk.conv1.in_channels, blk.conv1.out_channels, \\\r\n blk.stride, downsample = blk.downsample)\r\n # keep all layers in residual path unchanged tempararily\r\n new_blk.conv1 = blk.conv1\r\n new_blk.bn1 = blk.bn1\r\n new_blk.relu = blk.relu\r\n new_blk.conv2 = new_conv # update with new conv\r\n new_blk.bn2 = new_bn # update with new bn\r\n else:\r\n # replace with new block\r\n new_blk = Bottleneck(blk.conv1.in_channels, blk.conv1.out_channels, \\\r\n blk.stride, downsample = blk.downsample)\r\n # keep all layers in residual path unchanged tempararily\r\n new_blk.conv1 = blk.conv1\r\n new_blk.bn1 = blk.bn1\r\n new_blk.conv2 = blk.conv2\r\n new_blk.bn2 = blk.bn2\r\n new_blk.conv3 = new_conv\r\n new_blk.bn3 = new_bn\r\n new_blk.relu = blk.relu\r\n\r\n base = torch.nn.Sequential(\r\n *(replace_layers(model.base, i, [block_index], \\\r\n [new_blk]) for i, _ in enumerate(model.base)))\r\n\r\n # delete and replace\r\n del model.base\r\n del blk\r\n\r\n model.base = base\r\n model = update_next_layers(model, block_index, filters_to_prune) # update following layers\r\n\r\n print(\"Filters prunned for rb layer:\", filters_to_prune)\r\n\r\n return model\r\n\r\n\r\n'''\r\n--------------------------------------------------------------------------------\r\n 5. 
Prune normal residual conv layer, the FRIST one at the upper of residual side with/without BN\r\n Args:\r\n block_index: the BasicBlock or Bottleneck Block this layer locates\r\n cut_ratio: the ratio of filters pruned from conv1 (and conv2 if Bottleneck)\r\n use_bn: use Batch Norm or not\r\n'''\r\ndef prune_ruconv1_layer(model, block_index, cut_ratio=0.2, use_bn = True):\r\n _, blk = list(model.base._modules.items())[block_index]\r\n\r\n if not use_bn:\r\n print(\"ResNet without BN is not supported for prunning\")\r\n return model\r\n\r\n if not isinstance(blk, (BasicBlock, Bottleneck)):\r\n print(\"Conv1 only for ResNet with BasicBlock or Bottleneck defined in torchvision\")\r\n return model\r\n # cut conv1, and next conv is conv2\r\n conv = blk.conv1\r\n bn = blk.bn1\r\n next_conv = blk.conv2\r\n\r\n num_filters = conv.weight.data.size(0) # out_channels x in_channels x 3 x 3\r\n # skip the layer with only one filter left\r\n if num_filters <= 1:\r\n print(\"No filter will be prunned for this layer (num_filters<=1)\")\r\n return model\r\n\r\n cut = int(cut_ratio * num_filters)\r\n\r\n if cut < 1:\r\n print(\"No filter will be prunned for this layer (cut<1)\")\r\n return model\r\n if (num_filters - cut) < 1:\r\n print(\"No filter will be prunned for this layer (no filter left after cutting)\")\r\n return model\r\n\r\n # rank the filters within this layer and store into filter_ranks\r\n abs_wgt = torch.abs(conv.weight.data)\r\n values = \\\r\n torch.sum(abs_wgt, dim = 1, keepdim = True).\\\r\n sum(dim=2, keepdim = True).sum(dim=3, keepdim = True)[:, 0, 0, 0]# .data\r\n # Normalize the sum of weight by the filter dimensions in x 3 x 3\r\n values = values / (abs_wgt.size(1) * abs_wgt.size(2) * abs_wgt.size(3)) # (filter_number for this layer, 1)\r\n\r\n print(\"Ranking filters.. \")\r\n filters_to_prune = np.argsort(values.cpu().numpy())[:cut] # order from smallest to largest\r\n print(\"Filters that will be prunned\", filters_to_prune)\r\n print(\"Pruning filters.. 
\")\r\n\r\n # the updated conv for current conv, with cut output channels being pruned\r\n new_conv = \\\r\n torch.nn.Conv2d(in_channels = conv.in_channels, \\\r\n out_channels = conv.out_channels - cut,\r\n kernel_size = conv.kernel_size, \\\r\n stride = conv.stride,\r\n padding = conv.padding,\r\n dilation = conv.dilation,\r\n groups = conv.groups,\r\n bias = conv.bias is not None) #(out_channels)\r\n\r\n old_weights = conv.weight.data.cpu().numpy() # (out_channels, in_channels, kernel_size[0], kernel_size[1]\r\n new_weights = new_conv.weight.data.cpu().numpy()\r\n\r\n # skip that filter's weight inside old_weights and store others into new_weights\r\n new_weights = np.delete(old_weights, filters_to_prune, axis = 0)\r\n new_conv.weight.data = torch.from_numpy(new_weights).cuda()\r\n\r\n if conv.bias is not None:\r\n bias_numpy = conv.bias.data.cpu().numpy()\r\n\r\n # change size to (out_channels - cut)\r\n bias = np.zeros(shape = (bias_numpy.shape[0] - cut), dtype = np.float32)\r\n bias = np.delete(bias_numpy, filters_to_prune, axis = None)\r\n new_conv.bias.data = torch.from_numpy(bias).cuda() # new conv1\r\n\r\n # BatchNorm layer\r\n new_bn = torch.nn.BatchNorm2d(num_features=new_conv.out_channels, \\\r\n eps=bn.eps, momentum=bn.momentum, affine=bn.affine)\r\n # gamma: size = (num_features)\r\n old_weights = bn.weight.data.cpu().numpy()\r\n new_weights = bn.weight.data.cpu().numpy()\r\n new_weights = np.delete(old_weights, filters_to_prune)\r\n new_bn.weight.data = torch.from_numpy(new_weights).cuda()\r\n\r\n # beta: size = (num_features)\r\n bias_numpy = bn.bias.data.cpu().numpy()\r\n # change size to (out_channels - cut)\r\n bias = np.zeros(shape = (bias_numpy.shape[0] - cut), dtype = np.float32)\r\n bias = np.delete(bias_numpy, filters_to_prune)\r\n new_bn.bias.data = torch.from_numpy(bias).cuda() # new bn1\r\n\r\n # new conv for next_conv\r\n next_new_conv = \\\r\n torch.nn.Conv2d(in_channels = next_conv.in_channels - cut,\\\r\n out_channels = next_conv.out_channels, \\\r\n kernel_size = next_conv.kernel_size, \\\r\n stride = next_conv.stride,\r\n padding = next_conv.padding,\r\n dilation = next_conv.dilation,\r\n groups = next_conv.groups,\r\n bias = next_conv.bias is not None)\r\n\r\n old_weights = next_conv.weight.data.cpu().numpy()\r\n new_weights = next_new_conv.weight.data.cpu().numpy()\r\n\r\n new_weights = np.delete(old_weights, filters_to_prune, axis = 1)\r\n next_new_conv.weight.data = torch.from_numpy(new_weights).cuda()\r\n\r\n if next_conv.bias is not None:\r\n next_new_conv.bias.data = next_conv.bias.data # new conv2\r\n\r\n # replace with new block\r\n if isinstance(blk, BasicBlock):\r\n new_blk = BasicBlock(blk.conv1.in_channels, blk.conv1.out_channels, \\\r\n blk.stride, downsample = blk.downsample)\r\n # keep all layers in residual path unchanged tempararily\r\n new_blk.conv1 = new_conv\r\n new_blk.bn1 = new_bn\r\n new_blk.relu = blk.relu\r\n new_blk.conv2 = next_new_conv # update with new conv\r\n new_blk.bn2 = blk.bn2 # update with new bn\r\n else:\r\n new_blk = Bottleneck(blk.conv1.in_channels, blk.conv1.out_channels, \\\r\n blk.stride, downsample = blk.downsample)\r\n # keep all layers in residual path unchanged tempararily\r\n new_blk.conv1 = new_conv\r\n new_blk.bn1 = new_bn\r\n new_blk.conv2 = next_new_conv\r\n new_blk.bn2 = blk.bn2\r\n new_blk.conv3 = blk.conv3\r\n new_blk.bn3 = blk.bn3\r\n new_blk.relu = blk.relu\r\n\r\n base = torch.nn.Sequential(\r\n *(replace_layers(model.base, i, [block_index], \\\r\n [new_blk]) for i, _ in 
enumerate(model.base)))\r\n\r\n # delete and replace\r\n del model.base\r\n model.base = base\r\n print(\"Filters prunned:\", filters_to_prune)\r\n\r\n return model\r\n\r\n'''\r\n--------------------------------------------------------------------------------\r\n 6. Prune normal residual conv layer, the SECOND one at the upper of residual side with/without BN\r\n (*for Bottleneck only)\r\n Args:\r\n block_index: the BasicBlock or Bottleneck Block this layer locates\r\n cut_ratio: the ratio of filters pruned from conv1 (and conv2 if Bottleneck)\r\n use_bn: use Batch Norm or not\r\n'''\r\ndef prune_ruconv2_layer(model, block_index, cut_ratio=0.2, use_bn = True):\r\n _, blk = list(model.base._modules.items())[block_index]\r\n\r\n if not use_bn:\r\n print(\"ResNet without BN is not supported for prunning\")\r\n return model\r\n\r\n if not isinstance(blk, Bottleneck):\r\n print(\"Conv2 only for ResNet with Bottleneck defined in torchvision\")\r\n return model\r\n # cut conv1, and next conv is conv2\r\n conv = blk.conv2\r\n bn = blk.bn2\r\n next_conv = blk.conv3\r\n\r\n num_filters = conv.weight.data.size(0) # out_channels x in_channels x 3 x 3\r\n # skip the layer with only one filter left\r\n if num_filters <= 1:\r\n print(\"No filter will be prunned for this layer (num_filters<=1)\")\r\n return model\r\n\r\n cut = int(cut_ratio * num_filters)\r\n\r\n if cut < 1:\r\n print(\"No filter will be prunned for this layer (cut<1)\")\r\n return model\r\n if (num_filters - cut) < 1:\r\n print(\"No filter will be prunned for this layer (no filter left after cutting)\")\r\n return model\r\n\r\n # rank the filters within this layer and store into filter_ranks\r\n abs_wgt = torch.abs(conv.weight.data)\r\n values = \\\r\n torch.sum(abs_wgt, dim = 1, keepdim = True).\\\r\n sum(dim=2, keepdim = True).sum(dim=3, keepdim = True)[:, 0, 0, 0]# .data\r\n # Normalize the sum of weight by the filter dimensions in x 3 x 3\r\n values = values / (abs_wgt.size(1) * abs_wgt.size(2) * abs_wgt.size(3)) # (filter_number for this layer, 1)\r\n\r\n print(\"Ranking filters.. \")\r\n filters_to_prune = np.argsort(values.cpu().numpy())[:cut] # order from smallest to largest\r\n print(\"Filters that will be prunned\", filters_to_prune)\r\n print(\"Pruning filters.. 
\")\r\n\r\n # the updated conv for current conv, with cut output channels being pruned\r\n new_conv = \\\r\n torch.nn.Conv2d(in_channels = conv.in_channels, \\\r\n out_channels = conv.out_channels - cut,\r\n kernel_size = conv.kernel_size, \\\r\n stride = conv.stride,\r\n padding = conv.padding,\r\n dilation = conv.dilation,\r\n groups = conv.groups,\r\n bias = conv.bias is not None) #(out_channels)\r\n\r\n old_weights = conv.weight.data.cpu().numpy() # (out_channels, in_channels, kernel_size[0], kernel_size[1]\r\n new_weights = new_conv.weight.data.cpu().numpy()\r\n\r\n # skip that filter's weight inside old_weights and store others into new_weights\r\n new_weights = np.delete(old_weights, filters_to_prune, axis = 0)\r\n new_conv.weight.data = torch.from_numpy(new_weights).cuda()\r\n\r\n if conv.bias is not None:\r\n bias_numpy = conv.bias.data.cpu().numpy()\r\n\r\n # change size to (out_channels - cut)\r\n bias = np.zeros(shape = (bias_numpy.shape[0] - cut), dtype = np.float32)\r\n bias = np.delete(bias_numpy, filters_to_prune, axis = None)\r\n new_conv.bias.data = torch.from_numpy(bias).cuda() # new conv2\r\n\r\n # BatchNorm layer\r\n new_bn = torch.nn.BatchNorm2d(num_features=new_conv.out_channels, \\\r\n eps=bn.eps, momentum=bn.momentum, affine=bn.affine)\r\n # gamma: size = (num_features)\r\n old_weights = bn.weight.data.cpu().numpy()\r\n new_weights = bn.weight.data.cpu().numpy()\r\n new_weights = np.delete(old_weights, filters_to_prune)\r\n new_bn.weight.data = torch.from_numpy(new_weights).cuda()\r\n\r\n # beta: size = (num_features)\r\n bias_numpy = bn.bias.data.cpu().numpy()\r\n # change size to (out_channels - cut)\r\n bias = np.zeros(shape = (bias_numpy.shape[0] - cut), dtype = np.float32)\r\n bias = np.delete(bias_numpy, filters_to_prune)\r\n new_bn.bias.data = torch.from_numpy(bias).cuda() # new bn2\r\n\r\n # new conv for next_conv\r\n next_new_conv = \\\r\n torch.nn.Conv2d(in_channels = next_conv.in_channels - cut,\\\r\n out_channels = next_conv.out_channels, \\\r\n kernel_size = next_conv.kernel_size, \\\r\n stride = next_conv.stride,\r\n padding = next_conv.padding,\r\n dilation = next_conv.dilation,\r\n groups = next_conv.groups,\r\n bias = next_conv.bias is not None)\r\n\r\n old_weights = next_conv.weight.data.cpu().numpy()\r\n new_weights = next_new_conv.weight.data.cpu().numpy()\r\n\r\n new_weights = np.delete(old_weights, filters_to_prune, axis = 1)\r\n next_new_conv.weight.data = torch.from_numpy(new_weights).cuda()\r\n\r\n if next_conv.bias is not None:\r\n next_new_conv.bias.data = next_conv.bias.data # new conv3\r\n\r\n # replace with new block\r\n new_blk = Bottleneck(blk.conv1.in_channels, blk.conv1.out_channels, \\\r\n blk.stride, downsample = blk.downsample)\r\n # keep all layers in residual path unchanged tempararily\r\n new_blk.conv1 = blk.conv1\r\n new_blk.bn1 = blk.bn1\r\n new_blk.conv2 = new_conv\r\n new_blk.bn2 = new_bn\r\n new_blk.conv3 = next_new_conv\r\n new_blk.bn3 = blk.bn3\r\n new_blk.relu = blk.relu\r\n\r\n base = torch.nn.Sequential(\r\n *(replace_layers(model.base, i, [block_index], \\\r\n [new_blk]) for i, _ in enumerate(model.base)))\r\n\r\n # delete and replace\r\n del model.base\r\n model.base = base\r\n print(\"Filters prunned:\", filters_to_prune)\r\n\r\n return model\r\n" ]
[ [ "torch.abs", "torch.nn.Sequential", "torch.nn.Conv2d", "torch.sum", "torch.from_numpy", "numpy.delete", "torch.nn.BatchNorm2d", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Mu-L/pytorch
[ "b0bdf588ea575928a94264c30999385d5ff2bc32" ]
[ "test/test_linalg.py" ]
[ "# -*- coding: utf-8 -*-\n# Owner(s): [\"module: linear algebra\"]\n\nimport torch\nimport numpy as np\n\nimport unittest\nimport itertools\nimport warnings\nimport math\nfrom math import inf, nan, isnan\nimport random\nfrom random import randrange\nfrom itertools import product\nfrom functools import reduce\n\nfrom torch.testing._internal.common_utils import \\\n (TestCase, run_tests, TEST_SCIPY, IS_MACOS, IS_WINDOWS, slowTest,\n TEST_WITH_ASAN, TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU,\n iter_indices, gradcheck, gradgradcheck)\nfrom torch.testing._internal.common_device_type import \\\n (instantiate_device_type_tests, dtypes,\n onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,\n skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, onlyNativeDeviceTypes, dtypesIfCUDA,\n onlyCUDA, skipCUDAVersionIn, skipMeta, skipCUDAIfNoCusolver)\nfrom torch.testing import make_tensor\nfrom torch.testing._internal.common_dtype import (\n all_types, floating_types, floating_and_complex_types, get_all_dtypes, get_all_int_dtypes, get_all_complex_dtypes,\n get_all_fp_dtypes,\n)\nfrom torch.testing._internal.common_cuda import SM53OrLater, tf32_on_and_off, CUDA11OrLater, CUDA9\nfrom torch.distributions.binomial import Binomial\n\n# Protects against includes accidentally setting the default dtype\n# NOTE: jit_metaprogramming_utils sets the default dtype to double!\ntorch.set_default_dtype(torch.float32)\nassert torch.get_default_dtype() is torch.float32\n\nif TEST_SCIPY:\n import scipy\n\nclass TestLinalg(TestCase):\n def setUp(self):\n super(self.__class__, self).setUp()\n torch.backends.cuda.matmul.allow_tf32 = False\n\n def tearDown(self):\n torch.backends.cuda.matmul.allow_tf32 = True\n super(self.__class__, self).tearDown()\n\n exact_dtype = True\n\n @dtypes(torch.float, torch.cfloat)\n @precisionOverride({torch.float: 1e-06, torch.cfloat: 1e-06})\n @tf32_on_and_off(5e-3)\n def test_inner(self, device, dtype):\n def check(a_sizes_, b_sizes_):\n for a_sizes, b_sizes in ((a_sizes_, b_sizes_), (b_sizes_, a_sizes_)):\n a = torch.randn(a_sizes, dtype=dtype, device=device)\n b = torch.randn(b_sizes, dtype=dtype, device=device)\n res = torch.inner(a, b)\n ref = np.inner(a.cpu().numpy(), b.cpu().numpy())\n self.assertEqual(res.cpu(), torch.from_numpy(np.array(ref)))\n out = torch.zeros_like(res)\n torch.inner(a, b, out=out)\n self.assertEqual(res, out)\n\n check([], []) # scalar x scalar\n check([], [0]) # scalar x empty\n check([], [3]) # scalar x 1D\n check([], [2, 3, 4]) # scalar x 3D\n\n check([0], [0]) # empty x empty\n check([0], [2, 0]) # empty x 2D\n\n check([2], [2]) # 1D x 1D\n check([2], [3, 1, 2]) # 1D x 3D\n check([2], [3, 0, 2]) # 1D x 3D empty\n\n check([1, 2], [3, 2]) # 2D x 2D\n check([1, 2], [3, 4, 2]) # 2D x 3D\n check([2, 1, 3, 2], [1, 3, 2, 2]) # 4D x 4D\n\n # Test noncontiguous input\n a = torch.randn(3, 2, device=device, dtype=dtype).transpose_(0, 1)\n b = torch.randn(4, 3, device=device, dtype=dtype)[::2, :]\n self.assertFalse(a.is_contiguous() or b.is_contiguous())\n self.assertEqual(a.inner(b).cpu().numpy(), np.inner(a.cpu().numpy(), b.cpu().numpy()))\n\n # Test error message\n with self.assertRaisesRegex(RuntimeError,\n r\"inner\\(\\) the last dimension must match on both \"\n r\"input tensors but got shapes \\[2, 3\\] and \\[2, 2\\]\"):\n torch.randn(2, 3, device=device, dtype=dtype).inner(torch.randn(2, 2, device=device, dtype=dtype))\n\n # Tests torch.outer, and its alias, torch.ger, vs. 
NumPy\n @precisionOverride({torch.bfloat16: 1e-1})\n @dtypes(*(get_all_dtypes()))\n def test_outer(self, device, dtype):\n def run_test_case(a, b):\n if dtype == torch.bfloat16:\n a_np = a.to(torch.double).cpu().numpy()\n b_np = b.to(torch.double).cpu().numpy()\n exact_dtype = False\n else:\n a_np = a.cpu().numpy()\n b_np = b.cpu().numpy()\n exact_dtype = True\n expected = np.outer(a_np, b_np)\n\n self.assertEqual(torch.outer(a, b), expected, exact_dtype=False)\n self.assertEqual(torch.Tensor.outer(a, b), expected, exact_dtype=False)\n\n self.assertEqual(torch.ger(a, b), expected, exact_dtype=False)\n self.assertEqual(torch.Tensor.ger(a, b), expected, exact_dtype=False)\n\n # test out variant\n out = torch.empty(a.size(0), b.size(0), device=device, dtype=dtype)\n torch.outer(a, b, out=out)\n self.assertEqual(out, expected, exact_dtype=False)\n\n out = torch.empty(a.size(0), b.size(0), device=device, dtype=dtype)\n torch.ger(a, b, out=out)\n self.assertEqual(out, expected, exact_dtype=False)\n\n a = torch.randn(50).to(device=device, dtype=dtype)\n b = torch.randn(50).to(device=device, dtype=dtype)\n run_test_case(a, b)\n\n # test 0 strided tensor\n zero_strided = torch.randn(1).to(device=device, dtype=dtype).expand(50)\n run_test_case(zero_strided, b)\n run_test_case(a, zero_strided)\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)\n def test_linalg_lstsq(self, device, dtype):\n from torch.testing._internal.common_utils import random_well_conditioned_matrix\n if self.device_type == 'cpu':\n drivers = ('gels', 'gelsy', 'gelsd', 'gelss', None)\n else:\n drivers = ('gels', None)\n\n def check_solution_correctness(a, b, sol):\n sol2 = a.pinverse() @ b\n self.assertEqual(sol, sol2, atol=1e-5, rtol=1e-5)\n\n def check_correctness_ref(a, b, res, ref, driver=\"default\"):\n def apply_if_not_empty(t, f):\n if t.numel():\n return f(t)\n else:\n return t\n\n def select_if_not_empty(t, i):\n selected = apply_if_not_empty(t, lambda x: x.select(0, i))\n return selected\n\n m = a.size(-2)\n n = a.size(-1)\n nrhs = b.size(-1)\n batch_size = int(np.prod(a.shape[:-2]))\n if batch_size == 0:\n batch_size = 1\n a_3d = a.view(batch_size, m, n)\n b_3d = b.view(batch_size, m, nrhs)\n\n solution_3d = res.solution.view(batch_size, n, nrhs)\n residuals_2d = apply_if_not_empty(res.residuals, lambda t: t.view(-1, nrhs))\n rank_1d = apply_if_not_empty(res.rank, lambda t: t.view(-1))\n singular_values_2d = res.singular_values.view(batch_size, res.singular_values.shape[-1])\n\n if a.numel() > 0:\n for i in range(batch_size):\n sol, residuals, rank, singular_values = ref(\n a_3d.select(0, i).numpy(),\n b_3d.select(0, i).numpy()\n )\n # Singular values are None when lapack_driver='gelsy' in SciPy\n if singular_values is None:\n singular_values = []\n self.assertEqual(sol, solution_3d.select(0, i), atol=1e-5, rtol=1e-5)\n self.assertEqual(rank, select_if_not_empty(rank_1d, i), atol=1e-5, rtol=1e-5)\n self.assertEqual(singular_values, singular_values_2d.select(0, i), atol=1e-5, rtol=1e-5)\n\n # SciPy and NumPy operate only on non-batched input and\n # return an empty array with shape (0,) if rank(a) != n\n # in PyTorch the batched inputs are supported and\n # matrices in the batched input can have different ranks\n # we compute residuals only if all matrices have rank == n\n # see https://github.com/pytorch/pytorch/issues/56483\n if m > n:\n if torch.all(rank_1d == n):\n self.assertEqual(\n residuals, select_if_not_empty(residuals_2d, i), atol=1e-5, rtol=1e-5, 
exact_dtype=False\n )\n else:\n self.assertTrue(residuals_2d.numel() == 0)\n\n else:\n self.assertEqual(res.solution.shape, (*a.shape[:-2], n, nrhs))\n self.assertEqual(res.rank.shape, a.shape[:-2])\n\n # residuals are not always computed (and have non-zero shape)\n if m > n and driver != \"gelsy\":\n self.assertEqual(res.residuals.shape, (*a.shape[:-2], 0))\n else:\n self.assertEqual(res.residuals.shape, (0, ))\n\n # singular_values are not always computed (and have non-zero shape)\n if driver == \"default\" or driver == \"gelsd\" or driver == \"gelss\":\n self.assertEqual(res.singular_values.shape, (*a.shape[:-2], min(m, n)))\n else:\n self.assertEqual(res.singular_values.shape, (0, ))\n\n def check_correctness_scipy(a, b, res, driver, cond):\n # SciPy provides 3 driver options: gelsd, gelss, gelsy\n if TEST_SCIPY and driver in ('gelsd', 'gelss', 'gelsy'):\n import scipy.linalg\n\n def scipy_ref(a, b):\n return scipy.linalg.lstsq(a, b, lapack_driver=driver, cond=cond)\n check_correctness_ref(a, b, res, scipy_ref, driver=driver)\n\n def check_correctness_numpy(a, b, res, driver, rcond):\n # NumPy uses only gelsd routine\n if driver == 'gelsd':\n\n def numpy_ref(a, b):\n return np.linalg.lstsq(a, b, rcond=rcond)\n check_correctness_ref(a, b, res, numpy_ref)\n\n version = torch.testing._internal.common_cuda._get_torch_cuda_version()\n cusolver_available = (version >= (10, 2))\n\n ms = [2 ** i for i in range(5)]\n m_ge_n_sizes = [(m, m // 2) for m in ms] + [(m, m) for m in ms]\n # cases m < n are only supported on CPU and for cuSOLVER path on CUDA\n m_l_n_sizes = [(m // 2, m) for m in ms]\n include_m_l_n_case = (cusolver_available or device == 'cpu')\n matrix_sizes = m_ge_n_sizes + (m_l_n_sizes if include_m_l_n_case else [])\n batches = [(), (2,), (2, 2), (2, 2, 2)]\n # we generate matrices with singular values sampled from a normal distribution,\n # that is why we use `cond=1.0`, the mean to cut roughly half of all\n # the singular values and compare whether torch.linalg.lstsq agrees with\n # SciPy and NumPy.\n # if rcond is True then set value for it based on the used algorithm\n # rcond == -1 or any other negative value forces LAPACK to use machine precision tolerance\n rconds = (None, True, -1)\n\n for batch, matrix_size, driver, rcond in itertools.product(batches, matrix_sizes, drivers, rconds):\n # keep the rcond value if it is None or -1, set the driver specific value if it is True\n if rcond and rcond != -1:\n if driver in ('gelss', 'gelsd'):\n # SVD based algorithm; set to zero roughly half of all the singular values\n rcond = 1.0\n else:\n # driver == 'gelsy'\n # QR based algorithm; setting the value too high might lead to non-unique solutions and flaky tests\n rcond = 1e-4\n\n # specifying rcond value has no effect for gels driver so no need to run the tests again\n if driver == 'gels' and rcond is not None:\n continue\n\n shape = batch + matrix_size\n a = random_well_conditioned_matrix(*shape, dtype=dtype, device=device)\n b = torch.rand(*shape, dtype=dtype, device=device)\n\n m = a.size(-2)\n n = a.size(-1)\n res = torch.linalg.lstsq(a, b, rcond=rcond, driver=driver)\n sol = res.solution\n\n # Only checks gelsd, gelss, gelsy drivers\n check_correctness_scipy(a, b, res, driver, rcond)\n\n # Only checks gelsd driver\n check_correctness_numpy(a, b, res, driver, rcond)\n\n # gels driver is not checked by comparing to NumPy or SciPy implementation\n # because NumPy and SciPy do not implement this driver\n if driver == 'gels' and rcond is None:\n check_solution_correctness(a, b, 
sol)\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)\n def test_linalg_lstsq_batch_broadcasting(self, device, dtype):\n from torch.testing._internal.common_utils import random_well_conditioned_matrix\n\n def check_correctness(a, b):\n sol = torch.linalg.lstsq(a, b).solution\n sol2 = a.pinverse() @ b\n self.assertEqual(sol, sol2, rtol=1e-5, atol=1e-5)\n\n ms = [2 ** i for i in range(5)]\n batches = [(), (0,), (2,), (2, 2), (2, 2, 2)]\n # the case when a single matrix is batch-broadcasted over the rhs\n for m, batch in itertools.product(ms, batches):\n a = random_well_conditioned_matrix(m, m, dtype=dtype, device=device).view(*([1] * len(batch)), m, m)\n b = torch.rand(*(batch + (m, m)), dtype=dtype, device=device)\n check_correctness(a, b)\n\n # cases with broadcastable shapes\n for m in ms:\n a = random_well_conditioned_matrix(1, 3, 1, 3, m, m, dtype=dtype, device=device)\n b = torch.rand(3, 1, 3, 1, m, m // 2, dtype=dtype, device=device)\n check_correctness(a, b)\n\n # rhs are vectors, not matrices in this test\n b = torch.rand(3, 1, 3, 1, m, dtype=dtype, device=device)\n # unsqueeze for b because `check_correctness` checks against\n # a.pinverse() @ b, which requires b to be a matrix\n check_correctness(a, b.unsqueeze(-1))\n\n a = random_well_conditioned_matrix(3, 1, 3, 1, m, m, dtype=dtype, device=device)\n b = torch.rand(1, 3, 1, 3, m, m // 2, dtype=dtype, device=device)\n check_correctness(a, b)\n\n # rhs are vectors, not matrices in this test\n b = torch.rand(1, 3, 1, 3, m, dtype=dtype, device=device)\n check_correctness(a, b.unsqueeze(-1))\n\n @skipCPUIfNoLapack\n @skipCUDAIfNoMagma\n @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)\n def test_linalg_lstsq_input_checks(self, device, dtype):\n # check empty inputs\n # empty batches\n a = torch.rand(0, 0, 3, 3, dtype=dtype, device=device)\n b = torch.rand(0, 0, 3, 2, dtype=dtype, device=device)\n self.assertEqual(\n torch.linalg.lstsq(a, b)[0],\n torch.zeros(0, 0, 3, 2, dtype=dtype, device=device)\n )\n # empty a and b\n a = torch.rand(2, 2, 0, 0, dtype=dtype, device=device)\n b = torch.rand(2, 2, 0, 0, dtype=dtype, device=device)\n self.assertEqual(\n torch.linalg.lstsq(a, b)[0],\n torch.zeros(2, 2, 0, 0, dtype=dtype, device=device)\n )\n # empty a and b\n a = torch.rand(2, 2, 3, 0, dtype=dtype, device=device)\n b = torch.rand(2, 2, 3, 0, dtype=dtype, device=device)\n self.assertEqual(\n torch.linalg.lstsq(a, b)[0],\n torch.zeros(2, 2, 0, 0, dtype=dtype, device=device)\n )\n # empty a but not b\n a = torch.rand(2, 2, 3, 0, dtype=dtype, device=device)\n b = torch.rand(2, 2, 3, 2, dtype=dtype, device=device)\n self.assertEqual(\n torch.linalg.lstsq(a, b)[0],\n torch.zeros(2, 2, 0, 2, dtype=dtype, device=device)\n )\n\n # empty a and b\n if torch.device(device).type == 'cpu':\n # only CPU since CUDA does not support overdetermined systems\n a = torch.rand(2, 2, 0, 3, dtype=dtype, device=device)\n b = torch.rand(2, 2, 0, 3, dtype=dtype, device=device)\n self.assertEqual(\n torch.linalg.lstsq(a, b)[0],\n torch.zeros(2, 2, 3, 3, dtype=dtype, device=device)\n )\n\n a = torch.rand(2, 3, dtype=dtype, device=device)\n b = torch.rand(3, dtype=dtype, device=device)\n\n with self.assertRaisesRegex(RuntimeError, 'input must have at least 2 dimensions'):\n torch.linalg.lstsq(b, b)\n\n with self.assertRaisesRegex(RuntimeError, 'other must have at least 1 dimension'):\n torch.linalg.lstsq(a, torch.tensor(1, dtype=dtype, device=device))\n\n with 
self.assertRaisesRegex(RuntimeError, r'input.size\\(-2\\) should match other.size\\(-1\\)'):\n torch.linalg.lstsq(a, b)\n\n with self.assertRaisesRegex(RuntimeError, r'input.size\\(-2\\) should match other.size\\(-2\\)'):\n torch.linalg.lstsq(a, b.unsqueeze(-1))\n\n def complement_device(device):\n if device == 'cpu' and torch.cuda.is_available():\n return 'cuda'\n else:\n return 'cpu'\n\n a = torch.rand(2, 2, 2, 2, dtype=dtype, device=device)\n b = torch.rand(2, 2, 2, dtype=dtype, device=complement_device(device))\n if a.device != b.device:\n with self.assertRaisesRegex(RuntimeError, 'be on the same device'):\n torch.linalg.lstsq(a, b)\n\n b = (torch.rand(2, 2, 2, dtype=dtype, device=device) * 100).long()\n with self.assertRaisesRegex(RuntimeError, 'the same dtype'):\n torch.linalg.lstsq(a, b)\n\n a = torch.rand(2, 2, 2, 2, dtype=dtype, device=device)\n b = torch.rand(2, 2, 2, dtype=dtype, device=device)\n\n if device != 'cpu':\n with self.assertRaisesRegex(RuntimeError, '`driver` other than `gels` is not supported on CUDA'):\n torch.linalg.lstsq(a, b, driver='fictitious_driver')\n # if on cpu\n else:\n with self.assertRaisesRegex(RuntimeError, r'parameter `driver` should be one of \\(gels, gelsy, gelsd, gelss\\)'):\n torch.linalg.lstsq(a, b, driver='fictitious_driver')\n\n # cuSOLVER path supports underdetermined systems\n version = torch.testing._internal.common_cuda._get_torch_cuda_version()\n cusolver_not_available = (version < (10, 1))\n\n if device != 'cpu' and cusolver_not_available:\n a = torch.rand(2, 3, dtype=dtype, device=device)\n b = torch.rand(2, 1, dtype=dtype, device=device)\n with self.assertRaisesRegex(RuntimeError, r'only overdetermined systems'):\n torch.linalg.lstsq(a, b)\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_cholesky(self, device, dtype):\n from torch.testing._internal.common_utils import random_hermitian_pd_matrix\n\n def run_test(shape, batch, contiguous):\n A = random_hermitian_pd_matrix(shape, *batch, dtype=dtype, device=device)\n if A.numel() > 0 and not contiguous:\n A = A.mT\n self.assertFalse(A.is_contiguous())\n expected_L = np.linalg.cholesky(A.cpu().numpy())\n actual_L = torch.linalg.cholesky(A)\n\n # For fp32 individual entries in matrices can differ between PyTorch and NumPy\n # Let's compare the norms of matrices instead\n if A.numel() > 0 and dtype in [torch.float32, torch.complex64]:\n # axis is specified to calculate matrix norm for batched input\n expected_norm = np.linalg.norm(expected_L, ord=1, axis=(-2, -1))\n actual_norm = torch.linalg.norm(actual_L, ord=1, axis=(-2, -1))\n # Compare the norms with standard tolerances\n self.assertEqual(actual_norm, expected_norm)\n # and individual values with a higher tolerance\n self.assertEqual(actual_L, expected_L, atol=1e-2, rtol=1e-5)\n else:\n self.assertEqual(actual_L, expected_L)\n\n shapes = (0, 3, 5)\n batches = ((), (3, ), (2, 2))\n larger_input_case = [(100, (5, ), True)]\n for shape, batch, contiguous in list(itertools.product(shapes, batches, (True, False))) + larger_input_case:\n run_test(shape, batch, contiguous)\n\n # check the out= variant\n A = random_hermitian_pd_matrix(3, 3, dtype=dtype, device=device)\n out = torch.empty_like(A)\n ans = torch.linalg.cholesky(A, out=out)\n self.assertEqual(ans, out)\n expected = torch.linalg.cholesky(A)\n self.assertEqual(expected, out)\n\n # check the upper= variant\n expected = torch.linalg.cholesky(A).mH\n actual = torch.linalg.cholesky(A, upper=True)\n 
self.assertEqual(expected, actual)\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_cholesky_errors_and_warnings(self, device, dtype):\n from torch.testing._internal.common_utils import random_hermitian_pd_matrix\n\n # cholesky requires the input to be a square matrix or batch of square matrices\n A = torch.randn(2, 3, device=device, dtype=dtype)\n with self.assertRaisesRegex(RuntimeError, r'must be batches of square matrices'):\n torch.linalg.cholesky(A)\n A = torch.randn(2, 2, 3, device=device, dtype=dtype)\n with self.assertRaisesRegex(RuntimeError, r'must be batches of square matrices'):\n torch.linalg.cholesky(A)\n with self.assertRaisesRegex(np.linalg.LinAlgError, r'Last 2 dimensions of the array must be square'):\n np.linalg.cholesky(A.cpu().numpy())\n\n # cholesky requires the input to be at least 2 dimensional tensor\n A = torch.randn(2, device=device, dtype=dtype)\n with self.assertRaisesRegex(RuntimeError, r'must have at least 2 dimensions'):\n torch.linalg.cholesky(A)\n with self.assertRaisesRegex(np.linalg.LinAlgError,\n r'1-dimensional array given\\. Array must be at least two-dimensional'):\n np.linalg.cholesky(A.cpu().numpy())\n\n # if the input matrix is not positive definite, an error should be raised\n A = torch.eye(3, 3, dtype=dtype, device=device)\n A[-1, -1] = 0 # Now A is not positive definite\n with self.assertRaisesRegex(RuntimeError, r'minor of order 3 is not positive-definite'):\n torch.linalg.cholesky(A)\n with self.assertRaisesRegex(np.linalg.LinAlgError, r'Matrix is not positive definite'):\n np.linalg.cholesky(A.cpu().numpy())\n\n # if at least one matrix in the batch is singular, an error should be raised\n A = torch.eye(3, 3, dtype=dtype, device=device)\n A = A.reshape((1, 3, 3))\n A = A.repeat(5, 1, 1)\n A[4, -1, -1] = 0 # Now A[4] is not positive definite\n with self.assertRaisesRegex(RuntimeError, r'\\(Batch element 4\\): The factorization could not be completed'):\n torch.linalg.cholesky(A)\n\n # if out tensor with wrong shape is passed a warning is given\n A = random_hermitian_pd_matrix(3, dtype=dtype, device=device)\n out = torch.empty(2, 3, dtype=dtype, device=device)\n with warnings.catch_warnings(record=True) as w:\n # Trigger warning\n torch.linalg.cholesky(A, out=out)\n # Check warning occurs\n self.assertEqual(len(w), 1)\n self.assertTrue(\"An output with one or more elements was resized\" in str(w[-1].message))\n\n # dtypes should be safely castable\n out = torch.empty(*A.shape, dtype=torch.int, device=device)\n with self.assertRaisesRegex(RuntimeError, \"but got result with dtype Int\"):\n torch.linalg.cholesky(A, out=out)\n\n # device should match\n if torch.cuda.is_available():\n wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'\n out = torch.empty(0, device=wrong_device, dtype=dtype)\n with self.assertRaisesRegex(RuntimeError, \"Expected result and input tensors to be on the same device\"):\n torch.linalg.cholesky(A, out=out)\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float64, torch.complex128)\n def test_cholesky_hermitian_grad(self, device, dtype):\n # Check that the gradient is Hermitian (or symmetric)\n def run_test(shape):\n root = torch.rand(*shape, dtype=dtype, device=device)\n root = torch.matmul(root, root.mH)\n root.requires_grad_()\n chol = torch.linalg.cholesky(root).sum().backward()\n self.assertEqual(root.grad, root.grad.mH)\n\n shapes = ((3, 3), (1, 1, 3, 3))\n for shape in shapes:\n run_test(shape)\n\n # NOTE: 
old_cholesky* tests were moved here from test_torch.py and test_autograd.py\n @slowTest\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.double)\n def test_old_cholesky_batched_many_batches(self, device, dtype):\n from torch.testing._internal.common_utils import random_symmetric_pd_matrix\n\n def cholesky_test_helper(n, batchsize, device, upper):\n A = random_symmetric_pd_matrix(n, batchsize, dtype=dtype, device=device)\n chol_fact = torch.cholesky(A, upper=upper)\n if upper:\n # Correctness check\n self.assertEqual(A, chol_fact.mT.matmul(chol_fact))\n # Upper triangular check\n self.assertEqual(chol_fact, chol_fact.triu())\n else:\n # Correctness check\n self.assertEqual(A, chol_fact.matmul(chol_fact.mT))\n # Lower triangular check\n self.assertEqual(chol_fact, chol_fact.tril())\n\n for upper, batchsize in itertools.product([True, False], [262144, 524288]):\n cholesky_test_helper(2, batchsize, device, upper)\n\n @precisionOverride({torch.float32: 1e-4, torch.complex64: 1e-4})\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_old_cholesky_batched(self, device, dtype):\n from torch.testing._internal.common_utils import random_hermitian_pd_matrix\n\n def cholesky_test_helper(n, batch_dims, upper):\n A = random_hermitian_pd_matrix(n, *batch_dims, dtype=dtype, device=device)\n cholesky_exp = torch.stack([m.cholesky(upper=upper) for m in A.reshape(-1, n, n)])\n cholesky_exp = cholesky_exp.reshape_as(A)\n self.assertEqual(cholesky_exp, torch.cholesky(A, upper=upper))\n\n for upper, batchsize in itertools.product([True, False], [(3,), (3, 4), (2, 3, 4)]):\n cholesky_test_helper(3, batchsize, upper)\n\n @precisionOverride({torch.float32: 1e-4, torch.complex64: 1e-4})\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n @tf32_on_and_off(0.01)\n def test_old_cholesky(self, device, dtype):\n from torch.testing._internal.common_utils import random_hermitian_pd_matrix\n\n A = random_hermitian_pd_matrix(10, dtype=dtype, device=device)\n\n # default Case\n C = torch.cholesky(A)\n B = torch.mm(C, C.t().conj())\n self.assertEqual(A, B, atol=1e-14, rtol=0)\n\n # test Upper Triangular\n U = torch.cholesky(A, True)\n B = torch.mm(U.t().conj(), U)\n self.assertEqual(A, B, atol=1e-14, rtol=0, msg='cholesky (upper) did not allow rebuilding the original matrix')\n\n # test Lower Triangular\n L = torch.cholesky(A, False)\n B = torch.mm(L, L.t().conj())\n self.assertEqual(A, B, atol=1e-14, rtol=0, msg='cholesky (lower) did not allow rebuilding the original matrix')\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_old_cholesky_empty(self, device, dtype):\n def run_test(upper):\n A = torch.empty(0, 0, dtype=dtype, device=device)\n chol = torch.cholesky(A, upper)\n chol_A = torch.matmul(chol, chol.t().conj())\n self.assertEqual(A, chol_A)\n for upper in [True, False]:\n run_test(upper)\n\n # Test for issue\n # https://github.com/pytorch/pytorch/issues/57032\n # torch.cholesky with upper=True for batched CUDA inputs was wrong\n # it was using the lower triangular part instead of the upper one\n @onlyCUDA\n @skipCUDAIfNoMagma\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_old_cholesky_batched_upper(self, device, dtype):\n from torch.testing._internal.common_utils import random_hermitian_pd_matrix\n\n batchsize = 2\n A = random_hermitian_pd_matrix(3, 
batchsize, dtype=dtype, device=device)\n A_triu = A.triu() # fill the lower triangular part with zero\n\n U = torch.cholesky(A_triu, upper=True)\n\n reconstruct_A = U.mH @ U\n self.assertEqual(A, reconstruct_A)\n\n @skipCUDAIfNoMagmaAndNoCusolver\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_cholesky_ex(self, device, dtype):\n from torch.testing._internal.common_utils import random_hermitian_pd_matrix\n\n def run_test(n, batch):\n A = random_hermitian_pd_matrix(n, *batch, dtype=dtype, device=device)\n expected_L = np.linalg.cholesky(A.cpu().numpy())\n expected_info = torch.zeros(A.shape[:-2], dtype=torch.int32, device=device)\n actual_L, actual_info = torch.linalg.cholesky_ex(A)\n\n # For fp32 individual entries in matrices can differ between PyTorch and NumPy\n # Let's compare the norms of matrices instead\n if A.numel() > 0 and dtype in [torch.float32, torch.complex64]:\n # axis is specified to calculate matrix norm for batched input\n expected_norm = np.linalg.norm(expected_L, ord=1, axis=(-2, -1))\n actual_norm = torch.linalg.norm(actual_L, ord=1, axis=(-2, -1))\n # Compare the norms with standard tolerances\n self.assertEqual(actual_norm, expected_norm)\n # and individual values with a higher tolerance\n self.assertEqual(actual_L, expected_L, atol=1e-2, rtol=1e-5)\n else:\n self.assertEqual(actual_L, expected_L)\n self.assertEqual(actual_info, expected_info)\n\n ns = (0, 3, 5)\n batches = ((), (2, ), (2, 1))\n for n, batch in itertools.product(ns, batches):\n run_test(n, batch)\n\n @skipCUDAIfNoMagmaAndNoCusolver\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_cholesky_ex_non_pd(self, device, dtype):\n # if the input matrix is not positive definite, info with positive integer is returned\n A = torch.eye(3, 3, dtype=dtype, device=device)\n A[-1, -1] = 0 # Now A is singular\n _, info = torch.linalg.cholesky_ex(A)\n self.assertEqual(info, 3)\n with self.assertRaisesRegex(RuntimeError, r'minor of order 3 is not positive-definite'):\n torch.linalg.cholesky_ex(A, check_errors=True)\n\n # if at least one matrix in the batch is not positive definite,\n # batched info with positive integer for the corresponding matrix is returned\n A = torch.eye(3, 3, dtype=dtype, device=device)\n A = A.reshape((1, 3, 3))\n A = A.repeat(5, 1, 1)\n A[3, -2, -2] = 0 # Now A[3] is singular\n _, info = torch.linalg.cholesky_ex(A)\n\n expected_info = torch.zeros(A.shape[:-2], dtype=torch.int32, device=device)\n expected_info[3] = 2\n self.assertEqual(info, expected_info)\n with self.assertRaisesRegex(RuntimeError, r'\\(Batch element 3\\): The factorization could not be completed'):\n torch.linalg.cholesky_ex(A, check_errors=True)\n\n @skipCUDAIfNoMagmaAndNoCusolver\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_cholesky_ex_out_info_error(self, device, dtype):\n from torch.testing._internal.common_utils import random_hermitian_pd_matrix\n\n # dtype for info must be torch.int32\n A = random_hermitian_pd_matrix(3, dtype=dtype, device=device)\n L = torch.empty(A.shape, dtype=dtype, device=device)\n info = torch.empty(A.shape[:-2], dtype=torch.int64, device=device)\n with self.assertRaisesRegex(RuntimeError, \"but got info with dtype Long\"):\n torch.linalg.cholesky_ex(A, out=(L, info))\n\n @onlyCPU\n @skipCPUIfNoLapack\n @dtypes(torch.float64, torch.complex128)\n def test_old_cholesky_autograd(self, device, dtype):\n def func(root, upper):\n 
x = 0.5 * (root + root.mH)\n return torch.cholesky(x, upper)\n\n def run_test(upper, dims):\n root = torch.rand(*dims, dtype=dtype, device=device, requires_grad=True)\n root = root + torch.eye(dims[-1])\n\n gradcheck(func, [root, upper])\n gradgradcheck(func, [root, upper])\n\n root = torch.rand(*dims, dtype=dtype, device=device)\n root = torch.matmul(root, root.mH)\n root.requires_grad_()\n chol = root.cholesky().sum().backward()\n self.assertEqual(root.grad, root.grad.mH) # Check the gradient is hermitian\n\n for upper, dims in itertools.product([True, False], [(3, 3), (4, 3, 2, 2)]):\n run_test(upper, dims)\n\n def _test_addr_vs_numpy(self, device, dtype, beta=1, alpha=1):\n def check(m, a, b, beta, alpha):\n if dtype == torch.bfloat16:\n a_np = a.to(torch.double).cpu().numpy()\n b_np = b.to(torch.double).cpu().numpy()\n m_np = m.to(torch.double).cpu().numpy()\n exact_dtype = False\n else:\n a_np = a.cpu().numpy()\n b_np = b.cpu().numpy()\n m_np = m.cpu().numpy()\n exact_dtype = True\n if beta == 0:\n expected = alpha * np.outer(a_np, b_np)\n else:\n expected = beta * m_np + alpha * np.outer(a_np, b_np)\n\n res = torch.addr(m, a, b, beta=beta, alpha=alpha)\n self.assertEqual(res, expected, exact_dtype=exact_dtype)\n\n # Test out variant\n out = torch.empty_like(res)\n torch.addr(m, a, b, beta=beta, alpha=alpha, out=out)\n self.assertEqual(out, expected, exact_dtype=exact_dtype)\n\n m = make_tensor((50, 50), device=device, dtype=dtype, low=-2, high=2)\n a = make_tensor((50,), device=device, dtype=dtype, low=-2, high=2)\n b = make_tensor((50,), device=device, dtype=dtype, low=-2, high=2)\n\n check(m, a, b, beta, alpha)\n\n # test transpose\n m_transpose = torch.transpose(m, 0, 1)\n check(m_transpose, a, b, beta, alpha)\n\n # test 0 strided tensor\n zero_strided = make_tensor((1,), device=device, dtype=dtype, low=-2, high=2).expand(50)\n check(m, zero_strided, b, beta, alpha)\n\n # test scalar\n m_scalar = torch.tensor(1, device=device, dtype=dtype)\n check(m_scalar, a, b, beta, alpha)\n\n # test nans and infs are not propagated to the output when beta == 0\n float_and_complex_dtypes = get_all_fp_dtypes() + get_all_complex_dtypes()\n if beta == 0 and dtype in float_and_complex_dtypes:\n m[0][10] = m[10][10] = m[20][20] = float('inf')\n m[1][10] = m[11][10] = m[21][20] = float('nan')\n check(m, a, b, 0, alpha)\n\n @dtypes(torch.bool)\n def test_addr_bool(self, device, dtype):\n self._test_addr_vs_numpy(device, dtype, beta=True, alpha=False)\n self._test_addr_vs_numpy(device, dtype, beta=False, alpha=True)\n self._test_addr_vs_numpy(device, dtype, beta=False, alpha=False)\n self._test_addr_vs_numpy(device, dtype, beta=True, alpha=True)\n\n @dtypes(*(get_all_int_dtypes()))\n def test_addr_integral(self, device, dtype):\n with self.assertRaisesRegex(RuntimeError,\n 'argument beta must not be a floating point number.'):\n self._test_addr_vs_numpy(device, dtype, beta=2., alpha=1)\n with self.assertRaisesRegex(RuntimeError,\n 'argument alpha must not be a floating point number.'):\n self._test_addr_vs_numpy(device, dtype, beta=2, alpha=1.)\n with self.assertRaisesRegex(RuntimeError,\n 'Boolean beta only supported for Boolean results.'):\n self._test_addr_vs_numpy(device, dtype, beta=True, alpha=1)\n with self.assertRaisesRegex(RuntimeError,\n 'Boolean alpha only supported for Boolean results.'):\n self._test_addr_vs_numpy(device, dtype, beta=2, alpha=True)\n\n # when beta is zero\n self._test_addr_vs_numpy(device, dtype, beta=0, alpha=2)\n # when beta is not zero\n 
self._test_addr_vs_numpy(device, dtype, beta=2, alpha=2)\n\n @precisionOverride({torch.bfloat16: 1e-1})\n @dtypes(*(get_all_fp_dtypes() + get_all_complex_dtypes()))\n def test_addr_float_and_complex(self, device, dtype):\n with self.assertRaisesRegex(RuntimeError,\n 'Boolean beta only supported for Boolean results.'):\n self._test_addr_vs_numpy(device, dtype, beta=True, alpha=1)\n with self.assertRaisesRegex(RuntimeError,\n 'Boolean alpha only supported for Boolean results.'):\n self._test_addr_vs_numpy(device, dtype, beta=2, alpha=True)\n\n # when beta is zero\n self._test_addr_vs_numpy(device, dtype, beta=0., alpha=2)\n # when beta is not zero\n self._test_addr_vs_numpy(device, dtype, beta=0.5, alpha=2)\n if dtype in get_all_complex_dtypes():\n self._test_addr_vs_numpy(device, dtype, beta=(0 + 0.1j), alpha=(0.2 - 0.2j))\n\n @dtypes(*itertools.product(get_all_dtypes(),\n get_all_dtypes()))\n def test_outer_type_promotion(self, device, dtypes):\n a = torch.randn(5).to(device=device, dtype=dtypes[0])\n b = torch.randn(5).to(device=device, dtype=dtypes[1])\n for op in (torch.outer, torch.Tensor.outer, torch.ger, torch.Tensor.ger):\n result = op(a, b)\n self.assertEqual(result.dtype, torch.result_type(a, b))\n\n # don't use @dtypes decorator to avoid generating ~1700 tests per device\n def test_addr_type_promotion(self, device):\n for dtypes0, dtypes1, dtypes2 in product(get_all_dtypes(), repeat=3):\n a = make_tensor((5,), device=device, dtype=dtypes0, low=-2, high=2)\n b = make_tensor((5,), device=device, dtype=dtypes1, low=-2, high=2)\n m = make_tensor((5, 5), device=device, dtype=dtypes2, low=-2, high=2)\n\n desired_dtype = torch.promote_types(torch.promote_types(dtypes0, dtypes1),\n dtypes2)\n for op in (torch.addr, torch.Tensor.addr):\n result = op(m, a, b)\n self.assertEqual(result.dtype, desired_dtype)\n\n # Tests migrated from test_torch.py\n # 1) test the shape of the result tensor when there is empty input tensor\n # 2) test the Runtime Exception when there is scalar input tensor\n def test_outer_ger_addr_legacy_tests(self, device):\n for size in ((0, 0), (0, 5), (5, 0)):\n a = torch.rand(size[0], device=device)\n b = torch.rand(size[1], device=device)\n\n self.assertEqual(torch.outer(a, b).shape, size)\n self.assertEqual(torch.ger(a, b).shape, size)\n\n m = torch.empty(size, device=device)\n self.assertEqual(torch.addr(m, a, b).shape, size)\n\n m = torch.randn(5, 6, device=device)\n a = torch.randn(5, device=device)\n b = torch.tensor(6, device=device)\n self.assertRaises(RuntimeError, lambda: torch.outer(a, b))\n self.assertRaises(RuntimeError, lambda: torch.outer(b, a))\n self.assertRaises(RuntimeError, lambda: torch.ger(a, b))\n self.assertRaises(RuntimeError, lambda: torch.ger(b, a))\n self.assertRaises(RuntimeError, lambda: torch.addr(m, a, b))\n self.assertRaises(RuntimeError, lambda: torch.addr(m, b, a))\n\n # Tests torch.det and its alias, torch.linalg.det, vs. 
NumPy\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.double, torch.cdouble)\n def test_det(self, device, dtype):\n tensors = (\n torch.randn((2, 2), device=device, dtype=dtype),\n torch.randn((129, 129), device=device, dtype=dtype),\n torch.randn((3, 52, 52), device=device, dtype=dtype),\n torch.randn((4, 2, 26, 26), device=device, dtype=dtype))\n\n\n ops = (torch.det, torch.Tensor.det,\n torch.linalg.det)\n for t in tensors:\n expected = np.linalg.det(t.cpu().numpy())\n for op in ops:\n actual = op(t)\n self.assertEqual(actual, expected)\n self.compare_with_numpy(op, np.linalg.det, t)\n\n # NOTE: det requires a 2D+ tensor\n t = torch.randn(1, device=device, dtype=dtype)\n with self.assertRaises(RuntimeError):\n op(t)\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n @precisionOverride({torch.float32: 1e-4, torch.complex64: 1e-4})\n def test_eigh(self, device, dtype):\n from torch.testing._internal.common_utils import random_hermitian_matrix\n\n def run_test(shape, batch, uplo):\n matrix = random_hermitian_matrix(shape, *batch, dtype=dtype, device=device)\n expected_w, expected_v = np.linalg.eigh(matrix.cpu().numpy(), UPLO=uplo)\n actual_w, actual_v = torch.linalg.eigh(matrix, UPLO=uplo)\n self.assertEqual(actual_w, expected_w)\n # sign of eigenvectors is not unique and therefore absolute values are compared\n self.assertEqual(abs(actual_v), abs(expected_v))\n # additionally we can multiply the eigenvector with a phase factor e^{i\\phi} and then compare the values\n # let's choose the convention that the first element of the eigenvectors from torch and numpy be the same\n # for real inputs, this phase factor is plus or minus one\n if matrix.numel() > 0:\n phase = torch.from_numpy(expected_v[..., 0, :]).to(device=device).div(actual_v[..., 0, :])\n actual_v_rotated = actual_v * phase.unsqueeze(-2).expand_as(actual_v)\n self.assertEqual(actual_v_rotated, expected_v)\n\n # check the out= variant\n out_w = torch.empty_like(actual_w)\n out_v = torch.empty_like(actual_v)\n ans_w, ans_v = torch.linalg.eigh(matrix, UPLO=uplo, out=(out_w, out_v))\n self.assertEqual(ans_w, out_w)\n self.assertEqual(ans_v, out_v)\n self.assertEqual(ans_w, actual_w)\n self.assertEqual(abs(ans_v), abs(actual_v))\n\n shapes = (0, 3, 5)\n batches = ((), (3, ), (2, 2))\n uplos = [\"U\", \"L\"]\n for shape, batch, uplo in itertools.product(shapes, batches, uplos):\n run_test(shape, batch, uplo)\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n @precisionOverride({torch.float32: 1e-4, torch.complex64: 1e-4})\n def test_eigh_lower_uplo(self, device, dtype):\n def run_test(shape, batch, uplo):\n # check lower case uplo\n # use non-symmetric input to check whether uplo argument is working as intended\n matrix = torch.randn(shape, shape, *batch, dtype=dtype, device=device)\n expected_w, expected_v = np.linalg.eigh(matrix.cpu().numpy(), UPLO=uplo)\n actual_w, actual_v = torch.linalg.eigh(matrix, UPLO=uplo)\n self.assertEqual(actual_w, expected_w)\n self.assertEqual(abs(actual_v), abs(expected_v))\n\n uplos = [\"u\", \"l\"]\n for uplo in uplos:\n run_test(3, (2, 2), uplo)\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_eigh_errors_and_warnings(self, device, dtype):\n from torch.testing._internal.common_utils import random_hermitian_matrix\n\n # eigh requires a square matrix\n t = torch.randn(2, 3, 
device=device, dtype=dtype)\n with self.assertRaisesRegex(RuntimeError, \"must be batches of square matrices\"):\n torch.linalg.eigh(t)\n\n # eigh requires 'uplo' parameter to be 'U' or 'L'\n t = torch.randn(3, 3, device=device, dtype=dtype)\n for uplo in [\"a\", \"wrong\"]:\n with self.assertRaisesRegex(RuntimeError, \"be \\'L\\' or \\'U\\'\"):\n torch.linalg.eigh(t, UPLO=uplo)\n with self.assertRaisesRegex(ValueError, \"be \\'L\\' or \\'U\\'\"):\n np.linalg.eigh(t.cpu().numpy(), UPLO=uplo)\n\n # if non-empty out tensor with wrong shape is passed a warning is given\n a = random_hermitian_matrix(3, dtype=dtype, device=device)\n real_dtype = a.real.dtype if dtype.is_complex else dtype\n out_w = torch.empty(7, 7, dtype=real_dtype, device=device)\n out_v = torch.empty(7, 7, dtype=dtype, device=device)\n with warnings.catch_warnings(record=True) as w:\n # Trigger warning\n torch.linalg.eigh(a, out=(out_w, out_v))\n # Check warning occurs\n self.assertEqual(len(w), 2)\n self.assertTrue(\"An output with one or more elements was resized\" in str(w[-2].message))\n self.assertTrue(\"An output with one or more elements was resized\" in str(w[-1].message))\n\n # dtypes should be safely castable\n out_w = torch.empty(0, dtype=real_dtype, device=device)\n out_v = torch.empty(0, dtype=torch.int, device=device)\n with self.assertRaisesRegex(RuntimeError, \"but got eigenvectors with dtype Int\"):\n torch.linalg.eigh(a, out=(out_w, out_v))\n\n out_w = torch.empty(0, dtype=torch.int, device=device)\n out_v = torch.empty(0, dtype=dtype, device=device)\n with self.assertRaisesRegex(RuntimeError, \"but got eigenvalues with dtype Int\"):\n torch.linalg.eigh(a, out=(out_w, out_v))\n\n # device should match\n if torch.cuda.is_available():\n wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'\n out_w = torch.empty(0, device=wrong_device, dtype=dtype)\n out_v = torch.empty(0, device=device, dtype=dtype)\n with self.assertRaisesRegex(RuntimeError, \"tensors to be on the same device\"):\n torch.linalg.eigh(a, out=(out_w, out_v))\n out_w = torch.empty(0, device=device, dtype=dtype)\n out_v = torch.empty(0, device=wrong_device, dtype=dtype)\n with self.assertRaisesRegex(RuntimeError, \"tensors to be on the same device\"):\n torch.linalg.eigh(a, out=(out_w, out_v))\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n @precisionOverride({torch.float32: 1e-4, torch.complex64: 1e-4})\n def test_eigh_non_contiguous(self, device, dtype):\n from torch.testing._internal.common_utils import random_hermitian_matrix\n\n def run_test(matrix, uplo):\n self.assertFalse(matrix.is_contiguous())\n expected_w, expected_v = np.linalg.eigh(matrix.cpu().numpy(), UPLO=uplo)\n actual_w, actual_v = torch.linalg.eigh(matrix, UPLO=uplo)\n self.assertEqual(actual_w, expected_w)\n # sign of eigenvectors is not unique and therefore absolute values are compared\n self.assertEqual(abs(actual_v), abs(expected_v))\n\n def run_test_permuted(shape, batch, uplo):\n # check for permuted / transposed inputs\n matrix = random_hermitian_matrix(shape, *batch, dtype=dtype, device=device)\n matrix = matrix.mT\n run_test(matrix, uplo)\n\n def run_test_skipped_elements(shape, batch, uplo):\n # check for inputs with skipped elements\n matrix = random_hermitian_matrix(shape, *batch, dtype=dtype, device=device)\n matrix = matrix[::2]\n run_test(matrix, uplo)\n\n shapes = (3, 5)\n batches = ((4, ), (4, 2))\n uplos = [\"U\", \"L\"]\n for shape, batch, uplo in itertools.product(shapes, 
batches, uplos):\n run_test_permuted(shape, batch, uplo)\n run_test_skipped_elements(shape, batch, uplo)\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float64, torch.complex128)\n def test_eigh_hermitian_grad(self, device, dtype):\n from torch.testing._internal.common_utils import random_hermitian_matrix\n\n def run_test(dims, uplo):\n x = random_hermitian_matrix(dims[-1], *dims[:-2], device=device, dtype=dtype).requires_grad_()\n w, v = torch.linalg.eigh(x)\n (w.sum() + abs(v).sum()).backward()\n self.assertEqual(x.grad, x.grad.mH) # Check the gradient is Hermitian\n\n for dims, uplo in itertools.product([(3, 3), (1, 1, 3, 3)], [\"L\", \"U\"]):\n run_test(dims, uplo)\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n @precisionOverride({torch.float32: 1e-4, torch.complex64: 1e-4})\n def test_eigvalsh(self, device, dtype):\n from torch.testing._internal.common_utils import random_hermitian_matrix\n\n def run_test(shape, batch, uplo):\n matrix = random_hermitian_matrix(shape, *batch, dtype=dtype, device=device)\n expected_w = np.linalg.eigvalsh(matrix.cpu().numpy(), UPLO=uplo)\n actual_w = torch.linalg.eigvalsh(matrix, UPLO=uplo)\n self.assertEqual(actual_w, expected_w)\n\n # check the out= variant\n out = torch.empty_like(actual_w)\n ans = torch.linalg.eigvalsh(matrix, UPLO=uplo, out=out)\n self.assertEqual(ans, out)\n self.assertEqual(ans, actual_w)\n\n shapes = (0, 3, 5)\n batches = ((), (3, ), (2, 2))\n uplos = [\"U\", \"L\"]\n for shape, batch, uplo in itertools.product(shapes, batches, uplos):\n run_test(shape, batch, uplo)\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_eigvalsh_errors_and_warnings(self, device, dtype):\n # eigvalsh requires a square matrix\n t = torch.randn(2, 3, device=device, dtype=dtype)\n with self.assertRaisesRegex(RuntimeError, \"must be batches of square matrices\"):\n torch.linalg.eigvalsh(t)\n\n # eigvalsh requires 'uplo' parameter to be 'U' or 'L'\n t = torch.randn(3, 3, device=device, dtype=dtype)\n for uplo in [\"a\", \"wrong\"]:\n with self.assertRaisesRegex(RuntimeError, \"be \\'L\\' or \\'U\\'\"):\n torch.linalg.eigvalsh(t, UPLO=uplo)\n with self.assertRaisesRegex(ValueError, \"be \\'L\\' or \\'U\\'\"):\n np.linalg.eigvalsh(t.cpu().numpy(), UPLO=uplo)\n\n # if non-empty out tensor with wrong shape is passed a warning is given\n real_dtype = t.real.dtype if dtype.is_complex else dtype\n out = torch.empty_like(t).to(real_dtype)\n with warnings.catch_warnings(record=True) as w:\n # Trigger warning\n torch.linalg.eigvalsh(t, out=out)\n # Check warning occurs\n self.assertEqual(len(w), 1)\n self.assertTrue(\"An output with one or more elements was resized\" in str(w[-1].message))\n\n # dtypes should be safely castable\n out = torch.empty(0, dtype=torch.int, device=device)\n with self.assertRaisesRegex(RuntimeError, \"but got result with dtype Int\"):\n torch.linalg.eigvalsh(t, out=out)\n\n # device should match\n if torch.cuda.is_available():\n wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'\n out = torch.empty(0, device=wrong_device, dtype=dtype)\n with self.assertRaisesRegex(RuntimeError, \"tensors to be on the same device\"):\n torch.linalg.eigvalsh(t, out=out)\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n @precisionOverride({torch.float32: 1e-4, torch.complex64: 1e-4})\n def 
test_eigvalsh_non_contiguous(self, device, dtype):\n from torch.testing._internal.common_utils import random_hermitian_matrix\n\n def run_test(matrix, uplo):\n self.assertFalse(matrix.is_contiguous())\n expected_w = np.linalg.eigvalsh(matrix.cpu().numpy(), UPLO=uplo)\n actual_w = torch.linalg.eigvalsh(matrix, UPLO=uplo)\n self.assertEqual(actual_w, expected_w)\n\n def run_test_permuted(shape, batch, uplo):\n # check for permuted / transposed inputs\n matrix = random_hermitian_matrix(shape, *batch, dtype=dtype, device=device)\n matrix = matrix.mT\n run_test(matrix, uplo)\n\n def run_test_skipped_elements(shape, batch, uplo):\n # check for inputs with skipped elements\n matrix = random_hermitian_matrix(shape, *batch, dtype=dtype, device=device)\n matrix = matrix[::2]\n run_test(matrix, uplo)\n\n shapes = (3, 5)\n batches = ((4, ), (4, 2))\n uplos = [\"U\", \"L\"]\n for shape, batch, uplo in itertools.product(shapes, batches, uplos):\n run_test_permuted(shape, batch, uplo)\n run_test_skipped_elements(shape, batch, uplo)\n\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_kron(self, device, dtype):\n\n def run_test_case(a_shape, b_shape):\n a = torch.rand(a_shape, dtype=dtype, device=device)\n b = torch.rand(b_shape, dtype=dtype, device=device)\n\n expected = np.kron(a.cpu().numpy(), b.cpu().numpy())\n result = torch.kron(a, b)\n self.assertEqual(result, expected)\n\n # check the out= variant\n out = torch.empty_like(result)\n ans = torch.kron(a, b, out=out)\n self.assertEqual(ans, out)\n self.assertEqual(ans, result)\n\n shapes = [(4,), (2, 2), (1, 2, 3), (1, 2, 3, 3)]\n for a_shape, b_shape in itertools.product(shapes, reversed(shapes)):\n run_test_case(a_shape, b_shape)\n\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_kron_non_contiguous(self, device, dtype):\n\n def run_test_transposed(a_shape, b_shape):\n # check for transposed case\n a = torch.rand(a_shape, dtype=dtype, device=device).mT\n b = torch.rand(b_shape, dtype=dtype, device=device).mT\n self.assertFalse(a.is_contiguous())\n self.assertFalse(b.is_contiguous())\n\n expected = np.kron(a.cpu().numpy(), b.cpu().numpy())\n result = torch.kron(a, b)\n self.assertEqual(result, expected)\n\n # check the out= variant\n out = torch.empty(result.mT.shape, dtype=dtype, device=device).mT\n self.assertFalse(out.is_contiguous())\n ans = torch.kron(a, b, out=out)\n self.assertEqual(ans, out)\n self.assertEqual(ans, result)\n\n def run_test_skipped_elements(a_shape, b_shape):\n # check for transposed case\n a = torch.rand(2 * a_shape[0], *a_shape[1:], dtype=dtype, device=device)[::2]\n b = torch.rand(2 * b_shape[0], *b_shape[1:], dtype=dtype, device=device)[::2]\n self.assertFalse(a.is_contiguous())\n self.assertFalse(b.is_contiguous())\n\n expected = np.kron(a.cpu().numpy(), b.cpu().numpy())\n result = torch.kron(a, b)\n self.assertEqual(result, expected)\n\n # check the out= variant\n out = torch.empty(2 * result.shape[0], *result.shape[1:], dtype=dtype, device=device)[::2]\n self.assertFalse(out.is_contiguous())\n ans = torch.kron(a, b, out=out)\n self.assertEqual(ans, out)\n self.assertEqual(ans, result)\n\n shapes = [(2, 2), (2, 2, 3), (2, 2, 3, 3)]\n for a_shape, b_shape in itertools.product(shapes, reversed(shapes)):\n # run_test_transposed(a_shape, b_shape)\n run_test_skipped_elements(a_shape, b_shape)\n\n # Test that kron perserve memory format\n a = torch.randn(1, 2, 3, 4, dtype=dtype, device=device).contiguous(memory_format=torch.channels_last)\n b = 
torch.randn(1, 2, 3, 4, dtype=dtype, device=device).contiguous(memory_format=torch.channels_last)\n c = torch.kron(a, b)\n self.assertTrue(c.is_contiguous(memory_format=torch.channels_last))\n torch.kron(a, b, out=c)\n self.assertTrue(c.is_contiguous(memory_format=torch.channels_last))\n c = c.contiguous(memory_format=torch.contiguous_format)\n torch.kron(a, b, out=c)\n self.assertTrue(c.is_contiguous(memory_format=torch.contiguous_format))\n\n\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_kron_empty(self, device, dtype):\n\n def run_test_case(empty_shape):\n a = torch.eye(3, dtype=dtype, device=device)\n b = torch.empty(empty_shape, dtype=dtype, device=device)\n result = torch.kron(a, b)\n expected = np.kron(a.cpu().numpy(), b.cpu().numpy())\n self.assertEqual(result, expected)\n\n # NumPy doesn't work if the first argument is empty\n result = torch.kron(b, a)\n self.assertEqual(result.shape, expected.shape)\n\n empty_shapes = [(0,), (2, 0), (1, 0, 3)]\n for empty_shape in empty_shapes:\n run_test_case(empty_shape)\n\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_kron_errors_and_warnings(self, device, dtype):\n # if non-empty out tensor with wrong shape is passed a warning is given\n a = torch.eye(3, dtype=dtype, device=device)\n b = torch.ones((2, 2), dtype=dtype, device=device)\n out = torch.empty_like(a)\n with warnings.catch_warnings(record=True) as w:\n # Trigger warning\n torch.kron(a, b, out=out)\n # Check warning occurs\n self.assertEqual(len(w), 1)\n self.assertTrue(\"An output with one or more elements was resized\" in str(w[-1].message))\n\n # dtypes should match\n out = torch.empty_like(a).to(torch.int)\n with self.assertRaisesRegex(RuntimeError, \"can't be cast to the desired output type\"):\n torch.kron(a, b, out=out)\n\n # This test confirms that torch.linalg.norm's dtype argument works\n # as expected, according to the function's documentation\n @skipCUDAIfNoMagma\n def test_norm_dtype(self, device):\n def run_test_case(input_size, ord, keepdim, from_dtype, to_dtype):\n # Determine the best dtype to use for comparisons between tensors\n # of two different types\n def get_compare_dtype(type0, type1):\n types_32bit_based = [torch.float, torch.cfloat]\n is_complex = type0.is_complex or type1.is_complex\n\n if type0 in types_32bit_based or type1 in types_32bit_based:\n return torch.cfloat if is_complex else torch.float\n else:\n return torch.cdouble if is_complex else torch.double\n\n compare_dtype = get_compare_dtype(from_dtype, to_dtype)\n\n def get_value_type(dtype):\n if dtype == torch.cfloat:\n return torch.float\n elif dtype == torch.cdouble:\n return torch.double\n elif dtype == torch.complex32:\n return torch.float16\n else:\n return dtype\n\n msg = (\n f'input_size={input_size}, ord={ord}, keepdim={keepdim}, '\n f'from_dtype={from_dtype}, to_dtype={to_dtype}')\n input = torch.randn(*input_size, dtype=from_dtype, device=device)\n result = torch.linalg.norm(input, ord, keepdim=keepdim)\n if from_dtype.is_complex:\n # By default, norm downgrades a complex input to the corresponding real number type\n self.assertEqual(result.dtype, get_value_type(from_dtype), msg=msg)\n else:\n self.assertEqual(result.dtype, from_dtype, msg=msg)\n\n result_out = torch.empty((0), dtype=to_dtype, device=device)\n torch.linalg.norm(input, ord, keepdim=keepdim, out=result_out)\n self.assertEqual(result_out.dtype, to_dtype, msg=msg)\n self.assertEqual(result.to(compare_dtype), result_out.to(compare_dtype), 
msg=msg)\n\n result_with_dtype = torch.linalg.norm(input, ord, keepdim=keepdim, dtype=to_dtype)\n self.assertEqual(result_with_dtype.dtype, to_dtype, msg=msg)\n\n if from_dtype.is_complex:\n result_convert_first = torch.linalg.norm(input.to(to_dtype), ord, keepdim=keepdim)\n self.assertEqual(result_with_dtype.to(compare_dtype), result_convert_first.to(compare_dtype), msg=msg)\n else:\n self.assertEqual(result.to(compare_dtype), result_with_dtype.to(compare_dtype), msg=msg)\n\n result_out_with_dtype = torch.empty_like(result_with_dtype)\n torch.linalg.norm(input, ord, keepdim=keepdim, dtype=to_dtype, out=result_out_with_dtype)\n self.assertEqual(result_out_with_dtype.dtype, to_dtype, msg=msg)\n self.assertEqual(result_with_dtype, result_out_with_dtype, msg=msg)\n\n ord_vector = [0, 0.1, -0.1, 1, -1, 2, -2, 3, -3, 4.5, -4.5, inf, -inf, None]\n ord_matrix = ['fro', 'nuc', 1, -1, 2, -2, inf, -inf, None]\n S = 10\n test_cases = [\n ((S, ), ord_vector),\n ((S, S), ord_matrix),\n ]\n for keepdim in [True, False]:\n for input_size, ord_settings in test_cases:\n for ord in ord_settings:\n if self.device_type == 'cpu' and not torch._C.has_lapack and ord in [2, -2, 'nuc']:\n continue\n\n dtypes = [torch.float, torch.double, torch.cfloat, torch.cdouble]\n for from_dtype, to_dtype in itertools.product(dtypes, dtypes):\n if from_dtype.is_complex and not to_dtype.is_complex:\n continue\n run_test_case(input_size, ord, keepdim, from_dtype, to_dtype)\n\n # Make sure that setting dtype != out.dtype raises an error\n dtype_pairs = [\n (torch.float, torch.double),\n (torch.double, torch.float),\n (torch.cfloat, torch.cdouble),\n (torch.cdouble, torch.cfloat),\n ]\n for keepdim in [True, False]:\n for input_size, ord_settings in test_cases:\n for ord in ord_settings:\n for dtype, out_dtype in dtype_pairs:\n input = torch.rand(*input_size)\n result = torch.tensor([]).to(out_dtype)\n with self.assertRaisesRegex(RuntimeError, r'provided dtype must match dtype of result'):\n torch.linalg.norm(input, ord=ord, keepdim=keepdim, dtype=dtype, out=result)\n\n @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble, torch.bfloat16, torch.float16)\n def test_vector_norm(self, device, dtype):\n # This test compares torch.linalg.vector_norm's output with\n # torch.linalg.norm given a flattened tensor\n ord_vector = [0, 0.9, 1, 2, 3, inf, -0.5, -1, -2, -3, -inf]\n input_sizes = [\n (10, ),\n (4, 5),\n (3, 4, 5),\n (0, ),\n (0, 10),\n (0, 0),\n (10, 0, 10),\n ]\n\n def vector_norm_reference(input, ord, dim=None, keepdim=False, dtype=None):\n if dim is None:\n input_maybe_flat = input.flatten(0, -1)\n else:\n input_maybe_flat = input\n\n result = torch.linalg.norm(input_maybe_flat, ord, dim=dim, keepdim=keepdim, dtype=dtype)\n if keepdim and dim is None:\n result = result.reshape([1] * input.dim())\n return result\n\n def run_test_case(input, ord, dim, keepdim, norm_dtype):\n msg = f'input.size()={input.size()}, ord={ord}, dim={dim}, keepdim={keepdim}, dtype={dtype}, norm_dtype={norm_dtype}'\n error_msg = None\n if input.numel() == 0:\n if ord < 0:\n error_msg = r'linalg.vector_norm of negative order cannot be performed on an empty tensor'\n elif ord == inf and (dim is None or input.size(dim) == 0):\n error_msg = (\n r'linalg.vector_norm cannot compute the infinity norm on an empty '\n r'dimension because the operation does not have an identity')\n if error_msg is None:\n result_dtype_reference = vector_norm_reference(input, ord, dim=dim, keepdim=keepdim, dtype=norm_dtype)\n result_dtype = 
torch.linalg.vector_norm(input, ord, dim=dim, keepdim=keepdim, dtype=norm_dtype)\n self.assertEqual(result_dtype, result_dtype_reference, msg=msg)\n\n if norm_dtype is not None:\n result_convert_before = torch.linalg.vector_norm(input.to(norm_dtype), ord, dim=dim, keepdim=keepdim)\n if norm_dtype.is_complex:\n result_convert_before = result_convert_before.to(norm_dtype)\n\n result_out = torch.empty((0), dtype=norm_dtype, device=device)\n torch.linalg.vector_norm(input, ord, dtype=norm_dtype, dim=dim, keepdim=keepdim, out=result_out)\n self.assertEqual(result_convert_before, result_out, msg=msg)\n else:\n result_out = torch.empty((0), dtype=result_dtype.dtype, device=device)\n torch.linalg.vector_norm(input, ord, dim=dim, keepdim=keepdim, out=result_out)\n self.assertEqual(result_dtype, result_out, msg=msg)\n else:\n with self.assertRaises(RuntimeError):\n vector_norm_reference(input, ord, dim=dim, keepdim=keepdim)\n with self.assertRaisesRegex(RuntimeError, error_msg):\n torch.linalg.vector_norm(input, ord, dim=dim, keepdim=keepdim)\n\n if dtype.is_complex:\n norm_dtypes = [None, torch.cfloat, torch.cdouble]\n else:\n norm_dtypes = [None, torch.float, torch.double, torch.cfloat, torch.cdouble, torch.float16, torch.bfloat16]\n\n for input_size, ord, keepdim, norm_dtype in product(input_sizes, ord_vector, [True, False], norm_dtypes):\n input = make_tensor(input_size, device, dtype, low=-9, high=9)\n for dim in [None, random.randint(0, len(input_size) - 1)]:\n run_test_case(\n input,\n ord,\n dim,\n keepdim,\n norm_dtype)\n\n def test_vector_norm_dim_tuple_arg(self, device):\n test_cases = [\n # input size, dim, error, error message\n ((4, ), (0, ), None, None),\n ((4, ), (1, ), IndexError, r'Dimension out of range'),\n ((4, ), (-2, ), IndexError, r'Dimension out of range'),\n ((4, 3), (0, -1), None, None),\n ((4, 3), (0, 0), RuntimeError, r'dim 0 appears multiple times in the list of dims'),\n ((4, 3), (0, -2), RuntimeError, r'dim 0 appears multiple times in the list of dims'),\n ((4, 3), (0, 1.0), TypeError, r\"argument 'dim' must be tuple of ints\"),\n ((4, 3), (None, ), TypeError, r\"argument 'dim' must be tuple of ints\"),\n ]\n for input_size, dim_tuple, error, error_msg in test_cases:\n input = torch.randn(input_size, device=device)\n # vector_norm should accept a tuple or a list for dim arg\n for dim in [dim_tuple, list(dim_tuple)]:\n if error is None:\n torch.linalg.vector_norm(input, dim=dim)\n else:\n with self.assertRaises(error):\n torch.linalg.vector_norm(input, dim=dim)\n\n # Test that linalg.vector_norm throws an error if the out tensor's dtype\n # does not match the expected output dtype\n @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble, torch.bfloat16, torch.float16)\n def test_vector_norm_out_dtype_error(self, device, dtype):\n input = torch.randn(10, device=device, dtype=dtype)\n dtypes = [None, torch.float, torch.double, torch.cfloat, torch.cdouble, torch.float16, torch.bfloat16]\n\n for norm_dtype, out_dtype in product(dtypes, dtypes):\n if out_dtype is None:\n continue\n\n if norm_dtype is None:\n if dtype == torch.cfloat:\n expected_dtype = torch.float\n elif dtype == torch.cdouble:\n expected_dtype = torch.double\n else:\n expected_dtype = dtype\n else:\n expected_dtype = norm_dtype\n\n result = torch.empty((0), device=device, dtype=out_dtype)\n msg = f'norm_dtype: {norm_dtype}, out_dtype: {out_dtype}, expected_dtype: {expected_dtype}'\n\n if dtype.is_complex and norm_dtype is not None and not norm_dtype.is_complex:\n with 
self.assertRaisesRegex(RuntimeError, r\"linalg.vector_norm expected complex 'dtype'\", msg=msg):\n torch.linalg.vector_norm(input, dtype=norm_dtype, out=result)\n\n elif out_dtype != expected_dtype:\n with self.assertRaisesRegex(RuntimeError, r'linalg.vector_norm expected out tensor dtype', msg=msg):\n torch.linalg.vector_norm(input, dtype=norm_dtype, out=result)\n else:\n torch.linalg.vector_norm(input, dtype=norm_dtype, out=result)\n\n # This test compares torch.linalg.norm and numpy.linalg.norm to ensure that\n # their vector norm results match\n @dtypes(torch.float, torch.double)\n def test_norm_vector(self, device, dtype):\n def run_test_case(input, p, dim, keepdim):\n result = torch.linalg.norm(input, ord, dim, keepdim)\n input_numpy = input.cpu().numpy()\n result_numpy = np.linalg.norm(input_numpy, ord, dim, keepdim)\n\n msg = f'input.size()={input.size()}, ord={ord}, dim={dim}, keepdim={keepdim}, dtype={dtype}'\n self.assertEqual(result, result_numpy, msg=msg)\n\n result_out = torch.empty_like(result)\n torch.linalg.norm(input, ord, dim, keepdim, out=result_out)\n self.assertEqual(result, result_out, msg=msg)\n\n ord_vector = [0, 1, -1, 2, -2, 3, -3, 4.5, -4.5, inf, -inf]\n S = 10\n test_cases = [\n # input size, p settings, dim\n ((S, ), ord_vector, None),\n ((S, ), ord_vector, 0),\n ((S, S, S), ord_vector, 0),\n ((S, S, S), ord_vector, 1),\n ((S, S, S), ord_vector, 2),\n ((S, S, S), ord_vector, -1),\n ((S, S, S), ord_vector, -2),\n ]\n L = 1_000_000\n if dtype == torch.double:\n test_cases.append(((L, ), ord_vector, None))\n for keepdim in [True, False]:\n for input_size, ord_settings, dim in test_cases:\n input = torch.randn(*input_size, dtype=dtype, device=device)\n for ord in ord_settings:\n run_test_case(input, ord, dim, keepdim)\n\n # This test compares torch.linalg.norm, torch.linalg.matrix_norm and numpy.linalg.norm to\n # ensure that their matrix norm results match.\n @skipMeta # https://github.com/pytorch/pytorch/issues/54082\n @skipCUDAIfNoMagma\n @dtypes(torch.float, torch.double)\n @precisionOverride({torch.float32: 2e-5})\n def test_norm_matrix(self, device, dtype):\n def run_test_case(input, ord, dim, keepdim):\n msg = f'input.size()={input.size()}, ord={ord}, dim={dim}, keepdim={keepdim}, dtype={dtype}'\n result = torch.linalg.norm(input, ord, dim, keepdim)\n input_numpy = input.cpu().numpy()\n result_numpy = np.linalg.norm(input_numpy, ord, dim, keepdim)\n\n def check(op):\n result = op(input, ord, dim, keepdim)\n self.assertEqual(result, result_numpy, msg=msg)\n result_out = torch.empty_like(result)\n op(input, ord, dim, keepdim, out=result_out)\n self.assertEqual(result, result_out, msg=msg)\n\n check(torch.linalg.norm)\n if ord is not None and dim is not None:\n check(torch.linalg.matrix_norm)\n\n ord_matrix = [1, -1, 2, -2, inf, -inf, 'nuc', 'fro']\n S = 10\n test_cases = [\n # input size, p settings, dim\n ((S, S), ord_matrix, None),\n ((S, S), ord_matrix, (0, 1)),\n ((S, S), ord_matrix, (1, 0)),\n ((S, S, S, S), ord_matrix, (2, 0)),\n ((S, S, S, S), ord_matrix, (-1, -2)),\n ((S, S, S, S), ord_matrix, (-1, -3)),\n ((S, S, S, S), ord_matrix, (-3, 2)),\n ]\n L = 1_000\n\n if dtype == torch.double:\n test_cases.append(((L, L), ord_matrix, None))\n\n for keepdim in [True, False]:\n for input_size, ord_settings, dim in test_cases:\n input = torch.randn(*input_size, dtype=dtype, device=device)\n for ord in ord_settings:\n if self.device_type == 'cpu' and not torch._C.has_lapack and ord in [2, -2, 'nuc']:\n continue\n run_test_case(input, ord, dim, keepdim)\n\n\n 
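# Illustrative sketch, not part of the upstream PyTorch test suite: the matrix-norm\n    # comparisons above rest on the standard identities between matrix norms and\n    # singular values (ord=2 is the largest singular value, ord=-2 the smallest,\n    # 'nuc' their sum, 'fro' their l2 norm). The helper below is a minimal sketch of\n    # checking those identities via torch.linalg.svdvals; its name is our own, it is\n    # underscore-prefixed so the test runner never collects it, and it assumes a\n    # LAPACK-enabled build for the chosen device.\n    def _matrix_norm_singular_value_identities_sketch(self, device='cpu', dtype=torch.float64):\n        A = torch.randn(6, 4, device=device, dtype=dtype)\n        s = torch.linalg.svdvals(A)  # singular values, returned in descending order\n        self.assertEqual(torch.linalg.matrix_norm(A, 2), s[0], atol=1e-10, rtol=0)\n        self.assertEqual(torch.linalg.matrix_norm(A, -2), s[-1], atol=1e-10, rtol=0)\n        self.assertEqual(torch.linalg.matrix_norm(A, 'nuc'), s.sum(), atol=1e-10, rtol=0)\n        self.assertEqual(torch.linalg.matrix_norm(A, 'fro'), torch.linalg.vector_norm(s, 2), atol=1e-10, rtol=0)\n\n    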
@onlyCUDA\n @dtypes(torch.bfloat16, torch.float16)\n def test_norm_fused_type_promotion(self, device, dtype):\n x = torch.randn(10, device=device, dtype=dtype)\n\n def profile_and_check(fn, x, kwargs, fn_name):\n with torch.profiler.profile(activities=(torch.profiler.ProfilerActivity.CPU,)) as p:\n fn(x, **kwargs, dtype=torch.float)\n # smoke check that profiler returned some events\n self.assertTrue(fn_name in map(lambda e: e.name, p.events()))\n # test that there was no explicit copy\n self.assertFalse(\"aten::to\" in map(lambda e: e.name, p.events()))\n\n for f, kwargs, fn_name in zip((torch.norm, torch.linalg.vector_norm), ({\"p\" : 2}, {}),\n (\"aten::norm\", \"aten::linalg_vector_norm\")):\n profile_and_check(f, x, kwargs, fn_name)\n\n @skipMeta # https://github.com/pytorch/pytorch/issues/53739\n @skipCPUIfNoLapack\n @skipCUDAIfNoMagma\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n @precisionOverride({torch.float32: 1e-3})\n def test_cond(self, device, dtype):\n def run_test_case(input, p):\n result = torch.linalg.cond(input, p)\n result_numpy = np.linalg.cond(input.cpu().numpy(), p)\n self.assertEqual(result, result_numpy, rtol=1e-2, atol=self.precision, exact_dtype=False)\n self.assertEqual(result.shape, result_numpy.shape)\n\n # test out= variant\n out = torch.empty_like(result)\n ans = torch.linalg.cond(input, p, out=out)\n self.assertEqual(ans, out)\n self.assertEqual(ans, result)\n\n norm_types = [1, -1, 2, -2, inf, -inf, 'fro', 'nuc', None]\n input_sizes = [(32, 32), (2, 3, 3, 3)]\n for input_size in input_sizes:\n input = torch.randn(*input_size, dtype=dtype, device=device)\n for p in norm_types:\n run_test_case(input, p)\n\n # test empty batch sizes\n input_sizes = [(0, 3, 3), (0, 2, 5, 5)]\n for input_size in input_sizes:\n input = torch.randn(*input_size, dtype=dtype, device=device)\n for p in norm_types:\n run_test_case(input, p)\n\n # test non-square input\n input_sizes = [(16, 32), (32, 16), (2, 3, 5, 3), (2, 3, 3, 5)]\n for input_size in input_sizes:\n input = torch.randn(*input_size, dtype=dtype, device=device)\n for p in [2, -2, None]:\n run_test_case(input, p)\n\n # test for singular input\n a = torch.eye(3, dtype=dtype, device=device)\n a[-1, -1] = 0 # make 'a' singular\n for p in norm_types:\n try:\n run_test_case(a, p)\n except np.linalg.LinAlgError:\n # Numpy may fail to converge for some BLAS backends (although this is very rare)\n # See the discussion in https://github.com/pytorch/pytorch/issues/67675\n pass\n\n # test for 0x0 matrices. 
NumPy doesn't work for such input, we return 0\n input_sizes = [(0, 0), (2, 5, 0, 0)]\n for input_size in input_sizes:\n input = torch.randn(*input_size, dtype=dtype, device=device)\n for p in ['fro', 2]:\n expected_dtype = a.real.dtype if dtype.is_complex else dtype\n expected = torch.zeros(input_size[:-2], dtype=expected_dtype, device=device)\n actual = torch.linalg.cond(input, p)\n self.assertEqual(actual, expected)\n\n @skipMeta # https://github.com/pytorch/pytorch/issues/53739\n @skipCPUIfNoLapack\n @skipCUDAIfNoMagma\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n @precisionOverride({torch.float32: 1e-3})\n def test_cond_errors_and_warnings(self, device, dtype):\n norm_types = [1, -1, 2, -2, inf, -inf, 'fro', 'nuc', None]\n\n # cond expects the input to be at least 2-dimensional\n a = torch.ones(3, dtype=dtype, device=device)\n for p in norm_types:\n with self.assertRaisesRegex(RuntimeError, r'at least 2 dimensions'):\n torch.linalg.cond(a, p)\n\n # for some norm types cond expects the input to be square\n a = torch.ones(3, 2, dtype=dtype, device=device)\n norm_types = [1, -1, inf, -inf, 'fro', 'nuc']\n for p in norm_types:\n with self.assertRaisesRegex(RuntimeError, r'must be batches of square matrices'):\n torch.linalg.cond(a, p)\n\n # if non-empty out tensor with wrong shape is passed a warning is given\n a = torch.ones((2, 2), dtype=dtype, device=device)\n for p in ['fro', 2]:\n real_dtype = a.real.dtype if dtype.is_complex else dtype\n out = torch.empty(a.shape, dtype=real_dtype, device=device)\n with warnings.catch_warnings(record=True) as w:\n # Trigger warning\n torch.linalg.cond(a, p, out=out)\n # Check warning occurs\n self.assertEqual(len(w), 1)\n self.assertTrue(\"An output with one or more elements was resized\" in str(w[-1].message))\n\n # dtypes should be safely castable\n out = torch.empty(0, dtype=torch.int, device=device)\n for p in ['fro', 2]:\n with self.assertRaisesRegex(RuntimeError, \"but got result with dtype Int\"):\n torch.linalg.cond(a, p, out=out)\n\n # device should match\n if torch.cuda.is_available():\n wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'\n out = torch.empty(0, dtype=dtype, device=wrong_device)\n for p in ['fro', 2]:\n with self.assertRaisesRegex(RuntimeError, \"tensors to be on the same device\"):\n torch.linalg.cond(a, p, out=out)\n\n # for batched input if at least one matrix in the batch is not invertible,\n # we can't get the result for all other (possibly) invertible matrices in the batch without an explicit for loop.\n # this should change when at::inverse works with silent errors\n # NumPy works fine in this case because it's possible to silence the error and get the inverse matrix results\n # possibly filled with NANs\n batch_dim = 3\n a = torch.eye(3, 3, dtype=dtype, device=device)\n a = a.reshape((1, 3, 3))\n a = a.repeat(batch_dim, 1, 1)\n a[1, -1, -1] = 0 # now a[1] is singular\n for p in [1, -1, inf, -inf, 'fro', 'nuc']:\n result = torch.linalg.cond(a, p)\n self.assertEqual(result[1], float('inf'))\n\n # check invalid norm type\n a = torch.ones(3, 3, dtype=dtype, device=device)\n for p in ['wrong_norm', 5]:\n with self.assertRaisesRegex(RuntimeError, f\"linalg.cond got an invalid norm type: {p}\"):\n torch.linalg.cond(a, p)\n\n # This test calls torch.linalg.norm and numpy.linalg.norm with illegal arguments\n # to ensure that they both throw errors\n @dtypes(torch.float, torch.double)\n def test_norm_errors(self, device, dtype):\n def run_error_test_case(input, ord, dim, keepdim, 
error_type, error_regex):\n test_case_info = (\n f'test case input.size()={input.size()}, ord={ord}, dim={dim}, '\n f'keepdim={keepdim}, dtype={dtype}')\n\n with self.assertRaisesRegex(error_type, error_regex, msg=test_case_info):\n torch.linalg.norm(input, ord, dim, keepdim)\n\n input_numpy = input.cpu().numpy()\n\n msg = f'numpy does not raise error but pytorch does, for case \"{test_case_info}\"'\n with self.assertRaises(Exception, msg=test_case_info):\n np.linalg.norm(input_numpy, ord, dim, keepdim)\n\n S = 10\n error_test_cases = [\n # input size, p settings, dim, error type, error regex\n ((S, ), ['fro'], None, RuntimeError, r'order \"fro\" can only be used if either len\\(dim\\) == 2'),\n ((S, ), ['nuc'], None, RuntimeError, r'order \"nuc\" can only be used if either len\\(dim\\) == 2'),\n ((S, S), [3.5], None, RuntimeError, r'Order 3.5 not supported for matrix norm'),\n ((S, S), [0], None, RuntimeError, r'Order 0 not supported for matrix norm'),\n ((S, S), ['nuc'], 0, RuntimeError, r'order \"nuc\" can only be used if either len\\(dim\\) == 2'),\n ((S, S), ['fro'], 0, RuntimeError, r'order \"fro\" can only be used if either len\\(dim\\) == 2'),\n ((S, S), ['nuc'], (0, 0), RuntimeError, r'duplicate or invalid dimensions'),\n ((S, S), ['fro', 0], (0, 0), RuntimeError, r'Expected dims to be different'),\n ((S, S), ['fro', 'nuc', 0], (0, 4), IndexError, r'Dimension out of range'),\n ((S, ), [0], (4, ), IndexError, r'Dimension out of range'),\n ((S, ), [None], (0, 0), RuntimeError, r'dim 0 appears multiple times'),\n ((S, S, S), [1], (0, 1, 2), RuntimeError, r\"'dim' must specify 1 or 2 dimensions\"),\n ((S, S, S), [1], None, RuntimeError, r\"'dim' must specify 1 or 2 dimensions\"),\n ((S, S), ['garbage'], (0, 1), RuntimeError, r'Invalid norm order: garbage'),\n ]\n for keepdim in [True, False]:\n for input_size, ord_settings, dim, error_type, error_regex in error_test_cases:\n input = torch.randn(*input_size, dtype=dtype, device=device)\n for ord in ord_settings:\n run_error_test_case(input, ord, dim, keepdim, error_type, error_regex)\n\n # Test complex number inputs for linalg.norm\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.cfloat, torch.cdouble)\n @precisionOverride({torch.cfloat: 2e-4})\n def test_norm_complex(self, device, dtype):\n def gen_error_message(input_size, ord, keepdim, dim=None):\n return \"complex norm failed for input size %s, ord=%s, keepdim=%s, dim=%s\" % (\n input_size, ord, keepdim, dim)\n\n vector_ords = [None, 0, 1, 2, 3, inf, -1, -2, -3, -inf]\n matrix_ords = [None, 'fro', 'nuc', 1, 2, inf, -1, -2, -inf]\n\n # Test supported ords\n for keepdim in [False, True]:\n # vector norm\n x = torch.randn(25, device=device, dtype=dtype)\n xn = x.cpu().numpy()\n for ord in vector_ords:\n res = torch.linalg.norm(x, ord, keepdim=keepdim).cpu()\n expected = np.linalg.norm(xn, ord, keepdims=keepdim)\n msg = gen_error_message(x.size(), ord, keepdim)\n self.assertEqual(res.shape, expected.shape, msg=msg)\n self.assertEqual(res, expected, msg=msg, exact_dtype=False)\n\n res_out = torch.tensor([]).to(device)\n torch.linalg.norm(x, ord, keepdim=keepdim, out=res_out)\n self.assertEqual(res_out.shape, expected.shape, msg=msg)\n self.assertEqual(res_out.cpu(), expected, msg=msg, exact_dtype=False)\n\n # matrix norm\n x = torch.randn(25, 25, device=device, dtype=dtype)\n xn = x.cpu().numpy()\n for ord in matrix_ords:\n res = torch.linalg.norm(x, ord, keepdim=keepdim).cpu()\n expected = np.linalg.norm(xn, ord, keepdims=keepdim)\n msg = gen_error_message(x.size(), ord, 
keepdim)\n self.assertEqual(res.shape, expected.shape, msg=msg)\n self.assertEqual(res, expected, msg=msg, exact_dtype=False)\n\n res_out = torch.tensor([]).to(device)\n torch.linalg.norm(x, ord, keepdim=keepdim, out=res_out)\n self.assertEqual(res_out.shape, expected.shape, msg=msg)\n self.assertEqual(res_out.cpu(), expected, msg=msg, exact_dtype=False)\n\n # Test that linal.vector_norm gives the same result as numpy when inputs\n # contain extreme values (inf, -inf, nan)\n def test_vector_norm_extreme_values(self, device):\n vector_ords = [0, 1, 2, 3, inf, -1, -2, -3, -inf]\n vectors = []\n for pair in itertools.product([inf, -inf, 0.0, nan, 1.0], repeat=2):\n vectors.append(list(pair))\n for vector in vectors:\n x = torch.tensor(vector, device=device)\n x_n = x.cpu().numpy()\n for ord in vector_ords:\n msg = f'ord={ord}, vector={vector}'\n result = torch.linalg.vector_norm(x, ord=ord)\n result_n = np.linalg.norm(x_n, ord=ord)\n self.assertEqual(result, result_n, msg=msg)\n\n @skipMeta # https://github.com/pytorch/pytorch/issues/54082\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float, torch.double)\n @precisionOverride({torch.float32: 2e-5})\n def test_matrix_norm(self, device, dtype):\n # Test only inputs for which torch.linalg.matrix_norm diverges from torch.linalg.norm\n A = make_tensor((2, 2, 2), device, dtype)\n\n with self.assertRaisesRegex(RuntimeError, r'linalg.matrix_norm\\(\\):.*must be a matrix.*'):\n torch.linalg.matrix_norm(make_tensor((2,), device, dtype))\n with self.assertRaisesRegex(RuntimeError, r'linalg.matrix_norm\\(\\):.*must be a 2-tuple.*'):\n torch.linalg.matrix_norm(A, dim=(0,))\n with self.assertRaisesRegex(RuntimeError, r'.*not supported.*'):\n torch.linalg.matrix_norm(A, ord=0)\n with self.assertRaisesRegex(RuntimeError, r'.*not supported.*'):\n torch.linalg.matrix_norm(A, ord=3.0)\n\n # Test dim=None behavior\n ref = torch.linalg.norm(A, dim=(-2, -1))\n res = torch.linalg.matrix_norm(A)\n self.assertEqual(ref, res)\n\n # Test that linal.norm gives the same result as numpy when inputs\n # contain extreme values (inf, -inf, nan)\n @unittest.skipIf(IS_WINDOWS, \"Skipped on Windows!\")\n @unittest.skipIf(IS_MACOS, \"Skipped on MacOS!\")\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n def test_norm_extreme_values(self, device):\n vector_ords = [0, 1, 2, 3, inf, -1, -2, -3, -inf]\n matrix_ords = ['fro', 'nuc', 1, 2, inf, -1, -2, -inf]\n vectors = []\n matrices = []\n for pair in itertools.product([inf, -inf, 0.0, nan, 1.0], repeat=2):\n vectors.append(list(pair))\n matrices.append([[pair[0], pair[1]]])\n matrices.append([[pair[0]], [pair[1]]])\n for vector in vectors:\n x = torch.tensor(vector).to(device)\n x_n = x.cpu().numpy()\n for ord in vector_ords:\n msg = f'ord={ord}, vector={vector}'\n result = torch.linalg.norm(x, ord=ord)\n result_n = np.linalg.norm(x_n, ord=ord)\n self.assertEqual(result, result_n, msg=msg)\n\n # TODO: Remove this function once the broken cases are fixed\n def is_broken_matrix_norm_case(ord, x):\n if self.device_type == 'cuda':\n if x.size() == torch.Size([1, 2]):\n if ord in ['nuc', 2, -2] and isnan(x[0][0]) and x[0][1] == 1:\n # These cases are broken because of an issue with svd\n # https://github.com/pytorch/pytorch/issues/43567\n return True\n if ord in ['nuc', 2, -2]:\n # These cases are broken because of another issue with svd\n # https://github.com/pytorch/pytorch/issues/52633\n return True\n return False\n\n for matrix in matrices:\n x = torch.tensor(matrix).to(device)\n x_n = x.cpu().numpy()\n for ord in 
matrix_ords:\n msg = f'ord={ord}, matrix={matrix}'\n if is_broken_matrix_norm_case(ord, x):\n continue\n else:\n result = torch.linalg.norm(x, ord=ord)\n result_n = np.linalg.norm(x_n, ord=ord)\n self.assertEqual(result, result_n, msg=msg)\n\n # Test degenerate shape results match numpy for linalg.norm vector norms\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @unittest.skipIf(TEST_WITH_ASAN, \"Skipped on ASAN since it checks for undefined behavior.\")\n @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)\n def test_norm_vector_degenerate_shapes(self, device, dtype):\n def run_test_case(input, ord, dim, keepdim):\n msg = f'input.size()={input.size()}, ord={ord}, dim={dim}, keepdim={keepdim}, dtype={dtype}'\n should_error = False\n if ord is not None and ord < 0:\n should_error = True\n elif ord == inf:\n if dim is None or input.size(dim) == 0:\n should_error = True\n\n if should_error:\n with self.assertRaises(RuntimeError):\n torch.linalg.norm(input, ord, dim, keepdim)\n else:\n input_numpy = input.cpu().numpy()\n result_numpy = np.linalg.norm(input_numpy, ord, dim, keepdim)\n result = torch.linalg.norm(input, ord, dim, keepdim)\n self.assertEqual(result, result_numpy, msg=msg)\n\n ord_vector = [0, 0.5, 1, 2, 3, inf, -0.5, -1, -2, -3, -inf, None]\n S = 10\n test_cases = [\n # input size, dim\n ((0, ), None),\n ((0, S), 0),\n ((0, S), 1),\n ((S, 0), 0),\n ((S, 0), 1),\n ]\n for keepdim in [True, False]:\n for input_size, dim in test_cases:\n input = torch.randn(*input_size, dtype=dtype, device=device)\n for ord in ord_vector:\n run_test_case(input, ord, dim, keepdim)\n\n # Test degenerate shape results match numpy for linalg.norm matrix norms\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)\n def test_norm_matrix_degenerate_shapes(self, device, dtype):\n def run_test_case(input, ord, dim, keepdim, should_error):\n msg = f'input.size()={input.size()}, ord={ord}, dim={dim}, keepdim={keepdim}, dtype={dtype}'\n input_numpy = input.cpu().numpy()\n ops = [torch.linalg.norm]\n\n if ord is not None and dim is not None:\n ops.append(torch.linalg.matrix_norm)\n\n if should_error:\n with self.assertRaises(ValueError):\n np.linalg.norm(input_numpy, ord, dim, keepdim)\n for op in ops:\n with self.assertRaises(IndexError):\n op(input, ord, dim, keepdim)\n else:\n result_numpy = np.linalg.norm(input_numpy, ord, dim, keepdim)\n for op in ops:\n result = op(input, ord, dim, keepdim)\n self.assertEqual(result, result_numpy, msg=msg)\n\n ord_matrix = ['fro', 'nuc', 1, 2, inf, -1, -2, -inf, None]\n S = 10\n test_cases = [\n # input size, p settings that cause error, dim\n ((0, 0), [1, 2, inf, -1, -2, -inf], None),\n ((0, S), [2, inf, -2, -inf], None),\n ((S, 0), [1, 2, -1, -2], None),\n ((S, S, 0), [], (0, 1)),\n ((1, S, 0), [], (0, 1)),\n ((0, 0, S), [1, 2, inf, -1, -2, -inf], (0, 1)),\n ((0, 0, S), [1, 2, inf, -1, -2, -inf], (1, 0)),\n ]\n\n for keepdim in [True, False]:\n for input_size, error_ords, dim in test_cases:\n input = torch.randn(*input_size, dtype=dtype, device=device)\n for ord in ord_matrix:\n run_test_case(input, ord, dim, keepdim, ord in error_ords)\n\n def test_norm_fastpaths(self, device):\n x = torch.randn(3, 5, device=device)\n\n # slow path\n result = torch.linalg.norm(x, 4.5, 1)\n expected = torch.pow(x.abs().pow(4.5).sum(1), 1.0 / 4.5)\n self.assertEqual(result, expected)\n\n # fast 0-norm\n result = torch.linalg.norm(x, 0, 1)\n expected = (x != 0).type_as(x).sum(1)\n self.assertEqual(result, expected)\n\n # fast 
1-norm\n result = torch.linalg.norm(x, 1, 1)\n expected = x.abs().sum(1)\n self.assertEqual(result, expected)\n\n # fast 2-norm\n result = torch.linalg.norm(x, 2, 1)\n expected = torch.sqrt(x.pow(2).sum(1))\n self.assertEqual(result, expected)\n\n # fast 3-norm\n result = torch.linalg.norm(x, 3, 1)\n expected = torch.pow(x.pow(3).abs().sum(1), 1.0 / 3.0)\n self.assertEqual(result, expected)\n\n @skipCPUIfNoLapack\n @skipCUDAIfNoMagma\n @dtypes(*floating_and_complex_types())\n def test_old_eig_basic(self, device, dtype):\n a = torch.tensor([[1.96, 0.00, 0.00, 0.00, 0.00],\n [-6.49, 3.80, 0.00, 0.00, 0.00],\n [-0.47, -6.39, 4.17, 0.00, 0.00],\n [-7.20, 1.50, -1.51, 5.70, 0.00],\n [-0.65, -6.34, 2.67, 1.80, -7.10]],\n dtype=dtype, device=device).t()\n e = torch.eig(a)[0]\n ee, vv = torch.eig(a, True)\n te = torch.tensor((), dtype=dtype, device=device)\n tv = torch.tensor((), dtype=dtype, device=device)\n eee, vvv = torch.eig(a, True, out=(te, tv))\n self.assertEqual(e, ee, atol=1e-12, rtol=0)\n self.assertEqual(ee, eee, atol=1e-12, rtol=0)\n self.assertEqual(ee, te, atol=1e-12, rtol=0)\n self.assertEqual(vv, vvv, atol=1e-12, rtol=0)\n self.assertEqual(vv, tv, atol=1e-12, rtol=0)\n #\n # compare with numpy\n np_e, np_v = np.linalg.eig(a.cpu().numpy())\n if dtype.is_complex:\n self.assertEqual(ee, np_e)\n else:\n # np_e.shape == (n, 2), where each column contain the real and\n # imaginary parts of the result\n self.assertEqual(ee[:, 0], np_e) # real part\n self.assertEqual(ee[:, 1], torch.zeros(ee.shape[0], dtype=dtype)) # imaginary part\n self.assertEqual(vv, np_v)\n\n @skipCPUIfNoLapack\n @skipCUDAIfNoMagma\n @dtypes(torch.double, torch.float)\n def test_old_eig_reuse(self, device, dtype):\n X = torch.randn(4, 4, dtype=dtype, device=device)\n X = torch.mm(X.t(), X)\n e = torch.zeros(4, 2, dtype=dtype, device=device)\n v = torch.zeros(4, 4, dtype=dtype, device=device)\n torch.eig(X, True, out=(e, v))\n Xhat = np.matmul(np.matmul(v.cpu(), torch.diag(e.select(1, 0)).cpu()), v.t().cpu())\n if dtype is torch.float:\n atol = 1e-7\n rtol = 1e-5\n else:\n atol = 1e-8\n rtol = 0\n self.assertEqual(X, Xhat, atol=atol, rtol=rtol, msg='VeV\\' wrong')\n self.assertTrue(v.is_contiguous(), 'V is not contiguous')\n\n torch.eig(X, True, out=(e, v))\n Xhat = np.matmul(v.cpu(), np.matmul(e.select(1, 0).diag().cpu(), v.t().cpu()))\n self.assertEqual(X, Xhat, atol=atol, rtol=rtol, msg='VeV\\' wrong')\n self.assertTrue(v.is_contiguous(), 'V is not contiguous')\n\n @skipCPUIfNoLapack\n @skipCUDAIfNoMagma\n @dtypes(torch.double, torch.float)\n def test_old_eig_non_contiguous(self, device, dtype):\n X = torch.randn(4, 4, dtype=dtype, device=device)\n X = torch.mm(X.t(), X)\n e = torch.zeros(4, 2, 2, dtype=dtype, device=device)[:, 1]\n v = torch.zeros(4, 2, 4, dtype=dtype, device=device)[:, 1]\n self.assertFalse(v.is_contiguous(), 'V is contiguous')\n self.assertFalse(e.is_contiguous(), 'E is contiguous')\n torch.eig(X, True, out=(e, v))\n Xhat = np.matmul(np.matmul(v.cpu(), torch.diag(e.cpu().select(1, 0))), v.t().cpu())\n if dtype is torch.float:\n atol = 1e-7\n rtol = 1e-5\n else:\n atol = 1e-8\n rtol = 0\n self.assertEqual(X, Xhat, atol=atol, rtol=rtol, msg='VeV\\' wrong')\n\n @skipCPUIfNoLapack\n @skipCUDAIfNoMagma\n @dtypes(torch.double, torch.float)\n def test_old_eig_invalid_input(self, device, dtype):\n # test invalid input\n self.assertRaisesRegex(\n RuntimeError,\n 'input should be 2 dimensional',\n lambda: torch.eig(torch.ones((2))))\n self.assertRaisesRegex(\n RuntimeError,\n 'input should be square',\n 
lambda: torch.eig(torch.ones((2, 3))))\n self.assertRaisesRegex(\n RuntimeError,\n 'input should not contain infs or NaNs',\n lambda: torch.eig(np.inf * torch.ones((2, 2))))\n self.assertRaisesRegex(\n RuntimeError,\n 'input should not contain infs or NaNs',\n lambda: torch.eig(np.nan * torch.ones((2, 2))))\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.double, torch.float)\n def test_old_eig_out(self, device, dtype):\n # the out version of torch.eig needs to be tested manually: we can't\n # use the \"test_out=True\" parameter to tensor_op_tests because the\n # signature is irregular (since we have *two* output vectors)\n t = torch.randn(10, 10, dtype=dtype, device=device)\n evals, evecs = torch.eig(t, eigenvectors=True)\n #\n # check that the out= version computes the same values as the normal one\n out_evals = torch.empty_like(evals)\n out_evecs = torch.empty_like(evecs)\n evals2, evecs2 = torch.eig(t, eigenvectors=True, out=(out_evals, out_evecs))\n # check that the out tensors were used in-place\n self.assertEqual(evals2.data_ptr(), out_evals.data_ptr())\n self.assertEqual(evecs2.data_ptr(), out_evecs.data_ptr())\n # check that the result is the same as the non-out version\n self.assertEqual(evals, out_evals)\n self.assertEqual(evecs, out_evecs)\n #\n # check what happens in the eigenvectors=False case\n out_evals = torch.empty_like(evals)\n out_evecs = torch.tensor([1, 2, 3], dtype=dtype, device=device)\n evals2, evecs2 = torch.eig(t, eigenvectors=False, out=(out_evals, out_evecs))\n # check that the out_evals was used in-place\n self.assertEqual(evals2.data_ptr(), out_evals.data_ptr())\n self.assertEqual(evals, out_evals)\n # check that out_evecs was NOT touched at all\n assert out_evecs.tolist() == [1, 2, 3]\n #\n # check that we complain if we pass an out vector of the wrong dtype\n wrong_out = torch.empty((0, 0), dtype=int)\n with self.assertRaisesRegex(RuntimeError, r\"Expected .* but got .*\"):\n torch.eig(t, eigenvectors=True, out=(wrong_out, out_evecs))\n with self.assertRaisesRegex(RuntimeError, r\"Expected .* but got .*\"):\n torch.eig(t, eigenvectors=True, out=(out_evals, wrong_out))\n\n @skipCPUIfNoLapack\n @skipCUDAIfNoMagma\n # NumPy computes only in float64 and complex128 precisions\n # for float32 or complex64 results might be very different from float64 or complex128\n @dtypes(torch.float64, torch.complex128)\n def test_eig_numpy(self, device, dtype):\n def run_test(shape, *, symmetric=False):\n from torch.testing._internal.common_utils import random_symmetric_matrix\n\n if not dtype.is_complex and symmetric:\n # for symmetric real-valued inputs eigenvalues and eigenvectors have imaginary part equal to zero\n # unlike NumPy the result is not cast to float32 or float64 dtype in this case\n a = random_symmetric_matrix(shape[-1], *shape[:-2], dtype=dtype, device=device)\n else:\n a = make_tensor(shape, dtype=dtype, device=device)\n\n actual = torch.linalg.eig(a)\n\n # compare with NumPy\n # the eigenvalues are not necessarily ordered\n # so order of NumPy and PyTorch can be different\n expected = np.linalg.eig(a.cpu().numpy())\n\n # sort NumPy output\n ind = np.argsort(expected[0], axis=-1)[::-1]\n expected = (np.take_along_axis(expected[0], ind, axis=-1), np.take_along_axis(expected[1], ind[:, None], axis=-1))\n\n # sort PyTorch output\n # torch.argsort doesn't work with complex inputs, NumPy sorting on CPU is used instead\n # RuntimeError: _th_sort not supported on CUDAType for ComplexDouble\n # RuntimeError: \"sorting_kernel_method_name\" not implemented 
for 'ComplexDouble'\n ind = np.argsort(actual[0].cpu().numpy(), axis=-1)[::-1]\n actual_np = [x.cpu().numpy() for x in actual]\n sorted_actual = (\n np.take_along_axis(actual_np[0], ind, axis=-1),\n np.take_along_axis(actual_np[1], ind[:, None], axis=-1))\n\n self.assertEqual(expected[0], sorted_actual[0], exact_dtype=False)\n self.assertEqual(abs(expected[1]), abs(sorted_actual[1]), exact_dtype=False)\n\n shapes = [(0, 0), # Empty matrix\n (5, 5), # Single matrix\n (0, 0, 0), (0, 5, 5), # Zero batch dimension tensors\n (2, 5, 5), # 3-dim tensors\n (2, 1, 5, 5)] # 4-dim tensors\n for shape in shapes:\n run_test(shape)\n run_test(shape, symmetric=True)\n\n @onlyCUDA\n @skipCUDAIfNoMagma\n @dtypes(*floating_and_complex_types())\n def test_eig_compare_backends(self, device, dtype):\n def run_test(shape, *, symmetric=False):\n from torch.testing._internal.common_utils import random_symmetric_matrix\n\n if not dtype.is_complex and symmetric:\n # for symmetric real-valued inputs eigenvalues and eigenvectors have imaginary part equal to zero\n a = random_symmetric_matrix(shape[-1], *shape[:-2], dtype=dtype, device=device)\n else:\n a = make_tensor(shape, dtype=dtype, device=device)\n\n actual = torch.linalg.eig(a)\n\n complementary_device = 'cpu'\n\n # compare with CPU\n expected = torch.linalg.eig(a.to(complementary_device))\n self.assertEqual(expected[0], actual[0])\n self.assertEqual(expected[1], actual[1])\n\n shapes = [(0, 0), # Empty matrix\n (5, 5), # Single matrix\n (0, 0, 0), (0, 5, 5), # Zero batch dimension tensors\n (2, 5, 5), # 3-dim tensors\n (2, 1, 5, 5)] # 4-dim tensors\n for shape in shapes:\n run_test(shape)\n run_test(shape, symmetric=True)\n\n @slowTest\n @onlyCUDA\n @skipCUDAIfNoMagma\n @dtypes(torch.float32)\n def test_eig_check_magma(self, device, dtype):\n # For CUDA inputs only matrices of size larger than 2048x2048 actually call MAGMA library\n shape = (2049, 2049)\n a = make_tensor(shape, dtype=dtype, device=device)\n w, v = torch.linalg.eig(a)\n # check correctness using eigendecomposition identity\n self.assertEqual(a.to(v.dtype) @ v, w * v, atol=1e-3, rtol=1e-3)\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(*floating_and_complex_types())\n def test_eig_errors_and_warnings(self, device, dtype):\n # eig requires the input to be at least 2 dimensional tensor\n a = make_tensor(2, dtype=dtype, device=device)\n with self.assertRaisesRegex(RuntimeError, \"must have at least 2 dimensions\"):\n torch.linalg.eig(a)\n\n # eig requires a square matrix\n a = make_tensor((2, 3), dtype=dtype, device=device)\n with self.assertRaisesRegex(RuntimeError, \"must be batches of square matrices\"):\n torch.linalg.eig(a)\n\n # if out tensor with floating dtype is passed for complex output an error is thrown\n if not dtype.is_complex:\n # The characteristic equation is p(λ) = λ^2 − 2λ + 5 = 0, with roots λ = 1±2i\n a = torch.tensor([[3., -2.], [4., -1.]], dtype=dtype, device=device)\n out0 = torch.empty(0, device=device, dtype=dtype)\n out1 = torch.empty(0, device=device, dtype=dtype)\n with self.assertRaisesRegex(RuntimeError, \"Expected eigenvalues to be safely castable\"):\n torch.linalg.eig(a, out=(out0, out1))\n\n out0 = torch.empty(0, device=device, dtype=torch.complex128)\n with self.assertRaisesRegex(RuntimeError, \"Expected eigenvectors to be safely castable\"):\n torch.linalg.eig(a, out=(out0, out1))\n\n # dtypes should be safely castable\n a = make_tensor((3, 3), dtype=dtype, device=device)\n out0 = torch.empty(0, dtype=torch.int, device=device)\n out1 = torch.empty(0, 
dtype=torch.int, device=device)\n with self.assertRaisesRegex(RuntimeError, \"but got eigenvalues with dtype Int\"):\n torch.linalg.eig(a, out=(out0, out1))\n\n out0 = torch.empty(0, dtype=torch.complex128, device=device)\n with self.assertRaisesRegex(RuntimeError, \"but got eigenvectors with dtype Int\"):\n torch.linalg.eig(a, out=(out0, out1))\n\n # if non-empty out tensor with wrong shape is passed a warning is given\n a = make_tensor((3, 3), dtype=dtype, device=device)\n out0 = torch.empty(1, device=device, dtype=torch.complex128)\n out1 = torch.empty(1, device=device, dtype=torch.complex128)\n with warnings.catch_warnings(record=True) as w:\n # Trigger warning\n torch.linalg.eig(a, out=(out0, out1))\n # Check warning occurs\n self.assertEqual(len(w), 2)\n self.assertTrue(\"An output with one or more elements was resized\" in str(w[-1].message))\n self.assertTrue(\"An output with one or more elements was resized\" in str(w[-2].message))\n\n # device should match\n if torch.cuda.is_available():\n wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'\n out_w = torch.empty(0, device=wrong_device, dtype=torch.complex128)\n out_v = torch.empty(0, device=device, dtype=torch.complex128)\n with self.assertRaisesRegex(RuntimeError, \"tensors to be on the same device\"):\n torch.linalg.eig(a, out=(out_w, out_v))\n out_w = torch.empty(0, device=device, dtype=torch.complex128)\n out_v = torch.empty(0, device=wrong_device, dtype=torch.complex128)\n with self.assertRaisesRegex(RuntimeError, \"tensors to be on the same device\"):\n torch.linalg.eig(a, out=(out_w, out_v))\n\n @skipCPUIfNoLapack\n @skipCUDAIfNoMagma\n @dtypes(*floating_and_complex_types())\n def test_eig_with_nan(self, device, dtype):\n for val in [np.inf, np.nan]:\n for batch_dim in [(), (10,)]:\n a = make_tensor((*batch_dim, 5, 5), device=device, dtype=dtype)\n a[..., -1, -1] = val\n\n with self.assertRaisesRegex(RuntimeError, \"torch.linalg.eig: input tensor should not\"):\n torch.linalg.eig(a)\n\n @skipCPUIfNoLapack\n @skipCUDAIfNoMagma\n # NumPy computes only in float64 and complex128 precisions\n # for float32 or complex64 results might be very different from float64 or complex128\n @dtypes(torch.float64, torch.complex128)\n def test_eigvals_numpy(self, device, dtype):\n def run_test(shape, *, symmetric=False):\n from torch.testing._internal.common_utils import random_symmetric_matrix\n\n if not dtype.is_complex and symmetric:\n # for symmetric real-valued inputs eigenvalues and eigenvectors have imaginary part equal to zero\n # unlike NumPy the result is not cast to float32 or float64 dtype in this case\n a = random_symmetric_matrix(shape[-1], *shape[:-2], dtype=dtype, device=device)\n else:\n a = make_tensor(shape, dtype=dtype, device=device)\n\n actual = torch.linalg.eigvals(a)\n\n # compare with NumPy\n # the eigenvalues are not necessarily ordered\n # so order of NumPy and PyTorch can be different\n expected = np.linalg.eigvals(a.cpu().numpy())\n\n # sort NumPy output\n ind = np.argsort(expected, axis=-1)[::-1]\n expected = np.take_along_axis(expected, ind, axis=-1)\n\n # sort PyTorch output\n # torch.argsort doesn't work with complex inputs, NumPy sorting on CPU is used instead\n # RuntimeError: _th_sort not supported on CUDAType for ComplexDouble\n # RuntimeError: \"sorting_kernel_method_name\" not implemented for 'ComplexDouble'\n ind = np.argsort(actual.cpu().numpy(), axis=-1)[::-1]\n actual_np = actual.cpu().numpy()\n sorted_actual = np.take_along_axis(actual_np, ind, axis=-1)\n\n self.assertEqual(expected, 
sorted_actual, exact_dtype=False)\n\n shapes = [(0, 0), # Empty matrix\n (5, 5), # Single matrix\n (0, 0, 0), (0, 5, 5), # Zero batch dimension tensors\n (2, 5, 5), # 3-dim tensors\n (2, 1, 5, 5)] # 4-dim tensors\n for shape in shapes:\n run_test(shape)\n run_test(shape, symmetric=True)\n\n @onlyCUDA\n @skipCUDAIfNoMagma\n @dtypes(*floating_and_complex_types())\n def test_eigvals_compare_backends(self, device, dtype):\n def run_test(shape, *, symmetric=False):\n from torch.testing._internal.common_utils import random_symmetric_matrix\n\n if not dtype.is_complex and symmetric:\n # for symmetric real-valued inputs eigenvalues and eigenvectors have imaginary part equal to zero\n a = random_symmetric_matrix(shape[-1], *shape[:-2], dtype=dtype, device=device)\n else:\n a = make_tensor(shape, dtype=dtype, device=device)\n\n actual = torch.linalg.eigvals(a)\n\n complementary_device = 'cpu'\n\n # compare with CPU\n expected = torch.linalg.eigvals(a.to(complementary_device))\n self.assertEqual(expected, actual)\n\n # check out= variant\n complex_dtype = dtype\n if not dtype.is_complex:\n complex_dtype = torch.complex128 if dtype == torch.float64 else torch.complex64\n out = torch.empty(0, dtype=complex_dtype, device=device)\n ans = torch.linalg.eigvals(a, out=out)\n self.assertEqual(ans, out)\n self.assertEqual(expected.to(complex_dtype), out)\n\n # check non-contiguous out\n if a.numel() > 0:\n out = torch.empty(2 * shape[0], *shape[1:-1], dtype=complex_dtype, device=device)[::2]\n self.assertFalse(out.is_contiguous())\n ans = torch.linalg.eigvals(a, out=out)\n self.assertEqual(ans, out)\n self.assertEqual(expected.to(complex_dtype), out)\n\n shapes = [(0, 0), # Empty matrix\n (5, 5), # Single matrix\n (0, 0, 0), (0, 5, 5), # Zero batch dimension tensors\n (2, 5, 5), # 3-dim tensors\n (2, 1, 5, 5)] # 4-dim tensors\n for shape in shapes:\n run_test(shape)\n run_test(shape, symmetric=True)\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(*floating_and_complex_types())\n def test_eigvals_errors_and_warnings(self, device, dtype):\n # eig requires the input to be at least 2 dimensional tensor\n a = make_tensor(2, dtype=dtype, device=device)\n with self.assertRaisesRegex(RuntimeError, \"must have at least 2 dimensions\"):\n torch.linalg.eigvals(a)\n\n # eig requires a square matrix\n a = make_tensor((2, 3), dtype=dtype, device=device)\n with self.assertRaisesRegex(RuntimeError, \"must be batches of square matrices\"):\n torch.linalg.eigvals(a)\n\n # if out tensor with floating dtype is passed for complex output an error is thrown\n if not dtype.is_complex:\n # The characteristic equation is p(λ) = λ^2 − 2λ + 5 = 0, with roots λ = 1±2i\n a = torch.tensor([[3., -2.], [4., -1.]], dtype=dtype, device=device)\n out = torch.empty(0, device=device, dtype=dtype)\n with self.assertRaisesRegex(RuntimeError, \"Expected eigenvalues to be safely castable\"):\n torch.linalg.eigvals(a, out=out)\n\n # dtypes should be safely castable\n a = make_tensor((3, 3), dtype=dtype, device=device)\n out = torch.empty(0, dtype=torch.int, device=device)\n with self.assertRaisesRegex(RuntimeError, \"but got eigenvalues with dtype Int\"):\n torch.linalg.eigvals(a, out=out)\n\n # if non-empty out tensor with wrong shape is passed a warning is given\n out = torch.empty(1, device=device, dtype=torch.complex128)\n with warnings.catch_warnings(record=True) as w:\n # Trigger warning\n torch.linalg.eigvals(a, out=out)\n # Check warning occurs\n self.assertEqual(len(w), 1)\n self.assertTrue(\"An output with one or more elements was 
resized\" in str(w[-1].message))\n\n # device should match\n if torch.cuda.is_available():\n wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'\n out_w = torch.empty(0, device=wrong_device, dtype=torch.complex128)\n with self.assertRaisesRegex(RuntimeError, \"tensors to be on the same device\"):\n torch.linalg.eigvals(a, out=out_w)\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n def test_norm_old(self, device):\n def gen_error_message(input_size, p, keepdim, dim=None):\n return \"norm failed for input size %s, p=%s, keepdim=%s, dim=%s\" % (\n input_size, p, keepdim, dim)\n\n for keepdim in [False, True]:\n # full reduction\n x = torch.randn(25, device=device)\n xn = x.cpu().numpy()\n for p in [0, 1, 2, 3, 4, inf, -inf, -1, -2, -3, 1.5]:\n res = x.norm(p, keepdim=keepdim).cpu()\n expected = np.linalg.norm(xn, p, keepdims=keepdim)\n self.assertEqual(res, expected, atol=1e-5, rtol=0, msg=gen_error_message(x.size(), p, keepdim))\n\n # one dimension\n x = torch.randn(25, 25, device=device)\n xn = x.cpu().numpy()\n for p in [0, 1, 2, 3, 4, inf, -inf, -1, -2, -3]:\n dim = 1\n res = x.norm(p, dim, keepdim=keepdim).cpu()\n expected = np.linalg.norm(xn, p, dim, keepdims=keepdim)\n msg = gen_error_message(x.size(), p, keepdim, dim)\n self.assertEqual(res.shape, expected.shape, msg=msg)\n self.assertEqual(res, expected, msg=msg)\n\n # matrix norm\n for p in ['fro', 'nuc']:\n res = x.norm(p, keepdim=keepdim).cpu()\n expected = np.linalg.norm(xn, p, keepdims=keepdim)\n msg = gen_error_message(x.size(), p, keepdim)\n self.assertEqual(res.shape, expected.shape, msg=msg)\n self.assertEqual(res, expected, msg=msg)\n\n # zero dimensions\n x = torch.randn((), device=device)\n xn = x.cpu().numpy()\n res = x.norm(keepdim=keepdim).cpu()\n expected = np.linalg.norm(xn, keepdims=keepdim)\n msg = gen_error_message(x.size(), None, keepdim)\n self.assertEqual(res.shape, expected.shape, msg=msg)\n self.assertEqual(res, expected, msg=msg)\n\n # larger tensor sanity check\n self.assertEqual(\n 2 * torch.norm(torch.ones(10000), keepdim=keepdim),\n torch.norm(torch.ones(40000), keepdim=keepdim))\n\n # matrix norm with non-square >2-D tensors, all combinations of reduction dims\n x = torch.randn(5, 6, 7, 8, device=device)\n xn = x.cpu().numpy()\n for p in ['fro', 'nuc']:\n for dim in itertools.product(*[list(range(4))] * 2):\n if dim[0] == dim[1]:\n continue\n res = x.norm(p=p, dim=dim, keepdim=keepdim).cpu()\n expected = np.linalg.norm(xn, ord=p, axis=dim, keepdims=keepdim)\n msg = gen_error_message(x.size(), p, keepdim, dim)\n self.assertEqual(res.shape, expected.shape, msg=msg)\n self.assertEqual(res, expected, msg=msg)\n\n # Test that torch.norm with p=+/-inf propagates NaN\n def test_norm_old_nan_propagation(self, device):\n ords = [inf, -inf]\n for pair in itertools.product([0.0, nan, 1.0], repeat=2):\n x = torch.tensor(list(pair), device=device)\n for ord in ords:\n result = torch.norm(x, p=ord)\n result_check = torch.linalg.norm(x, ord=ord)\n self.assertEqual(result, result_check)\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n def test_norm_complex_old(self, device):\n def gen_error_message(input_size, p, keepdim, dim=None):\n return \"complex norm failed for input size %s, p=%s, keepdim=%s, dim=%s\" % (\n input_size, p, keepdim, dim)\n\n for keepdim in [False, True]:\n # vector norm\n x = torch.randn(25, device=device) + 1j * torch.randn(25, device=device)\n xn = x.cpu().numpy()\n for p in [0, 1, 2, 3, inf, -1, -2, -3, -inf]:\n res = x.norm(p, keepdim=keepdim).cpu()\n expected = np.linalg.norm(xn, p, 
keepdims=keepdim)\n msg = gen_error_message(x.size(), p, keepdim)\n self.assertEqual(res.shape, expected.shape, msg=msg)\n self.assertEqual(res, expected, msg=msg)\n\n # matrix norm\n x = torch.randn(25, 25, device=device) + 1j * torch.randn(25, 25, device=device)\n xn = x.cpu().numpy()\n for p in ['nuc', 'fro']:\n res = x.norm(p, keepdim=keepdim).cpu()\n expected = np.linalg.norm(xn, p, keepdims=keepdim)\n msg = gen_error_message(x.size(), p, keepdim)\n self.assertEqual(res.shape, expected.shape, msg=msg)\n self.assertEqual(res, expected, msg=msg, rtol=1.3e-6, atol=3e-4)\n\n # Ensure torch.norm with p='fro' and p=2 give the same results for mutually supported input combinations\n @dtypes(torch.float)\n def test_norm_fro_2_equivalence_old(self, device, dtype):\n input_sizes = [\n (0,),\n (10,),\n (0, 0),\n (4, 30),\n (0, 45),\n (100, 0),\n (45, 10, 23),\n (0, 23, 59),\n (23, 0, 37),\n (34, 58, 0),\n (0, 0, 348),\n (0, 3434, 0),\n (0, 0, 0),\n (5, 3, 8, 1, 3, 5)]\n\n for input_size in input_sizes:\n a = make_tensor(input_size, device, dtype, low=-9, high=9)\n\n # Try full reduction\n dim_settings = [None]\n\n # Try all possible 1-D reductions\n dim_settings += list(range(-a.dim(), a.dim()))\n\n def wrap_dim(dim, ndims):\n assert (dim < ndims) and (dim >= -ndims)\n if dim >= 0:\n return dim\n else:\n return dim + ndims\n\n # Try all possible 2-D reductions\n dim_settings += [\n (d0, d1) for d0, d1 in itertools.combinations(range(-a.dim(), a.dim()), 2)\n if wrap_dim(d0, a.dim()) != wrap_dim(d1, a.dim())]\n\n for dim in dim_settings:\n for keepdim in [True, False]:\n a_norm_2 = torch.norm(a, p=2, dim=dim, keepdim=keepdim)\n a_norm_fro = torch.norm(a, p='fro', dim=dim, keepdim=keepdim)\n self.assertEqual(a_norm_fro, a_norm_2)\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n def test_nuclear_norm_axes_small_brute_force_old(self, device):\n def check_single_nuclear_norm(x, axes):\n if self.device_type != 'cpu' and randrange(100) < 95:\n return # too many cpu <==> device copies\n\n a = np.array(x.cpu(), copy=False)\n expected = np.linalg.norm(a, \"nuc\", axis=axes)\n\n ans = torch.norm(x, \"nuc\", dim=axes)\n self.assertTrue(ans.is_contiguous())\n self.assertEqual(ans.shape, expected.shape)\n self.assertEqual(ans.cpu(), expected, rtol=1e-02, atol=1e-03, equal_nan=True)\n\n out = torch.zeros(expected.shape, dtype=x.dtype, device=x.device)\n ans = torch.norm(x, \"nuc\", dim=axes, out=out)\n self.assertIs(ans, out)\n self.assertTrue(ans.is_contiguous())\n self.assertEqual(ans.shape, expected.shape)\n self.assertEqual(ans.cpu(), expected, rtol=1e-02, atol=1e-03, equal_nan=True)\n\n for n in range(1, 3):\n for m in range(1, 3):\n for axes in itertools.permutations([0, 1], 2):\n # 2d, inner dimensions C\n x = torch.randn(n, m, device=device)\n check_single_nuclear_norm(x, axes)\n\n # 2d, inner dimensions Fortran\n x = torch.randn(m, n, device=device).mT\n check_single_nuclear_norm(x, axes)\n\n # 2d, inner dimensions non-contiguous\n x = torch.randn(n, 2 * m, device=device)[:, ::2]\n check_single_nuclear_norm(x, axes)\n\n # 2d, all dimensions non-contiguous\n x = torch.randn(7 * n, 2 * m, device=device)[::7, ::2]\n check_single_nuclear_norm(x, axes)\n\n for o in range(1, 3):\n for axes in itertools.permutations([0, 1, 2], 2):\n # 3d, inner dimensions C\n x = torch.randn(o, n, m, device=device)\n check_single_nuclear_norm(x, axes)\n\n # 3d, inner dimensions Fortran\n x = torch.randn(o, m, n, device=device).mT\n check_single_nuclear_norm(x, axes)\n\n # 3d, inner dimensions non-contiguous\n x = torch.randn(o, 
n, 2 * m, device=device)[:, :, ::2]\n check_single_nuclear_norm(x, axes)\n\n # 3d, all dimensions non-contiguous\n x = torch.randn(7 * o, 5 * n, 2 * m, device=device)[::7, ::5, ::2]\n check_single_nuclear_norm(x, axes)\n\n for r in range(1, 3):\n for axes in itertools.permutations([0, 1, 2, 3], 2):\n # 4d, inner dimensions C\n x = torch.randn(r, o, n, m, device=device)\n check_single_nuclear_norm(x, axes)\n\n # 4d, inner dimensions Fortran\n x = torch.randn(r, o, n, m, device=device).mT\n check_single_nuclear_norm(x, axes)\n\n # 4d, inner dimensions non-contiguous\n x = torch.randn(r, o, n, 2 * m, device=device)[:, :, :, ::2]\n check_single_nuclear_norm(x, axes)\n\n # 4d, all dimensions non-contiguous\n x = torch.randn(7 * r, 5 * o, 11 * n, 2 * m, device=device)[::7, ::5, ::11, ::2]\n check_single_nuclear_norm(x, axes)\n\n @skipCUDAIfNoMagma\n def test_nuclear_norm_exceptions_old(self, device):\n for lst in [], [1], [1, 2]:\n x = torch.tensor(lst, dtype=torch.double, device=device)\n for axes in (), (0,):\n self.assertRaises(RuntimeError, torch.norm, x, \"nuc\", axes)\n self.assertRaises(IndexError, torch.norm, x, \"nuc\", (0, 1))\n\n x = torch.tensor([[0, 1, 2], [3, 4, 5]], dtype=torch.double, device=device)\n self.assertRaisesRegex(RuntimeError, \"duplicate or invalid\", torch.norm, x, \"nuc\", (0, 0))\n self.assertRaisesRegex(IndexError, \"Dimension out of range\", torch.norm, x, \"nuc\", (0, 2))\n\n # ~~~ tests for torch.svd ~~~\n @skipCUDAIfNoMagmaAndNoCusolver\n @skipCPUIfNoLapack\n @dtypes(torch.double)\n def test_svd(self, device, dtype):\n def run_test(dims, some, compute_uv):\n x = torch.randn(*dims, dtype=dtype, device=device)\n outu = torch.empty(0, dtype=dtype, device=device)\n outs = torch.empty(0, dtype=dtype, device=device)\n outv = torch.empty(0, dtype=dtype, device=device)\n torch.svd(x, some=some, compute_uv=compute_uv, out=(outu, outs, outv))\n\n if compute_uv:\n if some:\n x_recon = torch.matmul(outu, torch.matmul(outs.diag_embed(), outv.mT))\n self.assertEqual(x, x_recon, atol=1e-8, rtol=0, msg='Incorrect reconstruction using U @ diag(S) @ V.T')\n else:\n narrow_u = outu[..., :min(*dims[-2:])]\n narrow_v = outv[..., :min(*dims[-2:])]\n x_recon = torch.matmul(narrow_u, torch.matmul(outs.diag_embed(), narrow_v.mT))\n self.assertEqual(x, x_recon, atol=1e-8, rtol=0, msg='Incorrect reconstruction using U @ diag(S) @ V.T')\n else:\n _, singvals, _ = torch.svd(x, compute_uv=True)\n self.assertEqual(singvals, outs, msg='Singular values mismatch')\n self.assertEqual(outu, torch.zeros_like(outu), msg='U not zero')\n self.assertEqual(outv, torch.zeros_like(outv), msg='V not zero')\n\n resu, ress, resv = torch.svd(x, some=some, compute_uv=compute_uv)\n self.assertEqual(resu, outu, msg='outputs of svd and svd with out differ')\n self.assertEqual(ress, outs, msg='outputs of svd and svd with out differ')\n self.assertEqual(resv, outv, msg='outputs of svd and svd with out differ')\n\n # test non-contiguous\n x = torch.randn(*dims, dtype=dtype, device=device)\n if x.numel() > 0:\n n_dim = len(dims)\n # Reverse the batch dimensions and the matrix dimensions and then concat them\n x = x.permute(tuple(range(n_dim - 3, -1, -1)) + (n_dim - 1, n_dim - 2))\n assert not x.is_contiguous(), \"x is intentionally non-contiguous\"\n resu, ress, resv = torch.svd(x, some=some, compute_uv=compute_uv)\n if compute_uv:\n if some:\n x_recon = torch.matmul(resu, torch.matmul(ress.diag_embed(), resv.mT))\n self.assertEqual(x, x_recon, atol=1e-8, rtol=0, msg='Incorrect reconstruction using U @ diag(S) @ 
V.T')\n else:\n narrow_u = resu[..., :min(*dims[-2:])]\n narrow_v = resv[..., :min(*dims[-2:])]\n x_recon = torch.matmul(narrow_u, torch.matmul(ress.diag_embed(), narrow_v.mT))\n self.assertEqual(x, x_recon, atol=1e-8, rtol=0, msg='Incorrect reconstruction using U @ diag(S) @ V.T')\n else:\n _, singvals, _ = torch.svd(x, compute_uv=True)\n self.assertEqual(singvals, ress, msg='Singular values mismatch')\n self.assertEqual(resu, torch.zeros_like(resu), msg='U not zero')\n self.assertEqual(resv, torch.zeros_like(resv), msg='V not zero')\n\n shapes = [(0, 0), (5, 0), (0, 5), # empty matrices\n (0, 0, 0), (0, 5, 5), (0, 5, 3), # zero batch dimension\n (3, 3), (5, 3, 3), (7, 5, 3, 3), # square matrices\n (7, 3), (5, 7, 3), (7, 5, 7, 3), # fat matrices\n (3, 7), (5, 3, 7), (7, 5, 3, 7)] # thin matrices\n for dims, some, compute_uv in product(shapes, [True, False], [True, False]):\n run_test(dims, some, compute_uv)\n\n @skipCUDAIfNoMagmaAndNoCusolver\n @skipCPUIfNoLapack\n @dtypes(torch.float)\n def test_svd_no_singularvectors(self, device, dtype):\n for size in [(5, 5), (5, 20), (20, 5)]:\n a = torch.randn(*size, device=device, dtype=dtype)\n u, s_expect, v = torch.svd(a)\n u, s_actual, v = torch.svd(a, compute_uv=False)\n self.assertEqual(s_expect, s_actual, msg=\"Singular values don't match\")\n\n @skipCUDAIfNoMagmaAndNoCusolver\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.double)\n def test_svd_lowrank(self, device, dtype):\n from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix\n\n def run_subtest(actual_rank, matrix_size, batches, device, svd_lowrank, **options):\n density = options.pop('density', 1)\n if isinstance(matrix_size, int):\n rows = columns = matrix_size\n else:\n rows, columns = matrix_size\n if density == 1:\n a_input = random_lowrank_matrix(actual_rank, rows, columns, *batches, device=device, dtype=dtype)\n a = a_input\n else:\n assert batches == ()\n a_input = random_sparse_matrix(rows, columns, density, device=device, dtype=dtype)\n a = a_input.to_dense()\n\n q = min(*size)\n u, s, v = svd_lowrank(a_input, q=q, **options)\n\n # check if u, s, v is a SVD\n u, s, v = u[..., :q], s[..., :q], v[..., :q]\n A = u.matmul(s.diag_embed()).matmul(v.mT)\n self.assertEqual(A, a, rtol=1e-7, atol=2e-7)\n\n # check if svd_lowrank produces same singular values as torch.svd\n U, S, V = torch.svd(a)\n self.assertEqual(s.shape, S.shape)\n self.assertEqual(u.shape, U.shape)\n self.assertEqual(v.shape, V.shape)\n self.assertEqual(s, S)\n\n if density == 1:\n # actual_rank is known only for dense inputs\n #\n # check if pairs (u, U) and (v, V) span the same\n # subspaces, respectively\n u, s, v = u[..., :actual_rank], s[..., :actual_rank], v[..., :actual_rank]\n U, S, V = U[..., :actual_rank], S[..., :actual_rank], V[..., :actual_rank]\n self.assertEqual(u.mT.matmul(U).det().abs(), torch.ones(batches, device=device, dtype=dtype))\n self.assertEqual(v.mT.matmul(V).det().abs(), torch.ones(batches, device=device, dtype=dtype))\n\n all_batches = [(), (1,), (3,), (2, 3)]\n for actual_rank, size, all_batches in [\n (2, (17, 4), all_batches),\n (4, (17, 4), all_batches),\n (4, (17, 17), all_batches),\n (10, (100, 40), all_batches),\n (7, (1000, 1000), [()]),\n ]:\n # dense input\n for batches in all_batches:\n run_subtest(actual_rank, size, batches, device, torch.svd_lowrank)\n if size != size[::-1]:\n run_subtest(actual_rank, size[::-1], batches, device, torch.svd_lowrank)\n\n # sparse input\n for size in [(17, 4), (4, 17), (17, 17), (100, 40), (40, 
100), (1000, 1000)]:\n            for density in [0.005, 0.1]:\n                run_subtest(None, size, (), device, torch.svd_lowrank, density=density)\n\n        # jitting support\n        jitted = torch.jit.script(torch.svd_lowrank)\n        actual_rank, size, batches = 2, (17, 4), ()\n        run_subtest(actual_rank, size, batches, device, jitted)\n\n    @skipCUDAIfNoMagmaAndNoCusolver\n    @skipCPUIfNoLapack\n    @dtypes(torch.cfloat)\n    def test_svd_complex(self, device, dtype):\n        # this test verifies that torch.svd really returns V and not V.conj()\n        # see: https://github.com/pytorch/pytorch/issues/45821\n        t = torch.randn((10, 10), dtype=dtype, device=device)\n        U, S, V = torch.svd(t, some=False)\n        # verify that t ≈ t2\n        # t2 = U @ diag(S) @ Vᴴ\n        # Vᴴ is the conjugate transpose of V\n        t2 = U @ torch.diag(S).type(dtype) @ V.conj().T\n        self.assertEqual(t, t2)\n\n    def _test_svd_helper(self, shape, some, col_maj, device, dtype):\n        # test implementation below uses cpu unconditionally\n        if not torch._C.has_lapack:\n            reason = \"PyTorch compiled without Lapack\"\n            raise unittest.SkipTest(reason)\n        # To have accurate tests and less false positives on different CPUs and GPUs,\n        # we use double or complex double accuracy for CPU reference.\n        cpu_dtype = torch.complex128 if dtype.is_complex else torch.float64\n        cpu_tensor = torch.randn(shape, device='cpu', dtype=cpu_dtype)\n        device_tensor = cpu_tensor.to(device=device, dtype=dtype)\n        if col_maj:\n            cpu_tensor = cpu_tensor.t()\n            device_tensor = device_tensor.t()\n        cpu_result = torch.svd(cpu_tensor, some=some)\n        device_result = torch.svd(device_tensor, some=some)\n        m = min(cpu_tensor.shape[-2:])\n        # torch.svd returns torch.return_types.svd which is a tuple of (U, S, V).\n        # - When some==False, U[..., m:] can be arbitrary.\n        # - When some==True, U shape: [..., m], V shape: [m, m]\n        # - Signs are not deterministic. 
If the sign of a column of U is changed\n # then the corresponding column of the V has to be changed.\n # Thus here we only compare result[..., :m].abs() from CPU and device.\n for x, y in zip(cpu_result, device_result):\n self.assertEqual(x[..., :m].abs(), y[..., :m].abs(), exact_dtype=False)\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_svd_errors_and_warnings(self, device, dtype):\n for svd in [torch.svd, torch.linalg.svd]:\n # if non-empty out tensor with wrong shape is passed a warning is given\n a = torch.randn(3, 3, dtype=dtype, device=device)\n real_dtype = a.real.dtype if dtype.is_complex else dtype\n out_u = torch.empty(2, 2, dtype=dtype, device=device)\n out_s = torch.empty(4, 4, dtype=real_dtype, device=device)\n out_v = torch.empty(6, 6, dtype=dtype, device=device)\n with warnings.catch_warnings(record=True) as w:\n # Trigger warning\n svd(a, out=(out_u, out_s, out_v))\n # Check warning occurs\n self.assertEqual(len(w), 3)\n self.assertTrue(\"An output with one or more elements was resized\" in str(w[-3].message))\n self.assertTrue(\"An output with one or more elements was resized\" in str(w[-2].message))\n self.assertTrue(\"An output with one or more elements was resized\" in str(w[-1].message))\n\n # dtypes should be safely castable\n out_u = torch.empty(0, dtype=torch.int, device=device)\n out_s = torch.empty(0, dtype=torch.int, device=device)\n out_v = torch.empty(0, dtype=torch.int, device=device)\n with self.assertRaisesRegex(RuntimeError, \"but got U with dtype Int\"):\n svd(a, out=(out_u, out_s, out_v))\n\n out_u = torch.empty(0, dtype=dtype, device=device)\n if svd == torch.linalg.svd:\n msg = \"but got Vh with dtype Int\"\n else:\n msg = \"but got V with dtype Int\"\n with self.assertRaisesRegex(RuntimeError, msg):\n svd(a, out=(out_u, out_s, out_v))\n\n out_v = torch.empty(0, dtype=dtype, device=device)\n with self.assertRaisesRegex(RuntimeError, \"but got S with dtype Int\"):\n svd(a, out=(out_u, out_s, out_v))\n\n # device should match\n if torch.cuda.is_available():\n wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'\n out_u = torch.empty(0, device=wrong_device, dtype=dtype)\n out_s = torch.empty(0, device=wrong_device, dtype=real_dtype)\n out_v = torch.empty(0, device=wrong_device, dtype=dtype)\n with self.assertRaisesRegex(RuntimeError, \"tensors to be on the same device\"):\n # error from out_u\n svd(a, out=(out_u, out_s, out_v))\n\n out_u = torch.empty(0, device=device, dtype=dtype)\n with self.assertRaisesRegex(RuntimeError, \"tensors to be on the same device\"):\n # error from out_s\n svd(a, out=(out_u, out_s, out_v))\n\n out_s = torch.empty(0, device=device, dtype=real_dtype)\n with self.assertRaisesRegex(RuntimeError, \"tensors to be on the same device\"):\n # error from out_v\n svd(a, out=(out_u, out_s, out_v))\n\n # if input contains NaN then an error is triggered for svd\n error_msg = 'The algorithm failed to converge' \\\n if (self.device_type == 'cpu' or TEST_WITH_ROCM) \\\n else 'CUSOLVER_STATUS_EXECUTION_FAILED'\n a = torch.full((3, 3), float('nan'), dtype=dtype, device=device)\n a[0] = float('nan')\n with self.assertRaisesRegex(RuntimeError, error_msg):\n svd(a)\n error_msg = r'\\(Batch element 1\\): The algorithm failed to converge' \\\n if (self.device_type == 'cpu' or TEST_WITH_ROCM) \\\n else 'CUSOLVER_STATUS_EXECUTION_FAILED'\n a = torch.randn(3, 33, 33, dtype=dtype, device=device)\n a[1, 0, 0] = float('nan')\n with 
self.assertRaisesRegex(RuntimeError, error_msg):\n svd(a)\n\n @skipCUDAIfNoMagmaAndNoCusolver\n @skipCPUIfNoLapack\n @dtypes(*floating_and_complex_types())\n def test_svd_square(self, device, dtype):\n self._test_svd_helper((10, 10), True, False, device, dtype)\n\n @skipCUDAIfNoMagmaAndNoCusolver\n @skipCPUIfNoLapack\n @dtypes(*floating_types())\n def test_svd_square_col_maj(self, device, dtype):\n self._test_svd_helper((10, 10), True, True, device, dtype)\n\n @skipCUDAIfNoMagmaAndNoCusolver\n @skipCPUIfNoLapack\n @dtypes(*floating_types())\n def test_svd_tall_some(self, device, dtype):\n self._test_svd_helper((20, 5), True, False, device, dtype)\n\n @skipCUDAIfNoMagmaAndNoCusolver\n @skipCPUIfNoLapack\n @dtypes(*floating_types())\n def test_svd_tall_all(self, device, dtype):\n self._test_svd_helper((20, 5), False, False, device, dtype)\n\n @skipCUDAIfNoMagmaAndNoCusolver\n @skipCPUIfNoLapack\n @dtypes(*floating_types())\n def test_svd_tall_some_col_maj(self, device, dtype):\n self._test_svd_helper((5, 20), True, True, device, dtype)\n\n @skipCUDAIfNoMagmaAndNoCusolver\n @skipCPUIfNoLapack\n @dtypes(*floating_types())\n def test_svd_tall_all_col_maj(self, device, dtype):\n self._test_svd_helper((5, 20), False, True, device, dtype)\n\n # ~~~ tests for torch.linalg.svd ~~~\n @skipCUDAIfNoMagmaAndNoCusolver\n @skipCPUIfNoLapack\n @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)\n def test_linalg_svd_compute_uv(self, device, dtype):\n \"\"\"\n Test the default case. Here we have the very same behavior as\n NumPy with compute_uv=True.\n \"\"\"\n t = torch.randn((10, 11), device=device, dtype=dtype)\n np_t = t.cpu().numpy()\n for full_matrices in (True, False):\n # check linalg.svd vs numpy\n expected = np.linalg.svd(np_t, full_matrices, compute_uv=True)\n actual = torch.linalg.svd(t, full_matrices)\n # sign/phase of the singular vectors is not unique and therefore absolute values are compared\n self.assertEqual(abs(actual[0]), abs(expected[0]))\n self.assertEqual(actual[1], expected[1])\n self.assertEqual(abs(actual[2]), abs(expected[2]))\n # check linalg.svd vs linalg.svd(out=...)\n out = (torch.empty_like(actual[0]),\n torch.empty_like(actual[1]),\n torch.empty_like(actual[2]))\n out2 = torch.linalg.svd(t, full_matrices, out=out)\n self.assertEqual(actual, out)\n self.assertEqual(actual, out2)\n\n @skipCUDAIfNoMagmaAndNoCusolver\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_svdvals(self, device, dtype):\n\n def run_test(shape):\n # NumPy doesn't have separate svdvals function, it is included in\n # svd with compute_uv=False\n # so we test our implementation against numpy.linalg.svd(*, compute_uv=False)\n A = make_tensor(shape, dtype=dtype, device=device)\n expected = np.linalg.svd(A.cpu(), compute_uv=False)\n actual = torch.linalg.svdvals(A)\n self.assertEqual(actual, expected)\n\n batches = [(), (0, ), (2, ), (2, 1)]\n ns = [5, 2, 0]\n for batch, (m, n) in itertools.product(batches, product(ns, ns)):\n run_test((*batch, m, n))\n\n @skipCUDAIfNoCusolver # MAGMA backend doesn't work in this case\n @skipCUDAIfRocm\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_svd_memory_allocation(self, device, dtype):\n # test for https://github.com/pytorch/pytorch/issues/61949\n # the problem was that tensors of incorrect size were allocated and then narrowed\n m = 3\n n = 2**20\n a = make_tensor((m, n), dtype=dtype, device=device)\n # the following should run without 
errors\n result = torch.linalg.svdvals(a)\n result = torch.linalg.svd(a, full_matrices=False)\n\n out0 = torch.empty_like(result[0])\n out1 = torch.empty_like(result[1])\n out2 = torch.empty_like(result[2])\n torch.linalg.svdvals(a, out=out0)\n torch.linalg.svd(a, full_matrices=False, out=(out0, out1, out2))\n\n def cholesky_solve_test_helper(self, A_dims, b_dims, upper, device, dtype):\n from torch.testing._internal.common_utils import random_hermitian_pd_matrix\n\n b = torch.randn(*b_dims, dtype=dtype, device=device)\n A = random_hermitian_pd_matrix(*A_dims, dtype=dtype, device=device)\n L = torch.cholesky(A, upper=upper)\n return b, A, L\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n @precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,\n torch.float64: 1e-8, torch.complex128: 1e-8})\n def test_cholesky_solve(self, device, dtype):\n for (k, n), upper in itertools.product(zip([2, 3, 5], [3, 5, 7]), [True, False]):\n b, A, L = self.cholesky_solve_test_helper((n,), (n, k), upper, device, dtype)\n x = torch.cholesky_solve(b, L, upper=upper)\n self.assertEqual(b, np.matmul(A.cpu(), x.cpu()))\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n @precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,\n torch.float64: 1e-8, torch.complex128: 1e-8})\n def test_cholesky_solve_batched(self, device, dtype):\n def cholesky_solve_batch_helper(A_dims, b_dims, upper):\n b, A, L = self.cholesky_solve_test_helper(A_dims, b_dims, upper, device, dtype)\n x_exp_list = []\n for i in range(b_dims[0]):\n x_exp_list.append(torch.cholesky_solve(b[i], L[i], upper=upper))\n x_exp = torch.stack(x_exp_list) # Stacked output\n x_act = torch.cholesky_solve(b, L, upper=upper) # Actual output\n self.assertEqual(x_act, x_exp) # Equality check\n Ax = np.matmul(A.cpu(), x_act.cpu())\n self.assertEqual(b, Ax) # Correctness check\n\n for upper, batchsize in itertools.product([True, False], [1, 3, 4]):\n cholesky_solve_batch_helper((5, batchsize), (batchsize, 5, 10), upper)\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_cholesky_solve_batched_non_contiguous(self, device, dtype):\n from numpy.linalg import solve\n from torch.testing._internal.common_utils import random_hermitian_pd_matrix\n\n for upper in [True, False]:\n A = random_hermitian_pd_matrix(2, 2, dtype=dtype, device='cpu')\n b = torch.randn(2, 2, 2, dtype=dtype, device='cpu')\n x_exp = solve(A.permute(0, 2, 1).numpy(), b.permute(2, 1, 0).numpy())\n A = A.to(device).permute(0, 2, 1)\n b = b.to(device).permute(2, 1, 0)\n assert not A.is_contiguous() and not b.is_contiguous(), \"contiguous inputs\"\n L = torch.cholesky(A, upper)\n x = torch.cholesky_solve(b, L, upper=upper)\n self.assertEqual(x, x_exp)\n\n @slowTest\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n @precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,\n torch.float64: 1e-8, torch.complex128: 1e-8})\n def test_cholesky_solve_batched_many_batches(self, device, dtype):\n for A_dims, b_dims in zip([(5, 256, 256), (5,)], [(5, 10), (512, 512, 5, 10)]):\n for upper in [True, False]:\n b, A, L = self.cholesky_solve_test_helper(A_dims, b_dims, upper, device, dtype)\n x = torch.cholesky_solve(b, L, upper)\n Ax = torch.matmul(A, x)\n self.assertEqual(Ax, b.expand_as(Ax))\n\n @skipCUDAIfNoMagma\n 
@skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n @precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,\n torch.float64: 1e-8, torch.complex128: 1e-8})\n def test_cholesky_solve_batched_broadcasting(self, device, dtype):\n from numpy.linalg import solve\n from torch.testing._internal.common_utils import random_hermitian_pd_matrix\n\n def run_test(A_dims, b_dims, upper):\n A_matrix_size = A_dims[-1]\n A_batch_dims = A_dims[:-2]\n A = random_hermitian_pd_matrix(A_matrix_size, *A_batch_dims,\n dtype=dtype, device='cpu')\n b = torch.randn(*b_dims, dtype=dtype, device='cpu')\n x_exp = torch.tensor(solve(A.numpy(), b.numpy()), dtype=dtype, device=device)\n A, b = A.to(dtype=dtype, device=device), b.to(dtype=dtype, device=device)\n L = torch.linalg.cholesky(A, upper=upper)\n x = torch.cholesky_solve(b, L, upper=upper)\n self.assertEqual(x, x_exp)\n # https://github.com/pytorch/pytorch/issues/42695\n x = torch.cholesky_solve(b, L, upper=upper, out=x)\n self.assertEqual(x, x_exp)\n\n # test against numpy.linalg.solve\n for upper in [True, False]:\n run_test((2, 1, 3, 4, 4), (2, 1, 3, 4, 6), upper) # no broadcasting\n run_test((2, 1, 3, 4, 4), (4, 6), upper) # broadcasting b\n run_test((4, 4), (2, 1, 3, 4, 2), upper) # broadcasting A\n run_test((1, 3, 1, 4, 4), (2, 1, 3, 4, 5), upper) # broadcasting A & b\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float64, torch.complex128)\n def test_cholesky_solve_autograd(self, device, dtype):\n def run_test(A_dims, B_dims, upper):\n root = torch.randn(*A_dims, device=device, dtype=dtype).requires_grad_()\n b = torch.randn(*B_dims, device=device, dtype=dtype).requires_grad_()\n\n def func(root, b, upper):\n if upper:\n A = root.triu()\n else:\n A = root.tril()\n return torch.cholesky_solve(b, A, upper)\n\n gradcheck(func, [root, b, upper])\n # TODO(#50743): the following fails with batched grad testing\n # TODO(#56235): disabling temporarily\n # gradgradcheck(func, [root, b, upper], atol=1e-3, check_batched_grad=False)\n\n for (a_size, b_size), upper in itertools.product([((3, 3), (3, 4)), ((3, 3), (3, 2)),\n ((2, 3, 3), (2, 3, 4)), ((2, 3, 3), (2, 3, 2))],\n [True, False]):\n run_test(a_size, b_size, upper)\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_cholesky_solve_out_errors_and_warnings(self, device, dtype):\n # dtypes should be safely castable\n a = torch.eye(2, dtype=dtype, device=device)\n b = torch.randn(2, 1, dtype=dtype, device=device)\n out = torch.empty(0, dtype=torch.int, device=device)\n with self.assertRaisesRegex(RuntimeError, \"but got result with dtype Int\"):\n torch.cholesky_solve(b, a, out=out)\n\n # device should match\n if torch.cuda.is_available():\n wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'\n out = torch.empty(0, dtype=dtype, device=wrong_device)\n with self.assertRaisesRegex(RuntimeError, \"tensors to be on the same device\"):\n torch.cholesky_solve(b, a, out=out)\n\n # if out tensor with wrong shape is passed a warning is given\n with warnings.catch_warnings(record=True) as w:\n out = torch.empty(1, dtype=dtype, device=device)\n # Trigger warning\n torch.cholesky_solve(b, a, out=out)\n # Check warning occurs\n self.assertEqual(len(w), 1)\n self.assertTrue(\"An output with one or more elements was resized\" in str(w[-1].message))\n\n @skipCUDAIfNoMagmaAndNoCusolver\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n 
@precisionOverride({torch.float32: 2e-3, torch.complex64: 2e-3,\n torch.float64: 1e-8, torch.complex128: 1e-8})\n def test_inverse(self, device, dtype):\n from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value\n\n def run_test(torch_inverse, matrix, batches, n):\n matrix_inverse = torch_inverse(matrix)\n\n # Compare against NumPy output\n # NumPy uses 'gesv' LAPACK routine solving the equation A A_inv = I\n # But in PyTorch 'gertf' + 'getri' is used causing element-wise differences\n expected = np.linalg.inv(matrix.cpu().numpy())\n self.assertEqual(matrix_inverse, expected, atol=self.precision, rtol=self.precision)\n\n # Additional correctness tests, check matrix*matrix_inverse == identity\n identity = torch.eye(n, dtype=dtype, device=device)\n self.assertEqual(identity.expand_as(matrix), np.matmul(matrix.cpu(), matrix_inverse.cpu()))\n self.assertEqual(identity.expand_as(matrix), np.matmul(matrix_inverse.cpu(), matrix.cpu()))\n\n # check the out= variant\n # prepare the expected out tensor\n matrix_inverse_out = torch.empty(*batches, n, n, dtype=dtype, device=device)\n matrix_inverse_out_t = matrix_inverse_out.mT.clone(memory_format=torch.contiguous_format)\n matrix_inverse_out = matrix_inverse_out_t.mT\n ans = torch_inverse(matrix, out=matrix_inverse_out)\n self.assertEqual(matrix_inverse_out, ans, atol=0, rtol=0)\n self.assertEqual(matrix_inverse_out, matrix_inverse, atol=0, rtol=0)\n\n # batched matrices: 3+ dimensional tensors, check matrix_inverse same as single-inverse for each matrix\n if matrix.ndim > 2 and batches[0] != 0:\n expected_inv_list = []\n p = int(np.prod(batches)) # use `p` instead of -1, so that the test works for empty input as well\n for mat in matrix.contiguous().view(p, n, n):\n expected_inv_list.append(torch_inverse(mat))\n expected_inv = torch.stack(expected_inv_list).view(*batches, n, n)\n if self.device_type == 'cuda' and dtype in [torch.float32, torch.complex64]:\n # single-inverse is done using cuSOLVER, while batched inverse is done using MAGMA\n # individual values can be significantly different for fp32, hence rather high rtol is used\n # the important thing is that torch_inverse passes above checks with identity\n self.assertEqual(matrix_inverse, expected_inv, atol=1e-1, rtol=1e-2)\n else:\n self.assertEqual(matrix_inverse, expected_inv)\n\n # helper function for testing torch.linalg.inv_ex\n def test_inv_ex(input, out=None):\n if out is not None:\n info = torch.empty(0, dtype=torch.int32, device=device)\n return torch.linalg.inv_ex(input, out=(out, info)).inverse\n return torch.linalg.inv_ex(input).inverse\n\n for torch_inverse in [torch.inverse, torch.linalg.inv, test_inv_ex]:\n for batches, n in itertools.product(\n [[], [0], [2], [2, 1]],\n [0, 5]\n ):\n matrices = random_fullrank_matrix_distinct_singular_value(n, *batches, dtype=dtype, device=device)\n run_test(torch_inverse, matrices, batches, n)\n\n # test non-contiguous input\n run_test(torch_inverse, matrices.mT, batches, n)\n if n > 0:\n run_test(\n torch_inverse,\n random_fullrank_matrix_distinct_singular_value(n * 2, *batches, dtype=dtype, device=device)\n .view(-1, n * 2, n * 2)[:, ::2, ::2].view(*batches, n, n),\n batches, n\n )\n\n @skipCUDAIfNoMagmaAndNoCusolver\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_inv_ex_info_device(self, device, dtype):\n A = torch.eye(3, 3, dtype=dtype, device=device)\n info = torch.linalg.inv_ex(A).info\n self.assertTrue(info.device == A.device)\n\n 
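# --- Editor's note: illustrative sketch, not part of the original test file ---
# The surrounding tests exercise torch.linalg.inv_ex, which returns an
# (inverse, info) named tuple and, with the default check_errors=False, does
# not raise on singular input; a nonzero `info` flags the failed factorization
# instead (as the singular-input test below checks). A minimal standalone
# usage sketch, assuming a PyTorch build with LAPACK and torch.linalg.inv_ex
# available:
import torch

A = torch.eye(3)
A[-1, -1] = 0.0                           # make A singular
inverse, info = torch.linalg.inv_ex(A)    # no exception with check_errors=False
if int(info) != 0:
    print(f"A is not invertible (LAPACK info = {int(info)})")
else:
    print(inverse)
# Passing check_errors=True instead turns a nonzero info into a RuntimeError:
# torch.linalg.inv_ex(A, check_errors=True)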
@skipCUDAIfNoMagmaAndNoCusolver\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n @skipCUDAIfRocm\n def test_inv_ex_singular(self, device, dtype):\n # if the input matrix is not invertible, info with positive integer is returned\n A = torch.eye(3, 3, dtype=dtype, device=device)\n A[-1, -1] = 0 # Now A is singular\n info = torch.linalg.inv_ex(A).info\n self.assertEqual(info, 3)\n with self.assertRaisesRegex(RuntimeError, r'diagonal element 3 is zero, the inversion could not be completed'):\n torch.linalg.inv_ex(A, check_errors=True)\n\n # if at least one matrix in the batch is not positive definite,\n # batched info with positive integer for the corresponding matrix is returned\n A = torch.eye(3, 3, dtype=dtype, device=device)\n A = A.reshape((1, 3, 3))\n A = A.repeat(5, 1, 1)\n A[3, -2, -2] = 0 # Now A[3] is singular\n info = torch.linalg.inv_ex(A).info\n\n expected_info = torch.zeros(A.shape[:-2], dtype=torch.int32, device=device)\n expected_info[3] = 2\n self.assertEqual(info, expected_info)\n with self.assertRaisesRegex(RuntimeError, r'\\(Batch element 3\\): The diagonal element 2 is zero'):\n torch.linalg.inv_ex(A, check_errors=True)\n\n @slowTest\n @skipCUDAIfNoMagmaAndNoCusolver\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n @precisionOverride({torch.float32: 2e-3, torch.complex64: 2e-3,\n torch.float64: 1e-5, torch.complex128: 1e-5})\n def test_inverse_many_batches(self, device, dtype):\n from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value\n\n def test_inverse_many_batches_helper(torch_inverse, b, n):\n matrices = random_fullrank_matrix_distinct_singular_value(b, n, n, dtype=dtype, device=device)\n matrices_inverse = torch_inverse(matrices)\n\n # Compare against NumPy output\n expected = np.linalg.inv(matrices.cpu().numpy())\n self.assertEqual(matrices_inverse, expected, atol=self.precision, rtol=1e-3)\n\n for torch_inverse in [torch.inverse, torch.linalg.inv]:\n test_inverse_many_batches_helper(torch_inverse, 5, 256)\n test_inverse_many_batches_helper(torch_inverse, 3, 512)\n\n @skipCUDAIfNoMagmaAndNoCusolver\n @skipCPUIfNoLapack\n @onlyNativeDeviceTypes # TODO: XLA doesn't raise exception\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_inverse_errors(self, device, dtype):\n # inverse expects batches of square matrices as input\n with self.assertRaisesRegex(RuntimeError, \"must be batches of square matrices\"):\n torch.inverse(torch.randn(2, 3, 4, 3))\n\n # if input is not invertible, RuntimeError is raised mentioning the first non-invertible batch\n def run_test_singular_input(batch_dim, n):\n x = torch.eye(3, 3, dtype=dtype, device=device).reshape((1, 3, 3)).repeat(batch_dim, 1, 1)\n x[n, -1, -1] = 0\n with self.assertRaisesRegex(RuntimeError, rf'\\(Batch element {n}\\): The diagonal element 3 is zero'):\n torch.inverse(x)\n\n for params in [(1, 0), (2, 0), (2, 1), (4, 0), (4, 2), (10, 2)]:\n run_test_singular_input(*params)\n\n @skipCUDAIfNoMagmaAndNoCusolver\n @skipCPUIfNoLapack\n @onlyNativeDeviceTypes # TODO: XLA doesn't raise exception\n @skipCUDAIfRocm\n @skipCUDAVersionIn([(11, 3)]) # https://github.com/pytorch/pytorch/issues/57482\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_inverse_errors_large(self, device, dtype):\n # Test batched inverse of singular matrices reports errors without crashing (gh-51930)\n x = torch.empty((8, 10, 616, 616), dtype=dtype, 
device=device)\n x[:] = torch.eye(616, dtype=dtype, device=device)\n x[..., 10, 10] = 0\n with self.assertRaisesRegex(RuntimeError, r'\\(Batch element 0\\): The diagonal element 11 is zero'):\n torch.inverse(x)\n\n @precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3, torch.float64: 1e-7, torch.complex128: 1e-7})\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_pinv(self, device, dtype):\n from torch.testing._internal.common_utils import random_hermitian_pd_matrix\n\n def run_test_main(A, hermitian):\n # Testing against definition for pseudo-inverses\n A_pinv = torch.linalg.pinv(A, hermitian=hermitian)\n np_A = A.cpu().numpy()\n np_A_pinv = A_pinv.cpu().numpy()\n if A.numel() > 0:\n self.assertEqual(A, np_A @ np_A_pinv @ np_A, atol=self.precision, rtol=self.precision)\n self.assertEqual(A_pinv, np_A_pinv @ np_A @ np_A_pinv, atol=self.precision, rtol=self.precision)\n self.assertEqual(np_A @ np_A_pinv, (np_A @ np_A_pinv).conj().swapaxes(-2, -1))\n self.assertEqual(np_A_pinv @ np_A, (np_A_pinv @ np_A).conj().swapaxes(-2, -1))\n else:\n self.assertEqual(A.shape, A_pinv.shape[:-2] + (A_pinv.shape[-1], A_pinv.shape[-2]))\n\n # Check out= variant\n out = torch.empty_like(A_pinv)\n ans = torch.linalg.pinv(A, hermitian=hermitian, out=out)\n self.assertEqual(ans, out)\n self.assertEqual(ans, A_pinv)\n\n def run_test_numpy(A, hermitian):\n # Check against NumPy output\n # Test float rcond, and specific value for each matrix\n rconds = [float(torch.rand(1)), ]\n # Test different types of rcond tensor\n for rcond_type in all_types():\n rconds.append(torch.rand(A.shape[:-2], dtype=torch.double, device=device).to(rcond_type))\n # Test broadcasting of rcond\n if A.ndim > 2:\n rconds.append(torch.rand(A.shape[-3], device=device))\n for rcond in rconds:\n actual = torch.linalg.pinv(A, rcond=rcond, hermitian=hermitian)\n torch_rtol = torch.linalg.pinv(A, rtol=rcond, hermitian=hermitian)\n self.assertEqual(actual, torch_rtol)\n numpy_rcond = rcond if isinstance(rcond, float) else rcond.cpu().numpy()\n expected = np.linalg.pinv(A.cpu().numpy(), rcond=numpy_rcond, hermitian=hermitian)\n self.assertEqual(actual, expected, atol=self.precision, rtol=1e-5)\n\n for sizes in [(5, 5), (3, 5, 5), (3, 2, 5, 5), # square matrices\n (3, 2), (5, 3, 2), (2, 5, 3, 2), # fat matrices\n (2, 3), (5, 2, 3), (2, 5, 2, 3), # thin matrices\n (0, 0), (0, 2), (2, 0), (3, 0, 0), (0, 3, 0), (0, 0, 3)]: # zero numel matrices\n A = torch.randn(*sizes, dtype=dtype, device=device)\n hermitian = False\n run_test_main(A, hermitian)\n run_test_numpy(A, hermitian)\n\n # Check hermitian = True\n for sizes in [(5, 5), (3, 5, 5), (3, 2, 5, 5), # square matrices\n (0, 0), (3, 0, 0), ]: # zero numel square matrices\n A = random_hermitian_pd_matrix(sizes[-1], *sizes[:-2], dtype=dtype, device=device)\n hermitian = True\n run_test_main(A, hermitian)\n run_test_numpy(A, hermitian)\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_pinv_errors_and_warnings(self, device, dtype):\n # pinv requires at least 2D tensor\n a = torch.randn(1, device=device, dtype=dtype)\n with self.assertRaisesRegex(RuntimeError, \"expected a tensor with 2 or more dimensions\"):\n torch.linalg.pinv(a)\n\n # if non-empty out tensor with wrong shape is passed a warning is given\n a = torch.randn(3, 3, dtype=dtype, device=device)\n out = torch.empty(7, 7, dtype=dtype, device=device)\n with 
warnings.catch_warnings(record=True) as w:\n # Trigger warning\n torch.linalg.pinv(a, out=out)\n # Check warning occurs\n self.assertEqual(len(w), 1)\n self.assertTrue(\"An output with one or more elements was resized\" in str(w[-1].message))\n\n # dtypes of out and input should be safely castable\n out = torch.empty_like(a).to(torch.int)\n with self.assertRaisesRegex(RuntimeError, \"but got result with dtype Int\"):\n torch.linalg.pinv(a, out=out)\n\n if torch.cuda.is_available():\n # device of out and input should match\n wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'\n out = torch.empty_like(a).to(wrong_device)\n with self.assertRaisesRegex(RuntimeError, \"Expected result and input tensors to be on the same device\"):\n torch.linalg.pinv(a, out=out)\n\n # device of rcond and input should match\n wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'\n rcond = torch.full((), 1e-2, device=wrong_device)\n with self.assertRaisesRegex(RuntimeError, \"Expected all tensors to be on the same device\"):\n torch.linalg.pinv(a, rcond=rcond)\n\n # rcond can't be complex\n rcond = torch.full((), 1j, device=device)\n with self.assertRaisesRegex(RuntimeError, \"rcond tensor of complex type is not supported\"):\n torch.linalg.pinv(a, rcond=rcond)\n\n # atol can't be complex\n atol = torch.full((), 1j, device=device)\n with self.assertRaisesRegex(RuntimeError, \"atol tensor of complex type is not supported\"):\n torch.linalg.pinv(a, atol=atol)\n\n # rtol can't be complex\n rtol = torch.full((), 1j, device=device)\n with self.assertRaisesRegex(RuntimeError, \"rtol tensor of complex type is not supported\"):\n torch.linalg.pinv(a, rtol=rtol)\n\n @skipCUDAIfNoMagmaAndNoCusolver\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_inv_errors_and_warnings(self, device, dtype):\n # inv expects batches of square matrices as input\n a = torch.randn(2, 3, 4, 3, dtype=dtype, device=device)\n with self.assertRaisesRegex(RuntimeError, \"must be batches of square matrices\"):\n torch.linalg.inv(a)\n\n # inv requires the input to be at least 2 dimensional tensor\n a = torch.randn(2, device=device, dtype=dtype)\n with self.assertRaisesRegex(RuntimeError, \"must have at least 2 dimensions\"):\n torch.linalg.inv(a)\n\n # if input is not invertible, RuntimeError is raised mentioning the first non-invertible batch\n def run_test_singular_input(batch_dim, n):\n a = torch.eye(3, 3, dtype=dtype, device=device).reshape((1, 3, 3)).repeat(batch_dim, 1, 1)\n a[n, -1, -1] = 0\n with self.assertRaisesRegex(RuntimeError, rf\"\\(Batch element {n}\\): The diagonal element 3 is zero\"):\n torch.linalg.inv(a)\n\n for params in [(1, 0), (2, 0), (2, 1), (4, 0), (4, 2), (10, 2)]:\n run_test_singular_input(*params)\n\n # dtypes should match\n a = torch.eye(2, dtype=dtype, device=device)\n out = torch.empty(0, dtype=torch.int, device=device)\n with self.assertRaisesRegex(RuntimeError, \"got result with dtype Int\"):\n torch.linalg.inv(a, out=out)\n\n # device should match\n if torch.cuda.is_available():\n wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'\n out = torch.empty(0, device=wrong_device, dtype=dtype)\n with self.assertRaisesRegex(RuntimeError, \"tensors to be on the same device\"):\n torch.linalg.inv(a, out=out)\n\n # if out tensor with wrong shape is passed a warning is given\n with warnings.catch_warnings(record=True) as w:\n a = torch.eye(2, dtype=dtype, device=device)\n out = torch.empty(1, dtype=dtype, device=device)\n # Trigger warning\n 
torch.linalg.inv(a, out=out)\n # Check warning occurs\n self.assertEqual(len(w), 1)\n self.assertTrue(\"An output with one or more elements was resized\" in str(w[-1].message))\n\n # if out tensor in batched column major format but with wrong a warning is given\n with warnings.catch_warnings(record=True) as w:\n a = torch.eye(2, dtype=dtype, device=device)\n out = torch.empty(3, 3, dtype=dtype, device=device)\n out = out.mT.clone(memory_format=torch.contiguous_format)\n out = out.mT\n self.assertTrue(out.mT.is_contiguous())\n # Trigger warning\n torch.linalg.inv(a, out=out)\n # Check warning occurs\n self.assertEqual(len(w), 1)\n self.assertTrue(\"An output with one or more elements was resized\" in str(w[-1].message))\n\n def solve_test_helper(self, A_dims, b_dims, device, dtype):\n from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value\n\n b = torch.randn(*b_dims, dtype=dtype, device=device)\n A = random_fullrank_matrix_distinct_singular_value(*A_dims, dtype=dtype, device=device)\n return b, A\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n @precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3})\n def test_solve(self, device, dtype):\n def run_test(n, batch, rhs):\n A_dims = (n, *batch)\n b_dims = (*batch, n, *rhs)\n b, A = self.solve_test_helper(A_dims, b_dims, device, dtype)\n\n # Correctness test\n x = torch.linalg.solve(A, b)\n if rhs == ():\n Ax = np.matmul(A.cpu(), x.unsqueeze(-1).cpu())\n Ax.squeeze_(-1)\n else:\n Ax = np.matmul(A.cpu(), x.cpu())\n self.assertEqual(b.expand_as(Ax), Ax)\n\n # Check against NumPy\n expected = np.linalg.solve(A.cpu().numpy(), b.expand_as(x).cpu().numpy())\n self.assertEqual(x, expected)\n\n # Check out= variant\n out = torch.empty_like(x)\n ans = torch.linalg.solve(A, b, out=out)\n self.assertEqual(ans, out)\n self.assertEqual(x, out)\n\n # Check out= variant with complex128 out tensor\n out = torch.empty_like(x).to(torch.complex128)\n ans = torch.linalg.solve(A, b, out=out)\n self.assertEqual(ans, out)\n self.assertEqual(x.to(torch.complex128), out)\n\n # Check empty out\n out = torch.empty(0, dtype=dtype, device=device)\n ans = torch.linalg.solve(A, b, out=out)\n self.assertEqual(ans, out)\n self.assertEqual(x, out)\n\n batches = [(), (0, ), (3, ), (2, 3)]\n ns = [0, 5, 32]\n nrhs = [(), (1, ), (5, )]\n for n, batch, rhs in itertools.product(ns, batches, nrhs):\n run_test(n, batch, rhs)\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n @precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3})\n def test_solve_batched_non_contiguous(self, device, dtype):\n from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value\n A = random_fullrank_matrix_distinct_singular_value(2, 2, dtype=dtype, device=device).permute(1, 0, 2)\n b = torch.randn(2, 2, 2, dtype=dtype, device=device).permute(2, 1, 0)\n self.assertFalse(A.is_contiguous())\n self.assertFalse(b.is_contiguous())\n actual = torch.linalg.solve(A, b)\n expected = np.linalg.solve(A.cpu().numpy(), b.cpu().numpy())\n self.assertEqual(actual, expected)\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_solve_errors_and_warnings(self, device, dtype):\n # solve expects batches of square matrices as input\n with self.assertRaisesRegex(RuntimeError, \"must be batches of square matrices\"):\n a = 
torch.randn(2, 3, 4, 3, dtype=dtype, device=device)\n b = torch.randn(2, 3, 4, 1, dtype=dtype, device=device)\n torch.linalg.solve(a, b)\n\n # solve expects compatible shapes for A x = b\n with self.assertRaisesRegex(RuntimeError, \"Incompatible matrix sizes\"):\n a = torch.randn(2, 3, 3, 3, dtype=dtype, device=device)\n b = torch.randn(2, 3, 2, 1, dtype=dtype, device=device)\n torch.linalg.solve(a, b)\n\n # if input is not solvable, RuntimeError is raised mentioning the first non-solvable batch\n def run_test_singular_input(batch_dim, n):\n a = torch.eye(3, 3, dtype=dtype, device=device).reshape((1, 3, 3)).repeat(batch_dim, 1, 1)\n a[n, -1, -1] = 0\n b = torch.randn(batch_dim, 3, 1, dtype=dtype, device=device)\n with self.assertRaisesRegex(RuntimeError, rf'\\(Batch element {n}\\): The diagonal element 3 is zero'):\n torch.linalg.solve(a, b)\n\n for params in [(1, 0), (2, 0), (2, 1), (4, 0), (4, 2), (10, 2)]:\n run_test_singular_input(*params)\n\n # if out tensor with wrong shape is passed a warning is given\n # matrix 'b' case\n with warnings.catch_warnings(record=True) as w:\n A = torch.eye(2, dtype=dtype, device=device).reshape((1, 2, 2)).repeat(2, 1, 1)\n b = torch.randn(2, 2, 2, dtype=dtype, device=device)\n out = torch.zeros(1, dtype=dtype, device=device)\n # Trigger warning\n torch.linalg.solve(A, b, out=out)\n # Check warning occurs\n self.assertEqual(len(w), 1)\n self.assertTrue(\"An output with one or more elements was resized\" in str(w[-1].message))\n\n # if out tensor with wrong shape is passed a warning is given\n # vector 'b' case\n with warnings.catch_warnings(record=True) as w:\n A = torch.eye(2, dtype=dtype, device=device)\n b = torch.randn(2, dtype=dtype, device=device)\n out = torch.zeros(1, dtype=dtype, device=device)\n # Trigger warning\n torch.linalg.solve(A, b, out=out)\n # Check warning occurs\n self.assertEqual(len(w), 1)\n self.assertTrue(\"An output with one or more elements was resized\" in str(w[-1].message))\n\n # dtypes should be safely castable\n a = torch.eye(2, dtype=dtype, device=device)\n b = torch.randn(2, 1, dtype=dtype, device=device)\n out = torch.empty(0, dtype=torch.int, device=device)\n with self.assertRaisesRegex(RuntimeError, \"but got result with dtype Int\"):\n torch.linalg.solve(a, b, out=out)\n\n # device should match\n if torch.cuda.is_available():\n wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'\n out = torch.empty(0, dtype=dtype, device=wrong_device)\n clone_a = torch.empty_like(a)\n with self.assertRaisesRegex(RuntimeError, \"tensors to be on the same device\"):\n torch.linalg.solve(a, b, out=out)\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_old_solve(self, device, dtype):\n for (k, n) in zip([2, 3, 5], [3, 5, 7]):\n b, A = self.solve_test_helper((n,), (n, k), device, dtype)\n x = torch.solve(b, A)[0]\n self.assertEqual(b, np.matmul(A.cpu(), x.cpu()))\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_old_solve_batched(self, device, dtype):\n def solve_batch_helper(A_dims, b_dims):\n b, A = self.solve_test_helper(A_dims, b_dims, device, dtype)\n x_exp_list = []\n for i in range(b_dims[0]):\n x_exp_list.append(torch.solve(b[i], A[i])[0])\n x_exp = torch.stack(x_exp_list) # Stacked output\n x_act = torch.solve(b, A)[0] # Actual output\n self.assertEqual(x_exp, x_act) # Equality check\n Ax = np.matmul(A.cpu(), x_act.cpu())\n self.assertEqual(b, Ax)\n\n for 
batchsize in [1, 3, 4]:\n solve_batch_helper((5, batchsize), (batchsize, 5, 10))\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_old_solve_batched_non_contiguous(self, device, dtype):\n from numpy.linalg import solve\n from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value\n A = random_fullrank_matrix_distinct_singular_value(2, 2, dtype=dtype, device=device).permute(1, 0, 2)\n b = torch.randn(2, 2, 2, dtype=dtype, device=device).permute(2, 1, 0)\n x, _ = torch.solve(b, A)\n x_exp = solve(A.cpu().numpy(), b.cpu().numpy())\n self.assertEqual(x, x_exp)\n\n @slowTest\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_old_solve_batched_many_batches(self, device, dtype):\n for A_dims, b_dims in zip([(5, 256, 256), (3, )], [(5, 1), (512, 512, 3, 1)]):\n b, A = self.solve_test_helper(A_dims, b_dims, device, dtype)\n x, _ = torch.solve(b, A)\n Ax = torch.matmul(A, x)\n self.assertEqual(Ax, b.expand_as(x))\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_old_solve_batched_broadcasting(self, device, dtype):\n from numpy.linalg import solve\n\n def run_test(A_dims, b_dims):\n A_matrix_size = A_dims[-1]\n A_batch_dims = A_dims[:-2]\n b, A = self.solve_test_helper((A_matrix_size,) + A_batch_dims, b_dims, device, dtype)\n x, _ = torch.solve(b, A)\n x_exp = solve(A.cpu().numpy(), b.cpu().numpy())\n self.assertEqual(x, x_exp)\n\n # test against numpy.linalg.solve\n run_test((2, 1, 3, 4, 4), (2, 1, 3, 4, 6)) # no broadcasting\n run_test((2, 1, 3, 4, 4), (4, 6)) # broadcasting b\n run_test((4, 4), (2, 1, 3, 4, 2)) # broadcasting A\n run_test((1, 3, 1, 4, 4), (2, 1, 3, 4, 5)) # broadcasting A & b\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_old_solve_errors_and_warnings(self, device, dtype):\n # dtypes should be safely castable\n a = torch.eye(2, dtype=dtype, device=device)\n b = torch.randn(2, 1, dtype=dtype, device=device)\n out = torch.empty(0, dtype=torch.int, device=device)\n lu = torch.empty(0, dtype=dtype, device=device)\n with self.assertRaisesRegex(RuntimeError, \"but got solution with dtype Int\"):\n torch.solve(b, a, out=(out, lu))\n\n out = torch.empty(0, dtype=dtype, device=device)\n lu = torch.empty(0, dtype=torch.int, device=device)\n with self.assertRaisesRegex(RuntimeError, \"but got lu with dtype Int\"):\n torch.solve(b, a, out=(out, lu))\n\n # device should match\n if torch.cuda.is_available():\n wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'\n out = torch.empty(0, dtype=dtype, device=wrong_device)\n lu = torch.empty_like(a)\n with self.assertRaisesRegex(RuntimeError, \"tensors to be on the same device\"):\n torch.solve(b, a, out=(out, lu))\n out = torch.empty(0, dtype=dtype, device=device)\n lu = torch.empty_like(a).to(wrong_device)\n with self.assertRaisesRegex(RuntimeError, \"tensors to be on the same device\"):\n torch.solve(b, a, out=(out, lu))\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)\n @precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4})\n def test_tensorsolve(self, device, dtype):\n def run_test(a_shape, dims):\n a = torch.randn(a_shape, dtype=dtype, device=device)\n b = torch.randn(a_shape[:2], dtype=dtype, device=device)\n result = 
torch.linalg.tensorsolve(a, b, dims=dims)\n expected = np.linalg.tensorsolve(a.cpu().numpy(), b.cpu().numpy(), axes=dims)\n self.assertEqual(result, expected)\n\n # check the out= variant\n out = torch.empty_like(result)\n ans = torch.linalg.tensorsolve(a, b, dims=dims, out=out)\n self.assertEqual(ans, out)\n self.assertEqual(ans, result)\n\n a_shapes = [(2, 3, 6), (3, 4, 4, 3)]\n dims = [None, (0, 2)]\n for a_shape, d in itertools.product(a_shapes, dims):\n run_test(a_shape, d)\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)\n def test_tensorsolve_empty(self, device, dtype):\n # Check for empty inputs. NumPy does not work for these cases.\n a = torch.empty(0, 0, 1, 2, 3, 0, dtype=dtype, device=device)\n b = torch.empty(a.shape[:2], dtype=dtype, device=device)\n x = torch.linalg.tensorsolve(a, b)\n self.assertEqual(torch.tensordot(a, x, dims=len(x.shape)), b)\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)\n @precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4})\n def test_tensorsolve_non_contiguous(self, device, dtype):\n def run_test_permuted(a_shape, dims):\n # check for permuted / transposed inputs\n a = torch.randn(a_shape, dtype=dtype, device=device)\n a = a.movedim((0, 2), (-2, -1))\n self.assertFalse(a.is_contiguous())\n b = torch.randn(a.shape[:2], dtype=dtype, device=device)\n b = b.t()\n self.assertFalse(b.is_contiguous())\n result = torch.linalg.tensorsolve(a, b, dims=dims)\n expected = np.linalg.tensorsolve(a.cpu().numpy(), b.cpu().numpy(), axes=dims)\n self.assertEqual(result, expected)\n\n def run_test_skipped_elements(a_shape, dims):\n # check for inputs with skipped elements\n a = torch.randn(a_shape, dtype=dtype, device=device)\n a = a[::2]\n self.assertFalse(a.is_contiguous())\n b = torch.randn(a_shape[:2], dtype=dtype, device=device)\n b = b[::2]\n self.assertFalse(b.is_contiguous())\n result = torch.linalg.tensorsolve(a, b, dims=dims)\n expected = np.linalg.tensorsolve(a.cpu().numpy(), b.cpu().numpy(), axes=dims)\n self.assertEqual(result, expected)\n\n # check non-contiguous out\n out = torch.empty(2 * result.shape[0], *result.shape[1:], dtype=dtype, device=device)[::2]\n self.assertFalse(out.is_contiguous())\n ans = torch.linalg.tensorsolve(a, b, dims=dims, out=out)\n self.assertEqual(ans, out)\n self.assertEqual(ans, result)\n\n a_shapes = [(2, 3, 6), (3, 4, 4, 3)]\n dims = [None, (0, 2)]\n for a_shape, d in itertools.product(a_shapes, dims):\n run_test_permuted(a_shape, d)\n\n a_shapes = [(4, 3, 6), (6, 4, 4, 3)]\n dims = [None, (0, 2)]\n for a_shape, d in itertools.product(a_shapes, dims):\n run_test_skipped_elements(a_shape, d)\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32)\n def test_tensorsolve_errors_and_warnings(self, device, dtype):\n # tensorsolve expects the input that can be reshaped to a square matrix\n a = torch.eye(2 * 3 * 4, dtype=dtype, device=device).reshape((2 * 3, 4, 2, 3, 4))\n b = torch.randn(8, 4, dtype=dtype, device=device)\n self.assertTrue(np.prod(a.shape[2:]) != np.prod(b.shape))\n with self.assertRaisesRegex(RuntimeError, r'Expected self to satisfy the requirement'):\n torch.linalg.tensorsolve(a, b)\n\n # if non-empty out tensor with wrong shape is passed a warning is given\n out = torch.empty_like(a)\n b = torch.randn(6, 4, dtype=dtype, device=device)\n with warnings.catch_warnings(record=True) as w:\n # Trigger warning\n torch.linalg.tensorsolve(a, b, out=out)\n # Check warning occurs\n 
self.assertEqual(len(w), 1)\n self.assertTrue(\"An output with one or more elements was resized\" in str(w[-1].message))\n\n # dtypes should be safely castable\n out = torch.empty_like(a).to(torch.int)\n with self.assertRaisesRegex(RuntimeError, \"but got result with dtype Int\"):\n torch.linalg.tensorsolve(a, b, out=out)\n\n # device should match\n if torch.cuda.is_available():\n wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'\n out = torch.empty(0, dtype=dtype, device=wrong_device)\n with self.assertRaisesRegex(RuntimeError, \"tensors to be on the same device\"):\n torch.linalg.tensorsolve(a, b, out=out)\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n @precisionOverride({torch.float: 1e-3, torch.cfloat: 1e-3})\n def test_tensorinv(self, device, dtype):\n\n def run_test(a_shape, ind):\n a = torch.randn(a_shape, dtype=dtype, device=device)\n a_numpy = a.cpu().numpy()\n result = torch.linalg.tensorinv(a, ind=ind)\n expected = np.linalg.tensorinv(a_numpy, ind=ind)\n self.assertEqual(result, expected)\n\n # check the out= variant\n out = torch.empty_like(result)\n ans = torch.linalg.tensorinv(a, ind=ind, out=out)\n self.assertEqual(ans, out)\n self.assertEqual(ans, result)\n\n # compare to NumPy output\n run_test((12, 3, 4), ind=1)\n run_test((3, 8, 24), ind=2)\n run_test((18, 3, 3, 2), ind=1)\n run_test((1, 4, 2, 2), ind=2)\n run_test((2, 3, 5, 30), ind=3)\n run_test((24, 2, 2, 3, 2), ind=1)\n run_test((3, 4, 2, 3, 2), ind=2)\n run_test((1, 2, 3, 2, 3), ind=3)\n run_test((3, 2, 1, 2, 12), ind=4)\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n @precisionOverride({torch.float: 1e-3, torch.cfloat: 1e-3})\n def test_tensorinv_non_contiguous(self, device, dtype):\n\n def run_test(a_shape, ind):\n # check for permuted (transposed) case\n a = torch.randn(a_shape, dtype=dtype, device=device)\n permutation = list(range(0, a.ndim))\n a = a.permute(permutation[ind:] + permutation[:ind])\n self.assertFalse(a.is_contiguous())\n a_numpy = a.cpu().numpy()\n result = torch.linalg.tensorinv(a, ind=a.ndim - ind)\n expected = np.linalg.tensorinv(a_numpy, ind=a.ndim - ind)\n self.assertEqual(result, expected)\n\n def run_test_skipped_elements(a_shape, ind):\n # check for input with skipped elements\n a = torch.randn(a_shape, dtype=dtype, device=device)\n a = a[::2]\n self.assertFalse(a.is_contiguous())\n a_numpy = a.cpu().numpy()\n result = torch.linalg.tensorinv(a, ind=ind)\n expected = np.linalg.tensorinv(a_numpy, ind=ind)\n self.assertEqual(result, expected)\n\n # check non-contiguous out\n out = torch.empty(2 * result.shape[0], *result.shape[1:], dtype=dtype, device=device)[::2]\n self.assertFalse(out.is_contiguous())\n ans = torch.linalg.tensorinv(a, ind=ind, out=out)\n self.assertEqual(ans, out)\n self.assertEqual(ans, result)\n\n run_test((12, 3, 4), ind=1)\n run_test((3, 8, 24), ind=2)\n run_test((18, 3, 3, 2), ind=1)\n run_test((1, 4, 2, 2), ind=2)\n run_test((2, 3, 5, 30), ind=3)\n run_test((24, 2, 2, 3, 2), ind=1)\n run_test((3, 4, 2, 3, 2), ind=2)\n run_test((1, 2, 3, 2, 3), ind=3)\n run_test((3, 2, 1, 2, 12), ind=4)\n\n run_test_skipped_elements((12, 3, 2), ind=1)\n run_test_skipped_elements((18, 3, 3, 1), ind=1)\n\n @skipMeta # See https://github.com/pytorch/pytorch/issues/53739\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_tensorinv_empty(self, device, 
dtype):\n for ind in range(1, 4):\n # Check for empty inputs. NumPy does not work for these cases.\n a = torch.empty(0, 0, 1, 2, 3, 0, dtype=dtype, device=device)\n a_inv = torch.linalg.tensorinv(a, ind=ind)\n self.assertEqual(a_inv.shape, a.shape[ind:] + a.shape[:ind])\n\n @skipMeta # See https://github.com/pytorch/pytorch/issues/53739\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_tensorinv_errors_and_warnings(self, device, dtype):\n\n def check_shape(a_shape, ind):\n # tensorinv requires the input to satisfy\n # prod(a.shape[ind:]) == prod(a.shape[:ind])\n a = torch.randn(a_shape, dtype=dtype, device=device)\n with self.assertRaisesRegex(RuntimeError, \"Expected self to satisfy the requirement\"):\n torch.linalg.tensorinv(a, ind=ind)\n\n def check_ind(a_shape, ind):\n a = torch.randn(a_shape, dtype=dtype, device=device)\n with self.assertRaisesRegex(RuntimeError, \"Expected a strictly positive integer\"):\n torch.linalg.tensorinv(a, ind=ind)\n\n def check_out(a_shape, ind):\n # if non-empty out tensor with wrong shape is passed a warning is given\n a = torch.randn(a_shape, dtype=dtype, device=device)\n out = torch.empty_like(a)\n with warnings.catch_warnings(record=True) as w:\n # Trigger warning\n torch.linalg.tensorinv(a, ind=ind, out=out)\n # Check warning occurs\n self.assertEqual(len(w), 1)\n self.assertTrue(\"An output with one or more elements was resized\" in str(w[-1].message))\n\n # dtypes should be safely castable\n out = torch.empty(0, dtype=torch.int, device=device)\n with self.assertRaisesRegex(RuntimeError, \"but got result with dtype Int\"):\n torch.linalg.tensorinv(a, ind=ind, out=out)\n\n # device should match\n if torch.cuda.is_available():\n wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'\n out = torch.empty(0, dtype=dtype, device=wrong_device)\n with self.assertRaisesRegex(RuntimeError, \"tensors to be on the same device\"):\n torch.linalg.tensorinv(a, ind=ind, out=out)\n\n # test for invalid shape\n check_shape((2, 3, 4), ind=1)\n check_shape((1, 2, 3, 4), ind=3)\n\n # test for invalid ind\n check_ind((12, 3, 4), ind=-1)\n check_ind((18, 3, 3, 2), ind=0)\n\n # test for invalid out tensor\n check_out((12, 3, 4), ind=1)\n check_out((3, 8, 24), ind=2)\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_tensorinv_singular_input(self, device, dtype):\n\n def check_singular_input(a_shape, ind):\n prod_ind_end = np.prod(a_shape[ind:])\n a = torch.eye(prod_ind_end, dtype=dtype, device=device)\n a[-1, -1] = 0 # Now `a` is singular\n a = a.reshape(a_shape)\n with self.assertRaisesRegex(RuntimeError, \"Failed to invert the input tensor, because it is singular\"):\n torch.linalg.tensorinv(a, ind=ind)\n\n # test for non-invertible input\n check_singular_input((12, 3, 4), ind=1)\n check_singular_input((3, 6, 18), ind=2)\n\n def _test_dot_vdot_vs_numpy(self, device, dtype, torch_fn, np_fn):\n def check(x, y):\n # Compare with numpy\n res = torch_fn(x, y)\n if x.dtype == torch.bfloat16:\n ref = torch.from_numpy(np.array(np_fn(x.cpu().float().numpy(), y.cpu().float().numpy())))\n else:\n ref = torch.from_numpy(np.array(np_fn(x.cpu().numpy(), y.cpu().numpy())))\n if res.dtype == torch.bfloat16:\n self.assertEqual(res.cpu(), ref.bfloat16())\n else:\n self.assertEqual(res.cpu(), ref)\n\n # Test out variant\n out = torch.empty_like(res)\n torch_fn(x, y, out=out)\n self.assertEqual(out, res)\n\n # Empty\n x = 
torch.tensor([], dtype=dtype, device=device)\n y = torch.tensor([], dtype=dtype, device=device)\n check(x, y)\n\n # Contiguous\n x = 0.1 * torch.randn(5000, dtype=dtype, device=device)\n y = 0.1 * torch.randn(5000, dtype=dtype, device=device)\n check(x, y)\n\n # 0 strided\n y = 0.1 * torch.randn(1, dtype=dtype, device=device).expand(5000)\n check(x, y)\n\n # 2 strided\n check(x[::2], y[::2])\n\n @dtypes(torch.float, torch.cfloat, torch.bfloat16)\n @dtypesIfCUDA(torch.float, torch.cfloat)\n @precisionOverride({torch.cfloat: 1e-4, torch.float32: 5e-5, torch.bfloat16: 1e-0})\n def test_dot_vs_numpy(self, device, dtype):\n self._test_dot_vdot_vs_numpy(device, dtype, torch.dot, np.dot)\n\n @dtypes(torch.float, torch.cfloat)\n @precisionOverride({torch.cfloat: 1e-4, torch.float32: 5e-5})\n def test_vdot_vs_numpy(self, device, dtype):\n self._test_dot_vdot_vs_numpy(device, dtype, torch.vdot, np.vdot)\n\n def _test_dot_vdot_invalid_args(self, device, torch_fn, complex_dtypes=False):\n def check(x, y, regex):\n with self.assertRaisesRegex(RuntimeError, regex):\n torch_fn(x, y)\n\n if complex_dtypes:\n x = torch.randn(1, dtype=torch.cfloat, device=device)\n y = torch.randn(3, dtype=torch.cdouble, device=device)\n else:\n x = torch.randn(1, dtype=torch.float, device=device)\n y = torch.randn(3, dtype=torch.double, device=device)\n\n check(x, y, 'dot : expected both vectors to have same dtype')\n check(x.reshape(1, 1), y, '1D tensors expected')\n check(x.expand(9), y.to(x.dtype), 'inconsistent tensor size')\n\n if self.device_type != 'cpu':\n x_cpu = x.expand(3).cpu()\n check(x_cpu, y.to(x.dtype), 'Expected all tensors to be on the same device')\n\n @onlyNativeDeviceTypes\n def test_vdot_invalid_args(self, device):\n self._test_dot_vdot_invalid_args(device, torch.vdot)\n self._test_dot_vdot_invalid_args(device, torch.vdot, complex_dtypes=True)\n\n @onlyNativeDeviceTypes\n def test_dot_invalid_args(self, device):\n self._test_dot_vdot_invalid_args(device, torch.dot)\n self._test_dot_vdot_invalid_args(device, torch.dot, complex_dtypes=True)\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_matrix_rank(self, device, dtype):\n matrix_rank = torch.linalg.matrix_rank\n\n def run_test(shape0, shape1, batch):\n a = torch.randn(*batch, shape0, shape1, dtype=dtype, device=device)\n rank_a = matrix_rank(a)\n\n self.assertEqual(rank_a, matrix_rank(a.mH))\n aaH = torch.matmul(a, a.mH)\n rank_aaH = matrix_rank(aaH)\n rank_aaH_hermitian = matrix_rank(aaH, hermitian=True)\n self.assertEqual(rank_aaH, rank_aaH_hermitian)\n aHa = torch.matmul(a.mH, a)\n self.assertEqual(matrix_rank(aHa), matrix_rank(aHa, hermitian=True))\n\n # check against NumPy\n self.assertEqual(rank_a, np.linalg.matrix_rank(a.cpu().numpy()))\n self.assertEqual(matrix_rank(a, 0.01), np.linalg.matrix_rank(a.cpu().numpy(), 0.01))\n\n self.assertEqual(rank_aaH, np.linalg.matrix_rank(aaH.cpu().numpy()))\n self.assertEqual(matrix_rank(aaH, 0.01), np.linalg.matrix_rank(aaH.cpu().numpy(), 0.01))\n\n # hermitian flag for NumPy was added in 1.14.0\n if np.lib.NumpyVersion(np.__version__) >= '1.14.0':\n self.assertEqual(rank_aaH_hermitian,\n np.linalg.matrix_rank(aaH.cpu().numpy(), hermitian=True))\n self.assertEqual(matrix_rank(aaH, 0.01, True),\n np.linalg.matrix_rank(aaH.cpu().numpy(), 0.01, True))\n\n # check out= variant\n out = torch.empty(a.shape[:-2], dtype=torch.int64, device=device)\n ans = matrix_rank(a, out=out)\n self.assertEqual(ans, out)\n self.assertEqual(ans, 
rank_a)\n\n shapes = (3, 13)\n batches = ((), (0, ), (4, ), (3, 5, ))\n for (shape0, shape1), batch in zip(itertools.product(shapes, reversed(shapes)), batches):\n run_test(shape0, shape1, batch)\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_matrix_rank_atol(self, device, dtype):\n\n def run_test_atol(shape0, shape1, batch):\n a = make_tensor((*batch, shape0, shape1), dtype=dtype, device=device)\n # Check against NumPy output\n # Test float tol, and specific value for each matrix\n tolerances = [float(torch.rand(1)), ]\n # Test different types of tol tensor\n for tol_type in all_types():\n tolerances.append(make_tensor(a.shape[:-2], dtype=tol_type, device=device, low=0))\n # Test broadcasting of tol\n if a.ndim > 2:\n tolerances.append(make_tensor(a.shape[-3], dtype=torch.float32, device=device, low=0))\n for tol in tolerances:\n actual = torch.linalg.matrix_rank(a, atol=tol)\n actual_tol = torch.linalg.matrix_rank(a, tol=tol)\n self.assertEqual(actual, actual_tol)\n numpy_tol = tol if isinstance(tol, float) else tol.cpu().numpy()\n expected = np.linalg.matrix_rank(a.cpu().numpy(), tol=numpy_tol)\n self.assertEqual(actual, expected)\n\n shapes = (3, 13)\n batches = ((), (0, ), (4, ), (3, 5, ))\n for (shape0, shape1), batch in zip(itertools.product(shapes, reversed(shapes)), batches):\n run_test_atol(shape0, shape1, batch)\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float64)\n def test_matrix_rank_atol_rtol(self, device, dtype):\n from torch.testing._internal.common_utils import make_fullrank_matrices_with_distinct_singular_values\n\n # creates a matrix with singular values arange(1/(n+1), 1, 1/(n+1)) and rank=n\n n = 9\n a = make_fullrank_matrices_with_distinct_singular_values(n, n, dtype=dtype, device=device)\n\n # test float and tensor variants\n for tol_value in [0.51, torch.tensor(0.51, device=device)]:\n # using rtol (relative tolerance) takes into account the largest singular value (0.9 in this case)\n result = torch.linalg.matrix_rank(a, rtol=tol_value)\n self.assertEqual(result, 5) # there are 5 singular values above 0.9*0.51=0.459\n\n # atol is used directly to compare with singular values\n result = torch.linalg.matrix_rank(a, atol=tol_value)\n self.assertEqual(result, 4) # there are 4 singular values above 0.51\n\n # when both are specified the maximum tolerance is used\n result = torch.linalg.matrix_rank(a, atol=tol_value, rtol=tol_value)\n self.assertEqual(result, 4) # there are 4 singular values above max(0.51, 0.9*0.51)\n\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_matrix_rank_empty(self, device, dtype):\n matrix_rank = torch.linalg.matrix_rank\n\n # NumPy doesn't work for input with no elements\n def run_test(shape0, shape1, batch):\n a = torch.randn(*batch, shape0, shape1, dtype=dtype, device=device)\n rank_a = matrix_rank(a)\n expected = torch.zeros(batch, dtype=torch.int64, device=device)\n\n self.assertEqual(rank_a, matrix_rank(a.mH))\n\n aaH = torch.matmul(a, a.mH)\n rank_aaH = matrix_rank(aaH)\n rank_aaH_hermitian = matrix_rank(aaH, hermitian=True)\n self.assertEqual(rank_aaH, rank_aaH_hermitian)\n\n aHa = torch.matmul(a.mH, a)\n self.assertEqual(matrix_rank(aHa), matrix_rank(aHa, hermitian=True))\n\n self.assertEqual(rank_a, expected)\n self.assertEqual(matrix_rank(a, 0.01), expected)\n\n self.assertEqual(rank_aaH, expected)\n self.assertEqual(matrix_rank(aaH, 0.01), expected)\n\n 
self.assertEqual(rank_aaH_hermitian, expected)\n self.assertEqual(matrix_rank(aaH, 0.01, True), expected)\n\n batches = ((), (4, ), (3, 5, ))\n for batch in batches:\n run_test(0, 0, batch)\n run_test(0, 3, batch)\n run_test(3, 0, batch)\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_matrix_rank_out_errors_and_warnings(self, device, dtype):\n # dtypes should be safely castable\n a = torch.eye(2, dtype=dtype, device=device)\n out = torch.empty(0, dtype=torch.bool, device=device)\n with self.assertRaisesRegex(RuntimeError, \"but got result with dtype Bool\"):\n torch.linalg.matrix_rank(a, out=out)\n\n # device should match\n if torch.cuda.is_available():\n wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'\n out = torch.empty(0, dtype=dtype, device=wrong_device)\n with self.assertRaisesRegex(RuntimeError, \"tensors to be on the same device\"):\n torch.linalg.matrix_rank(a, out=out)\n\n # if out tensor with wrong shape is passed a warning is given\n with warnings.catch_warnings(record=True) as w:\n out = torch.empty(3, dtype=dtype, device=device)\n # Trigger warning\n torch.linalg.matrix_rank(a, out=out)\n # Check warning occurs\n self.assertEqual(len(w), 1)\n self.assertTrue(\"An output with one or more elements was resized\" in str(w[-1].message))\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_matrix_rank_basic(self, device, dtype):\n matrix_rank = torch.linalg.matrix_rank\n\n a = torch.eye(10, dtype=dtype, device=device)\n self.assertEqual(matrix_rank(a).item(), 10)\n self.assertEqual(matrix_rank(a, hermitian=True).item(), 10)\n\n a[5, 5] = 0\n self.assertEqual(matrix_rank(a).item(), 9)\n self.assertEqual(matrix_rank(a, hermitian=True).item(), 9)\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_old_matrix_rank(self, device, dtype):\n a = torch.eye(10, dtype=dtype, device=device)\n self.assertEqual(torch.matrix_rank(a).item(), 10)\n self.assertEqual(torch.matrix_rank(a, True).item(), 10)\n\n a[5, 5] = 0\n self.assertEqual(torch.matrix_rank(a).item(), 9)\n self.assertEqual(torch.matrix_rank(a, True).item(), 9)\n\n a = torch.randn(24, 42, dtype=dtype, device=device)\n self.assertEqual(torch.matrix_rank(a), torch.matrix_rank(a.t()))\n aaT = torch.mm(a, a.conj().t())\n self.assertEqual(torch.matrix_rank(aaT), torch.matrix_rank(aaT, True))\n aTa = torch.mm(a.conj().t(), a)\n self.assertEqual(torch.matrix_rank(aTa), torch.matrix_rank(aTa, True))\n\n a = torch.randn(35, 75, dtype=dtype, device=device)\n self.assertEqual(torch.matrix_rank(a), np.linalg.matrix_rank(a.cpu().numpy()))\n self.assertEqual(torch.matrix_rank(a, 0.01), np.linalg.matrix_rank(a.cpu().numpy(), 0.01))\n\n aaT = torch.mm(a, a.conj().t())\n self.assertEqual(torch.matrix_rank(aaT), np.linalg.matrix_rank(aaT.cpu().numpy()))\n self.assertEqual(torch.matrix_rank(aaT, 0.01), np.linalg.matrix_rank(aaT.cpu().numpy(), 0.01))\n\n if np.lib.NumpyVersion(np.__version__) >= '1.14.0':\n self.assertEqual(torch.matrix_rank(aaT, True), np.linalg.matrix_rank(aaT.cpu().numpy(), True))\n self.assertEqual(torch.matrix_rank(aaT, 0.01, True), np.linalg.matrix_rank(aaT.cpu().numpy(), 0.01, True))\n\n @onlyNativeDeviceTypes\n @dtypes(torch.double)\n # This tests only the cases where torch.chain_matmul differs from torch.linalg.multi_dot which this is an \"alias\" for.\n def test_chain_matmul(self, 
device, dtype):\n # chain_matmul accepts a single input tensor while multi_dot does not\n t = make_tensor((2, 2), device, dtype)\n self.assertEqual(t, torch.chain_matmul(t))\n with self.assertRaisesRegex(RuntimeError, r\"chain_matmul\\(\\): Expected one or more matrices\"):\n torch.chain_matmul()\n\n # chain_matmul expects all tensors to be 2D whereas multi_dot allows the first and last tensors to\n # be either 1D or 2D\n with self.assertRaisesRegex(RuntimeError, r\"Tensor dimension is 1, expected 2 instead\"):\n torch.chain_matmul(make_tensor(1, device, dtype), make_tensor(1, device, dtype))\n\n @onlyNativeDeviceTypes\n @dtypes(torch.double, torch.cdouble)\n def test_multi_dot(self, device, dtype):\n def check(*shapes, noncontiguous=False):\n tensors = [make_tensor(shape, device, dtype, noncontiguous=noncontiguous) for shape in shapes]\n np_arrays = [tensor.cpu().numpy() for tensor in tensors]\n res = torch.linalg.multi_dot(tensors).cpu()\n ref = torch.from_numpy(np.array(np.linalg.multi_dot(np_arrays)))\n self.assertEqual(res, ref)\n\n # test for inputs with empty dimensions\n check([0], [0])\n check([2], [2, 0])\n check([1, 0], [0])\n check([0, 2], [2, 1])\n check([2, 2], [2, 0])\n check([2, 0], [0, 3])\n check([0, 0], [0, 1])\n check([4, 2], [2, 0], [0, 3], [3, 2])\n\n # test variable output shapes\n check([2], [2])\n check([1, 2], [2])\n check([2], [2, 1])\n check([1, 2], [2, 1])\n check([3, 2], [2, 4])\n\n # test multiple input tensors\n check([3], [3, 4], [4, 2], [2, 5], [5])\n check([1, 2], [2, 2], [2, 3], [3, 1])\n\n # test large tensors\n check([10, 100], [100, 5], [5, 50])\n check([10, 20], [20, 30], [30, 5])\n\n # test noncontiguous input\n check([3, 2], [2, 2], [2, 3], [3, 4], noncontiguous=True)\n check([15, 5], [5, 10], [10, 20], [20, 25], noncontiguous=True)\n\n @onlyNativeDeviceTypes\n @dtypes(torch.float)\n def test_multi_dot_errors(self, device, dtype):\n def check(tensors, out, msg):\n with self.assertRaisesRegex(RuntimeError, msg):\n torch.linalg.multi_dot(tensors, out=out)\n\n a = make_tensor(2, device, dtype)\n\n check([], None, \"expected at least 2 tensors\")\n check([a], None, \"expected at least 2 tensors\")\n\n check([torch.tensor(1, device=device, dtype=dtype), a], None, \"the first tensor must be 1D or 2D\")\n check([a, torch.tensor(1, device=device, dtype=dtype)], None, \"the last tensor must be 1D or 2D\")\n\n check([a, a, a], None, \"tensor 1 must be 2D\")\n check([a, make_tensor((2, 2, 2), device, dtype), a], None, \"tensor 1 must be 2D\")\n\n check([a, make_tensor(2, device, torch.double)], None, \"all tensors must have be the same dtype\")\n check([a, a], torch.empty(0, device=device, dtype=torch.double), \"expected out tensor to have dtype\")\n\n if self.device_type == 'cuda':\n check([a, make_tensor(2, 'cpu', dtype)], None, \"all tensors must be on the same device\")\n check([a, a], torch.empty(0, dtype=dtype), \"expected out tensor to be on device\")\n\n check([a, make_tensor(3, device, dtype)], None, \"cannot be multiplied\")\n check([a, make_tensor((3, 2), device, dtype), a], None, \"cannot be multiplied\")\n\n @precisionOverride({torch.float32: 5e-6, torch.complex64: 5e-6})\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_qr(self, device, dtype):\n def run_test(tensor_dims, some):\n A = torch.randn(*tensor_dims, dtype=dtype, device=device)\n Q, R = torch.qr(A, some=some)\n\n # Check0: Q[-2:] = (m, n_columns), R[-2:] = (n_columns, n)\n m, n = tensor_dims[-2:]\n n_columns 
= m if (not some) and m > n else min(m, n)\n self.assertEqual(Q.size(-2), m)\n self.assertEqual(R.size(-1), n)\n self.assertEqual(Q.size(-1), n_columns)\n\n A_ = A.cpu().numpy()\n Q_ = Q.cpu().numpy()\n R_ = R.cpu().numpy()\n\n # Check1: A = QR\n self.assertEqual(A_, np.matmul(Q_, R_))\n\n # Check2: A = QR (with out)\n Q_out, R_out = torch.full_like(Q, math.nan), torch.full_like(R, math.nan)\n torch.qr(A, some=some, out=(Q_out, R_out))\n Q_out_ = Q_out.cpu().numpy()\n R_out_ = R_out.cpu().numpy()\n self.assertEqual(A_, np.matmul(Q_out_, R_out_))\n\n # Check3: Q == Q_out, R == R_out\n self.assertEqual(Q_, Q_out_)\n self.assertEqual(R_, R_out_)\n\n # Check4: Q^{T}Q = I, triu(R) = R\n eye = torch.eye(n_columns, device=device, dtype=dtype).expand(Q.shape[:-2] + (n_columns, n_columns)).cpu().numpy()\n self.assertEqual(np.matmul(Q_.swapaxes(-1, -2).conj(), Q_), eye)\n self.assertEqual(R.triu(), R)\n\n tensor_dims_list = [(0, 5), (0, 0), (5, 0), # Empty Tensors\n (2, 1, 0, 5), (2, 1, 0, 0), (2, 1, 5, 0), (2, 0, 5, 5), # Batched empty Tensors\n (3, 5), (5, 5), (5, 3), # Single matrix\n (7, 3, 5), (7, 5, 5), (7, 5, 3), # 3-dim Tensors\n (7, 5, 3, 5), (7, 5, 5, 5), (7, 5, 5, 3)] # 4-dim Tensors\n for tensor_dims, some in itertools.product(tensor_dims_list, [True, False]):\n run_test(tensor_dims, some)\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)\n def test_qr_vs_numpy(self, device, dtype):\n \"\"\"\n test torch.linalg.qr vs numpy.linalg.qr\n \"\"\"\n sizes_to_test = [\n (7, 5),\n (5, 7),\n (5, 0), # empty\n (0, 5), # empty\n ]\n for size in sizes_to_test:\n t = torch.randn(size, device=device, dtype=dtype)\n np_t = t.cpu().numpy()\n for mode in ['reduced', 'complete']:\n exp_q, exp_r = np.linalg.qr(np_t, mode=mode)\n q, r = torch.linalg.qr(t, mode=mode)\n self.assertEqual(q, exp_q)\n self.assertEqual(r, exp_r)\n #\n # for mode='r' we need a special logic because numpy returns only r\n exp_r = np.linalg.qr(np_t, mode='r')\n q, r = torch.linalg.qr(t, mode='r')\n # check that q is empty\n self.assertEqual(q.shape, (0,))\n self.assertEqual(q.dtype, t.dtype)\n self.assertEqual(q.device, t.device)\n # check r\n self.assertEqual(r, exp_r)\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float)\n def test_linalg_qr_autograd_errors(self, device, dtype):\n # torch.linalg.qr(mode='r') returns only 'r' and discards 'q', but\n # without 'q' you cannot compute the backward pass. Check that\n # linalg_qr_backward complains cleanly in that case.\n inp = torch.randn((5, 7), device=device, dtype=dtype, requires_grad=True)\n q, r = torch.linalg.qr(inp, mode='r')\n self.assertEqual(q.shape, (0,)) # empty tensor\n b = torch.sum(r)\n with self.assertRaisesRegex(RuntimeError,\n \"The derivative of qr is not implemented when mode='r'\"):\n b.backward()\n #\n inp = torch.randn((7, 5), device=device, dtype=dtype, requires_grad=True)\n q, r = torch.linalg.qr(inp, mode='complete')\n b = torch.sum(r)\n with self.assertRaisesRegex(RuntimeError,\n \"The derivative of qr is not implemented when mode='complete' and nrows > ncols\"):\n b.backward()\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)\n def test_qr_batched(self, device, dtype):\n \"\"\"\n test torch.linalg.qr vs numpy.linalg.qr. 
We need some special logic\n because numpy does not support batched qr\n \"\"\"\n def np_qr_batched(a, mode):\n \"\"\"poor's man batched version of np.linalg.qr\"\"\"\n all_q = []\n all_r = []\n for matrix in a:\n result = np.linalg.qr(matrix, mode=mode)\n if mode == 'r':\n all_r.append(result)\n else:\n q, r = result\n all_q.append(q)\n all_r.append(r)\n if mode == 'r':\n return np.array(all_r)\n else:\n return np.array(all_q), np.array(all_r)\n\n t = torch.randn((3, 7, 5), device=device, dtype=dtype)\n np_t = t.cpu().numpy()\n for mode in ['reduced', 'complete']:\n exp_q, exp_r = np_qr_batched(np_t, mode=mode)\n q, r = torch.linalg.qr(t, mode=mode)\n self.assertEqual(q, exp_q)\n self.assertEqual(r, exp_r)\n # for mode='r' we need a special logic because numpy returns only r\n exp_r = np_qr_batched(np_t, mode='r')\n q, r = torch.linalg.qr(t, mode='r')\n # check that q is empty\n self.assertEqual(q.shape, (0,))\n self.assertEqual(q.dtype, t.dtype)\n self.assertEqual(q.device, t.device)\n # check r\n self.assertEqual(r, exp_r)\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)\n def test_qr_out(self, device, dtype):\n \"\"\"\n test torch.linalg.qr(out=...) vs torch.lingalg.qr\n \"\"\"\n sizes_to_test = [\n (7, 5),\n (5, 7),\n (5, 0), # empty\n (0, 5), # empty\n ]\n for size in sizes_to_test:\n t = torch.randn(size, device=device, dtype=dtype)\n np_t = t.cpu().numpy()\n for mode in ['reduced', 'complete', 'r']:\n q, r = torch.linalg.qr(t, mode=mode)\n out = (torch.empty((0), dtype=dtype, device=device),\n torch.empty((0), dtype=dtype, device=device))\n q2, r2 = torch.linalg.qr(t, mode=mode, out=out)\n self.assertIs(q2, out[0])\n self.assertIs(r2, out[1])\n self.assertEqual(q2, q)\n self.assertEqual(r2, r)\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float)\n def test_qr_error_cases(self, device, dtype):\n t1 = torch.randn(5, device=device, dtype=dtype)\n with self.assertRaisesRegex(RuntimeError, 'qr input should have at least 2 dimensions, but has 1 dimensions instead'):\n torch.linalg.qr(t1)\n t2 = torch.randn((5, 7), device=device, dtype=dtype)\n with self.assertRaisesRegex(RuntimeError, \"qr received unrecognized mode 'hello'\"):\n torch.linalg.qr(t2, mode='hello')\n\n def _check_einsum(self, *args, np_args=None):\n if np_args is None:\n np_args = [arg.cpu().numpy() if isinstance(arg, torch.Tensor) else arg for arg in args]\n res = torch.einsum(*args)\n ref = np.einsum(*np_args)\n self.assertEqual(torch.from_numpy(np.array(ref)), res)\n\n @dtypes(torch.double, torch.cdouble)\n def test_einsum(self, device, dtype):\n # Test cases from https://gist.github.com/rockt/15ee013889d65342088e9260a377dc8f\n x = make_tensor((5,), device, dtype)\n y = make_tensor((7,), device, dtype)\n A = make_tensor((3, 5), device, dtype)\n B = make_tensor((2, 5), device, dtype)\n C = make_tensor((2, 3, 5), device, dtype)\n D = make_tensor((2, 5, 7), device, dtype)\n E = make_tensor((7, 9), device, dtype)\n F = make_tensor((2, 3, 3, 5), device, dtype)\n G = make_tensor((5, 4, 6), device, dtype)\n H = make_tensor((4, 4), device, dtype)\n I = make_tensor((2, 3, 2), device, dtype)\n\n # Vector operations\n self._check_einsum('i->', x) # sum\n self._check_einsum('i,i->', x, x) # dot\n self._check_einsum('i,i->i', x, x) # vector element-wisem mul\n self._check_einsum('i,j->ij', x, y) # outer\n\n # Matrix operations\n self._check_einsum(\"ij->ji\", A) # transpose\n self._check_einsum(\"ij->j\", A) # row sum\n self._check_einsum(\"ij->i\", A) # col 
sum\n self._check_einsum(\"ij,ij->ij\", A, A) # matrix element-wise mul\n self._check_einsum(\"ij,j->i\", A, x) # matrix vector multiplication\n self._check_einsum(\"ij,kj->ik\", A, B) # matmul\n self._check_einsum(\"ij,ab->ijab\", A, E) # matrix outer product\n\n # Tensor operations\n self._check_einsum(\"Aij,Ajk->Aik\", C, D) # batch matmul\n self._check_einsum(\"ijk,jk->i\", C, A) # tensor matrix contraction\n self._check_einsum(\"aij,jk->aik\", D, E) # tensor matrix contraction\n self._check_einsum(\"abCd,dFg->abCFg\", F, G) # tensor tensor contraction\n self._check_einsum(\"ijk,jk->ik\", C, A) # tensor matrix contraction with double indices\n self._check_einsum(\"ijk,jk->ij\", C, A) # tensor matrix contraction with double indices\n self._check_einsum(\"ijk,ik->j\", C, B) # non contiguous\n self._check_einsum(\"ijk,ik->jk\", C, B) # non contiguous with double indices\n\n # Test diagonals\n self._check_einsum(\"ii\", H) # trace\n self._check_einsum(\"ii->i\", H) # diagonal\n self._check_einsum('iji->j', I) # non-contiguous trace\n self._check_einsum('ngrg...->nrg...', make_tensor((2, 1, 3, 1, 4), device, dtype))\n\n # Test ellipsis\n self._check_einsum(\"i...->...\", H)\n self._check_einsum(\"ki,...k->i...\", A.t(), B)\n self._check_einsum(\"k...,jk->...\", A.t(), B)\n self._check_einsum('...ik, ...j -> ...ij', C, x)\n self._check_einsum('Bik,k...j->i...j', C, make_tensor((5, 3), device, dtype))\n self._check_einsum('i...j, ij... -> ...ij', C, make_tensor((2, 5, 2, 3), device, dtype))\n\n # torch.bilinear with noncontiguous tensors\n l = make_tensor((5, 10), device, dtype, noncontiguous=True)\n r = make_tensor((5, 20), device, dtype, noncontiguous=True)\n w = make_tensor((15, 10, 20), device, dtype)\n self._check_einsum(\"bn,anm,bm->ba\", l, w, r)\n\n # with strided tensors\n self._check_einsum(\"bn,Anm,bm->bA\", l[:, ::2], w[:, ::2, ::2], r[:, ::2])\n\n @dtypes(torch.double, torch.cdouble)\n def test_einsum_sublist_format(self, device, dtype):\n x = make_tensor((5,), device, dtype)\n y = make_tensor((7,), device, dtype)\n A = make_tensor((3, 5), device, dtype)\n B = make_tensor((2, 5), device, dtype)\n C = make_tensor((2, 1, 3, 1, 4), device, dtype)\n\n self._check_einsum(x, [0])\n self._check_einsum(x, [0], [])\n self._check_einsum(x, [0], y, [1], [0, 1])\n self._check_einsum(A, [0, 1], [1, 0])\n self._check_einsum(A, [0, 1], x, [1], [0])\n self._check_einsum(A, [0, 1], B, [2, 1])\n self._check_einsum(A, [0, 1], B, [2, 1], [0, 2])\n self._check_einsum(C, [0, 1, 2, 1, Ellipsis], [0, 2, 1, Ellipsis])\n self._check_einsum(A.t(), [0, 1], B, [Ellipsis, 0])\n self._check_einsum(A.t(), [0, 1], B, [Ellipsis, 0], [1, Ellipsis])\n self._check_einsum(A.t(), [0, Ellipsis], B, [1, 0], [Ellipsis])\n\n # torch.bilinear with noncontiguous tensors\n l = make_tensor((5, 10), device, dtype, noncontiguous=True)\n r = make_tensor((5, 20), device, dtype, noncontiguous=True)\n w = make_tensor((15, 10, 20), device, dtype)\n self._check_einsum(l, [40, 41], w, [2, 41, 50], r, [40, 50], [40, 2])\n\n @dtypes(torch.double, torch.cdouble)\n def test_einsum_random(self, device, dtype):\n def convert_label(label):\n if label == ...:\n return '...'\n elif label < 26:\n return chr(ord('A') + label)\n else:\n return chr(ord('a') + label - 26)\n\n def convert_sublist(sublist):\n return ''.join(convert_label(label) for label in sublist)\n\n def test(n=10, # how many tests to generate\n n_labels=5, # how many labels available\n min_ops=1, max_ops=3, # min and max number of operands per test\n min_dims=1, max_dims=3, # min 
and max number of dimensions per operand\n min_size=1, max_size=8, # min and max size of each dimension\n max_out_dim=3, # max number of dimensions for the output\n enable_diagonals=True, # controls if labels can be repeated for diagonals\n ellipsis_prob=0.5, # probability of including ellipsis in operand\n broadcasting_prob=0.1): # probability of turning some dim sizes 1 for broadcasting\n\n all_labels = torch.arange(52)\n\n assert 0 <= n\n assert 0 <= n_labels < len(all_labels)\n assert 0 < min_ops <= max_ops\n assert 0 <= min_dims <= max_dims\n assert 0 <= min_size <= max_size\n assert 0 <= max_out_dim\n assert enable_diagonals or max_dims <= n_labels\n\n for _ in range(n):\n\n # Select a subset of labels for this test and give them random sizes\n possible_labels = all_labels[torch.randperm(len(all_labels))[:n_labels]]\n labels_size = torch.randint_like(all_labels, min_size, max_size + 1)\n ellipsis_shape = torch.randint(min_size, max_size + 1, (max_dims - min_dims,))\n\n operands = []\n sublists = []\n\n ell_size = 0\n valid_labels = set()\n\n # create random input operands\n for _ in range(random.randint(min_ops, max_ops)):\n n_dim = random.randint(min_dims, max_dims)\n labels_idx = torch.ones(len(possible_labels)).multinomial(n_dim, enable_diagonals)\n labels = possible_labels[labels_idx]\n valid_labels.update(labels.tolist())\n shape = labels_size[labels]\n\n # turn some dimensions to size 1 for testing broadcasting\n mask = Binomial(probs=broadcasting_prob).sample((n_dim,))\n broadcast_labels = torch.unique(labels[mask == 1])\n shape[(labels[..., None] == broadcast_labels).any(-1)] = 1\n\n labels = labels.tolist()\n shape = shape.tolist()\n\n # include ellipsis if not all dimensions were assigned a label already\n if n_dim < max_dims and torch.rand(1) < ellipsis_prob:\n ell_num_dim = random.randint(1, max_dims - n_dim)\n ell_size = max(ell_size, ell_num_dim)\n ell_shape = ellipsis_shape[-ell_num_dim:]\n # again, turn some dimensions to size 1 for broadcasting\n mask = Binomial(probs=broadcasting_prob).sample((ell_num_dim,))\n ell_shape[mask == 1] = 1\n ell_index = random.randint(0, n_dim)\n shape[ell_index:ell_index] = ell_shape\n labels.insert(ell_index, ...)\n\n operands.append(make_tensor(shape, device, dtype))\n sublists.append(labels)\n\n # NumPy has a bug with the sublist format so for now we compare PyTorch sublist\n # implementation against the equation format implementation of NumPy\n # see https://github.com/numpy/numpy/issues/10926\n np_operands = [op.cpu().numpy() for op in operands]\n\n # test equation format\n equation = ','.join(convert_sublist(l) for l in sublists)\n self._check_einsum(equation, *operands, np_args=(equation, *np_operands))\n\n # test sublist format\n args = [*itertools.chain(*zip(operands, sublists))]\n self._check_einsum(*args, np_args=(equation, *np_operands))\n\n # generate an explicit output\n out_sublist = []\n num_out_labels = max(0, random.randint(0, min(max_out_dim, len(valid_labels))) - ell_size)\n if num_out_labels > 0:\n out_labels_idx = torch.ones(len(valid_labels)).multinomial(num_out_labels)\n out_sublist = torch.tensor(list(valid_labels))[out_labels_idx].tolist()\n out_sublist.insert(random.randint(0, num_out_labels), ...)\n\n # test equation format with explicit output\n equation += '->' + convert_sublist(out_sublist)\n self._check_einsum(equation, *operands, np_args=(equation, *np_operands))\n\n # test sublist format with explicit output\n args.append(out_sublist)\n self._check_einsum(*args, np_args=(equation, *np_operands))\n\n 
test(100)\n\n def test_einsum_corner_cases(self, device):\n def check(equation, *operands, expected_output):\n tensors = [torch.tensor(operand, device=device, dtype=torch.float32) if not isinstance(operand, tuple)\n else make_tensor(operand, device, torch.float32) for operand in operands]\n output = torch.einsum(equation, tensors)\n self.assertEqual(output, torch.tensor(expected_output, dtype=torch.float32, device=device))\n\n # Test equation variantions\n check(' ', 1, expected_output=1)\n check(' -> ', 1, expected_output=1)\n check(' , ', 2, 2, expected_output=4)\n check(' , , ', 2, 2, 2, expected_output=8)\n check(' , -> ', 2, 2, expected_output=4)\n check(' i ', [1], expected_output=[1])\n check(' i -> ', [1], expected_output=1)\n check(' i -> i ', [1], expected_output=[1])\n check(' i , i ', [2], [2], expected_output=4)\n check(' i , i -> i ', [2], [2], expected_output=[4])\n\n # Test tensors with 0 size dimensions\n check('i', [], expected_output=[])\n check(' i j -> j', [[], []], expected_output=[])\n check('ij->i', [[], []], expected_output=[0., 0.])\n check(' i j k , k -> i j ', (3, 0, 6), (6,), expected_output=[[], [], []])\n\n # Test broadcasting\n check('i,j', [2], [1, 2], expected_output=[[2, 4]])\n check('i,ij->ij', [1, 2], [[1, 2, 3], [2, 3, 4]], expected_output=[[1, 2, 3], [4, 6, 8]])\n\n # Test ellipsis broadcasting\n check('...', 1, expected_output=1)\n check('...->', 1, expected_output=1)\n check('...->...', 1, expected_output=1)\n check('...', [1], expected_output=[1])\n check('...->', [1], expected_output=1)\n check('z...->z', [1], expected_output=[1])\n check('Z...->...Z', [1], expected_output=[1])\n check('...a->', [[2], [4]], expected_output=6)\n check('a...b->ab', [[[1], [2]], [[3], [4]]], expected_output=[[3], [7]])\n\n def test_einsum_error_cases(self, device):\n def check(*args, regex, exception=RuntimeError):\n with self.assertRaisesRegex(exception, r'einsum\\(\\):.*' + regex):\n torch.einsum(*args)\n\n x = make_tensor((2,), device, torch.float32)\n y = make_tensor((2, 3), device, torch.float32)\n\n check('', [], regex=r'at least one operand', exception=ValueError)\n check('. ..', [x], regex=r'found \\'.\\' for operand 0 that is not part of any ellipsis')\n check('... ...', [x], regex=r'found \\'.\\' for operand 0 for which an ellipsis was already found')\n check('1', [x], regex=r'invalid subscript given at index 0')\n check(',', [x], regex=r'fewer operands were provided than specified in the equation')\n check('', [x, x], regex=r'more operands were provided than specified in the equation')\n check('', [x], regex=r'the number of subscripts in the equation \\(0\\) does not match the number '\n r'of dimensions \\(1\\) for operand 0 and no ellipsis was given')\n check('ai', [x], regex=r'the number of subscripts in the equation \\(2\\) does not match the number '\n r'of dimensions \\(1\\) for operand 0 and no ellipsis was given')\n check('ai...', [x], regex=r'the number of subscripts in the equation \\(2\\) is more than the number '\n r'of dimensions \\(1\\) for operand 0')\n check('a->... 
.', [x], regex=r'found \\'.\\' for output but an ellipsis \\(...\\) was already found')\n check('a->..', [x], regex=r'found \\'.\\' for output that is not part of any ellipsis \\(...\\)')\n check('a->1', [x], regex=r'invalid subscript given at index 3')\n check('a->aa', [x], regex=r'output subscript a appears more than once in the output')\n check('a->i', [x], regex=r'output subscript i does not appear in the equation for any input operand')\n check('aa', [y], regex=r'subscript a is repeated for operand 0 but the sizes don\\'t match, 3 != 2')\n check('a, ba', [x, y], regex=r'operands do not broadcast with remapped shapes \\[original->remapped\\]: '\n r'\\[2\\]->\\[1, 2\\] \\[2, 3\\]->\\[2, 3\\]')\n\n check(x, [-1], regex=r'not within the valid range \\[0, 52\\)', exception=ValueError)\n check(x, [52], regex=r'not within the valid range \\[0, 52\\)', exception=ValueError)\n\n def triangular_solve_test_helper(self, A_dims, b_dims, upper, unitriangular,\n device, dtype):\n triangle_function = torch.triu if upper else torch.tril\n b = torch.randn(*b_dims, dtype=dtype, device=device)\n A = torch.randn(*A_dims, dtype=dtype, device=device)\n # create positive definite matrix\n A = torch.matmul(A, A.mT)\n A_triangular = triangle_function(A)\n if unitriangular:\n A_triangular.diagonal(dim1=-2, dim2=-1).fill_(1.)\n return b, A_triangular\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n @precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,\n torch.float64: 1e-8, torch.complex128: 1e-8})\n def test_triangular_solve(self, device, dtype):\n ks = [0, 1, 3]\n ns = [0, 5]\n for k, n, (upper, unitriangular, transpose) in itertools.product(ks, ns,\n itertools.product([True, False], repeat=3)):\n b, A = self.triangular_solve_test_helper((n, n), (n, k), upper,\n unitriangular, device, dtype)\n x = torch.triangular_solve(b, A, upper=upper, unitriangular=unitriangular, transpose=transpose)[0]\n if transpose:\n self.assertEqual(b, np.matmul(A.t().cpu(), x.cpu()))\n else:\n self.assertEqual(b, np.matmul(A.cpu(), x.cpu()))\n\n @skipCPUIfNoLapack\n @skipCUDAIfNoMagma\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n @precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,\n torch.float64: 1e-8, torch.complex128: 1e-8})\n def test_triangular_solve_batched(self, device, dtype):\n def triangular_solve_batch_helper(A_dims, b_dims, upper, unitriangular, transpose):\n b, A = self.triangular_solve_test_helper(A_dims, b_dims, upper,\n unitriangular, device, dtype)\n x_exp_list = []\n for i in range(b_dims[0]):\n x_exp_list.append(torch.triangular_solve(b[i], A[i], upper=upper,\n unitriangular=unitriangular,\n transpose=transpose)[0])\n x_exp = torch.stack(x_exp_list) # Stacked output\n x_act = torch.triangular_solve(b, A, upper=upper,\n unitriangular=unitriangular,\n transpose=transpose)[0] # Actual output\n self.assertEqual(x_act, x_exp) # Equality check\n if transpose:\n A = A.mT\n\n Ax = np.matmul(A.cpu(), x_act.cpu())\n self.assertEqual(b, Ax)\n\n def triangular_solve_zero_batch_helper(A_dims, b_dims, upper, unitriangular, transpose):\n b, A = self.triangular_solve_test_helper(A_dims, b_dims, upper,\n unitriangular, device, dtype)\n x = torch.triangular_solve(b, A, upper=upper,\n unitriangular=unitriangular,\n transpose=transpose)[0]\n self.assertTrue(x.shape == b.shape)\n\n for upper, unitriangular, transpose in itertools.product([True, False], repeat=3):\n batchsize = 3\n 
triangular_solve_batch_helper((batchsize, 5, 5), (batchsize, 5, 10),\n upper, unitriangular, transpose)\n\n # test empty input\n triangular_solve_batch_helper((batchsize, 0, 0), (batchsize, 0, 10),\n upper, unitriangular, transpose)\n triangular_solve_batch_helper((batchsize, 0, 0), (batchsize, 0, 0),\n upper, unitriangular, transpose)\n\n # test zero batch case\n batchsize = 0\n triangular_solve_zero_batch_helper((batchsize, 5, 5), (batchsize, 5, 10),\n upper, unitriangular, transpose)\n\n\n @slowTest\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n @precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,\n torch.float64: 1e-8, torch.complex128: 1e-8})\n def test_triangular_solve_batched_many_batches(self, device, dtype):\n for upper, transpose, unitriangular in itertools.product([True, False], repeat=3):\n # test batched A case\n b, A = self.triangular_solve_test_helper((256, 256, 5, 5), (5, 1),\n upper, unitriangular, device, dtype)\n x, _ = torch.triangular_solve(b, A,\n upper=upper, transpose=transpose, unitriangular=unitriangular)\n if transpose:\n A = A.mT\n\n Ax = torch.matmul(A, x)\n\n rtol = 1e-2 if dtype in [torch.float32, torch.complex64] else self.precision\n self.assertEqual(Ax, b.expand_as(Ax), atol=self.precision, rtol=rtol)\n\n # test batched b case\n b, A = self.triangular_solve_test_helper((3, 3), (512, 512, 3, 1),\n upper, unitriangular, device, dtype)\n x, _ = torch.triangular_solve(b, A, upper=upper, transpose=transpose,\n unitriangular=unitriangular)\n if transpose:\n A = A.mT\n\n self.assertEqual(torch.matmul(A, x), b)\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @unittest.skipIf(not TEST_SCIPY, \"SciPy not found\")\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_triangular_solve_batched_broadcasting(self, device, dtype):\n from scipy.linalg import solve_triangular as tri_solve\n\n def scipy_tri_solve_batched(A, B, upper, trans, diag):\n batch_dims_A, batch_dims_B = A.shape[:-2], B.shape[:-2]\n single_dim_A, single_dim_B = A.shape[-2:], B.shape[-2:]\n expand_dims = tuple(torch._C._infer_size(torch.Size(batch_dims_A),\n torch.Size(batch_dims_B)))\n expand_A = np.broadcast_to(A, expand_dims + single_dim_A)\n expand_B = np.broadcast_to(B, expand_dims + single_dim_B)\n flat_A = expand_A.reshape((-1,) + single_dim_A)\n flat_B = expand_B.reshape((-1,) + single_dim_B)\n flat_X = np.vstack([tri_solve(a, b, lower=(not upper), trans=int(trans), unit_diagonal=diag)\n for a, b in zip(flat_A, flat_B)])\n return flat_X.reshape(expand_B.shape)\n\n def run_test(A_dims, b_dims, device, upper, transpose, unitriangular):\n b, A = self.triangular_solve_test_helper(A_dims, b_dims, upper,\n unitriangular, device, dtype)\n x_exp = torch.as_tensor(scipy_tri_solve_batched(A.cpu().numpy(), b.cpu().numpy(),\n upper, transpose, unitriangular))\n x = torch.triangular_solve(b, A, upper=upper, transpose=transpose, unitriangular=unitriangular)[0]\n\n self.assertEqual(x, x_exp.to(device))\n\n for upper, transpose, unitriangular in itertools.product([True, False], repeat=3):\n # test against scipy.linalg.solve_triangular\n run_test((2, 1, 3, 4, 4), (2, 1, 3, 4, 6), device, upper, transpose, unitriangular) # no broadcasting\n run_test((2, 1, 3, 4, 4), (4, 6), device, upper, transpose, unitriangular) # broadcasting b\n run_test((4, 4), (2, 1, 3, 4, 2), device, upper, transpose, unitriangular) # broadcasting A\n run_test((1, 3, 1, 4, 4), (2, 1, 3, 4, 5), device, upper, transpose, 
unitriangular) # broadcasting A & b\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_triangular_solve_out_errors_and_warnings(self, device, dtype):\n # dtypes should be safely castable\n a = torch.eye(2, dtype=dtype, device=device)\n b = torch.randn(2, 1, dtype=dtype, device=device)\n out = torch.empty_like(b).to(torch.int)\n clone_a = torch.empty_like(a)\n with self.assertRaisesRegex(RuntimeError, \"Expected out tensor to have dtype\"):\n torch.triangular_solve(b, a, out=(out, clone_a))\n\n out = torch.empty_like(b)\n clone_a = clone_a.to(torch.int)\n with self.assertRaisesRegex(RuntimeError, \"Expected out tensor to have dtype\"):\n torch.triangular_solve(b, a, out=(out, clone_a))\n\n # device should match\n if torch.cuda.is_available():\n wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'\n out = torch.empty(0, dtype=dtype, device=wrong_device)\n clone_a = torch.empty_like(a)\n with self.assertRaisesRegex(RuntimeError, \"tensors to be on the same device\"):\n torch.triangular_solve(b, a, out=(out, clone_a))\n out = torch.empty(0, dtype=dtype, device=device)\n clone_a = torch.empty_like(a).to(wrong_device)\n with self.assertRaisesRegex(RuntimeError, \"tensors to be on the same device\"):\n torch.triangular_solve(b, a, out=(out, clone_a))\n\n # if out tensor with wrong shape is passed a warning is given\n with warnings.catch_warnings(record=True) as w:\n out = torch.empty(1, dtype=dtype, device=device)\n clone_a = torch.empty(1, dtype=dtype, device=device)\n # Trigger warning\n torch.triangular_solve(b, a, out=(out, clone_a))\n # Check warning occurs\n self.assertEqual(len(w), 2)\n self.assertTrue(\"An output with one or more elements was resized\" in str(w[-1].message))\n self.assertTrue(\"An output with one or more elements was resized\" in str(w[-2].message))\n\n def check_single_matmul(self, x, y, shape):\n a = np.array(x, copy=False)\n b = np.array(y, copy=False)\n expected = np.matmul(a, b)\n\n ans = torch.matmul(x, y)\n self.assertTrue(ans.is_contiguous())\n self.assertTrue(np.array_equal(ans, expected))\n\n out = torch.zeros(*shape, dtype=torch.int64).to(x.device)\n ans = torch.matmul(x, y, out=out)\n self.assertIs(ans, out)\n self.assertTrue(ans.is_contiguous())\n self.assertTrue(np.array_equal(ans, expected))\n\n # TODO: update to run on CUDA, too\n @onlyCPU\n def test_matmul_small_brute_force_1d_Nd(self, device):\n # Issue #20452: range(0, 10) does not work.\n n = 1\n for m in range(1, 8):\n for p in range(1, 8):\n for o in range(1, 5):\n # 1d, 3d, inner dimensions C\n x = torch.arange(m, device=device)\n y = torch.arange(o * m * p, device=device).reshape(o, m, p)\n self.check_single_matmul(x, y, (o, n, p))\n\n # 1d, 3d, inner dimensions Fortran\n x = torch.arange(m, device=device)\n y = torch.arange(o * p * m, device=device).reshape(o, p, m).mT\n self.check_single_matmul(x, y, (o, n, p))\n\n # 1d, 3d, inner dimensions non-contiguous\n x = torch.arange(2 * m, device=device)[::2]\n y = torch.arange(o * m * 2 * p, device=device).reshape(o, m, 2 * p)[:, :, ::2]\n self.check_single_matmul(x, y, (o, n, p))\n\n for r in range(1, 5):\n # 1d, 4d, inner dimensions C\n x = torch.arange(m)\n y = torch.arange(r * o * m * p, device=device).reshape(r, o, m, p)\n self.check_single_matmul(x, y, (r, o, n, p))\n\n # 1d, 4d, inner dimensions Fortran\n x = torch.arange(m)\n y = torch.arange(r * o * p * m, device=device).reshape(r, o, p, m).mT\n self.check_single_matmul(x, y, (r, o, n, p))\n\n # 1d, 4d, 
inner dimensions non-contiguous\n x = torch.arange(2 * m, device=device)[::2]\n y = torch.arange(r * o * m * 2 * p, device=device).reshape(r, o, m, 2 * p)[:, :, :, ::2]\n self.check_single_matmul(x, y, (r, o, n, p))\n\n # TODO: update to run on CUDA, too\n @onlyCPU\n def test_matmul_small_brute_force_2d_Nd(self, device):\n # Issue #20452: range(0, 10) does not work.\n for n in range(1, 5):\n for m in range(1, 5):\n for p in range(1, 5):\n for o in range(1, 3):\n # 2d, 3d, inner dimensions C\n x = torch.arange(n * m, device=device).reshape(n, m)\n y = torch.arange(o * m * p, device=device).reshape(o, m, p)\n self.check_single_matmul(x, y, (o, n, p))\n\n # 2d, 3d, inner dimensions Fortran\n x = torch.arange(m * n, device=device).reshape(m, n).mT\n y = torch.arange(o * p * m, device=device).reshape(o, p, m).mT\n self.check_single_matmul(x, y, (o, n, p))\n\n # 2d, 3d, inner dimensions non-contiguous\n x = torch.arange(n * 2 * m, device=device).reshape(n, 2 * m)[:, ::2]\n y = torch.arange(o * m * 2 * p, device=device).reshape(o, m, 2 * p)[:, :, ::2]\n self.check_single_matmul(x, y, (o, n, p))\n\n for r in range(1, 2):\n # 2d, 4d, inner dimensions C\n x = torch.arange(n * m, device=device).reshape(n, m)\n y = torch.arange(r * o * m * p, device=device).reshape(r, o, m, p)\n self.check_single_matmul(x, y, (r, o, n, p))\n\n # 2d, 4d, inner dimensions Fortran\n x = torch.arange(m * n, device=device).reshape(m, n).mT\n y = torch.arange(r * o * p * m, device=device).reshape(r, o, p, m).mT\n self.check_single_matmul(x, y, (r, o, n, p))\n\n # 2d, 4d, inner dimensions non-contiguous\n x = torch.arange(n * 2 * m, device=device).reshape(n, 2 * m)[:, ::2]\n y = torch.arange(r * o * m * 2 * p, device=device).reshape(r, o, m, 2 * p)[:, :, :, ::2]\n self.check_single_matmul(x, y, (r, o, n, p))\n\n def test_linear_algebra_scalar_raises(self, device) -> None:\n m = torch.randn(5, 5, device=device)\n v = torch.randn(5, device=device)\n s = torch.tensor(7, device=device)\n self.assertRaises(RuntimeError, lambda: torch.mv(m, s))\n self.assertRaises(RuntimeError, lambda: torch.addmv(v, m, s))\n\n @dtypes(torch.float32, torch.complex64)\n def test_cross(self, device, dtype):\n x = torch.rand(100, 3, 100, dtype=dtype, device=device)\n y = torch.rand(100, 3, 100, dtype=dtype, device=device)\n res1 = torch.cross(x, y)\n res2 = torch.tensor((), dtype=dtype, device=device)\n torch.cross(x, y, out=res2)\n self.assertEqual(res1, res2)\n\n @dtypes(torch.float32, torch.complex64)\n def test_linalg_cross(self, device, dtype):\n x = torch.rand(100, 3, 100, dtype=dtype, device=device)\n y = torch.rand(100, 3, 100, dtype=dtype, device=device)\n res1 = torch.linalg.cross(x, y, dim=1)\n res2 = torch.tensor((), dtype=dtype, device=device)\n torch.linalg.cross(x, y, dim=1, out=res2)\n self.assertEqual(res1, res2)\n\n # test for broadcastable inputs\n x = torch.rand(1, 3, 2, dtype=dtype, device=device)\n y = torch.rand(4, 3, 1, dtype=dtype, device=device)\n res1 = torch.linalg.cross(x, y, dim=1)\n res2 = torch.tensor((), dtype=dtype, device=device)\n torch.linalg.cross(x, y, dim=1, out=res2)\n self.assertEqual(res1, res2)\n\n # non contiguous case 1\n x = torch.rand((4, 4, 4, 3), dtype=dtype,\n device=device).contiguous(memory_format=torch.channels_last) # non-contiguous\n y = torch.rand((4, 4, 4, 3), dtype=dtype,\n device=device).contiguous(memory_format=torch.channels_last) # non-contiguous\n np_expected_ref = np.cross(x.cpu().numpy(), y.cpu().numpy(), axis=-1)\n res = torch.linalg.cross(x, y, dim=-1)\n # numpy reference compared to 
torch result\n self.assertEqual(res.cpu().numpy(), np_expected_ref)\n\n # non contiguous case 2\n x = torch.rand(1, 3, 2, dtype=dtype, device=device) # contiguous\n y = torch.rand(1, 3, 4, dtype=dtype, device=device).permute(2, 1, 0) # non-contiguous\n np_expected_ref = np.cross(x.cpu().numpy(), y.cpu().numpy(), axis=1)\n res = torch.linalg.cross(x, y, dim=1)\n # numpy reference compared to torch result\n self.assertEqual(res.cpu().numpy(), np_expected_ref)\n\n # non contiguous case 3\n x = torch.rand(2, 3, 1, dtype=dtype, device=device).permute(2, 1, 0) # non-contiguous\n y = torch.rand(1, 3, 4, dtype=dtype, device=device).permute(2, 1, 0) # non-contiguous\n np_expected_ref = np.cross(x.cpu().numpy(), y.cpu().numpy(), axis=1)\n res = torch.linalg.cross(x, y, dim=1)\n # numpy reference compared to torch result\n self.assertEqual(res.cpu().numpy(), np_expected_ref)\n\n # non contiguous case 4\n x = torch.randn(12, 3, device=device, dtype=dtype)[::2, :] # non-contiguous\n y = torch.randn(18, 3, device=device, dtype=dtype)[::3, :] # non-contiguous\n np_expected_ref = np.cross(x.cpu().numpy(), y.cpu().numpy(), axis=1)\n res = torch.linalg.cross(x, y, dim=1)\n # numpy reference compared to torch result\n self.assertEqual(res.cpu().numpy(), np_expected_ref)\n\n # non contiguous case 5\n x = torch.randn(1, device=device, dtype=dtype) # contiguous\n y = torch.randn(6, device=device, dtype=dtype)[::2] # non-contiguous\n np_expected_ref = np.cross(x.expand(3).cpu().numpy(), y.cpu().numpy())\n res = torch.linalg.cross(x, y)\n # numpy reference compared to torch result\n self.assertEqual(res.cpu().numpy(), np_expected_ref)\n\n @dtypes(torch.float32, torch.complex64)\n def test_cross_with_and_without_dim(self, device, dtype):\n x = torch.rand(100, 3, dtype=dtype, device=device)\n y = torch.rand(100, 3, dtype=dtype, device=device)\n res1 = torch.cross(x, y, dim=1)\n res2 = torch.cross(x, y, dim=-1)\n res3 = torch.cross(x, y)\n self.assertEqual(res1, res2)\n self.assertEqual(res1, res3)\n\n @dtypes(torch.float32, torch.complex64)\n def test_linalg_cross_with_and_without_dim(self, device, dtype):\n x = torch.rand(100, 3, dtype=dtype, device=device)\n y = torch.rand(100, 3, dtype=dtype, device=device)\n res1 = torch.linalg.cross(x, y, dim=1)\n res2 = torch.linalg.cross(x, y, dim=-1)\n res3 = torch.linalg.cross(x, y)\n self.assertEqual(res1, res2)\n self.assertEqual(res1, res3)\n\n def test_cross_errors(self, device):\n self.assertRaisesRegex(\n RuntimeError, \"must match the size of tensor\",\n lambda: torch.cross(torch.rand(100, 3, device=device), torch.rand(100, 3, 10, device=device)))\n self.assertRaisesRegex(\n RuntimeError, \"must match the size of tensor\",\n lambda: torch.cross(torch.rand(5, 3, device=device), torch.rand(3, 5, device=device)))\n self.assertRaisesRegex(\n RuntimeError, \"no dimension of size 3 in input\",\n lambda: torch.cross(torch.rand(5, 4, device=device), torch.rand(5, 4, device=device)))\n self.assertRaisesRegex(\n RuntimeError, \"dimension 0 does not have size 3\",\n lambda: torch.cross(torch.rand(5, 4, 3, device=device), torch.rand(5, 4, 3, device=device), dim=0))\n self.assertRaisesRegex(\n RuntimeError, \"dimension -1 does not have size 3\",\n lambda: torch.cross(torch.rand(5, 3, 4, device=device), torch.rand(5, 3, 4, device=device), dim=-1))\n self.assertRaisesRegex(\n IndexError, \"Dimension out of range\",\n lambda: torch.cross(torch.rand(5, 3, 4, device=device), torch.rand(5, 3, 4, device=device), dim=-5))\n\n def test_linalg_cross_errors(self, device):\n 
self.assertRaisesRegex(\n RuntimeError, \"dimension -1 does not have size 3\",\n lambda: torch.linalg.cross(torch.rand(5, 3, 4, device=device), torch.rand(5, 3, 4, device=device)))\n self.assertRaisesRegex(\n RuntimeError, \"must match the size of tensor\",\n lambda: torch.linalg.cross(torch.rand(100, 3, device=device), torch.rand(100, 3, 10, device=device)))\n self.assertRaisesRegex(\n RuntimeError, \"must match the size of tensor\",\n lambda: torch.linalg.cross(torch.rand(5, 3, device=device), torch.rand(3, 5, device=device)))\n self.assertRaisesRegex(\n RuntimeError, \"dimension 0 does not have size 3\",\n lambda: torch.linalg.cross(torch.rand(5, 4, 3, device=device), torch.rand(5, 4, 3, device=device), dim=0))\n self.assertRaisesRegex(\n RuntimeError, \"dimension -1 does not have size 3\",\n lambda: torch.linalg.cross(torch.rand(5, 3, 4, device=device), torch.rand(5, 3, 4, device=device), dim=-1))\n self.assertRaisesRegex(\n IndexError, \"Dimension out of range\",\n lambda: torch.linalg.cross(torch.rand(5, 3, 4, device=device), torch.rand(5, 3, 4, device=device), dim=-5))\n\n def test_renorm(self, device):\n m1 = torch.randn(20, 20, device=device) # big enough to exercise vectorized path\n res1 = torch.tensor((), device=device)\n\n def renorm(matrix, value, dim, max_norm):\n m1 = matrix.transpose(dim, 0).contiguous()\n # collapse non-dim dimensions.\n m2 = m1.clone().resize_(m1.size(0), int(math.floor(m1.nelement() / m1.size(0))))\n norms = m2.norm(value, 1, True)\n # clip\n new_norms = norms.clone()\n new_norms[torch.gt(norms, max_norm)] = max_norm\n new_norms.div_(norms.add_(1e-7))\n # renormalize\n m1.mul_(new_norms.expand_as(m1))\n return m1.transpose(dim, 0)\n\n # note that the axis fed to torch.renorm is different (2~=1)\n maxnorm = m1.norm(2, 1).mean()\n m2 = renorm(m1, 2, 1, maxnorm)\n m1.renorm_(2, 1, maxnorm)\n self.assertEqual(m1, m2, atol=1e-5, rtol=0)\n self.assertEqual(m1.norm(2, 0), m2.norm(2, 0), atol=1e-5, rtol=0)\n\n m1 = torch.randn(3, 4, 5, device=device)\n m2 = m1.transpose(1, 2).contiguous().clone().resize_(15, 4)\n maxnorm = m2.norm(2, 0).mean()\n m2 = renorm(m2, 2, 1, maxnorm)\n m1.renorm_(2, 1, maxnorm)\n m3 = m1.transpose(1, 2).contiguous().clone().resize_(15, 4)\n self.assertEqual(m3, m2)\n self.assertEqual(m3.norm(2, 0), m2.norm(2, 0))\n\n @skipCPUIfNoLapack\n @skipCUDAIfNoCusolver\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_ormqr(self, device, dtype):\n\n def run_test(batch, m, n, fortran_contiguous):\n A = make_tensor((*batch, m, n), dtype=dtype, device=device)\n reflectors, tau = torch.geqrf(A)\n if not fortran_contiguous:\n self.assertTrue(reflectors.mT.is_contiguous())\n reflectors = reflectors.contiguous()\n\n # Q is of size m x m\n Q, _ = torch.linalg.qr(A, mode='complete')\n C_right = make_tensor((*batch, m, n), dtype=dtype, device=device)\n C_left = make_tensor((*batch, n, m), dtype=dtype, device=device)\n\n expected = Q @ C_right\n actual = torch.ormqr(reflectors, tau, C_right, left=True, transpose=False)\n self.assertEqual(expected, actual)\n\n expected = C_left @ Q\n actual = torch.ormqr(reflectors, tau, C_left, left=False, transpose=False)\n self.assertEqual(expected, actual)\n\n expected = Q.mH @ C_right\n actual = torch.ormqr(reflectors, tau, C_right, left=True, transpose=True)\n self.assertEqual(expected, actual)\n\n expected = C_left @ Q.mH\n actual = torch.ormqr(reflectors, tau, C_left, left=False, transpose=True)\n self.assertEqual(expected, actual)\n\n # if tau is all zeros then the implicit 
matrix Q is the identity matrix\n # so the actual result should be C_right in this case\n zero_tau = torch.zeros_like(tau)\n actual = torch.ormqr(reflectors, zero_tau, C_right, left=True, transpose=False)\n self.assertEqual(C_right, actual)\n\n batches = [(), (0, ), (2, ), (2, 1)]\n ns = [5, 2, 0]\n for batch, (m, n), fortran_contiguous in product(batches, product(ns, ns), [True, False]):\n run_test(batch, m, n, fortran_contiguous)\n\n @skipCPUIfNoLapack\n @skipCUDAIfNoCusolver\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_ormqr_errors_and_warnings(self, device, dtype):\n test_cases = [\n # input1 size, input2 size, input3 size, error regex\n ((10,), (2,), (2,), r\"input must have at least 2 dimensions\"),\n ((2, 2), (2,), (2,), r\"other must have at least 2 dimensions\"),\n ((10, 6), (20,), (10, 6), r\"other.shape\\[-2\\] must be greater than or equal to tau.shape\\[-1\\]\"),\n ((6, 6), (5,), (5, 5), r\"other.shape\\[-2\\] must be equal to input.shape\\[-2\\]\"),\n ((1, 2, 2), (2, 2), (1, 2, 2), r\"batch dimensions of tau to be equal to input.shape\\[:-2\\]\"),\n ((1, 2, 2), (1, 2), (2, 2, 2), r\"batch dimensions of other to be equal to input.shape\\[:-2\\]\"),\n ]\n for a_size, tau_size, c_size, error_regex in test_cases:\n a = make_tensor(a_size, dtype=dtype, device=device)\n tau = make_tensor(tau_size, dtype=dtype, device=device)\n c = make_tensor(c_size, dtype=dtype, device=device)\n with self.assertRaisesRegex(RuntimeError, error_regex):\n torch.ormqr(a, tau, c)\n\n def test_blas_empty(self, device):\n def fn(torchfn, *args, test_out=False, **kwargs):\n def call_torch_fn(*args, **kwargs):\n return torchfn(*tuple(torch.randn(shape, device=device) if isinstance(shape, tuple) else shape\n for shape in args), **kwargs)\n result = call_torch_fn(*args, **kwargs)\n if not test_out:\n return result\n else:\n out = torch.full_like(result, math.nan)\n out1 = call_torch_fn(*args, **kwargs, out=out)\n return out\n\n # mm, addmm\n self.assertEqual((0, 0), fn(torch.mm, (0, 0), (0, 0)).shape)\n self.assertEqual((0, 5), fn(torch.mm, (0, 0), (0, 5)).shape)\n self.assertEqual((5, 0), fn(torch.mm, (5, 0), (0, 0)).shape)\n self.assertEqual((3, 0), fn(torch.mm, (3, 2), (2, 0)).shape)\n self.assertEqual(torch.zeros((5, 6), device=device), fn(torch.mm, (5, 0), (0, 6)))\n self.assertEqual(torch.zeros((5, 6), device=device), fn(torch.mm, (5, 0), (0, 6), test_out=True))\n\n self.assertEqual((0, 0), fn(torch.addmm, (0, 0), (0, 0), (0, 0)).shape)\n self.assertEqual((0, 1), fn(torch.addmm, (1, ), (0, 17), (17, 1)).shape)\n t = torch.randn((5, 6), device=device)\n self.assertEqual(t, fn(torch.addmm, t, (5, 0), (0, 6)))\n self.assertEqual(t, fn(torch.addmm, t, (5, 0), (0, 6), test_out=True))\n\n # mv, addmv\n self.assertEqual((0,), fn(torch.mv, (0, 0), (0,)).shape)\n self.assertEqual((0,), fn(torch.mv, (0, 2), (2,)).shape)\n self.assertEqual(torch.zeros((3,), device=device), fn(torch.mv, (3, 0), (0,)))\n self.assertEqual(torch.zeros((3,), device=device), fn(torch.mv, (3, 0), (0,), test_out=True))\n\n self.assertEqual((0,), fn(torch.addmv, (0,), (0, 0), (0,)).shape)\n t = torch.randn((3,), device=device)\n self.assertEqual(t, fn(torch.addmv, t, (3, 0), (0,)))\n self.assertEqual(t, fn(torch.addmv, t, (3, 0), (0,), test_out=True))\n\n # bmm, baddbmm\n self.assertEqual((0, 0, 0), fn(torch.bmm, (0, 0, 0), (0, 0, 0)).shape)\n self.assertEqual((3, 0, 5), fn(torch.bmm, (3, 0, 0), (3, 0, 5)).shape)\n self.assertEqual((0, 5, 6), fn(torch.bmm, (0, 5, 0), (0, 0, 6)).shape)\n 
self.assertEqual(torch.zeros((3, 5, 6), device=device), fn(torch.bmm, (3, 5, 0), (3, 0, 6)))\n self.assertEqual(torch.zeros((3, 5, 6), device=device), fn(torch.bmm, (3, 5, 0), (3, 0, 6), test_out=True))\n\n self.assertEqual((0, 0, 0), fn(torch.baddbmm, (0, 0, 0), (0, 0, 0), (0, 0, 0)).shape)\n self.assertEqual((3, 0, 5), fn(torch.baddbmm, (3, 0, 5), (3, 0, 0), (3, 0, 5)).shape)\n self.assertEqual((0, 5, 6), fn(torch.baddbmm, (0, 5, 6), (0, 5, 0), (0, 0, 6)).shape)\n self.assertEqual((3, 5, 6), fn(torch.baddbmm, (3, 5, 6), (3, 5, 0), (3, 0, 6)).shape)\n c = torch.arange(30, dtype=torch.float32, device=device).reshape(3, 2, 5)\n self.assertEqual(-2 * c, fn(torch.baddbmm, c, (3, 2, 0), (3, 0, 5), beta=-2)) # Issue #33467\n self.assertEqual(-2 * c, fn(torch.baddbmm, c, (3, 2, 0), (3, 0, 5), beta=-2, test_out=True)) # Issue #33467\n\n # addbmm\n self.assertEqual((0, 0), fn(torch.addbmm, (0, 0), (0, 0, 0), (0, 0, 0)).shape)\n self.assertEqual((0, 5), fn(torch.addbmm, (0, 5), (3, 0, 0), (3, 0, 5)).shape)\n t = torch.randn((5, 6), device=device)\n self.assertEqual(t, fn(torch.addbmm, t, (0, 5, 0), (0, 0, 6)))\n self.assertEqual(t, fn(torch.addbmm, t, (0, 5, 0), (0, 0, 6), test_out=True))\n\n # matmul\n self.assertEqual(torch.tensor(0., device=device), fn(torch.matmul, (0,), (0,)))\n self.assertEqual(torch.tensor(0., device=device), fn(torch.matmul, (0,), (0,), test_out=True))\n self.assertEqual((0, 0), fn(torch.matmul, (0, 0), (0, 0)).shape)\n self.assertEqual((0, 0, 0), fn(torch.matmul, (0, 0, 0), (0, 0, 0)).shape)\n self.assertEqual((5, 0, 0), fn(torch.matmul, (5, 0, 0), (5, 0, 0)).shape)\n self.assertEqual(torch.zeros((5, 3, 4), device=device), fn(torch.matmul, (5, 3, 0), (5, 0, 4)))\n self.assertEqual(torch.zeros((5, 3, 4), device=device), fn(torch.matmul, (5, 3, 0), (5, 0, 4), test_out=True))\n\n # dot\n self.assertEqual(torch.tensor(0., device=device), fn(torch.dot, (0,), (0,)))\n self.assertEqual(torch.tensor(0., device=device), fn(torch.dot, (0,), (0,), test_out=True))\n\n if torch._C.has_lapack:\n # lu\n A_LU, pivots = fn(torch.lu, (0, 5, 5))\n self.assertEqual([(0, 5, 5), (0, 5)], [A_LU.shape, pivots.shape])\n A_LU, pivots = fn(torch.lu, (0, 0, 0))\n self.assertEqual([(0, 0, 0), (0, 0)], [A_LU.shape, pivots.shape])\n A_LU, pivots = fn(torch.lu, (2, 0, 0))\n self.assertEqual([(2, 0, 0), (2, 0)], [A_LU.shape, pivots.shape])\n\n @dtypesIfCUDA(torch.cfloat, torch.cdouble,\n *get_all_fp_dtypes(include_half=not CUDA9, include_bfloat16=(CUDA11OrLater and SM53OrLater)))\n @dtypes(*(set(get_all_dtypes()) - {torch.half, torch.bool}))\n def test_blas_alpha_beta_empty(self, device, dtype):\n # This test is disabled on CUDA 9 due to:\n # See: https://github.com/pytorch/pytorch/issues/31006\n if dtype is torch.bfloat16 and self.device_type == 'xla':\n # TODO (@zasdfgbnm): this causes the following error on test\n # TestTorchDeviceTypeXLA.test_blas_alpha_beta_empty_xla_bfloat16:\n #\n # RuntimeError: _th_equal not supported on CPUType for BFloat16\n return\n # ensure beta is respected\n value = 11\n input = torch.full((2,), value, dtype=dtype, device=device)\n mat = torch.ones((2, 0), dtype=dtype, device=device)\n vec = torch.ones((0,), dtype=dtype, device=device)\n out = torch.empty((2,), dtype=dtype, device=device)\n if dtype.is_complex:\n alpha = 6 + 7j\n beta = 3 + 4j\n else:\n alpha = 6\n beta = 3\n self.assertEqual(torch.full((2,), beta * value, dtype=dtype, device=device),\n torch.addmv(input=input, mat=mat, vec=vec, alpha=alpha, beta=beta))\n self.assertEqual(torch.full((2,), beta * value, 
dtype=dtype, device=device),\n torch.addmv(input=input, mat=mat, vec=vec, alpha=alpha, beta=beta, out=out))\n\n # torch.addmm\n input = torch.full((2, 3), value, dtype=dtype, device=device)\n mat2 = torch.ones((0, 3), dtype=dtype, device=device)\n out = torch.empty((2, 3), dtype=dtype, device=device)\n self.assertEqual(torch.full((2, 3), beta * value, dtype=dtype, device=device),\n torch.addmm(input=input, mat1=mat, mat2=mat2, alpha=alpha, beta=beta))\n self.assertEqual(torch.full((2, 3), beta * value, dtype=dtype, device=device),\n torch.addmm(input=input, mat1=mat, mat2=mat2, alpha=alpha, beta=beta, out=out))\n\n @dtypes(*(get_all_complex_dtypes() + get_all_fp_dtypes()))\n def test_blas_nan_out(self, device, dtype):\n # These functions should work correctly with NaN filled outputs,\n # but need special handling, see [NOTE: cpu_zero]\n b = 3\n n = 5\n m = 7\n p = 11\n\n # torch.mv\n nm = torch.randn((m, n), device=device).t()\n _m = torch.randn((), device=device).expand(m)\n _m_out = torch.full((m,), float('nan'), device=device)\n self.assertEqual(torch.mv(nm, _m), torch.mv(nm, _m, out=_m_out))\n self.assertEqual(0, torch.isnan(torch.mv(nm, _m)).sum())\n\n # torch.mm\n mp = torch.randn((p, m), device=device).t()\n np_out = torch.full((n, p), float('nan'), device=device)\n self.assertEqual(torch.mm(nm, mp), torch.mm(nm, mp, out=np_out))\n\n # torch.bmm\n bnm = torch.randn((b, m, n), device=device).transpose(1, 2)\n bmp = torch.randn((b, p, m), device=device).transpose(1, 2)\n bnp_out = torch.full((b, n, p), float('nan'), device=device)\n self.assertEqual(torch.bmm(bnm, bmp), torch.bmm(bnm, bmp, out=bnp_out))\n\n @onlyCPU # not supported by CUBLAS\n def test_blas_mv_large_input(self, device):\n # This would previously fail if the allocated output had NaNs, see:\n # https://github.com/pytorch/pytorch/issues/31663 and [NOTE: cpu_zero]\n n = 3000\n m = 200\n\n nm = torch.randn((m, n), device=device).t()\n _m = torch.randn((), device=device).expand(m)\n _m_out = torch.full((m,), 0., device=device)\n\n self.assertEqual(torch.mv(nm, _m), torch.mv(nm, _m, out=_m_out))\n\n @onlyCPU\n def test_renorm_ps(self, device):\n # full reduction\n x = torch.randn(5, 5)\n xn = x.numpy()\n for p in [1, 2, 3, 4, inf]:\n res = x.renorm(p, 1, 1)\n expected = x / x.norm(p, 0, keepdim=True).clamp(min=1)\n self.assertEqual(res, expected, msg=\"renorm failed for {}-norm\".format(p))\n\n @skipCPUIfNoLapack\n @skipCUDAIfNoCusolver\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_householder_product(self, device, dtype):\n def generate_reflectors_and_tau(A):\n \"\"\"\n This function uses numpy.linalg.qr with mode \"raw\" to extract output of LAPACK's geqrf.\n There is torch.geqrf function but it doesn't work with complex-valued input.\n \"\"\"\n if A.numel() > 0:\n A_cpu = A.cpu()\n flattened_batch_shape = [-1, *A_cpu.shape[-2:]]\n reflectors = torch.empty_like(A_cpu).view(*flattened_batch_shape)\n tau_shape = [*A_cpu.shape[:-2], A_cpu.shape[-1]]\n tau = torch.empty(tau_shape, dtype=dtype).view(-1, A_cpu.shape[-1])\n for A_i, reflectors_i, tau_i in zip(A_cpu.contiguous().view(*flattened_batch_shape), reflectors, tau):\n reflectors_tmp, tau_i[:] = map(torch.from_numpy, np.linalg.qr(A_i, mode='raw'))\n reflectors_i[:] = reflectors_tmp.T\n reflectors = reflectors.view(*A_cpu.shape)\n tau = tau.view(tau_shape)\n return reflectors.to(A.device), tau.to(A.device)\n\n reflectors = torch.empty_like(A)\n tau = torch.empty(*A.shape[:-2], A.shape[-1], dtype=dtype, device=device)\n return 
reflectors, tau\n\n def run_test(shape):\n A = torch.randn(*shape, dtype=dtype, device=device)\n reflectors, tau = generate_reflectors_and_tau(A)\n expected, _ = torch.linalg.qr(A)\n actual = torch.linalg.householder_product(reflectors, tau)\n # torch.linalg.qr does not work correctly for zero batch dimension tensors\n # see https://github.com/pytorch/pytorch/issues/50576\n if (A.numel() > 0):\n self.assertEqual(expected, actual)\n else:\n self.assertTrue(actual.shape == shape)\n\n # if tau is empty and A is not the result should be a matrix with ones on the diagonal\n if (A.numel() > 0):\n tau_empty = torch.empty(*shape[:-2], 0, dtype=dtype, device=device)\n identity_mat = torch.zeros_like(reflectors)\n identity_mat.diagonal(dim1=-1, dim2=-2)[:] = 1\n actual = torch.linalg.householder_product(reflectors, tau_empty)\n self.assertEqual(actual, identity_mat)\n\n out = torch.empty_like(A)\n ans = torch.linalg.householder_product(reflectors, tau, out=out)\n self.assertEqual(ans, out)\n if (A.numel() > 0):\n self.assertEqual(expected, out)\n\n shapes = [(0, 0), (5, 0), # Empty matrix\n (5, 5), (5, 3), # Single matrix\n (0, 0, 0), (0, 5, 5), (0, 5, 3), # Zero batch dimension tensors\n (2, 5, 5), (2, 5, 3), # 3-dim tensors\n (2, 1, 5, 5), (2, 1, 5, 3)] # 4-dim tensors\n for shape in shapes:\n run_test(shape)\n\n @skipCPUIfNoLapack\n @skipCUDAIfNoCusolver\n def test_householder_product_errors_and_warnings(self, device):\n test_cases = [\n # input1 size, input2 size, error regex\n ((10,), (2,), r\"input must have at least 2 dimensions\"),\n ((10, 6), (20,), r\"input.shape\\[-1\\] must be greater than or equal to tau.shape\\[-1\\]\"),\n ((6, 10), (5,), r\"input.shape\\[-2\\] must be greater than or equal to input.shape\\[-1\\]\"),\n ]\n for a_size, tau_size, error_regex in test_cases:\n a = torch.rand(*a_size, device=device)\n tau = torch.rand(*tau_size, device=device)\n with self.assertRaisesRegex(RuntimeError, error_regex):\n torch.linalg.householder_product(a, tau)\n\n # if out tensor with wrong shape is passed a warning is given\n reflectors = torch.randn(3, 3, device=device)\n tau = torch.randn(3, device=device)\n out = torch.empty(2, 3, device=device)\n with warnings.catch_warnings(record=True) as w:\n # Trigger warning\n torch.linalg.householder_product(reflectors, tau, out=out)\n # Check warning occurs\n self.assertEqual(len(w), 1)\n self.assertTrue(\"An output with one or more elements was resized\" in str(w[-1].message))\n\n # dtypes should be safely castable\n out = torch.empty_like(reflectors).to(torch.int)\n with self.assertRaisesRegex(RuntimeError, \"but got result with dtype Int\"):\n torch.linalg.householder_product(reflectors, tau, out=out)\n\n with self.assertRaisesRegex(RuntimeError, \"tau dtype Int does not match input dtype\"):\n torch.linalg.householder_product(reflectors, tau.to(torch.int))\n\n if torch.cuda.is_available():\n # device of out and input should match\n wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'\n out = torch.empty_like(reflectors).to(wrong_device)\n with self.assertRaisesRegex(RuntimeError, \"Expected all tensors to be on the same device\"):\n torch.linalg.householder_product(reflectors, tau, out=out)\n\n # device of tau and input should match\n wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'\n tau = tau.to(wrong_device)\n with self.assertRaisesRegex(RuntimeError, \"Expected all tensors to be on the same device\"):\n torch.linalg.householder_product(reflectors, tau)\n\n @precisionOverride({torch.complex64: 5e-6})\n 
@skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.double, torch.cfloat, torch.cdouble)\n def test_lu(self, device, dtype):\n from torch.testing._internal.common_utils import random_matrix\n\n def run_test(device, pivot):\n def run_subtest(matrix_size, batches, device, pivot, singular=False, a=None):\n if isinstance(matrix_size, int):\n rows = columns = matrix_size\n else:\n rows, columns = matrix_size\n if a is None:\n a = random_matrix(rows, columns, *batches, **dict(singular=singular, dtype=dtype, device=device))\n a_LU_info, pivots_info, info_ = a.lu(pivot=pivot, get_infos=True)\n self.assertEqual(a_LU_info.size(), torch.Size(batches + (rows, columns)))\n self.assertEqual(pivots_info.size(), torch.Size(batches + (min(rows, columns),)))\n self.assertEqual(info_.size(), torch.Size(batches))\n # If a randomly generated input matrix is singular,\n # then info_ contains indices i such that U[i, i] ==\n # 0. This however conveys that the factorization was\n # successful albeit with a singular input. Therefore,\n # we require info.min() >= 0\n self.assertGreaterEqual(info_.min(), 0)\n a_LU, pivots = a.lu(pivot=pivot)\n self.assertEqual(a_LU, a_LU_info)\n self.assertEqual(pivots_info, pivots)\n\n\n P, L, U = torch.lu_unpack(a_LU, pivots)\n P_ = P.cpu().numpy()\n L_ = L.cpu().numpy()\n U_ = U.cpu().numpy()\n\n self.assertEqual(np.matmul(P_, np.matmul(L_, U_)), a)\n\n if self.device_type == 'cuda':\n # lu without pivoting is implemented only for cuda device\n a_LU_info_nopiv, nopiv, info_nopiv = a.lu(pivot=False, get_infos=True)\n P_nopiv, L_nopiv, U_nopiv = torch.lu_unpack(a_LU_info_nopiv, nopiv)\n P_nopiv_ = P_nopiv.cpu().numpy()\n L_nopiv_ = L_nopiv.cpu().numpy()\n U_nopiv_ = U_nopiv.cpu().numpy()\n\n self.assertEqual(np.matmul(P_nopiv_, np.matmul(L_nopiv_, U_nopiv_)), a)\n\n k = min(rows, columns)\n self.assertEqual(nopiv, torch.arange(1, 1 + k, device=device, dtype=torch.int32).expand(a.shape[:-2] + (k, )))\n if not singular:\n # It is not guaranteed that LU factorization\n # without pivoting is able to determine if a\n # matrix is singular while LU factorization\n # with pivoting is. 
Therefore, we require the\n # equality of info-s only for non-singular\n # matrices.\n # NOTE: infor_ is reshaped because info_nopiv might have\n # squashed batch dimensions for complex types on CUDA,\n # see the TODOs above.\n self.assertEqual(info_.reshape(info_nopiv.shape), info_nopiv)\n\n for ms, batch in itertools.product([3, 5, 7, (4, 2), (3, 4)], [(), (2,), (3,), (3, 5)]):\n run_subtest(ms, batch, device, pivot)\n run_subtest(ms, batch, device, pivot, singular=True)\n\n # Reproducer of a magma bug, see https://bitbucket.org/icl/magma/issues/13/getrf_batched-kernel-produces-nans-on\n a = torch.ones(batch + (ms if isinstance(ms, tuple) else (ms, ms)), dtype=torch.double, device=device)\n run_subtest(ms, batch, device, pivot, singular=True, a=a)\n\n # Info should be positive for rank deficient matrices\n a = torch.ones(5, 3, 3, device=device)\n self.assertGreater(a.lu(pivot=pivot, get_infos=True)[2][0], 0)\n\n run_test(device, True)\n\n if self.device_type == 'cpu':\n # Error checking, no pivoting variant on CPU\n with self.assertRaisesRegex(RuntimeError, 'lu without pivoting is not implemented on the CPU'):\n torch.lu(torch.empty(1, 2, 2), pivot=False)\n else:\n run_test(device, False)\n\n @skipCPUIfNoLapack\n @skipCUDAIfNoMagma\n @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)\n @skipCUDAIfRocm\n @precisionOverride({torch.float: 1e-3})\n def test_lu_unpack(self, device, dtype):\n def run_test(pivot):\n for shape in ((3, 3), (5, 3, 3), (7, 3, 5, 5), (7, 5, 3, 3, 3)):\n a = torch.randn(*shape, dtype=dtype, device=device)\n a_lu, p = torch.lu(a, pivot=pivot)\n p_ref, l_ref, u_ref = torch.lu_unpack(a_lu, p)\n self.assertEqual(p_ref.matmul(l_ref.matmul(u_ref)), a)\n for shape in ((3, 3), (5, 3, 3), (7, 3, 5, 5), (7, 5, 3, 3, 3),\n (3, 5), (5, 3), (3, 3, 5), (3, 5, 3),\n (7, 5, 3, 5, 3), (7, 5, 3, 3, 5),\n # empty tensors\n (0, 0), (0, 0, 0), (0, 3, 3)\n ):\n a = make_tensor(shape, dtype=dtype, device=device, low=-0.1, high=+0.1)\n a_lu, p = torch.lu(a, pivot=pivot)\n p_ref, l_ref, u_ref = torch.lu_unpack(a_lu, p)\n self.assertEqual(p_ref.matmul(l_ref.matmul(u_ref)), a)\n\n run_test(True)\n\n if self.device_type == 'cuda':\n run_test(False)\n\n @skipCPUIfNoLapack\n @skipCUDAIfNoMagma\n @dtypes(torch.double)\n def test_lu_unpack_check_input(self, device, dtype):\n x = torch.rand(5, 5, 5, device=device, dtype=dtype)\n lu_data, lu_pivots = torch.lu(x, pivot=True)\n\n with self.assertRaisesRegex(RuntimeError, \"torch.int32 dtype\"):\n torch.lu_unpack(lu_data, lu_pivots.long())\n with self.assertRaisesRegex(RuntimeError, \"contiguous tensor\"):\n torch.lu_unpack(lu_data, lu_pivots.mT)\n\n # check that onces flags are unset, Nones are returned\n p, l, u = torch.lu_unpack(lu_data, lu_pivots, unpack_data=False)\n self.assertTrue((l == u) and l is None)\n p, l, u = torch.lu_unpack(lu_data, lu_pivots, unpack_pivots=False)\n self.assertTrue(p is None)\n p, l, u = torch.lu_unpack(lu_data, lu_pivots, unpack_data=False, unpack_pivots=False)\n self.assertTrue((p == l == u) and p is None)\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.double)\n @skipCUDAIfRocm\n def test_lobpcg_basic(self, device, dtype):\n self._test_lobpcg_method(device, dtype, 'basic')\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.double)\n @skipCUDAIfRocm\n def test_lobpcg_ortho(self, device, dtype):\n self._test_lobpcg_method(device, dtype, 'ortho')\n\n def _test_lobpcg_method(self, device, dtype, method):\n from torch.testing._internal.common_utils import random_symmetric_pd_matrix, 
random_sparse_pd_matrix\n from torch._linalg_utils import matmul, qform\n from torch._lobpcg import lobpcg\n\n def test_tracker(worker):\n k = worker.iparams['k']\n nc = worker.ivars['converged_count']\n if k <= nc:\n tol = worker.fparams['tol']\n rerr = worker.tvars['rerr']\n X = worker.X\n E = worker.E\n B = worker.B\n A = worker.A\n dtype = X.dtype\n device = X.device\n\n # Check convergence\n self.assertLessEqual(rerr[:k].max(), tol)\n\n # Check B-orthogonality\n I = torch.eye(k, k, dtype=dtype, device=device)\n self.assertEqual(qform(B, X[:, :k]), I)\n\n # Check block equation\n self.assertEqual(qform(A, X[:, :k]) / E[:k], I, atol=0.2, rtol=0)\n\n orig_lobpcg = lobpcg\n\n def lobpcg(*args, **kwargs):\n kwargs['tracker'] = test_tracker\n kwargs['niter'] = 1000\n kwargs['method'] = method\n kwargs['tol'] = 1e-8\n return orig_lobpcg(*args, **kwargs)\n prec = 5e-4\n\n # check dense input\n mm = torch.matmul\n for batches in [(), (2,), (2, 3)]:\n for m, n, k in [\n (9, 3, 1),\n (9, 3, 2),\n (9, 2, 2),\n (100, 15, 5),\n ]:\n # skip tests that are known to fail with the basic\n # LOBPCG method due to calling cholesky on singular\n # input\n if method == 'basic' and (m, n, k) in [(9, 2, 2), (100, 15, 5)]:\n continue\n A = random_symmetric_pd_matrix(m, *batches, device=device, dtype=dtype)\n B = random_symmetric_pd_matrix(m, *batches, device=device, dtype=dtype)\n\n # classical eigenvalue problem, smallest eigenvalues\n E, V = lobpcg(A, k=k, n=n, largest=False)\n self.assertEqual(E.shape, batches + (k,))\n self.assertEqual(V.shape, batches + (m, k))\n self.assertEqual(matmul(A, V), mm(V, E.diag_embed()), atol=prec, rtol=0)\n e = torch.symeig(A)[0]\n e_smallest = e[..., :k]\n self.assertEqual(E, e_smallest)\n\n # classical eigenvalue problem, largest eigenvalues\n E, V = lobpcg(A, k=k, n=n, largest=True)\n e_largest, _ = torch.sort(e[..., -k:], descending=True)\n self.assertEqual(E, e_largest, atol=prec, rtol=0)\n self.assertEqual(matmul(A, V), mm(V, E.diag_embed()), atol=prec, rtol=0)\n\n # generalized eigenvalue problem, smallest eigenvalues\n E, V = lobpcg(A, B=B, k=k, n=n, largest=False)\n self.assertEqual(matmul(A, V), mm(matmul(B, V), E.diag_embed()), atol=prec, rtol=0)\n\n # generalized eigenvalue problem, largest eigenvalues\n E, V = lobpcg(A, B=B, k=k, n=n, largest=True)\n self.assertEqual(matmul(A, V) / E.max(), mm(matmul(B, V), (E / E.max()).diag_embed()),\n atol=prec, rtol=0)\n\n # check sparse input\n for m, n, k, density in [\n (5, 1, 1, 0.8),\n (9, 3, 2, 0.5),\n (100, 1, 1, 0.1),\n (1000, 7, 3, 0.01),\n ]:\n # skip tests that are known to fail with the basic LOBCG\n # method due to insufficient accuracy\n if method == 'basic' and (m, n, k, density) in [(1000, 7, 3, 0.01)]:\n continue\n A = random_sparse_pd_matrix(m, density=density, device=device, dtype=dtype)\n B = random_sparse_pd_matrix(m, density=density, device=device, dtype=dtype)\n A_eigenvalues = torch.arange(1, m + 1, dtype=dtype) / m\n e_smallest = A_eigenvalues[..., :k]\n e_largest, _ = torch.sort(A_eigenvalues[..., -k:], descending=True)\n\n # classical eigenvalue problem, smallest eigenvalues\n E, V = lobpcg(A, k=k, n=n, largest=False)\n self.assertEqual(E, e_smallest)\n self.assertEqual(matmul(A, V), mm(V, E.diag_embed()), atol=prec, rtol=0)\n\n # classical eigenvalue problem, largest eigenvalues\n E, V = lobpcg(A, k=k, n=n, largest=True)\n self.assertEqual(matmul(A, V), mm(V, E.diag_embed()), atol=prec, rtol=0)\n self.assertEqual(E, e_largest)\n\n # generalized eigenvalue problem, smallest eigenvalues\n E, V = 
lobpcg(A, B=B, k=k, n=n, largest=False)\n self.assertEqual(matmul(A, V), matmul(B, mm(V, E.diag_embed())), atol=prec, rtol=0)\n\n # generalized eigenvalue problem, largest eigenvalues\n E, V = lobpcg(A, B=B, k=k, n=n, largest=True)\n self.assertEqual(matmul(A, V) / E.max(), mm(matmul(B, V), (E / E.max()).diag_embed()),\n atol=prec, rtol=0)\n\n @skipCPUIfNoLapack\n @onlyCPU\n @dtypes(torch.double)\n def test_lobpcg_torchscript(self, device, dtype):\n from torch.testing._internal.common_utils import random_sparse_pd_matrix\n from torch._linalg_utils import matmul as mm\n\n lobpcg = torch.jit.script(torch.lobpcg)\n\n m = 500\n k = 5\n A1 = random_sparse_pd_matrix(m, density=2.0 / m, device=device, dtype=dtype)\n X1 = torch.randn((m, k), dtype=dtype, device=device)\n E1, V1 = lobpcg(A1, X=X1)\n eq_err = torch.norm((mm(A1, V1) - V1 * E1), 2) / E1.max()\n self.assertLess(eq_err, 1e-6)\n\n @unittest.skipIf(not TEST_SCIPY or (TEST_SCIPY and scipy.__version__ < '1.4.1'), \"Scipy not found or older than 1.4.1\")\n @skipCPUIfNoLapack\n @onlyCPU\n @dtypes(torch.double)\n def test_lobpcg_scipy(self, device, dtype):\n \"\"\"Compare torch and scipy.sparse.linalg implementations of lobpcg\n \"\"\"\n import time\n from torch.testing._internal.common_utils import random_sparse_pd_matrix\n from torch._linalg_utils import matmul as mm\n from scipy.sparse.linalg import lobpcg as scipy_lobpcg\n import scipy.sparse\n\n def toscipy(A):\n if A.layout == torch.sparse_coo:\n values = A.coalesce().values().cpu().numpy().copy()\n indices = A.coalesce().indices().cpu().numpy().copy()\n return scipy.sparse.coo_matrix((values, (indices[0], indices[1])), A.shape)\n return A.cpu().numpy().copy()\n\n niter = 1000\n repeat = 10\n m = 500 # size of the square matrix\n k = 7 # the number of requested eigenpairs\n A1 = random_sparse_pd_matrix(m, density=2.0 / m, device=device, dtype=dtype)\n B1 = random_sparse_pd_matrix(m, density=2.0 / m, device=device, dtype=dtype)\n X1 = torch.randn((m, k), dtype=dtype, device=device)\n\n A2 = toscipy(A1)\n B2 = toscipy(B1)\n X2 = toscipy(X1)\n\n lambdas1 = []\n\n def tracker(worker):\n lambdas1.append(worker.E[:])\n\n tol = 1e-8\n # tol for scipy lobpcg will be choosed so that the number of\n # iterations will be equal or very close to pytorch lobpcg\n # (that is around 170-180)\n\n # Standard eigenvalue problem\n E1, V1 = torch.lobpcg(A1, X=X1, niter=niter, largest=True, tracker=tracker, tol=tol)\n E2, V2, lambdas2 = scipy_lobpcg(A2, X2, maxiter=niter, largest=True, retLambdaHistory=True, tol=1.1 * tol)\n iters1 = len(lambdas1)\n iters2 = len(lambdas2)\n self.assertLess(abs(iters1 - iters2), 0.05 * max(iters1, iters2))\n\n E2a, V2a = scipy_lobpcg(A2, X2, maxiter=niter, largest=False)\n\n eq_err = torch.norm((mm(A1, V1) - V1 * E1), 2) / E1.max()\n eq_err_scipy = (abs(A2.dot(V2) - V2 * E2)**2).sum() ** 0.5 / E2.max()\n self.assertLess(eq_err, 1e-6) # std\n self.assertLess(eq_err_scipy, 1e-6) # std\n\n self.assertEqual(E1, torch.from_numpy(E2.copy()))\n\n # Generalized eigenvalue problem\n lambdas1 = []\n\n def tracker(worker):\n lambdas1.append(worker.E[:])\n\n E1, V1 = torch.lobpcg(A1, B=B1, X=X1, niter=niter, largest=True, tracker=tracker, tol=tol)\n E2, V2, lambdas2 = scipy_lobpcg(A2, X2, B=B2, maxiter=niter, largest=True, retLambdaHistory=True, tol=39 * tol)\n E2a, V2a = scipy_lobpcg(A2, X2, B=B2, maxiter=niter, largest=False)\n iters1 = len(lambdas1)\n iters2 = len(lambdas2)\n self.assertLess(abs(iters1 - iters2), 0.05 * max(iters1, iters2))\n\n eq_err = torch.norm((mm(A1, V1) - mm(B1, 
V1) * E1), 2) / E1.max()\n eq_err_scipy = (abs(A2.dot(V2) - B2.dot(V2) * E2)**2).sum() ** 0.5 / E2.max()\n self.assertLess(eq_err, 1e-6) # general\n self.assertLess(eq_err_scipy, 1e-6) # general\n\n self.assertEqual(E1, torch.from_numpy(E2.copy()))\n\n # Timings\n elapsed_ortho = 0\n elapsed_ortho_general = 0\n elapsed_scipy = 0\n elapsed_general_scipy = 0\n for i in range(repeat):\n start = time.time()\n torch.lobpcg(A1, X=X1, niter=niter, method='ortho', tol=tol)\n end = time.time()\n elapsed_ortho += end - start\n\n start = time.time()\n torch.lobpcg(A1, X=X1, B=B1, niter=niter, method='ortho', tol=tol)\n end = time.time()\n elapsed_ortho_general += end - start\n\n start = time.time()\n scipy_lobpcg(A2, X2, maxiter=niter, tol=1.1 * tol)\n end = time.time()\n elapsed_scipy += end - start\n\n start = time.time()\n scipy_lobpcg(A2, X2, B=B2, maxiter=niter, tol=39 * tol)\n end = time.time()\n elapsed_general_scipy += end - start\n\n elapsed_ortho_ms = 1000.0 * elapsed_ortho / repeat\n elapsed_ortho_general_ms = 1000.0 * elapsed_ortho_general / repeat\n elapsed_scipy_ms = 1000.0 * elapsed_scipy / repeat\n elapsed_general_scipy_ms = 1000.0 * elapsed_general_scipy / repeat\n\n print('''\nCPU timings: torch.lobpcg vs scipy.sparse.linalg.lobpcg\n-------------------------------------------------------\n | standard | generalized | method\ntorch.lobpcg | {:10.2f} | {:10.2f} | ortho\nscipy_lobpcg | {:10.2f} | {:10.2f} | N/A\n-(input size: {:4}, eigenpairs:{:2}, units: ms per call)-\n '''.format(elapsed_ortho_ms, elapsed_ortho_general_ms,\n elapsed_scipy_ms, elapsed_general_scipy_ms,\n m, k))\n\n # Handling of very small tolerence\n tol = 1e-100\n\n lambdas1 = []\n\n def tracker(worker):\n lambdas1.append(worker.E[:])\n\n E1, V1 = torch.lobpcg(A1, X=X1, niter=niter, largest=True, tracker=tracker, tol=tol)\n iters1 = len(lambdas1)\n eq_err = torch.norm((mm(A1, V1) - V1 * E1), 2) / E1.max()\n\n try:\n E2, V2, lambdas2 = scipy_lobpcg(A2, X2, maxiter=niter, largest=True, retLambdaHistory=True, tol=tol)\n iters2 = len(lambdas2)\n eq_err_scipy = (abs(A2.dot(V2) - V2 * E2)**2).sum() ** 0.5 / E2.max()\n except Exception as msg:\n print('Calling scipy_lobpcg failed [standard]:', msg)\n iters2 = -1\n eq_err_scipy = -1\n\n lambdas1 = []\n\n def tracker(worker):\n lambdas1.append(worker.E[:])\n\n E1, V1 = torch.lobpcg(A1, X=X1, B=B1, niter=niter, largest=True, tracker=tracker, tol=tol)\n iters1_general = len(lambdas1)\n eq_err_general = torch.norm((mm(A1, V1) - mm(B1, V1) * E1), 2) / E1.max()\n\n try:\n E2, V2, lambdas2 = scipy_lobpcg(A2, X2, B=B2, maxiter=niter, largest=True, retLambdaHistory=True, tol=tol)\n iters2_general = len(lambdas2)\n eq_err_general_scipy = (abs(A2.dot(V2) - B2.dot(V2) * E2)**2).sum() ** 0.5 / E2.max()\n except Exception as msg:\n print('Calling scipy_lobpcg failed [generalized]:', msg)\n iters2_general = -1\n eq_err_general_scipy = -1\n\n print('''\\\nHandling of small tol={:6.0e}: torch.lobpcg vs scipy.sparse.linalg.lobpcg\n----------------------------------------------------------------------------\n | standard | generalized | niter | method\ntorch.lobpcg | {:10.2e} | {:10.2e} | {:6} | ortho\nscipy_lobpcg | {:10.2e} | {:10.2e} | {:6} | N/A\n---(input size: {:4}, eigenpairs:{:2}, units: relative error, maxiter={:4})---\n'''.format(tol, eq_err, eq_err_general, iters1, eq_err_scipy, eq_err_general_scipy, iters2, m, k, niter))\n\n def _test_addmm_addmv(self, f, t, m, v, *, alpha=None, beta=None, transpose_out=False):\n dtype = t.dtype\n numpy_dtype = dtype\n if dtype in 
{torch.bfloat16}:\n numpy_dtype = torch.float\n if dtype.is_complex:\n alpha = 0.9 + 0.3j if alpha is None else alpha\n beta = 0.5 + 0.6j if beta is None else beta\n else:\n alpha = 1.2 if alpha is None else alpha\n beta = 0.8 if beta is None else beta\n res1 = f(t, m, v, alpha=alpha, beta=beta)\n res2 = torch.full_like(res1, math.nan)\n if transpose_out:\n res2 = res2.t().clone(memory_format=torch.contiguous_format).t()\n f(t, m, v, alpha=alpha, beta=beta, out=res2)\n res3 = alpha * (m.to(numpy_dtype).cpu().numpy() @ v.to(numpy_dtype).cpu().numpy())\n if beta != 0:\n res3 += (beta * t).to(numpy_dtype).cpu().numpy()\n res3 = torch.from_numpy(res3).to(dtype)\n self.assertEqual(res1, res2)\n self.assertEqual(res1, res3)\n\n @precisionOverride({torch.bfloat16: 1e-0, torch.half: 5e-4, torch.float: 1e-4, torch.double: 1e-8,\n torch.cfloat: 1e-4, torch.cdouble: 1e-8})\n @dtypesIfCUDA(*get_all_complex_dtypes(),\n *get_all_fp_dtypes(include_bfloat16=(TEST_WITH_ROCM or (CUDA11OrLater and SM53OrLater)),\n include_half=(not TEST_WITH_ROCM)))\n @dtypes(torch.bfloat16, torch.float, torch.double, torch.cfloat, torch.cdouble)\n def test_addmv(self, device, dtype):\n # have to use torch.randn(...).to(bfloat16) instead of\n # torch.randn(..., dtype=bfloat16). randn does not support\n # bfloat16 yet.\n # \"*0.2\" to reduce errors for low precision\n ts = [\n 0.2 * torch.randn(50, device=device).to(dtype),\n 0.2 * torch.randn(1, device=device).to(dtype).expand(50),\n ]\n vs = [\n 0.2 * torch.randn(100, device=device).to(dtype),\n 0.2 * torch.ones(1, device=device).to(dtype).expand(100), # to reduce errors for low precision\n ]\n ms = [\n # 0d\n 0.2 * torch.ones((), device=device).to(dtype).expand(50, 100), # to reduce errors for low precision\n # 1d\n 0.2 * torch.randn((1, 100), device=device).to(dtype).expand(50, 100),\n # this initialization reduces errors for low precision for broadcasted matrices\n # by making sure that intermediate and result values are exactly representable\n # in low precision type\n 0.2 * torch.randint(3, (50, 1), dtype=torch.float, device=device).to(dtype).expand(50, 100),\n # 2d\n 0.2 * torch.randn((50, 100), device=device).to(dtype),\n 0.2 * torch.randn((100, 50), device=device).to(dtype).t(),\n ]\n for m, v, t in itertools.product(ms, vs, ts):\n self._test_addmm_addmv(torch.addmv, t, m, v)\n # Test beta=0, t=nan\n t = torch.full((50,), math.nan, device=device).to(dtype)\n for m, v in itertools.product(ms, vs):\n self._test_addmm_addmv(torch.addmv, t, m, v, beta=0)\n\n @dtypesIfCUDA(*get_all_fp_dtypes(include_bfloat16=(TEST_WITH_ROCM or (CUDA11OrLater and SM53OrLater))))\n @dtypes(torch.float, torch.double)\n def test_addmv_rowmajor_colmajor_incx_incy_lda(self, device, dtype):\n # tests (o, s)*(s). 
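# Minimal standalone sketch of the reference relation _test_addmm_addmv checks
# for torch.addmv, namely beta * t + alpha * (m @ v); the names m, v, t and the
# alpha/beta values below are chosen only for this illustration.
import torch

m = torch.randn(5, 3, dtype=torch.double)
v = torch.randn(3, dtype=torch.double)
t = torch.randn(5, dtype=torch.double)
expected = 0.8 * t + 1.2 * m.mv(v)
assert torch.allclose(torch.addmv(t, m, v, beta=0.8, alpha=1.2), expected)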
o is output size, s is summed size.\n o = 5\n s = 3\n a_data = torch.arange(1, o * s + 1, device=device, dtype=dtype).view(o, s)\n x_data = torch.arange(1, s + 1, 1, device=device, dtype=dtype)\n y_data = torch.ones(o, device=device, dtype=dtype)\n control = torch.tensor([15., 33., 51., 69., 87.], device=device, dtype=dtype)\n\n def _test(row_major, incx, incy, lda_tail):\n if row_major:\n a_storage = torch.full((o, s + lda_tail), float('nan'), device=device, dtype=dtype)\n else:\n a_storage = torch.full((s, o + lda_tail), float('nan'), device=device, dtype=dtype).permute(1, 0)\n a = a_storage[:o, :s].copy_(a_data)\n\n x_storage = torch.full((s, incx), float('nan'), device=device, dtype=dtype)\n x = x_storage[:, 0].copy_(x_data)\n\n y_storage = torch.full((o, incy), float('nan'), device=device, dtype=dtype)\n y = y_storage[:, 0].copy_(y_data)\n\n self._test_addmm_addmv(torch.addmv, y, a, x)\n\n for row_major, incx, incy, lda_tail in itertools.product((False, True), (1, 2), (1, 2), (0, 1)):\n _test(row_major, incx, incy, lda_tail)\n\n @precisionOverride({torch.double: 1e-8, torch.float: 1e-4, torch.bfloat16: 0.6,\n torch.half: 1e-1, torch.cfloat: 1e-4, torch.cdouble: 1e-8})\n @dtypesIfCUDA(*get_all_complex_dtypes(),\n *get_all_fp_dtypes(include_bfloat16=(TEST_WITH_ROCM or (CUDA11OrLater and SM53OrLater))))\n @dtypes(*get_all_complex_dtypes(), *get_all_fp_dtypes())\n @tf32_on_and_off(0.05)\n def test_addmm(self, device, dtype):\n M = torch.randn(10, 25, device=device).to(dtype)\n m1 = torch.randn(10, 50, device=device).to(dtype)\n m2 = torch.randn(50, 25, device=device).to(dtype)\n self._test_addmm_addmv(torch.addmm, M, m1, m2)\n\n # Test 0-strided\n M = torch.randn(10, 1, device=device).to(dtype).expand(10, 25)\n m1 = torch.randn(10, 1, device=device).to(dtype).expand(10, 50)\n m2 = torch.randn(50, 25, device=device).to(dtype)\n self._test_addmm_addmv(torch.addmm, M, m1, m2)\n\n # Test beta=0, M=nan\n M = torch.full((10, 25), math.nan, device=device).to(dtype)\n m1 = torch.randn(10, 50, device=device).to(dtype)\n m2 = torch.randn(50, 25, device=device).to(dtype)\n self._test_addmm_addmv(torch.addmm, M, m1, m2, beta=0)\n\n # Test transpose\n for t1, t2, t3, t4 in itertools.product([True, False], repeat=4):\n def maybe_transpose(cond, m):\n if not cond:\n return m\n return m.t().clone(memory_format=torch.contiguous_format).t()\n\n M = maybe_transpose(t1, torch.randn(10, 25, device=device).to(dtype))\n m1 = maybe_transpose(t2, torch.randn(10, 50, device=device).to(dtype))\n m2 = maybe_transpose(t3, torch.randn(50, 25, device=device).to(dtype))\n self._test_addmm_addmv(torch.addmm, M, m1, m2, transpose_out=t4)\n\n @dtypes(torch.float, torch.double)\n @dtypesIfCUDA(*([torch.float, torch.double] + get_all_complex_dtypes()))\n @tf32_on_and_off(0.005)\n def test_addmm_sizes(self, device, dtype):\n for m in [0, 1, 25]:\n for n in [0, 1, 10]:\n for k in [0, 1, 8]:\n M = torch.randn(n, m, device=device).to(dtype)\n m1 = torch.randn(n, k, device=device).to(dtype)\n m2 = torch.randn(k, m, device=device).to(dtype)\n self._test_addmm_addmv(torch.addmm, M, m1, m2)\n\n m1 = torch.randn(n, k + 1, device=device).to(dtype)\n m2 = torch.randn(k, m, device=device).to(dtype)\n self.assertRaisesRegex(RuntimeError, f\"{n}x{k + 1}.*{k}x{m}\", lambda: torch.addmm(M, m1, m2))\n self.assertRaisesRegex(RuntimeError, f\"{n}x{k + 1}.*{k}x{m}\", lambda: torch.mm(m1, m2))\n\n @dtypes(torch.half)\n @onlyCUDA\n def test_addmm_baddbmm_overflow(self, device, dtype):\n orig = 
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction\n torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False\n inp = torch.zeros(128, 128, dtype=torch.half, device=device)\n mat1 = torch.ones(128, 1000, dtype=torch.half, device=device) * 100\n mat2 = torch.ones(1000, 128, dtype=torch.half, device=device) * 100\n out = torch.addmm(inp, mat1, mat2, alpha=0.001, beta=0.)\n # just check for no overflow on ROCM\n if TEST_WITH_ROCM:\n self.assertFalse(out.isinf().any())\n else:\n self.assertTrue((out == 10000.).all())\n inp = torch.zeros(3, 128, 128, dtype=torch.half, device=device)\n mat1 = torch.ones(3, 128, 1000, dtype=torch.half, device=device) * 100\n mat2 = torch.ones(3, 1000, 128, dtype=torch.half, device=device) * 100\n out = torch.baddbmm(inp, mat1, mat2, alpha=0.001, beta=0.)\n if TEST_WITH_ROCM:\n self.assertFalse(out.isinf().any())\n else:\n self.assertTrue((out == 10000.).all())\n torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = orig\n\n @unittest.skipIf(IS_FBCODE and IS_REMOTE_GPU, \"cublas runtime error\")\n @onlyCUDA\n def test_matmul_45724(self, device):\n # https://github.com/pytorch/pytorch/issues/45724\n a = torch.rand(65537, 22, 64, device=device, dtype=torch.half)\n b = torch.rand(65537, 64, 22, device=device, dtype=torch.half)\n c = torch.full((65537, 22, 22), math.nan, dtype=torch.half, device=device)\n cpu_result = torch.matmul(a.cpu().float(), b.cpu().float()).cuda().half()\n torch.matmul(a, b, out=c)\n self.assertEqual(c, cpu_result)\n\n @slowTest\n @onlyNativeDeviceTypes\n @dtypes(torch.float32, torch.float64, torch.bfloat16, torch.int32, torch.int64, torch.cfloat, torch.cdouble)\n @dtypesIfCUDA(torch.float32, torch.float64, torch.cfloat, torch.cdouble)\n @tf32_on_and_off(0.01)\n def test_mm(self, device, dtype):\n def _test_mm(n, m, p, dtype, genf):\n # helper function\n def matrixmultiply(mat1, mat2):\n n = mat1.size(0)\n m = mat1.size(1)\n p = mat2.size(1)\n res = torch.zeros(n, p, dtype=dtype, device=device)\n for i, j in iter_indices(res):\n res[i, j] = sum(mat1[i, k] * mat2[k, j] for k in range(m))\n return res\n\n # contiguous case\n mat1 = genf(n, m)\n mat2 = genf(m, p)\n res = torch.mm(mat1, mat2)\n\n res2 = matrixmultiply(mat1, mat2)\n self.assertEqual(res, res2)\n\n # non contiguous case 1\n mat1 = genf(n, m)\n mat2 = genf(p, m).t()\n res = torch.mm(mat1, mat2)\n\n res2 = matrixmultiply(mat1, mat2)\n self.assertEqual(res, res2)\n\n # non contiguous case 2\n mat1 = genf(m, n).t()\n mat2 = genf(m, p)\n res = torch.mm(mat1, mat2)\n\n res2 = matrixmultiply(mat1, mat2)\n self.assertEqual(res, res2)\n\n # non contiguous case 3\n mat1 = genf(m, n).t()\n mat2 = genf(p, m).t()\n res = torch.mm(mat1, mat2)\n\n res2 = matrixmultiply(mat1, mat2)\n self.assertEqual(res, res2)\n\n # test with zero stride\n mat1 = genf(n, m)\n mat2 = genf(m, 1).expand(m, p)\n res = torch.mm(mat1, mat2)\n\n res2 = matrixmultiply(mat1, mat2)\n self.assertEqual(res, res2)\n\n # explicitly exercise the _out variant in torch.mm().\n # contiguous case\n mat1 = genf(n, m)\n mat2 = genf(m, p)\n res = genf(n, p)\n torch.mm(mat1, mat2, out=res)\n\n res2 = matrixmultiply(mat1, mat2)\n self.assertEqual(res, res2)\n\n # explicitly exercise the _out variant in torch.mm().\n # non contiguous case 3\n mat1 = genf(m, n).t()\n mat2 = genf(p, m).t()\n res = genf(n, p)\n torch.mm(mat1, mat2, out=res)\n\n res2 = matrixmultiply(mat1, mat2)\n self.assertEqual(res, res2)\n\n def genf_int(x, y):\n return torch.randint(0, 100, (x, y), dtype=dtype, 
device=device)\n\n def genf_bfloat(x, y):\n return torch.randn(x, y, dtype=torch.float32, device=device).to(dtype) * 0.1\n\n def genf_float(x, y):\n return torch.randn(x, y, dtype=dtype, device=device)\n\n for (n, m, p) in [(20, 10, 15), (15, 20, 10), (25, 18, 10)]:\n if (dtype == torch.int32) or (dtype == torch.int64):\n genf = genf_int\n elif (dtype == torch.bfloat16):\n genf = genf_bfloat\n else:\n genf = genf_float\n\n _test_mm(n, m, p, dtype, genf)\n\n @onlyNativeDeviceTypes\n def test_mm_bmm_non_memory_dense(self, device):\n def _slice(tensor, fn):\n return fn(tensor)[..., ::2]\n A = torch.randn(3, 6, dtype=torch.cfloat, device=device)\n B = torch.randn(3, 3, dtype=torch.cfloat, device=device)\n out = torch.empty(3, 3, device=device, dtype=torch.complex64).t()\n out1 = torch.empty(3, 3, device=device, dtype=torch.complex64).t()\n A_conj = _slice(A, torch.conj)\n A_conj_physical = _slice(A, torch.conj_physical)\n\n self.assertEqual(torch.mm(A_conj, B, out=out), torch.mm(A_conj_physical, B, out=out))\n self.assertEqual(torch.mm(A_conj.t(), B, out=out), torch.mm(A_conj_physical.t(), B, out=out))\n\n Ab = torch.randn(2, 3, 6, dtype=torch.cfloat, device=device)\n Bb = torch.randn(2, 3, 3, dtype=torch.cfloat, device=device)\n Bb_ = torch.randn(1, 3, 3, dtype=torch.cfloat, device=device).expand(2, 3, 3)\n out_b = torch.empty(2, 3, 3, device=device, dtype=torch.complex64).mT\n\n Ab_conj = _slice(Ab, torch.conj)\n Ab_conj_physical = _slice(Ab, torch.conj_physical)\n\n def t_b(tensor):\n return tensor.mT\n\n self.assertEqual(torch.bmm(Ab_conj, Bb, out=out_b), torch.bmm(Ab_conj_physical, Bb, out=out_b))\n self.assertEqual(torch.bmm(t_b(Ab_conj), Bb, out=out_b), torch.bmm(t_b(Ab_conj_physical), Bb, out=out_b))\n\n # test broadcasting\n self.assertEqual(torch.bmm(Ab_conj, Bb_, out=out_b), torch.bmm(Ab_conj_physical, Bb_, out=out_b))\n self.assertEqual(torch.bmm(t_b(Ab_conj), Bb_, out=out_b), torch.bmm(t_b(Ab_conj_physical), Bb_, out=out_b))\n\n @onlyNativeDeviceTypes\n @dtypes(torch.float32, torch.float64)\n def test_strided_mm_bmm(self, device, dtype):\n # Tests strided view case with stride smaller than corresponding dimension size\n x = torch.tensor([[1., 2., 3.], [4., 5., 6.]], dtype=dtype, device=device)\n new_shape = [2, 2, 2]\n new_stride = [3, 1, 1]\n sx = torch.as_strided(x, size=new_shape, stride=new_stride)\n\n torch_fn = lambda x: torch.bmm(x, x) # noqa: E731\n np_fn = lambda x: np.matmul(x, x) # noqa: E731\n self.compare_with_numpy(torch_fn, np_fn, sx)\n\n torch_fn = lambda x: torch.mm(x, x) # noqa: E731\n self.compare_with_numpy(torch_fn, np_fn, sx[0])\n\n @precisionOverride({torch.half: 0.05, torch.bfloat16: 0.05})\n @skipCUDAIf(torch.version.cuda == \"10.1\", \"flaky on CUDA 10.1\")\n @onlyNativeDeviceTypes\n @dtypes(*get_all_fp_dtypes(), *get_all_complex_dtypes())\n @tf32_on_and_off(0.05)\n def test_bmm(self, device, dtype):\n if self.device_type == 'cuda' and dtype is torch.bfloat16 and CUDA11OrLater and not SM53OrLater:\n # cuBLAS does not guarantee BFloat16 support on SM < 53.\n # So on PyTorch, we consider BFloat16 support on SM < 53 as\n # undefined bahavior\n return\n\n batch_sizes = [1, 10]\n M, N, O = 23, 15, 12\n numpy_dtype = dtype if dtype != torch.bfloat16 else torch.float32\n\n is_supported = True\n if dtype == torch.bfloat16 and self.device_type == 'cuda':\n is_supported = TEST_WITH_ROCM or (CUDA11OrLater and SM53OrLater)\n\n if not is_supported:\n for num_batches in batch_sizes:\n b1 = torch.randn(num_batches, M, N, device=device).to(dtype)\n b2 = 
torch.randn(num_batches, N, O, device=device).to(dtype)\n self.assertRaisesRegex(RuntimeError, \"type|Type|not implemented|CUBLAS_STATUS_NOT_SUPPORTED\",\n lambda: torch.bmm(b1, b2))\n return\n\n def invert_perm(p):\n d = {x: i for i, x in enumerate(p)}\n return (d[0], d[1], d[2])\n\n def generate_inputs(num_batches):\n # transposed tensors\n for perm1, perm2 in itertools.product(itertools.permutations((0, 1, 2)), repeat=2):\n b1 = make_tensor((num_batches, M, N), device, dtype, low=-0.1, high=0.1)\n b2 = make_tensor((num_batches, N, O), device, dtype, low=-0.1, high=0.1)\n b1 = b1.permute(perm1).contiguous().permute(invert_perm(perm1))\n b2 = b2.permute(perm2).contiguous().permute(invert_perm(perm2))\n yield b1, b2\n # broadcasting tensors\n for b1, b2, b3, b4, b5, b6 in itertools.product((True, False), repeat=6):\n shape1 = (num_batches if b1 else 1, M if b2 else 1, N if b3 else 1)\n shape2 = (num_batches if b4 else 1, N if b5 else 1, O if b6 else 1)\n b1 = make_tensor(shape1, device, dtype, low=-0.1, high=0.1).expand(num_batches, M, N)\n b2 = make_tensor(shape2, device, dtype, low=-0.1, high=0.1).expand(num_batches, N, O)\n yield b1, b2\n # zero-sized tensors\n for z1, z2, z3, z4 in itertools.product((True, False), repeat=4):\n shape1 = (num_batches if z1 else 0, M if z2 else 0, N if z3 else 0)\n shape2 = (num_batches if z1 else 0, N if z3 else 0, O if z4 else 0)\n b1 = torch.randn(shape1, dtype=dtype, device=device)\n b2 = torch.randn(shape2, dtype=dtype, device=device)\n yield b1, b2\n\n for num_batches in batch_sizes:\n for (b1, b2), perm3 in itertools.product(generate_inputs(num_batches), itertools.permutations((0, 1, 2))):\n res1 = torch.bmm(b1, b2)\n res2 = torch.full((num_batches, M, O), math.nan, dtype=dtype, device=device) \\\n .permute(perm3).contiguous().permute(invert_perm(perm3))\n torch.bmm(b1, b2, out=res2)\n expect = torch.from_numpy(\n b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()).to(device=device, dtype=dtype)\n self.assertEqual(expect, res1)\n self.assertEqual(expect, res2)\n\n if self.device_type == 'cuda':\n # check that mixed arguments are rejected\n self.assertRaises(RuntimeError, lambda: torch.bmm(b1, b2.cpu()))\n self.assertRaises(RuntimeError, lambda: torch.bmm(b1.cpu(), b2))\n self.assertRaises(RuntimeError, lambda: torch.bmm(b1, b2, out=res2.cpu()))\n\n def _test_addbmm_baddbmm(self, func, b1, b2, ref, out_tensor):\n getattr(out_tensor, func + \"_\")(b1, b2)\n self.assertEqual(out_tensor, ref)\n res3 = out_tensor.clone()\n\n with self.assertWarnsOnceRegex(\n UserWarning, f\"This overload of {func}_ is deprecated\"):\n getattr(out_tensor, func + \"_\")(1, b1, b2)\n self.assertEqual(out_tensor, ref * 2),\n getattr(res3, func + \"_\")(b1, b2, beta=1)\n self.assertEqual(out_tensor, res3)\n\n with self.assertWarnsOnceRegex(\n UserWarning, f\"This overload of {func}_ is deprecated\"):\n getattr(out_tensor, func + \"_\")(1., .5, b1, b2)\n self.assertEqual(out_tensor, ref * 2.5)\n getattr(res3, func + \"_\")(b1, b2, beta=1., alpha=.5)\n self.assertEqual(out_tensor, res3)\n\n with self.assertWarnsOnceRegex(\n UserWarning, f\"This overload of {func} is deprecated\"):\n self.assertEqual(out_tensor, getattr(torch, func)(1, out_tensor, 0, b1, b2))\n\n res4 = getattr(torch, func)(out_tensor, b1, b2, beta=1, alpha=.5)\n self.assertEqual(res4, ref * 3),\n\n nan = torch.full_like(out_tensor, math.nan)\n res5 = getattr(torch, func)(nan, b1, b2, beta=0, alpha=1)\n self.assertEqual(res5, ref)\n\n if b1.is_complex():\n res6 = getattr(torch, 
func)(out_tensor, b1, b2, beta=.1j, alpha=.5j)\n self.assertEqual(res6, out_tensor * .1j + .5j * ref)\n else:\n res6 = getattr(torch, func)(out_tensor, b1, b2, beta=.1, alpha=.5)\n self.assertEqual(res6, out_tensor * .1 + .5 * ref)\n\n res7 = torch.full_like(out_tensor, math.nan)\n getattr(torch, func)(nan, b1, b2, beta=0, out=res7)\n self.assertEqual(res7, ref)\n\n @precisionOverride({torch.half: 0.05, torch.bfloat16: 0.05})\n @onlyNativeDeviceTypes\n @dtypes(*get_all_fp_dtypes(), *get_all_complex_dtypes())\n @tf32_on_and_off(0.05)\n def test_addbmm(self, device, dtype):\n if self.device_type == 'cuda' and dtype is torch.bfloat16 and CUDA11OrLater and not SM53OrLater:\n # cuBLAS does not guarantee BFloat16 support on SM < 53.\n # So on PyTorch, we consider BFloat16 support on SM < 53 as\n # undefined bahavior\n return\n\n num_batches = 2\n M, N, O = 16, 17, 18\n\n is_supported = True\n if dtype == torch.bfloat16:\n if self.device_type == 'cpu':\n self.precision = 1 # 43 vs 43.75\n else:\n is_supported = TEST_WITH_ROCM or (CUDA11OrLater and SM53OrLater)\n\n if not is_supported:\n b1 = make_tensor((num_batches, M, N), device, dtype, low=-1, high=1)\n b2 = make_tensor((num_batches, N, O), device, dtype, low=-1, high=1)\n t = make_tensor((M, O), device, dtype, low=-1, high=1)\n self.assertRaisesRegex(RuntimeError, \"type|Type|not implemented|CUBLAS_STATUS_NOT_SUPPORTED\",\n lambda: torch.addbmm(t, b1, b2))\n return\n\n def invert_perm(p):\n d = {x: i for i, x in enumerate(p)}\n return (d[0], d[1], d[2])\n\n def generate_tensor():\n numpy_dtype = dtype if dtype != torch.bfloat16 else torch.float32\n # transposed tensors\n for perm1, perm2 in itertools.product(itertools.permutations((0, 1, 2)), repeat=2):\n for perm3 in itertools.permutations((0, 1)):\n b1 = make_tensor((num_batches, M, N), device, dtype, low=-1, high=1) * 0.1\n b2 = make_tensor((num_batches, N, O), device, dtype, low=-1, high=1) * 0.1\n b1 = b1.permute(perm1).contiguous().permute(invert_perm(perm1))\n b2 = b2.permute(perm2).contiguous().permute(invert_perm(perm2))\n ref = torch.from_numpy(\n b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()\n ).to(device=device, dtype=dtype).sum(0)\n out_tensor = torch.zeros_like(ref).permute(perm3).contiguous().permute(perm3)\n yield b1, b2, ref, out_tensor\n # broadcasting tensors\n for s1, s2, s3, s4, s5, s6 in itertools.product((True, False), repeat=6):\n shape1 = (num_batches if s1 else 1, M if s2 else 1, N if s3 else 1)\n shape2 = (num_batches if s4 else 1, N if s5 else 1, O if s6 else 1)\n b1 = make_tensor(shape1, device, dtype, low=-1, high=1).expand(num_batches, M, N) * 0.1\n b2 = make_tensor(shape2, device, dtype, low=-1, high=1).expand(num_batches, N, O) * 0.1\n ref = torch.from_numpy(\n b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()\n ).to(device=device, dtype=dtype).sum(0)\n out_tensor = torch.zeros_like(ref)\n yield b1, b2, ref, out_tensor\n # zero-sized tensors\n for z1, z2, z3, z4 in itertools.product((True, False), repeat=4):\n shape1 = (num_batches if z1 else 0, M if z2 else 0, N if z3 else 0)\n shape2 = (num_batches if z1 else 0, N if z3 else 0, O if z4 else 0)\n b1 = make_tensor(shape1, device, dtype, low=-1, high=1) * 0.1\n b2 = make_tensor(shape2, device, dtype, low=-1, high=1) * 0.1\n ref = torch.from_numpy(\n b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()\n ).to(device=device, dtype=dtype).sum(0)\n out_tensor = torch.zeros_like(ref)\n yield b1, b2, ref, out_tensor\n\n for b1, b2, ref, out_tensor in 
generate_tensor():\n self._test_addbmm_baddbmm(\"addbmm\", b1, b2, ref, out_tensor)\n\n @precisionOverride({torch.half: 0.1, torch.bfloat16: 0.5})\n @onlyNativeDeviceTypes\n @dtypes(*get_all_fp_dtypes(), *get_all_complex_dtypes())\n @tf32_on_and_off(0.05)\n def test_baddbmm(self, device, dtype):\n if self.device_type == 'cuda' and dtype is torch.bfloat16 and CUDA11OrLater and not SM53OrLater:\n # cuBLAS does not guarantee BFloat16 support on SM < 53.\n # So on PyTorch, we consider BFloat16 support on SM < 53 as\n # undefined bahavior\n return\n\n num_batches = 10\n M, N, O = 12, 8, 50\n\n is_supported = True\n if dtype == torch.bfloat16 and self.device_type == 'cuda':\n is_supported = TEST_WITH_ROCM or (CUDA11OrLater and SM53OrLater)\n\n if not is_supported:\n b1 = make_tensor((num_batches, M, N), device, dtype, low=-1, high=1)\n b2 = make_tensor((num_batches, N, O), device, dtype, low=-1, high=1)\n t = make_tensor((num_batches, M, O), device, dtype, low=-1, high=1)\n self.assertRaisesRegex(RuntimeError, \"type|Type|not implemented|CUBLAS_STATUS_NOT_SUPPORTED\",\n lambda: torch.baddbmm(t, b1, b2))\n return\n\n def invert_perm(p):\n d = {x: i for i, x in enumerate(p)}\n return (d[0], d[1], d[2])\n\n def generate_tensor():\n numpy_dtype = dtype if dtype != torch.bfloat16 else torch.float32\n # transposed tensors\n for perm1, perm2, perm3 in itertools.product(itertools.permutations((0, 1, 2)), repeat=3):\n b1 = make_tensor((num_batches, M, N), device, dtype, low=-1, high=1)\n b2 = make_tensor((num_batches, N, O), device, dtype, low=-1, high=1)\n b1 = b1.permute(perm1).contiguous().permute(invert_perm(perm1))\n b2 = b2.permute(perm2).contiguous().permute(invert_perm(perm2))\n ref = torch.from_numpy(\n b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()).to(device=device, dtype=dtype)\n out_tensor = torch.zeros_like(ref)\n out_tensor = out_tensor.permute(perm3).contiguous().permute(invert_perm(perm3))\n yield b1, b2, ref, out_tensor\n # broadcasting tensors\n for s1, s2, s3, s4, s5, s6 in itertools.product((True, False), repeat=6):\n shape1 = (num_batches if s1 else 1, M if s2 else 1, N if s3 else 1)\n shape2 = (num_batches if s4 else 1, N if s5 else 1, O if s6 else 1)\n b1 = make_tensor(shape1, device, dtype, low=-1, high=1).expand(num_batches, M, N)\n b2 = make_tensor(shape2, device, dtype, low=-1, high=1).expand(num_batches, N, O)\n ref = torch.from_numpy(\n b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()).to(device=device, dtype=dtype)\n out_tensor = torch.zeros_like(ref)\n yield b1, b2, ref, out_tensor\n # zero-sized tensors\n for z1, z2, z3, z4 in itertools.product((True, False), repeat=4):\n shape1 = (num_batches if z1 else 0, M if z2 else 0, N if z3 else 0)\n shape2 = (num_batches if z1 else 0, N if z3 else 0, O if z4 else 0)\n b1 = make_tensor(shape1, device, dtype, low=-2, high=2)\n b2 = make_tensor(shape2, device, dtype, low=-2, high=2)\n ref = torch.from_numpy(\n b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()).to(device=device, dtype=dtype)\n out_tensor = torch.zeros_like(ref)\n yield b1, b2, ref, out_tensor\n\n for b1, b2, ref, out_tensor in generate_tensor():\n self._test_addbmm_baddbmm(\"baddbmm\", b1, b2, ref, out_tensor)\n\n # TODO: update to compare against NumPy\n @onlyCUDA\n def test_solve_methods_arg_device(self, device):\n for b_device, A_device in itertools.product(['cpu', device], repeat=2):\n if b_device == A_device:\n continue\n\n b = torch.randn(3, 1, device=b_device)\n A = torch.randn(3, 3, device=A_device)\n\n 
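# Minimal standalone sketch of the reference relations behind the addbmm/baddbmm
# checks above: baddbmm keeps the batch dimension, addbmm reduces over it.
# The names b1, b2, inp, inp_b and the alpha/beta values are chosen only for
# this illustration.
import torch

b1 = torch.randn(2, 3, 4, dtype=torch.double)
b2 = torch.randn(2, 4, 5, dtype=torch.double)
inp_b = torch.randn(2, 3, 5, dtype=torch.double)   # per-batch bias for baddbmm
inp = torch.randn(3, 5, dtype=torch.double)        # single bias for addbmm
ref_baddbmm = 0.5 * inp_b + 2.0 * torch.matmul(b1, b2)
ref_addbmm = 0.5 * inp + 2.0 * torch.matmul(b1, b2).sum(0)
assert torch.allclose(torch.baddbmm(inp_b, b1, b2, beta=0.5, alpha=2.0), ref_baddbmm)
assert torch.allclose(torch.addbmm(inp, b1, b2, beta=0.5, alpha=2.0), ref_addbmm)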
# solve and cholesky_solve goes through generic backend dispatch and hit kernel specific device check first\n # triangular_solve goes through specific backend dispatch (CPU/CUDA) and hit auto-generated device check first\n generic_backend_dispatch_err_str = \"Expected b and A to be on the same device\"\n specific_backend_dispatch_err_str = \"Expected all tensors to be on the same device\"\n with self.assertRaisesRegex(RuntimeError, generic_backend_dispatch_err_str):\n torch.solve(b, A)\n\n with self.assertRaisesRegex(RuntimeError, generic_backend_dispatch_err_str):\n torch.cholesky_solve(b, A)\n\n with self.assertRaisesRegex(RuntimeError, specific_backend_dispatch_err_str):\n torch.triangular_solve(b, A)\n\n # b and A have to be modified to match accepted inputs sizes for lu_solve\n b = b.unsqueeze(0)\n A = A.unsqueeze(0)\n with self.assertRaisesRegex(RuntimeError, specific_backend_dispatch_err_str):\n torch.lu_solve(b, A, torch.rand(A.shape[:-1], device=A_device).int())\n\n # This checks if a suitable error message is thrown\n # when LU output and pivots are not on the same device\n with self.assertRaisesRegex(RuntimeError, specific_backend_dispatch_err_str):\n torch.lu_solve(b, A, torch.rand(A.shape[:-1], device=b_device).int())\n\n @precisionOverride({torch.float32: 5e-3, torch.complex64: 1e-3})\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_pinverse(self, device, dtype):\n from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value as fullrank\n\n def run_test(M):\n # Testing against definition for pseudo-inverses\n MPI = torch.pinverse(M)\n MPI_ = MPI.cpu().numpy()\n M_ = M.cpu().numpy()\n if M.numel() > 0:\n self.assertEqual(M_, np.matmul(np.matmul(M_, MPI_), M_))\n self.assertEqual(MPI_, np.matmul(np.matmul(MPI_, M_), MPI_))\n self.assertEqual(np.matmul(M_, MPI_), np.matmul(M_, MPI_).swapaxes(-2, -1).conj())\n self.assertEqual(np.matmul(MPI_, M_), np.matmul(MPI_, M_).swapaxes(-2, -1).conj())\n else:\n self.assertEqual(M.shape, MPI.shape[:-2] + (MPI.shape[-1], MPI.shape[-2]))\n for sizes in [(5, 5), (3, 5, 5), (3, 7, 5, 5), # square matrices\n (3, 2), (5, 3, 2), (7, 5, 3, 2), # fat matrices\n (2, 3), (5, 2, 3), (7, 5, 2, 3), # thin matrices\n (0, 0), (0, 2), (2, 0), (3, 0, 0), (0, 3, 0), (0, 0, 3)]: # zero numel matrices\n M = torch.randn(*sizes, dtype=dtype, device=device)\n run_test(M)\n\n # Test inverse and pseudo-inverse for invertible matrix\n for sizes in [(5, 5), (3, 5, 5), (3, 7, 5, 5)]:\n matsize = sizes[-1]\n batchdims = sizes[:-2]\n M = fullrank(matsize, *batchdims, dtype=dtype, device=device)\n self.assertEqual(torch.eye(matsize, dtype=dtype, device=device).expand(sizes), M.pinverse().matmul(M),\n atol=1e-7, rtol=0, msg='pseudo-inverse for invertible matrix')\n\n @skipCPUIfNoLapack\n @skipCUDAIfNoMagmaAndNoCusolver\n @dtypes(torch.double, torch.cdouble)\n def test_matrix_power_non_negative(self, device, dtype):\n def check(*size, noncontiguous=False):\n t = make_tensor(size, device, dtype, noncontiguous=noncontiguous)\n for n in range(8):\n res = torch.linalg.matrix_power(t, n)\n ref = np.linalg.matrix_power(t.cpu().numpy(), n)\n self.assertEqual(res.cpu(), torch.from_numpy(ref))\n\n check(0, 0)\n check(1, 1)\n check(5, 5)\n check(5, 5, noncontiguous=True)\n check(0, 3, 3)\n check(2, 3, 3)\n check(2, 3, 4, 4, noncontiguous=True)\n\n @skipCPUIfNoLapack\n @skipCUDAIfNoMagmaAndNoCusolver\n @dtypes(torch.double, torch.cdouble)\n def 
test_matrix_power_negative(self, device, dtype):\n from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value\n\n def check(*size):\n t = random_fullrank_matrix_distinct_singular_value(*size, dtype=dtype, device=device)\n for n in range(-7, 0):\n res = torch.linalg.matrix_power(t, n)\n ref = np.linalg.matrix_power(t.cpu().numpy(), n)\n self.assertEqual(res.cpu(), torch.from_numpy(ref))\n\n check(0)\n check(5)\n check(0, 2)\n check(3, 0)\n check(3, 2)\n check(5, 2, 3)\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float, torch.complex64)\n def test_linalg_matrix_exp_utils(self, device, dtype):\n # test linear combination\n def run_test(coeff_shape, data_shape):\n coeffs = torch.rand(*coeff_shape, device=device, dtype=torch.float)\n x = torch.rand(coeff_shape[1], *data_shape, device=device, dtype=dtype)\n\n res1 = torch._compute_linear_combination(x, coeffs)\n res2 = (x.unsqueeze(0) * coeffs.view(*coeff_shape, *([1] * len(data_shape)))).sum(1)\n self.assertEqual(res1, res2, atol=1e-5, rtol=0.0)\n\n # check `out=` version\n res3 = torch.zeros(coeff_shape[0], *data_shape, device=device, dtype=dtype)\n torch._compute_linear_combination(x, coeffs, out=res3)\n self.assertEqual(res1, res3, atol=1e-5, rtol=0.0)\n\n res4 = torch.ones(coeff_shape[0], *data_shape, device=device, dtype=dtype)\n torch._compute_linear_combination(x, coeffs, out=res4)\n self.assertEqual(res1, res4 - 1.0, atol=1e-5, rtol=0.0)\n\n res5 = torch.ones(coeff_shape[0], *data_shape, device=device, dtype=dtype)\n res5_clone = res5.clone()\n torch._compute_linear_combination(x, coeffs, out=res5)\n self.assertEqual(res1, res5 - res5_clone, atol=1e-5, rtol=0.0)\n\n run_test([1, 3], [2, 2])\n run_test([3, 1], [2, 2])\n run_test([1, 10], [10, 10])\n run_test([10, 1], [10, 10])\n run_test([5, 3], [2, 2])\n run_test([5, 3], [100, 100])\n run_test([3, 4], [3, 3, 3])\n run_test([3, 4], [3, 3, 3, 3])\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float, torch.double, torch.complex64, torch.complex128)\n def test_linalg_matrix_exp_boundary_cases(self, device, dtype):\n expm = torch.linalg.matrix_exp\n\n with self.assertRaisesRegex(RuntimeError, \"Expected a floating point or complex tensor\"):\n expm(torch.randn(3, 3).type(torch.int))\n\n with self.assertRaisesRegex(RuntimeError, \"must have at least 2 dimensions\"):\n expm(torch.randn(3))\n\n with self.assertRaisesRegex(RuntimeError, \"must be batches of square matrices\"):\n expm(torch.randn(3, 2, 1))\n\n # check 1x1 matrices\n x = torch.randn(3, 3, 1, 1)\n self.assertEqual(expm(x), x.exp())\n\n @slowTest\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)\n def test_linalg_matrix_exp_analytic(self, device, dtype):\n expm = torch.linalg.matrix_exp\n # check zero matrix\n x = torch.zeros(20, 20, dtype=dtype, device=device)\n self.assertTrue((expm(x) == torch.eye(20, 20, dtype=dtype, device=device)).all().item())\n\n def normalize_to_1_operator_norm(sample, desired_norm):\n sample_norm, _ = sample.abs().sum(-2).max(-1)\n sample_to_1_norm = sample / sample_norm.unsqueeze(-1).unsqueeze(-1)\n return sample_to_1_norm * desired_norm\n\n def gen_good_cond_number_matrices(*n):\n \"\"\"\n Generates a diagonally-domimant matrix\n with the eigenvalues centered at 1\n and the radii at most (n[-1] - 1) / (n[-2] ** 2)\n \"\"\"\n identity = torch.eye(n[-2], n[-1], dtype=dtype, device=device).expand(*n)\n x = torch.rand(*n, dtype=dtype, device=device) / (n[-1] ** 2)\n x = (x - x * 
identity) + identity\n return x\n\n def run_test(*n):\n if dtype == torch.float:\n thetas = [\n 1.192092800768788e-07, # deg 1\n 5.978858893805233e-04, # deg 2\n 5.116619363445086e-02, # deg 4\n 5.800524627688768e-01, # deg 8\n 1.461661507209034e+00, # deg 12\n 3.010066362817634e+00 # deg 18\n ]\n else: # if torch.double\n thetas = [\n 2.220446049250313e-16, # deg 1\n 2.580956802971767e-08, # deg 2\n 3.397168839976962e-04, # deg 4\n 4.991228871115323e-02, # deg 8\n 2.996158913811580e-01, # deg 12\n 1.090863719290036e+00 # deg 18\n ]\n\n # generate input\n q = gen_good_cond_number_matrices(*n)\n q_ = q.cpu().numpy()\n qinv = torch.inverse(q)\n qinv_ = qinv.cpu().numpy()\n d = torch.randn(n[:-1], dtype=dtype, device=device)\n x = torch.from_numpy(\n np.matmul(q_, np.matmul(torch.diag_embed(d).cpu().numpy(), qinv_))).to(device)\n x_norm, _ = x.abs().sum(-2).max(-1)\n\n # test simple analytic whatever norm generated\n mexp = expm(x)\n mexp_analytic = np.matmul(\n q_,\n np.matmul(\n torch.diag_embed(d.exp()).cpu().numpy(),\n qinv_\n )\n )\n self.assertEqual(mexp, mexp_analytic, atol=1e-3, rtol=0.0)\n\n # generate norms to test different degree expansions\n sample_norms = []\n for i in range(len(thetas) - 1):\n sample_norms.append(0.5 * (thetas[i] + thetas[i + 1]))\n sample_norms = [thetas[0] / 2] + sample_norms + [thetas[-1] * 2]\n\n # matrices to equal norm\n for sample_norm in sample_norms:\n x_normalized = normalize_to_1_operator_norm(x, sample_norm)\n\n mexp = expm(x_normalized)\n mexp_analytic = np.matmul(\n q_,\n np.matmul(\n torch.diag_embed((d / x_norm.unsqueeze(-1) * sample_norm).exp()).cpu().numpy(),\n qinv_\n )\n )\n self.assertEqual(mexp, mexp_analytic, atol=1e-3, rtol=0.0)\n\n # single matrix\n run_test(2, 2)\n run_test(3, 3)\n run_test(4, 4)\n run_test(5, 5)\n run_test(100, 100)\n run_test(200, 200)\n\n # small batch of matrices\n run_test(3, 2, 2)\n run_test(3, 3, 3)\n run_test(3, 4, 4)\n run_test(3, 5, 5)\n run_test(3, 100, 100)\n run_test(3, 200, 200)\n\n # large batch of matrices\n run_test(3, 3, 2, 2)\n run_test(3, 3, 3, 3)\n run_test(3, 3, 4, 4)\n run_test(3, 3, 5, 5)\n run_test(3, 3, 100, 100)\n run_test(3, 3, 200, 200)\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float, torch.double)\n def test_linalg_matrix_exp_batch(self, device, dtype):\n\n def run_test(*n):\n tensors_batch = torch.zeros(n, dtype=dtype, device=device)\n tensors_batch = tensors_batch.view(-1, n[-2], n[-1])\n\n num_matrices = tensors_batch.size(0)\n tensors_list = []\n for i in range(num_matrices):\n tensors_list.append(torch.randn(n[-2], n[-1], dtype=dtype, device=device))\n\n for i in range(num_matrices):\n tensors_batch[i, ...] 
= tensors_list[i]\n\n tensors_exp_map = (torch.linalg.matrix_exp(x) for x in tensors_list)\n tensors_exp_batch = torch.linalg.matrix_exp(tensors_batch)\n\n for i, tensor_exp in enumerate(tensors_exp_map):\n self.assertEqual(tensors_exp_batch[i, ...], tensor_exp)\n\n # small batch of matrices\n run_test(3, 2, 2)\n run_test(3, 3, 3)\n run_test(3, 4, 4)\n run_test(3, 5, 5)\n\n # large batch of matrices\n run_test(3, 3, 2, 2)\n run_test(3, 3, 3, 3)\n run_test(3, 3, 4, 4)\n run_test(3, 3, 5, 5)\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)\n def test_linalg_matrix_exp_compare_with_taylor(self, device, dtype):\n\n def normalize_to_1_operator_norm(sample, desired_norm):\n sample_norm, _ = sample.abs().sum(-2).max(-1)\n sample_to_1_norm = sample / sample_norm.unsqueeze(-1).unsqueeze(-1)\n return sample_to_1_norm * desired_norm\n\n def gen_good_cond_number_matrices(*n):\n \"\"\"\n Generates a diagonally-domimant matrix\n with the eigenvalues centered at 1\n and the radii at most (n[-1] - 1) / (n[-2] ** 2)\n \"\"\"\n identity = torch.eye(n[-2], n[-1], dtype=dtype, device=device).expand(*n)\n x = torch.rand(*n, dtype=dtype, device=device) / (n[-1] ** 2)\n x = (x - x * identity) + identity\n return x\n\n def get_taylor_approximation(a, deg):\n a_ = a.cpu().numpy()\n identity = torch.eye(a.size(-2), a.size(-1), dtype=dtype, device=device).expand_as(a)\n res = identity.cpu().numpy()\n taylor_term = identity.cpu().numpy()\n\n for i in range(1, deg + 1):\n taylor_term = np.matmul(a_, taylor_term) / i\n res = res + taylor_term\n\n return res\n\n def scale_square(a, deg):\n if a.abs().pow(2).sum().sqrt() < 1.0:\n return get_taylor_approximation(a, 12)\n else:\n s = int(torch.log2(a.abs().pow(2).sum().sqrt()).ceil().item())\n b = a / (2 ** s)\n b = get_taylor_approximation(b, 18)\n for _ in range(s):\n b = np.matmul(b, b)\n return torch.from_numpy(b).to(a.device)\n\n def run_test(*n):\n degs = [1, 2, 4, 8, 12, 18]\n if dtype == torch.float:\n thetas = [\n 1.192092800768788e-07, # deg 1\n 5.978858893805233e-04, # deg 2\n 5.116619363445086e-02, # deg 4\n 5.800524627688768e-01, # deg 8\n 1.461661507209034e+00, # deg 12\n 3.010066362817634e+00 # deg 18\n ]\n else: # if torch.double\n thetas = [\n 2.220446049250313e-16, # deg 1\n 2.580956802971767e-08, # deg 2\n 3.397168839976962e-04, # deg 4\n 4.991228871115323e-02, # deg 8\n 2.996158913811580e-01, # deg 12\n 1.090863719290036e+00 # deg 18\n ]\n\n # generate norms to test different degree expansions\n sample_norms = []\n for i in range(len(thetas) - 1):\n sample_norms.append(0.5 * (thetas[i] + thetas[i + 1]))\n sample_norms = [thetas[0] / 2] + sample_norms + [thetas[-1] * 2]\n degs = [degs[0]] + degs\n\n for sample_norm, deg in zip(sample_norms, degs):\n x = gen_good_cond_number_matrices(*n)\n x = normalize_to_1_operator_norm(x, sample_norm)\n\n mexp = torch.linalg.matrix_exp(x)\n mexp_taylor = scale_square(x, deg)\n\n self.assertEqual(mexp, mexp_taylor, atol=1e-2, rtol=0.0)\n\n # single matrix\n run_test(2, 2)\n run_test(3, 3)\n run_test(4, 4)\n run_test(5, 5)\n\n # small batch of matrices\n run_test(3, 2, 2)\n run_test(3, 3, 3)\n run_test(3, 4, 4)\n run_test(3, 5, 5)\n\n # large batch of matrices\n run_test(3, 3, 2, 2)\n run_test(3, 3, 3, 3)\n run_test(3, 3, 4, 4)\n run_test(3, 3, 5, 5)\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n @precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,\n torch.float64: 1e-8, 
torch.complex128: 1e-8})\n def test_slogdet(self, device, dtype):\n from torch.testing._internal.common_utils import (random_hermitian_matrix, random_hermitian_psd_matrix,\n random_hermitian_pd_matrix, random_square_matrix_of_rank)\n\n # mat_chars denotes matrix characteristics\n # possible values are: hermitian, hermitian_psd, hermitian_pd, singular, non_singular\n def run_test(matsize, batchdims, mat_chars):\n num_matrices = np.prod(batchdims)\n list_of_matrices = []\n if num_matrices != 0:\n for idx in range(num_matrices):\n mat_type = idx % len(mat_chars)\n if mat_chars[mat_type] == 'hermitian':\n list_of_matrices.append(random_hermitian_matrix(matsize, dtype=dtype, device=device))\n elif mat_chars[mat_type] == 'hermitian_psd':\n list_of_matrices.append(random_hermitian_psd_matrix(matsize, dtype=dtype, device=device))\n elif mat_chars[mat_type] == 'hermitian_pd':\n list_of_matrices.append(random_hermitian_pd_matrix(matsize, dtype=dtype, device=device))\n elif mat_chars[mat_type] == 'singular':\n list_of_matrices.append(torch.ones(matsize, matsize, dtype=dtype, device=device))\n elif mat_chars[mat_type] == 'non_singular':\n list_of_matrices.append(random_square_matrix_of_rank(matsize, matsize, dtype=dtype, device=device))\n full_tensor = torch.stack(list_of_matrices, dim=0).reshape(batchdims + (matsize, matsize))\n else:\n full_tensor = torch.randn(*batchdims, matsize, matsize, dtype=dtype, device=device)\n\n actual_value = torch.linalg.slogdet(full_tensor)\n expected_value = np.linalg.slogdet(full_tensor.cpu().numpy())\n self.assertEqual(expected_value[0], actual_value[0], atol=self.precision, rtol=self.precision)\n self.assertEqual(expected_value[1], actual_value[1], atol=self.precision, rtol=self.precision)\n\n # test out=variant\n sign_out = torch.empty_like(actual_value[0])\n logabsdet_out = torch.empty_like(actual_value[1])\n ans = torch.linalg.slogdet(full_tensor, out=(sign_out, logabsdet_out))\n self.assertEqual(ans[0], sign_out)\n self.assertEqual(ans[1], logabsdet_out)\n self.assertEqual(sign_out, actual_value[0])\n self.assertEqual(logabsdet_out, actual_value[1])\n\n for matsize, batchdims in itertools.product([0, 3, 5], [(0,), (3,), (5, 3)]):\n run_test(matsize, batchdims, mat_chars=['hermitian_pd'])\n run_test(matsize, batchdims, mat_chars=['singular'])\n run_test(matsize, batchdims, mat_chars=['non_singular'])\n run_test(matsize, batchdims, mat_chars=['hermitian', 'hermitian_pd', 'hermitian_psd'])\n run_test(matsize, batchdims, mat_chars=['singular', 'non_singular'])\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_slogdet_errors_and_warnings(self, device, dtype):\n # slogdet requires the input to be a square matrix or batch of square matrices\n a = torch.randn(2, 3, device=device, dtype=dtype)\n with self.assertRaisesRegex(RuntimeError, r'must be batches of square matrices'):\n torch.linalg.slogdet(a)\n\n # slogdet requires the input to be at least 2 dimensional tensor\n a = torch.randn(2, device=device, dtype=dtype)\n with self.assertRaisesRegex(RuntimeError, r'must have at least 2 dimensions'):\n torch.linalg.slogdet(a)\n\n # slogdet requires the input to be of float, double, cfloat or cdouble types\n a = torch.randn(2, 2, device=device, dtype=torch.bfloat16)\n with self.assertRaisesRegex(RuntimeError, r'of float, double, cfloat or cdouble types'):\n torch.linalg.slogdet(a)\n\n # if non-empty out tensor with wrong shape is passed a warning is given\n a = torch.randn(2, 3, 3, device=device, 
dtype=dtype)\n sign_out = torch.empty(1, device=device, dtype=dtype)\n real_dtype = a.real.dtype if dtype.is_complex else dtype\n logabsdet_out = torch.empty(1, device=device, dtype=real_dtype)\n with warnings.catch_warnings(record=True) as w:\n # Trigger warning\n torch.linalg.slogdet(a, out=(sign_out, logabsdet_out))\n # Check warning occurs\n self.assertEqual(len(w), 1)\n self.assertTrue(\"An output with one or more elements was resized\" in str(w[-1].message))\n\n # dtypes should be safely castable\n sign_out = torch.empty_like(a).to(torch.int)\n logabsdet_out = torch.empty_like(a).to(torch.int)\n with self.assertRaisesRegex(RuntimeError, \"but got sign with dtype Int\"):\n torch.linalg.slogdet(a, out=(sign_out, logabsdet_out))\n\n sign_out = torch.empty(0, device=device, dtype=dtype)\n with self.assertRaisesRegex(RuntimeError, \"but got logabsdet with dtype Int\"):\n torch.linalg.slogdet(a, out=(sign_out, logabsdet_out))\n\n # device should match\n if torch.cuda.is_available():\n wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'\n sign_out = torch.empty(0, device=wrong_device, dtype=dtype)\n logabsdet_out = torch.empty(0, device=wrong_device, dtype=real_dtype)\n with self.assertRaisesRegex(RuntimeError, \"tensors to be on the same device\"):\n torch.linalg.slogdet(a, out=(sign_out, logabsdet_out))\n\n @slowTest\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.double)\n def test_det_logdet_slogdet(self, device, dtype):\n def reference_slogdet(M):\n sdet, logabsdet = np.linalg.slogdet(M.detach().cpu().numpy())\n return M.new_tensor(sdet), M.new_tensor(logabsdet)\n\n def test_single_det(M, target, desc):\n target_sdet, target_logabsdet = target\n\n det = M.det()\n logdet = M.logdet()\n sdet, logabsdet = M.slogdet()\n linalg_sdet, linalg_logabsdet = torch.linalg.slogdet(M)\n\n # Test det\n self.assertEqual(det, target_sdet * target_logabsdet.exp(),\n atol=1e-7, rtol=0, msg='{} (det)'.format(desc))\n\n # Test slogdet\n # Compare the overall value rather than individual parts because of\n # precision issues when det is near zero.\n self.assertEqual(sdet * logabsdet.exp(), target_sdet * target_logabsdet.exp(),\n atol=1e-7, rtol=0, msg='{} (slogdet)'.format(desc))\n self.assertEqual(linalg_sdet * linalg_logabsdet.exp(), target_sdet * target_logabsdet.exp(),\n atol=1e-7, rtol=0, msg='{} (linalg_slogdet)'.format(desc))\n\n # Test logdet\n # Compare logdet against our own pytorch slogdet because they should\n # be consistent, while it may behave slightly differently with other\n # slogdet implementations when det is near zero due to precision\n # issues.\n if sdet.item() < 0:\n self.assertTrue(logdet.item() != logdet.item(), '{} (logdet negative case)'.format(desc))\n else:\n self.assertEqual(logdet.exp(), target_logabsdet.exp(),\n atol=1e-7, rtol=0, msg='{} (logdet non-negative case)'.format(desc))\n\n eye = torch.eye(5, dtype=dtype, device=device)\n test_single_det(eye, (torch.ones((), dtype=dtype, device=device), torch.zeros((), dtype=dtype, device=device)), 'identity')\n # Testing bug in #34061 (https://github.com/pytorch/pytorch/issues/34061)\n for n in range(250, 551, 100):\n mat = torch.randn(n, n, dtype=dtype, device=device)\n q, _ = torch.qr(mat)\n ref_det, ref_logabsdet = reference_slogdet(q)\n test_single_det(q, (ref_det, ref_logabsdet), 'orthogonal')\n\n def test(M):\n assert M.size(0) >= 5, 'this helper fn assumes M to be at least 5x5'\n M = M.to(device)\n\n ref_M_sdet, ref_M_logabsdet = reference_slogdet(M)\n\n test_single_det(M, (ref_M_sdet, ref_M_logabsdet), 
'basic')\n if ref_M_logabsdet.exp().item() >= 1e-6: # skip singular\n M_inv = M.inverse()\n test_single_det(M_inv, reference_slogdet(M_inv), 'inverse')\n\n test_single_det(M, (ref_M_sdet, ref_M_logabsdet), 'transpose')\n\n for x in [0, 2, 4]:\n for scale in [-2, -0.1, 0, 10]:\n if scale > 0:\n target = ref_M_sdet, ref_M_logabsdet + math.log(scale)\n elif scale == 0:\n target = torch.zeros_like(ref_M_sdet), torch.full_like(ref_M_logabsdet, -inf)\n else:\n target = ref_M_sdet.neg(), ref_M_logabsdet + math.log(-scale)\n\n # dim 0\n M_clone = M.clone()\n M_clone[:, x] *= scale\n test_single_det(M_clone, target, 'scale a row')\n # dim 1\n M_clone = M.clone()\n M_clone[x, :] *= scale\n test_single_det(M_clone, target, 'scale a column')\n\n for x1, x2 in [(0, 3), (4, 1), (3, 2)]:\n assert x1 != x2, 'x1 and x2 needs to be different for this test'\n target = torch.zeros_like(ref_M_sdet), torch.full_like(ref_M_logabsdet, -inf)\n # dim 0\n M_clone = M.clone()\n M_clone[:, x2] = M_clone[:, x1]\n test_single_det(M_clone, target, 'two rows are same')\n # dim 1\n M_clone = M.clone()\n M_clone[x2, :] = M_clone[x1, :]\n test_single_det(M_clone, target, 'two columns are same')\n\n for scale1, scale2 in [(0.3, -1), (0, 2), (10, 0.1)]:\n det_scale = scale1 * scale2 * -1\n if det_scale > 0:\n target = ref_M_sdet, ref_M_logabsdet + math.log(det_scale)\n elif det_scale == 0:\n target = torch.zeros_like(ref_M_sdet), torch.full_like(ref_M_logabsdet, -inf)\n else:\n target = ref_M_sdet.neg(), ref_M_logabsdet + math.log(-det_scale)\n\n # dim 0\n M_clone = M.clone()\n t = M_clone[:, x1] * scale1\n M_clone[:, x1] += M_clone[:, x2] * scale2\n M_clone[:, x2] = t\n test_single_det(M_clone, target, 'exchanging rows')\n # dim 1\n M_clone = M.clone()\n t = M_clone[x1, :] * scale1\n M_clone[x1, :] += M_clone[x2, :] * scale2\n M_clone[x2, :] = t\n test_single_det(M_clone, target, 'exchanging columns')\n\n def get_random_mat_scale(n):\n # For matrices with values i.i.d. with 0 mean, unit variance, and\n # subexponential tail, we have:\n # E[log det(A^2)] \\approx log((n-1)!)\n #\n # Notice:\n # log Var[det(A)] = log E[det(A^2)] >= E[log det(A^2)]\n #\n # So:\n # stddev[det(A)] >= sqrt( (n-1)! )\n #\n # We use this as an intuitive guideline to scale random generated\n # matrices so our closeness tests can work more robustly:\n # scale by sqrt( (n-1)! )^(-1/n) = ( (n-1)! )^(-1/(2n))\n #\n # source: https://arxiv.org/pdf/1112.0752.pdf\n\n # TODO: technically we need subexponential distn for this to hold,\n # but we mostly use gaussian entries below. 
Consider switching\n # to Chi-sq if this turns out not stable enough, since Chi-sq\n # is easy enough to sample from.\n return math.factorial(n - 1) ** (-1.0 / (2 * n))\n\n for n in [5, 10, 25]:\n scale = get_random_mat_scale(n)\n test(torch.randn(n, n, dtype=dtype, device=device) * scale)\n r = torch.randn(n, n, dtype=dtype, device=device) * scale\n # symmetric psd\n test(r.mm(r.t()))\n # symmetric pd\n r = torch.randn(n, n, dtype=dtype, device=device) * scale\n test(r.mm(r.t()) + torch.eye(n, dtype=dtype, device=device) * 1e-6)\n # symmetric\n r = torch.randn(n, n, dtype=dtype, device=device) * scale\n for i in range(n):\n for j in range(i):\n r[i, j] = r[j, i]\n test(r)\n # non-contiguous\n test((torch.randn(n, n, n + 1, dtype=dtype, device=device) * scale)[:, 2, 1:])\n # det = 0\n r = torch.randn(n, n, dtype=dtype, device=device) * scale\n u, s, v = r.svd()\n if reference_slogdet(u)[0] < 0:\n u = -u\n if reference_slogdet(v)[0] < 0:\n v = -v\n s[0] *= -1\n s[-1] = 0\n test(u.mm(s.diag()).mm(v))\n\n # Small values to test numerical stability. Note that we don't scale\n # this matrix.\n r = torch.randn(512, 512, dtype=dtype, device=device)\n u, s, v = r.svd()\n s.fill_(1. / (100 * s.numel()))\n test(u.mm(s.diag()).mm(v))\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.double)\n def test_det_logdet_slogdet_batched(self, device, dtype):\n from torch.testing._internal.common_utils import (random_symmetric_matrix, random_symmetric_psd_matrix,\n random_symmetric_pd_matrix, random_square_matrix_of_rank)\n\n # mat_chars denotes matrix characteristics\n # possible values are: sym, sym_psd, sym_pd, sing, non_sym\n def run_test(matsize, batchdims, mat_chars):\n num_matrices = reduce(lambda x, y: x * y, batchdims, 1)\n list_of_matrices = []\n\n for idx in range(num_matrices):\n mat_type = idx % len(mat_chars)\n if mat_chars[mat_type] == 'sym':\n list_of_matrices.append(random_symmetric_matrix(matsize, dtype=dtype, device=device))\n elif mat_chars[mat_type] == 'sym_psd':\n list_of_matrices.append(random_symmetric_psd_matrix(matsize, dtype=dtype, device=device))\n elif mat_chars[mat_type] == 'sym_pd':\n list_of_matrices.append(random_symmetric_pd_matrix(matsize, dtype=dtype, device=device))\n elif mat_chars[mat_type] == 'sing':\n list_of_matrices.append(torch.ones(matsize, matsize, dtype=dtype, device=device))\n elif mat_chars[mat_type] == 'non_sing':\n list_of_matrices.append(random_square_matrix_of_rank(matsize, matsize, dtype=dtype, device=device))\n full_tensor = torch.stack(list_of_matrices, dim=0).reshape(batchdims + (matsize, matsize))\n # Scaling adapted from `get_random_mat_scale` in _test_det_logdet_slogdet\n full_tensor *= (math.factorial(matsize - 1) ** (-1.0 / (2 * matsize)))\n\n for fn in [torch.det, torch.logdet, torch.slogdet, torch.linalg.slogdet]:\n expected_value = []\n actual_value = fn(full_tensor)\n for full_idx in itertools.product(*map(lambda x: list(range(x)), batchdims)):\n expected_value.append(fn(full_tensor[full_idx]))\n\n if fn == torch.slogdet or fn == torch.linalg.slogdet:\n sign_value = torch.stack([tup[0] for tup in expected_value], dim=0).reshape(batchdims)\n expected_value = torch.stack([tup[1] for tup in expected_value], dim=0).reshape(batchdims)\n self.assertEqual(sign_value, actual_value[0])\n self.assertEqual(expected_value, actual_value[1])\n else:\n expected_value = torch.stack(expected_value, dim=0).reshape(batchdims)\n self.assertEqual(actual_value, expected_value)\n\n for matsize, batchdims in itertools.product([3, 5], [(3,), (5, 3)]):\n 
run_test(matsize, batchdims, mat_chars=['sym_pd'])\n run_test(matsize, batchdims, mat_chars=['sing'])\n run_test(matsize, batchdims, mat_chars=['non_sing'])\n run_test(matsize, batchdims, mat_chars=['sym', 'sym_pd', 'sym_psd'])\n run_test(matsize, batchdims, mat_chars=['sing', 'non_sing'])\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_cholesky_inverse(self, device, dtype):\n from torch.testing._internal.common_utils import random_hermitian_pd_matrix\n\n def run_test(shape, batch, upper, contiguous):\n A = random_hermitian_pd_matrix(shape, *batch, dtype=dtype, device=device)\n if A.numel() > 0 and not contiguous:\n A = A.mT\n self.assertFalse(A.is_contiguous())\n L = torch.linalg.cholesky(A)\n expected_inverse = torch.inverse(A)\n L = L.mH if upper else L\n actual_inverse = torch.cholesky_inverse(L, upper)\n self.assertEqual(actual_inverse, expected_inverse)\n\n shapes = (0, 3, 5)\n batches = ((), (0,), (3, ), (2, 2))\n for shape, batch, upper, contiguous in list(itertools.product(shapes, batches, (True, False), (True, False))):\n run_test(shape, batch, upper, contiguous)\n\n # check the out= variant\n A = random_hermitian_pd_matrix(3, 2, dtype=dtype, device=device)\n L = torch.linalg.cholesky(A)\n\n # There are two code paths currently for the out= variant\n # 1. When 'out' tensor is in Fortran (column-major) memory format\n # then the fast route is taken and the storage is reused directly in the computations\n # 2. When 'out' tensor is not in Fortran format then a temporary tensor is allocated internally\n # and the result is copied from the temporary tensor to 'out' tensor\n\n # This test checks the first code path\n out = torch.empty_like(A)\n out_t = out.mT.clone(memory_format=torch.contiguous_format)\n out = out_t.mT\n ans = torch.cholesky_inverse(L, out=out)\n self.assertEqual(ans, out)\n expected = torch.inverse(A)\n self.assertEqual(expected, out)\n\n # This test checks the second code path\n out = torch.empty_like(A)\n ans = torch.cholesky_inverse(L, out=out)\n self.assertEqual(ans, out)\n expected = torch.inverse(A)\n self.assertEqual(expected, out)\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_cholesky_inverse_errors_and_warnings(self, device, dtype):\n # cholesky_inverse requires the input to be at least 2 dimensional tensor\n a = torch.randn(2, device=device, dtype=dtype)\n with self.assertRaisesRegex(RuntimeError, \"must have at least 2 dimensions\"):\n torch.cholesky_inverse(a)\n\n # cholesky_inverse requires a square matrix\n a = torch.randn(2, 3, device=device, dtype=dtype)\n with self.assertRaisesRegex(RuntimeError, \"must be batches of square matrices\"):\n torch.cholesky_inverse(a)\n\n # if non-empty out tensor with wrong shape is passed a warning is given\n a = torch.randn(3, 3, device=device, dtype=dtype)\n out = torch.empty(2, 3, device=device, dtype=dtype)\n with warnings.catch_warnings(record=True) as w:\n # Trigger warning\n torch.cholesky_inverse(a, out=out)\n # Check warning occurs\n self.assertEqual(len(w), 1)\n self.assertTrue(\"An output with one or more elements was resized\" in str(w[-1].message))\n\n # dtypes should be safely castable\n out = torch.empty(*a.shape, dtype=torch.int, device=device)\n with self.assertRaisesRegex(RuntimeError, \"but got result with dtype Int\"):\n torch.cholesky_inverse(a, out=out)\n\n # device should match\n if torch.cuda.is_available():\n wrong_device = 'cpu' 
if self.device_type != 'cpu' else 'cuda'\n out = torch.empty(0, device=wrong_device, dtype=dtype)\n with self.assertRaisesRegex(RuntimeError, \"Expected all tensors to be on the same device\"):\n torch.cholesky_inverse(a, out=out)\n\n # cholesky_inverse raises an error for invalid inputs on CPU\n # for example if at least one diagonal element is zero\n a = torch.randn(3, 3, device=device, dtype=dtype)\n a[1, 1] = 0\n if self.device_type == 'cpu':\n with self.assertRaisesRegex(RuntimeError, r\"cholesky_inverse: The diagonal element 2 is zero\"):\n torch.cholesky_inverse(a)\n # cholesky_inverse on GPU does not raise an error for this case\n elif self.device_type == 'cuda':\n out = torch.cholesky_inverse(a)\n self.assertTrue(out.isinf().any() or out.isnan().any())\n\n def _select_broadcastable_dims(self, dims_full=None):\n # select full dimensionality\n if dims_full is None:\n dims_full = []\n ndims = random.randint(1, 4)\n dims_full = [random.randint(1, 8) for _ in range(ndims)]\n else:\n ndims = len(dims_full)\n\n # select actual dimensions for ops:\n # larger: full ndims, individual sizes may be reduced\n # smaller: possibly reduced ndims, sizes may be reduced\n smaller_ndims = random.randint(1, ndims)\n dims_small = []\n dims_large = []\n for i in range(ndims - 1, -1, -1):\n j = random.randint(1, 3)\n if j == 1: # no reduced singleton dimension\n ds = dims_full[i]\n dl = dims_full[i]\n elif j == 2: # larger may have reduced singleton dimension\n ds = dims_full[i]\n dl = 1 if len(dims_small) < smaller_ndims else dims_full[i]\n elif j == 3: # smaller may have reduced singleton dimension\n ds = 1\n dl = dims_full[i]\n dims_large = [dl] + dims_large\n if len(dims_small) < smaller_ndims:\n dims_small = [ds] + dims_small\n return (dims_small, dims_large, dims_full)\n\n def test_broadcast_fused_matmul(self, device):\n fns = [\"baddbmm\", \"addbmm\", \"addmm\", \"addmv\", \"addr\"]\n\n for fn in fns:\n batch_dim = random.randint(1, 8)\n n_dim = random.randint(1, 8)\n m_dim = random.randint(1, 8)\n p_dim = random.randint(1, 8)\n\n def dims_full_for_fn():\n if fn == \"baddbmm\":\n return ([batch_dim, n_dim, p_dim], [batch_dim, n_dim, m_dim], [batch_dim, m_dim, p_dim])\n elif fn == \"addbmm\":\n return ([n_dim, p_dim], [batch_dim, n_dim, m_dim], [batch_dim, m_dim, p_dim])\n elif fn == \"addmm\":\n return ([n_dim, p_dim], [n_dim, m_dim], [m_dim, p_dim])\n elif fn == \"addmv\":\n return ([n_dim], [n_dim, m_dim], [m_dim])\n elif fn == \"addr\":\n return ([n_dim, m_dim], [n_dim], [m_dim])\n else:\n raise AssertionError(\"unknown function\")\n\n (t0_dims_full, t1_dims, t2_dims) = dims_full_for_fn()\n (t0_dims_small, _, _) = self._select_broadcastable_dims(t0_dims_full)\n\n t0_small = torch.randn(*t0_dims_small, device=device).float()\n t1 = torch.randn(*t1_dims, device=device).float()\n t2 = torch.randn(*t2_dims, device=device).float()\n\n t0_full = t0_small.expand(*t0_dims_full).to(device)\n\n fntorch = getattr(torch, fn)\n r0 = fntorch(t0_small, t1, t2)\n r1 = fntorch(t0_full, t1, t2)\n self.assertEqual(r0, r1)\n\n @tf32_on_and_off(0.001)\n def test_broadcast_batched_matmul(self, device):\n n_dim = random.randint(1, 8)\n m_dim = random.randint(1, 8)\n p_dim = random.randint(1, 8)\n full_batch_dims = [random.randint(1, 3) for i in range(random.randint(1, 3))]\n (batch_dims_small, _, _) = self._select_broadcastable_dims(full_batch_dims)\n\n def verify_batched_matmul(full_lhs, one_dimensional):\n if not one_dimensional:\n lhs_dims = [n_dim, m_dim]\n rhs_dims = [m_dim, p_dim]\n result_dims = [n_dim, 
p_dim]\n else:\n lhs_dims = [n_dim, m_dim] if full_lhs else [m_dim]\n rhs_dims = [m_dim, p_dim] if not full_lhs else [m_dim]\n result_dims = [n_dim] if full_lhs else [p_dim]\n\n lhs_mat_dims = lhs_dims if len(lhs_dims) != 1 else [1, m_dim]\n rhs_mat_dims = rhs_dims if len(rhs_dims) != 1 else [m_dim, 1]\n full_mat_dims = lhs_mat_dims if full_lhs else rhs_mat_dims\n dim0_dims = rhs_dims if full_lhs else lhs_dims\n small_dims = batch_dims_small + (rhs_mat_dims if full_lhs else lhs_mat_dims)\n\n small = torch.randn(*(small_dims), device=device).float()\n dim0 = torch.randn(*(dim0_dims), device=device).float()\n full = torch.randn(*(full_batch_dims + full_mat_dims), device=device).float()\n if not one_dimensional:\n (lhsTensors, rhsTensors) = ((full,), (small, dim0)) if full_lhs else ((small, dim0), (full,))\n else:\n (lhsTensors, rhsTensors) = ((full,), (dim0,)) if full_lhs else ((dim0,), (full,))\n\n def maybe_squeeze_result(l, r, result):\n if len(lhs_dims) == 1 and l.dim() != 1:\n return result.squeeze(-2)\n elif len(rhs_dims) == 1 and r.dim() != 1:\n return result.squeeze(-1)\n else:\n return result\n\n for lhs in lhsTensors:\n lhs_expanded = lhs.expand(*(torch.Size(full_batch_dims) + torch.Size(lhs_mat_dims)))\n lhs_expanded_matmul_fn = lhs_expanded.matmul\n for rhs in rhsTensors:\n rhs_expanded = ((rhs if len(rhs_dims) != 1 else rhs.unsqueeze(-1)).\n expand(*(torch.Size(full_batch_dims) + torch.Size(rhs_mat_dims))))\n truth = maybe_squeeze_result(lhs_expanded, rhs_expanded, lhs_expanded_matmul_fn(rhs_expanded))\n for l in (lhs, lhs_expanded):\n for r in (rhs, rhs_expanded):\n l_matmul_fn = l.matmul\n result = maybe_squeeze_result(l, r, l_matmul_fn(r))\n self.assertEqual(truth, result)\n # test torch.matmul function as well\n torch_result = maybe_squeeze_result(l, r, torch.matmul(l, r))\n self.assertEqual(truth, torch_result)\n # test torch.matmul with out\n out = torch.zeros_like(torch_result)\n torch.matmul(l, r, out=out)\n self.assertEqual(truth, maybe_squeeze_result(l, r, out))\n\n # compare to bmm\n bmm_result = (torch.bmm(lhs_expanded.contiguous().view(-1, *lhs_mat_dims),\n rhs_expanded.contiguous().view(-1, *rhs_mat_dims)))\n self.assertEqual(truth.view(-1, *result_dims), bmm_result.view(-1, *result_dims))\n\n for indices in itertools.product((True, False), repeat=2):\n verify_batched_matmul(*indices)\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_lu_solve_batched_non_contiguous(self, device, dtype):\n from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value\n\n A = random_fullrank_matrix_distinct_singular_value(2, 2, dtype=dtype, device=device)\n b = torch.randn(2, 2, 2, dtype=dtype, device=device)\n x_exp = np.linalg.solve(A.cpu().permute(0, 2, 1).numpy(), b.cpu().permute(2, 1, 0).numpy())\n A = A.permute(0, 2, 1)\n b = b.permute(2, 1, 0)\n assert not A.is_contiguous() and not b.is_contiguous(), \"contiguous inputs\"\n LU_data, LU_pivots = torch.lu(A)\n x = torch.lu_solve(b, LU_data, LU_pivots)\n self.assertEqual(x, x_exp)\n\n def lu_solve_test_helper(self, A_dims, b_dims, pivot, device, dtype):\n from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value\n\n b = torch.randn(*b_dims, dtype=dtype, device=device)\n A = random_fullrank_matrix_distinct_singular_value(*A_dims, dtype=dtype, device=device)\n LU_data, LU_pivots, info = torch.lu(A, get_infos=True, pivot=pivot)\n self.assertEqual(info, torch.zeros_like(info))\n 
return b, A, LU_data, LU_pivots\n\n @skipCPUIfNoLapack\n @skipCUDAIfNoMagma\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n @precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,\n torch.float64: 1e-8, torch.complex128: 1e-8})\n def test_lu_solve(self, device, dtype):\n def sub_test(pivot):\n for k, n in zip([2, 3, 5], [3, 5, 7]):\n b, A, LU_data, LU_pivots = self.lu_solve_test_helper((n,), (n, k), pivot, device, dtype)\n x = torch.lu_solve(b, LU_data, LU_pivots)\n self.assertEqual(b, np.matmul(A.cpu(), x.cpu()))\n\n sub_test(True)\n if self.device_type == 'cuda':\n sub_test(False)\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n @precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,\n torch.float64: 1e-8, torch.complex128: 1e-8})\n def test_lu_solve_batched(self, device, dtype):\n def sub_test(pivot):\n def lu_solve_batch_test_helper(A_dims, b_dims, pivot):\n b, A, LU_data, LU_pivots = self.lu_solve_test_helper(A_dims, b_dims, pivot, device, dtype)\n x_exp_list = []\n for i in range(b_dims[0]):\n x_exp_list.append(torch.lu_solve(b[i], LU_data[i], LU_pivots[i]))\n x_exp = torch.stack(x_exp_list) # Stacked output\n x_act = torch.lu_solve(b, LU_data, LU_pivots) # Actual output\n self.assertEqual(x_exp, x_act) # Equality check\n Ax = np.matmul(A.cpu(), x_act.cpu())\n self.assertEqual(b, Ax)\n\n for batchsize in [1, 3, 4]:\n lu_solve_batch_test_helper((5, batchsize), (batchsize, 5, 10), pivot)\n\n # Tests tensors with 0 elements\n b = torch.randn(3, 0, 3, dtype=dtype, device=device)\n A = torch.randn(3, 0, 0, dtype=dtype, device=device)\n LU_data, LU_pivots = torch.lu(A)\n self.assertEqual(torch.empty_like(b), b.lu_solve(LU_data, LU_pivots))\n\n sub_test(True)\n if self.device_type == 'cuda':\n sub_test(False)\n\n @slowTest\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_lu_solve_batched_many_batches(self, device, dtype):\n def run_test(A_dims, b_dims):\n b, A, LU_data, LU_pivots = self.lu_solve_test_helper(A_dims, b_dims, True, device, dtype)\n x = torch.lu_solve(b, LU_data, LU_pivots)\n Ax = torch.matmul(A, x)\n self.assertEqual(Ax, b.expand_as(Ax))\n\n run_test((5, 65536), (65536, 5, 10))\n run_test((5, 262144), (262144, 5, 10))\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_lu_solve_batched_broadcasting(self, device, dtype):\n from torch.testing._internal.common_utils import random_fullrank_matrix_distinct_singular_value\n\n def run_test(A_dims, b_dims, pivot=True):\n A_matrix_size = A_dims[-1]\n A_batch_dims = A_dims[:-2]\n A = random_fullrank_matrix_distinct_singular_value(A_matrix_size, *A_batch_dims, dtype=dtype, device=device)\n b = make_tensor(b_dims, dtype=dtype, device=device)\n x_exp = np.linalg.solve(A.cpu(), b.cpu())\n LU_data, LU_pivots = torch.lu(A, pivot=pivot)\n x = torch.lu_solve(b, LU_data, LU_pivots)\n self.assertEqual(x, x_exp)\n\n # test against numpy.linalg.solve\n run_test((2, 1, 3, 4, 4), (2, 1, 3, 4, 6)) # no broadcasting\n run_test((2, 1, 3, 4, 4), (4, 6)) # broadcasting b\n run_test((4, 4), (2, 1, 3, 4, 2)) # broadcasting A\n run_test((1, 3, 1, 4, 4), (2, 1, 3, 4, 5)) # broadcasting A & b\n\n @onlyCUDA\n @skipCUDAIfNoMagma\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n # this tests https://github.com/pytorch/pytorch/issues/36921\n def 
test_lu_solve_large_matrices(self, device, dtype):\n def run_test(A_dims, b_dims):\n b, A, LU_data, LU_pivots = self.lu_solve_test_helper(A_dims, b_dims, True, device, dtype)\n x = torch.lu_solve(b, LU_data, LU_pivots)\n Ax = torch.matmul(A, x)\n self.assertEqual(Ax, b.expand_as(Ax))\n\n run_test((1, 1), (1, 1, 1025))\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_lu_solve_out_errors_and_warnings(self, device, dtype):\n # dtypes should be safely castable\n a = torch.eye(2, dtype=dtype, device=device)\n LU_data, LU_pivots = torch.lu(a, pivot=True)\n b = torch.randn(2, 1, dtype=dtype, device=device)\n out = torch.empty(0, dtype=torch.int, device=device)\n with self.assertRaisesRegex(RuntimeError, \"but got result with dtype Int\"):\n torch.lu_solve(b, LU_data, LU_pivots, out=out)\n\n # device should match\n if torch.cuda.is_available():\n wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'\n out = torch.empty(0, dtype=dtype, device=wrong_device)\n with self.assertRaisesRegex(RuntimeError, \"tensors to be on the same device\"):\n torch.lu_solve(b, LU_data, LU_pivots, out=out)\n\n # if out tensor with wrong shape is passed a warning is given\n with warnings.catch_warnings(record=True) as w:\n out = torch.empty(1, dtype=dtype, device=device)\n # Trigger warning\n torch.lu_solve(b, LU_data, LU_pivots, out=out)\n # Check warning occurs\n self.assertEqual(len(w), 1)\n self.assertTrue(\"An output with one or more elements was resized\" in str(w[-1].message))\n\n @precisionOverride({torch.float32: 1e-5, torch.complex64: 1e-5})\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_symeig(self, device, dtype):\n from torch.testing._internal.common_utils import random_hermitian_matrix\n\n def run_test(dims, eigenvectors, upper):\n x = random_hermitian_matrix(*dims, dtype=dtype, device=device)\n if dtype.is_complex:\n real_dtype = torch.float32 if dtype is torch.complex64 else torch.float64\n else:\n real_dtype = dtype\n oute = torch.empty(dims[1:] + dims[:1], dtype=real_dtype, device=device)\n outv = torch.empty(dims[1:] + dims[:1] * 2, dtype=dtype, device=device)\n torch.symeig(x, eigenvectors=eigenvectors, upper=upper, out=(oute, outv))\n\n if eigenvectors:\n outv_ = outv.cpu().numpy()\n x_recon = np.matmul(np.matmul(outv_, torch.diag_embed(oute.to(dtype)).cpu().numpy()),\n outv_.swapaxes(-2, -1).conj())\n self.assertEqual(x, x_recon, atol=1e-8, rtol=0, msg='Incorrect reconstruction using V @ diag(e) @ V.T')\n else:\n eigvals, _ = torch.symeig(x, eigenvectors=True, upper=upper)\n self.assertEqual(eigvals, oute, msg='Eigenvalues mismatch')\n self.assertEqual(torch.empty(0, device=device, dtype=dtype), outv, msg='Eigenvector matrix not empty')\n\n rese, resv = x.symeig(eigenvectors=eigenvectors, upper=upper)\n self.assertEqual(rese, oute, msg=\"outputs of symeig and symeig with out don't match\")\n self.assertEqual(resv, outv, msg=\"outputs of symeig and symeig with out don't match\")\n\n # test non-contiguous\n x = random_hermitian_matrix(*dims, dtype=dtype, device=device)\n n_dim = len(dims) + 1\n # Reverse the batch dimensions and the matrix dimensions and then concat them\n x = x.permute(tuple(range(n_dim - 3, -1, -1)) + (n_dim - 1, n_dim - 2))\n assert not x.is_contiguous(), \"x is intentionally non-contiguous\"\n rese, resv = torch.symeig(x, eigenvectors=eigenvectors, upper=upper)\n if eigenvectors:\n resv_ = resv.cpu().numpy()\n 
x_recon = np.matmul(np.matmul(resv_, torch.diag_embed(rese.to(dtype)).cpu().numpy()),\n resv_.swapaxes(-2, -1).conj())\n self.assertEqual(x, x_recon, atol=1e-8, rtol=0, msg='Incorrect reconstruction using V @ diag(e) @ V.T')\n else:\n eigvals, _ = torch.symeig(x, eigenvectors=True, upper=upper)\n self.assertEqual(eigvals, rese, msg='Eigenvalues mismatch')\n self.assertEqual(torch.empty(0, device=device, dtype=dtype), resv, msg='Eigenvector matrix not empty')\n\n batch_dims_set = [(), (3,), (3, 5), (5, 3, 5)]\n for batch_dims, eigenvectors, upper in itertools.product(batch_dims_set, (True, False), (True, False)):\n run_test((5,) + batch_dims, eigenvectors, upper)\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_symeig_out_errors_and_warnings(self, device, dtype):\n from torch.testing._internal.common_utils import random_hermitian_matrix\n\n # if non-empty out tensor with wrong shape is passed a warning is given\n a = random_hermitian_matrix(3, dtype=dtype, device=device)\n real_dtype = a.real.dtype if dtype.is_complex else dtype\n out_w = torch.empty(7, 7, dtype=real_dtype, device=device)\n out_v = torch.empty(7, 7, dtype=dtype, device=device)\n with warnings.catch_warnings(record=True) as w:\n # Trigger warning\n torch.symeig(a, out=(out_w, out_v))\n self.assertTrue(\"An output with one or more elements was resized\" in str(w[-2].message))\n self.assertTrue(\"An output with one or more elements was resized\" in str(w[-1].message))\n\n # dtypes should be safely castable\n out_w = torch.empty(0, dtype=real_dtype, device=device)\n out_v = torch.empty(0, dtype=torch.int, device=device)\n with self.assertRaisesRegex(RuntimeError, \"but got eigenvectors with dtype Int\"):\n torch.symeig(a, out=(out_w, out_v))\n\n out_w = torch.empty(0, dtype=torch.int, device=device)\n out_v = torch.empty(0, dtype=dtype, device=device)\n with self.assertRaisesRegex(RuntimeError, \"but got eigenvalues with dtype Int\"):\n torch.symeig(a, out=(out_w, out_v))\n\n # device should match\n if torch.cuda.is_available():\n wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'\n out_w = torch.empty(0, device=wrong_device, dtype=dtype)\n out_v = torch.empty(0, device=device, dtype=dtype)\n with self.assertRaisesRegex(RuntimeError, \"tensors to be on the same device\"):\n torch.symeig(a, out=(out_w, out_v))\n out_w = torch.empty(0, device=device, dtype=dtype)\n out_v = torch.empty(0, device=wrong_device, dtype=dtype)\n with self.assertRaisesRegex(RuntimeError, \"tensors to be on the same device\"):\n torch.symeig(a, out=(out_w, out_v))\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n def test_pca_lowrank(self, device):\n from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix\n\n dtype = torch.double\n\n def run_subtest(guess_rank, actual_rank, matrix_size, batches, device, pca, **options):\n density = options.pop('density', 1)\n if isinstance(matrix_size, int):\n rows = columns = matrix_size\n else:\n rows, columns = matrix_size\n if density == 1:\n a_input = random_lowrank_matrix(actual_rank, rows, columns, *batches, device=device, dtype=dtype)\n a = a_input\n else:\n a_input = random_sparse_matrix(rows, columns, density, device=device, dtype=dtype)\n a = a_input.to_dense()\n\n u, s, v = pca(a_input, q=guess_rank, **options)\n\n self.assertEqual(s.shape[-1], guess_rank)\n self.assertEqual(u.shape[-2], rows)\n self.assertEqual(u.shape[-1], guess_rank)\n self.assertEqual(v.shape[-1], guess_rank)\n 
self.assertEqual(v.shape[-2], columns)\n\n A1 = u.matmul(s.diag_embed()).matmul(v.mT)\n ones_m1 = torch.ones(batches + (rows, 1), dtype=a.dtype, device=device)\n c = a.sum(axis=-2) / rows\n c = c.reshape(batches + (1, columns))\n A2 = a - ones_m1.matmul(c)\n self.assertEqual(A1, A2)\n\n if density == 1:\n # actual rank is known only for dense input\n detect_rank = (s.abs() > 1e-5).sum(axis=-1)\n self.assertEqual(actual_rank * torch.ones(batches, device=device, dtype=torch.int64), detect_rank)\n S = torch.linalg.svdvals(A2)\n self.assertEqual(s[..., :actual_rank], S[..., :actual_rank])\n\n all_batches = [(), (1,), (3,), (2, 3)]\n for actual_rank, size, all_batches in [\n (2, (17, 4), all_batches),\n (2, (100, 4), all_batches),\n (6, (100, 40), all_batches),\n (12, (1000, 1000), [()]),\n ]:\n for batches in all_batches:\n for guess_rank in [\n actual_rank,\n actual_rank + 2,\n actual_rank + 6,\n ]:\n if guess_rank <= min(*size):\n run_subtest(guess_rank, actual_rank, size, batches, device, torch.pca_lowrank)\n run_subtest(guess_rank, actual_rank, size[::-1], batches, device, torch.pca_lowrank)\n\n # sparse input\n for guess_rank, size in [\n (4, (17, 4)), (4, (4, 17)), (16, (17, 17)),\n (21, (100, 40)), (20, (40, 100)), (600, (1000, 1000))]:\n for density in [0.005, 0.1]:\n run_subtest(guess_rank, None, size, (), device, torch.pca_lowrank, density=density)\n\n # jitting support\n jitted = torch.jit.script(torch.pca_lowrank)\n guess_rank, actual_rank, size, batches = 2, 2, (17, 4), ()\n run_subtest(guess_rank, actual_rank, size, batches, device, jitted)\n\n # Ensure that nuclear_norm's out variant gives the same result as the non-out\n @onlyNativeDeviceTypes\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64)\n def test_nuclear_norm_out(self, device, dtype):\n test_cases = [\n # input size, dim\n ((25, 25), None),\n ((25, 25), (0, 1)),\n ((25, 25), (1, 0)),\n ((25, 25, 25), (2, 0)),\n ((25, 25, 25), (0, 1)),\n ]\n for keepdim in [False, True]:\n for input_size, dim in test_cases:\n msg = f'input_size: {input_size}, dim: {dim}, keepdim: {keepdim}'\n x = torch.randn(*input_size, device=device, dtype=dtype)\n result_out = torch.empty(0, device=device, dtype=dtype)\n if dim is None:\n result = torch.nuclear_norm(x, keepdim=keepdim)\n torch.nuclear_norm(x, keepdim=keepdim, out=result_out)\n else:\n result = torch.nuclear_norm(x, keepdim=keepdim, dim=dim)\n torch.nuclear_norm(x, keepdim=keepdim, dim=dim, out=result_out)\n self.assertEqual(result, result_out, msg=msg)\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)\n def test_geqrf(self, device, dtype):\n\n def run_test(shape):\n # numpy.linalg.qr with mode = 'raw' computes the same operation as torch.geqrf\n # so this test compares against that function\n A = make_tensor(shape, dtype=dtype, device=device)\n\n # numpy.linalg.qr doesn't work with batched input\n m, n = A.shape[-2:]\n tau_size = \"n\" if m > n else \"m\"\n np_dtype = A.cpu().numpy().dtype\n ot = [np_dtype, np_dtype]\n numpy_geqrf_batched = np.vectorize(\n lambda x: np.linalg.qr(x, mode='raw'),\n otypes=ot,\n signature=f'(m,n)->(n,m),({tau_size})')\n\n expected = numpy_geqrf_batched(A.cpu())\n actual = torch.geqrf(A)\n\n # numpy.linalg.qr returns transposed result\n self.assertEqual(expected[0].swapaxes(-2, -1), actual[0])\n self.assertEqual(expected[1], actual[1])\n\n batches = [(), (0, ), (2, ), (2, 1)]\n ns = [5, 2, 0]\n for batch, (m, n) in product(batches, product(ns, ns)):\n 
run_test((*batch, m, n))\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n @dtypes(torch.double)\n def test_lstsq(self, device, dtype):\n def _test_underdetermined(a, b, expectedNorm):\n # underdetermined systems are only supported on CPU\n if self.device_type != 'cpu':\n return\n\n m = a.size()[0]\n n = a.size()[1]\n assert(m <= n)\n\n a_copy = a.clone()\n b_copy = b.clone()\n res1 = torch.lstsq(b, a)[0]\n self.assertEqual(a, a_copy, atol=0, rtol=0)\n self.assertEqual(b, b_copy, atol=0, rtol=0)\n self.assertEqual((torch.mm(a, res1) - b).norm(), expectedNorm, atol=1e-8, rtol=0)\n\n ta = torch.tensor((), dtype=dtype, device=device)\n tb = torch.tensor((), dtype=dtype, device=device)\n res2 = torch.lstsq(b, a, out=(tb, ta))[0]\n self.assertEqual(a, a_copy, atol=0, rtol=0)\n self.assertEqual(b, b_copy, atol=0, rtol=0)\n self.assertEqual((torch.mm(a, res1) - b).norm(), expectedNorm, atol=1e-8, rtol=0)\n\n res3 = torch.lstsq(b, a, out=(b, a))[0]\n self.assertEqual((torch.mm(a_copy, b) - b_copy).norm(), expectedNorm, atol=1e-8, rtol=0)\n self.assertEqual(res1, tb, atol=0, rtol=0)\n self.assertEqual(res1, b, atol=0, rtol=0)\n self.assertEqual(res1, res2, atol=0, rtol=0)\n self.assertEqual(res1, res3, atol=0, rtol=0)\n\n def _test_overdetermined(a, b, expectedNorm):\n m = a.size()[0]\n n = a.size()[1]\n assert(m > n)\n\n def check_norm(a, b, expected_norm, gels_result):\n # Checks |ax - b| and the residual info from the result\n\n # The first n rows is the least square solution.\n # Rows n to m-1 contain residual information.\n x = gels_result[:n]\n resid_info = gels_result[n:]\n\n resid_norm = (torch.mm(a, x) - b).norm()\n self.assertEqual(resid_norm, expectedNorm, atol=1e-8, rtol=0)\n self.assertEqual(resid_info.norm(), resid_norm, atol=1e-8, rtol=0)\n\n a_copy = a.clone()\n b_copy = b.clone()\n res1 = torch.lstsq(b, a)[0]\n self.assertEqual(a, a_copy, atol=0, rtol=0)\n self.assertEqual(b, b_copy, atol=0, rtol=0)\n check_norm(a, b, expectedNorm, res1)\n\n ta = torch.tensor((), dtype=dtype, device=device)\n tb = torch.tensor((), dtype=dtype, device=device)\n res2 = torch.lstsq(b, a, out=(tb, ta))[0]\n self.assertEqual(a, a_copy, atol=0, rtol=0)\n self.assertEqual(b, b_copy, atol=0, rtol=0)\n check_norm(a, b, expectedNorm, res2)\n\n res3 = torch.lstsq(b, a, out=(b, a))[0]\n check_norm(a_copy, b_copy, expectedNorm, res3)\n\n self.assertEqual(res1, tb, atol=0, rtol=0)\n self.assertEqual(res1, b, atol=0, rtol=0)\n self.assertEqual(res1, res2, atol=0, rtol=0)\n self.assertEqual(res1, res3, atol=0, rtol=0)\n\n # basic test\n expectedNorm = 0\n a = torch.tensor(((1.44, -9.96, -7.55, 8.34),\n (-7.84, -0.28, 3.24, 8.09),\n (-4.39, -3.24, 6.27, 5.28),\n (4.53, 3.83, -6.64, 2.06)), dtype=dtype, device=device).t()\n b = torch.tensor(((8.58, 8.26, 8.48, -5.28),\n (9.35, -4.43, -0.70, -0.26)), dtype=dtype, device=device).t()\n _test_underdetermined(a, b, expectedNorm)\n\n # test overdetermined\n expectedNorm = 17.390200628863\n a = torch.tensor(((1.44, -9.96, -7.55, 8.34, 7.08, -5.45),\n (-7.84, -0.28, 3.24, 8.09, 2.52, -5.70),\n (-4.39, -3.24, 6.27, 5.28, 0.74, -1.19),\n (4.53, 3.83, -6.64, 2.06, -2.47, 4.70)), dtype=dtype, device=device).t()\n b = torch.tensor(((8.58, 8.26, 8.48, -5.28, 5.72, 8.93),\n (9.35, -4.43, -0.70, -0.26, -7.36, -2.52)), dtype=dtype, device=device).t()\n _test_overdetermined(a, b, expectedNorm)\n\n # test underdetermined\n expectedNorm = 0\n a = torch.tensor(((1.44, -9.96, -7.55),\n (-7.84, -0.28, 3.24),\n (-4.39, -3.24, 6.27),\n (4.53, 3.83, -6.64)), dtype=dtype, device=device).t()\n b = 
torch.tensor(((8.58, 8.26, 8.48),\n (9.35, -4.43, -0.70)), dtype=dtype, device=device).t()\n _test_underdetermined(a, b, expectedNorm)\n\n # test reuse\n expectedNorm = 0\n a = torch.tensor(((1.44, -9.96, -7.55, 8.34),\n (-7.84, -0.28, 3.24, 8.09),\n (-4.39, -3.24, 6.27, 5.28),\n (4.53, 3.83, -6.64, 2.06)), dtype=dtype, device=device).t()\n b = torch.tensor(((8.58, 8.26, 8.48, -5.28),\n (9.35, -4.43, -0.70, -0.26)), dtype=dtype, device=device).t()\n ta = torch.tensor((), dtype=dtype, device=device)\n tb = torch.tensor((), dtype=dtype, device=device)\n torch.lstsq(b, a, out=(tb, ta))\n self.assertEqual((torch.mm(a, tb) - b).norm(), expectedNorm, atol=1e-8, rtol=0)\n torch.lstsq(b, a, out=(tb, ta))\n self.assertEqual((torch.mm(a, tb) - b).norm(), expectedNorm, atol=1e-8, rtol=0)\n torch.lstsq(b, a, out=(tb, ta))\n self.assertEqual((torch.mm(a, tb) - b).norm(), expectedNorm, atol=1e-8, rtol=0)\n\n @skipCUDAIfNoMagma\n @skipCPUIfNoLapack\n def test_lapack_empty(self, device):\n # FIXME: these are just a selection of LAPACK functions -- we need a general strategy here.\n # The LAPACK functions themselves generally do NOT work with zero sized dimensions, although\n # numpy/sci often has a direct wrapper (e.g. lu_factor) and a wrapper that \"does the right thing\"\n # (e.g. lu). We often name our functions identically to the lapack function, so it will take work\n # to name / migrate-to better wrappers.\n def fn(torchfn, *args):\n return torchfn(*tuple(torch.randn(shape, device=device) if isinstance(shape, tuple) else shape\n for shape in args))\n\n # inverse, pinverse\n self.assertEqual((0, 0), fn(torch.inverse, (0, 0)).shape)\n self.assertEqual((5, 0), fn(torch.pinverse, (0, 5)).shape)\n self.assertEqual((0, 5), fn(torch.pinverse, (5, 0)).shape)\n self.assertEqual((0, 0), fn(torch.pinverse, (0, 0)).shape)\n\n # det, logdet, slogdet\n self.assertEqual(torch.tensor(1., device=device), fn(torch.det, (0, 0)))\n self.assertEqual(torch.tensor(0., device=device), fn(torch.logdet, (0, 0)))\n self.assertEqual((torch.tensor(1., device=device), torch.tensor(0., device=device)),\n fn(torch.slogdet, (0, 0)))\n\n # eig, symeig\n evalues, evectors = fn(torch.eig, (0, 0), True)\n self.assertEqual([(0, 2), (0, 0)], [evalues.shape, evectors.shape])\n evalues, evectors = fn(torch.symeig, (0, 0), True)\n self.assertEqual([(0,), (0, 0)], [evalues.shape, evectors.shape])\n\n # qr\n q, r = fn(torch.qr, (3, 0), True)\n self.assertEqual([(3, 0), (0, 0)], [q.shape, r.shape])\n q, r = fn(torch.qr, (0, 3), True)\n self.assertEqual([(0, 0), (0, 3)], [q.shape, r.shape])\n q, r = fn(torch.qr, (3, 0), False)\n self.assertEqual([(3, 3), (3, 0)], [q.shape, r.shape])\n\n # lstsq\n self.assertRaises(RuntimeError, lambda: torch.lstsq(torch.randn(0, 0), torch.randn(0, 0)))\n self.assertRaises(RuntimeError, lambda: torch.lstsq(torch.randn(0,), torch.randn(0, 0)))\n\n @tf32_on_and_off(0.005)\n def test_tensordot(self, device):\n a = torch.arange(60., device=device).reshape(3, 4, 5)\n b = torch.arange(24., device=device).reshape(4, 3, 2)\n c = torch.tensordot(a, b, dims=([1, 0], [0, 1])).cpu()\n cn = torch.from_numpy(np.tensordot(a.cpu().numpy(), b.cpu().numpy(),\n axes=([1, 0], [0, 1])))\n self.assertEqual(c, cn)\n\n cout = torch.zeros((5, 2), device=device)\n torch.tensordot(a, b, dims=([1, 0], [0, 1]), out=cout).cpu()\n self.assertEqual(c, cout)\n\n a = torch.randn(2, 3, 4, 5, device=device)\n b = torch.randn(4, 5, 6, 7, device=device)\n c = torch.tensordot(a, b, dims=2).cpu()\n cn = torch.from_numpy(np.tensordot(a.cpu().numpy(), 
b.cpu().numpy(),\n axes=2))\n\n with self.assertRaisesRegex(RuntimeError, \"expects dims >= 0\"):\n torch.tensordot(a, b, dims=-1)\n\n self.assertEqual(c, cn)\n c = torch.tensordot(a, b).cpu()\n cn = torch.from_numpy(np.tensordot(a.cpu().numpy(), b.cpu().numpy()))\n self.assertEqual(c, cn)\n\n a = torch.tensordot(torch.tensor(0.), torch.tensor(0.), 0)\n an = torch.from_numpy(np.tensordot(np.zeros((), dtype=np.float32), np.zeros((), dtype=np.float32), 0))\n self.assertEqual(a, an)\n\n\ninstantiate_device_type_tests(TestLinalg, globals())\n\nif __name__ == '__main__':\n run_tests()\n" ]
[ [ "torch.all", "torch.addmv", "torch.lu_unpack", "torch.randint", "torch.testing._internal.common_utils.random_sparse_matrix", "torch.zeros", "torch.testing._internal.common_utils.iter_indices", "torch.linalg.householder_product", "torch.device", "torch.linalg.eigvalsh", "torch.outer", "torch.randn", "numpy.matmul", "torch.linalg.inv_ex", "numpy.linalg.multi_dot", "torch.bmm", "numpy.outer", "numpy.zeros", "torch.empty_like", "torch.testing._internal.common_dtype.get_all_complex_dtypes", "torch.full", "torch.triangular_solve", "torch.profiler.profile", "torch.distributions.binomial.Binomial", "torch.testing._internal.common_utils.random_hermitian_matrix", "torch.linalg.tensorsolve", "torch.testing._internal.common_utils.run_tests", "numpy.array", "torch.chain_matmul", "torch.gt", "torch.testing.make_tensor", "numpy.take_along_axis", "torch.transpose", "torch.lstsq", "torch.diag_embed", "torch.sum", "torch.result_type", "torch.cuda.is_available", "torch.addmm", "torch.norm", "torch.einsum", "torch.tensor", "torch.cholesky", "torch.linalg.qr", "torch.inverse", "torch.linalg.solve", "torch.rand", "torch.sort", "torch.testing._internal.common_utils.random_sparse_pd_matrix", "torch.testing._internal.common_utils.random_hermitian_psd_matrix", "numpy.lib.NumpyVersion", "torch.tensordot", "torch.mv", "torch.linalg.tensorinv", "torch.zeros_like", "torch.testing._internal.common_device_type.dtypes", "torch.linalg.norm", "torch.diag", "torch.stack", "torch._linalg_utils.matmul", "torch.as_strided", "torch.addbmm", "torch.testing._internal.common_utils.make_fullrank_matrices_with_distinct_singular_values", "torch.testing._internal.common_utils.gradcheck", "numpy.array_equal", "torch.linalg.cholesky_ex", "torch.lobpcg", "torch.testing._internal.common_utils.random_hermitian_pd_matrix", "torch.testing._internal.common_utils.random_lowrank_matrix", "torch.geqrf", "torch.matmul", "torch.linalg.matrix_power", "torch.eig", "torch.linalg.matrix_norm", "torch.testing._internal.common_device_type.skipCUDAIf", "torch.linalg.svdvals", "torch.testing._internal.common_utils.random_symmetric_pd_matrix", "torch.svd", "torch.testing._internal.common_utils.random_symmetric_matrix", "torch.linalg.pinv", "torch.inner", "torch.set_default_dtype", "torch.testing._internal.common_cuda.tf32_on_and_off", "torch.unique", "torch.testing._internal.common_utils.random_square_matrix_of_rank", "torch.full_like", "torch.matrix_rank", "torch._compute_linear_combination", "torch._linalg_utils.qform", "torch.linalg.vector_norm", "scipy.sparse.coo_matrix", "torch.mm", "torch.ones", "torch.linalg.matrix_rank", "torch.from_numpy", "scipy.linalg.lstsq", "torch.qr", "numpy.linalg.tensorinv", "torch.arange", "torch.testing._internal.common_device_type.skipCUDAVersionIn", "torch.testing._internal.common_cuda._get_torch_cuda_version", "torch.linalg.cholesky", "torch.Tensor.outer", "torch.nuclear_norm", "torch.linalg.eigh", "torch.cholesky_inverse", "numpy.argsort", "torch.linalg.multi_dot", "torch.ormqr", "torch.linalg.inv", "torch.linalg.svd", "numpy.linalg.norm", "torch.linalg.cross", "numpy.broadcast_to", "torch.testing._internal.common_device_type.precisionOverride", "numpy.einsum", "torch.addr", "torch.testing._internal.common_dtype.all_types", "torch.testing._internal.common_dtype.get_all_int_dtypes", "torch.testing._internal.common_utils.random_symmetric_psd_matrix", "numpy.linalg.qr", "torch.Tensor.ger", "scipy.sparse.linalg.lobpcg", "torch.jit.script", "numpy.linalg.svd", "torch.Size", "torch.linalg.matrix_exp", "torch.solve", 
"torch.linalg.slogdet", "torch.eye", "torch.testing._internal.common_utils.random_well_conditioned_matrix", "torch.kron", "torch.testing._internal.common_utils.gradgradcheck", "torch.linalg.cond", "torch.get_default_dtype", "torch.lu_solve", "torch.linalg.lstsq", "torch.empty", "torch.pinverse", "torch.cholesky_solve", "torch.linalg.eig", "torch.randint_like", "torch.testing._internal.common_utils.random_fullrank_matrix_distinct_singular_value", "torch.symeig", "numpy.linalg.lstsq", "torch.lu", "torch.baddbmm", "torch.testing._internal.common_dtype.floating_and_complex_types", "torch.linalg.eigvals", "torch.promote_types", "torch.testing._internal.common_dtype.get_all_dtypes", "torch.testing._internal.common_dtype.floating_types", "torch.testing._internal.common_device_type.dtypesIfCUDA", "numpy.prod", "torch.testing._internal.common_dtype.get_all_fp_dtypes", "torch.ger", "torch.cross" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.12", "0.14", "0.15" ], "tensorflow": [] } ]
AIDefender/Tianshou-ReMPER
[ "297ba383fc1e4e19cd52bd89df7d0d3148bd4e68" ]
[ "examples/minigrid/eval_maze.py" ]
[ "import seaborn as sns\nimport matplotlib.pyplot as plt\nimport pickle\nimport argparse\nimport numpy as np\nimport os\nsns.set_context('paper', font_scale=1.5)\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-n\", type=int)\nparser.add_argument('--resume-path', type=str, default=None)\nparser.add_argument('--title', type=str, default='default')\nargs = parser.parse_args()\n\ndef get_mask(mean_V_loss):\n mask = np.where(mean_V_loss > 0.9, 1, 0)\n\n return mask\nall_V_loss = []\nfor seed in os.listdir(args.resume_path):\n if seed.startswith(\"heatmap\"):\n continue\n with open(os.path.join(args.resume_path, str(seed), \"Q_tablepickle%d\"%args.n), 'rb') as f:\n Q_table = pickle.load(f)\n print(\"Loaded Q table \", os.path.join(args.resume_path, \"Q_tablepickle%d\"%args.n))\n\n V_table = {}\n for key, value in zip(Q_table.keys(), Q_table.values()):\n V_table[key] = np.max(value)\n V_mean = np.average(list(V_table.values()))\n\n V_loss_table = []\n V_loss_linear = {}\n for i in range(14):\n V_loss_linear[i] = []\n for i in range(1, 8):\n this_loss = []\n for j in range(1, 8):\n # TODO: compute correct real_V\n real_V = 0.99 ** ((7-i) + (7-j))\n try:\n loss = abs(V_table[(i,j)] - real_V)\n except KeyError:\n # loss = abs(V_mean - real_V)\n loss = 1\n this_loss.append(loss)\n V_loss_linear[14-i-j].append(loss)\n V_loss_table.append(this_loss)\n V_loss_table = np.array(V_loss_table)\n all_V_loss.append(V_loss_table)\nall_V_loss = np.array(all_V_loss)\nV_seed_mean = np.average(all_V_loss, axis=(1,2))\nmean_V_loss = np.average(all_V_loss[np.argsort(V_seed_mean)[:2]], axis=0)\n\n\n# ===========plot=============\nfig, ax = plt.subplots()\n\n# frame = sns.heatmap(mean_V_loss, cmap=\"YlGnBu\", vmin=0.1, vmax=0.5)\n# frame = sns.heatmap(mean_V_loss, cmap = 'RdBu_r', vmin=0.1, center=0.45, vmax=0.6, mask=get_mask(mean_V_loss), ax=ax, annot=False)\nannot = [[\"\" for _ in range(7)] for _ in range(7)]\nfor pos in [(2,2), (6,0), (4,2), (4,4), (6,6), (2, 4)]:\n annot[pos[0]][pos[1]] = str(round(mean_V_loss[pos], 2))\nframe = sns.heatmap(mean_V_loss, cmap = sns.color_palette(\"rocket_r\", 20), vmin=0.3, vmax=0.6, \n mask=get_mask(mean_V_loss), ax=ax, annot=annot, fmt=\"\")\nframe.axes.get_xaxis().set_visible(False)\nframe.axes.get_yaxis().set_visible(False)\nframe.set_facecolor(\"gray\")\ntriangle = plt.imread('examples/minigrid/fig/triangle.png')\nsquare = plt.imread('examples/minigrid/fig/square.png')\nnewax = fig.add_axes([0.65, 0.78, 0.1, 0.1])\nnewax.imshow(square)\nnewax.set_xticks([])\nnewax.set_yticks([])\n\nnewax2 = fig.add_axes([0.12, 0.78, 0.1, 0.1])\nnewax2.imshow(triangle)\nnewax2.set_xticks([])\nnewax2.set_yticks([])\n\n# =========save fig============\nif not os.path.isdir(os.path.join(args.resume_path, \"heatmap\")):\n os.mkdir(os.path.join(args.resume_path, \"heatmap\"))\nfig.suptitle(args.title)\nplt.savefig(os.path.join(args.resume_path, \"heatmap\", \"%d.png\"%args.n))\n" ]
[ [ "matplotlib.pyplot.imread", "matplotlib.pyplot.subplots", "numpy.max", "numpy.average", "numpy.argsort", "numpy.array", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Roninkoi/Scicodes
[ "97eb4dc017ad4cd494b545aecaa9fdd7c501a9b7" ]
[ "anova.py" ]
[ "import numpy as np\nfrom scipy.stats import f\n\n# Does analysis of variance for a number of sets x.\n# Each set in x is an array containing mean, variance\n# and number [mean, var, n].\ndef anova(x):\n mean = np.mean(x[:, 0]) # overall mean\n n = np.sum(x[:, 2]) # total N\n r = len(x) # number of sets\n\n ssb = 0.\n for i in range(r): # sum of squares between sets\n ssb += x[i, 2] * (x[i, 0] - mean)**2\n\n ssw = 0.\n for i in range(r): # sum of squares within sets\n ssw += (x[i, 2] - 1) * x[i, 1]\n\n fs = (ssb / (r - 1)) / (ssw / (n - r))\n dfn, dfd = r - 1, n - r # degrees of freedom\n p = f.cdf(fs, dfn, dfd) # P-value from F-distribution\n\n return fs, p\n" ]
[ [ "numpy.mean", "numpy.sum", "scipy.stats.f.cdf" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dudeperf3ct/lightning-flash
[ "a855cd14cf1cd0301b4a2f82c0c95e4d8d986650" ]
[ "flash/image/data.py" ]
[ "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport base64\nfrom io import BytesIO\nfrom pathlib import Path\nfrom typing import Any, Dict, List\n\nimport numpy as np\nimport torch\n\nimport flash\nfrom flash.core.data.io.input import DataKeys, Input, ServeInput\nfrom flash.core.data.utilities.paths import filter_valid_files, has_file_allowed_extension, PATH_TYPE\nfrom flash.core.data.utilities.samples import to_samples\nfrom flash.core.data.utils import image_default_loader\nfrom flash.core.utilities.imports import _TORCHVISION_AVAILABLE, Image, requires\n\nif _TORCHVISION_AVAILABLE:\n from torchvision.datasets.folder import IMG_EXTENSIONS\n from torchvision.transforms.functional import to_pil_image\nelse:\n IMG_EXTENSIONS = (\".jpg\", \".jpeg\", \".png\", \".ppm\", \".bmp\", \".pgm\", \".tif\", \".tiff\", \".webp\")\n\nNP_EXTENSIONS = (\".npy\",)\n\n\ndef image_loader(filepath: str):\n if has_file_allowed_extension(filepath, IMG_EXTENSIONS):\n img = image_default_loader(filepath)\n elif has_file_allowed_extension(filepath, NP_EXTENSIONS):\n img = Image.fromarray(np.load(filepath).astype(\"uint8\"), \"RGB\")\n else:\n raise ValueError(\n f\"File: {filepath} has an unsupported extension. Supported extensions: \"\n f\"{list(IMG_EXTENSIONS + NP_EXTENSIONS)}.\"\n )\n return img\n\n\nclass ImageDeserializer(ServeInput):\n @requires(\"image\")\n def serve_load_sample(self, data: str) -> Dict:\n encoded_with_padding = (data + \"===\").encode(\"ascii\")\n img = base64.b64decode(encoded_with_padding)\n buffer = BytesIO(img)\n img = Image.open(buffer, mode=\"r\")\n return {\n DataKeys.INPUT: img,\n }\n\n @property\n def example_input(self) -> str:\n with (Path(flash.ASSETS_ROOT) / \"fish.jpg\").open(\"rb\") as f:\n return base64.b64encode(f.read()).decode(\"UTF-8\")\n\n\nclass ImageInput(Input):\n @requires(\"image\")\n def load_sample(self, sample: Dict[str, Any]) -> Dict[str, Any]:\n w, h = sample[DataKeys.INPUT].size # W x H\n if DataKeys.METADATA not in sample:\n sample[DataKeys.METADATA] = {}\n sample[DataKeys.METADATA][\"size\"] = (h, w)\n return sample\n\n\nclass ImageFilesInput(ImageInput):\n def load_data(self, files: List[PATH_TYPE]) -> List[Dict[str, Any]]:\n files = filter_valid_files(files, valid_extensions=IMG_EXTENSIONS + NP_EXTENSIONS)\n return to_samples(files)\n\n def load_sample(self, sample: Dict[str, Any]) -> Dict[str, Any]:\n filepath = sample[DataKeys.INPUT]\n sample[DataKeys.INPUT] = image_loader(filepath)\n sample = super().load_sample(sample)\n sample[DataKeys.METADATA][\"filepath\"] = filepath\n return sample\n\n\nclass ImageTensorInput(ImageInput):\n def load_data(self, tensor: Any) -> List[Dict[str, Any]]:\n return to_samples(tensor)\n\n def load_sample(self, sample: Dict[str, Any]) -> Dict[str, Any]:\n img = to_pil_image(sample[DataKeys.INPUT])\n sample[DataKeys.INPUT] = img\n return super().load_sample(sample)\n\n\nclass ImageNumpyInput(ImageInput):\n def load_data(self, array: Any) -> List[Dict[str, Any]]:\n return 
to_samples(array)\n\n def load_sample(self, sample: Dict[str, Any]) -> Dict[str, Any]:\n img = to_pil_image(torch.from_numpy(sample[DataKeys.INPUT]))\n sample[DataKeys.INPUT] = img\n return super().load_sample(sample)\n" ]
[ [ "numpy.load", "torch.from_numpy" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
iamansoni/MSS
[ "69bc8fc61ab277697ca691119f911382a63860c0", "69bc8fc61ab277697ca691119f911382a63860c0", "69bc8fc61ab277697ca691119f911382a63860c0" ]
[ "mslib/mswms/dataaccess.py", "mslib/_tests/test_thermolib.py", "mslib/msui/mpl_qtwidget.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\n\n mslib.mswms.dataaccess\n ~~~~~~~~~~~~~~~~~~~~~~\n\n This module provides functions to access data\n\n This file is part of mss.\n\n :copyright: Copyright 2008-2014 Deutsches Zentrum fuer Luft- und Raumfahrt e.V.\n :copyright: Copyright 2011-2014 Marc Rautenhaus (mr)\n :copyright: Copyright 2016-2020 by the mss team, see AUTHORS.\n :license: APACHE-2.0, see LICENSE for details.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nfrom abc import ABCMeta, abstractmethod\nimport itertools\nimport os\nimport logging\nimport netCDF4\nimport numpy as np\nimport pint\n\nfrom mslib import netCDF4tools\nfrom mslib.utils import UR\n\n\nclass NWPDataAccess(metaclass=ABCMeta):\n \"\"\"Abstract superclass providing a framework to let the user query\n in which data file a given variable at a given time can be found.\n\n The class provides the method get_filename(). It derives filenames from\n CF variable names, initialisation and valid times.q\n The method get_datapath() provides the root path where the data\n can be found.\n\n In subclasses, the protected method _determine_filename() must be\n implemented.\n \"\"\"\n\n def __init__(self, rootpath, uses_init_time=True, uses_valid_time=True):\n \"\"\"Constructor takes the path of the data directory and determines whether\n this class employs different init_times or valid_times.\n \"\"\"\n self._root_path = rootpath\n self._modelname = \"\"\n self._use_init_time = uses_init_time\n self._use_valid_time = uses_valid_time\n\n @abstractmethod\n def setup(self):\n \"\"\"Checks for existing files etc. and sets up the class. Called by\n server whenever a client requests a current capability document.\n \"\"\"\n pass\n\n def have_data(self, variable, vartype, init_time, valid_time):\n \"\"\"Checks whether a file with data for the specified variable,\n type and times is known. 
This does not trigger a search for\n updated data files on disk.\n \"\"\"\n try:\n self._determine_filename(\n variable, vartype, init_time, valid_time, reload=False)\n except ValueError:\n return False\n else:\n return True\n\n def get_filename(self, variable, vartype, init_time, valid_time,\n fullpath=False):\n \"\"\"Get the filename of the file in which a given variable at\n a given time can be found.\n\n In case no file is available, the disk is searched for updated\n data before failing.\n\n Arguments:\n variable -- string with CF name of variable\n vartype -- string specifying the type of the variable (model specific).\n For example, can be ml (model level), pl (pressure level),\n or sfc (surface) for, e.g., ECMWF data.\n init_time -- datetime object with initialisation time of forecast run\n valid_time -- datetime object with valid time of forecast\n fullpath -- if True, the complete path to the file will be returned.\n Default is False, only the filename will be returned.\n \"\"\"\n filename = self._determine_filename(variable, vartype,\n init_time, valid_time)\n if fullpath:\n return os.path.join(self._root_path, filename)\n else:\n return filename\n\n @abstractmethod\n def _determine_filename(self, variable, vartype, init_time, valid_time):\n \"\"\"Must be overwritten in subclass. Determines the filename\n (without path) of the variable <variable> at the forecast\n timestep specified by init_time and valid_time.\n \"\"\"\n pass\n\n def get_datapath(self):\n \"\"\"Return the path to the data directory.\n \"\"\"\n return self._root_path\n\n def uses_inittime_dimension(self):\n \"\"\" Return whether this data set supports multiple init times\n \"\"\"\n return self._use_init_time\n\n def uses_validtime_dimension(self):\n \"\"\" Return whether this data set supports multiple valid times\n \"\"\"\n return self._use_valid_time\n\n @abstractmethod\n def get_all_datafiles(self):\n \"\"\"Return a list of all available data files.\n \"\"\"\n pass\n\n @abstractmethod\n def get_init_times(self):\n \"\"\"Return a list of available forecast init times (base times).\n \"\"\"\n pass\n\n @abstractmethod\n def get_valid_times(self):\n \"\"\"Return a list of available forecast times.\n \"\"\"\n pass\n\n @abstractmethod\n def get_elevations(self, vert_type):\n \"\"\"Return a list of available elevations for a vertical level type.\n \"\"\"\n pass\n\n @abstractmethod\n def get_elevation_units(self, vert_type):\n \"\"\"Returns units of supplied vertical type.\n \"\"\"\n pass\n\n _mfDatasetArgsDict = {}\n\n def mfDatasetArgs(self):\n \"\"\"Returns additional keyword for the MFDatasetCommonDims instance that\n handles the input data of this dataset. 
See the MFDatasetCommonDims\n documentation for further details.\n Mainly provided as a workaround for numerical inaccuracies introduced\n to the NetCDF files by netcdf-java 4.3.\n (mr, 16Oct2012)\n \"\"\"\n return self._mfDatasetArgsDict\n\n\nclass DefaultDataAccess(NWPDataAccess):\n \"\"\"\n Subclass to NWPDataAccess for accessing properly constructed NetCDF files\n Constructor needs information on domain ID.\n \"\"\"\n\n # Workaround for the numerical issue concering the lon dimension in\n # NetCDF files produced by netcdf-java 4.3..\n\n def __init__(self, rootpath, domain_id, skip_dim_check=[], **kwargs):\n \"\"\"Constructor takes the path of the data directory and determines whether\n this class employs different init_times or valid_times.\n \"\"\"\n NWPDataAccess.__init__(self, rootpath, **kwargs)\n self._domain_id = domain_id\n self._available_files = None\n self._filetree = None\n self._mfDatasetArgsDict = {\"skip_dim_check\": skip_dim_check}\n\n def _determine_filename(self, variable, vartype, init_time, valid_time, reload=True):\n \"\"\"Determines the name of the data file that contains\n the variable <variable> with type <vartype> of the forecast specified\n by <init_time> and <valid_time>.\n \"\"\"\n assert self._filetree is not None, \"filetree is None. Forgot to call setup()?\"\n try:\n return self._filetree[vartype][init_time][variable][valid_time]\n except KeyError:\n if reload:\n self.setup()\n try:\n return self._filetree[vartype][init_time][variable][valid_time]\n except KeyError as ex:\n logging.error(\"Could not identify filename. %s %s %s %s %s %s\",\n variable, vartype, init_time, valid_time, type(ex), ex)\n raise ValueError(\"variable type {} not available for variable {}\"\n .format(vartype, variable))\n\n def _parse_file(self, filename):\n elevations = {\"levels\": [], \"units\": None}\n with netCDF4.Dataset(os.path.join(self._root_path, filename)) as dataset:\n\n time_name, time_var = netCDF4tools.identify_CF_time(dataset)\n init_time = netCDF4tools.num2date(0, time_var.units)\n if not self.uses_inittime_dimension():\n init_time = None\n valid_times = netCDF4tools.num2date(time_var[:], time_var.units)\n if not self.uses_validtime_dimension():\n if len(valid_times) > 0:\n raise IOError(\"Skipping file '{}: no support for valid time, but multiple \"\n \"time steps present\".format(filename))\n valid_times = [None]\n lat_name, lat_var, lon_name, lon_var = netCDF4tools.identify_CF_lonlat(dataset)\n vert_name, vert_var, _, _, vert_type = netCDF4tools.identify_vertical_axis(dataset)\n\n if len(time_var.dimensions) != 1 or time_var.dimensions[0] != time_name:\n raise IOError(\"Problem with time coordinate variable\")\n if len(lat_var.dimensions) != 1 or lat_var.dimensions[0] != lat_name:\n raise IOError(\"Problem with latitude coordinate variable\")\n if len(lon_var.dimensions) != 1 or lon_var.dimensions[0] != lon_name:\n raise IOError(\"Problem with longitude coordinate variable\")\n\n if vert_type != \"sfc\":\n elevations = {\"levels\": vert_var[:], \"units\": vert_var.units}\n if vert_type in self._elevations:\n if len(vert_var[:]) != len(self._elevations[vert_type][\"levels\"]):\n raise IOError(\"Number of vertical levels does not fit to levels of \"\n \"previous file '{}'.\".format(self._elevations[vert_type][\"filename\"]))\n if not np.allclose(vert_var[:], self._elevations[vert_type][\"levels\"]):\n raise IOError(\"vertical levels do not fit to levels of previous \"\n \"file '{}'.\".format(self._elevations[vert_type][\"filename\"]))\n if elevations[\"units\"] != 
self._elevations[vert_type][\"units\"]:\n raise IOError(\"vertical level units do not match previous file '{}'\".format(\n self._elevations[vert_type][\"filename\"]))\n\n standard_names = []\n for ncvarname, ncvar in dataset.variables.items():\n if hasattr(ncvar, \"standard_name\"):\n if (len(ncvar.dimensions) >= 3 and (\n ncvar.dimensions[0] != time_name or\n ncvar.dimensions[-2] != lat_name or\n ncvar.dimensions[-1] != lon_name)):\n logging.error(\"Skipping variable '%s' in file '%s': Incorrect order of dimensions\",\n ncvarname, filename)\n continue\n if not hasattr(ncvar, \"units\"):\n logging.error(\"Skipping variable '%s' in file '%s': No units attribute\",\n ncvarname, filename)\n continue\n if ncvar.standard_name != \"time\":\n try:\n UR(ncvar.units)\n except (ValueError, pint.UndefinedUnitError):\n logging.error(\"Skipping variable '%s' in file '%s': unparseable units attribute '%s'\",\n ncvarname, filename, ncvar.units)\n continue\n if len(ncvar.shape) == 4 and vert_name in ncvar.dimensions:\n standard_names.append(ncvar.standard_name)\n elif len(ncvar.shape) == 3 and vert_type == \"sfc\":\n standard_names.append(ncvar.standard_name)\n return {\n \"vert_type\": vert_type,\n \"elevations\": elevations,\n \"init_time\": init_time,\n \"valid_times\": valid_times,\n \"standard_names\": standard_names\n }\n\n def _add_to_filetree(self, filename, content):\n logging.info(\"File '%s' identified as '%s' type\", filename, content[\"vert_type\"])\n logging.info(\"Found init time '%s', %s valid_times and %s standard_names\",\n content[\"init_time\"], len(content[\"valid_times\"]), len(content[\"standard_names\"]))\n if len(content[\"valid_times\"]) == 0 or len(content[\"standard_names\"]) == 0:\n logging.error(\n \"Something is wrong with this file... valid_times='%s' standard_names='%s'\",\n content[\"valid_times\"], content[\"standard_names\"])\n else:\n logging.debug(\"valid_times='%s' standard_names='%s'\",\n content[\"valid_times\"], content[\"standard_names\"])\n leaf = self._filetree.setdefault(content[\"vert_type\"], {}).setdefault(content[\"init_time\"], {})\n for standard_name in content[\"standard_names\"]:\n var_leaf = leaf.setdefault(standard_name, {})\n for valid_time in content[\"valid_times\"]:\n if valid_time in var_leaf:\n logging.warning(\n \"some data was found twice! 
vartype='%s' init_time='%s' standard_name='%s' \"\n \"valid_time='%s' first_file='%s' second_file='%s'\",\n content[\"vert_type\"], content[\"init_time\"], standard_name,\n valid_time, var_leaf[valid_time], filename)\n else:\n var_leaf[valid_time] = filename\n\n def setup(self):\n # Get a list of the available data files.\n self._available_files = [\n _filename for _filename in sorted(os.listdir(self._root_path)) if self._domain_id in _filename]\n logging.info(\"Files identified for domain '%s': %s\",\n self._domain_id, self._available_files)\n\n self._filetree = {}\n self._elevations = {\"sfc\": {\"filename\": None, \"levels\": [], \"units\": None}}\n\n # Build the tree structure.\n for filename in self._available_files:\n logging.info(\"Opening candidate '%s'\", filename)\n try:\n content = self._parse_file(filename)\n except IOError as ex:\n logging.error(\"Skipping file '%s' (%s: %s)\", filename, type(ex), ex)\n continue\n if content[\"vert_type\"] not in self._elevations:\n self._elevations[content[\"vert_type\"]] = content[\"elevations\"]\n self._add_to_filetree(filename, content)\n\n def get_init_times(self):\n \"\"\"Returns a list of available forecast init times (base times).\n \"\"\"\n init_times = set(itertools.chain.from_iterable(\n self._filetree[_x].keys() for _x in self._filetree))\n return sorted(init_times)\n\n def get_valid_times(self, variable, vartype, init_time):\n \"\"\"Returns a list of available valid times for the specified\n variable at the specified init time.\n \"\"\"\n try:\n return sorted(self._filetree[vartype][init_time][variable])\n except KeyError as ex:\n logging.error(\"Could not find times! %s %s\", type(ex), ex)\n return []\n\n def get_elevations(self, vert_type):\n \"\"\"Return a list of available elevations for a vertical level type.\n \"\"\"\n logging.debug(\"%s\", self._elevations)\n return self._elevations[vert_type][\"levels\"]\n\n def get_elevation_units(self, vert_type):\n \"\"\"Return a list of available elevations for a vertical level type.\n \"\"\"\n logging.debug(\"%s\", self._elevations)\n return self._elevations[vert_type][\"units\"]\n\n def get_all_valid_times(self, variable, vartype):\n \"\"\"Similar to get_valid_times(), but returns the combined valid times\n of all available init times.\n \"\"\"\n all_valid_times = []\n if vartype not in self._filetree:\n return []\n for init_time in self._filetree[vartype]:\n if variable in self._filetree[vartype][init_time]:\n all_valid_times.extend(list(self._filetree[vartype][init_time][variable]))\n return sorted(set(all_valid_times))\n\n def get_all_datafiles(self):\n \"\"\"Return a list of all available data files.\n \"\"\"\n return self._available_files\n\n\nclass CachedDataAccess(DefaultDataAccess):\n \"\"\"\n Subclass to NWPDataAccess for accessing properly constructed NetCDF files\n Constructor needs information on domain ID.\n\n Uses file name and modification date to reduce setup time by caching directory\n content in a dictionary.\n \"\"\"\n\n def __init__(self, rootpath, domain_id, **kwargs):\n \"\"\"Constructor takes the path of the data directory and determines whether\n this class employs different init_times or valid_times.\n \"\"\"\n DefaultDataAccess.__init__(self, rootpath, domain_id, **kwargs)\n self._file_cache = {}\n\n def setup(self):\n # Get a list of the available data files.\n self._available_files = [\n _filename for _filename in os.listdir(self._root_path) if self._domain_id in _filename]\n logging.info(\"Files identified for domain '%s': %s\",\n self._domain_id, 
self._available_files)\n\n for filename in list(self._file_cache):\n if filename not in self._available_files:\n del self._file_cache[filename]\n\n self._filetree = {}\n self._elevations = {\"sfc\": {\"filename\": None, \"levels\": []}}\n\n # Build the tree structure.\n for filename in self._available_files:\n mtime = os.path.getmtime(os.path.join(self._root_path, filename))\n if filename in self._file_cache and mtime == self._file_cache[filename][0]:\n logging.info(\"Using cached candidate '%s'\", filename)\n content = self._file_cache[filename][1]\n if content[\"vert_type\"] != \"sfc\":\n if content[\"vert_type\"] not in self._elevations:\n self._elevations[content[\"vert_type\"]] = content[\"elevations\"]\n elif not np.allclose(\n self._elevations[content[\"vert_type\"]][\"levels\"],\n content[\"elevations\"][\"levels\"]):\n logging.error(\"Skipping file '%s' due to elevation mismatch\", filename)\n continue\n\n else:\n if filename in self._file_cache:\n del self._file_cache[filename]\n logging.info(\"Opening candidate '%s'\", filename)\n try:\n content = self._parse_file(filename)\n except IOError as ex:\n logging.error(\"Skipping file '%s' (%s: %s)\", filename, type(ex), ex)\n continue\n self._file_cache[filename] = (mtime, content)\n self._add_to_filetree(filename, content)\n", "# -*- coding: utf-8 -*-\n\"\"\"\n\n mslib._test.test_thermoblib\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n Tests for the thermolib module.\n\n This file is part of mss.\n\n :copyright: Copyright 2017 Marc Rautenhaus\n :copyright: Copyright 2016-2020 by the mss team, see AUTHORS.\n :license: APACHE-2.0, see LICENSE for details.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport numpy as np\nimport pytest\n\nimport mslib.thermolib as tl\n\n\ndef test_flightlevel2pressure():\n assert tl.flightlevel2pressure(182.8913020844737) == pytest.approx(50000)\n assert tl.flightlevel2pressure(530.8390754393636) == pytest.approx(10000)\n assert tl.flightlevel2pressure(782.4486256345779) == pytest.approx(3000)\n assert tl.flightlevel2pressure(1151.9849776810745) == pytest.approx(550)\n assert tl.flightlevel2pressure(1626.9512858549855) == pytest.approx(80)\n assert tl.flightlevel2pressure(1804.3261490037305) == pytest.approx(40)\n with pytest.raises(ValueError):\n tl.flightlevel2pressure(72000 / 30.48)\n fls = np.arange(0, 71000, 1000) / 30.48\n assert np.allclose([tl.flightlevel2pressure(_x) for _x in fls],\n tl.flightlevel2pressure_a(fls))\n\n\ndef test_pressure2flightlevel():\n assert tl.pressure2flightlevel(50000) == pytest.approx(182.89130205844737)\n assert tl.pressure2flightlevel(10000) == pytest.approx(530.8390754393636)\n assert tl.pressure2flightlevel(3000) == pytest.approx(782.4486256345779)\n assert tl.pressure2flightlevel(550) == pytest.approx(1151.9849776810745)\n assert tl.pressure2flightlevel(80) == pytest.approx(1626.9512858549855)\n assert tl.pressure2flightlevel(40) == pytest.approx(1804.3261490037305)\n with pytest.raises(ValueError):\n tl.pressure2flightlevel(3.9)\n pss = np.arange(5., 100000., 100.)\n assert 
np.allclose([tl.pressure2flightlevel(_x) for _x in pss],\n tl.pressure2flightlevel_a(pss))\n\n\ndef test_isa_temperature():\n assert (tl.isa_temperature(100) - 268.3379999999811) < 1e-6\n assert (tl.isa_temperature(200) - 248.5259999999622) < 1e-6\n assert (tl.isa_temperature(300) - 228.7139999999434) < 1e-6\n assert tl.isa_temperature(400) == 216.65\n assert tl.isa_temperature(500) == 216.65\n assert tl.isa_temperature(600) == 216.65\n assert (tl.isa_temperature(700) - 217.9860000000203) < 1e-6\n assert (tl.isa_temperature(800) - 221.0340000000232) < 1e-6\n with pytest.raises(ValueError):\n tl.isa_temperature(1568.9002625)\n\n\ndef test_geop_thickness():\n \"\"\"Test geop_thickness() with some values from the 1976 US standard\n atmosphere.\n \"\"\"\n pytest.skip(\"this test does not make sense, yet\")\n # Define some std. atmosphere values (height in m, T in K, p in Pa).\n std_atm_76 = np.array([[0, 288.15, 101325],\n [500, 284.9, 95460.839342],\n [1000, 281.65, 89874.570502],\n [1500, 278.4, 84556.004841],\n [2000, 275.15, 79495.215511],\n [2500, 271.9, 74682.533661],\n [3000, 268.65, 70108.54467],\n [3500, 265.4, 65764.084371],\n [4000, 262.15, 61640.235304],\n [4500, 258.9, 57728.32297],\n [5000, 255.65, 54019.912104],\n [5500, 252.4, 50506.802952],\n [6000, 249.15, 47181.027568],\n [6500, 245.9, 44034.846117],\n [7000, 242.65, 41060.743191],\n [7500, 239.4, 38251.424142],\n [8000, 236.15, 35599.811423],\n [8500, 232.9, 33099.040939],\n [9000, 229.65, 30742.45842],\n [9500, 226.4, 28523.615797],\n [10000, 223.15, 26436.267594],\n [10500, 219.9, 24474.367338],\n [11000, 216.65, 22632.063973],\n [11500, 216.65, 20916.189034],\n [12000, 216.65, 19330.405049],\n [12500, 216.65, 17864.849029],\n [13000, 216.65, 16510.405758],\n [13500, 216.65, 15258.6511],\n [14000, 216.65, 14101.799606],\n [14500, 216.65, 13032.656085],\n [15000, 216.65, 12044.570862],\n [15500, 216.65, 11131.398413],\n [16000, 216.65, 10287.459141],\n [16500, 216.65, 9507.504058],\n [17000, 216.65, 8786.682132],\n [17500, 216.65, 8120.510116],\n [18000, 216.65, 7504.844668],\n [18500, 216.65, 6935.856576],\n [19000, 216.65, 6410.006945],\n [19500, 216.65, 5924.025185],\n [20000, 216.65, 5474.88867]])\n\n # Extract p and T arrays.\n p = std_atm_76[:, 2]\n t = std_atm_76[:, 1]\n\n # Compute geopotential difference and layer thickness. 
Layer thickness\n # should be similar to the actual altitude given above.\n geopd = tl.geop_difference(p, t, method='cumtrapz') # noqa\n geopt = tl.geop_thickness(p, t, cumulative=True) # noqa\n", "# -*- coding: utf-8 -*-\n\"\"\"\n\n mslib.msui.mpl_qtwidget\n ~~~~~~~~~~~~~~~~~~~~~~~\n\n Definitions of Matplotlib widgets for Qt Designer.\n\n This file is part of mss.\n\n :copyright: Copyright 2008-2014 Deutsches Zentrum fuer Luft- und Raumfahrt e.V.\n :copyright: Copyright 2011-2014 Marc Rautenhaus (mr)\n :copyright: Copyright 2016-2020 by the mss team, see AUTHORS.\n :license: APACHE-2.0, see LICENSE for details.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\n# Parts of the code have been adapted from Chapter 6 of Sandro Tosi,\n# 'Matplotlib for Python Developers'.\n\nfrom datetime import datetime\nimport enum\nimport os\nimport six\nimport logging\nimport numpy as np\nimport matplotlib\nfrom fs import open_fs\nfrom fslib.fs_filepicker import getSaveFileNameAndFilter\nfrom matplotlib import cbook, figure\nfrom matplotlib.backends.backend_qt5agg import NavigationToolbar2QT, FigureCanvasQTAgg\nimport matplotlib.backend_bases\nfrom mslib import thermolib\nfrom mslib.utils import config_loader, FatalUserError\nfrom mslib.msui import MissionSupportSystemDefaultConfig as mss_default\nfrom mslib.msui import mpl_pathinteractor as mpl_pi\nfrom mslib.msui import mpl_map\nfrom mslib.msui.icons import icons\nfrom PyQt5 import QtCore, QtWidgets, QtGui\nfrom mslib.utils import convert_pressure_to_vertical_axis_measure\n\nPIL_IMAGE_ORIGIN = \"upper\"\nLAST_SAVE_DIRECTORY = config_loader(dataset=\"data_dir\")\n\nmatplotlib.rcParams['savefig.directory'] = LAST_SAVE_DIRECTORY\n\n\nclass MplCanvas(FigureCanvasQTAgg):\n \"\"\"Class to represent the FigureCanvasQTAgg widget.\n\n Main axes instance has zorder 99 (important when additional\n axes are added).\n \"\"\"\n\n def __init__(self):\n # setup Matplotlib Figure and Axis\n self.fig = figure.Figure(facecolor=\"w\") # 0.75\n self.ax = self.fig.add_subplot(111, zorder=99)\n self.default_filename = \"_image\"\n\n # initialization of the canvas\n super(MplCanvas, self).__init__(self.fig)\n\n # we define the widget as expandable\n super(MplCanvas, self).setSizePolicy(\n QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)\n\n # notify the system of updated policy\n super(MplCanvas, self).updateGeometry()\n\n def get_default_filename(self):\n result = self.basename + self.default_filename\n if len(result) > 100:\n result = result[:100]\n return result + \".png\"\n\n def draw_metadata(self, title=\"\", init_time=None, valid_time=None,\n level=None, style=None):\n \"\"\"Draw a title indicating the init and valid time of the\n image that has been drawn, and the vertical elevation level.\n \"\"\"\n self.default_filename = \"\"\n if title:\n self.default_filename += \"_{:>5}\".format(title.split()[0])\n if style:\n title += \" ({})\".format(style)\n if level:\n title += \" at {}\".format(level)\n self.default_filename += \"_{}\".format(level.split()[0])\n 
if isinstance(valid_time, datetime) and isinstance(init_time, datetime):\n time_step = valid_time - init_time\n else:\n time_step = None\n if isinstance(valid_time, datetime):\n valid_time = valid_time.strftime('%a %Y-%m-%d %H:%M UTC')\n if isinstance(init_time, datetime):\n init_time = init_time.strftime('%a %Y-%m-%d %H:%M UTC')\n\n # Add valid time / init time information to the title.\n if valid_time:\n if init_time:\n if time_step is not None:\n title += \"\\nValid: {} (step {:d} hrs from {})\".format(\n valid_time, (time_step.days * 86400 + time_step.seconds) // 3600, init_time)\n else:\n title += \"\\nValid: {} (initialisation: {})\".format(valid_time, init_time)\n else:\n title += \"\\nValid: {}\".format(valid_time)\n\n # Set title.\n self.ax.set_title(title, horizontalalignment='left', x=0)\n self.draw()\n # without the repaint the title is not properly updated\n self.repaint()\n\n def get_plot_size_in_px(self):\n \"\"\"Determines the size of the current figure in pixels.\n\n Returns the tuple width, height.\n \"\"\"\n # (bounds = left, bottom, width, height)\n ax_bounds = self.ax.bbox.bounds\n width = int(round(ax_bounds[2]))\n height = int(round(ax_bounds[3]))\n return width, height\n\n\nclass MplWidget(QtWidgets.QWidget):\n \"\"\"Matplotlib canvas widget defined in Qt Designer\"\"\"\n\n def __init__(self, parent=None):\n # initialization of Qt MainWindow widget\n super(MplWidget, self).__init__(parent)\n\n # set the canvas to the Matplotlib widget\n self.canvas = MplCanvas()\n\n # create a vertical box layout\n self.vbl = QtWidgets.QVBoxLayout()\n\n # add mpl widget to vertical box\n self.vbl.addWidget(self.canvas)\n\n # set the layout to th vertical box\n self.setLayout(self.vbl)\n\n\ndef _getSaveFileName(parent, title=\"Choose a filename to save to\", filename=\"test.png\",\n filters=\" Images (*.png)\"):\n _dirname, _name = os.path.split(filename)\n _dirname = os.path.join(_dirname, \"\")\n return getSaveFileNameAndFilter(parent, fs_url=_dirname, file_pattern=filters,\n title=title, default_filename=_name, show_save_action=True)\n\n\nsave_figure_original = NavigationToolbar2QT.save_figure\n\n\ndef save_figure(self, *args):\n picker_type = config_loader(dataset=\"filepicker_default\")\n if picker_type in [\"default\", \"qt\"]:\n save_figure_original(self, *args)\n elif picker_type == \"fs\":\n filetypes = self.canvas.get_supported_filetypes_grouped()\n sorted_filetypes = sorted(six.iteritems(filetypes))\n startpath = matplotlib.rcParams.get('savefig.directory', LAST_SAVE_DIRECTORY)\n startpath = os.path.expanduser(startpath)\n start = os.path.join(startpath, self.canvas.get_default_filename())\n filters = []\n for name, exts in sorted_filetypes:\n exts_list = \" \".join(['*.%s' % ext for ext in exts])\n filter = '%s (%s)' % (name, exts_list)\n filters.append(filter)\n\n fname, filter = _getSaveFileName(self.parent,\n title=\"Choose a filename to save to\",\n filename=start, filters=filters)\n if fname is not None:\n if not fname.endswith(filter[1:]):\n fname = filter.replace('*', fname)\n if startpath == '':\n # explicitly missing key or empty str signals to use cwd\n matplotlib.rcParams['savefig.directory'] = startpath\n else:\n # save dir for next time\n savefig_dir = os.path.dirname(six.text_type(fname))\n matplotlib.rcParams['savefig.directory'] = savefig_dir\n try:\n _dirname, _name = os.path.split(fname)\n _fs = open_fs(_dirname)\n with _fs.open(_name, 'wb') as source:\n self.canvas.print_figure(source, format=filter.replace('*.', ''))\n except Exception as e:\n 
QtWidgets.QMessageBox.critical(\n self, \"Error saving file\", six.text_type(e),\n QtWidgets.QMessageBox.Ok, QtWidgets.QMessageBox.NoButton)\n else:\n raise FatalUserError(\"Unknown file picker type '{}'\".format(picker_type))\n\n\n# Patch matplotlib function\nNavigationToolbar2QT.save_figure = save_figure\n\n\nclass _Mode(str, enum.Enum):\n \"\"\"\n Override _Mode of backend_base to include our tools.\n \"\"\"\n NONE = \"\"\n PAN = \"pan/zoom\"\n ZOOM = \"zoom rect\"\n INSERT_WP = \"insert waypoint\"\n DELETE_WP = \"delete waypoint\"\n MOVE_WP = \"move waypoint\"\n\n def __str__(self):\n return self.value\n\n @property\n def _navigate_mode(self):\n return self.name if self is not _Mode.NONE else None\n\n\nmatplotlib.backend_bases._Mode = _Mode\n\n\nclass NavigationToolbar(NavigationToolbar2QT):\n \"\"\"\n parts of this class have been copied from the NavigationToolbar2QT class.\n\n According to https://matplotlib.org/users/license.html we shall\n summarise our changes to matplotlib code:\n\n We copied small parts of the given implementation of the navigation\n toolbar class to allow for our custom waypoint buttons. Our code extends\n the matplotlib toolbar to allow for less or additional buttons and properly\n update all plots and elements in case the pan or zoom elements were\n triggered by the user.\n \"\"\"\n def __init__(self, canvas, parent, sideview=False, coordinates=True):\n self.sideview = sideview\n\n if sideview:\n self.toolitems = [\n _x for _x in self.toolitems if _x[0] in ('Save',)]\n self.set_history_buttons = lambda: None\n else:\n self.toolitems = [\n _x for _x in self.toolitems if\n _x[0] in (None, 'Home', 'Back', 'Forward', 'Pan', 'Zoom', 'Save')]\n\n self.toolitems.extend([\n (None, None, None, None),\n ('Mv WP', 'Move waypoints', \"wp_move\", 'move_wp'),\n ('Ins WP', 'Insert waypoints', \"wp_insert\", 'insert_wp'),\n ('Del WP', 'Delete waypoints', \"wp_delete\", 'delete_wp'),\n ])\n super(NavigationToolbar, self).__init__(canvas, parent, coordinates)\n self._actions[\"move_wp\"].setCheckable(True)\n self._actions[\"insert_wp\"].setCheckable(True)\n self._actions[\"delete_wp\"].setCheckable(True)\n\n self.setIconSize(QtCore.QSize(24, 24))\n self.layout().setSpacing(12)\n self.canvas = canvas\n\n def _icon(self, name, *args):\n \"\"\"\n wrapper around base method to inject our own icons.\n \"\"\"\n myname = icons(\"32x32\", name)\n if os.path.exists(myname):\n return QtGui.QIcon(myname)\n else:\n return super(NavigationToolbar, self)._icon(name, *args)\n\n def _zoom_pan_handler(self, event):\n \"\"\"\n extend zoom_pan_handler of base class with our own tools\n \"\"\"\n super(NavigationToolbar, self)._zoom_pan_handler(event)\n if event.name == \"button_press_event\":\n if self.mode in (_Mode.INSERT_WP, _Mode.MOVE_WP, _Mode.DELETE_WP):\n self.canvas.waypoints_interactor.button_press_callback(event)\n elif event.name == \"button_release_event\":\n if self.mode == _Mode.INSERT_WP:\n self.canvas.waypoints_interactor.button_release_insert_callback(event)\n elif self.mode == _Mode.MOVE_WP:\n self.canvas.waypoints_interactor.button_release_move_callback(event)\n elif self.mode == _Mode.DELETE_WP:\n self.canvas.waypoints_interactor.button_release_delete_callback(event)\n\n def insert_wp(self, *args):\n \"\"\"\n activate insert_wp tool\n \"\"\"\n if self.mode == _Mode.INSERT_WP:\n self.mode = _Mode.NONE\n self.canvas.widgetlock.release(self)\n else:\n self.mode = _Mode.INSERT_WP\n self.canvas.widgetlock(self)\n for a in self.canvas.figure.get_axes():\n 
a.set_navigate_mode(self.mode._navigate_mode)\n self.set_message(self.mode)\n self._update_buttons_checked()\n\n def delete_wp(self, *args):\n \"\"\"\n activate delete_wp tool\n \"\"\"\n if self.mode == _Mode.DELETE_WP:\n self.mode = _Mode.NONE\n self.canvas.widgetlock.release(self)\n else:\n self.mode = _Mode.DELETE_WP\n self.canvas.widgetlock(self)\n for a in self.canvas.figure.get_axes():\n a.set_navigate_mode(self.mode._navigate_mode)\n self.set_message(self.mode)\n self._update_buttons_checked()\n\n def move_wp(self, *args):\n \"\"\"\n activate move_wp tool\n \"\"\"\n if self.mode == _Mode.MOVE_WP:\n self.mode = _Mode.NONE\n self.canvas.widgetlock.release(self)\n else:\n self.mode = _Mode.MOVE_WP\n self.canvas.widgetlock(self)\n for a in self.canvas.figure.get_axes():\n a.set_navigate_mode(self.mode._navigate_mode)\n self.set_message(self.mode)\n self._update_buttons_checked()\n\n def release_zoom(self, event):\n super(NavigationToolbar, self).release_zoom(event)\n self.canvas.redraw_map()\n\n def release_pan(self, event):\n super(NavigationToolbar, self).release_pan(event)\n self.canvas.redraw_map()\n\n def mouse_move(self, event):\n \"\"\"\n overwrite mouse_move to print lon/lat instead of x/y coordinates.\n \"\"\"\n if self.mode == _Mode.MOVE_WP:\n self.canvas.waypoints_interactor.motion_notify_callback(event)\n if not self.sideview:\n self._update_cursor(event)\n\n if event.inaxes and event.inaxes.get_navigate():\n try:\n lat, lon = self.canvas.waypoints_interactor.get_lat_lon(event)\n except (ValueError, OverflowError) as ex:\n logging.error(\"%s\", ex)\n else:\n s = f\"lat={lat:6.2f}, lon={lon:7.2f}\"\n artists = [a for a in event.inaxes._mouseover_set\n if a.contains(event)[0] and a.get_visible()]\n if artists:\n a = cbook._topmost_artist(artists)\n if a is not event.inaxes.patch:\n data = a.get_cursor_data(event)\n if data is not None:\n data_str = a.format_cursor_data(data)\n if data_str is not None:\n s += \" \" + data_str\n if self.mode:\n s = self.mode + \", \" + s\n self.set_message(s)\n else:\n self.set_message(self.mode)\n else:\n if not event.ydata or not event.xdata:\n self.set_message(self.mode)\n else:\n (lat, lon), _ = self.canvas.waypoints_interactor.get_lat_lon(event)\n y_value = convert_pressure_to_vertical_axis_measure(\n self.canvas.settings_dict[\"vertical_axis\"], event.ydata)\n self.set_message(\"{} lat={:6.2f} lon={:7.2f} altitude={:.2f}\".format(\n self.mode, lat, lon, y_value))\n\n def _update_buttons_checked(self):\n super(NavigationToolbar, self)._update_buttons_checked()\n if \"insert_wp\" in self._actions:\n self._actions['insert_wp'].setChecked(self.mode.name == 'INSERT_WP')\n if \"delete_wp\" in self._actions:\n self._actions['delete_wp'].setChecked(self.mode.name == 'DELETE_WP')\n if \"move_wp\" in self._actions:\n self._actions['move_wp'].setChecked(self.mode.name == 'MOVE_WP')\n\n\nclass MplNavBarWidget(QtWidgets.QWidget):\n \"\"\"Matplotlib canvas widget with navigation toolbar defined in Qt Designer\"\"\"\n\n def __init__(self, sideview=False, parent=None, canvas=None):\n # initialization of Qt MainWindow widget\n super(MplNavBarWidget, self).__init__(parent)\n\n # set the canvas to the Matplotlib widget\n if canvas:\n self.canvas = canvas\n else:\n self.canvas = MplCanvas()\n\n # instantiate the navigation toolbar\n self.navbar = NavigationToolbar(self.canvas, self, sideview)\n\n # create a vertical box layout\n self.vbl = QtWidgets.QVBoxLayout()\n\n # add mpl widget to vertical box\n self.vbl.addWidget(self.navbar)\n 
self.vbl.addWidget(self.canvas)\n\n # set the layout to th vertical box\n self.setLayout(self.vbl)\n\n\nclass MplSideViewCanvas(MplCanvas):\n \"\"\"Specialised MplCanvas that draws a side view (vertical section) of a\n flight track / list of waypoints.\n \"\"\"\n _pres_maj = np.concatenate([np.arange(top * 10, top, -top) for top in (10000, 1000, 100, 10)] + [[10]])\n _pres_min = np.concatenate([np.arange(top * 10, top, -top // 10) for top in (10000, 1000, 100, 10)] + [[10]])\n\n _pres_maj = np.concatenate([np.arange(top * 10, top, -top) for top in (10000, 1000, 100, 10)] + [[10]])\n _pres_min = np.concatenate([np.arange(top * 10, top, -top // 10) for top in (10000, 1000, 100, 10)] + [[10]])\n\n def __init__(self, model=None, settings=None, numlabels=None):\n \"\"\"\n Arguments:\n model -- WaypointsTableModel defining the vertical section.\n \"\"\"\n if numlabels is None:\n numlabels = config_loader(dataset='num_labels')\n super(MplSideViewCanvas, self).__init__()\n\n # Default settings.\n self.settings_dict = {\"vertical_extent\": (1050, 180),\n \"vertical_axis\": \"pressure\",\n \"flightlevels\": [],\n \"draw_flightlevels\": True,\n \"draw_flighttrack\": True,\n \"fill_flighttrack\": True,\n \"label_flighttrack\": True,\n \"draw_ceiling\": True,\n \"colour_ft_vertices\": (0, 0, 1, 1),\n \"colour_ft_waypoints\": (1, 0, 0, 1),\n \"colour_ft_fill\": (0, 0, 1, 0.15),\n \"colour_ceiling\": (0, 0, 1, 0.15)}\n if settings is not None:\n self.settings_dict.update(settings)\n\n # Setup the plot.\n self.p_bot = self.settings_dict[\"vertical_extent\"][0] * 100\n self.p_top = self.settings_dict[\"vertical_extent\"][1] * 100\n self.numlabels = numlabels\n self.setup_side_view()\n # Draw a number of flight level lines.\n self.flightlevels = []\n self.fl_label_list = []\n self.draw_flight_levels()\n self.imgax = None\n self.image = None\n self.ceiling_alt = []\n # If a waypoints model has been passed, create an interactor on it.\n self.waypoints_interactor = None\n self.waypoints_model = None\n self.basename = \"sideview\"\n if model:\n self.set_waypoints_model(model)\n\n self.set_settings(self.settings_dict)\n\n def set_waypoints_model(self, model):\n \"\"\"Set the WaypointsTableModel defining the vertical section.\n If no model had been set before, create a new interactor object on the\n model to let the user interactively move the altitude of the waypoints.\n \"\"\"\n self.waypoints_model = model\n if self.waypoints_interactor:\n self.waypoints_interactor.set_waypoints_model(model)\n else:\n # Create a path interactor object. The interactor object connects\n # itself to the change() signals of the flight track data model.\n self.waypoints_interactor = mpl_pi.VPathInteractor(\n self.ax, self.waypoints_model,\n numintpoints=config_loader(dataset=\"num_interpolation_points\"),\n redraw_xaxis=self.redraw_xaxis, clear_figure=self.clear_figure\n )\n\n def redraw_yaxis(self):\n \"\"\" Redraws the y-axis on map after setting the values from sideview options dialog box\"\"\"\n\n self.checknconvert()\n vaxis = self.settings_dict[\"vertical_axis\"]\n if vaxis == \"pressure\":\n # Compute the position of major and minor ticks. 
Major ticks are labelled.\n major_ticks = self._pres_maj[(self._pres_maj <= self.p_bot) & (self._pres_maj >= self.p_top)]\n minor_ticks = self._pres_min[(self._pres_min <= self.p_bot) & (self._pres_min >= self.p_top)]\n labels = [f\"{int(_x / 100)}\"\n if (_x / 100) - int(_x / 100) == 0 else f\"{float(_x / 100)}\" for _x in major_ticks]\n if len(labels) > 20:\n labels = [\"\" if x.split(\".\")[-1][0] in \"975\" else x for x in labels]\n elif len(labels) > 10:\n labels = [\"\" if x.split(\".\")[-1][0] in \"9\" else x for x in labels]\n self.ax.set_ylabel(\"pressure (hPa)\")\n elif vaxis == \"pressure altitude\":\n bot_km = thermolib.pressure2flightlevel(self.p_bot) * 0.03048\n top_km = thermolib.pressure2flightlevel(self.p_top) * 0.03048\n ma_dist, mi_dist = 4, 1.0\n if (top_km - bot_km) <= 20:\n ma_dist, mi_dist = 1, 0.5\n elif (top_km - bot_km) <= 40:\n ma_dist, mi_dist = 2, 0.5\n major_heights = np.arange(0, top_km + 1, ma_dist)\n minor_heights = np.arange(0, top_km + 1, mi_dist)\n major_ticks = thermolib.flightlevel2pressure_a(major_heights / 0.03048)\n minor_ticks = thermolib.flightlevel2pressure_a(minor_heights / 0.03048)\n labels = major_heights\n self.ax.set_ylabel(\"pressure altitude (km)\")\n elif vaxis == \"flight level\":\n bot_km = thermolib.pressure2flightlevel(self.p_bot) * 0.03048\n top_km = thermolib.pressure2flightlevel(self.p_top) * 0.03048\n ma_dist, mi_dist = 50, 10\n if (top_km - bot_km) <= 10:\n ma_dist, mi_dist = 20, 10\n elif (top_km - bot_km) <= 40:\n ma_dist, mi_dist = 40, 10\n major_fl = np.arange(0, 2132, ma_dist)\n minor_fl = np.arange(0, 2132, mi_dist)\n major_ticks = thermolib.flightlevel2pressure_a(major_fl)\n minor_ticks = thermolib.flightlevel2pressure_a(minor_fl)\n labels = major_fl\n self.ax.set_ylabel(\"flight level (hft)\")\n else:\n raise RuntimeError(\"Unsupported vertical axis type: '{}'\".format(vaxis))\n\n # Draw ticks and tick labels.\n self.ax.set_yticks(minor_ticks, minor=True)\n self.ax.set_yticks(major_ticks, minor=False)\n self.ax.set_yticklabels([], minor=True, fontsize=10)\n self.ax.set_yticklabels(labels, minor=False, fontsize=10)\n self.ax.set_ylim(self.p_bot, self.p_top)\n\n def setup_side_view(self):\n \"\"\"Set up a vertical section view.\n\n Vertical cross section code (log-p axis etc.) taken from\n mss_batch_production/visualisation/mpl_vsec.py.\n \"\"\"\n self.checknconvert()\n\n ax = self.ax\n self.fig.subplots_adjust(left=0.08, right=0.96, top=0.9, bottom=0.14)\n\n ax.set_title(\"vertical flight profile\", horizontalalignment=\"left\", x=0)\n ax.set_yscale(\"log\")\n\n # Set axis limits and draw grid for major ticks.\n ax.set_ylim(self.p_bot, self.p_top)\n ax.grid(b=True)\n\n ax.set_xlabel(\"lat/lon\")\n self.redraw_yaxis()\n\n def clear_figure(self):\n logging.debug(\"path of side view has changed.. removing invalidated \"\n \"image (if existent) and redrawing.\")\n if self.image is not None:\n self.image.remove()\n self.image = None\n self.ax.set_title(\"vertical flight profile\", horizontalalignment=\"left\", x=0)\n self.ax.figure.canvas.draw()\n\n def redraw_xaxis(self, lats, lons, times):\n \"\"\"Redraw the x-axis of the side view on path changes. Also remove\n a vertical section image if one exists, as it is invalid after\n a path change.\n \"\"\"\n logging.debug(\"redrawing x-axis\")\n\n # Re-label x-axis.\n self.ax.set_xlim(0, len(lats) - 1)\n # Set xticks so that they display lat/lon. 
Plot \"numlabels\" labels.\n lat_inds = np.arange(len(lats))\n tick_index_step = len(lat_inds) // self.numlabels\n self.ax.set_xticks(lat_inds[::tick_index_step])\n if self.waypoints_model is not None and self.waypoints_model.performance_settings[\"visible\"]:\n self.ax.set_xticklabels([\"{:2.1f}, {:2.1f}\\n{}Z\".format(d[0], d[1], d[2].strftime(\"%H:%M\"))\n for d in zip(lats[::tick_index_step],\n lons[::tick_index_step],\n times[::tick_index_step])],\n rotation=25, fontsize=10, horizontalalignment=\"right\")\n else:\n self.ax.set_xticklabels([\"{:2.1f}, {:2.1f}\".format(d[0], d[1])\n for d in zip(lats[::tick_index_step],\n lons[::tick_index_step],\n times[::tick_index_step])],\n rotation=25, fontsize=10, horizontalalignment=\"right\")\n\n for _line in self.ceiling_alt:\n _line.remove()\n self.ceiling_alt = []\n if self.waypoints_model is not None and self.waypoints_interactor is not None:\n vertices = self.waypoints_interactor.pathpatch.get_path().vertices\n vx, vy = list(zip(*vertices))\n wpd = self.waypoints_model.all_waypoint_data()\n xs, ys = [], []\n aircraft = self.waypoints_model.performance_settings[\"aircraft\"]\n for i in range(len(wpd) - 1):\n weight = np.linspace(wpd[i].weight, wpd[i + 1].weight, 5, endpoint=False)\n ceil = [aircraft.get_ceiling_altitude(_w) for _w in weight]\n xs.extend(np.linspace(vx[i], vx[i + 1], 5, endpoint=False))\n ys.extend(ceil)\n xs.append(vx[-1])\n ys.append(aircraft.get_ceiling_altitude(wpd[-1].weight))\n\n self.ceiling_alt = self.ax.plot(\n xs, thermolib.flightlevel2pressure_a(np.asarray(ys)),\n color=\"k\", ls=\"--\")\n self.update_ceiling(\n self.settings_dict[\"draw_ceiling\"] and self.waypoints_model.performance_settings[\"visible\"],\n self.settings_dict[\"colour_ceiling\"])\n\n self.draw()\n\n def set_vertical_extent(self, pbot, ptop):\n \"\"\"Set the vertical extent of the view to the specified pressure\n values (hPa) and redraw the plot.\n \"\"\"\n self.checknconvert()\n changed = False\n if self.p_bot != pbot * 100:\n self.p_bot = pbot * 100\n changed = True\n if self.p_top != ptop * 100:\n self.p_top = ptop * 100\n changed = True\n if changed:\n if self.image is not None:\n self.image.remove()\n self.image = None\n self.setup_side_view()\n self.waypoints_interactor.redraw_figure()\n else:\n self.redraw_yaxis()\n\n def get_vertical_extent(self):\n \"\"\"Returns the bottom and top pressure (hPa) of the plot.\n \"\"\"\n self.checknconvert()\n\n return (self.p_bot // 100), (self.p_top // 100)\n\n def draw_flight_levels(self):\n \"\"\"Draw horizontal lines indicating the altitude of the flight levels.\n \"\"\"\n # Remove currently displayed flight level artists.\n for artist in self.fl_label_list:\n artist.remove()\n self.fl_label_list = []\n # Plot lines indicating flight level altitude.\n ax = self.ax\n for level in self.flightlevels:\n pressure = thermolib.flightlevel2pressure(level)\n self.fl_label_list.append(ax.axhline(pressure, color='k'))\n self.fl_label_list.append(ax.text(0.1, pressure, \"FL{:d}\".format(level)))\n self.draw()\n\n def get_flight_levels(self):\n \"\"\"\n \"\"\"\n return self.flightlevels\n\n def set_flight_levels(self, flightlevels):\n \"\"\"\n \"\"\"\n self.flightlevels = flightlevels\n self.draw_flight_levels()\n\n def set_flight_levels_visible(self, visible):\n \"\"\"Toggle the visibility of the flight level lines.\n \"\"\"\n for gxelement in self.fl_label_list:\n gxelement.set_visible(visible)\n self.draw()\n\n def update_ceiling(self, visible, color):\n \"\"\"Toggle the visibility of the flight level lines.\n 
\"\"\"\n for line in self.ceiling_alt:\n line.set_color(color)\n line.set_visible(visible)\n self.draw()\n\n def get_settings(self):\n \"\"\"Returns a dictionary containing settings regarding the side view\n appearance.\n \"\"\"\n return self.settings_dict\n\n def set_settings(self, settings):\n \"\"\"Apply settings to view.\n \"\"\"\n if settings is not None:\n self.settings_dict.update(settings)\n settings = self.settings_dict\n self.set_flight_levels(settings[\"flightlevels\"])\n self.set_vertical_extent(*settings[\"vertical_extent\"])\n self.set_flight_levels_visible(settings[\"draw_flightlevels\"])\n self.update_ceiling(\n settings[\"draw_ceiling\"] and (\n self.waypoints_model is not None and\n self.waypoints_model.performance_settings[\"visible\"]),\n settings[\"colour_ceiling\"])\n\n if self.waypoints_interactor is not None:\n self.waypoints_interactor.set_vertices_visible(\n settings[\"draw_flighttrack\"])\n self.waypoints_interactor.set_path_color(\n line_color=settings[\"colour_ft_vertices\"],\n marker_facecolor=settings[\"colour_ft_waypoints\"],\n patch_facecolor=settings[\"colour_ft_fill\"])\n self.waypoints_interactor.set_patch_visible(\n settings[\"fill_flighttrack\"])\n self.waypoints_interactor.set_labels_visible(\n settings[\"label_flighttrack\"])\n\n self.settings_dict = settings\n\n def getBBOX(self):\n \"\"\"Get the bounding box of the view (returns a 4-tuple\n x1, y1(p_bot[hPa]), x2, y2(p_top[hPa])).\n \"\"\"\n # Get the bounding box of the current view\n # (bbox = llcrnrlon, llcrnrlat, urcrnrlon, urcrnrlat; i.e. for the side\n # view bbox = x1, y1(p_bot), x2, y2(p_top)).\n axis = self.ax.axis()\n\n # Get the number of (great circle) interpolation points and the\n # number of labels along the x-axis.\n if self.waypoints_interactor is not None:\n num_interpolation_points = \\\n self.waypoints_interactor.get_num_interpolation_points()\n num_labels = self.numlabels\n\n # Return a tuple (num_interpolation_points, p_bot[hPa],\n # num_labels, p_top[hPa]) as BBOX.\n bbox = (num_interpolation_points, (axis[2] / 100),\n num_labels, (axis[3] / 100))\n return bbox\n\n def draw_legend(self, img):\n if img is not None:\n logging.error(\"Legends not supported in SideView mode!\")\n raise NotImplementedError\n\n def draw_image(self, img):\n \"\"\"Draw the image img on the current plot.\n\n NOTE: The image is plotted in a separate axes object that is located\n below the axes that display the flight profile. This is necessary\n because imshow() does not work with logarithmic axes.\n \"\"\"\n logging.debug(\"plotting vertical section image..\")\n ix, iy = img.size\n logging.debug(\" image size is %dx%d px, format is '%s'\", ix, iy, img.format)\n # Test if the image axes exist. 
If not, create them.\n if self.imgax is None:\n # Disable old white figure background so that the new underlying\n # axes become visible.\n self.ax.patch.set_facecolor(\"None\")\n # self.mpl.canvas.ax.patch.set_alpha(0.5)\n\n # Add new axes to the plot (imshow doesn't work with logarithmic axes).\n ax_bbox = self.ax.get_position()\n # Main axes instance of mplwidget has zorder 99.\n self.imgax = self.fig.add_axes(ax_bbox, frameon=True,\n xticks=[], yticks=[],\n label=\"ax2\", zorder=0)\n\n # If an image is currently displayed, remove it from the plot.\n if self.image is not None:\n self.image.remove()\n\n # Plot the new image in the image axes and adjust the axes limits.\n self.image = self.imgax.imshow(\n img, interpolation=\"nearest\", aspect=\"auto\", origin=PIL_IMAGE_ORIGIN)\n self.imgax.set_xlim(0, ix - 1)\n self.imgax.set_ylim(iy - 1, 0)\n self.draw()\n logging.debug(\"done.\")\n\n def checknconvert(self):\n \"\"\" Checks for current units of axis and convert the upper and lower limit\n to pa(pascals) for the internal computation by code \"\"\"\n\n if self.settings_dict[\"vertical_axis\"] == \"pressure altitude\":\n self.p_bot = thermolib.flightlevel2pressure(self.settings_dict[\"vertical_extent\"][0] * 32.80)\n self.p_top = thermolib.flightlevel2pressure(self.settings_dict[\"vertical_extent\"][1] * 32.80)\n elif self.settings_dict[\"vertical_axis\"] == \"flight level\":\n self.p_bot = thermolib.flightlevel2pressure(self.settings_dict[\"vertical_extent\"][0])\n self.p_top = thermolib.flightlevel2pressure(self.settings_dict[\"vertical_extent\"][1])\n\n\nclass MplSideViewWidget(MplNavBarWidget):\n \"\"\"MplNavBarWidget using an MplSideViewCanvas as the Matplotlib\n view instance.\n \"\"\"\n\n def __init__(self, parent=None):\n super(MplSideViewWidget, self).__init__(\n sideview=True, parent=parent, canvas=MplSideViewCanvas())\n # Disable some elements of the Matplotlib navigation toolbar.\n # Available actions: Home, Back, Forward, Pan, Zoom, Subplots,\n # Customize, Save, Insert Waypoint, Delete Waypoint\n actions = self.navbar.actions()\n for action in actions:\n if action.text() in [\"Home\", \"Back\", \"Forward\", \"Pan\", \"Zoom\",\n \"Subplots\", \"Customize\"]:\n action.setEnabled(False)\n\n\nclass MplTopViewCanvas(MplCanvas):\n \"\"\"Specialised MplCanvas that draws a top view (map), together with a\n flight track, trajectories and other items.\n \"\"\"\n\n redrawn = QtCore.pyqtSignal(name=\"redrawn\")\n\n def __init__(self, settings=None):\n \"\"\"\n \"\"\"\n super(MplTopViewCanvas, self).__init__()\n self.waypoints_interactor = None\n self.satoverpasspatch = []\n self.kmloverlay = None\n self.map = None\n self.basename = \"topview\"\n\n # Axes and image object to display the legend graphic, if available.\n self.legax = None\n self.legimg = None\n\n # Set map appearance from parameter or, if not specified, to default\n # values.\n self.set_map_appearance(settings)\n\n # Progress dialog to inform the user about map redraws.\n self.pdlg = QtWidgets.QProgressDialog(\"redrawing map...\", \"Cancel\", 0, 10, self)\n self.pdlg.close()\n\n def init_map(self, model=None, **kwargs):\n \"\"\"Set up the map view.\n \"\"\"\n ax = self.ax\n self.map = mpl_map.MapCanvas(appearance=self.get_map_appearance(),\n resolution=\"l\", area_thresh=1000., ax=ax,\n **kwargs)\n ax.set_autoscale_on(False)\n ax.set_title(\"Top view\", horizontalalignment=\"left\", x=0)\n self.draw() # necessary?\n\n if model:\n self.set_waypoints_model(model)\n\n def set_waypoints_model(self, model):\n \"\"\"Set the 
WaypointsTableModel defining the flight track.\n If no model had been set before, create a new interactor object on the\n model to let the user interactively move the altitude of the waypoints.\n \"\"\"\n self.waypoints_model = model\n if self.waypoints_interactor:\n self.waypoints_interactor.set_waypoints_model(model)\n else:\n # Create a path interactor object. The interactor object connects\n # itself to the change() signals of the flight track data model.\n appearance = self.get_map_appearance()\n self.waypoints_interactor = mpl_pi.HPathInteractor(\n self.map, self.waypoints_model,\n linecolor=appearance[\"colour_ft_vertices\"],\n markerfacecolor=appearance[\"colour_ft_waypoints\"])\n self.waypoints_interactor.set_vertices_visible(appearance[\"draw_flighttrack\"])\n\n def redraw_map(self, kwargs_update=None):\n \"\"\"Redraw map canvas.\n\n Executed on clicked() of btMapRedraw.\n\n See MapCanvas.update_with_coordinate_change(). After the map redraw,\n coordinates of all objects overlain on the map have to be updated.\n \"\"\"\n # remove legend\n self.draw_legend(None)\n\n # Show the progress dialog, since the retrieval can take a few seconds.\n self.pdlg.setValue(0)\n self.pdlg.show()\n QtWidgets.QApplication.processEvents()\n\n logging.debug(\"redrawing map\")\n\n # 1) STORE COORDINATES OF NON-MAP OBJECTS IN LAT/LON.\n\n # (Currently none.)\n self.pdlg.setValue(1)\n QtWidgets.QApplication.processEvents()\n\n # 2) UPDATE MAP.\n self.map.update_with_coordinate_change(kwargs_update)\n self.draw() # this one is required to trigger a\n # drawevent to update the background\n # in waypoints_interactor()\n\n self.pdlg.setValue(5)\n QtWidgets.QApplication.processEvents()\n\n # 3) UPDATE COORDINATES OF NON-MAP OBJECTS.\n self.pdlg.setValue(8)\n QtWidgets.QApplication.processEvents()\n\n for segment in self.satoverpasspatch:\n segment.update()\n\n if self.kmloverlay:\n self.kmloverlay.update()\n\n self.draw_metadata(\"Top view\")\n\n # Update in case of a projection change\n self.waypoints_interactor.update()\n\n self.pdlg.setValue(10)\n QtWidgets.QApplication.processEvents()\n\n logging.debug(\"finished redrawing map\")\n self.pdlg.close()\n\n # Emit signal so other parts of the module can react to a redraw event.\n self.redrawn.emit()\n\n def get_crs(self):\n \"\"\"Get the coordinate reference system of the displayed map.\n \"\"\"\n return self.map.crs\n\n def getBBOX(self):\n \"\"\"\n Get the bounding box of the map\n (returns a 4-tuple llx, lly, urx, ury) in degree or meters.\n \"\"\"\n\n axis = self.ax.axis()\n\n if self.map.bbox_units == \"degree\":\n # Convert the current axis corners to lat/lon coordinates.\n axis0, axis2 = self.map(axis[0], axis[2], inverse=True)\n axis1, axis3 = self.map(axis[1], axis[3], inverse=True)\n bbox = (axis0, axis2, axis1, axis3)\n\n elif self.map.bbox_units.startswith(\"meter\"):\n center_x, center_y = self.map(\n *(float(_x) for _x in self.map.bbox_units[6:-1].split(\",\")))\n bbox = (axis[0] - center_x, axis[2] - center_y, axis[1] - center_x, axis[3] - center_y)\n\n else:\n bbox = axis[0], axis[2], axis[1], axis[3]\n\n return bbox\n\n def clear_figure(self):\n logging.debug(\"Removing image\")\n if self.map.image is not None:\n self.map.image.remove()\n self.map.image = None\n self.ax.set_title(\"Top view\", horizontalalignment=\"left\", x=0)\n self.ax.figure.canvas.draw()\n\n def draw_image(self, img):\n \"\"\"Draw the image img on the current plot.\n \"\"\"\n logging.debug(\"plotting image..\")\n self.wms_image = self.map.imshow(img, 
interpolation=\"nearest\", origin=PIL_IMAGE_ORIGIN)\n # NOTE: imshow always draws the images to the lowest z-level of the\n # plot.\n # See these mailing list entries:\n # http://www.mail-archive.com/[email protected]/msg05955.html\n # http://old.nabble.com/Re%3A--Matplotlib-users--imshow-zorder-tt19047314.html#a19047314\n #\n # Question: Is this an issue for us or do we always want the images in the back\n # anyhow? At least we need to remove filled continents here.\n # self.map.set_fillcontinents_visible(False)\n # ** UPDATE 2011/01/14 ** seems to work with version 1.0!\n logging.debug(\"done.\")\n\n def draw_legend(self, img):\n \"\"\"Draw the legend graphics img on the current plot.\n\n Adds new axes to the plot that accomodate the legend.\n \"\"\"\n # If the method is called with a \"None\" image, the current legend\n # graphic should be removed (if one exists).\n if self.legimg is not None:\n logging.debug(\"removing image %s\", self.legimg)\n self.legimg.remove()\n self.legimg = None\n\n if img is not None:\n # The size of the legend axes needs to be given in relative figure\n # coordinates. To determine those from the legend graphics size in\n # pixels, we need to determine the size of the currently displayed\n # figure in pixels.\n figsize_px = self.fig.get_size_inches() * self.fig.get_dpi()\n ax_extent_x = float(img.size[0]) / figsize_px[0]\n ax_extent_y = float(img.size[1]) / figsize_px[1]\n\n # If no legend axes have been created, do so now.\n if self.legax is None:\n # Main axes instance of mplwidget has zorder 99.\n self.legax = self.fig.add_axes([1 - ax_extent_x, 0.01, ax_extent_x, ax_extent_y],\n frameon=False,\n xticks=[], yticks=[],\n label=\"ax2\", zorder=0)\n self.legax.patch.set_facecolor(\"None\")\n\n # If axes exist, adjust their position.\n else:\n self.legax.set_position([1 - ax_extent_x, 0.01, ax_extent_x, ax_extent_y])\n\n # Plot the new legimg in the legax axes.\n self.legimg = self.legax.imshow(img, origin=PIL_IMAGE_ORIGIN, aspect=\"equal\", interpolation=\"nearest\")\n self.draw()\n # required so that it is actually drawn...\n QtWidgets.QApplication.processEvents()\n\n def plot_satellite_overpass(self, segments):\n \"\"\"Plots a satellite track on top of the map.\n \"\"\"\n # If track is currently plotted on the map, remove it.\n for segment in self.satoverpasspatch:\n segment.remove()\n self.satoverpasspatch = []\n\n if segments:\n # Create a new patch.\n self.satoverpasspatch = [\n mpl_map.SatelliteOverpassPatch(self.map, segment)\n for segment in segments]\n self.draw()\n\n def plot_kml(self, kmloverlay):\n \"\"\"Plots a satellite track on top of the map.\n \"\"\"\n if self.kmloverlay:\n # If track is currently plotted on the map, remove it.\n self.kmloverlay.remove()\n if not kmloverlay:\n self.kmloverlay = None\n self.draw()\n if kmloverlay:\n # Create a new patch.\n self.kmloverlay = kmloverlay\n\n def set_map_appearance(self, settings_dict):\n \"\"\"Apply settings from dictionary 'settings_dict' to the view.\n\n If settings is None, apply default settings.\n \"\"\"\n # logging.debug(\"applying map appearance settings %s.\" % settings)\n settings = {\"draw_graticule\": True,\n \"draw_coastlines\": True,\n \"fill_waterbodies\": True,\n \"fill_continents\": True,\n \"draw_flighttrack\": True,\n \"label_flighttrack\": True,\n \"colour_water\": ((153 / 255.), (255 / 255.), (255 / 255.), (255 / 255.)),\n \"colour_land\": ((204 / 255.), (153 / 255.), (102 / 255.), (255 / 255.)),\n \"colour_ft_vertices\": (0, 0, 1, 1),\n \"colour_ft_waypoints\": (1, 0, 0, 1)}\n 
if settings_dict is not None:\n settings.update(settings_dict)\n\n self.appearance_settings = settings\n\n if self.map is not None:\n self.map.set_graticule_visible(settings[\"draw_graticule\"])\n self.map.set_coastlines_visible(settings[\"draw_coastlines\"])\n self.map.set_fillcontinents_visible(visible=settings[\"fill_continents\"],\n land_color=settings[\"colour_land\"],\n lake_color=settings[\"colour_water\"])\n self.map.set_mapboundary_visible(visible=settings[\"fill_waterbodies\"],\n bg_color=settings[\"colour_water\"])\n self.waypoints_interactor.set_path_color(line_color=settings[\"colour_ft_vertices\"],\n marker_facecolor=settings[\"colour_ft_waypoints\"])\n self.waypoints_interactor.set_vertices_visible(settings[\"draw_flighttrack\"])\n self.waypoints_interactor.set_labels_visible(settings[\"label_flighttrack\"])\n\n def set_remote_sensing_appearance(self, settings):\n self.waypoints_interactor.set_remote_sensing(settings[\"reference\"])\n self.waypoints_interactor.set_tangent_visible(settings[\"draw_tangents\"])\n self.waypoints_interactor.set_solar_angle_visible(settings[\"show_solar_angle\"])\n\n self.waypoints_interactor.redraw_path()\n\n def get_map_appearance(self):\n \"\"\"\n \"\"\"\n return self.appearance_settings\n\n\nclass MplTopViewWidget(MplNavBarWidget):\n \"\"\"MplNavBarWidget using an MplSideViewCanvas as the Matplotlib\n view instance.\n \"\"\"\n\n def __init__(self, parent=None):\n super(MplTopViewWidget, self).__init__(\n sideview=False, parent=parent, canvas=MplTopViewCanvas())\n # Disable some elements of the Matplotlib navigation toolbar.\n # Available actions: Home, Back, Forward, Pan, Zoom, Subplots,\n # Customize, Save\n actions = self.navbar.actions()\n for action in actions:\n if action.text() in [\"Subplots\", \"Customize\"]:\n action.setEnabled(False)\n elif action.text() in [\"Home\", \"Back\", \"Forward\"]:\n action.triggered.connect(self.historyEvent)\n\n def historyEvent(self):\n \"\"\"Slot to react to clicks on one of the history buttons in the\n navigation toolbar. Redraws the image.\n \"\"\"\n self.canvas.redraw_map()\n" ]
[ [ "numpy.allclose" ], [ "numpy.arange", "numpy.array" ], [ "matplotlib.rcParams.get", "numpy.linspace", "matplotlib.figure.Figure", "numpy.asarray", "numpy.arange", "matplotlib.cbook._topmost_artist" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jenhaoyang/datumaro
[ "add81ddb59502362fa65fa07e5bc4d8c9f61afde", "add81ddb59502362fa65fa07e5bc4d8c9f61afde", "add81ddb59502362fa65fa07e5bc4d8c9f61afde" ]
[ "tests/test_dataset.py", "tests/test_voc_format.py", "tests/test_icdar_format.py" ]
[ "from unittest import TestCase\nimport os\nimport os.path as osp\n\nimport numpy as np\n\nfrom datumaro.components.annotation import (\n AnnotationType, Bbox, Caption, Label, LabelCategories, Mask, Points,\n Polygon, PolyLine,\n)\nfrom datumaro.components.converter import Converter\nfrom datumaro.components.dataset import (\n DEFAULT_FORMAT, Dataset, ItemStatus, eager_mode,\n)\nfrom datumaro.components.dataset_filter import (\n DatasetItemEncoder, XPathAnnotationsFilter, XPathDatasetFilter,\n)\nfrom datumaro.components.environment import Environment\nfrom datumaro.components.errors import (\n ConflictingCategoriesError, DatasetNotFoundError, MultipleFormatsMatchError,\n NoMatchingFormatsError, RepeatedItemError, UnknownFormatError,\n)\nfrom datumaro.components.extractor import (\n DEFAULT_SUBSET_NAME, DatasetItem, Extractor, ItemTransform, Transform,\n)\nfrom datumaro.components.launcher import Launcher\nfrom datumaro.components.media import Image\nfrom datumaro.util.test_utils import TestDir, compare_datasets\n\nfrom .requirements import Requirements, mark_requirement\n\n\nclass DatasetTest(TestCase):\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_create_from_extractors(self):\n class SrcExtractor1(Extractor):\n def __iter__(self):\n return iter([\n DatasetItem(id=1, subset='train', annotations=[\n Bbox(1, 2, 3, 4),\n Label(4),\n ]),\n DatasetItem(id=1, subset='val', annotations=[\n Label(4),\n ]),\n ])\n\n class SrcExtractor2(Extractor):\n def __iter__(self):\n return iter([\n DatasetItem(id=1, subset='val', annotations=[\n Label(5),\n ]),\n ])\n\n class DstExtractor(Extractor):\n def __iter__(self):\n return iter([\n DatasetItem(id=1, subset='train', annotations=[\n Bbox(1, 2, 3, 4),\n Label(4),\n ]),\n DatasetItem(id=1, subset='val', annotations=[\n Label(4),\n Label(5),\n ]),\n ])\n\n dataset = Dataset.from_extractors(SrcExtractor1(), SrcExtractor2())\n\n compare_datasets(self, DstExtractor(), dataset)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_create_from_iterable(self):\n class TestExtractor(Extractor):\n def __iter__(self):\n return iter([\n DatasetItem(id=1, subset='train', annotations=[\n Bbox(1, 2, 3, 4, label=2),\n Label(4),\n ]),\n DatasetItem(id=1, subset='val', annotations=[\n Label(3),\n ]),\n ])\n\n def categories(self):\n return { AnnotationType.label: LabelCategories.from_iterable(\n ['a', 'b', 'c', 'd', 'e'])\n }\n\n actual = Dataset.from_iterable([\n DatasetItem(id=1, subset='train', annotations=[\n Bbox(1, 2, 3, 4, label=2),\n Label(4),\n ]),\n DatasetItem(id=1, subset='val', annotations=[\n Label(3),\n ]),\n ], categories=['a', 'b', 'c', 'd', 'e'])\n\n compare_datasets(self, TestExtractor(), actual)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_join_datasets_with_empty_categories(self):\n expected = Dataset.from_iterable([\n DatasetItem(1, annotations=[\n Label(0),\n Bbox(1, 2, 3, 4),\n Caption('hello world'),\n ])\n ], categories=['a'])\n\n src1 = Dataset.from_iterable([\n DatasetItem(1, annotations=[ Bbox(1, 2, 3, 4, label=None) ])\n ], categories=[])\n\n src2 = Dataset.from_iterable([\n DatasetItem(1, annotations=[ Label(0) ])\n ], categories=['a'])\n\n src3 = Dataset.from_iterable([\n DatasetItem(1, annotations=[ Caption('hello world') ])\n ])\n\n actual = Dataset.from_extractors(src1, src2, src3)\n\n compare_datasets(self, expected, actual)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_save_and_load(self):\n source_dataset = Dataset.from_iterable([\n DatasetItem(id=1, 
annotations=[ Label(2) ]),\n ], categories=['a', 'b', 'c'])\n\n with TestDir() as test_dir:\n source_dataset.save(test_dir)\n\n loaded_dataset = Dataset.load(test_dir)\n\n compare_datasets(self, source_dataset, loaded_dataset)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_detect(self):\n env = Environment()\n env.importers.items = {DEFAULT_FORMAT: env.importers[DEFAULT_FORMAT]}\n env.extractors.items = {DEFAULT_FORMAT: env.extractors[DEFAULT_FORMAT]}\n\n dataset = Dataset.from_iterable([\n DatasetItem(id=1, annotations=[ Label(2) ]),\n ], categories=['a', 'b', 'c'])\n\n with TestDir() as test_dir:\n dataset.save(test_dir)\n\n detected_format = Dataset.detect(test_dir, env=env)\n\n self.assertEqual(DEFAULT_FORMAT, detected_format)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_detect_and_import(self):\n env = Environment()\n env.importers.items = {DEFAULT_FORMAT: env.importers[DEFAULT_FORMAT]}\n env.extractors.items = {DEFAULT_FORMAT: env.extractors[DEFAULT_FORMAT]}\n\n source_dataset = Dataset.from_iterable([\n DatasetItem(id=1, annotations=[ Label(2) ]),\n ], categories=['a', 'b', 'c'])\n\n with TestDir() as test_dir:\n source_dataset.save(test_dir)\n\n imported_dataset = Dataset.import_from(test_dir, env=env)\n\n self.assertEqual(imported_dataset.data_path, test_dir)\n self.assertEqual(imported_dataset.format, DEFAULT_FORMAT)\n compare_datasets(self, source_dataset, imported_dataset)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_report_no_dataset_found(self):\n env = Environment()\n env.importers.items = {\n DEFAULT_FORMAT: env.importers[DEFAULT_FORMAT],\n }\n env.extractors.items = {\n DEFAULT_FORMAT: env.extractors[DEFAULT_FORMAT],\n }\n\n with TestDir() as test_dir, self.assertRaises(DatasetNotFoundError):\n Dataset.import_from(test_dir, DEFAULT_FORMAT, env=env)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_report_multiple_formats_match(self):\n env = Environment()\n env.importers.items = {\n 'a': env.importers[DEFAULT_FORMAT],\n 'b': env.importers[DEFAULT_FORMAT],\n }\n env.extractors.items = {\n 'a': env.extractors[DEFAULT_FORMAT],\n 'b': env.extractors[DEFAULT_FORMAT],\n }\n\n source_dataset = Dataset.from_iterable([\n DatasetItem(id=1, annotations=[ Label(2) ]),\n ], categories=['a', 'b', 'c'])\n\n with TestDir() as test_dir:\n source_dataset.save(test_dir)\n\n with self.assertRaises(MultipleFormatsMatchError):\n Dataset.import_from(test_dir, env=env)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_report_no_matching_formats(self):\n env = Environment()\n env.importers.items = {}\n env.extractors.items = {}\n\n source_dataset = Dataset.from_iterable([\n DatasetItem(id=1, annotations=[ Label(2) ]),\n ], categories=['a', 'b', 'c'])\n\n with TestDir() as test_dir:\n source_dataset.save(test_dir)\n\n with self.assertRaises(NoMatchingFormatsError):\n Dataset.import_from(test_dir, env=env)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_report_unknown_format_requested(self):\n env = Environment()\n env.importers.items = {}\n env.extractors.items = {}\n\n source_dataset = Dataset.from_iterable([\n DatasetItem(id=1, annotations=[ Label(2) ]),\n ], categories=['a', 'b', 'c'])\n\n with TestDir() as test_dir:\n source_dataset.save(test_dir)\n\n with self.assertRaises(UnknownFormatError):\n Dataset.import_from(test_dir, format='custom', env=env)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_export_by_string_format_name(self):\n env = 
Environment()\n env.converters.items = {'qq': env.converters[DEFAULT_FORMAT]}\n\n dataset = Dataset.from_iterable([\n DatasetItem(id=1, annotations=[ Label(2) ]),\n ], categories=['a', 'b', 'c'], env=env)\n\n with TestDir() as test_dir:\n dataset.export(format='qq', save_dir=test_dir)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_remember_export_options(self):\n dataset = Dataset.from_iterable([\n DatasetItem(id=1, image=np.ones((1, 2, 3))),\n ], categories=['a'])\n\n with TestDir() as test_dir:\n dataset.save(test_dir, save_images=True)\n dataset.put(dataset.get(1)) # mark the item modified for patching\n\n image_path = osp.join(test_dir, 'images', 'default', '1.jpg')\n os.remove(image_path)\n\n dataset.save(test_dir)\n\n self.assertEqual({'save_images': True}, dataset.options)\n self.assertTrue(osp.isfile(image_path))\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_compute_length_when_created_from_scratch(self):\n dataset = Dataset()\n\n dataset.put(DatasetItem(1))\n dataset.put(DatasetItem(2))\n dataset.put(DatasetItem(3))\n dataset.remove(1)\n\n self.assertEqual(2, len(dataset))\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_compute_length_when_created_from_extractor(self):\n class TestExtractor(Extractor):\n def __iter__(self):\n yield from [\n DatasetItem(1),\n DatasetItem(2),\n DatasetItem(3),\n ]\n\n dataset = Dataset.from_extractors(TestExtractor())\n\n self.assertEqual(3, len(dataset))\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_compute_length_when_created_from_sequence(self):\n dataset = Dataset.from_iterable([\n DatasetItem(1),\n DatasetItem(2),\n DatasetItem(3),\n ])\n\n self.assertEqual(3, len(dataset))\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_transform_by_string_name(self):\n expected = Dataset.from_iterable([\n DatasetItem(id=1, attributes={'qq': 1}),\n ])\n\n class TestTransform(ItemTransform):\n def transform_item(self, item):\n return self.wrap_item(item, attributes={'qq': 1})\n\n env = Environment()\n env.transforms.register('qq', TestTransform)\n\n dataset = Dataset.from_iterable([ DatasetItem(id=1) ], env=env)\n\n actual = dataset.transform('qq')\n\n compare_datasets(self, expected, actual)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_transform(self):\n expected = Dataset.from_iterable([\n DatasetItem(id=1, attributes={'qq': 1}),\n ])\n\n class TestTransform(ItemTransform):\n def transform_item(self, item):\n return self.wrap_item(item, attributes={'qq': 1})\n\n dataset = Dataset.from_iterable([ DatasetItem(id=1) ])\n\n actual = dataset.transform(TestTransform)\n\n compare_datasets(self, expected, actual)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_join_annotations(self):\n a = Dataset.from_iterable([\n DatasetItem(id=1, subset='train', annotations=[\n Label(1, id=3),\n Label(2, attributes={ 'x': 1 }),\n ])\n ], categories=['a', 'b', 'c', 'd'])\n\n b = Dataset.from_iterable([\n DatasetItem(id=1, subset='train', annotations=[\n Label(2, attributes={ 'x': 1 }),\n Label(3, id=4),\n ])\n ], categories=['a', 'b', 'c', 'd'])\n\n expected = Dataset.from_iterable([\n DatasetItem(id=1, subset='train', annotations=[\n Label(1, id=3),\n Label(2, attributes={ 'x': 1 }),\n Label(3, id=4),\n ])\n ], categories=['a', 'b', 'c', 'd'])\n\n merged = Dataset.from_extractors(a, b)\n\n compare_datasets(self, expected, merged)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def 
test_cant_join_different_categories(self):\n s1 = Dataset.from_iterable([], categories=['a', 'b'])\n s2 = Dataset.from_iterable([], categories=['b', 'a'])\n\n with self.assertRaises(ConflictingCategoriesError):\n Dataset.from_extractors(s1, s2)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_join_datasets(self):\n s1 = Dataset.from_iterable([ DatasetItem(0), DatasetItem(1) ])\n s2 = Dataset.from_iterable([ DatasetItem(1), DatasetItem(2) ])\n expected = Dataset.from_iterable([\n DatasetItem(0), DatasetItem(1), DatasetItem(2)\n ])\n\n actual = Dataset.from_extractors(s1, s2)\n\n compare_datasets(self, expected, actual)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_track_modifications_on_addition(self):\n dataset = Dataset.from_iterable([\n DatasetItem(1),\n DatasetItem(2),\n ])\n\n self.assertFalse(dataset.is_modified)\n\n dataset.put(DatasetItem(3, subset='a'))\n\n self.assertTrue(dataset.is_modified)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_track_modifications_on_removal(self):\n dataset = Dataset.from_iterable([\n DatasetItem(1),\n DatasetItem(2),\n ])\n\n self.assertFalse(dataset.is_modified)\n\n dataset.remove(1)\n\n self.assertTrue(dataset.is_modified)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_create_patch(self):\n expected = Dataset.from_iterable([\n DatasetItem(2),\n DatasetItem(3, subset='a')\n ])\n\n dataset = Dataset.from_iterable([\n DatasetItem(1),\n DatasetItem(2),\n ])\n dataset.put(DatasetItem(2))\n dataset.put(DatasetItem(3, subset='a'))\n dataset.remove(1)\n\n patch = dataset.get_patch()\n\n self.assertEqual({\n ('1', DEFAULT_SUBSET_NAME): ItemStatus.removed,\n ('2', DEFAULT_SUBSET_NAME): ItemStatus.added,\n ('3', 'a'): ItemStatus.added,\n }, patch.updated_items)\n\n self.assertEqual({\n 'default': ItemStatus.modified,\n 'a': ItemStatus.modified,\n }, patch.updated_subsets)\n\n self.assertEqual(2, len(patch.data))\n self.assertEqual(None, patch.data.get(1))\n self.assertEqual(dataset.get(2), patch.data.get(2))\n self.assertEqual(dataset.get(3, 'a'), patch.data.get(3, 'a'))\n\n compare_datasets(self, expected, dataset)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_create_patch_when_cached(self):\n expected = Dataset.from_iterable([\n DatasetItem(2),\n DatasetItem(3, subset='a')\n ])\n\n dataset = Dataset.from_iterable([\n DatasetItem(1),\n DatasetItem(2),\n ])\n dataset.init_cache()\n dataset.put(DatasetItem(2))\n dataset.put(DatasetItem(3, subset='a'))\n dataset.remove(1)\n\n patch = dataset.get_patch()\n\n self.assertEqual({\n ('1', DEFAULT_SUBSET_NAME): ItemStatus.removed,\n\n # Item was not changed from the original one.\n # TODO: add item comparison and remove this line\n ('2', DEFAULT_SUBSET_NAME): ItemStatus.modified,\n\n ('3', 'a'): ItemStatus.added,\n }, patch.updated_items)\n\n self.assertEqual({\n 'default': ItemStatus.modified,\n 'a': ItemStatus.modified,\n }, patch.updated_subsets)\n\n self.assertEqual(2, len(patch.data))\n self.assertEqual(None, patch.data.get(1))\n self.assertEqual(dataset.get(2), patch.data.get(2))\n self.assertEqual(dataset.get(3, 'a'), patch.data.get(3, 'a'))\n\n compare_datasets(self, expected, dataset)\n\n @mark_requirement(Requirements.DATUM_BUG_257)\n def test_can_create_patch_when_transforms_mixed(self):\n expected = Dataset.from_iterable([\n DatasetItem(2),\n DatasetItem(3, subset='a')\n ])\n\n dataset = Dataset.from_iterable([\n DatasetItem(1),\n DatasetItem(2),\n ])\n\n class Remove1(Transform):\n def 
__iter__(self):\n for item in self._extractor:\n if item.id != '1':\n yield item\n\n class Add3(Transform):\n def __iter__(self):\n for item in self._extractor:\n if item.id == '2':\n yield item\n yield DatasetItem(3, subset='a')\n\n dataset.transform(Remove1)\n dataset.transform(Add3)\n\n patch = dataset.get_patch()\n\n self.assertEqual({\n ('1', DEFAULT_SUBSET_NAME): ItemStatus.removed,\n ('2', DEFAULT_SUBSET_NAME): ItemStatus.modified,\n ('3', 'a'): ItemStatus.added,\n }, patch.updated_items)\n\n self.assertEqual({\n 'default': ItemStatus.modified,\n 'a': ItemStatus.modified,\n }, patch.updated_subsets)\n\n self.assertEqual(2, len(patch.data))\n self.assertEqual(None, patch.data.get(1))\n self.assertEqual(dataset.get(2), patch.data.get(2))\n self.assertEqual(dataset.get(3, 'a'), patch.data.get(3, 'a'))\n\n compare_datasets(self, expected, dataset)\n\n @mark_requirement(Requirements.DATUM_BUG_257)\n def test_can_create_patch_when_transforms_chained(self):\n expected = Dataset.from_iterable([\n DatasetItem(2),\n DatasetItem(3, subset='a')\n ])\n\n class TestExtractor(Extractor):\n iter_called = 0\n def __iter__(self):\n yield from [\n DatasetItem(1),\n DatasetItem(2),\n ]\n\n __class__.iter_called += 1\n\n class Remove1(Transform):\n iter_called = 0\n def __iter__(self):\n for item in self._extractor:\n if item.id != '1':\n yield item\n\n __class__.iter_called += 1\n\n class Add3(Transform):\n iter_called = 0\n def __iter__(self):\n yield from self._extractor\n yield DatasetItem(3, subset='a')\n\n __class__.iter_called += 1\n\n dataset = Dataset.from_extractors(TestExtractor())\n dataset.transform(Remove1)\n dataset.transform(Add3)\n\n patch = dataset.get_patch()\n\n self.assertEqual({\n ('1', DEFAULT_SUBSET_NAME): ItemStatus.removed,\n ('2', DEFAULT_SUBSET_NAME): ItemStatus.modified,\n ('3', 'a'): ItemStatus.added,\n }, patch.updated_items)\n\n self.assertEqual({\n 'default': ItemStatus.modified,\n 'a': ItemStatus.modified,\n }, patch.updated_subsets)\n\n self.assertEqual(2, len(patch.data))\n self.assertEqual(None, patch.data.get(1))\n self.assertEqual(dataset.get(2), patch.data.get(2))\n self.assertEqual(dataset.get(3, 'a'), patch.data.get(3, 'a'))\n\n self.assertEqual(TestExtractor.iter_called, 2) # 1 for items, 1 for list\n self.assertEqual(Remove1.iter_called, 1)\n self.assertEqual(Add3.iter_called, 1)\n\n compare_datasets(self, expected, dataset)\n\n @mark_requirement(Requirements.DATUM_BUG_257)\n def test_can_create_patch_when_transforms_intermixed_with_direct_ops(self):\n expected = Dataset.from_iterable([\n DatasetItem(3, subset='a'),\n DatasetItem(4),\n DatasetItem(5),\n ])\n\n class TestExtractor(Extractor):\n iter_called = 0\n def __iter__(self):\n yield from [\n DatasetItem(1),\n DatasetItem(2),\n ]\n\n __class__.iter_called += 1\n\n class Remove1(Transform):\n iter_called = 0\n def __iter__(self):\n for item in self._extractor:\n if item.id != '1':\n yield item\n\n __class__.iter_called += 1\n\n class Add3(Transform):\n iter_called = 0\n def __iter__(self):\n yield from self._extractor\n yield DatasetItem(3, subset='a')\n\n __class__.iter_called += 1\n\n dataset = Dataset.from_extractors(TestExtractor())\n dataset.init_cache()\n dataset.put(DatasetItem(4))\n dataset.transform(Remove1)\n dataset.put(DatasetItem(5))\n dataset.remove(2)\n dataset.transform(Add3)\n\n patch = dataset.get_patch()\n\n self.assertEqual({\n ('1', DEFAULT_SUBSET_NAME): ItemStatus.removed,\n ('2', DEFAULT_SUBSET_NAME): ItemStatus.removed,\n ('3', 'a'): ItemStatus.added,\n ('4', DEFAULT_SUBSET_NAME): 
ItemStatus.added,\n ('5', DEFAULT_SUBSET_NAME): ItemStatus.added,\n }, patch.updated_items)\n\n self.assertEqual({\n 'default': ItemStatus.modified,\n 'a': ItemStatus.modified,\n }, patch.updated_subsets)\n\n self.assertEqual(3, len(patch.data))\n\n self.assertEqual(None, patch.data.get(1))\n self.assertEqual(None, patch.data.get(2))\n self.assertEqual(dataset.get(3, 'a'), patch.data.get(3, 'a'))\n self.assertEqual(dataset.get(4), patch.data.get(4))\n self.assertEqual(dataset.get(5), patch.data.get(5))\n\n self.assertEqual(TestExtractor.iter_called, 1)\n self.assertEqual(Remove1.iter_called, 1)\n self.assertEqual(Add3.iter_called, 1)\n\n compare_datasets(self, expected, dataset)\n\n @mark_requirement(Requirements.DATUM_BUG_257)\n def test_can_create_patch_when_local_transforms_stacked(self):\n expected = Dataset.from_iterable([\n DatasetItem(4),\n DatasetItem(5),\n ])\n\n class TestExtractor(Extractor):\n iter_called = 0\n def __iter__(self):\n yield from [\n DatasetItem(1),\n DatasetItem(2),\n ]\n\n __class__.iter_called += 1\n\n class ShiftIds(ItemTransform):\n def transform_item(self, item):\n return item.wrap(id=int(item.id) + 1)\n\n dataset = Dataset.from_extractors(TestExtractor())\n dataset.remove(2)\n dataset.transform(ShiftIds)\n dataset.transform(ShiftIds)\n dataset.transform(ShiftIds)\n dataset.put(DatasetItem(5))\n\n patch = dataset.get_patch()\n\n self.assertEqual({\n ('1', DEFAULT_SUBSET_NAME): ItemStatus.removed,\n ('2', DEFAULT_SUBSET_NAME): ItemStatus.removed,\n ('4', DEFAULT_SUBSET_NAME): ItemStatus.added,\n ('5', DEFAULT_SUBSET_NAME): ItemStatus.added,\n }, patch.updated_items)\n\n self.assertEqual({\n 'default': ItemStatus.modified,\n }, patch.updated_subsets)\n\n self.assertEqual(2, len(patch.data))\n\n self.assertEqual(None, patch.data.get(1))\n self.assertEqual(None, patch.data.get(2))\n self.assertEqual(None, patch.data.get(3))\n self.assertEqual(dataset.get(4), patch.data.get(4))\n self.assertEqual(dataset.get(5), patch.data.get(5))\n\n self.assertEqual(TestExtractor.iter_called, 1)\n\n compare_datasets(self, expected, dataset)\n\n @mark_requirement(Requirements.DATUM_BUG_257)\n def test_can_create_patch_when_transforms_chained_and_source_cached(self):\n expected = Dataset.from_iterable([\n DatasetItem(2),\n DatasetItem(3, subset='a')\n ])\n\n class TestExtractor(Extractor):\n iter_called = 0\n def __iter__(self):\n yield from [\n DatasetItem(1),\n DatasetItem(2),\n ]\n\n __class__.iter_called += 1\n\n class Remove1(Transform):\n iter_called = 0\n def __iter__(self):\n for item in self._extractor:\n if item.id != '1':\n yield item\n\n __class__.iter_called += 1\n\n class Add3(Transform):\n iter_called = 0\n def __iter__(self):\n yield from self._extractor\n yield DatasetItem(3, subset='a')\n\n __class__.iter_called += 1\n\n dataset = Dataset.from_extractors(TestExtractor())\n dataset.init_cache()\n dataset.transform(Remove1)\n dataset.transform(Add3)\n\n patch = dataset.get_patch()\n\n self.assertEqual({\n ('1', DEFAULT_SUBSET_NAME): ItemStatus.removed,\n ('2', DEFAULT_SUBSET_NAME): ItemStatus.modified, # TODO: remove this\n ('3', 'a'): ItemStatus.added,\n }, patch.updated_items)\n\n self.assertEqual({\n 'default': ItemStatus.modified,\n 'a': ItemStatus.modified,\n }, patch.updated_subsets)\n\n self.assertEqual(2, len(patch.data))\n self.assertEqual(None, patch.data.get(1))\n self.assertEqual(dataset.get(2), patch.data.get(2))\n self.assertEqual(dataset.get(3, 'a'), patch.data.get(3, 'a'))\n\n self.assertEqual(TestExtractor.iter_called, 1) # 1 for items and list\n 
self.assertEqual(Remove1.iter_called, 1)\n self.assertEqual(Add3.iter_called, 1)\n\n compare_datasets(self, expected, dataset)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_do_lazy_put_and_remove(self):\n iter_called = False\n class TestExtractor(Extractor):\n def __iter__(self):\n nonlocal iter_called\n iter_called = True\n return iter([\n DatasetItem(1),\n DatasetItem(2),\n ])\n dataset = Dataset.from_extractors(TestExtractor())\n\n self.assertFalse(dataset.is_cache_initialized)\n\n dataset.put(DatasetItem(3))\n dataset.remove(DatasetItem(1))\n\n self.assertFalse(dataset.is_cache_initialized)\n self.assertFalse(iter_called)\n\n dataset.init_cache()\n\n self.assertTrue(dataset.is_cache_initialized)\n self.assertTrue(iter_called)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_put(self):\n dataset = Dataset()\n\n dataset.put(DatasetItem(1))\n\n self.assertTrue((1, '') in dataset)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_do_lazy_get_on_updated_item(self):\n iter_called = False\n class TestExtractor(Extractor):\n def __iter__(self):\n nonlocal iter_called\n iter_called = True\n return iter([\n DatasetItem(1),\n DatasetItem(2),\n ])\n dataset = Dataset.from_extractors(TestExtractor())\n\n dataset.put(DatasetItem(2))\n\n self.assertTrue((2, '') in dataset)\n self.assertFalse(iter_called)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_switch_eager_and_lazy_with_cm_global(self):\n iter_called = False\n class TestExtractor(Extractor):\n def __iter__(self):\n nonlocal iter_called\n iter_called = True\n return iter([\n DatasetItem(1),\n DatasetItem(2),\n ])\n\n with eager_mode():\n Dataset.from_extractors(TestExtractor())\n\n self.assertTrue(iter_called)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_switch_eager_and_lazy_with_cm_local(self):\n iter_called = False\n class TestExtractor(Extractor):\n def __iter__(self):\n nonlocal iter_called\n iter_called = True\n yield from [\n DatasetItem(1),\n DatasetItem(2),\n DatasetItem(3),\n DatasetItem(4),\n ]\n dataset = Dataset.from_extractors(TestExtractor())\n\n with eager_mode(dataset=dataset):\n dataset.select(lambda item: int(item.id) < 3)\n dataset.select(lambda item: int(item.id) < 2)\n\n self.assertTrue(iter_called)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_do_lazy_select(self):\n iter_called = 0\n class TestExtractor(Extractor):\n def __iter__(self):\n nonlocal iter_called\n iter_called += 1\n yield from [\n DatasetItem(1),\n DatasetItem(2),\n DatasetItem(3),\n DatasetItem(4),\n ]\n dataset = Dataset.from_extractors(TestExtractor())\n\n dataset.select(lambda item: int(item.id) < 3)\n dataset.select(lambda item: int(item.id) < 2)\n\n self.assertEqual(iter_called, 0)\n\n self.assertEqual(1, len(dataset))\n\n self.assertEqual(iter_called, 1)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_chain_lazy_transforms(self):\n iter_called = 0\n class TestExtractor(Extractor):\n def __iter__(self):\n nonlocal iter_called\n iter_called += 1\n yield from [\n DatasetItem(1),\n DatasetItem(2),\n DatasetItem(3),\n DatasetItem(4),\n ]\n dataset = Dataset.from_extractors(TestExtractor())\n\n class TestTransform(ItemTransform):\n def transform_item(self, item):\n return self.wrap_item(item, id=int(item.id) + 1)\n\n dataset.transform(TestTransform)\n dataset.transform(TestTransform)\n\n self.assertEqual(iter_called, 0)\n\n self.assertEqual(4, len(dataset))\n self.assertEqual(3, int(min(int(item.id) for 
item in dataset)))\n\n self.assertEqual(iter_called, 1)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_get_len_after_local_transforms(self):\n iter_called = 0\n class TestExtractor(Extractor):\n def __iter__(self):\n nonlocal iter_called\n iter_called += 1\n yield from [\n DatasetItem(1),\n DatasetItem(2),\n DatasetItem(3),\n DatasetItem(4),\n ]\n dataset = Dataset.from_extractors(TestExtractor())\n\n class TestTransform(ItemTransform):\n def transform_item(self, item):\n return self.wrap_item(item, id=int(item.id) + 1)\n\n dataset.transform(TestTransform)\n dataset.transform(TestTransform)\n\n self.assertEqual(iter_called, 0)\n\n self.assertEqual(4, len(dataset))\n\n self.assertEqual(iter_called, 1)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_get_len_after_nonlocal_transforms(self):\n iter_called = 0\n class TestExtractor(Extractor):\n def __iter__(self):\n nonlocal iter_called\n iter_called += 1\n yield from [\n DatasetItem(1),\n DatasetItem(2),\n DatasetItem(3),\n DatasetItem(4),\n ]\n dataset = Dataset.from_extractors(TestExtractor())\n\n class TestTransform(Transform):\n def __iter__(self):\n for item in self._extractor:\n yield self.wrap_item(item, id=int(item.id) + 1)\n\n dataset.transform(TestTransform)\n dataset.transform(TestTransform)\n\n self.assertEqual(iter_called, 0)\n\n self.assertEqual(4, len(dataset))\n\n self.assertEqual(iter_called, 2)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_get_subsets_after_local_transforms(self):\n iter_called = 0\n class TestExtractor(Extractor):\n def __iter__(self):\n nonlocal iter_called\n iter_called += 1\n yield from [\n DatasetItem(1),\n DatasetItem(2),\n DatasetItem(3),\n DatasetItem(4),\n ]\n dataset = Dataset.from_extractors(TestExtractor())\n\n class TestTransform(ItemTransform):\n def transform_item(self, item):\n return self.wrap_item(item, id=int(item.id) + 1, subset='a')\n\n dataset.transform(TestTransform)\n dataset.transform(TestTransform)\n\n self.assertEqual(iter_called, 0)\n\n self.assertEqual({'a'}, set(dataset.subsets()))\n\n self.assertEqual(iter_called, 1)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_get_subsets_after_nonlocal_transforms(self):\n iter_called = 0\n class TestExtractor(Extractor):\n def __iter__(self):\n nonlocal iter_called\n iter_called += 1\n yield from [\n DatasetItem(1),\n DatasetItem(2),\n DatasetItem(3),\n DatasetItem(4),\n ]\n dataset = Dataset.from_extractors(TestExtractor())\n\n class TestTransform(Transform):\n def __iter__(self):\n for item in self._extractor:\n yield self.wrap_item(item, id=int(item.id) + 1, subset='a')\n\n dataset.transform(TestTransform)\n dataset.transform(TestTransform)\n\n self.assertEqual(iter_called, 0)\n\n self.assertEqual({'a'}, set(dataset.subsets()))\n\n self.assertEqual(iter_called, 2)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_raises_when_repeated_items_in_source(self):\n dataset = Dataset.from_iterable([DatasetItem(0), DatasetItem(0)])\n\n with self.assertRaises(RepeatedItemError):\n dataset.init_cache()\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_check_item_existence(self):\n dataset = Dataset.from_iterable([\n DatasetItem(0, subset='a'), DatasetItem(1)\n ])\n\n self.assertTrue(DatasetItem(0, subset='a') in dataset)\n self.assertFalse(DatasetItem(0, subset='b') in dataset)\n self.assertTrue((0, 'a') in dataset)\n self.assertFalse((0, 'b') in dataset)\n self.assertTrue(1 in dataset)\n self.assertFalse(0 in dataset)\n\n 
@mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_put_with_id_override(self):\n dataset = Dataset.from_iterable([])\n\n dataset.put(DatasetItem(0, subset='a'), id=2, subset='b')\n\n self.assertTrue((2, 'b') in dataset)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_compute_cache_with_empty_source(self):\n dataset = Dataset.from_iterable([])\n dataset.put(DatasetItem(2))\n\n dataset.init_cache()\n\n self.assertTrue(2 in dataset)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_cant_do_partial_caching_in_get_when_default(self):\n iter_called = 0\n class TestExtractor(Extractor):\n def __iter__(self):\n nonlocal iter_called\n iter_called += 1\n return iter([\n DatasetItem(1),\n DatasetItem(2),\n DatasetItem(3),\n DatasetItem(4),\n ])\n\n dataset = Dataset.from_extractors(TestExtractor())\n\n dataset.get(3)\n dataset.get(4)\n\n self.assertEqual(1, iter_called)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_do_partial_caching_in_get_when_redefined(self):\n iter_called = 0\n get_called = 0\n class TestExtractor(Extractor):\n def __iter__(self):\n nonlocal iter_called\n iter_called += 1\n return iter([\n DatasetItem(1),\n DatasetItem(2),\n DatasetItem(3),\n DatasetItem(4),\n ])\n\n def get(self, id, subset=None):\n nonlocal get_called\n get_called += 1\n return DatasetItem(id, subset=subset)\n\n dataset = Dataset.from_extractors(TestExtractor())\n\n dataset.get(3)\n dataset.get(4)\n\n self.assertEqual(0, iter_called)\n self.assertEqual(2, get_called)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_binds_on_save(self):\n dataset = Dataset.from_iterable([DatasetItem(1)])\n\n self.assertFalse(dataset.is_bound)\n\n with TestDir() as test_dir:\n dataset.save(test_dir)\n\n self.assertTrue(dataset.is_bound)\n self.assertEqual(dataset.data_path, test_dir)\n self.assertEqual(dataset.format, DEFAULT_FORMAT)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_flushes_changes_on_save(self):\n dataset = Dataset.from_iterable([])\n dataset.put(DatasetItem(1))\n\n self.assertTrue(dataset.is_modified)\n\n with TestDir() as test_dir:\n dataset.save(test_dir)\n\n self.assertFalse(dataset.is_modified)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_does_not_load_images_on_saving(self):\n # Issue https://github.com/openvinotoolkit/datumaro/issues/177\n # Missing image metadata (size etc.) 
can lead to image loading on\n # dataset save without image saving\n\n called = False\n def test_loader():\n nonlocal called\n called = True\n\n dataset = Dataset.from_iterable([\n DatasetItem(1, image=test_loader)\n ])\n\n with TestDir() as test_dir:\n dataset.save(test_dir)\n\n self.assertFalse(called)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_transform_labels(self):\n expected = Dataset.from_iterable([], categories=['c', 'b'])\n dataset = Dataset.from_iterable([], categories=['a', 'b'])\n\n actual = dataset.transform('remap_labels', {'a': 'c'})\n\n compare_datasets(self, expected, actual)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_run_model(self):\n dataset = Dataset.from_iterable([\n DatasetItem(i, image=np.array([i]))\n for i in range(5)\n ], categories=['label'])\n\n batch_size = 3\n\n expected = Dataset.from_iterable([\n DatasetItem(i, image=np.array([i]), annotations=[\n Label(0, attributes={ 'idx': i % batch_size, 'data': i })\n ])\n for i in range(5)\n ], categories=['label'])\n\n calls = 0\n\n class TestLauncher(Launcher):\n def launch(self, inputs):\n nonlocal calls\n calls += 1\n\n for i, inp in enumerate(inputs):\n yield [ Label(0, attributes={'idx': i, 'data': inp.item()}) ]\n\n model = TestLauncher()\n\n actual = dataset.run_model(model, batch_size=batch_size)\n\n compare_datasets(self, expected, actual, require_images=True)\n self.assertEqual(2, calls)\n\n @mark_requirement(Requirements.DATUM_BUG_259)\n def test_can_filter_items(self):\n dataset = Dataset.from_iterable([\n DatasetItem(id=0, subset='train'),\n DatasetItem(id=1, subset='test'),\n ])\n\n dataset.filter('/item[id > 0]')\n\n self.assertEqual(1, len(dataset))\n\n @mark_requirement(Requirements.DATUM_BUG_257)\n def test_filter_registers_changes(self):\n dataset = Dataset.from_iterable([\n DatasetItem(id=0, subset='train'),\n DatasetItem(id=1, subset='test'),\n ])\n\n dataset.filter('/item[id > 0]')\n\n self.assertEqual({\n ('0', 'train'): ItemStatus.removed,\n ('1', 'test'): ItemStatus.modified, # TODO: remove this line\n }, dataset.get_patch().updated_items)\n\n @mark_requirement(Requirements.DATUM_BUG_259)\n def test_can_filter_annotations(self):\n dataset = Dataset.from_iterable([\n DatasetItem(id=0, subset='train', annotations=[Label(0), Label(1)]),\n DatasetItem(id=1, subset='val', annotations=[Label(2)]),\n DatasetItem(id=2, subset='test', annotations=[Label(0), Label(2)]),\n ], categories=['a', 'b', 'c'])\n\n dataset.filter('/item/annotation[label = \"c\"]',\n filter_annotations=True, remove_empty=True)\n\n self.assertEqual(2, len(dataset))\n\n @mark_requirement(Requirements.DATUM_BUG_259)\n def test_can_filter_items_in_merged_dataset(self):\n dataset = Dataset.from_extractors(\n Dataset.from_iterable([ DatasetItem(id=0, subset='train') ]),\n Dataset.from_iterable([ DatasetItem(id=1, subset='test') ]),\n )\n\n dataset.filter('/item[id > 0]')\n\n self.assertEqual(1, len(dataset))\n\n @mark_requirement(Requirements.DATUM_BUG_259)\n def test_can_filter_annotations_in_merged_dataset(self):\n dataset = Dataset.from_extractors(\n Dataset.from_iterable([\n DatasetItem(id=0, subset='train', annotations=[Label(0)]),\n ], categories=['a', 'b', 'c']),\n Dataset.from_iterable([\n DatasetItem(id=1, subset='val', annotations=[Label(1)]),\n ], categories=['a', 'b', 'c']),\n Dataset.from_iterable([\n DatasetItem(id=2, subset='test', annotations=[Label(2)]),\n ], categories=['a', 'b', 'c']),\n )\n\n dataset.filter('/item/annotation[label = \"c\"]',\n 
filter_annotations=True, remove_empty=True)\n\n self.assertEqual(1, len(dataset))\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_inplace_save_writes_only_updated_data(self):\n class CustomConverter(Converter):\n DEFAULT_IMAGE_EXT = '.jpg'\n\n def apply(self):\n assert osp.isdir(self._save_dir)\n\n for item in self._extractor:\n name = f'{item.subset}_{item.id}'\n with open(osp.join(\n self._save_dir, name + '.txt'), 'w') as f:\n f.write('\\n')\n\n if self._save_images and \\\n item.has_image and item.image.has_data:\n self._save_image(item, name=name)\n\n env = Environment()\n env.converters.items = { 'test': CustomConverter }\n\n with TestDir() as path:\n dataset = Dataset.from_iterable([\n DatasetItem(1, subset='train', image=np.ones((2, 4, 3))),\n DatasetItem(2, subset='train',\n image=Image(path='2.jpg', size=(3, 2))),\n DatasetItem(3, subset='valid', image=np.ones((2, 2, 3))),\n ], categories=[], env=env)\n dataset.export(path, 'test', save_images=True)\n\n dataset.put(DatasetItem(2, subset='train', image=np.ones((3, 2, 3))))\n dataset.remove(3, 'valid')\n dataset.save(save_images=True)\n\n self.assertEqual({\n 'train_1.txt', 'train_1.jpg',\n 'train_2.txt', 'train_2.jpg'\n },\n set(os.listdir(path)))\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_update_overwrites_matching_items(self):\n patch = Dataset.from_iterable([\n DatasetItem(id=1, annotations=[ Bbox(1, 2, 3, 4, label=1) ])\n ], categories=['a', 'b'])\n\n dataset = Dataset.from_iterable([\n DatasetItem(id=1, annotations=[ Bbox(2, 2, 1, 1, label=0) ]),\n DatasetItem(id=2, annotations=[ Bbox(1, 1, 1, 1, label=1) ]),\n ], categories=['a', 'b'])\n\n expected = Dataset.from_iterable([\n DatasetItem(id=1, annotations=[ Bbox(1, 2, 3, 4, label=1) ]),\n DatasetItem(id=2, annotations=[ Bbox(1, 1, 1, 1, label=1) ]),\n ], categories=['a', 'b'])\n\n dataset.update(patch)\n\n compare_datasets(self, expected, dataset)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_update_can_reorder_labels(self):\n patch = Dataset.from_iterable([\n DatasetItem(id=1, annotations=[ Bbox(1, 2, 3, 4, label=1) ])\n ], categories=['b', 'a'])\n\n dataset = Dataset.from_iterable([\n DatasetItem(id=1, annotations=[ Bbox(2, 2, 1, 1, label=0) ])\n ], categories=['a', 'b'])\n\n # Note that label id and categories are changed\n expected = Dataset.from_iterable([\n DatasetItem(id=1, annotations=[ Bbox(1, 2, 3, 4, label=0) ])\n ], categories=['a', 'b'])\n\n dataset.update(patch)\n\n compare_datasets(self, expected, dataset)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_update_can_project_labels(self):\n dataset = Dataset.from_iterable([\n # Must be overridden\n DatasetItem(id=100, annotations=[\n Bbox(1, 2, 3, 3, label=0),\n ]),\n\n # Must be kept\n DatasetItem(id=1, annotations=[\n Bbox(1, 2, 3, 4, label=1)\n ]),\n ], categories=['a', 'b'])\n\n patch = Dataset.from_iterable([\n # Must override\n DatasetItem(id=100, annotations=[\n Bbox(1, 2, 3, 4, label=0), # Label must be remapped\n Bbox(5, 6, 2, 3, label=1), # Label must be remapped\n Bbox(2, 2, 2, 3, label=2), # Will be dropped due to label\n ]),\n\n # Must be added\n DatasetItem(id=2, annotations=[\n Bbox(1, 2, 3, 2, label=1) # Label must be remapped\n ]),\n ], categories=['b', 'a', 'c'])\n\n expected = Dataset.from_iterable([\n DatasetItem(id=100, annotations=[\n Bbox(1, 2, 3, 4, label=1),\n Bbox(5, 6, 2, 3, label=0),\n ]),\n\n DatasetItem(id=1, annotations=[\n Bbox(1, 2, 3, 4, label=1)\n ]),\n\n DatasetItem(id=2, annotations=[\n Bbox(1, 2, 3, 2, 
label=0)\n ]),\n ], categories=['a', 'b'])\n\n dataset.update(patch)\n\n compare_datasets(self, expected, dataset, ignored_attrs='*')\n\n\nclass DatasetItemTest(TestCase):\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_ctor_requires_id(self):\n with self.assertRaises(Exception):\n # pylint: disable=no-value-for-parameter\n DatasetItem()\n # pylint: enable=no-value-for-parameter\n\n @staticmethod\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_ctors_with_image():\n for args in [\n { 'id': 0, 'image': None },\n { 'id': 0, 'image': 'path.jpg' },\n { 'id': 0, 'image': np.array([1, 2, 3]) },\n { 'id': 0, 'image': lambda f: np.array([1, 2, 3]) },\n { 'id': 0, 'image': Image(data=np.array([1, 2, 3])) },\n ]:\n DatasetItem(**args)\n\n\nclass DatasetFilterTest(TestCase):\n @staticmethod\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_item_representations():\n item = DatasetItem(id=1, subset='subset',\n image=np.ones((5, 4, 3)),\n annotations=[\n Label(0, attributes={'a1': 1, 'a2': '2'}, id=1, group=2),\n Caption('hello', id=1),\n Caption('world', group=5),\n Label(2, id=3, attributes={ 'x': 1, 'y': '2' }),\n Bbox(1, 2, 3, 4, label=4, id=4, attributes={ 'a': 1.0 }),\n Bbox(5, 6, 7, 8, id=5, group=5),\n Points([1, 2, 2, 0, 1, 1], label=0, id=5),\n Mask(id=5, image=np.ones((3, 2))),\n Mask(label=3, id=5, image=np.ones((2, 3))),\n PolyLine([1, 2, 3, 4, 5, 6, 7, 8], id=11),\n Polygon([1, 2, 3, 4, 5, 6, 7, 8]),\n ]\n )\n\n encoded = DatasetItemEncoder.encode(item)\n DatasetItemEncoder.to_string(encoded)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_item_filter_can_be_applied(self):\n class TestExtractor(Extractor):\n def __iter__(self):\n for i in range(4):\n yield DatasetItem(id=i, subset='train')\n\n extractor = TestExtractor()\n\n filtered = XPathDatasetFilter(extractor, '/item[id > 1]')\n\n self.assertEqual(2, len(filtered))\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_annotations_filter_can_be_applied(self):\n class SrcExtractor(Extractor):\n def __iter__(self):\n return iter([\n DatasetItem(id=0),\n DatasetItem(id=1, annotations=[\n Label(0),\n Label(1),\n ]),\n DatasetItem(id=2, annotations=[\n Label(0),\n Label(2),\n ]),\n ])\n\n class DstExtractor(Extractor):\n def __iter__(self):\n return iter([\n DatasetItem(id=0),\n DatasetItem(id=1, annotations=[\n Label(0),\n ]),\n DatasetItem(id=2, annotations=[\n Label(0),\n ]),\n ])\n\n extractor = SrcExtractor()\n\n filtered = XPathAnnotationsFilter(extractor,\n '/item/annotation[label_id = 0]')\n\n self.assertListEqual(list(filtered), list(DstExtractor()))\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_annotations_filter_can_remove_empty_items(self):\n source = Dataset.from_iterable([\n DatasetItem(id=0),\n DatasetItem(id=1, annotations=[\n Label(0),\n Label(1),\n ]),\n DatasetItem(id=2, annotations=[\n Label(0),\n Label(2),\n ]),\n ], categories=['a', 'b', 'c'])\n\n expected = Dataset.from_iterable([\n DatasetItem(id=2, annotations=[Label(2)]),\n ], categories=['a', 'b', 'c'])\n\n filtered = XPathAnnotationsFilter(source,\n '/item/annotation[label_id = 2]', remove_empty=True)\n\n compare_datasets(self, expected, filtered)\n", "from collections import OrderedDict\nfrom functools import partial\nfrom unittest import TestCase\nimport os\nimport os.path as osp\n\nimport numpy as np\n\nfrom datumaro.components.annotation import (\n AnnotationType, Bbox, Label, LabelCategories, Mask, MaskCategories,\n)\nfrom datumaro.components.dataset import Dataset\nfrom 
datumaro.components.environment import Environment\nfrom datumaro.components.extractor import DatasetItem, Extractor\nfrom datumaro.components.media import Image\nfrom datumaro.plugins.voc_format.converter import (\n VocActionConverter, VocClassificationConverter, VocConverter,\n VocDetectionConverter, VocLayoutConverter, VocSegmentationConverter,\n)\nfrom datumaro.plugins.voc_format.importer import VocImporter\nfrom datumaro.util.mask_tools import load_mask\nfrom datumaro.util.test_utils import (\n TestDir, check_save_and_load, compare_datasets,\n)\nimport datumaro.plugins.voc_format.format as VOC\n\nfrom .requirements import Requirements, mark_requirement\n\n\nclass VocFormatTest(TestCase):\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_colormap_generator(self):\n reference = np.array([\n [ 0, 0, 0],\n [128, 0, 0],\n [ 0, 128, 0],\n [128, 128, 0],\n [ 0, 0, 128],\n [128, 0, 128],\n [ 0, 128, 128],\n [128, 128, 128],\n [ 64, 0, 0],\n [192, 0, 0],\n [ 64, 128, 0],\n [192, 128, 0],\n [ 64, 0, 128],\n [192, 0, 128],\n [ 64, 128, 128],\n [192, 128, 128],\n [ 0, 64, 0],\n [128, 64, 0],\n [ 0, 192, 0],\n [128, 192, 0],\n [ 0, 64, 128],\n [224, 224, 192], # ignored\n ])\n\n self.assertTrue(np.array_equal(reference, list(VOC.VocColormap.values())))\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_write_and_parse_labelmap(self):\n src_label_map = VOC.make_voc_label_map()\n src_label_map['qq'] = [None, ['part1', 'part2'], ['act1', 'act2']]\n src_label_map['ww'] = [(10, 20, 30), [], ['act3']]\n\n with TestDir() as test_dir:\n file_path = osp.join(test_dir, 'test.txt')\n\n VOC.write_label_map(file_path, src_label_map)\n dst_label_map = VOC.parse_label_map(file_path)\n\n self.assertEqual(src_label_map, dst_label_map)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_write_and_parse_dataset_meta_file(self):\n src_label_map = VOC.make_voc_label_map()\n src_label_map['qq'] = [None, ['part1', 'part2'], ['act1', 'act2']]\n src_label_map['ww'] = [(10, 20, 30), [], ['act3']]\n\n with TestDir() as test_dir:\n VOC.write_meta_file(test_dir, src_label_map)\n dst_label_map = VOC.parse_meta_file(test_dir)\n\n self.assertEqual(src_label_map, dst_label_map)\n\nclass TestExtractorBase(Extractor):\n def _label(self, voc_label):\n return self.categories()[AnnotationType.label].find(voc_label)[0]\n\n def categories(self):\n return VOC.make_voc_categories()\n\n\nDUMMY_DATASET_DIR = osp.join(osp.dirname(__file__), 'assets', 'voc_dataset',\n 'voc_dataset1')\nDUMMY_DATASET2_DIR = osp.join(osp.dirname(__file__), 'assets', 'voc_dataset',\n 'voc_dataset2')\nDUMMY_DATASET3_DIR = osp.join(osp.dirname(__file__), 'assets', 'voc_dataset',\n 'voc_dataset3')\n\nclass VocImportTest(TestCase):\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_import(self):\n class DstExtractor(TestExtractorBase):\n def __iter__(self):\n return iter([\n DatasetItem(id='2007_000001', subset='train',\n image=np.ones((10, 20, 3)),\n annotations=[\n Label(self._label(l.name))\n for l in VOC.VocLabel if l.value % 2 == 1\n ] + [\n Bbox(1, 2, 2, 2, label=self._label('cat'),\n attributes={\n 'pose': VOC.VocPose(1).name,\n 'truncated': True,\n 'difficult': False,\n 'occluded': False,\n },\n id=1, group=1,\n ),\n # Only main boxes denote instances (have ids)\n Mask(image=np.ones([10, 20]),\n label=self._label(VOC.VocLabel(2).name),\n group=1,\n ),\n\n Bbox(4, 5, 2, 2, label=self._label('person'),\n attributes={\n 'truncated': False,\n 'difficult': False,\n 'occluded': False,\n **{\n a.name: 
a.value % 2 == 1\n for a in VOC.VocAction\n }\n },\n id=2, group=2,\n ),\n # Only main boxes denote instances (have ids)\n Bbox(5.5, 6, 2, 2,\n label=self._label(VOC.VocBodyPart(1).name),\n group=2\n ),\n ]\n ),\n\n DatasetItem(id='2007_000002', subset='test',\n image=np.ones((10, 20, 3))),\n ])\n\n dataset = Dataset.import_from(DUMMY_DATASET_DIR, 'voc')\n\n compare_datasets(self, DstExtractor(), dataset)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_import_voc_classification_dataset(self):\n class DstExtractor(TestExtractorBase):\n def __iter__(self):\n return iter([\n DatasetItem(id='2007_000001', subset='train',\n image=np.ones((10, 20, 3)),\n annotations=[\n Label(self._label(l.name))\n for l in VOC.VocLabel if l.value % 2 == 1\n ]),\n\n DatasetItem(id='2007_000002', subset='test',\n image=np.ones((10, 20, 3))),\n ])\n expected_dataset = DstExtractor()\n\n rpath = osp.join('ImageSets', 'Main', 'train.txt')\n matrix = [\n ('voc_classification', '', ''),\n ('voc_classification', 'train', rpath),\n ]\n for format, subset, path in matrix:\n with self.subTest(format=format, subset=subset, path=path):\n if subset:\n expected = expected_dataset.get_subset(subset)\n else:\n expected = expected_dataset\n\n actual = Dataset.import_from(osp.join(DUMMY_DATASET_DIR, path),\n format)\n\n compare_datasets(self, expected, actual, require_images=True)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_import_voc_layout_dataset(self):\n expected_dataset = Dataset.from_iterable([\n DatasetItem(id='2007_000001', subset='train',\n image=np.ones((10, 20, 3)),\n annotations=[\n Bbox(4.0, 5.0, 2.0, 2.0, label=15, id=2, group=2,\n attributes={\n 'difficult': False,\n 'truncated': False,\n 'occluded': False,\n **{\n a.name : a.value % 2 == 1\n for a in VOC.VocAction\n }\n }\n ),\n Bbox(5.5, 6.0, 2.0, 2.0, label=22, group=2),\n ]),\n\n DatasetItem(id='2007_000002', subset='test',\n image=np.ones((10, 20, 3))),\n ], categories=VOC.make_voc_categories())\n\n rpath = osp.join('ImageSets', 'Layout', 'train.txt')\n matrix = [\n ('voc_layout', '', ''),\n ('voc_layout', 'train', rpath),\n ('voc', 'train', rpath),\n ]\n for format, subset, path in matrix:\n with self.subTest(format=format, subset=subset, path=path):\n if subset:\n expected = expected_dataset.get_subset(subset)\n else:\n expected = expected_dataset\n\n actual = Dataset.import_from(osp.join(DUMMY_DATASET_DIR, path),\n format)\n\n compare_datasets(self, expected, actual, require_images=True)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_import_voc_detection_dataset(self):\n expected_dataset = Dataset.from_iterable([\n DatasetItem(id='2007_000001', subset='train',\n image=np.ones((10, 20, 3)),\n annotations=[\n Bbox(1.0, 2.0, 2.0, 2.0, label=8, id=1, group=1,\n attributes={\n 'difficult': False,\n 'truncated': True,\n 'occluded': False,\n 'pose': 'Unspecified'\n }\n ),\n Bbox(4.0, 5.0, 2.0, 2.0, label=15, id=2, group=2,\n attributes={\n 'difficult': False,\n 'truncated': False,\n 'occluded': False,\n **{\n a.name : a.value % 2 == 1\n for a in VOC.VocAction\n }\n }\n ),\n ]),\n\n DatasetItem(id='2007_000002', subset='test',\n image=np.ones((10, 20, 3))),\n ], categories=VOC.make_voc_categories())\n\n rpath = osp.join('ImageSets', 'Main', 'train.txt')\n matrix = [\n ('voc_detection', '', ''),\n ('voc_detection', 'train', rpath),\n ]\n for format, subset, path in matrix:\n with self.subTest(format=format, subset=subset, path=path):\n if subset:\n expected = expected_dataset.get_subset(subset)\n 
else:\n expected = expected_dataset\n\n actual = Dataset.import_from(osp.join(DUMMY_DATASET_DIR, path),\n format)\n\n compare_datasets(self, expected, actual, require_images=True)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_import_voc_segmentation_dataset(self):\n expected_dataset = Dataset.from_iterable([\n DatasetItem(id='2007_000001', subset='train',\n image=np.ones((10, 20, 3)),\n annotations=[\n Mask(image=np.ones([10, 20]), label=2, group=1)\n ]),\n\n DatasetItem(id='2007_000002', subset='test',\n image=np.ones((10, 20, 3))),\n ], categories=VOC.make_voc_categories())\n\n rpath = osp.join('ImageSets', 'Segmentation', 'train.txt')\n matrix = [\n ('voc_segmentation', '', ''),\n ('voc_segmentation', 'train', rpath),\n ('voc', 'train', rpath),\n ]\n for format, subset, path in matrix:\n with self.subTest(format=format, subset=subset, path=path):\n if subset:\n expected = expected_dataset.get_subset(subset)\n else:\n expected = expected_dataset\n\n actual = Dataset.import_from(osp.join(DUMMY_DATASET_DIR, path),\n format)\n\n compare_datasets(self, expected, actual, require_images=True)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_import_voc_action_dataset(self):\n expected_dataset = Dataset.from_iterable([\n DatasetItem(id='2007_000001', subset='train',\n image=np.ones((10, 20, 3)),\n annotations=[\n Bbox(4.0, 5.0, 2.0, 2.0, label=15, id=2, group=2,\n attributes={\n 'difficult': False,\n 'truncated': False,\n 'occluded': False,\n **{\n a.name : a.value % 2 == 1\n for a in VOC.VocAction\n }\n }\n )\n ]),\n\n DatasetItem(id='2007_000002', subset='test',\n image=np.ones((10, 20, 3))),\n ], categories=VOC.make_voc_categories())\n\n rpath = osp.join('ImageSets', 'Action', 'train.txt')\n matrix = [\n ('voc_action', '', ''),\n ('voc_action', 'train', rpath),\n ('voc', 'train', rpath),\n ]\n for format, subset, path in matrix:\n with self.subTest(format=format, subset=subset, path=path):\n if subset:\n expected = expected_dataset.get_subset(subset)\n else:\n expected = expected_dataset\n\n actual = Dataset.import_from(osp.join(DUMMY_DATASET_DIR, path),\n format)\n\n compare_datasets(self, expected, actual, require_images=True)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_detect_voc(self):\n env = Environment()\n\n for path in [DUMMY_DATASET_DIR, DUMMY_DATASET2_DIR]:\n with self.subTest(path=path):\n detected_formats = env.detect_dataset(path)\n self.assertEqual([VocImporter.NAME], detected_formats)\n\n @mark_requirement(Requirements.DATUM_BUG_583)\n def test_can_import_voc_dataset_with_empty_lines_in_subset_lists(self):\n expected_dataset = Dataset.from_iterable([\n DatasetItem(id='2007_000001', subset='train',\n image=np.ones((10, 20, 3)),\n annotations=[\n Bbox(1.0, 2.0, 2.0, 2.0, label=8, id=1, group=1,\n attributes={\n 'difficult': False,\n 'truncated': True,\n 'occluded': False,\n 'pose': 'Unspecified'\n }\n )\n ])\n ], categories=VOC.make_voc_categories())\n\n rpath = osp.join('ImageSets', 'Main', 'train.txt')\n matrix = [\n ('voc_detection', '', ''),\n ('voc_detection', 'train', rpath),\n ]\n for format, subset, path in matrix:\n with self.subTest(format=format, subset=subset, path=path):\n if subset:\n expected = expected_dataset.get_subset(subset)\n else:\n expected = expected_dataset\n\n actual = Dataset.import_from(osp.join(DUMMY_DATASET3_DIR, path),\n format)\n\n compare_datasets(self, expected, actual, require_images=True)\n\n\nclass VocConverterTest(TestCase):\n def _test_save_and_load(self, source_dataset, 
converter, test_dir,\n target_dataset=None, importer_args=None, **kwargs):\n return check_save_and_load(self, source_dataset, converter, test_dir,\n importer='voc',\n target_dataset=target_dataset, importer_args=importer_args, **kwargs)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_save_voc_cls(self):\n class TestExtractor(TestExtractorBase):\n def __iter__(self):\n return iter([\n DatasetItem(id='a/0', subset='a', annotations=[\n Label(1),\n Label(2),\n Label(3),\n ]),\n\n DatasetItem(id=1, subset='b', annotations=[\n Label(4),\n ]),\n ])\n\n with TestDir() as test_dir:\n self._test_save_and_load(TestExtractor(),\n partial(VocClassificationConverter.convert, label_map='voc'),\n test_dir)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_save_voc_det(self):\n class TestExtractor(TestExtractorBase):\n def __iter__(self):\n return iter([\n DatasetItem(id='a/1', subset='a', annotations=[\n Bbox(2, 3, 4, 5, label=2,\n attributes={ 'occluded': True }\n ),\n Bbox(2, 3, 4, 5, label=3,\n attributes={ 'truncated': True },\n ),\n ]),\n\n DatasetItem(id=2, subset='b', annotations=[\n Bbox(5, 4, 6, 5, label=3,\n attributes={ 'difficult': True },\n ),\n ]),\n ])\n\n class DstExtractor(TestExtractorBase):\n def __iter__(self):\n return iter([\n DatasetItem(id='a/1', subset='a', annotations=[\n Bbox(2, 3, 4, 5, label=2, id=1, group=1,\n attributes={\n 'truncated': False,\n 'difficult': False,\n 'occluded': True,\n }\n ),\n Bbox(2, 3, 4, 5, label=3, id=2, group=2,\n attributes={\n 'truncated': True,\n 'difficult': False,\n 'occluded': False,\n },\n ),\n ]),\n\n DatasetItem(id=2, subset='b', annotations=[\n Bbox(5, 4, 6, 5, label=3, id=1, group=1,\n attributes={\n 'truncated': False,\n 'difficult': True,\n 'occluded': False,\n },\n ),\n ]),\n ])\n\n with TestDir() as test_dir:\n self._test_save_and_load(TestExtractor(),\n partial(VocDetectionConverter.convert, label_map='voc'),\n test_dir, target_dataset=DstExtractor())\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_save_voc_segm(self):\n class TestExtractor(TestExtractorBase):\n def __iter__(self):\n return iter([\n DatasetItem(id='a/b/1', subset='a', annotations=[\n # overlapping masks, the first should be truncated\n # the second and third are different instances\n Mask(image=np.array([[0, 0, 0, 1, 0]]), label=3,\n z_order=3),\n Mask(image=np.array([[0, 1, 1, 1, 0]]), label=4,\n z_order=1),\n Mask(image=np.array([[1, 1, 0, 0, 0]]), label=3,\n z_order=2),\n ]),\n ])\n\n class DstExtractor(TestExtractorBase):\n def __iter__(self):\n return iter([\n DatasetItem(id='a/b/1', subset='a', annotations=[\n Mask(image=np.array([[0, 0, 1, 0, 0]]), label=4,\n group=1),\n Mask(image=np.array([[1, 1, 0, 0, 0]]), label=3,\n group=2),\n Mask(image=np.array([[0, 0, 0, 1, 0]]), label=3,\n group=3),\n ]),\n ])\n\n with TestDir() as test_dir:\n self._test_save_and_load(TestExtractor(),\n partial(VocSegmentationConverter.convert, label_map='voc'),\n test_dir, target_dataset=DstExtractor())\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_save_voc_segm_unpainted(self):\n class TestExtractor(TestExtractorBase):\n def __iter__(self):\n return iter([\n DatasetItem(id=1, subset='a', annotations=[\n # overlapping masks, the first should be truncated\n # the second and third are different instances\n Mask(image=np.array([[0, 0, 0, 1, 0]]), label=3,\n z_order=3),\n Mask(image=np.array([[0, 1, 1, 1, 0]]), label=4,\n z_order=1),\n Mask(image=np.array([[1, 1, 0, 0, 0]]), label=3,\n z_order=2),\n ]),\n 
])\n\n class DstExtractor(TestExtractorBase):\n def __iter__(self):\n return iter([\n DatasetItem(id=1, subset='a', annotations=[\n Mask(image=np.array([[0, 0, 1, 0, 0]]), label=4,\n group=1),\n Mask(image=np.array([[1, 1, 0, 0, 0]]), label=3,\n group=2),\n Mask(image=np.array([[0, 0, 0, 1, 0]]), label=3,\n group=3),\n ]),\n ])\n\n with TestDir() as test_dir:\n self._test_save_and_load(TestExtractor(),\n partial(VocSegmentationConverter.convert,\n label_map='voc', apply_colormap=False),\n test_dir, target_dataset=DstExtractor())\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_save_voc_segm_with_many_instances(self):\n def bit(x, y, shape):\n mask = np.zeros(shape)\n mask[y, x] = 1\n return mask\n\n class TestExtractor(TestExtractorBase):\n def __iter__(self):\n return iter([\n DatasetItem(id=1, subset='a', annotations=[\n Mask(image=bit(x, y, shape=[10, 10]),\n label=self._label(VOC.VocLabel(3).name),\n z_order=10 * y + x + 1\n )\n for y in range(10) for x in range(10)\n ]),\n ])\n\n class DstExtractor(TestExtractorBase):\n def __iter__(self):\n return iter([\n DatasetItem(id=1, subset='a', annotations=[\n Mask(image=bit(x, y, shape=[10, 10]),\n label=self._label(VOC.VocLabel(3).name),\n group=10 * y + x + 1\n )\n for y in range(10) for x in range(10)\n ]),\n ])\n\n with TestDir() as test_dir:\n self._test_save_and_load(TestExtractor(),\n partial(VocSegmentationConverter.convert, label_map='voc'),\n test_dir, target_dataset=DstExtractor())\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_save_voc_layout(self):\n class TestExtractor(TestExtractorBase):\n def __iter__(self):\n return iter([\n DatasetItem(id='a/b/1', subset='a', annotations=[\n Bbox(2, 3, 4, 5, label=2, id=1, group=1,\n attributes={\n 'pose': VOC.VocPose(1).name,\n 'truncated': True,\n 'difficult': False,\n 'occluded': False,\n }\n ),\n Bbox(2, 3, 1, 1, label=self._label(\n VOC.VocBodyPart(1).name), group=1),\n Bbox(5, 4, 3, 2, label=self._label(\n VOC.VocBodyPart(2).name), group=1),\n ]),\n ])\n\n with TestDir() as test_dir:\n self._test_save_and_load(TestExtractor(),\n partial(VocLayoutConverter.convert, label_map='voc'), test_dir)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_save_voc_action(self):\n class TestExtractor(TestExtractorBase):\n def __iter__(self):\n return iter([\n DatasetItem(id='a/b/1', subset='a', annotations=[\n Bbox(2, 3, 4, 5, label=2,\n attributes={\n 'truncated': True,\n VOC.VocAction(1).name: True,\n VOC.VocAction(2).name: True,\n }\n ),\n Bbox(5, 4, 3, 2, label=self._label('person'),\n attributes={\n 'truncated': True,\n VOC.VocAction(1).name: True,\n VOC.VocAction(2).name: True,\n }\n ),\n ]),\n ])\n\n class DstExtractor(TestExtractorBase):\n def __iter__(self):\n return iter([\n DatasetItem(id='a/b/1', subset='a', annotations=[\n Bbox(2, 3, 4, 5, label=2,\n id=1, group=1, attributes={\n 'truncated': True,\n 'difficult': False,\n 'occluded': False,\n # no attributes here in the label categories\n }\n ),\n Bbox(5, 4, 3, 2, label=self._label('person'),\n id=2, group=2, attributes={\n 'truncated': True,\n 'difficult': False,\n 'occluded': False,\n VOC.VocAction(1).name: True,\n VOC.VocAction(2).name: True,\n **{\n a.name: False for a in VOC.VocAction\n if a.value not in {1, 2}\n }\n }\n ),\n ]),\n ])\n\n with TestDir() as test_dir:\n self._test_save_and_load(TestExtractor(),\n partial(VocActionConverter.convert,\n label_map='voc', allow_attributes=False), test_dir,\n target_dataset=DstExtractor())\n\n 
@mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_save_dataset_with_no_subsets(self):\n class TestExtractor(TestExtractorBase):\n def __iter__(self):\n return iter([\n DatasetItem(id=1),\n DatasetItem(id=2),\n ])\n\n for task in [None] + list(VOC.VocTask):\n with self.subTest(subformat=task), TestDir() as test_dir:\n self._test_save_and_load(TestExtractor(),\n partial(VocConverter.convert, label_map='voc', tasks=task),\n test_dir)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_save_dataset_with_cyrillic_and_spaces_in_filename(self):\n class TestExtractor(TestExtractorBase):\n def __iter__(self):\n return iter([\n DatasetItem(id='кириллица с пробелом 1'),\n DatasetItem(id='кириллица с пробелом 2',\n image=np.ones([4, 5, 3])),\n ])\n\n for task in [None] + list(VOC.VocTask):\n with self.subTest(subformat=task), TestDir() as test_dir:\n self._test_save_and_load(TestExtractor(),\n partial(VocConverter.convert, label_map='voc', tasks=task,\n save_images=True),\n test_dir, require_images=True)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_save_dataset_with_images(self):\n class TestExtractor(TestExtractorBase):\n def __iter__(self):\n return iter([\n DatasetItem(id=1, subset='a', image=np.ones([4, 5, 3])),\n DatasetItem(id=2, subset='a', image=np.ones([5, 4, 3])),\n\n DatasetItem(id=3, subset='b', image=np.ones([2, 6, 3])),\n ])\n\n for task in [None] + list(VOC.VocTask):\n with self.subTest(subformat=task), TestDir() as test_dir:\n self._test_save_and_load(TestExtractor(),\n partial(VocConverter.convert, label_map='voc',\n save_images=True, tasks=task),\n test_dir, require_images=True)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_dataset_with_voc_labelmap(self):\n class SrcExtractor(TestExtractorBase):\n def __iter__(self):\n yield DatasetItem(id=1, annotations=[\n Bbox(2, 3, 4, 5, label=self._label('cat'), id=1),\n Bbox(1, 2, 3, 4, label=self._label('non_voc_label'), id=2),\n ])\n\n def categories(self):\n label_cat = LabelCategories()\n label_cat.add(VOC.VocLabel.cat.name)\n label_cat.add('non_voc_label')\n return {\n AnnotationType.label: label_cat,\n }\n\n class DstExtractor(TestExtractorBase):\n def __iter__(self):\n yield DatasetItem(id=1, annotations=[\n # drop non voc label\n Bbox(2, 3, 4, 5, label=self._label('cat'), id=1, group=1,\n attributes={\n 'truncated': False,\n 'difficult': False,\n 'occluded': False,\n }\n ),\n ])\n\n def categories(self):\n return VOC.make_voc_categories()\n\n with TestDir() as test_dir:\n self._test_save_and_load(SrcExtractor(),\n partial(VocConverter.convert, label_map='voc'),\n test_dir, target_dataset=DstExtractor())\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_dataset_with_source_labelmap_undefined(self):\n class SrcExtractor(TestExtractorBase):\n def __iter__(self):\n yield DatasetItem(id=1, annotations=[\n Bbox(2, 3, 4, 5, label=0, id=1),\n Bbox(1, 2, 3, 4, label=1, id=2),\n ])\n\n def categories(self):\n label_cat = LabelCategories()\n label_cat.add('Label_1')\n label_cat.add('label_2')\n return {\n AnnotationType.label: label_cat,\n }\n\n class DstExtractor(TestExtractorBase):\n def __iter__(self):\n yield DatasetItem(id=1, annotations=[\n Bbox(2, 3, 4, 5, label=self._label('Label_1'),\n id=1, group=1, attributes={\n 'truncated': False,\n 'difficult': False,\n 'occluded': False,\n }\n ),\n Bbox(1, 2, 3, 4, label=self._label('label_2'),\n id=2, group=2, attributes={\n 'truncated': False,\n 'difficult': False,\n 'occluded': False,\n }\n ),\n ])\n\n def 
categories(self):\n label_map = OrderedDict()\n label_map['background'] = [None, [], []]\n label_map['Label_1'] = [None, [], []]\n label_map['label_2'] = [None, [], []]\n return VOC.make_voc_categories(label_map)\n\n with TestDir() as test_dir:\n self._test_save_and_load(SrcExtractor(),\n partial(VocConverter.convert, label_map='source'),\n test_dir, target_dataset=DstExtractor())\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_dataset_with_source_labelmap_defined(self):\n class SrcExtractor(TestExtractorBase):\n def __iter__(self):\n yield DatasetItem(id=1, annotations=[\n Bbox(2, 3, 4, 5, label=0, id=1),\n Bbox(1, 2, 3, 4, label=2, id=2),\n ])\n\n def categories(self):\n label_map = OrderedDict()\n label_map['label_1'] = [(1, 2, 3), [], []]\n label_map['background'] = [(0, 0, 0), [], []] # can be not 0\n label_map['label_2'] = [(3, 2, 1), [], []]\n return VOC.make_voc_categories(label_map)\n\n class DstExtractor(TestExtractorBase):\n def __iter__(self):\n yield DatasetItem(id=1, annotations=[\n Bbox(2, 3, 4, 5, label=self._label('label_1'),\n id=1, group=1, attributes={\n 'truncated': False,\n 'difficult': False,\n 'occluded': False,\n }\n ),\n Bbox(1, 2, 3, 4, label=self._label('label_2'),\n id=2, group=2, attributes={\n 'truncated': False,\n 'difficult': False,\n 'occluded': False,\n }\n ),\n ])\n\n def categories(self):\n label_map = OrderedDict()\n label_map['background'] = [(0, 0, 0), [], []]\n label_map['label_1'] = [(1, 2, 3), [], []]\n label_map['label_2'] = [(3, 2, 1), [], []]\n return VOC.make_voc_categories(label_map)\n\n with TestDir() as test_dir:\n self._test_save_and_load(SrcExtractor(),\n partial(VocConverter.convert, label_map='source'),\n test_dir, target_dataset=DstExtractor())\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_dataset_with_save_dataset_meta_file(self):\n label_map = OrderedDict([\n ('background', [(0, 0, 0), [], []]),\n ('label_1', [(1, 2, 3), ['part1', 'part2'], ['act1', 'act2']]),\n ('label_2', [(3, 2, 1), ['part3'], []])\n ])\n\n class SrcExtractor(TestExtractorBase):\n def __iter__(self):\n yield DatasetItem(id=1, annotations=[\n Bbox(2, 3, 4, 5, label=1, id=1),\n ])\n\n def categories(self):\n return VOC.make_voc_categories(label_map)\n\n class DstExtractor(TestExtractorBase):\n def __iter__(self):\n yield DatasetItem(id=1, annotations=[\n Bbox(2, 3, 4, 5, label=self._label('label_1'),\n id=1, group=1, attributes={\n 'act1': False,\n 'act2': False,\n 'truncated': False,\n 'difficult': False,\n 'occluded': False,\n }\n ),\n ])\n\n def categories(self):\n return VOC.make_voc_categories(label_map)\n\n with TestDir() as test_dir:\n self._test_save_and_load(SrcExtractor(),\n partial(VocConverter.convert, label_map=label_map,\n save_dataset_meta=True), test_dir,\n target_dataset=DstExtractor())\n self.assertTrue(osp.isfile(osp.join(test_dir, 'dataset_meta.json')))\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_dataset_with_fixed_labelmap(self):\n class SrcExtractor(TestExtractorBase):\n def __iter__(self):\n yield DatasetItem(id=1, annotations=[\n Bbox(2, 3, 4, 5, label=self._label('foreign_label'), id=1),\n Bbox(1, 2, 3, 4, label=self._label('label'), id=2, group=2,\n attributes={'act1': True}),\n Bbox(2, 3, 4, 5, label=self._label('label_part1'), group=2),\n Bbox(2, 3, 4, 6, label=self._label('label_part2'), group=2),\n ])\n\n def categories(self):\n label_cat = LabelCategories()\n label_cat.add('foreign_label')\n label_cat.add('label', attributes=['act1', 'act2'])\n label_cat.add('label_part1')\n 
label_cat.add('label_part2')\n return {\n AnnotationType.label: label_cat,\n }\n\n label_map = OrderedDict([\n ('label', [None, ['label_part1', 'label_part2'], ['act1', 'act2']])\n ])\n\n dst_label_map = OrderedDict([\n ('background', [None, [], []]),\n ('label', [None, ['label_part1', 'label_part2'], ['act1', 'act2']])\n ])\n\n class DstExtractor(TestExtractorBase):\n def __iter__(self):\n yield DatasetItem(id=1, annotations=[\n Bbox(1, 2, 3, 4, label=self._label('label'), id=1, group=1,\n attributes={\n 'act1': True,\n 'act2': False,\n 'truncated': False,\n 'difficult': False,\n 'occluded': False,\n }\n ),\n Bbox(2, 3, 4, 5, label=self._label('label_part1'), group=1),\n Bbox(2, 3, 4, 6, label=self._label('label_part2'), group=1),\n ])\n\n def categories(self):\n return VOC.make_voc_categories(dst_label_map)\n\n with TestDir() as test_dir:\n self._test_save_and_load(SrcExtractor(),\n partial(VocConverter.convert, label_map=label_map),\n test_dir, target_dataset=DstExtractor())\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_background_masks_dont_introduce_instances_but_cover_others(self):\n dataset = Dataset.from_iterable([\n DatasetItem(1, image=np.zeros((4, 1, 1)), annotations=[\n Mask([1, 1, 1, 1], label=1, attributes={'z_order': 1}),\n Mask([0, 0, 1, 1], label=2, attributes={'z_order': 2}),\n Mask([0, 0, 1, 1], label=0, attributes={'z_order': 3}),\n ])\n ], categories=['background', 'a', 'b'])\n\n with TestDir() as test_dir:\n VocConverter.convert(dataset, test_dir, apply_colormap=False)\n\n cls_mask = load_mask(\n osp.join(test_dir, 'SegmentationClass', '1.png'))\n inst_mask = load_mask(\n osp.join(test_dir, 'SegmentationObject', '1.png'))\n self.assertTrue(np.array_equal([0, 1], np.unique(cls_mask)))\n self.assertTrue(np.array_equal([0, 1], np.unique(inst_mask)))\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_save_dataset_with_image_info(self):\n class TestExtractor(TestExtractorBase):\n def __iter__(self):\n return iter([\n DatasetItem(id=1, image=Image(path='1.jpg', size=(10, 15))),\n ])\n\n for task in [None] + list(VOC.VocTask):\n with self.subTest(subformat=task), TestDir() as test_dir:\n self._test_save_and_load(TestExtractor(),\n partial(VocConverter.convert, label_map='voc', tasks=task),\n test_dir)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_save_and_load_image_with_arbitrary_extension(self):\n class TestExtractor(TestExtractorBase):\n def __iter__(self):\n return iter([\n DatasetItem(id='q/1', image=Image(path='q/1.JPEG',\n data=np.zeros((4, 3, 3)))),\n DatasetItem(id='a/b/c/2', image=Image(path='a/b/c/2.bmp',\n data=np.zeros((3, 4, 3)))),\n ])\n\n for task in [None] + list(VOC.VocTask):\n with self.subTest(subformat=task), TestDir() as test_dir:\n self._test_save_and_load(TestExtractor(),\n partial(VocConverter.convert, label_map='voc', tasks=task,\n save_images=True),\n test_dir, require_images=True)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_relative_paths(self):\n class TestExtractor(TestExtractorBase):\n def __iter__(self):\n return iter([\n DatasetItem(id='1', image=np.ones((4, 2, 3))),\n DatasetItem(id='subdir1/1', image=np.ones((2, 6, 3))),\n DatasetItem(id='subdir2/1', image=np.ones((5, 4, 3))),\n ])\n\n for task in [None] + list(VOC.VocTask):\n with self.subTest(subformat=task), TestDir() as test_dir:\n self._test_save_and_load(TestExtractor(),\n partial(VocConverter.convert,\n label_map='voc', save_images=True, tasks=task),\n test_dir, require_images=True)\n\n 
@mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_save_attributes(self):\n class TestExtractor(TestExtractorBase):\n def __iter__(self):\n return iter([\n DatasetItem(id='a', annotations=[\n Bbox(2, 3, 4, 5, label=2,\n attributes={ 'occluded': True, 'x': 1, 'y': '2' }\n ),\n ]),\n ])\n\n class DstExtractor(TestExtractorBase):\n def __iter__(self):\n return iter([\n DatasetItem(id='a', annotations=[\n Bbox(2, 3, 4, 5, label=2, id=1, group=1,\n attributes={\n 'truncated': False,\n 'difficult': False,\n 'occluded': True,\n 'x': '1', 'y': '2', # can only read strings\n }\n ),\n ]),\n ])\n\n with TestDir() as test_dir:\n self._test_save_and_load(TestExtractor(),\n partial(VocConverter.convert, label_map='voc'), test_dir,\n target_dataset=DstExtractor())\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_inplace_save_writes_only_updated_data_with_direct_changes(self):\n expected = Dataset.from_iterable([\n DatasetItem(1, subset='a', image=np.ones((1, 2, 3)),\n annotations=[\n # Bbox(0, 0, 0, 0, label=1) # won't find removed anns\n ]),\n\n DatasetItem(2, subset='b', image=np.ones((3, 2, 3)),\n annotations=[\n Bbox(0, 0, 0, 0, label=4, id=1, group=1, attributes={\n 'truncated': False,\n 'difficult': False,\n 'occluded': False,\n })\n ]),\n ], categories={\n AnnotationType.label: LabelCategories.from_iterable(\n ['background', 'a', 'b', 'c', 'd']),\n AnnotationType.mask: MaskCategories(\n colormap=VOC.generate_colormap(5)),\n })\n\n dataset = Dataset.from_iterable([\n DatasetItem(1, subset='a', image=np.ones((1, 2, 3)),\n annotations=[Bbox(0, 0, 0, 0, label=1)]),\n DatasetItem(2, subset='b',\n annotations=[Bbox(0, 0, 0, 0, label=2)]),\n DatasetItem(3, subset='c', image=np.ones((2, 2, 3)),\n annotations=[\n Bbox(0, 0, 0, 0, label=3),\n Mask(np.ones((2, 2)), label=1)\n ]),\n ], categories=['a', 'b', 'c', 'd'])\n\n with TestDir() as path:\n dataset.export(path, 'voc', save_images=True)\n os.unlink(osp.join(path, 'Annotations', '1.xml'))\n os.unlink(osp.join(path, 'Annotations', '2.xml'))\n os.unlink(osp.join(path, 'Annotations', '3.xml'))\n\n dataset.put(DatasetItem(2, subset='b', image=np.ones((3, 2, 3)),\n annotations=[Bbox(0, 0, 0, 0, label=3)]))\n dataset.remove(3, 'c')\n dataset.save(save_images=True)\n\n self.assertEqual({'2.xml'}, # '1.xml' won't be touched\n set(os.listdir(osp.join(path, 'Annotations'))))\n self.assertEqual({'1.jpg', '2.jpg'},\n set(os.listdir(osp.join(path, 'JPEGImages'))))\n self.assertEqual({'a.txt', 'b.txt'},\n set(os.listdir(osp.join(path, 'ImageSets', 'Main'))))\n compare_datasets(self, expected, Dataset.import_from(path, 'voc'),\n require_images=True)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_inplace_save_writes_only_updated_data_with_transforms(self):\n expected = Dataset.from_iterable([\n DatasetItem(3, subset='test', image=np.ones((2, 3, 3)),\n annotations=[\n Bbox(0, 1, 0, 0, label=4, id=1, group=1, attributes={\n 'truncated': False,\n 'difficult': False,\n 'occluded': False,\n })\n ]),\n DatasetItem(4, subset='train', image=np.ones((2, 4, 3)),\n annotations=[\n Bbox(1, 0, 0, 0, label=4, id=1, group=1, attributes={\n 'truncated': False,\n 'difficult': False,\n 'occluded': False,\n }),\n Mask(np.ones((2, 2)), label=2, group=1),\n ]),\n ], categories={\n AnnotationType.label: LabelCategories.from_iterable(\n ['background', 'a', 'b', 'c', 'd']),\n AnnotationType.mask: MaskCategories(\n colormap=VOC.generate_colormap(5)),\n })\n\n dataset = Dataset.from_iterable([\n DatasetItem(1, subset='a', image=np.ones((2, 1, 
3)),\n annotations=[ Bbox(0, 0, 0, 1, label=1) ]),\n DatasetItem(2, subset='b', image=np.ones((2, 2, 3)),\n annotations=[\n Bbox(0, 0, 1, 0, label=2),\n Mask(np.ones((2, 2)), label=1),\n ]),\n DatasetItem(3, subset='b', image=np.ones((2, 3, 3)),\n annotations=[ Bbox(0, 1, 0, 0, label=3) ]),\n DatasetItem(4, subset='c', image=np.ones((2, 4, 3)),\n annotations=[\n Bbox(1, 0, 0, 0, label=3),\n Mask(np.ones((2, 2)), label=1)\n ]),\n ], categories=['a', 'b', 'c', 'd'])\n\n with TestDir() as path:\n dataset.export(path, 'voc', save_images=True)\n\n dataset.filter('/item[id >= 3]')\n dataset.transform('random_split', (('train', 0.5), ('test', 0.5)),\n seed=42)\n dataset.save(save_images=True)\n\n self.assertEqual({'3.xml', '4.xml'},\n set(os.listdir(osp.join(path, 'Annotations'))))\n self.assertEqual({'3.jpg', '4.jpg'},\n set(os.listdir(osp.join(path, 'JPEGImages'))))\n self.assertEqual({'4.png'},\n set(os.listdir(osp.join(path, 'SegmentationClass'))))\n self.assertEqual({'4.png'},\n set(os.listdir(osp.join(path, 'SegmentationObject'))))\n self.assertEqual({'train.txt', 'test.txt'},\n set(os.listdir(osp.join(path, 'ImageSets', 'Main'))))\n self.assertEqual({'train.txt'},\n set(os.listdir(osp.join(path, 'ImageSets', 'Segmentation'))))\n compare_datasets(self, expected, Dataset.import_from(path, 'voc'),\n require_images=True)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_save_dataset_with_no_data_images(self):\n class TestExtractor(TestExtractorBase):\n def __iter__(self):\n return iter([\n DatasetItem(id='frame1', subset='test',\n image=Image(path='frame1.jpg'),\n annotations=[\n Bbox(1.0, 2.0, 3.0, 4.0,\n attributes={\n 'difficult': False,\n 'truncated': False,\n 'occluded': False\n },\n id=1, label=0, group=1\n )\n ]\n )\n ])\n\n def categories(self):\n return VOC.make_voc_categories()\n\n with TestDir() as test_dir:\n self._test_save_and_load(TestExtractor(),\n partial(VocConverter.convert, label_map='voc'), test_dir)\n", "from functools import partial\nfrom unittest import TestCase\nimport os.path as osp\n\nimport numpy as np\n\nfrom datumaro.components.annotation import Bbox, Caption, Mask, Polygon\nfrom datumaro.components.environment import Environment\nfrom datumaro.components.extractor import DatasetItem\nfrom datumaro.components.media import Image\nfrom datumaro.components.project import Dataset\nfrom datumaro.plugins.icdar_format.converter import (\n IcdarTextLocalizationConverter, IcdarTextSegmentationConverter,\n IcdarWordRecognitionConverter,\n)\nfrom datumaro.plugins.icdar_format.extractor import (\n IcdarTextLocalizationImporter, IcdarTextSegmentationImporter,\n IcdarWordRecognitionImporter,\n)\nfrom datumaro.util.test_utils import (\n TestDir, check_save_and_load, compare_datasets,\n)\n\nfrom .requirements import Requirements, mark_requirement\n\nDUMMY_DATASET_DIR = osp.join(osp.dirname(__file__), 'assets', 'icdar_dataset')\n\nclass IcdarImporterTest(TestCase):\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_detect_word_recognition(self):\n detected_formats = Environment().detect_dataset(\n osp.join(DUMMY_DATASET_DIR, 'word_recognition'))\n self.assertEqual([IcdarWordRecognitionImporter.NAME], detected_formats)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_detect_text_localization(self):\n detected_formats = Environment().detect_dataset(\n osp.join(DUMMY_DATASET_DIR, 'text_localization'))\n self.assertEqual([IcdarTextLocalizationImporter.NAME], detected_formats)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n 
def test_can_detect_text_segmentation(self):\n detected_formats = Environment().detect_dataset(\n osp.join(DUMMY_DATASET_DIR, 'text_segmentation'))\n self.assertEqual([IcdarTextSegmentationImporter.NAME], detected_formats)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_import_captions(self):\n expected_dataset = Dataset.from_iterable([\n DatasetItem(id='word_1', subset='train',\n image=np.ones((10, 15, 3)),\n annotations=[\n Caption('PROPER'),\n ]\n ),\n DatasetItem(id='word_2', subset='train',\n image=np.ones((10, 15, 3)),\n annotations=[\n Caption(\"Canon\"),\n ]\n ),\n ])\n\n dataset = Dataset.import_from(\n osp.join(DUMMY_DATASET_DIR, 'word_recognition'),\n 'icdar_word_recognition')\n\n compare_datasets(self, expected_dataset, dataset)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_import_bboxes(self):\n expected_dataset = Dataset.from_iterable([\n DatasetItem(id='img_1', subset='train',\n image=np.ones((10, 15, 3)),\n annotations=[\n Polygon([0, 0, 3, 1, 4, 6, 1, 7],\n attributes={'text': 'FOOD'}),\n ]\n ),\n DatasetItem(id='img_2', subset='train',\n image=np.ones((10, 15, 3)),\n annotations=[\n Bbox(0, 0, 2, 3, attributes={'text': 'RED'}),\n Bbox(3, 3, 2, 3, attributes={'text': 'LION'}),\n ]\n ),\n ])\n\n dataset = Dataset.import_from(\n osp.join(DUMMY_DATASET_DIR, 'text_localization'),\n 'icdar_text_localization')\n\n compare_datasets(self, expected_dataset, dataset)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_import_masks(self):\n expected_dataset = Dataset.from_iterable([\n DatasetItem(id='1', subset='train',\n image=np.ones((2, 5, 3)),\n annotations=[\n Mask(group=0,\n image=np.array([[0, 1, 1, 0, 0], [0, 0, 0, 0, 0]]),\n attributes={ 'index': 0, 'color': '108 225 132',\n 'text': 'F', 'center': '0 1'\n }),\n Mask(group=1,\n image=np.array([[0, 0, 0, 1, 0], [0, 0, 0, 1, 0]]),\n attributes={ 'index': 1, 'color': '82 174 214',\n 'text': 'T', 'center': '1 3'\n }),\n Mask(group=1,\n image=np.array([[0, 0, 0, 0, 0], [0, 0, 0, 0, 1]]),\n attributes={ 'index': 2, 'color': '241 73 144',\n 'text': 'h', 'center': '1 4'\n }),\n ]\n ),\n ])\n\n dataset = Dataset.import_from(\n osp.join(DUMMY_DATASET_DIR, 'text_segmentation'),\n 'icdar_text_segmentation')\n\n compare_datasets(self, expected_dataset, dataset)\n\nclass IcdarConverterTest(TestCase):\n def _test_save_and_load(self, source_dataset, converter, test_dir, importer,\n target_dataset=None, importer_args=None, **kwargs):\n return check_save_and_load(self, source_dataset, converter, test_dir,\n importer,\n target_dataset=target_dataset, importer_args=importer_args, **kwargs)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_save_and_load_captions(self):\n expected_dataset = Dataset.from_iterable([\n DatasetItem(id='a/b/1', subset='train',\n image=np.ones((10, 15, 3)), annotations=[\n Caption('caption 0'),\n ]),\n DatasetItem(id=2, subset='train',\n image=np.ones((10, 15, 3)), annotations=[\n Caption('caption_1'),\n ]),\n ])\n\n with TestDir() as test_dir:\n self._test_save_and_load(expected_dataset,\n partial(IcdarWordRecognitionConverter.convert, save_images=True),\n test_dir, 'icdar_word_recognition')\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_save_and_load_captions_with_no_save_images(self):\n expected_dataset = Dataset.from_iterable([\n DatasetItem(id='a/b/1', subset='train',\n image=np.ones((10, 15, 3)), annotations=[\n Caption('caption 0'),\n ])\n ])\n\n with TestDir() as test_dir:\n 
self._test_save_and_load(expected_dataset,\n partial(IcdarWordRecognitionConverter.convert, save_images=False),\n test_dir, 'icdar_word_recognition')\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_save_and_load_bboxes(self):\n expected_dataset = Dataset.from_iterable([\n DatasetItem(id='a/b/1', subset='train',\n image=np.ones((10, 15, 3)), annotations=[\n Bbox(1, 3, 6, 10),\n Bbox(0, 1, 3, 5, attributes={'text': 'word 0'}),\n ]),\n DatasetItem(id=2, subset='train',\n image=np.ones((10, 15, 3)), annotations=[\n Polygon([0, 0, 3, 0, 4, 7, 1, 8],\n attributes={'text': 'word 1'}),\n Polygon([1, 2, 5, 3, 6, 8, 0, 7]),\n ]),\n DatasetItem(id=3, subset='train',\n image=np.ones((10, 15, 3)), annotations=[\n Polygon([2, 2, 8, 3, 7, 10, 2, 9],\n attributes={'text': 'word_2'}),\n Bbox(0, 2, 5, 9, attributes={'text': 'word_3'}),\n ]),\n ])\n\n with TestDir() as test_dir:\n self._test_save_and_load(expected_dataset,\n partial(IcdarTextLocalizationConverter.convert, save_images=True),\n test_dir, 'icdar_text_localization')\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_save_and_load_bboxes_with_no_save_images(self):\n expected_dataset = Dataset.from_iterable([\n DatasetItem(id=3, subset='train',\n image=np.ones((10, 15, 3)), annotations=[\n Polygon([2, 2, 8, 3, 7, 10, 2, 9],\n attributes={'text': 'word_2'}),\n Bbox(0, 2, 5, 9, attributes={'text': 'word_3'}),\n ]),\n ])\n\n with TestDir() as test_dir:\n self._test_save_and_load(expected_dataset,\n partial(IcdarTextLocalizationConverter.convert, save_images=False),\n test_dir, 'icdar_text_localization')\n\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_save_and_load_masks(self):\n expected_dataset = Dataset.from_iterable([\n DatasetItem(id='a/b/1', subset='train',\n image=np.ones((10, 15, 3)), annotations=[\n Mask(image=np.array([[0, 0, 0, 1, 1]]), group=1,\n attributes={ 'index': 1, 'color': '82 174 214', 'text': 'j',\n 'center': '0 3' }),\n Mask(image=np.array([[0, 1, 1, 0, 0]]), group=1,\n attributes={ 'index': 0, 'color': '108 225 132', 'text': 'F',\n 'center': '0 1' }),\n ]),\n DatasetItem(id=2, subset='train',\n image=np.ones((10, 15, 3)), annotations=[\n Mask(image=np.array([[0, 0, 0, 0, 0, 1]]), group=0,\n attributes={ 'index': 3, 'color': '183 6 28', 'text': ' ',\n 'center': '0 5' }),\n Mask(image=np.array([[1, 0, 0, 0, 0, 0]]), group=1,\n attributes={ 'index': 0, 'color': '108 225 132', 'text': 'L',\n 'center': '0 0' }),\n Mask(image=np.array([[0, 0, 0, 1, 1, 0]]), group=1,\n attributes={ 'index': 1, 'color': '82 174 214', 'text': 'o',\n 'center': '0 3' }),\n Mask(image=np.array([[0, 1, 1, 0, 0, 0]]), group=0,\n attributes={ 'index': 2, 'color': '241 73 144', 'text': 'P',\n 'center': '0 1' }),\n ]),\n ])\n\n with TestDir() as test_dir:\n self._test_save_and_load(expected_dataset,\n partial(IcdarTextSegmentationConverter.convert,\n save_images=True),\n test_dir, 'icdar_text_segmentation')\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_save_and_load_masks_with_no_save_images(self):\n expected_dataset = Dataset.from_iterable([\n DatasetItem(id='a/b/1', subset='train',\n image=np.ones((10, 15, 3)), annotations=[\n Mask(image=np.array([[0, 0, 0, 1, 1]]), group=1,\n attributes={ 'index': 1, 'color': '82 174 214', 'text': 'j',\n 'center': '0 3' }),\n Mask(image=np.array([[0, 1, 1, 0, 0]]), group=1,\n attributes={ 'index': 0, 'color': '108 225 132', 'text': 'F',\n 'center': '0 1' }),\n ]),\n ])\n\n with TestDir() as test_dir:\n 
self._test_save_and_load(expected_dataset,\n partial(IcdarTextSegmentationConverter.convert,\n save_images=False),\n test_dir, 'icdar_text_segmentation')\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_save_and_load_with_no_subsets(self):\n expected_dataset = Dataset.from_iterable([\n DatasetItem(id=1, image=np.ones((8, 8, 3)),\n annotations=[\n Bbox(0, 1, 3, 5),\n ]),\n ])\n\n with TestDir() as test_dir:\n self._test_save_and_load(expected_dataset,\n IcdarTextLocalizationConverter.convert, test_dir,\n 'icdar_text_localization')\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_save_dataset_with_cyrillic_and_spaces_in_filename(self):\n expected_dataset = Dataset.from_iterable([\n DatasetItem(id='кириллица с пробелом',\n image=np.ones((8, 8, 3))),\n ])\n\n for importer, converter in [\n ('icdar_word_recognition', IcdarWordRecognitionConverter),\n ('icdar_text_localization', IcdarTextLocalizationConverter),\n ('icdar_text_segmentation', IcdarTextSegmentationConverter),\n ]:\n with self.subTest(subformat=converter), TestDir() as test_dir:\n self._test_save_and_load(expected_dataset,\n partial(converter.convert, save_images=True),\n test_dir, importer, require_images=True)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_save_and_load_image_with_arbitrary_extension(self):\n expected = Dataset.from_iterable([\n DatasetItem(id='q/1', image=Image(path='q/1.JPEG',\n data=np.zeros((4, 3, 3)))),\n DatasetItem(id='a/b/c/2', image=Image(path='a/b/c/2.bmp',\n data=np.zeros((3, 4, 3)))),\n ])\n\n for importer, converter in [\n ('icdar_word_recognition', IcdarWordRecognitionConverter),\n ('icdar_text_localization', IcdarTextLocalizationConverter),\n ('icdar_text_segmentation', IcdarTextSegmentationConverter),\n ]:\n with self.subTest(subformat=converter), TestDir() as test_dir:\n self._test_save_and_load(expected,\n partial(converter.convert, save_images=True),\n test_dir, importer, require_images=True)\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_save_and_load_captions_with_quotes(self):\n expected_dataset = Dataset.from_iterable([\n DatasetItem(id='1', image=np.ones((5, 5, 3)),\n annotations=[Caption('caption\\\"')]\n )\n ])\n\n with TestDir() as test_dir:\n self._test_save_and_load(expected_dataset,\n partial(IcdarWordRecognitionConverter.convert, save_images=True),\n test_dir, 'icdar_word_recognition')\n\n @mark_requirement(Requirements.DATUM_GENERAL_REQ)\n def test_can_save_and_load_segm_wo_color_attribute(self):\n source_dataset = Dataset.from_iterable([\n DatasetItem(id='1', subset='train',\n image=np.ones((10, 15, 3)), annotations=[\n Mask(image=np.array([[0, 0, 0, 1, 1]]), group=1,\n attributes={'index': 1, 'text': 'j', 'center': '0 3',\n 'color': '0 128 0'}),\n Mask(image=np.array([[0, 1, 1, 0, 0]]), group=1,\n attributes={'index': 0, 'text': 'F', 'center': '0 1'}),\n Mask(image=np.array([[1, 0, 0, 0, 0]]), group=1,\n attributes={'index': 2, 'text': 'i', 'center': '0 2'}),\n ]),\n ])\n\n expected_dataset = Dataset.from_iterable([\n DatasetItem(id='1', subset='train',\n image=np.ones((10, 15, 3)), annotations=[\n Mask(image=np.array([[0, 0, 0, 1, 1]]), group=1,\n attributes={'index': 1, 'text': 'j', 'center': '0 3',\n 'color': '0 128 0'}),\n Mask(image=np.array([[0, 1, 1, 0, 0]]), group=1,\n attributes={'index': 0, 'text': 'F', 'center': '0 1',\n 'color': '128 0 0'}),\n Mask(image=np.array([[1, 0, 0, 0, 0]]), group=1,\n attributes={'index': 2, 'text': 'i', 'center': '0 2',\n 'color': '128 128 0'}),\n ]),\n ])\n\n 
with TestDir() as test_dir:\n self._test_save_and_load(source_dataset,\n partial(IcdarTextSegmentationConverter.convert, save_images=True),\n test_dir, 'icdar_text_segmentation', expected_dataset)\n" ]
[ [ "numpy.array", "numpy.ones" ], [ "numpy.ones", "numpy.array", "numpy.zeros", "numpy.unique" ], [ "numpy.array", "numpy.zeros", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
chriamue/protoseg
[ "4ddc7d613aadcb9d25b5773eff688214349ab23f" ]
[ "protoseg/report.py" ]
[ "\nimport os\nimport numpy as np\nimport cv2\nimport json\nimport pandas as pd\nimport tensorflow as tf\nfrom tensorboard.backend.event_processing import event_accumulator as ea\n\nfrom matplotlib import pyplot as plt\nfrom matplotlib import colors as colors\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\nfrom matplotlib.figure import Figure\nimport seaborn as sns\nsns.set(style=\"darkgrid\")\nsns.set_context(\"paper\")\nfrom matplotlib.backends.backend_pdf import PdfPages\n\n\nclass Report():\n\n def __init__(self, configs, resultspath='results/'):\n self.configs = configs\n self.resultspath = resultspath\n assert(configs)\n\n # source: https://github.com/JamesChuanggg/Tensorboard2Seaborn/blob/master/beautify.py\n def plot(self, acc, tag='loss', smooth_space=100, color_code='#4169E1'):\n x_list = []\n y_list = []\n x_list_raw = []\n y_list_raw = []\n try:\n x = [int(s.step) for s in acc.Scalars(tag=tag)]\n y = [s.value for s in acc.Scalars(tag=tag)]\n\n # smooth curve\n x_ = []\n y_ = []\n for i in range(0, len(x), smooth_space):\n x_.append(x[i])\n y_.append(sum(y[i:i+smooth_space]) / float(smooth_space))\n x_.append(x[-1])\n y_.append(y[-1])\n x_list = x_\n y_list = y_\n\n # raw curve\n x_list_raw = x\n y_list_raw = y\n except Exception as e:\n print(e)\n\n fig, ax = plt.subplots()\n plt.title(tag)\n plt.plot(x_list_raw, y_list_raw,\n color=colors.to_rgba(color_code, alpha=0.4))\n plt.plot(x_list, y_list, color=color_code, linewidth=1.5)\n fig.canvas.draw()\n return fig, np.array(fig.canvas.renderer._renderer)\n\n def image(self, acc, tag='loss'):\n image_list = acc.Images(tag=tag)\n with tf.Session() as sess:\n img = tf.image.decode_image(image_list[-1].encoded_image_string)\n npimg = img.eval(session=sess)\n return npimg\n\n def generate(self):\n pp = PdfPages(os.path.join(self.resultspath,\n os.path.basename(self.configs.filename) + '.pdf'))\n for run in self.configs:\n resultpath = os.path.join(self.resultspath, run)\n event_acc = ea.EventAccumulator(resultpath)\n event_acc.Reload()\n fig, img = self.plot(event_acc, tag=\"loss\")\n plt.text(0.05, 0.95, run, transform=fig.transFigure, size=24)\n pp.savefig(fig)\n cv2.imwrite(resultpath+'/loss.png', img)\n config = self.configs.get()\n for metric in config['metrices']:\n name = list(metric.keys())[0]\n fig, img = self.plot(event_acc, tag=name)\n pp.savefig(fig)\n cv2.imwrite(resultpath+'/'+name+'.png', img)\n pp.close()\n\n def hyperparamopt(self, config, hyperparamoptimizer, resultpath):\n filename = os.path.join(resultpath, 'trials.csv')\n df = pd.DataFrame(data=hyperparamoptimizer.trials.results)\n df = df.set_index('loss')\n df.to_csv(filename)\n pp = PdfPages(os.path.join(resultpath, 'paramopt.pdf'))\n event_acc = ea.EventAccumulator(resultpath)\n event_acc.Reload()\n\n for result in hyperparamoptimizer.trials.results:\n trial = result['trial']\n l = result['loss']\n _, loss = self.plot(event_acc, tag='trial'+str(trial)+'_loss')\n val_image = self.image(\n event_acc, tag='trial'+str(trial)+'_val_image')\n val_mask = self.image(\n event_acc, tag='trial'+str(trial)+'_val_mask')\n val_predicted = self.image(\n event_acc, tag='trial'+str(trial)+'_val_predicted')\n fig = plt.figure()\n\n fig.add_subplot(2, 4, 1)\n plt.axis('on')\n plt.imshow(loss)\n\n fig.add_subplot(2, 4, 2)\n plt.axis('off')\n plt.imshow(val_image)\n\n fig.add_subplot(2, 4, 3)\n plt.axis('off')\n plt.imshow(val_mask)\n\n fig.add_subplot(2, 4, 4)\n plt.axis('off')\n plt.imshow(val_predicted)\n\n plt.text(0.05, 0.95, 'trial ' + 
str(trial) + \" loss: \" +\n str(l), transform=fig.transFigure, size=24)\n for i, m in enumerate(config['metrices']):\n name = list(m.keys())[0]\n tag = 'trial'+str(trial)+'_'+name\n _, metric = self.plot(event_acc, tag=tag)\n fig.add_subplot(2, len(config['metrices']), len(\n config['metrices']) + i+1)\n plt.imshow(metric)\n pp.attach_note(result['params'])\n pp.savefig(fig)\n plt.close(fig)\n pp.close()\n" ]
[ [ "matplotlib.pyplot.imshow", "matplotlib.pyplot.title", "matplotlib.colors.to_rgba", "matplotlib.pyplot.subplots", "pandas.DataFrame", "matplotlib.pyplot.plot", "tensorflow.image.decode_image", "tensorflow.Session", "matplotlib.pyplot.axis", "matplotlib.pyplot.close", "matplotlib.pyplot.text", "numpy.array", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [ "1.10" ] } ]
sahara2001/editsql
[ "d4325ac996d1ed0069def6d349e43e2a1914e761" ]
[ "model/model.py" ]
[ "\"\"\" Class for the Sequence to sequence model for ATIS.\"\"\"\n\nimport os\n\nimport torch\nimport torch.nn.functional as F\nfrom . import torch_utils\nfrom . import utils_bert\n\nfrom data_util.vocabulary import DEL_TOK, UNK_TOK\n\nfrom .encoder import Encoder, Encoder_Gnn\nfrom .embedder import Embedder\nfrom .token_predictor import construct_token_predictor\n\nimport numpy as np\n\nfrom data_util.atis_vocab import ATISVocabulary\n\nfrom .gated_graph_conv import GatedGraphConv\n\ndef get_token_indices(token, index_to_token):\n \"\"\" Maps from a gold token (string) to a list of indices.\n\n Inputs:\n token (string): String to look up.\n index_to_token (list of tokens): Ordered list of tokens.\n\n Returns:\n list of int, representing the indices of the token in the probability\n distribution.\n \"\"\"\n if token in index_to_token:\n if len(set(index_to_token)) == len(index_to_token): # no duplicates\n return [index_to_token.index(token)]\n else:\n indices = []\n for index, other_token in enumerate(index_to_token):\n if token == other_token:\n indices.append(index)\n assert len(indices) == len(set(indices))\n return indices\n else:\n return [index_to_token.index(UNK_TOK)]\n\ndef flatten_utterances(utterances):\n \"\"\" Gets a flat sequence from a sequence of utterances.\n\n Inputs:\n utterances (list of list of str): Utterances to concatenate.\n\n Returns:\n list of str, representing the flattened sequence with separating\n delimiter tokens.\n \"\"\"\n sequence = []\n for i, utterance in enumerate(utterances):\n sequence.extend(utterance)\n if i < len(utterances) - 1:\n sequence.append(DEL_TOK)\n\n return sequence\n\ndef encode_snippets_with_states(snippets, states):\n \"\"\" Encodes snippets by using previous query states instead.\n\n Inputs:\n snippets (list of Snippet): Input snippets.\n states (list of dy.Expression): Previous hidden states to use.\n TODO: should this by dy.Expression or vector values?\n \"\"\"\n for snippet in snippets:\n snippet.set_embedding(torch.cat([states[snippet.startpos],states[snippet.endpos]], dim=0))\n return snippets\n\ndef load_word_embeddings(input_vocabulary, output_vocabulary, output_vocabulary_schema, params):\n# print(output_vocabulary.inorder_tokens)\n# print()\n\n def read_glove_embedding(embedding_filename, embedding_size):\n glove_embeddings = {}\n\n with open(embedding_filename) as f:\n cnt = 1\n for line in f:\n cnt += 1\n if params.debug or not params.train:\n if cnt == 1000:\n print('Read 1000 word embeddings')\n break\n l_split = line.split()\n word = \" \".join(l_split[0:len(l_split) - embedding_size])\n embedding = np.array([float(val) for val in l_split[-embedding_size:]])\n glove_embeddings[word] = embedding\n\n return glove_embeddings\n\n print('Loading Glove Embedding from', params.embedding_filename)\n glove_embedding_size = 300\n glove_embeddings = read_glove_embedding(params.embedding_filename, glove_embedding_size)\n print('Done')\n\n input_embedding_size = glove_embedding_size\n\n def create_word_embeddings(vocab):\n vocabulary_embeddings = np.zeros((len(vocab), glove_embedding_size), dtype=np.float32)\n vocabulary_tokens = vocab.inorder_tokens\n\n glove_oov = 0\n para_oov = 0\n for token in vocabulary_tokens:\n token_id = vocab.token_to_id(token)\n if token in glove_embeddings:\n vocabulary_embeddings[token_id][:glove_embedding_size] = glove_embeddings[token]\n else:\n glove_oov += 1\n\n print('Glove OOV:', glove_oov, 'Para OOV', para_oov, 'Total', len(vocab))\n\n return vocabulary_embeddings\n\n input_vocabulary_embeddings 
= create_word_embeddings(input_vocabulary)\n output_vocabulary_embeddings = create_word_embeddings(output_vocabulary)\n output_vocabulary_schema_embeddings = None\n if output_vocabulary_schema:\n output_vocabulary_schema_embeddings = create_word_embeddings(output_vocabulary_schema)\n\n return input_vocabulary_embeddings, output_vocabulary_embeddings, output_vocabulary_schema_embeddings, input_embedding_size\n\nclass ATISModel(torch.nn.Module):\n \"\"\" Sequence-to-sequence model for predicting a SQL query given an utterance\n and an interaction prefix.\n \"\"\"\n\n def __init__(\n self,\n params,\n input_vocabulary,\n output_vocabulary,\n output_vocabulary_schema,\n anonymizer):\n super().__init__()\n\n self.params = params\n\n if params.use_bert:\n self.model_bert, self.tokenizer, self.bert_config = utils_bert.get_bert(params)\n\n self.gnn=None\n \n\n if 'atis' not in params.data_directory:\n if params.use_bert:\n if params.use_gnn:\n encoder_input_size = self.bert_config.hidden_size\n encoder_output_size = params.encoder_state_size\n\n self.gnn = GatedGraphConv(encoder_output_size, 2, 3) #input_dim, num_timesteps, num_edge_types,\n\n input_vocabulary_embeddings, output_vocabulary_embeddings, output_vocabulary_schema_embeddings, input_embedding_size = load_word_embeddings(input_vocabulary, output_vocabulary, output_vocabulary_schema, params)\n\n # Create the output embeddings\n self.output_embedder = Embedder(params.output_embedding_size,\n name=\"output-embedding\",\n initializer=output_vocabulary_embeddings,\n vocabulary=output_vocabulary,\n anonymizer=anonymizer,\n freeze=False)\n self.column_name_token_embedder = None\n else:\n input_vocabulary_embeddings, output_vocabulary_embeddings, output_vocabulary_schema_embeddings, input_embedding_size = load_word_embeddings(input_vocabulary, output_vocabulary, output_vocabulary_schema, params)\n\n params.input_embedding_size = input_embedding_size\n self.params.input_embedding_size = input_embedding_size\n\n # Create the input embeddings\n self.input_embedder = Embedder(params.input_embedding_size,\n name=\"input-embedding\",\n initializer=input_vocabulary_embeddings,\n vocabulary=input_vocabulary,\n anonymizer=anonymizer,\n freeze=params.freeze)\n\n # Create the output embeddings\n self.output_embedder = Embedder(params.output_embedding_size,\n name=\"output-embedding\",\n initializer=output_vocabulary_embeddings,\n vocabulary=output_vocabulary,\n anonymizer=anonymizer,\n freeze=False)\n\n self.column_name_token_embedder = Embedder(params.input_embedding_size,\n name=\"schema-embedding\",\n initializer=output_vocabulary_schema_embeddings,\n vocabulary=output_vocabulary_schema,\n anonymizer=anonymizer,\n freeze=params.freeze)\n else:\n # Create the input embeddings\n self.input_embedder = Embedder(params.input_embedding_size,\n name=\"input-embedding\",\n vocabulary=input_vocabulary,\n anonymizer=anonymizer,\n freeze=False)\n\n # Create the output embeddings\n self.output_embedder = Embedder(params.output_embedding_size,\n name=\"output-embedding\",\n vocabulary=output_vocabulary,\n anonymizer=anonymizer,\n freeze=False)\n\n self.column_name_token_embedder = None\n\n # Create the encoder\n encoder_input_size = params.input_embedding_size\n encoder_output_size = params.encoder_state_size\n if params.use_bert:\n encoder_input_size = self.bert_config.hidden_size\n\n if params.discourse_level_lstm:\n encoder_input_size += params.encoder_state_size / 2\n\n self.utterance_encoder = Encoder(params.encoder_num_layers, encoder_input_size, 
encoder_output_size)\n\n # Positional embedder for utterances\n attention_key_size = params.encoder_state_size\n self.schema_attention_key_size = attention_key_size\n if params.state_positional_embeddings:\n attention_key_size += params.positional_embedding_size\n self.positional_embedder = Embedder(\n params.positional_embedding_size,\n name=\"positional-embedding\",\n num_tokens=params.maximum_utterances)\n\n self.utterance_attention_key_size = attention_key_size\n\n\n # Create the discourse-level LSTM parameters\n if params.discourse_level_lstm:\n self.discourse_lstms = torch_utils.create_multilayer_lstm_params(1, params.encoder_state_size, params.encoder_state_size / 2, \"LSTM-t\")\n self.initial_discourse_state = torch_utils.add_params(tuple([params.encoder_state_size / 2]), \"V-turn-state-0\")\n\n # Snippet encoder\n final_snippet_size = 0\n if params.use_snippets and not params.previous_decoder_snippet_encoding:\n snippet_encoding_size = int(params.encoder_state_size / 2)\n final_snippet_size = params.encoder_state_size\n if params.snippet_age_embedding:\n snippet_encoding_size -= int(\n params.snippet_age_embedding_size / 4)\n self.snippet_age_embedder = Embedder(\n params.snippet_age_embedding_size,\n name=\"snippet-age-embedding\",\n num_tokens=params.max_snippet_age_embedding)\n final_snippet_size = params.encoder_state_size + params.snippet_age_embedding_size / 2\n\n\n self.snippet_encoder = Encoder(params.snippet_num_layers,\n params.output_embedding_size,\n snippet_encoding_size)\n\n # Previous query Encoder\n if params.use_previous_query:\n self.query_encoder = Encoder(params.encoder_num_layers, params.output_embedding_size, params.encoder_state_size)\n\n self.final_snippet_size = final_snippet_size\n self.dropout = 0.\n\n def _encode_snippets(self, previous_query, snippets, input_schema):\n \"\"\" Computes a single vector representation for each snippet.\n\n Inputs:\n previous_query (list of str): Previous query in the interaction.\n snippets (list of Snippet): Snippets extracted from the previous\n\n Returns:\n list of Snippets, where the embedding is set to a vector.\n \"\"\"\n startpoints = [snippet.startpos for snippet in snippets]\n endpoints = [snippet.endpos for snippet in snippets]\n assert len(startpoints) == 0 or min(startpoints) >= 0\n if input_schema:\n assert len(endpoints) == 0 or max(endpoints) <= len(previous_query)\n else:\n assert len(endpoints) == 0 or max(endpoints) < len(previous_query)\n\n snippet_embedder = lambda query_token: self.get_query_token_embedding(query_token, input_schema)\n if previous_query and snippets:\n _, previous_outputs = self.snippet_encoder(\n previous_query, snippet_embedder, dropout_amount=self.dropout)\n assert len(previous_outputs) == len(previous_query)\n\n for snippet in snippets:\n if input_schema:\n embedding = torch.cat([previous_outputs[snippet.startpos],previous_outputs[snippet.endpos-1]], dim=0)\n else:\n embedding = torch.cat([previous_outputs[snippet.startpos],previous_outputs[snippet.endpos]], dim=0)\n if self.params.snippet_age_embedding:\n embedding = torch.cat([embedding, self.snippet_age_embedder(min(snippet.age, self.params.max_snippet_age_embedding - 1))], dim=0)\n snippet.set_embedding(embedding)\n\n return snippets\n\n def _initialize_discourse_states(self):\n discourse_state = self.initial_discourse_state\n\n discourse_lstm_states = []\n for lstm in self.discourse_lstms:\n hidden_size = lstm.weight_hh.size()[1]\n if lstm.weight_hh.is_cuda:\n h_0 = torch.cuda.FloatTensor(1,hidden_size).fill_(0)\n c_0 = 
torch.cuda.FloatTensor(1,hidden_size).fill_(0)\n else:\n h_0 = torch.zeros(1,hidden_size)\n c_0 = torch.zeros(1,hidden_size)\n discourse_lstm_states.append((h_0, c_0))\n\n return discourse_state, discourse_lstm_states\n\n def _add_positional_embeddings(self, hidden_states, utterances, group=False):\n grouped_states = []\n\n start_index = 0\n for utterance in utterances:\n grouped_states.append(hidden_states[start_index:start_index + len(utterance)])\n start_index += len(utterance)\n assert len(hidden_states) == sum([len(seq) for seq in grouped_states]) == sum([len(utterance) for utterance in utterances])\n\n new_states = []\n flat_sequence = []\n\n num_utterances_to_keep = min(self.params.maximum_utterances, len(utterances))\n for i, (states, utterance) in enumerate(zip(\n grouped_states[-num_utterances_to_keep:], utterances[-num_utterances_to_keep:])):\n positional_sequence = []\n index = num_utterances_to_keep - i - 1\n\n for state in states:\n positional_sequence.append(torch.cat([state, self.positional_embedder(index)], dim=0))\n\n assert len(positional_sequence) == len(utterance), \\\n \"Expected utterance and state sequence length to be the same, \" \\\n + \"but they were \" + str(len(utterance)) \\\n + \" and \" + str(len(positional_sequence))\n\n if group:\n new_states.append(positional_sequence)\n else:\n new_states.extend(positional_sequence)\n flat_sequence.extend(utterance)\n\n return new_states, flat_sequence\n\n def build_optim(self):\n params_trainer = []\n params_bert_trainer = []\n for name, param in self.named_parameters():\n if param.requires_grad:\n if 'model_bert' in name:\n params_bert_trainer.append(param)\n else:\n params_trainer.append(param)\n self.trainer = torch.optim.Adam(params_trainer, lr=self.params.initial_learning_rate)\n if self.params.fine_tune_bert:\n self.bert_trainer = torch.optim.Adam(params_bert_trainer, lr=self.params.lr_bert)\n\n def set_dropout(self, value):\n \"\"\" Sets the dropout to a specified value.\n\n Inputs:\n value (float): Value to set dropout to.\n \"\"\"\n self.dropout = value\n\n def set_learning_rate(self, value):\n \"\"\" Sets the learning rate for the trainer.\n\n Inputs:\n value (float): The new learning rate.\n \"\"\"\n for param_group in self.trainer.param_groups:\n param_group['lr'] = value\n\n def save(self, filename):\n \"\"\" Saves the model to the specified filename.\n\n Inputs:\n filename (str): The filename to save to.\n \"\"\"\n torch.save(self.state_dict(), filename)\n\n def load(self, filename):\n \"\"\" Loads saved parameters into the parameter collection.\n\n Inputs:\n filename (str): Name of file containing parameters.\n \"\"\"\n self.load_state_dict(torch.load(filename))\n print(\"Loaded model from file \" + filename)\n\n" ]
[ [ "torch.optim.Adam", "torch.zeros", "torch.load", "torch.cat", "torch.cuda.FloatTensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ankit9437/MNIST
[ "bf620e7779a5383c2ad87cf89cd11651963bd7c5" ]
[ "MNISTT.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Dec 15 10:58:44 2019\r\n\r\n@author: DELL\r\n\"\"\"\r\n\r\nfrom __future__ import print_function, division\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n\r\ndef d(u, v):\r\n diff = u - v\r\n return diff.dot(diff)\r\n\r\n\r\ndef get_data(limit=None):\r\n print(\"Reading in and transforming data...\")\r\n df = pd.read_csv('train.csv')\r\n data = df.values\r\n np.random.shuffle(data)\r\n X = data[:, 1:] / 255.0 # data is from 0..255\r\n Y = data[:, 0]\r\n if limit is not None:\r\n X, Y = X[:limit], Y[:limit]\r\n return X, Y\r\n\r\ndef plot_k_means(X, K, max_iter=5, beta=3.0, show_plots=False):\r\n N, D = X.shape\r\n # R = np.zeros((N, K))\r\n exponents = np.empty((N, K))\r\n\r\n # initialize M to random\r\n initial_centers = np.random.choice(N, K, replace=False)\r\n M = X[initial_centers]\r\n\r\n\r\n k = 0\r\n for i in range(max_iter):\r\n k += 1\r\n # step 1: determine assignments / resposibilities\r\n # is this inefficient?\r\n for k in range(K):\r\n for n in range(N):\r\n exponents[n,k] = np.exp(-beta*d(M[k], X[n]))\r\n R = exponents / exponents.sum(axis=1, keepdims=True)\r\n\r\n\r\n # step 2: recalculate means\r\n\r\n for k in range(K):\r\n M[k] = R[:,k].dot(X) / R[:,k].sum()\r\n return M, R\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef main():\r\n # mnist data\r\n X, Y = get_data(1000)\r\n\r\n # simple data\r\n # X = get_simple_data()\r\n # Y = np.array([0]*300 + [1]*300 + [2]*300)\r\n\r\n print(\"Number of data points:\", len(Y))\r\n M, R = plot_k_means(X, len(set(Y)))\r\n # Exercise: Try different values of K and compare the evaluation metrics\r\n \r\n # they should look like digits\r\n for k in range(len(M)):\r\n im = M[k].reshape(28, 28)\r\n plt.imshow(im, cmap='Blues')\r\n plt.show()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()" ]
[ [ "matplotlib.pyplot.imshow", "pandas.read_csv", "numpy.random.choice", "numpy.random.shuffle", "matplotlib.pyplot.show", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
shivangraikar/Twitter-Data-Mining-For-Targeted-Marketing
[ "d12fe807187d438041b4497cbb82ad9ef14d4dbf" ]
[ "email-finder.py" ]
[ "import string\nimport time\nimport threading\nimport urllib\nimport re\nimport io\nimport sys\nfrom time import sleep\nimport pickle\nimport pandas as pd\nimport psycopg2\n\n\n\ndef formats(first, middle, last, domain):\n \"\"\"\n Create a list of 30 possible email formats combining:\n - First name: [empty] | Full | Initial |\n - Delimeter: [empty] | . | _ | -\n - Last name: [empty] | Full | Initial |\n \"\"\"\n list = []\n\n if len(last)==0:\n list.append(first + '@' + domain) # [email protected]\n\n\n else:\n list.append(first[0] + last + '@' + domain) # [email protected]\n list.append(first[0] + '.' + last + '@' + domain) # [email protected]\n list.append(first[0] + '_' + last + '@' + domain) # [email protected]\n list.append(first + '@' + domain) # [email protected]\n list.append(first + last + '@' + domain) # [email protected]\n list.append(first + '.' + last + '@' + domain) # [email protected]\n list.append(first + '_' + last + '@' + domain) # [email protected]\n list.append(first + '-' + last + '@' + domain) # [email protected]\n\n list.append(first + last[0] + '@' + domain) # [email protected]\n list.append(first + '.' + last[0] + '@' + domain) # [email protected]\n list.append(first + '_' + last[0] + '@' + domain) # [email protected]\n \n list.append(first[0] + middle + last + '@' + domain) # [email protected]\n list.append(first[0] + '.' + middle + last + '@' + domain) # [email protected]\n list.append(first[0] + middle + '.' + last + '@' + domain) # [email protected]\n list.append(first[0] + '_' + middle+ last + '@' + domain) # [email protected]\n list.append(first[0] + middle +'_' + last + '@' + domain) # [email protected]\n list.append(first + middle+ last + '@' + domain) # [email protected]\n list.append(first + middle + '.' + last + '@' + domain) # [email protected]\n list.append(first + '.' + middle + last + '@' + domain) # [email protected]\n list.append(first + '_' + middle + last + '@' + domain) # [email protected]\n list.append(first + middle + '_' + last + '@' + domain) # [email protected]\n list.append(first + middle+ last[0] + '@' + domain) # [email protected]\n list.append(first + '.' + middle +last[0] + '@' + domain) # [email protected]\n list.append(first + middle + '.' +last[0] + '@' + domain) # [email protected]\n list.append(first + '_' + middle +last[0] + '@' + domain) # [email protected]\n list.append(first + middle +'_' + last[0] + '@' + domain) # [email protected] \n \n list.append(last + '@' + domain) # [email protected]\n list.append(last + first+ '@' + domain) # [email protected]\n list.append(last + '.' + first + '@' + domain) # [email protected]\n list.append(last + '_' + first + '@' + domain) # [email protected]\n list.append(last[0] + '.' + first + '@' + domain) # [email protected] \n list.append(last[0] + first + '@' + domain) # [email protected]\n list.append(last + first[0] + '@' + domain) # [email protected]\n list.append(last + '.' 
+ first[0] + '@' + domain) # [email protected]\n list.append(last + '_' + first[0] + '@' + domain) # [email protected]\n \n return(list)\n\n\nval=\"select distinct name from keywords\"\n\ntry: \n conn = psycopg2.connect(database='Hiranandani', user = \"postgres\", password = \"parth123n@#*\", host = \"127.0.0.1\", port = \"5432\") \nexcept:\n print(\"Create database first\")\n\n\ndf=pd.read_sql(val,conn)\n\nuname=list()\nfor i in df['name']:\n uname.append(i.translate(str.maketrans('', '', string.punctuation)))\n\na=['dr','ca','er'] \n\nnotdrca=list()\nfor i in uname:\n if any(x in i.lower() for x in a):\n continue\n else:\n notdrca.append(i) \n \nlen2=list()\nl1=list()\nl3=list()\nln=list()\n\nemail_list=list()\n\nfor i in notdrca:\n if any(x in i.lower() for x in a):\n print(i)\n\n\nfor i in notdrca:\n try:\n i=i.lower()\n s=i.split()\n\n if len(s)==2:\n email_list.extend(formats(s[0],s[1],'','gmail.com'))\n len2.append(i)\n elif len(s)==1:\n email_list.extend(formats(s[0],'','','gmail.com')) \n l1.append(i)\n elif len(s)==3:\n email_list.extend(formats(s[0],s[1],s[2],'gmail.com')) \n l3.append(i)\n elif len(s)>3:\n ln.append(i)\n continue \n except:\n continue \n\n\ntry:\n h=open('emails.pickle','wb')\nexcept Exception as e:\n print(e)\n \npickle.dump(email_list,h)\n\n\n\nregex = '^\\w+([\\.-]?\\w+)*@\\w+([\\.-]?\\w+)*(\\.\\w{2,3})+$'\n\nmatch=re.match(regex,'harsha_nihar@yahoon')\nif match==None:\n print(match)\n\n" ]
[ [ "pandas.read_sql" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
ganesh2583/Python-Data_Science
[ "233586491d3863176a008b938b0946c472940a6d" ]
[ "genderPredictScript.py" ]
[ "from sklearn import tree\nfrom sklearn import neighbors\nfrom sklearn import gaussian_process\n\n#[height, weight, shoe size]\nX = [[181,80,10],[161,70,6],[171,66,7],[176,88,7],[189,100,8],[141,80,5],[156,78,6],[161,50,6],[171,60,7],[151,78,7],[171,40,7]]\n#Gender\nY = ['male','male','male','male','male','female','female','female','female','female','female']\n\n#Define 'DecisionTreeClassifier' Classifier From the imported Tree\ndecisionTreeclassifier = tree.DecisionTreeClassifier()\n\n#Fit the data into the Classifier\ndecisionTreeclassifier = decisionTreeclassifier.fit(X,Y)\n\n#Perform Prediction\ndecisionTreeclassifierPrediction = decisionTreeclassifier.predict([[161,60,9]])\n\n#Print the Classifier\nprint(decisionTreeclassifier)\n\n#Print the Prediction\nprint(decisionTreeclassifierPrediction)\n\n\n\n#Define 'KNeighborsClassifier' Classifier From the imported Tree\nkNeighborsClassifier = neighbors.KNeighborsClassifier()\n\n#Fit the data into the Classifier\nkNeighborsClassifier = kNeighborsClassifier.fit(X,Y)\n\n#Perform Prediction\nkNeighborsClassifierPrediction = kNeighborsClassifier.predict([[161,60,9]])\n\n#Print the Classifier\nprint(kNeighborsClassifier)\n\n#Print the Prediction\nprint(kNeighborsClassifierPrediction)\n\n\n\n#Define 'GaussianProcessClassifier' Classifier From the imported Tree\ngaussianProcessClassifier = gaussian_process.GaussianProcessClassifier()\n\n#Fit the data into the Classifier\ngaussianProcessClassifier = gaussianProcessClassifier.fit(X,Y)\n\n#Perform Prediction\ngaussianProcessClassifierPrediction = gaussianProcessClassifier.predict([[161,60,9]])\n\n#Print the Classifier\nprint(gaussianProcessClassifier)\n\n#Print the Prediction\nprint(gaussianProcessClassifierPrediction)" ]
[ [ "sklearn.tree.DecisionTreeClassifier", "sklearn.gaussian_process.GaussianProcessClassifier", "sklearn.neighbors.KNeighborsClassifier" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Ow-woo/stable-baselines
[ "ece376f62b0eaa3b58e90593b7db5fb9de3d82c5", "ece376f62b0eaa3b58e90593b7db5fb9de3d82c5", "ece376f62b0eaa3b58e90593b7db5fb9de3d82c5" ]
[ "stable_baselines/trpo_mpi/trpo_mpi.py", "stable_baselines/td3/policies.py", "stable_baselines/ppo2/ppo2.py" ]
[ "import time\nfrom contextlib import contextmanager\nfrom collections import deque\n\nimport gym\nfrom mpi4py import MPI\nimport tensorflow as tf\nimport numpy as np\n\nimport stable_baselines.common.tf_util as tf_util\nfrom stable_baselines.common.tf_util import total_episode_reward_logger\nfrom stable_baselines.common import explained_variance, zipsame, dataset, fmt_row, colorize, ActorCriticRLModel, \\\n SetVerbosity, TensorboardWriter\nfrom stable_baselines import logger\nfrom stable_baselines.common.mpi_adam import MpiAdam\nfrom stable_baselines.common.cg import conjugate_gradient\nfrom stable_baselines.common.policies import ActorCriticPolicy\nfrom stable_baselines.common.misc_util import flatten_lists\nfrom stable_baselines.common.runners import traj_segment_generator\nfrom stable_baselines.trpo_mpi.utils import add_vtarg_and_adv\n\n\nclass TRPO(ActorCriticRLModel):\n \"\"\"\n Trust Region Policy Optimization (https://arxiv.org/abs/1502.05477)\n\n :param policy: (ActorCriticPolicy or str) The policy model to use (MlpPolicy, CnnPolicy, CnnLstmPolicy, ...)\n :param env: (Gym environment or str) The environment to learn from (if registered in Gym, can be str)\n :param gamma: (float) the discount value\n :param timesteps_per_batch: (int) the number of timesteps to run per batch (horizon)\n :param max_kl: (float) the Kullback-Leibler loss threshold\n :param cg_iters: (int) the number of iterations for the conjugate gradient calculation\n :param lam: (float) GAE factor\n :param entcoeff: (float) the weight for the entropy loss\n :param cg_damping: (float) the compute gradient dampening factor\n :param vf_stepsize: (float) the value function stepsize\n :param vf_iters: (int) the value function's number iterations for learning\n :param verbose: (int) the verbosity level: 0 none, 1 training information, 2 tensorflow debug\n :param tensorboard_log: (str) the log location for tensorboard (if None, no logging)\n :param _init_setup_model: (bool) Whether or not to build the network at the creation of the instance\n :param policy_kwargs: (dict) additional arguments to be passed to the policy on creation\n :param full_tensorboard_log: (bool) enable additional logging when using tensorboard\n WARNING: this logging can take a lot of space quickly\n :param seed: (int) Seed for the pseudo-random generators (python, numpy, tensorflow).\n If None (default), use random seed. 
Note that if you want completely deterministic\n results, you must set `n_cpu_tf_sess` to 1.\n :param n_cpu_tf_sess: (int) The number of threads for TensorFlow operations\n If None, the number of cpu of the current machine will be used.\n \"\"\"\n def __init__(self, policy, env, gamma=0.99, timesteps_per_batch=1024, max_kl=0.01, cg_iters=10, lam=0.98,\n entcoeff=0.0, cg_damping=1e-2, vf_stepsize=3e-4, vf_iters=3, verbose=0, tensorboard_log=None,\n _init_setup_model=True, policy_kwargs=None, full_tensorboard_log=False,\n seed=None, n_cpu_tf_sess=1):\n super(TRPO, self).__init__(policy=policy, env=env, verbose=verbose, requires_vec_env=False,\n _init_setup_model=_init_setup_model, policy_kwargs=policy_kwargs,\n seed=seed, n_cpu_tf_sess=n_cpu_tf_sess)\n\n self.using_gail = False\n self.timesteps_per_batch = timesteps_per_batch\n self.cg_iters = cg_iters\n self.cg_damping = cg_damping\n self.gamma = gamma\n self.lam = lam\n self.max_kl = max_kl\n self.vf_iters = vf_iters\n self.vf_stepsize = vf_stepsize\n self.entcoeff = entcoeff\n self.tensorboard_log = tensorboard_log\n self.full_tensorboard_log = full_tensorboard_log\n\n # GAIL Params\n self.hidden_size_adversary = 100\n self.adversary_entcoeff = 1e-3\n self.expert_dataset = None\n self.g_step = 1\n self.d_step = 1\n self.d_stepsize = 3e-4\n\n self.graph = None\n self.sess = None\n self.policy_pi = None\n self.loss_names = None\n self.assign_old_eq_new = None\n self.compute_losses = None\n self.compute_lossandgrad = None\n self.compute_fvp = None\n self.compute_vflossandgrad = None\n self.d_adam = None\n self.vfadam = None\n self.get_flat = None\n self.set_from_flat = None\n self.timed = None\n self.allmean = None\n self.nworkers = None\n self.rank = None\n self.reward_giver = None\n self.step = None\n self.proba_step = None\n self.initial_state = None\n self.params = None\n self.summary = None\n\n if _init_setup_model:\n self.setup_model()\n\n def _get_pretrain_placeholders(self):\n policy = self.policy_pi\n action_ph = policy.pdtype.sample_placeholder([None])\n if isinstance(self.action_space, gym.spaces.Discrete):\n return policy.obs_ph, action_ph, policy.policy\n return policy.obs_ph, action_ph, policy.deterministic_action\n\n def setup_model(self):\n # prevent import loops\n from stable_baselines.gail.adversary import TransitionClassifier\n\n with SetVerbosity(self.verbose):\n\n assert issubclass(self.policy, ActorCriticPolicy), \"Error: the input policy for the TRPO model must be \" \\\n \"an instance of common.policies.ActorCriticPolicy.\"\n\n self.nworkers = MPI.COMM_WORLD.Get_size()\n self.rank = MPI.COMM_WORLD.Get_rank()\n np.set_printoptions(precision=3)\n\n self.graph = tf.Graph()\n with self.graph.as_default():\n self.set_random_seed(self.seed)\n self.sess = tf_util.make_session(num_cpu=self.n_cpu_tf_sess, graph=self.graph)\n\n if self.using_gail:\n self.reward_giver = TransitionClassifier(self.observation_space, self.action_space,\n self.hidden_size_adversary,\n entcoeff=self.adversary_entcoeff)\n\n # Construct network for new policy\n self.policy_pi = self.policy(self.sess, self.observation_space, self.action_space, self.n_envs, 1,\n None, reuse=False, **self.policy_kwargs)\n\n # Network for old policy\n with tf.variable_scope(\"oldpi\", reuse=False):\n old_policy = self.policy(self.sess, self.observation_space, self.action_space, self.n_envs, 1,\n None, reuse=False, **self.policy_kwargs)\n\n with tf.variable_scope(\"loss\", reuse=False):\n atarg = tf.placeholder(dtype=tf.float32, shape=[None]) # Target advantage function (if 
applicable)\n ret = tf.placeholder(dtype=tf.float32, shape=[None]) # Empirical return\n\n observation = self.policy_pi.obs_ph\n action = self.policy_pi.pdtype.sample_placeholder([None])\n\n kloldnew = old_policy.proba_distribution.kl(self.policy_pi.proba_distribution)\n ent = self.policy_pi.proba_distribution.entropy()\n meankl = tf.reduce_mean(kloldnew)\n meanent = tf.reduce_mean(ent)\n entbonus = self.entcoeff * meanent\n\n vferr = tf.reduce_mean(tf.square(self.policy_pi.value_flat - ret))\n\n # advantage * pnew / pold\n ratio = tf.exp(self.policy_pi.proba_distribution.logp(action) -\n old_policy.proba_distribution.logp(action))\n surrgain = tf.reduce_mean(ratio * atarg)\n\n optimgain = surrgain + entbonus\n losses = [optimgain, meankl, entbonus, surrgain, meanent]\n self.loss_names = [\"optimgain\", \"meankl\", \"entloss\", \"surrgain\", \"entropy\"]\n\n dist = meankl\n\n all_var_list = tf_util.get_trainable_vars(\"model\")\n var_list = [v for v in all_var_list if \"/vf\" not in v.name and \"/q/\" not in v.name]\n vf_var_list = [v for v in all_var_list if \"/pi\" not in v.name and \"/logstd\" not in v.name]\n\n self.get_flat = tf_util.GetFlat(var_list, sess=self.sess)\n self.set_from_flat = tf_util.SetFromFlat(var_list, sess=self.sess)\n\n klgrads = tf.gradients(dist, var_list)\n flat_tangent = tf.placeholder(dtype=tf.float32, shape=[None], name=\"flat_tan\")\n shapes = [var.get_shape().as_list() for var in var_list]\n start = 0\n tangents = []\n for shape in shapes:\n var_size = tf_util.intprod(shape)\n tangents.append(tf.reshape(flat_tangent[start: start + var_size], shape))\n start += var_size\n gvp = tf.add_n([tf.reduce_sum(grad * tangent)\n for (grad, tangent) in zipsame(klgrads, tangents)]) # pylint: disable=E1111\n # Fisher vector products\n fvp = tf_util.flatgrad(gvp, var_list)\n\n tf.summary.scalar('entropy_loss', meanent)\n tf.summary.scalar('policy_gradient_loss', optimgain)\n tf.summary.scalar('value_function_loss', surrgain)\n tf.summary.scalar('approximate_kullback-leibler', meankl)\n tf.summary.scalar('loss', optimgain + meankl + entbonus + surrgain + meanent)\n\n self.assign_old_eq_new = \\\n tf_util.function([], [], updates=[tf.assign(oldv, newv) for (oldv, newv) in\n zipsame(tf_util.get_globals_vars(\"oldpi\"),\n tf_util.get_globals_vars(\"model\"))])\n self.compute_losses = tf_util.function([observation, old_policy.obs_ph, action, atarg], losses)\n self.compute_fvp = tf_util.function([flat_tangent, observation, old_policy.obs_ph, action, atarg],\n fvp)\n self.compute_vflossandgrad = tf_util.function([observation, old_policy.obs_ph, ret],\n tf_util.flatgrad(vferr, vf_var_list))\n\n @contextmanager\n def timed(msg):\n if self.rank == 0 and self.verbose >= 1:\n print(colorize(msg, color='magenta'))\n start_time = time.time()\n yield\n print(colorize(\"done in {:.3f} seconds\".format((time.time() - start_time)),\n color='magenta'))\n else:\n yield\n\n def allmean(arr):\n assert isinstance(arr, np.ndarray)\n out = np.empty_like(arr)\n MPI.COMM_WORLD.Allreduce(arr, out, op=MPI.SUM)\n out /= self.nworkers\n return out\n\n tf_util.initialize(sess=self.sess)\n\n th_init = self.get_flat()\n MPI.COMM_WORLD.Bcast(th_init, root=0)\n self.set_from_flat(th_init)\n\n with tf.variable_scope(\"Adam_mpi\", reuse=False):\n self.vfadam = MpiAdam(vf_var_list, sess=self.sess)\n if self.using_gail:\n self.d_adam = MpiAdam(self.reward_giver.get_trainable_variables(), sess=self.sess)\n self.d_adam.sync()\n self.vfadam.sync()\n\n with tf.variable_scope(\"input_info\", reuse=False):\n 
tf.summary.scalar('discounted_rewards', tf.reduce_mean(ret))\n tf.summary.scalar('learning_rate', tf.reduce_mean(self.vf_stepsize))\n tf.summary.scalar('advantage', tf.reduce_mean(atarg))\n tf.summary.scalar('kl_clip_range', tf.reduce_mean(self.max_kl))\n\n if self.full_tensorboard_log:\n tf.summary.histogram('discounted_rewards', ret)\n tf.summary.histogram('learning_rate', self.vf_stepsize)\n tf.summary.histogram('advantage', atarg)\n tf.summary.histogram('kl_clip_range', self.max_kl)\n if tf_util.is_image(self.observation_space):\n tf.summary.image('observation', observation)\n else:\n tf.summary.histogram('observation', observation)\n\n self.timed = timed\n self.allmean = allmean\n\n self.step = self.policy_pi.step\n self.proba_step = self.policy_pi.proba_step\n self.initial_state = self.policy_pi.initial_state\n\n self.params = tf_util.get_trainable_vars(\"model\") + tf_util.get_trainable_vars(\"oldpi\")\n if self.using_gail:\n self.params.extend(self.reward_giver.get_trainable_variables())\n\n self.summary = tf.summary.merge_all()\n\n self.compute_lossandgrad = \\\n tf_util.function([observation, old_policy.obs_ph, action, atarg, ret],\n [self.summary, tf_util.flatgrad(optimgain, var_list)] + losses)\n\n def learn(self, total_timesteps, callback=None, log_interval=100, tb_log_name=\"TRPO\",\n reset_num_timesteps=True):\n\n new_tb_log = self._init_num_timesteps(reset_num_timesteps)\n callback = self._init_callback(callback)\n\n with SetVerbosity(self.verbose), TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name, new_tb_log) \\\n as writer:\n self._setup_learn()\n\n with self.sess.as_default():\n callback.on_training_start(locals(), globals())\n\n seg_gen = traj_segment_generator(self.policy_pi, self.env, self.timesteps_per_batch,\n reward_giver=self.reward_giver,\n gail=self.using_gail, callback=callback)\n\n episodes_so_far = 0\n timesteps_so_far = 0\n iters_so_far = 0\n t_start = time.time()\n len_buffer = deque(maxlen=40) # rolling buffer for episode lengths\n reward_buffer = deque(maxlen=40) # rolling buffer for episode rewards\n\n true_reward_buffer = None\n if self.using_gail:\n true_reward_buffer = deque(maxlen=40)\n\n # Initialize dataloader\n batchsize = self.timesteps_per_batch // self.d_step\n self.expert_dataset.init_dataloader(batchsize)\n\n # Stats not used for now\n # TODO: replace with normal tb logging\n #  g_loss_stats = Stats(loss_names)\n # d_loss_stats = Stats(reward_giver.loss_name)\n # ep_stats = Stats([\"True_rewards\", \"Rewards\", \"Episode_length\"])\n\n while True:\n if timesteps_so_far >= total_timesteps:\n break\n\n logger.log(\"********** Iteration %i ************\" % iters_so_far)\n\n def fisher_vector_product(vec):\n return self.allmean(self.compute_fvp(vec, *fvpargs, sess=self.sess)) + self.cg_damping * vec\n\n # ------------------ Update G ------------------\n logger.log(\"Optimizing Policy...\")\n # g_step = 1 when not using GAIL\n mean_losses = None\n vpredbefore = None\n tdlamret = None\n observation = None\n action = None\n seg = None\n for k in range(self.g_step):\n with self.timed(\"sampling\"):\n seg = seg_gen.__next__()\n\n # Stop training early (triggered by the callback)\n if not seg.get('continue_training', True): # pytype: disable=attribute-error\n break\n\n add_vtarg_and_adv(seg, self.gamma, self.lam)\n # ob, ac, atarg, ret, td1ret = map(np.concatenate, (obs, acs, atargs, rets, td1rets))\n observation, action = seg[\"observations\"], seg[\"actions\"]\n atarg, tdlamret = seg[\"adv\"], seg[\"tdlamret\"]\n\n\n vpredbefore = 
seg[\"vpred\"] # predicted value function before update\n atarg = (atarg - atarg.mean()) / (atarg.std() + 1e-8) # standardized advantage function estimate\n\n # true_rew is the reward without discount\n if writer is not None:\n total_episode_reward_logger(self.episode_reward,\n seg[\"true_rewards\"].reshape(\n (self.n_envs, -1)),\n seg[\"dones\"].reshape((self.n_envs, -1)),\n writer, self.num_timesteps)\n\n args = seg[\"observations\"], seg[\"observations\"], seg[\"actions\"], atarg\n # Subsampling: see p40-42 of John Schulman thesis\n # http://joschu.net/docs/thesis.pdf\n fvpargs = [arr[::5] for arr in args]\n\n self.assign_old_eq_new(sess=self.sess)\n\n with self.timed(\"computegrad\"):\n steps = self.num_timesteps + (k + 1) * (seg[\"total_timestep\"] / self.g_step)\n run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)\n run_metadata = tf.RunMetadata() if self.full_tensorboard_log else None\n # run loss backprop with summary, and save the metadata (memory, compute time, ...)\n if writer is not None:\n summary, grad, *lossbefore = self.compute_lossandgrad(*args, tdlamret, sess=self.sess,\n options=run_options,\n run_metadata=run_metadata)\n if self.full_tensorboard_log:\n writer.add_run_metadata(run_metadata, 'step%d' % steps)\n writer.add_summary(summary, steps)\n else:\n _, grad, *lossbefore = self.compute_lossandgrad(*args, tdlamret, sess=self.sess,\n options=run_options,\n run_metadata=run_metadata)\n\n lossbefore = self.allmean(np.array(lossbefore))\n grad = self.allmean(grad)\n if np.allclose(grad, 0):\n logger.log(\"Got zero gradient. not updating\")\n else:\n with self.timed(\"conjugate_gradient\"):\n stepdir = conjugate_gradient(fisher_vector_product, grad, cg_iters=self.cg_iters,\n verbose=self.rank == 0 and self.verbose >= 1)\n assert np.isfinite(stepdir).all()\n shs = .5 * stepdir.dot(fisher_vector_product(stepdir))\n # abs(shs) to avoid taking square root of negative values\n lagrange_multiplier = np.sqrt(abs(shs) / self.max_kl)\n # logger.log(\"lagrange multiplier:\", lm, \"gnorm:\", np.linalg.norm(g))\n fullstep = stepdir / lagrange_multiplier\n expectedimprove = grad.dot(fullstep)\n surrbefore = lossbefore[0]\n stepsize = 1.0\n thbefore = self.get_flat()\n for _ in range(10):\n thnew = thbefore + fullstep * stepsize\n self.set_from_flat(thnew)\n mean_losses = surr, kl_loss, *_ = self.allmean(\n np.array(self.compute_losses(*args, sess=self.sess)))\n improve = surr - surrbefore\n logger.log(\"Expected: %.3f Actual: %.3f\" % (expectedimprove, improve))\n if not np.isfinite(mean_losses).all():\n logger.log(\"Got non-finite value of losses -- bad!\")\n elif kl_loss > self.max_kl * 1.5:\n logger.log(\"violated KL constraint. shrinking step.\")\n elif improve < 0:\n logger.log(\"surrogate didn't improve. 
shrinking step.\")\n else:\n logger.log(\"Stepsize OK!\")\n break\n stepsize *= .5\n else:\n logger.log(\"couldn't compute a good step\")\n self.set_from_flat(thbefore)\n if self.nworkers > 1 and iters_so_far % 20 == 0:\n # list of tuples\n paramsums = MPI.COMM_WORLD.allgather((thnew.sum(), self.vfadam.getflat().sum()))\n assert all(np.allclose(ps, paramsums[0]) for ps in paramsums[1:])\n\n for (loss_name, loss_val) in zip(self.loss_names, mean_losses):\n logger.record_tabular(loss_name, loss_val)\n\n with self.timed(\"vf\"):\n for _ in range(self.vf_iters):\n # NOTE: for recurrent policies, use shuffle=False?\n for (mbob, mbret) in dataset.iterbatches((seg[\"observations\"], seg[\"tdlamret\"]),\n include_final_partial_batch=False,\n batch_size=128,\n shuffle=True):\n grad = self.allmean(self.compute_vflossandgrad(mbob, mbob, mbret, sess=self.sess))\n self.vfadam.update(grad, self.vf_stepsize)\n\n\n # Stop training early (triggered by the callback)\n if not seg.get('continue_training', True): # pytype: disable=attribute-error\n break\n\n logger.record_tabular(\"explained_variance_tdlam_before\",\n explained_variance(vpredbefore, tdlamret))\n\n if self.using_gail:\n # ------------------ Update D ------------------\n logger.log(\"Optimizing Discriminator...\")\n logger.log(fmt_row(13, self.reward_giver.loss_name))\n assert len(observation) == self.timesteps_per_batch\n batch_size = self.timesteps_per_batch // self.d_step\n\n # NOTE: uses only the last g step for observation\n d_losses = [] # list of tuples, each of which gives the loss for a minibatch\n # NOTE: for recurrent policies, use shuffle=False?\n for ob_batch, ac_batch in dataset.iterbatches((observation, action),\n include_final_partial_batch=False,\n batch_size=batch_size,\n shuffle=True):\n ob_expert, ac_expert = self.expert_dataset.get_next_batch()\n # update running mean/std for reward_giver\n if self.reward_giver.normalize:\n self.reward_giver.obs_rms.update(np.concatenate((ob_batch, ob_expert), 0))\n\n # Reshape actions if needed when using discrete actions\n if isinstance(self.action_space, gym.spaces.Discrete):\n if len(ac_batch.shape) == 2:\n ac_batch = ac_batch[:, 0]\n if len(ac_expert.shape) == 2:\n ac_expert = ac_expert[:, 0]\n *newlosses, grad = self.reward_giver.lossandgrad(ob_batch, ac_batch, ob_expert, ac_expert)\n self.d_adam.update(self.allmean(grad), self.d_stepsize)\n d_losses.append(newlosses)\n logger.log(fmt_row(13, np.mean(d_losses, axis=0)))\n\n # lr: lengths and rewards\n lr_local = (seg[\"ep_lens\"], seg[\"ep_rets\"], seg[\"ep_true_rets\"]) # local values\n list_lr_pairs = MPI.COMM_WORLD.allgather(lr_local) # list of tuples\n lens, rews, true_rets = map(flatten_lists, zip(*list_lr_pairs))\n true_reward_buffer.extend(true_rets)\n else:\n # lr: lengths and rewards\n lr_local = (seg[\"ep_lens\"], seg[\"ep_rets\"]) # local values\n list_lr_pairs = MPI.COMM_WORLD.allgather(lr_local) # list of tuples\n lens, rews = map(flatten_lists, zip(*list_lr_pairs))\n len_buffer.extend(lens)\n reward_buffer.extend(rews)\n\n if len(len_buffer) > 0:\n logger.record_tabular(\"EpLenMean\", np.mean(len_buffer))\n logger.record_tabular(\"EpRewMean\", np.mean(reward_buffer))\n if self.using_gail:\n logger.record_tabular(\"EpTrueRewMean\", np.mean(true_reward_buffer))\n logger.record_tabular(\"EpThisIter\", len(lens))\n episodes_so_far += len(lens)\n current_it_timesteps = MPI.COMM_WORLD.allreduce(seg[\"total_timestep\"])\n timesteps_so_far += current_it_timesteps\n self.num_timesteps += current_it_timesteps\n iters_so_far += 
1\n\n logger.record_tabular(\"EpisodesSoFar\", episodes_so_far)\n logger.record_tabular(\"TimestepsSoFar\", self.num_timesteps)\n logger.record_tabular(\"TimeElapsed\", time.time() - t_start)\n\n if self.verbose >= 1 and self.rank == 0:\n logger.dump_tabular()\n\n callback.on_training_end()\n return self\n\n def save(self, save_path, cloudpickle=False):\n if self.using_gail and self.expert_dataset is not None:\n # Exit processes to pickle the dataset\n self.expert_dataset.prepare_pickling()\n data = {\n \"gamma\": self.gamma,\n \"timesteps_per_batch\": self.timesteps_per_batch,\n \"max_kl\": self.max_kl,\n \"cg_iters\": self.cg_iters,\n \"lam\": self.lam,\n \"entcoeff\": self.entcoeff,\n \"cg_damping\": self.cg_damping,\n \"vf_stepsize\": self.vf_stepsize,\n \"vf_iters\": self.vf_iters,\n \"hidden_size_adversary\": self.hidden_size_adversary,\n \"adversary_entcoeff\": self.adversary_entcoeff,\n \"expert_dataset\": self.expert_dataset,\n \"g_step\": self.g_step,\n \"d_step\": self.d_step,\n \"d_stepsize\": self.d_stepsize,\n \"using_gail\": self.using_gail,\n \"verbose\": self.verbose,\n \"policy\": self.policy,\n \"observation_space\": self.observation_space,\n \"action_space\": self.action_space,\n \"n_envs\": self.n_envs,\n \"n_cpu_tf_sess\": self.n_cpu_tf_sess,\n \"seed\": self.seed,\n \"_vectorize_action\": self._vectorize_action,\n \"policy_kwargs\": self.policy_kwargs\n }\n\n params_to_save = self.get_parameters()\n\n self._save_to_file(save_path, data=data, params=params_to_save, cloudpickle=cloudpickle)\n", "import tensorflow as tf\nimport numpy as np\nfrom gym.spaces import Box\nimport copy\nfrom stable_baselines.common.policies import BasePolicy, nature_cnn, register_policy, cnn_1d_extractor\nfrom stable_baselines.sac.policies import mlp\nfrom stable_baselines.a2c.utils import lstm, batch_to_seq, seq_to_batch\n\n\nclass TD3Policy(BasePolicy):\n \"\"\"\n Policy object that implements a TD3-like actor critic\n\n :param sess: (TensorFlow session) The current TensorFlow session\n :param ob_space: (Gym Space) The observation space of the environment\n :param ac_space: (Gym Space) The action space of the environment\n :param n_env: (int) The number of environments to run\n :param n_steps: (int) The number of steps to run for each environment\n :param n_batch: (int) The number of batch to run (n_envs * n_steps)\n :param reuse: (bool) If the policy is reusable or not\n :param scale: (bool) whether or not to scale the input\n \"\"\"\n\n def __init__(self, sess, ob_space, ac_space, n_env=1, n_steps=1, n_batch=None, reuse=False, scale=False,\n add_action_ph=False):\n super(TD3Policy, self).__init__(sess, ob_space, ac_space, n_env, n_steps, n_batch, reuse=reuse, scale=scale,\n add_action_ph=add_action_ph)\n assert isinstance(ac_space, Box), \"Error: the action space must be of type gym.spaces.Box\"\n\n self.qf1 = None\n self.qf2 = None\n self.q_discrepancy = None\n self.policy = None\n\n def make_actor(self, obs=None, reuse=False, scope=\"pi\"):\n \"\"\"\n Creates an actor object\n\n :param obs: (TensorFlow Tensor) The observation placeholder (can be None for default placeholder)\n :param reuse: (bool) whether or not to reuse parameters\n :param scope: (str) the scope name of the actor\n :return: (TensorFlow Tensor) the output tensor\n \"\"\"\n raise NotImplementedError\n\n def make_critics(self, obs=None, action=None, reuse=False,\n scope=\"qvalues_fn\"):\n \"\"\"\n Creates the two Q-Values approximator\n\n :param obs: (TensorFlow Tensor) The observation placeholder (can be None for default 
placeholder)\n :param action: (TensorFlow Tensor) The action placeholder\n :param reuse: (bool) whether or not to reuse parameters\n :param scope: (str) the scope name\n :return: ([tf.Tensor]) Mean, action and log probability\n \"\"\"\n raise NotImplementedError\n\n def step(self, obs, state=None, mask=None):\n \"\"\"\n Returns the policy for a single step\n\n :param obs: ([float] or [int]) The current observation of the environment\n :param state: ([float]) The last states (used in recurrent policies)\n :param mask: ([float]) The last masks (used in recurrent policies)\n :return: ([float]) actions\n \"\"\"\n raise NotImplementedError\n\n def proba_step(self, obs, state=None, mask=None):\n \"\"\"\n Returns the policy for a single step\n\n :param obs: ([float] or [int]) The current observation of the environment\n :param state: ([float]) The last states (used in recurrent policies)\n :param mask: ([float]) The last masks (used in recurrent policies)\n :return: ([float]) actions\n \"\"\"\n return self.step(obs, state, mask)\n\n\nclass FeedForwardPolicy(TD3Policy):\n \"\"\"\n Policy object that implements a DDPG-like actor critic, using a feed forward neural network.\n\n :param sess: (TensorFlow session) The current TensorFlow session\n :param ob_space: (Gym Space) The observation space of the environment\n :param ac_space: (Gym Space) The action space of the environment\n :param n_env: (int) The number of environments to run\n :param n_steps: (int) The number of steps to run for each environment\n :param n_batch: (int) The number of batch to run (n_envs * n_steps)\n :param reuse: (bool) If the policy is reusable or not\n :param layers: ([int]) The size of the Neural network for the policy (if None, default to [64, 64])\n :param cnn_extractor: (function (TensorFlow Tensor, ``**kwargs``): (TensorFlow Tensor)) the CNN feature extraction\n :param feature_extraction: (str) The feature extraction type (\"cnn\" or \"mlp\")\n :param layer_norm: (bool) enable layer normalisation\n :param act_fun: (tf.func) the activation function to use in the neural network.\n :param kwargs: (dict) Extra keyword arguments for the nature CNN feature extraction\n \"\"\"\n\n def __init__(self, sess, ob_space, ac_space, n_env=1, n_steps=1, n_batch=None, reuse=False, layers=None,\n cnn_extractor=nature_cnn, feature_extraction=\"cnn\",\n layer_norm=False, act_fun=tf.nn.relu, obs_module_indices=None, **kwargs):\n super(FeedForwardPolicy, self).__init__(sess, ob_space, ac_space, n_env, n_steps, n_batch,\n reuse=reuse,\n scale=(feature_extraction == \"cnn\" and cnn_extractor == nature_cnn))\n\n self._kwargs_check(feature_extraction, kwargs)\n self.layer_norm = layer_norm\n self.feature_extraction = feature_extraction\n self.cnn_kwargs = kwargs\n self.cnn_extractor = cnn_extractor\n self.cnn_vf = self.cnn_kwargs.pop(\"cnn_vf\", True)\n self.reuse = reuse\n if layers is None:\n layers = [64, 64]\n self.layers = layers\n self.obs_module_indices = obs_module_indices\n self.policy_pre_activation = None\n\n assert len(layers) >= 1, \"Error: must have at least one hidden layer for the policy.\"\n\n self.activ_fn = act_fun\n\n def make_actor(self, obs=None, reuse=False, scope=\"pi\"):\n if obs is None:\n obs = self.processed_obs\n\n if self.obs_module_indices is not None:\n obs = tf.gather(obs, self.obs_module_indices[\"pi\"], axis=-1)\n with tf.variable_scope(scope, reuse=reuse):\n if self.feature_extraction == \"cnn\":\n pi_h = self.cnn_extractor(obs, name=\"pi_c1\", act_fun=self.activ_fn, **self.cnn_kwargs)\n else:\n pi_h = 
tf.layers.flatten(obs)\n\n pi_h = mlp(pi_h, self.layers, self.activ_fn, layer_norm=self.layer_norm)\n\n self.policy_pre_activation = tf.layers.dense(pi_h, self.ac_space.shape[0])\n self.policy = policy = tf.tanh(self.policy_pre_activation)\n\n return policy\n\n def make_critics(self, obs=None, action=None, reuse=False, scope=\"values_fn\", extracted_callback=None):\n if obs is None:\n obs = self.processed_obs\n\n if self.obs_module_indices is not None:\n obs = tf.gather(obs, self.obs_module_indices[\"vf\"], axis=-1)\n\n with tf.variable_scope(scope, reuse=reuse):\n if self.feature_extraction == \"cnn\" and self.cnn_vf:\n critics_h = self.cnn_extractor(obs, name=\"vf_c1\", act_fun=self.activ_fn, **self.cnn_kwargs)\n else:\n critics_h = tf.layers.flatten(obs)\n\n if extracted_callback is not None:\n critics_h = extracted_callback(critics_h)\n\n # Concatenate preprocessed state and action\n qf_h = tf.concat([critics_h, action], axis=-1)\n\n # Double Q values to reduce overestimation\n with tf.variable_scope('qf1', reuse=reuse):\n qf1_h = mlp(qf_h, self.layers, self.activ_fn, layer_norm=self.layer_norm)\n qf1 = tf.layers.dense(qf1_h, 1, name=\"qf1\")\n\n with tf.variable_scope('qf2', reuse=reuse):\n qf2_h = mlp(qf_h, self.layers, self.activ_fn, layer_norm=self.layer_norm)\n qf2 = tf.layers.dense(qf2_h, 1, name=\"qf2\")\n\n self.qf1 = qf1\n self.qf2 = qf2\n # TODO: assumes that all qf1 and qf2 can never have opposite signs\n #self.q_discrepancy = tf.square(self.qf1 - self.qf2) / tf.square(tf.maximum(self.qf1, self.qf2))\n #self.q_discrepancy = tf.abs(self.qf1 - self.qf2)\n\n return self.qf1, self.qf2\n\n def step(self, obs, state=None, mask=None):\n return self.sess.run(self.policy, {self.obs_ph: obs})\n\n def get_q_discrepancy(self, obs):\n if isinstance(obs, np.ndarray) and len(obs.shape) == 1: # TODO: check for MLP or CNN policy here\n obs = np.expand_dims(obs, axis=0)\n return self.sess.run(self.q_discrepancy, {self.obs_ph: obs})\n\n\nclass RecurrentPolicy(TD3Policy):\n \"\"\"\n Policy object that implements a DDPG-like actor critic, using a feed forward neural network.\n\n :param sess: (TensorFlow session) The current TensorFlow session\n :param ob_space: (Gym Space) The observation space of the environment\n :param ac_space: (Gym Space) The action space of the environment\n :param n_env: (int) The number of environments to run\n :param n_steps: (int) The number of steps to run for each environment\n :param n_batch: (int) The number of batch to run (n_envs * n_steps)\n :param reuse: (bool) If the policy is reusable or not\n :param layers: ([int]) The size of the Neural network for the policy (if None, default to [64, 64])\n :param cnn_extractor: (function (TensorFlow Tensor, ``**kwargs``): (TensorFlow Tensor)) the CNN feature extraction\n :param feature_extraction: (str) The feature extraction type (\"cnn\" or \"mlp\")\n :param layer_norm: (bool) enable layer normalisation\n :param act_fun: (tf.func) the activation function to use in the neural network.\n :param kwargs: (dict) Extra keyword arguments for the nature CNN feature extraction\n \"\"\"\n recurrent = True\n\n def __init__(self, sess, ob_space, ac_space, layers, n_env=1, n_steps=1, n_batch=None, reuse=False,\n cnn_extractor=nature_cnn, feature_extraction=\"mlp\", n_lstm=128, share_lstm=False, save_state=False,\n save_target_state=False, layer_norm=False, act_fun=tf.nn.relu, obs_module_indices=None, **kwargs):\n super(RecurrentPolicy, self).__init__(sess, ob_space, ac_space, n_env, n_steps, n_batch,\n reuse=reuse, 
add_action_ph=True,\n scale=(feature_extraction == \"cnn\" and cnn_extractor == nature_cnn))\n\n self._kwargs_check(feature_extraction, kwargs)\n self.layer_norm = layer_norm\n self.feature_extraction = feature_extraction\n self.cnn_kwargs = kwargs\n self.cnn_extractor = cnn_extractor\n self.cnn_vf = self.cnn_kwargs.pop(\"cnn_vf\", True)\n self.reuse = reuse\n self.layers = layers\n self.obs_module_indices = obs_module_indices\n\n self.activ_fn = act_fun\n self.n_lstm = n_lstm\n self.share_lstm = share_lstm\n self._obs_ph = self.processed_obs # Base class has self.obs_ph as property getting self._obs_ph\n self.obs_tp1_ph = self.processed_obs\n\n assert self.n_batch % self.n_steps == 0, \"The batch size must be a multiple of sequence length (n_steps)\"\n self._lstm_n_batch = self.n_batch // self.n_steps\n\n self.action_prev = np.zeros((1, *self.ac_space.shape))\n\n self._initial_state = np.zeros((self._lstm_n_batch, self.n_lstm * 2), dtype=np.float32)\n if self.share_lstm:\n self.state = None\n else:\n self.pi_state = None\n self.qf1_state = None\n self.qf2_state = None\n\n with tf.variable_scope(\"input\", reuse=False):\n self.dones_ph = tf.placeholder_with_default(np.zeros((self.n_batch,), dtype=np.float32), (self.n_batch,), name=\"dones_ph\") # (done t-1)\n if self.share_lstm:\n self.state_ph = tf.placeholder_with_default(self.initial_state, (self._lstm_n_batch, self.n_lstm * 2), name=\"state_ph\")\n else:\n self.pi_state_ph = tf.placeholder_with_default(self.initial_state, (self._lstm_n_batch, self.n_lstm * 2), name=\"pi_state_ph\")\n self.qf1_state_ph = tf.placeholder_with_default(self.initial_state, (self._lstm_n_batch, self.n_lstm * 2), name=\"qf1_state_ph\")\n self.qf2_state_ph = tf.placeholder_with_default(self.initial_state, (self._lstm_n_batch, self.n_lstm * 2), name=\"qf2_state_ph\")\n\n self.action_prev_ph = tf.placeholder(np.float32, (self.n_batch, *self.ac_space.shape), name=\"action_prev_ph\")\n\n self.save_state = save_state\n self.save_target_state = save_target_state\n\n self.extra_phs = [\"action_prev\"]\n self.rnn_inputs = [\"obs\", \"action_prev\"]\n self.extra_data_names = [\"action_prev\"]\n\n if self.save_target_state:\n self.extra_data_names = sorted(self.extra_data_names + [\"target_action_prev\"])\n self.rnn_inputs = sorted(self.rnn_inputs + [\"obs_tp1\"])\n self.extra_phs = sorted(self.extra_phs + [\"target_action_prev\"])\n\n if self.save_state:\n state_names = [\"state\"] if self.share_lstm else [\"pi_state\", \"qf1_state\", \"qf2_state\"]\n if self.save_target_state:\n state_names.extend([\"target_\" + state_name for state_name in state_names])\n if self.share_lstm:\n self.extra_data_names = sorted(self.extra_data_names + state_names)\n self.extra_phs = sorted(self.extra_phs + state_names)\n else:\n self.extra_data_names = sorted(self.extra_data_names + state_names)\n self.extra_phs = sorted(self.extra_phs + state_names)\n\n def _process_phs(self, **phs):\n for ph_name, ph_val in phs.items():\n if ph_val is None:\n phs[ph_name] = getattr(self, ph_name + \"_ph\")\n else:\n try:\n setattr(self, ph_name + \"_ph\", ph_val)\n except AttributeError:\n setattr(self, \"_\" + ph_name + \"_ph\", ph_val)\n\n return phs.values()\n\n def _make_branch(self, branch_name, input_tensor, dones=None, state_ph=None):\n if branch_name == \"lstm\":\n for i, fc_layer_units in enumerate(self.layers[\"lstm\"]):\n input_tensor = self.activ_fn(tf.layers.dense(input_tensor, fc_layer_units, name=\"lstm_fc{}\".format(i)))\n\n input_tensor = batch_to_seq(input_tensor, self._lstm_n_batch, 
self.n_steps)\n masks = batch_to_seq(dones, self._lstm_n_batch, self.n_steps)\n input_tensor, state = lstm(input_tensor, masks, state_ph, \"lstm\", n_hidden=self.n_lstm,\n layer_norm=self.layer_norm)\n input_tensor = seq_to_batch(input_tensor)\n\n return input_tensor, state\n else:\n for i, fc_layer_units in enumerate(self.layers[branch_name]):\n input_tensor = self.activ_fn(tf.layers.dense(input_tensor, fc_layer_units, name=\"{}_fc{}\".format(branch_name, i)))\n\n return input_tensor\n\n def make_actor(self, ff_phs=None, rnn_phs=None, dones=None, reuse=False, scope=\"pi\"):\n lstm_branch = tf.concat([tf.layers.flatten(ph) for ph in rnn_phs], axis=-1)\n if ff_phs is not None:\n ff_branch = tf.concat([tf.layers.flatten(ph) for ph in ff_phs], axis=-1)\n\n if dones is None:\n dones = self.dones_ph\n\n if self.share_lstm:\n with tf.variable_scope(\"shared\", reuse=tf.AUTO_REUSE):\n lstm_branch, self.state = self._make_branch(\"lstm\", lstm_branch, dones, self.state_ph)\n\n with tf.variable_scope(scope, reuse=reuse):\n if self.layers[\"ff\"] is not None:\n ff_branch = self._make_branch(\"ff\", ff_branch)\n\n if not self.share_lstm:\n lstm_branch, self.pi_state = self._make_branch(\"lstm\", lstm_branch, dones, self.pi_state_ph)\n\n if ff_phs is not None:\n head = tf.concat([ff_branch, lstm_branch], axis=-1)\n else:\n head = lstm_branch\n\n head = self._make_branch(\"head\", head)\n\n self.policy_pre_activation = tf.layers.dense(head, self.ac_space.shape[0])\n self.policy = policy = tf.tanh(self.policy_pre_activation)\n\n return policy\n\n def make_critics(self, ff_phs=None, rnn_phs=None, dones=None, reuse=False, scope=\"values_fn\"):\n lstm_branch_in = tf.concat([tf.layers.flatten(ph) for ph in rnn_phs], axis=-1)\n if ff_phs is not None:\n ff_branch_in = tf.concat([tf.layers.flatten(ph) for ph in ff_phs], axis=-1)\n\n if dones is None:\n dones = self.dones_ph\n\n self.qf1, self.qf2 = None, None\n self.qf1_state, self.qf2_state = None, None\n\n if self.share_lstm:\n with tf.variable_scope(\"shared\", reuse=tf.AUTO_REUSE):\n lstm_branch_s, self.state = self._make_branch(\"lstm\", lstm_branch_in, dones, self.state_ph)\n\n with tf.variable_scope(scope, reuse=reuse):\n # Double Q values to reduce overestimation\n for qf_i in range(1, 3):\n with tf.variable_scope('qf{}'.format(qf_i), reuse=reuse):\n lstm_branch = lstm_branch_in\n if self.layers[\"ff\"] is not None:\n ff_branch = self._make_branch(\"ff\", ff_branch_in)\n elif ff_phs is not None:\n ff_branch = ff_branch_in\n\n if not self.share_lstm:\n lstm_branch, state = self._make_branch(\"lstm\", lstm_branch, dones,\n getattr(self, \"qf{}_state_ph\".format(qf_i)))\n setattr(self, \"qf{}_state\".format(qf_i), state)\n else:\n lstm_branch = lstm_branch_s\n\n if ff_phs is not None:\n head = tf.concat([ff_branch, lstm_branch], axis=-1)\n else:\n head = lstm_branch\n\n head = self._make_branch(\"head\", head)\n\n setattr(self, \"qf{}\".format(qf_i), tf.layers.dense(head, 1, name=\"qf{}\".format(qf_i)))\n\n return self.qf1, self.qf2\n\n def step(self, obs, action_prev=None, state=None, mask=None, feed_dict=None, **kwargs):\n if feed_dict is None:\n feed_dict = {}\n if state is None:\n state = self.initial_state\n if mask is None:\n mask = np.array([False])\n if action_prev is None:\n assert obs.shape[0] == 1\n if mask[0]:\n self.action_prev = np.zeros((1, *self.ac_space.shape))\n action_prev = self.action_prev\n\n rnn_node = self.state if self.share_lstm else self.pi_state\n state_ph = self.state_ph if self.share_lstm else self.pi_state_ph\n\n 
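# Single-step policy query: feed the current observation, the selected LSTM state
# placeholder, the episode-start mask and the previous action, then run the policy
# output together with the matching LSTM state node so the caller can carry the
# recurrent state over to the next step.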
feed_dict.update({self.obs_ph: obs, state_ph: state, self.dones_ph: mask,\n self.action_prev_ph: action_prev})\n\n action, out_state = self.sess.run([self.policy, rnn_node], feed_dict)\n self.action_prev = action\n\n return action, out_state\n\n @property\n def initial_state(self):\n return self._initial_state\n\n def collect_data(self, _locals, _globals):\n data = {}\n if self.save_state:\n if self.share_lstm:\n data[\"state\"] = _locals[\"prev_policy_state\"][0, :]\n else:\n data[\"pi_state\"] = _locals[\"prev_policy_state\"][0, :]\n if len(_locals[\"episode_data\"]) == 0:\n qf1_state, qf2_state = self.initial_state, self.initial_state\n else:\n qf_feed_dict = {\n self.qf1_state_ph: _locals[\"episode_data\"][-1][\"qf1_state\"][None],\n self.qf2_state_ph: _locals[\"episode_data\"][-1][\"qf2_state\"][None],\n }\n qf_feed_dict.update({getattr(self, data_name + \"_ph\"): _locals[\"episode_data\"][-1][data_name][None]\n for data_name in self.rnn_inputs})\n qf1_state, qf2_state = self.sess.run([self.qf1_state, self.qf2_state], feed_dict=qf_feed_dict)\n data[\"qf1_state\"] = qf1_state[0, :]\n data[\"qf2_state\"] = qf2_state[0, :]\n\n if len(_locals[\"episode_data\"]) == 0:\n data[\"action_prev\"] = np.zeros(*self.ac_space.shape, dtype=np.float32)\n else:\n data[\"action_prev\"] = _locals[\"episode_data\"][-1][\"action\"]\n\n if self.save_target_state:\n data[\"target_action_prev_rnn\"] = _locals[\"action\"]\n\n return data\n\n\nclass DRPolicy(RecurrentPolicy):\n \"\"\"\n Policy object that implements a DDPG-like actor critic, using a feed forward neural network.\n\n :param sess: (TensorFlow session) The current TensorFlow session\n :param ob_space: (Gym Space) The observation space of the environment\n :param ac_space: (Gym Space) The action space of the environment\n :param n_env: (int) The number of environments to run\n :param n_steps: (int) The number of steps to run for each environment\n :param n_batch: (int) The number of batch to run (n_envs * n_steps)\n :param reuse: (bool) If the policy is reusable or not\n :param layers: ([int]) The size of the Neural network for the policy (if None, default to [64, 64])\n :param cnn_extractor: (function (TensorFlow Tensor, ``**kwargs``): (TensorFlow Tensor)) the CNN feature extraction\n :param feature_extraction: (str) The feature extraction type (\"cnn\" or \"mlp\")\n :param layer_norm: (bool) enable layer normalisation\n :param act_fun: (tf.func) the activation function to use in the neural network.\n :param kwargs: (dict) Extra keyword arguments for the nature CNN feature extraction\n \"\"\"\n recurrent = True\n\n def __init__(self, sess, ob_space, ac_space, goal_size, my_size, n_env=1, n_steps=1, n_batch=None, reuse=False, layers=None,\n cnn_extractor=nature_cnn, feature_extraction=\"mlp\", n_lstm=128, share_lstm=False,\n layer_norm=False, act_fun=tf.nn.relu, obs_module_indices=None, **kwargs):\n if layers is None:\n layers = {\"ff\": [128], \"lstm\": [128], \"head\": [128, 128]}\n super().__init__(sess, ob_space, ac_space, layers, n_env, n_steps, n_batch,\n reuse=reuse, cnn_extractor=cnn_extractor,\n feature_extraction=feature_extraction, n_lstm=n_lstm,\n share_lstm=share_lstm, layer_norm=layer_norm, act_fun=act_fun,\n obs_module_indices=obs_module_indices, **kwargs)\n\n with tf.variable_scope(\"input\", reuse=False):\n self.my_ph = tf.placeholder(tf.float32, (None, my_size), name=\"my_ph\") # the dynamics of the environment\n\n self.goal_size = goal_size\n self.extra_phs = sorted(self.extra_phs + [\"my\"])\n self.extra_data_names = 
sorted(self.extra_data_names + [\"my\"])\n\n def make_actor(self, obs_ff=None, obs_rnn=None, action_prev=None, dones=None, reuse=False, scope=\"pi\"):\n if obs_ff is None:\n obs_ff = self.processed_obs\n if obs_rnn is None:\n obs_rnn = self.processed_obs\n if action_prev is None:\n action_prev = self.action_prev_ph\n\n obs_ff, goal = obs_ff[:, :-self.goal_size], obs_ff[:, -self.goal_size:]\n goal = tf.subtract(goal, obs_ff[:, -self.goal_size:], name=\"goal_relative\")\n obs_rnn = obs_rnn[:, :-self.goal_size]\n\n ff_phs = [obs_ff, goal]\n rnn_phs = [obs_rnn, action_prev]\n return super().make_actor(ff_phs=ff_phs, rnn_phs=rnn_phs, dones=dones, reuse=reuse, scope=scope)\n\n def make_critics(self, obs_ff=None, action_ff=None, my=None, obs_rnn=None, action_prev=None, dones=None, reuse=False, scope=\"values_fn\"):\n if obs_ff is None:\n obs_ff = self.processed_obs\n if action_ff is None:\n action_ff = self.action_ph\n if my is None:\n my = self.my_ph\n if obs_rnn is None:\n obs_rnn = self.processed_obs\n if action_prev is None:\n action_prev = self.action_prev_ph\n\n obs_ff, goal = obs_ff[:, :-self.goal_size], obs_ff[:, -self.goal_size:]\n goal = tf.subtract(goal, obs_ff[:, -self.goal_size:], name=\"goal_relative\")\n obs_rnn = obs_rnn[:, :-self.goal_size]\n\n ff_phs = [obs_ff, goal, my, action_ff]\n rnn_phs = [obs_rnn, action_prev]\n return super().make_critics(ff_phs=ff_phs, rnn_phs=rnn_phs, dones=dones, reuse=reuse, scope=scope)\n\n def collect_data(self, _locals, _globals, **kwargs):\n data = super().collect_data(_locals, _globals)\n if \"my\" not in _locals or _locals[\"episode_data\"]:\n data[\"my\"] = _locals[\"self\"].env.get_env_parameters()\n\n return data\n\n\nclass LstmMlpPolicy(RecurrentPolicy):\n recurrent = True\n\n def __init__(self, sess, ob_space, ac_space, n_env=1, n_steps=1, n_batch=None, reuse=False,\n layers=None,\n cnn_extractor=nature_cnn, feature_extraction=\"mlp\", n_lstm=128, share_lstm=False,\n layer_norm=False, act_fun=tf.nn.relu, obs_module_indices=None, **kwargs):\n if layers is None:\n layers = {\"ff\": None, \"lstm\": [64, 64], \"head\": []}\n else:\n assert layers[\"ff\"] is None\n super().__init__(sess, ob_space, ac_space, layers, n_env, n_steps, n_batch,\n reuse=reuse, cnn_extractor=cnn_extractor,\n feature_extraction=feature_extraction, n_lstm=n_lstm,\n share_lstm=share_lstm, layer_norm=layer_norm, act_fun=act_fun,\n obs_module_indices=obs_module_indices, **kwargs)\n\n def make_actor(self, obs=None, action_prev=None, dones=None, reuse=False, scope=\"pi\"):\n obs, action_prev, dones = self._process_phs(obs=obs, action_prev=action_prev, dones=dones)\n\n ff_phs = None\n rnn_phs = [obs, action_prev]\n return super().make_actor(ff_phs=ff_phs, rnn_phs=rnn_phs, dones=dones, reuse=reuse, scope=scope)\n\n def make_critics(self, obs=None, action=None, action_prev=None, dones=None, reuse=False, scope=\"values_fn\"):\n obs, action, action_prev, dones = self._process_phs(obs=obs, action=action, action_prev=action_prev, dones=dones)\n\n ff_phs = [action]\n rnn_phs = [obs, action_prev]\n return super().make_critics(ff_phs=ff_phs, rnn_phs=rnn_phs, dones=dones, reuse=reuse, scope=scope)\n\n\nclass LstmFFMlpPolicy(RecurrentPolicy):\n recurrent = True\n\n def __init__(self, sess, ob_space, ac_space, n_env=1, n_steps=1, n_batch=None, reuse=False,\n layers=None,\n cnn_extractor=nature_cnn, feature_extraction=\"mlp\", n_lstm=128, share_lstm=False,\n layer_norm=False, act_fun=tf.nn.relu, obs_module_indices=None, **kwargs):\n if layers is None:\n layers = {\"ff\": [64], \"lstm\": 
[64, 64], \"head\": []}\n\n super().__init__(sess, ob_space, ac_space, layers, n_env, n_steps, n_batch,\n reuse=reuse, cnn_extractor=cnn_extractor,\n feature_extraction=feature_extraction, n_lstm=n_lstm,\n share_lstm=share_lstm, layer_norm=layer_norm, act_fun=act_fun,\n obs_module_indices=obs_module_indices, **kwargs)\n\n def make_actor(self, obs=None, action_prev=None, dones=None, reuse=False, scope=\"pi\"):\n obs, action_prev, dones = self._process_phs(obs=obs, action_prev=action_prev, dones=dones)\n\n ff_phs = [obs]\n rnn_phs = [obs, action_prev]\n return super().make_actor(ff_phs=ff_phs, rnn_phs=rnn_phs, dones=dones, reuse=reuse, scope=scope)\n\n def make_critics(self, obs=None, action=None, action_prev=None, dones=None, reuse=False, scope=\"values_fn\"):\n obs, action, action_prev, dones = self._process_phs(obs=obs, action=action, action_prev=action_prev, dones=dones)\n\n ff_phs = [obs, action]\n rnn_phs = [obs, action_prev]\n return super().make_critics(ff_phs=ff_phs, rnn_phs=rnn_phs, dones=dones, reuse=reuse, scope=scope)\n\n\nclass CnnPolicy(FeedForwardPolicy):\n \"\"\"\n Policy object that implements actor critic, using a CNN (the nature CNN)\n\n :param sess: (TensorFlow session) The current TensorFlow session\n :param ob_space: (Gym Space) The observation space of the environment\n :param ac_space: (Gym Space) The action space of the environment\n :param n_env: (int) The number of environments to run\n :param n_steps: (int) The number of steps to run for each environment\n :param n_batch: (int) The number of batch to run (n_envs * n_steps)\n :param reuse: (bool) If the policy is reusable or not\n :param _kwargs: (dict) Extra keyword arguments for the nature CNN feature extraction\n \"\"\"\n\n def __init__(self, sess, ob_space, ac_space, n_env=1, n_steps=1, n_batch=None, reuse=False, **_kwargs):\n super(CnnPolicy, self).__init__(sess, ob_space, ac_space, n_env, n_steps, n_batch, reuse,\n feature_extraction=\"cnn\", **_kwargs)\n\n\nclass CnnMlpPolicy(FeedForwardPolicy):\n \"\"\"\n Policy object that implements actor critic, using a CNN (the nature CNN)\n\n :param sess: (TensorFlow session) The current TensorFlow session\n :param ob_space: (Gym Space) The observation space of the environment\n :param ac_space: (Gym Space) The action space of the environment\n :param n_env: (int) The number of environments to run\n :param n_steps: (int) The number of steps to run for each environment\n :param n_batch: (int) The number of batch to run (n_envs * n_steps)\n :param reuse: (bool) If the policy is reusable or not\n :param _kwargs: (dict) Extra keyword arguments for the nature CNN feature extraction\n \"\"\"\n\n def __init__(self, sess, ob_space, ac_space, n_env=1, n_steps=1, n_batch=None, reuse=False, **_kwargs):\n super(CnnMlpPolicy, self).__init__(sess, ob_space, ac_space, n_env, n_steps, n_batch, reuse,\n cnn_extractor=cnn_1d_extractor, feature_extraction=\"cnn\", **_kwargs)\n\n\nclass DRCnnMlpPolicy(FeedForwardPolicy):\n \"\"\"\n Policy object that implements actor critic, using a CNN (the nature CNN)\n\n :param sess: (TensorFlow session) The current TensorFlow session\n :param ob_space: (Gym Space) The observation space of the environment\n :param ac_space: (Gym Space) The action space of the environment\n :param n_env: (int) The number of environments to run\n :param n_steps: (int) The number of steps to run for each environment\n :param n_batch: (int) The number of batch to run (n_envs * n_steps)\n :param reuse: (bool) If the policy is reusable or not\n :param _kwargs: (dict) Extra 
keyword arguments for the nature CNN feature extraction\n \"\"\"\n\n def __init__(self, sess, ob_space, ac_space, my_size, n_env=1, n_steps=1, n_batch=None, reuse=False, **_kwargs):\n super(DRCnnMlpPolicy, self).__init__(sess, ob_space, ac_space, n_env, n_steps, n_batch, reuse,\n cnn_extractor=cnn_1d_extractor, feature_extraction=\"cnn\", **_kwargs)\n\n with tf.variable_scope(\"input\", reuse=False):\n self.my_ph = tf.placeholder(tf.float32, (self.n_batch, *my_size), name=\"my_ph\") # (done t-1)\n self.extra_phs = [\"my\", \"target_my\"]\n self.extra_data_names = [\"my\", \"target_my\"]\n\n def make_critics(self, obs=None, action=None, my=None, reuse=False, scope=\"values_fn\"):\n if my is None:\n my = self.my_ph\n\n return super().make_critics(obs, action, reuse, scope, extracted_callback=lambda x: tf.concat([x, my], axis=-1))\n\n def collect_data(self, _locals, _globals):\n data = []\n for env_i in range(_locals[\"self\"].n_envs):\n d = {}\n if len(_locals[\"episode_data\"][env_i]) == 0 or \"my\" not in _locals[\"episode_data\"][env_i]:\n if _locals[\"self\"].n_envs == 1:\n d[\"my\"] = _locals[\"self\"].env.get_env_parameters()\n else:\n d[\"my\"] = _locals[\"self\"].env.env_method(\"get_env_parameters\", indices=env_i)[0]\n else:\n d[\"my\"] = _locals[\"episode_data\"][env_i][-1][\"my\"]\n\n d[\"target_my\"] = d[\"my\"]\n data.append(d)\n\n return data\n\n\nclass DRMyEstPolicy(FeedForwardPolicy):\n \"\"\"\n Policy object that implements actor critic, using a CNN (the nature CNN)\n\n :param sess: (TensorFlow session) The current TensorFlow session\n :param ob_space: (Gym Space) The observation space of the environment\n :param ac_space: (Gym Space) The action space of the environment\n :param n_env: (int) The number of environments to run\n :param n_steps: (int) The number of steps to run for each environment\n :param n_batch: (int) The number of batch to run (n_envs * n_steps)\n :param reuse: (bool) If the policy is reusable or not\n :param _kwargs: (dict) Extra keyword arguments for the nature CNN feature extraction\n \"\"\"\n\n def __init__(self, sess, ob_space, ac_space, my_size, n_env=1, n_steps=1, n_batch=None, reuse=False, loss_weight=1e-3, **_kwargs):\n super().__init__(sess, ob_space, ac_space, n_env, n_steps, n_batch, reuse,\n cnn_extractor=cnn_1d_extractor, feature_extraction=\"mlp\", **_kwargs)\n\n self._obs_ph = self.processed_obs # Base class has self.obs_ph as property getting self._obs_ph\n with tf.variable_scope(\"input\", reuse=False):\n self.my_ph = tf.placeholder(tf.float32, (self.n_batch, *my_size), name=\"my_ph\") # (done t-1)\n self.action_prev_ph = tf.placeholder(tf.float32, (self.n_batch, *self.ac_space.shape), name=\"action_prev_ph\")\n self.obs_prev_ph = tf.placeholder(tf.float32, (self.n_batch, *self.ob_space.shape), name=\"obs_prev_ph\")\n\n self.loss_weight = loss_weight\n self.obs_prev = np.zeros((1, *self.ob_space.shape))\n self.action_prev = np.zeros((1, *self.ac_space.shape))\n self.my_est_loss_op = None\n self.my_est_op = None\n self.policy_loss = None\n self.my_est = None\n self.extra_phs = [\"my\", \"action_prev\", \"obs_prev\", \"target_my\", \"target_action_prev\", \"target_obs_prev\"]\n self.extra_data_names = [\"my\", \"action_prev\", \"obs_prev\", \"target_my\", \"target_action_prev\", \"target_obs_prev\"]\n\n def _process_phs(self, **phs):\n for ph_name, ph_val in phs.items():\n if ph_val is None:\n phs[ph_name] = getattr(self, ph_name + \"_ph\")\n else:\n try:\n setattr(self, ph_name + \"_ph\", ph_val)\n except AttributeError:\n setattr(self, 
\"_\" + ph_name + \"_ph\", ph_val)\n\n return phs.values()\n\n def make_actor(self, obs=None, obs_prev=None, action_prev=None, my_gt=None, reuse=False, scope=\"pi\"):\n obs, obs_prev, action_prev, my_gt = self._process_phs(obs=obs, obs_prev=obs_prev, action_prev=action_prev, my=my_gt)\n\n if self.obs_module_indices is not None:\n obs = tf.gather(obs, self.obs_module_indices[\"pi\"], axis=-1)\n obs_prev = tf.gather(obs_prev, self.obs_module_indices[\"pi\"], axis=-1)\n\n with tf.variable_scope(scope + \"/my\", reuse=reuse):\n my_h = tf.concat([obs, obs_prev, action_prev], axis=-1)\n my_h = mlp(my_h, [64, 64], self.activ_fn, layer_norm=self.layer_norm)\n self.my_est_op = tf.layers.dense(my_h, self.my_ph.shape[-1])\n self.my_est_loss_op = tf.reduce_mean((self.my_est_op - my_gt) ** 2)\n self.policy_loss = self.loss_weight * self.my_est_loss_op\n\n obs = tf.concat([obs, self.my_est_op], axis=-1)\n\n with tf.variable_scope(scope, reuse=reuse):\n if self.feature_extraction == \"cnn\":\n pi_h = self.cnn_extractor(obs, name=\"pi_c1\", act_fun=self.activ_fn, **self.cnn_kwargs)\n else:\n pi_h = tf.layers.flatten(obs)\n\n pi_h = mlp(pi_h, self.layers, self.activ_fn, layer_norm=self.layer_norm)\n\n self.policy_pre_activation = tf.layers.dense(pi_h, self.ac_space.shape[0])\n self.policy = policy = tf.tanh(self.policy_pre_activation)\n\n return policy\n\n def make_critics(self, obs=None, action=None, my=None, reuse=False, scope=\"values_fn\"):\n obs, action, my = self._process_phs(obs=obs, action=action, my=my)\n\n return super().make_critics(obs, action, reuse, scope, extracted_callback=lambda x: tf.concat([x, my], axis=-1))\n\n def collect_data(self, _locals, _globals):\n data = {}\n if \"my\" not in _locals or _locals[\"episode_data\"]:\n data[\"my\"] = _locals[\"self\"].env.get_env_parameters()\n data[\"target_my\"] = data[\"my\"]\n if len(_locals[\"episode_data\"]) == 0:\n data[\"obs_prev\"] = _locals[\"obs\"]\n data[\"action_prev\"] = _locals[\"action\"]\n else:\n data[\"obs_prev\"] = _locals[\"episode_data\"][-1][\"obs\"]\n data[\"action_prev\"] = _locals[\"episode_data\"][-1][\"action\"]\n data[\"target_obs_prev\"] = data[\"obs_prev\"]\n data[\"target_action_prev\"] = data[\"action_prev\"]\n\n return data\n \n def step(self, obs, obs_prev=None, action_prev=None, mask=None):\n if action_prev is None:\n assert obs.shape[0] == 1\n if mask is not None and mask[0]:\n self.action_prev = np.zeros((1, *self.ac_space.shape))\n action_prev = self.action_prev\n if obs_prev is None:\n if mask is not None and mask[0]:\n self.obs_prev = np.zeros((1, *self.ob_space.shape))\n obs_prev = self.obs_prev\n\n action, my_est = self.sess.run([self.policy, self.my_est_op], {self.obs_ph: obs,\n self.action_prev_ph: action_prev,\n self.obs_prev_ph: obs_prev})\n self.action_prev = action\n self.obs_prev = obs\n self.my_est = my_est\n\n #return action, my_est\n return action\n\n\nclass LnCnnPolicy(FeedForwardPolicy):\n \"\"\"\n Policy object that implements actor critic, using a CNN (the nature CNN), with layer normalisation\n\n :param sess: (TensorFlow session) The current TensorFlow session\n :param ob_space: (Gym Space) The observation space of the environment\n :param ac_space: (Gym Space) The action space of the environment\n :param n_env: (int) The number of environments to run\n :param n_steps: (int) The number of steps to run for each environment\n :param n_batch: (int) The number of batch to run (n_envs * n_steps)\n :param reuse: (bool) If the policy is reusable or not\n :param _kwargs: (dict) Extra keyword 
arguments for the nature CNN feature extraction\n \"\"\"\n\n def __init__(self, sess, ob_space, ac_space, n_env=1, n_steps=1, n_batch=None, reuse=False, **_kwargs):\n super(LnCnnPolicy, self).__init__(sess, ob_space, ac_space, n_env, n_steps, n_batch, reuse,\n feature_extraction=\"cnn\", layer_norm=True, **_kwargs)\n\n\nclass MlpPolicy(FeedForwardPolicy):\n \"\"\"\n Policy object that implements actor critic, using a MLP (2 layers of 64)\n\n :param sess: (TensorFlow session) The current TensorFlow session\n :param ob_space: (Gym Space) The observation space of the environment\n :param ac_space: (Gym Space) The action space of the environment\n :param n_env: (int) The number of environments to run\n :param n_steps: (int) The number of steps to run for each environment\n :param n_batch: (int) The number of batch to run (n_envs * n_steps)\n :param reuse: (bool) If the policy is reusable or not\n :param _kwargs: (dict) Extra keyword arguments for the nature CNN feature extraction\n \"\"\"\n\n def __init__(self, sess, ob_space, ac_space, n_env=1, n_steps=1, n_batch=None, reuse=False, **_kwargs):\n super(MlpPolicy, self).__init__(sess, ob_space, ac_space, n_env, n_steps, n_batch, reuse,\n feature_extraction=\"mlp\", **_kwargs)\n\n\nclass LnMlpPolicy(FeedForwardPolicy):\n \"\"\"\n Policy object that implements actor critic, using a MLP (2 layers of 64), with layer normalisation\n\n :param sess: (TensorFlow session) The current TensorFlow session\n :param ob_space: (Gym Space) The observation space of the environment\n :param ac_space: (Gym Space) The action space of the environment\n :param n_env: (int) The number of environments to run\n :param n_steps: (int) The number of steps to run for each environment\n :param n_batch: (int) The number of batch to run (n_envs * n_steps)\n :param reuse: (bool) If the policy is reusable or not\n :param _kwargs: (dict) Extra keyword arguments for the nature CNN feature extraction\n \"\"\"\n\n def __init__(self, sess, ob_space, ac_space, n_env=1, n_steps=1, n_batch=None, reuse=False, **_kwargs):\n super(LnMlpPolicy, self).__init__(sess, ob_space, ac_space, n_env, n_steps, n_batch, reuse,\n feature_extraction=\"mlp\", layer_norm=True, **_kwargs)\n\n\nregister_policy(\"LstmFFMlpPolicy\", LstmFFMlpPolicy)\nregister_policy(\"LstmMlpPolicy\", LstmMlpPolicy)\nregister_policy(\"DRPolicy\", DRPolicy)\nregister_policy(\"CnnPolicy\", CnnPolicy)\nregister_policy(\"LnCnnPolicy\", LnCnnPolicy)\nregister_policy(\"MlpPolicy\", MlpPolicy)\nregister_policy(\"LnMlpPolicy\", LnMlpPolicy)\nregister_policy(\"CnnMlpPolicy\", CnnMlpPolicy)\nregister_policy(\"DRCnnMlpPolicy\", DRCnnMlpPolicy)\nregister_policy(\"DRMyEstPolicy\", DRMyEstPolicy)\n", "import time\n\nimport gym\nimport numpy as np\nimport tensorflow as tf\n\nfrom stable_baselines import logger\nfrom stable_baselines.common import explained_variance, ActorCriticRLModel, tf_util, SetVerbosity, TensorboardWriter\nfrom stable_baselines.common.runners import AbstractEnvRunner\nfrom stable_baselines.common.policies import ActorCriticPolicy, RecurrentActorCriticPolicy\nfrom stable_baselines.common.schedules import get_schedule_fn\nfrom stable_baselines.common.tf_util import total_episode_reward_logger\nfrom stable_baselines.common.math_util import safe_mean\n\nclass PPO2(ActorCriticRLModel):\n \"\"\"\n Proximal Policy Optimization algorithm (GPU version).\n Paper: https://arxiv.org/abs/1707.06347\n\n :param policy: (ActorCriticPolicy or str) The policy model to use (MlpPolicy, CnnPolicy, CnnLstmPolicy, ...)\n :param env: (Gym 
environment or str) The environment to learn from (if registered in Gym, can be str)\n :param gamma: (float) Discount factor\n :param n_steps: (int) The number of steps to run for each environment per update\n (i.e. batch size is n_steps * n_env where n_env is number of environment copies running in parallel)\n :param ent_coef: (float) Entropy coefficient for the loss calculation\n :param learning_rate: (float or callable) The learning rate, it can be a function\n :param vf_coef: (float) Value function coefficient for the loss calculation\n :param max_grad_norm: (float) The maximum value for the gradient clipping\n :param lam: (float) Factor for trade-off of bias vs variance for Generalized Advantage Estimator\n :param nminibatches: (int) Number of training minibatches per update. For recurrent policies,\n the number of environments run in parallel should be a multiple of nminibatches.\n :param noptepochs: (int) Number of epoch when optimizing the surrogate\n :param cliprange: (float or callable) Clipping parameter, it can be a function\n :param cliprange_vf: (float or callable) Clipping parameter for the value function, it can be a function.\n This is a parameter specific to the OpenAI implementation. If None is passed (default),\n then `cliprange` (that is used for the policy) will be used.\n IMPORTANT: this clipping depends on the reward scaling.\n To deactivate value function clipping (and recover the original PPO implementation),\n you have to pass a negative value (e.g. -1).\n :param verbose: (int) the verbosity level: 0 none, 1 training information, 2 tensorflow debug\n :param tensorboard_log: (str) the log location for tensorboard (if None, no logging)\n :param _init_setup_model: (bool) Whether or not to build the network at the creation of the instance\n :param policy_kwargs: (dict) additional arguments to be passed to the policy on creation\n :param full_tensorboard_log: (bool) enable additional logging when using tensorboard\n WARNING: this logging can take a lot of space quickly\n :param seed: (int) Seed for the pseudo-random generators (python, numpy, tensorflow).\n If None (default), use random seed. 
Note that if you want completely deterministic\n results, you must set `n_cpu_tf_sess` to 1.\n :param n_cpu_tf_sess: (int) The number of threads for TensorFlow operations\n If None, the number of cpu of the current machine will be used.\n \"\"\"\n def __init__(self, policy, env, gamma=0.99, n_steps=128, ent_coef=0.01, learning_rate=2.5e-4, vf_coef=0.5,\n max_grad_norm=0.5, lam=0.95, nminibatches=4, noptepochs=4, cliprange=0.2, cliprange_vf=None,\n verbose=0, tensorboard_log=None, _init_setup_model=True, policy_kwargs=None,\n full_tensorboard_log=False, seed=None, n_cpu_tf_sess=None):\n\n self.learning_rate = learning_rate\n self.cliprange = cliprange\n self.cliprange_vf = cliprange_vf\n self.n_steps = n_steps\n self.ent_coef = ent_coef\n self.vf_coef = vf_coef\n self.max_grad_norm = max_grad_norm\n self.gamma = gamma\n self.lam = lam\n self.nminibatches = nminibatches\n self.noptepochs = noptepochs\n self.tensorboard_log = tensorboard_log\n self.full_tensorboard_log = full_tensorboard_log\n\n self.action_ph = None\n self.advs_ph = None\n self.rewards_ph = None\n self.old_neglog_pac_ph = None\n self.old_vpred_ph = None\n self.learning_rate_ph = None\n self.clip_range_ph = None\n self.entropy = None\n self.vf_loss = None\n self.pg_loss = None\n self.approxkl = None\n self.clipfrac = None\n self._train = None\n self.loss_names = None\n self.train_model = None\n self.act_model = None\n self.value = None\n self.n_batch = None\n self.summary = None\n\n super().__init__(policy=policy, env=env, verbose=verbose, requires_vec_env=True,\n _init_setup_model=_init_setup_model, policy_kwargs=policy_kwargs,\n seed=seed, n_cpu_tf_sess=n_cpu_tf_sess)\n\n self.active_sampling = False\n\n if _init_setup_model:\n self.setup_model()\n\n def _make_runner(self):\n return Runner(env=self.env, model=self, n_steps=self.n_steps,\n gamma=self.gamma, lam=self.lam)\n\n def _get_pretrain_placeholders(self, get_vf=False):\n policy = self.act_model\n if isinstance(self.action_space, gym.spaces.Discrete):\n return policy.obs_ph, self.action_ph, policy.policy\n if get_vf:\n return policy.obs_ph, self.action_ph, policy.deterministic_action, self.train_model.obs_ph, self.rewards_ph, self.train_model._value_flat\n else:\n return policy.obs_ph, self.action_ph, policy.deterministic_action\n\n def setup_model(self):\n with SetVerbosity(self.verbose):\n\n assert issubclass(self.policy, ActorCriticPolicy), \"Error: the input policy for the PPO2 model must be \" \\\n \"an instance of common.policies.ActorCriticPolicy.\"\n\n self.n_batch = self.n_envs * self.n_steps\n\n self.graph = tf.Graph()\n with self.graph.as_default():\n self.set_random_seed(self.seed)\n self.sess = tf_util.make_session(num_cpu=self.n_cpu_tf_sess, graph=self.graph)\n\n n_batch_step = None\n n_batch_train = None\n if issubclass(self.policy, RecurrentActorCriticPolicy):\n assert self.n_envs % self.nminibatches == 0, \"For recurrent policies, \"\\\n \"the number of environments run in parallel should be a multiple of nminibatches.\"\n n_batch_step = self.n_envs\n n_batch_train = self.n_batch // self.nminibatches\n\n act_model = self.policy(self.sess, self.observation_space, self.action_space, self.n_envs, 1,\n n_batch_step, reuse=False, **self.policy_kwargs)\n with tf.variable_scope(\"train_model\", reuse=True,\n custom_getter=tf_util.outer_scope_getter(\"train_model\")):\n train_model = self.policy(self.sess, self.observation_space, self.action_space,\n self.n_envs // self.nminibatches, self.n_steps, n_batch_train,\n reuse=True, **self.policy_kwargs)\n\n with 
tf.variable_scope(\"loss\", reuse=False):\n self.action_ph = train_model.pdtype.sample_placeholder([None], name=\"action_ph\")\n self.advs_ph = tf.placeholder(tf.float32, [None], name=\"advs_ph\")\n self.rewards_ph = tf.placeholder(tf.float32, [None], name=\"rewards_ph\")\n self.old_neglog_pac_ph = tf.placeholder(tf.float32, [None], name=\"old_neglog_pac_ph\")\n self.old_vpred_ph = tf.placeholder(tf.float32, [None], name=\"old_vpred_ph\")\n self.learning_rate_ph = tf.placeholder(tf.float32, [], name=\"learning_rate_ph\")\n self.clip_range_ph = tf.placeholder(tf.float32, [], name=\"clip_range_ph\")\n\n neglogpac = train_model.proba_distribution.neglogp(self.action_ph)\n self.entropy = tf.reduce_mean(train_model.proba_distribution.entropy())\n\n vpred = train_model.value_flat\n\n # Value function clipping: not present in the original PPO\n if self.cliprange_vf is None:\n # Default behavior (legacy from OpenAI baselines):\n # use the same clipping as for the policy\n self.clip_range_vf_ph = self.clip_range_ph\n self.cliprange_vf = self.cliprange\n elif isinstance(self.cliprange_vf, (float, int)) and self.cliprange_vf < 0:\n # Original PPO implementation: no value function clipping\n self.clip_range_vf_ph = None\n else:\n # Last possible behavior: clipping range\n # specific to the value function\n self.clip_range_vf_ph = tf.placeholder(tf.float32, [], name=\"clip_range_vf_ph\")\n\n if self.clip_range_vf_ph is None:\n # No clipping\n vpred_clipped = train_model.value_flat\n else:\n # Clip the different between old and new value\n # NOTE: this depends on the reward scaling\n vpred_clipped = self.old_vpred_ph + \\\n tf.clip_by_value(vpred - self.old_vpred_ph,\n - self.clip_range_vf_ph, self.clip_range_vf_ph)\n\n vf_losses1 = tf.square(vpred - self.rewards_ph)\n vf_losses2 = tf.square(vpred_clipped - self.rewards_ph)\n self.vf_loss = .5 * tf.reduce_mean(tf.maximum(vf_losses1, vf_losses2))\n\n ratio = tf.exp(self.old_neglog_pac_ph - neglogpac)\n pg_losses = -self.advs_ph * ratio\n pg_losses2 = -self.advs_ph * tf.clip_by_value(ratio, 1.0 - self.clip_range_ph, 1.0 +\n self.clip_range_ph)\n self.pg_loss = tf.reduce_mean(tf.maximum(pg_losses, pg_losses2))\n self.approxkl = .5 * tf.reduce_mean(tf.square(neglogpac - self.old_neglog_pac_ph))\n self.clipfrac = tf.reduce_mean(tf.cast(tf.greater(tf.abs(ratio - 1.0),\n self.clip_range_ph), tf.float32))\n loss = self.pg_loss - self.entropy * self.ent_coef + self.vf_loss * self.vf_coef\n\n tf.summary.scalar('entropy_loss', self.entropy)\n tf.summary.scalar('policy_gradient_loss', self.pg_loss)\n tf.summary.scalar('value_function_loss', self.vf_loss)\n tf.summary.scalar('approximate_kullback-leibler', self.approxkl)\n tf.summary.scalar('clip_factor', self.clipfrac)\n tf.summary.scalar('loss', loss)\n\n with tf.variable_scope('model'):\n self.params = tf.trainable_variables()\n if self.full_tensorboard_log:\n for var in self.params:\n tf.summary.histogram(var.name, var)\n grads = tf.gradients(loss, self.params)\n if self.max_grad_norm is not None:\n grads, _grad_norm = tf.clip_by_global_norm(grads, self.max_grad_norm)\n grads = list(zip(grads, self.params))\n trainer = tf.train.AdamOptimizer(learning_rate=self.learning_rate_ph, epsilon=1e-5)\n self._train = trainer.apply_gradients(grads)\n\n self.loss_names = ['policy_loss', 'value_loss', 'policy_entropy', 'approxkl', 'clipfrac']\n\n with tf.variable_scope(\"input_info\", reuse=False):\n tf.summary.scalar('discounted_rewards', tf.reduce_mean(self.rewards_ph))\n tf.summary.scalar('learning_rate', 
tf.reduce_mean(self.learning_rate_ph))\n tf.summary.scalar('advantage', tf.reduce_mean(self.advs_ph))\n tf.summary.scalar('clip_range', tf.reduce_mean(self.clip_range_ph))\n if self.clip_range_vf_ph is not None:\n tf.summary.scalar('clip_range_vf', tf.reduce_mean(self.clip_range_vf_ph))\n\n tf.summary.scalar('old_neglog_action_probability', tf.reduce_mean(self.old_neglog_pac_ph))\n tf.summary.scalar('old_value_pred', tf.reduce_mean(self.old_vpred_ph))\n\n if self.full_tensorboard_log:\n tf.summary.histogram('discounted_rewards', self.rewards_ph)\n tf.summary.histogram('learning_rate', self.learning_rate_ph)\n tf.summary.histogram('advantage', self.advs_ph)\n tf.summary.histogram('clip_range', self.clip_range_ph)\n tf.summary.histogram('old_neglog_action_probability', self.old_neglog_pac_ph)\n tf.summary.histogram('old_value_pred', self.old_vpred_ph)\n if tf_util.is_image(self.observation_space):\n tf.summary.image('observation', train_model.obs_ph)\n else:\n tf.summary.histogram('observation', train_model.obs_ph)\n\n self.train_model = train_model\n self.act_model = act_model\n self.step = act_model.step\n self.proba_step = act_model.proba_step\n self.value = act_model.value\n self.initial_state = act_model.initial_state\n tf.global_variables_initializer().run(session=self.sess) # pylint: disable=E1101\n\n self.summary = tf.summary.merge_all()\n\n def _train_step(self, learning_rate, cliprange, obs, returns, masks, actions, values, neglogpacs, update,\n writer, states=None, cliprange_vf=None):\n \"\"\"\n Training of PPO2 Algorithm\n\n :param learning_rate: (float) learning rate\n :param cliprange: (float) Clipping factor\n :param obs: (np.ndarray) The current observation of the environment\n :param returns: (np.ndarray) the rewards\n :param masks: (np.ndarray) The last masks for done episodes (used in recurent policies)\n :param actions: (np.ndarray) the actions\n :param values: (np.ndarray) the values\n :param neglogpacs: (np.ndarray) Negative Log-likelihood probability of Actions\n :param update: (int) the current step iteration\n :param writer: (TensorFlow Summary.writer) the writer for tensorboard\n :param states: (np.ndarray) For recurrent policies, the internal state of the recurrent model\n :return: policy gradient loss, value function loss, policy entropy,\n approximation of kl divergence, updated clipping range, training update operation\n :param cliprange_vf: (float) Clipping factor for the value function\n \"\"\"\n advs = returns - values\n advs = (advs - advs.mean()) / (advs.std() + 1e-8)\n td_map = {self.train_model.obs_ph: obs, self.action_ph: actions,\n self.advs_ph: advs, self.rewards_ph: returns,\n self.learning_rate_ph: learning_rate, self.clip_range_ph: cliprange,\n self.old_neglog_pac_ph: neglogpacs, self.old_vpred_ph: values}\n if states is not None:\n td_map[self.train_model.states_ph] = states\n td_map[self.train_model.dones_ph] = masks\n\n if cliprange_vf is not None and cliprange_vf >= 0:\n td_map[self.clip_range_vf_ph] = cliprange_vf\n\n if states is None:\n update_fac = max(self.n_batch // self.nminibatches // self.noptepochs, 1)\n else:\n update_fac = max(self.n_batch // self.nminibatches // self.noptepochs // self.n_steps, 1)\n\n if writer is not None:\n # run loss backprop with summary, but once every 10 runs save the metadata (memory, compute time, ...)\n if self.full_tensorboard_log and (1 + update) % 10 == 0:\n run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)\n run_metadata = tf.RunMetadata()\n summary, policy_loss, value_loss, 
policy_entropy, approxkl, clipfrac, _ = self.sess.run(\n [self.summary, self.pg_loss, self.vf_loss, self.entropy, self.approxkl, self.clipfrac, self._train],\n td_map, options=run_options, run_metadata=run_metadata)\n writer.add_run_metadata(run_metadata, 'step%d' % (update * update_fac))\n else:\n summary, policy_loss, value_loss, policy_entropy, approxkl, clipfrac, _ = self.sess.run(\n [self.summary, self.pg_loss, self.vf_loss, self.entropy, self.approxkl, self.clipfrac, self._train],\n td_map)\n writer.add_summary(summary, (update * update_fac))\n else:\n policy_loss, value_loss, policy_entropy, approxkl, clipfrac, _ = self.sess.run(\n [self.pg_loss, self.vf_loss, self.entropy, self.approxkl, self.clipfrac, self._train], td_map)\n\n return policy_loss, value_loss, policy_entropy, approxkl, clipfrac\n\n def learn(self, total_timesteps, callback=None, log_interval=1, tb_log_name=\"PPO2\",\n reset_num_timesteps=True):\n # Transform to callable if needed\n self.learning_rate = get_schedule_fn(self.learning_rate)\n self.cliprange = get_schedule_fn(self.cliprange)\n cliprange_vf = get_schedule_fn(self.cliprange_vf)\n\n samples = deque(maxlen=5 * self.env.num_envs * 10)\n\n new_tb_log = self._init_num_timesteps(reset_num_timesteps)\n callback = self._init_callback(callback)\n\n with SetVerbosity(self.verbose), TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name, new_tb_log) \\\n as writer:\n self._setup_learn()\n\n t_first_start = time.time()\n n_updates = total_timesteps // self.n_batch\n\n callback.on_training_start(locals(), globals())\n\n for update in range(1, n_updates + 1):\n assert self.n_batch % self.nminibatches == 0, (\"The number of minibatches (`nminibatches`) \"\n \"is not a factor of the total number of samples \"\n \"collected per rollout (`n_batch`), \"\n \"some samples won't be used.\"\n )\n batch_size = self.n_batch // self.nminibatches\n t_start = time.time()\n frac = 1.0 - (update - 1.0) / n_updates\n lr_now = self.learning_rate(frac)\n cliprange_now = self.cliprange(frac)\n cliprange_vf_now = cliprange_vf(frac)\n\n callback.on_rollout_start()\n # true_reward is the reward without discount\n rollout = self.runner.run(callback)\n # Unpack\n obs, returns, masks, actions, values, neglogpacs, states, ep_infos, true_reward = rollout\n\n callback.on_rollout_end()\n\n # Early stopping due to the callback\n if not self.runner.continue_training:\n break\n\n self.ep_info_buf.extend(ep_infos)\n mb_loss_vals = []\n if states is None: # nonrecurrent version\n update_fac = max(self.n_batch // self.nminibatches // self.noptepochs, 1)\n inds = np.arange(self.n_batch)\n for epoch_num in range(self.noptepochs):\n np.random.shuffle(inds)\n for start in range(0, self.n_batch, batch_size):\n timestep = self.num_timesteps // update_fac + ((epoch_num *\n self.n_batch + start) // batch_size)\n end = start + batch_size\n mbinds = inds[start:end]\n slices = (arr[mbinds] for arr in (obs, returns, masks, actions, values, neglogpacs))\n mb_loss_vals.append(self._train_step(lr_now, cliprange_now, *slices, writer=writer,\n update=timestep, cliprange_vf=cliprange_vf_now))\n else: # recurrent version\n update_fac = max(self.n_batch // self.nminibatches // self.noptepochs // self.n_steps, 1)\n assert self.n_envs % self.nminibatches == 0\n env_indices = np.arange(self.n_envs)\n flat_indices = np.arange(self.n_envs * self.n_steps).reshape(self.n_envs, self.n_steps)\n envs_per_batch = batch_size // self.n_steps\n for epoch_num in range(self.noptepochs):\n np.random.shuffle(env_indices)\n for start 
in range(0, self.n_envs, envs_per_batch):\n timestep = self.num_timesteps // update_fac + ((epoch_num *\n self.n_envs + start) // envs_per_batch)\n end = start + envs_per_batch\n mb_env_inds = env_indices[start:end]\n mb_flat_inds = flat_indices[mb_env_inds].ravel()\n slices = (arr[mb_flat_inds] for arr in (obs, returns, masks, actions, values, neglogpacs))\n mb_states = states[mb_env_inds]\n mb_loss_vals.append(self._train_step(lr_now, cliprange_now, *slices, update=timestep,\n writer=writer, states=mb_states,\n cliprange_vf=cliprange_vf_now))\n\n loss_vals = np.mean(mb_loss_vals, axis=0)\n t_now = time.time()\n fps = int(self.n_batch / (t_now - t_start))\n\n if writer is not None:\n total_episode_reward_logger(self.episode_reward,\n true_reward.reshape((self.n_envs, self.n_steps)),\n masks.reshape((self.n_envs, self.n_steps)),\n writer, self.num_timesteps)\n\n if self.verbose >= 1 and (update % log_interval == 0 or update == 1):\n explained_var = explained_variance(values, returns)\n logger.logkv(\"serial_timesteps\", update * self.n_steps)\n logger.logkv(\"n_updates\", update)\n logger.logkv(\"total_timesteps\", self.num_timesteps)\n logger.logkv(\"fps\", fps)\n logger.logkv(\"explained_variance\", float(explained_var))\n if len(self.ep_info_buf) > 0 and len(self.ep_info_buf[0]) > 0:\n logger.logkv('ep_reward_mean', safe_mean([ep_info['r'] for ep_info in self.ep_info_buf]))\n logger.logkv('ep_len_mean', safe_mean([ep_info['l'] for ep_info in self.ep_info_buf]))\n logger.logkv('time_elapsed', t_start - t_first_start)\n for (loss_val, loss_name) in zip(loss_vals, self.loss_names):\n logger.logkv(loss_name, loss_val)\n logger.dumpkvs()\n\n if self.active_sampling:\n if len(samples) < samples.maxlen:\n samples.extendleft(self.env.get_reset_data())\n else:\n resets = np.count_nonzero(masks)\n if resets > 0:\n sample_obs = [s[\"obs\"] for s in samples]\n scores = self.train_model.get_critic_discrepancy(sample_obs)\n samples = deque(\n [sample for _, sample in sorted(zip(scores, samples), key=lambda pair: pair[0])],\n maxlen=5 * self.env.num_envs * 10)\n scenarios = []\n for i in range(self.env.num_envs * resets):\n scenarios.append(samples.popleft()[\"initial_state\"])\n\n self.env.add_scenarios(scenarios)\n\n samples.extendleft(self.env.get_reset_data())\n callback.on_training_end()\n return self\n\n def save(self, save_path, cloudpickle=False):\n data = {\n \"gamma\": self.gamma,\n \"n_steps\": self.n_steps,\n \"vf_coef\": self.vf_coef,\n \"ent_coef\": self.ent_coef,\n \"max_grad_norm\": self.max_grad_norm,\n \"learning_rate\": self.learning_rate,\n \"lam\": self.lam,\n \"nminibatches\": self.nminibatches,\n \"noptepochs\": self.noptepochs,\n \"cliprange\": self.cliprange,\n \"cliprange_vf\": self.cliprange_vf,\n \"verbose\": self.verbose,\n \"policy\": self.policy,\n \"observation_space\": self.observation_space,\n \"action_space\": self.action_space,\n \"n_envs\": self.n_envs,\n \"n_cpu_tf_sess\": self.n_cpu_tf_sess,\n \"seed\": self.seed,\n \"_vectorize_action\": self._vectorize_action,\n \"policy_kwargs\": self.policy_kwargs,\n \"num_timesteps\": self.num_timesteps\n }\n\n params_to_save = self.get_parameters()\n\n self._save_to_file(save_path, data=data, params=params_to_save, cloudpickle=cloudpickle)\n\n\nclass Runner(AbstractEnvRunner):\n def __init__(self, *, env, model, n_steps, gamma, lam):\n \"\"\"\n A runner to learn the policy of an environment for a model\n\n :param env: (Gym environment) The environment to learn from\n :param model: (Model) The model to learn\n :param 
n_steps: (int) The number of steps to run for each environment\n :param gamma: (float) Discount factor\n :param lam: (float) Factor for trade-off of bias vs variance for Generalized Advantage Estimator\n \"\"\"\n super().__init__(env=env, model=model, n_steps=n_steps)\n self.lam = lam\n self.gamma = gamma\n\n def _run(self):\n \"\"\"\n Run a learning step of the model\n\n :return:\n - observations: (np.ndarray) the observations\n - rewards: (np.ndarray) the rewards\n - masks: (numpy bool) whether an episode is over or not\n - actions: (np.ndarray) the actions\n - values: (np.ndarray) the value function output\n - negative log probabilities: (np.ndarray)\n - states: (np.ndarray) the internal states of the recurrent policies\n - infos: (dict) the extra information of the model\n \"\"\"\n # mb stands for minibatch\n mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_neglogpacs, mb_envterms = [], [], [], [], [], [], []\n mb_states = self.states\n ep_infos = []\n\n for _ in range(self.n_steps):\n actions, values, self.states, neglogpacs = self.model.step(self.obs, self.states, self.dones)\n mb_obs.append(self.obs.copy())\n mb_actions.append(actions)\n mb_values.append(values)\n mb_neglogpacs.append(neglogpacs)\n mb_dones.append(self.dones)\n clipped_actions = actions\n # Clip the actions to avoid out of bound error\n if isinstance(self.env.action_space, gym.spaces.Box):\n clipped_actions = np.clip(actions, self.env.action_space.low, self.env.action_space.high)\n self.obs[:], rewards, self.dones, infos = self.env.step(clipped_actions)\n self.model.num_timesteps += self.n_envs\n\n if self.callback is not None:\n # Abort training early\n if self.callback.on_step() is False:\n self.continue_training = False\n # Return dummy values\n return [None] * 9\n\n env_terms = []\n for info in infos:\n maybe_ep_info = info.get('episode')\n termination_reason = info.get(\"termination\", None)\n if maybe_ep_info is not None:\n ep_infos.append(maybe_ep_info)\n env_terms.append(termination_reason is not None and termination_reason != \"steps\")\n env_terms = np.array(env_terms)\n mb_envterms.append(env_terms)\n mb_rewards.append(rewards)\n # batch of steps to batch of rollouts\n mb_obs = np.asarray(mb_obs, dtype=self.obs.dtype)\n mb_rewards = np.asarray(mb_rewards, dtype=np.float32)\n mb_actions = np.asarray(mb_actions)\n mb_values = np.asarray(mb_values, dtype=np.float32)\n mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32)\n mb_dones = np.asarray(mb_dones, dtype=np.bool)\n last_values = self.model.value(self.obs, self.states, self.dones)\n # discount/bootstrap off value fn\n mb_advs = np.zeros_like(mb_rewards)\n true_reward = np.copy(mb_rewards)\n last_gae_lam = 0\n for step in reversed(range(self.n_steps)):\n if step == self.n_steps - 1:\n nextnonterminal = 1.0 - self.dones\n #nextnonterminal = 1.0 - env_terms\n nextvalues = last_values\n else:\n nextnonterminal = 1.0 - mb_dones[step + 1]\n #nextnonterminal = 1.0 - mb_envterms[step + 1]\n nextvalues = mb_values[step + 1]\n delta = mb_rewards[step] + self.gamma * nextvalues * nextnonterminal - mb_values[step]\n mb_advs[step] = last_gae_lam = delta + self.gamma * self.lam * nextnonterminal * last_gae_lam\n mb_returns = mb_advs + mb_values\n\n mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs, true_reward = \\\n map(swap_and_flatten, (mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs, true_reward))\n\n return mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs, mb_states, ep_infos, true_reward\n\n\n# 
obs, returns, masks, actions, values, neglogpacs, states = runner.run()\ndef swap_and_flatten(arr):\n \"\"\"\n swap and then flatten axes 0 and 1\n\n :param arr: (np.ndarray)\n :return: (np.ndarray)\n \"\"\"\n shape = arr.shape\n return arr.swapaxes(0, 1).reshape(shape[0] * shape[1], *shape[2:])\n" ]
[ [ "tensorflow.reduce_sum", "tensorflow.RunMetadata", "numpy.concatenate", "numpy.mean", "tensorflow.summary.scalar", "tensorflow.Graph", "numpy.allclose", "numpy.empty_like", "tensorflow.summary.image", "tensorflow.gradients", "tensorflow.square", "tensorflow.RunOptions", "tensorflow.placeholder", "tensorflow.summary.merge_all", "numpy.array", "tensorflow.summary.histogram", "numpy.isfinite", "tensorflow.reduce_mean", "numpy.set_printoptions", "tensorflow.reshape", "tensorflow.assign", "tensorflow.variable_scope" ], [ "tensorflow.layers.flatten", "numpy.expand_dims", "tensorflow.concat", "tensorflow.reduce_mean", "tensorflow.placeholder_with_default", "tensorflow.layers.dense", "tensorflow.placeholder", "tensorflow.subtract", "tensorflow.gather", "tensorflow.tanh", "tensorflow.variable_scope", "numpy.array", "numpy.zeros" ], [ "numpy.asarray", "tensorflow.RunMetadata", "numpy.zeros_like", "numpy.mean", "tensorflow.train.AdamOptimizer", "tensorflow.summary.scalar", "tensorflow.Graph", "numpy.clip", "numpy.arange", "tensorflow.summary.image", "tensorflow.gradients", "numpy.copy", "tensorflow.square", "numpy.count_nonzero", "tensorflow.trainable_variables", "tensorflow.RunOptions", "tensorflow.placeholder", "tensorflow.exp", "tensorflow.global_variables_initializer", "tensorflow.summary.merge_all", "numpy.array", "tensorflow.summary.histogram", "tensorflow.clip_by_value", "tensorflow.reduce_mean", "tensorflow.maximum", "numpy.random.shuffle", "tensorflow.clip_by_global_norm", "tensorflow.variable_scope", "tensorflow.abs" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
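The Runner._run method in the stable-baselines PPO2 file above computes advantages with Generalized Advantage Estimation (GAE): it walks backwards through the rollout, bootstrapping from the last value estimate and zeroing the carry-over at episode boundaries. Below is a minimal NumPy sketch of that recursion, not the library code itself; the array names and the synthetic rollout are illustrative, and the done-flag bookkeeping is simplified (the original Runner stores the flags with a one-step offset).

import numpy as np

def gae(rewards, values, dones, last_value, gamma=0.99, lam=0.95):
    # rewards, values, dones: shape (n_steps, n_envs); last_value: shape (n_envs,)
    # dones[t] == 1.0 means the episode ended on step t, so nothing is bootstrapped across it
    n_steps = rewards.shape[0]
    advantages = np.zeros_like(rewards)
    last_gae_lam = 0.0
    for t in reversed(range(n_steps)):
        next_values = last_value if t == n_steps - 1 else values[t + 1]
        non_terminal = 1.0 - dones[t]
        delta = rewards[t] + gamma * next_values * non_terminal - values[t]
        last_gae_lam = delta + gamma * lam * non_terminal * last_gae_lam
        advantages[t] = last_gae_lam
    # PPO2 trains the value function on advantages + values (the "returns")
    return advantages, advantages + values

# toy rollout: 4 steps, 2 parallel environments, no terminations
rng = np.random.default_rng(0)
rewards = rng.normal(size=(4, 2)).astype(np.float32)
values = rng.normal(size=(4, 2)).astype(np.float32)
dones = np.zeros((4, 2), dtype=np.float32)
advantages, returns = gae(rewards, values, dones, last_value=np.zeros(2, dtype=np.float32))
print(advantages.shape, returns.shape)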
carlosal1015/scikit-fem
[ "1e73a417e9b43fe0a36e29807792c41fa289b77d", "1e73a417e9b43fe0a36e29807792c41fa289b77d", "1e73a417e9b43fe0a36e29807792c41fa289b77d", "1e73a417e9b43fe0a36e29807792c41fa289b77d" ]
[ "skfem/element/element_tet/element_tet_p2.py", "docs/examples/ex28.py", "docs/examples/ex02.py", "skfem/assembly/global_basis/mortar_basis.py" ]
[ "import numpy as np\nfrom ..element_h1 import ElementH1\n\n\nclass ElementTetP2(ElementH1):\n nodal_dofs = 1\n edge_dofs = 1\n dim = 3\n maxdeg = 2\n dofnames = ['u', 'u']\n doflocs = np.array([[0., 0., 0.],\n [1., 0., 0.],\n [0., 1., 0.],\n [0., 0., 1.],\n [.5, 0., 0.],\n [.5, .5, 0.], \n [0., .5, 0.],\n [0., .0, .5],\n [.5, .0, .5],\n [.0, .5, .5]])\n\n def lbasis(self, X, i):\n x, y, z = X\n\n if i == 0: # at (0,0,0)\n phi = (1. - 3.*x + 2.*x**2 - 3.*y + 4.*x*y +\n 2.*y**2 - 3.*z + 4.*x*z + 4.*y*z + 2.*z**2)\n dphi = np.array([\n -3. + 4.*x + 4.*y + 4.*z,\n -3. + 4.*x + 4.*y + 4.*z,\n -3. + 4.*x + 4.*y + 4.*z,\n ])\n elif i == 1: # at (1,0,0)\n phi = - 1.*x + 2.*x**2\n dphi = np.array([\n -1 + 4*x,\n 0*x,\n 0*x,\n ])\n elif i == 2: # at (0,1,0)\n phi = - 1.*y + 2.*y**2\n dphi = np.array([\n 0*x,\n -1. + 4.*y,\n 0*x,\n ])\n elif i == 3: # at (0,0,1)\n phi = - 1.*z + 2.*z**2\n dphi = np.array([\n 0*x,\n 0*x,\n -1. + 4.*z,\n ])\n elif i == 4: # between (0,1)\n phi = 4.*x - 4.*x**2 - 4.*x*y - 4*x*z\n dphi = np.array([\n 4. - 8.*x - 4.*y - 4.*z,\n -4.*x,\n -4.*x,\n ])\n elif i == 5: # between (1,2)\n phi = 4.*x*y\n dphi = np.array([\n 4.*y,\n 4.*x,\n 0*x,\n ])\n elif i == 6: # between (0,2)\n phi = 0. + 4.*y - 4.*x*y - 4.*y**2 - 4.*y*z\n dphi = np.array([\n -4.*y,\n 4. - 4.*x - 8.*y - 4.*z,\n -4.*y,\n ])\n elif i == 7: # between (0,3)\n phi = 0. + 4.*z - 4.*x*z - 4.*y*z - 4.*z**2\n dphi = np.array([\n -4.*z,\n -4.*z,\n 4. - 4.*x - 4.*y - 8.*z,\n ])\n elif i == 8:\n phi = 0. + 4.*x*z\n dphi = np.array([\n 4.*z,\n 0*x,\n 4*x,\n ])\n elif i == 9:\n phi = 0. + 4.*y*z\n dphi = np.array([\n 0*x,\n 4*z,\n 4*y,\n ])\n else:\n raise Exception(\"!\")\n\n return phi, dphi\n", "from skfem import *\nfrom skfem.importers import from_meshio\nfrom skfem.models.poisson import unit_load\n\nfrom matplotlib.pyplot import subplots\nimport numpy as np\n\nfrom pygmsh import generate_mesh\nfrom pygmsh.built_in import Geometry\n\nhalfheight = 1.\nlength = 10.\nthickness = halfheight\n\nkratio = 80. 
/ (4.181 / 7.14)\n\npeclet = 357.\n\n\ndef make_mesh(halfheight: float, # mm\n length: float,\n thickness: float) -> MeshTri:\n geom = Geometry()\n points = []\n lines = []\n\n lcar = halfheight / 2**2\n\n for xy in [(0., halfheight),\n (0., -halfheight),\n (length, -halfheight),\n (length, halfheight),\n (0., -halfheight - thickness),\n (length, -halfheight - thickness)]:\n points.append(geom.add_point([*xy, 0.], lcar))\n\n lines.append(geom.add_line(*points[:2]))\n geom.add_physical(lines[-1], 'fluid-inlet')\n\n lines.append(geom.add_line(*points[1:3]))\n\n lines.append(geom.add_line(*points[2:4]))\n geom.add_physical(lines[-1], 'fluid-outlet')\n\n lines.append(geom.add_line(points[3], points[0]))\n\n geom.add_physical(geom.add_plane_surface(geom.add_line_loop(lines)),\n 'fluid')\n\n lines.append(geom.add_line(points[1], points[4]))\n geom.add_physical(lines[-1], 'solid-inlet')\n\n lines.append(geom.add_line(*points[4:6]))\n geom.add_physical(lines[-1], 'heated')\n\n lines.append(geom.add_line(points[5], points[2]))\n geom.add_physical(lines[-1], 'solid-outlet')\n\n geom.add_physical(geom.add_plane_surface(geom.add_line_loop(\n [*lines[-3:], -lines[1]])), 'solid')\n\n return from_meshio(generate_mesh(geom, dim=2))\n\n\nmesh = make_mesh(halfheight, length, thickness)\nelement = ElementTriP1()\nbasis = {\n 'heat': InteriorBasis(mesh, element),\n 'fluid': InteriorBasis(mesh, element, elements=mesh.subdomains['fluid']),\n **{label: FacetBasis(mesh, element, facets=mesh.boundaries[label])\n for label in ['heated', 'fluid-outlet', 'solid-outlet']}}\n\n\n@bilinear_form\ndef conduction(u, du, v, dv, w):\n return w.w * sum(du * dv)\n\n\n@bilinear_form\ndef advection(u, du, v, dv, w):\n _, y = w.x\n velocity_x = 1 - (y / halfheight)**2 # plane Poiseuille\n return v * velocity_x * du[0]\n\n\nconductivity = basis['heat'].zero_w() + 1\nconductivity[mesh.subdomains['solid']] = kratio\n\nlongitudinal_gradient = 3 / 4 / peclet\n\nA = (asm(conduction, basis['heat'], w=conductivity)\n + peclet * asm(advection, basis['fluid']))\nb = (asm(unit_load, basis['heated'])\n + longitudinal_gradient\n * (asm(unit_load, basis['fluid-outlet'])\n + kratio * asm(unit_load, basis['solid-outlet'])))\n\nD = basis['heat'].get_dofs(\n {label: boundary for\n label, boundary in mesh.boundaries.items()\n if label.endswith('-inlet')})\nI = basis['heat'].complement_dofs(D)\n\n\ndef exact(x: np.ndarray, y: np.ndarray) -> np.ndarray:\n \"\"\"return the exact fully developed solution at specified points\"\"\"\n return np.where(y > -halfheight,\n - (5 - y**2) * (1 - y**2) / 16 - y / 2,\n 1 / 2 - (1 + y) / kratio) + longitudinal_gradient * x\n\ntemperature = np.zeros(basis['heat'].N)\ninlet_dofs = basis['heat'].complement_dofs(I)\ntemperature[inlet_dofs] = exact(*mesh.p[:, inlet_dofs])\n\ntemperature = solve(*condense(A, b, temperature, I=I))\n\ndofs = {label: basis['heat'].get_dofs(facets).all()\n for label, facets in mesh.boundaries.items()\n if label.endswith('let')}\n\nexit_interface_temperature = {\n 'skfem': temperature[np.intersect1d(dofs['fluid-outlet'],\n dofs['solid-outlet'])[0]],\n 'exact': exact(length, -1.)}\n \n\nif __name__ == '__main__':\n\n from pathlib import Path\n\n mesh.plot(temperature, edgecolors='none')\n mesh.savefig(Path(__file__).with_suffix('.png'),\n bbox_inches='tight', pad_inches=0)\n\n fig, ax = subplots()\n ax.set_title('transverse temperature profiles')\n \n y = {label: mesh.p[1, d] for label, d in dofs.items()}\n ii = {label: np.argsort(yy) for label, yy in y.items()}\n\n y['exact'] = 
np.linspace(min(y['solid-inlet']),\n max(y['fluid-inlet']))\n for port, saturation, linestyle in [('inlet', '', '--'),\n ('outlet', 'dark', '-')]:\n for phase, hue, marker in [('fluid', 'green', 'x'),\n ('solid', 'red', '+')]:\n color = saturation + hue\n label = f'{phase}-{port}'\n ax.plot(temperature[dofs[label][ii[label]]], y[label][ii[label]],\n marker=marker, color=color, linestyle='none',\n label=f'{label}, skfem')\n ax.plot(exact(mesh.p[0, dofs[label][0]], y['exact']), y['exact'],\n color='k', linestyle=linestyle, label=f'{port}, exact')\n \n ax.set_xlabel('temperature')\n ax.set_ylabel('$y$')\n ax.set_ylim((-halfheight - thickness, halfheight))\n ax.axhline(-halfheight, color='k', linestyle=':')\n ax.legend()\n fig.savefig(Path(__file__).with_name(\n Path(__file__).stem + '-inlet-outlet.png'))\n", "from skfem import *\nimport numpy as np\n\nm = MeshTri.init_symmetric()\nm.refine(3)\n\ne = ElementTriMorley()\nib = InteriorBasis(m, e)\n\n@bilinear_form\ndef bilinf(u, du, ddu, v, dv, ddv, w):\n d = 0.1\n E = 200e9\n nu = 0.3\n\n def C(T):\n trT = T[0, 0] + T[1, 1]\n return E / (1. + nu) * \\\n np.array([[T[0, 0] + nu / (1. - nu) * trT, T[0, 1]],\n [T[1, 0], T[1, 1] + nu / (1. - nu) * trT]])\n\n def Eps(ddw):\n return np.array([[ddw[0][0], ddw[0][1]],\n [ddw[1][0], ddw[1][1]]])\n\n def ddot(T1, T2):\n return (T1[0, 0] * T2[0, 0] +\n T1[0, 1] * T2[0, 1] +\n T1[1, 0] * T2[1, 0] +\n T1[1, 1] * T2[1, 1])\n\n return d**3 / 12.0 * ddot(C(Eps(ddu)), Eps(ddv))\n\n@linear_form\ndef linf(v, dv, ddv, w):\n return 1e6 * v\n\nK = asm(bilinf, ib)\nf = asm(linf, ib)\n\nboundary = {\n 'left': m.facets_satisfying(lambda x: x[0] == 0),\n 'right': m.facets_satisfying(lambda x: x[0] == 1),\n 'top': m.facets_satisfying(lambda x: x[1] == 1),\n}\n\ndofs = ib.get_dofs(boundary)\n\nD = np.concatenate((\n dofs['left'].nodal['u'],\n dofs['left'].facet['u_n'],\n dofs['right'].nodal['u'],\n dofs['top'].nodal['u'],\n))\n\nx = solve(*condense(K, f, D=D, expand=True))\n\nif __name__ == \"__main__\":\n from os.path import splitext\n from sys import argv\n \n M, X = ib.refinterp(x, 3)\n ax = m.draw()\n M.plot(X, smooth=True, ax=ax, colorbar=True)\n M.savefig(splitext(argv[0])[0] + '_solution.png')\n", "from typing import Dict, Optional\n\nimport numpy as np\nfrom numpy import ndarray\n\nfrom skfem.quadrature import get_quadrature\n\nfrom .global_basis import GlobalBasis\nfrom .interior_basis import InteriorBasis\n\n\nclass MortarBasis(GlobalBasis):\n \"\"\"Global basis functions evaluated at integration points on the mortar\n boundary. 
\"\"\"\n def __init__(self,\n mesh,\n elem,\n mapping,\n intorder: Optional[int] = None,\n side: int = 0):\n super(MortarBasis, self).__init__(mesh, elem, mapping, intorder)\n\n self.ib1 = InteriorBasis(mesh.mesh1, elem)\n self.ib2 = InteriorBasis(mesh.mesh2, elem)\n\n self.X, self.W = get_quadrature(self.brefdom, self.intorder)\n\n self.find = np.nonzero(self.mesh.f2t[1, :] != -1)[0]\n self.tind = self.mesh.f2t[side, self.find]\n\n # boundary refdom to global facet\n x = self.mapping.G(self.X, find=self.find)\n # global facet to refdom facet\n Y = self.mapping.invF(x, tind=self.tind)\n\n self.normals = np.repeat(mesh.normals[:, :, None], len(self.W), axis=2)\n\n self.nelems = len(self.find)\n\n self.basis = [self.elem.gbasis(self.mapping, Y, j, self.tind)\n for j in range(self.Nbfun)]\n\n self.dx = np.abs(self.mapping.detDG(self.X, find=self.find)) *\\\n np.tile(self.W, (self.nelems, 1))\n\n if side == 0:\n self.element_dofs = self.ib1.element_dofs[:, self.tind]\n elif side == 1:\n self.element_dofs = self.ib2.element_dofs[:, self.tind - mesh.mesh1.t.shape[1]] + self.ib1.N\n\n self.N = self.ib1.N + self.ib2.N\n\n def default_parameters(self):\n \"\"\"Return default parameters for `~skfem.assembly.asm`.\"\"\"\n return {'x':self.global_coordinates(),\n 'h':self.mesh_parameters(),\n 'n':self.normals}\n \n def global_coordinates(self) -> ndarray:\n return self.mapping.G(self.X, find=self.find)\n\n def mesh_parameters(self) -> ndarray:\n if self.mesh.dim() == 1:\n return np.array([0.0])\n else:\n return np.abs(self.mapping.detDG(self.X, self.find)) ** (1.0 / (self.mesh.dim() - 1))\n" ]
[ [ "numpy.array" ], [ "matplotlib.pyplot.subplots", "numpy.intersect1d", "numpy.argsort", "numpy.where", "numpy.zeros" ], [ "numpy.concatenate", "numpy.array" ], [ "numpy.array", "numpy.tile", "numpy.nonzero" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
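The ElementTetP2 file in the scikit-fem record above spells out the ten quadratic shape functions of the reference tetrahedron. As a quick sanity check (a standalone sketch, not part of the library), the polynomials copied from lbasis are nodal, i.e. each equals 1 at its own degree-of-freedom location and 0 at the others, and they sum to one everywhere on the element:

import numpy as np

# degree-of-freedom locations, copied from ElementTetP2.doflocs
doflocs = np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1],
                    [.5, 0, 0], [.5, .5, 0], [0, .5, 0],
                    [0, 0, .5], [.5, 0, .5], [0, .5, .5]])

def phi(i, x, y, z):
    # values of the ten shape functions from lbasis (gradients omitted)
    return [
        1 - 3*x + 2*x**2 - 3*y + 4*x*y + 2*y**2 - 3*z + 4*x*z + 4*y*z + 2*z**2,
        -x + 2*x**2,
        -y + 2*y**2,
        -z + 2*z**2,
        4*x - 4*x**2 - 4*x*y - 4*x*z,
        4*x*y,
        4*y - 4*x*y - 4*y**2 - 4*y*z,
        4*z - 4*x*z - 4*y*z - 4*z**2,
        4*x*z,
        4*y*z,
    ][i]

# Kronecker-delta property at the dof locations
vals = np.array([[phi(i, *p) for p in doflocs] for i in range(10)])
assert np.allclose(vals, np.eye(10))

# partition of unity at random points inside the reference tetrahedron
pts = np.random.rand(100, 3) / 3
assert np.allclose(sum(phi(i, *pts.T) for i in range(10)), 1.0)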
nathanmartins/Son-Of-Anton
[ "d45eec2b9263dbd981f468219c9d0fb049bd481d" ]
[ "son.py" ]
[ "import logging\nimport math\nimport os\nimport pickle\nimport re\n\nimport PIL.Image\nimport numpy as np\nfrom mtcnn import MTCNN\nfrom numpy import expand_dims\nfrom sklearn import preprocessing, neighbors\nfrom tensorflow_core.python.keras.models import load_model\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nDATASET_DIR = os.path.join(BASE_DIR, \"dataset\")\nTRAIN_DIR = os.path.join(DATASET_DIR, \"train\")\nTEST_DIR = os.path.join(DATASET_DIR, \"test\")\n\nDEBUG = True\n# DEBUG = False\n\nmodel = load_model('facenet_keras.h5')\n\nlogging.basicConfig(level=logging.DEBUG if DEBUG else logging.INFO)\n\n\ndef extract_faces(img_path: str):\n faces_arr = list()\n\n # Open file and convert to numpy\n image_array = np.array(PIL.Image.open(img_path).convert(\"RGB\"), \"uint8\")\n\n detector = MTCNN()\n faces = detector.detect_faces(image_array)\n\n if len(faces) == 0:\n # If there are no people in a training image, skip the image.\n logging.warning(f\"Image {img_path} not suitable for training. Size{len(faces)}\")\n return None, None\n\n for face in faces:\n # logging.debug(f\"Image {img_path} is suitable for training!\")\n\n x1, y1, width, height = face['box']\n # bug fix\n x1, y1 = abs(x1), abs(y1)\n x2, y2 = x1 + width, y1 + height\n # extract the face\n face = image_array[y1:y2, x1:x2]\n\n # resize pixels to the model size\n image = PIL.Image.fromarray(face)\n image = image.resize((160, 160))\n faces_arr.append(np.asarray(image))\n\n return faces_arr, faces\n\n\ndef get_embedding(face_pixels):\n # scale pixel values\n face_pixels = face_pixels.astype('float32')\n # standardize pixel values across channels (global)\n mean, std = face_pixels.mean(), face_pixels.std()\n face_pixels = (face_pixels - mean) / std\n # transform face into one sample\n samples = expand_dims(face_pixels, axis=0)\n # make prediction to get embedding\n return model.predict(samples)\n\n\ndef prepare():\n x_train = list()\n y_labels = list()\n\n # Loop through each person in the training set\n for label in os.listdir(TRAIN_DIR):\n\n path = os.path.join(TRAIN_DIR, label)\n\n # This will ignore anything that is not jpg|jpeg|png *USE WITH CAUTION*\n allowed_files = [os.path.join(path, f) for f in os.listdir(path) if\n re.match(r'.*\\.(jpg|jpeg|png)', f, flags=re.I)]\n\n for img_path in allowed_files:\n\n logging.debug(f\"File: {img_path}, Label: {label}\")\n\n faces, _ = extract_faces(img_path)\n if faces is not None:\n for face in faces:\n x_train.append(np.asarray(face))\n y_labels.append(label)\n\n # Converting string labels into numbers.\n le = preprocessing.LabelEncoder()\n labels_encoded = le.fit_transform(y_labels)\n\n with open(\"x_train.pickle\", 'wb') as f:\n pickle.dump(x_train, f)\n\n with open(\"y_labels.pickle\", 'wb') as f:\n pickle.dump(y_labels, f)\n\n with open(\"labels_encoded.pickle\", 'wb') as f:\n pickle.dump(labels_encoded, f)\n\n\ndef train():\n with open(\"x_train.pickle\", 'rb') as f:\n x_train = pickle.load(f)\n # x_train = np.array(x_train)\n # x_train = np.reshape(x_train, (-1, 2))\n\n with open(\"labels_encoded.pickle\", 'rb') as f:\n y_labels = pickle.load(f)\n\n # convert each face in the train set to an embedding\n encoded_x_train = list()\n for face_pixels in x_train:\n embedding = get_embedding(face_pixels)[0]\n encoded_x_train.append(embedding)\n encoded_x_train = np.asarray(encoded_x_train)\n\n # Determine how many neighbors to use for weighting in the KNN classifier.\n n_neighbors = int(round(math.sqrt(len(x_train))))\n logging.info(f\"n_neighbors: {n_neighbors}\")\n\n # 
Create and train the KNN classifier.\n knn_clf = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors, algorithm=\"ball_tree\", weights='distance')\n knn_clf.fit(encoded_x_train, y_labels)\n # perdicted = list()\n # metrics.accuracy(y_labels, perdicted)\n\n # Save the trained KNN classifier\n with open(\"model.clf\", 'wb') as f:\n pickle.dump(knn_clf, f)\n\n\ndef predict():\n with open(\"model.clf\", 'rb') as f:\n knn_clf = pickle.load(f)\n\n with open(\"labels_encoded.pickle\", 'rb') as f:\n y_labels = pickle.load(f)\n\n le = preprocessing.LabelEncoder()\n le.fit_transform(y_labels)\n # breakpoint()\n for img in os.listdir(TEST_DIR):\n\n # logging.info(f\"Testing image: {img}\")\n\n full_path = os.path.join(TEST_DIR, img)\n\n faces, raws = extract_faces(full_path)\n\n if faces is None:\n logging.info(f\"WARNING: COULD NOT FIND A FACE IN {full_path}\")\n continue\n\n c = 0\n\n for face in faces:\n\n faces_encodings = get_embedding(face)\n\n # A list of tuples of found face locations in css (top, right, bottom, left) order\n x_face_locations = tuple(raws[c][\"box\"])\n c += 1\n\n # Use the KNN model to find the best matches for the test face\n closest_distances = knn_clf.kneighbors(faces_encodings, n_neighbors=1)\n are_matches = list()\n\n for i in range(len(x_face_locations)):\n try:\n\n dis = closest_distances[0][i][0]\n # logging.debug(f\"Closest distance is {dis} - {dis < 7}\")\n\n if dis < 7:\n # logging.debug(f\"Adding a Dis {dis}\")\n are_matches.append(dis)\n except IndexError:\n pass\n\n # logging.debug(f\"Dis is {are_matches}\")\n\n pred = knn_clf.predict(faces_encodings)\n\n if len(are_matches) > 0:\n\n for pred, loc, rec in zip(pred, x_face_locations, are_matches):\n\n if rec:\n if pred == 1:\n a = \"unknown\"\n else:\n a = \"nsm\"\n logging.info(f\"Found: {a} - {img}\")\n else:\n logging.warning(f\"WARNING: COULD NOT IDENTIFY A FACE IN {full_path}\")\n else:\n a = \"unknown\"\n logging.info(f\"Found: {a} - {img}\")\n\n\nif __name__ == '__main__':\n # prepare()\n # train()\n predict()\n" ]
[ [ "sklearn.preprocessing.LabelEncoder", "numpy.expand_dims", "numpy.asarray", "sklearn.neighbors.KNeighborsClassifier" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
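son.py above embeds faces with a Keras FaceNet model and classifies the embeddings with a distance-weighted KNN whose neighbour count follows the square-root-of-training-size heuristic. The sketch below reproduces just that classification step on synthetic embeddings; the 128-dimensional vectors and the two Gaussian clusters are assumptions standing in for real FaceNet output (128 is the usual output size of the facenet_keras model).

import math
import numpy as np
from sklearn.neighbors import KNeighborsClassifier

# two fake identities, 20 "faces" each, as stand-ins for FaceNet embeddings
rng = np.random.default_rng(42)
embeddings = np.vstack([rng.normal(loc=c, scale=0.5, size=(20, 128)) for c in (0.0, 3.0)])
labels = np.repeat([0, 1], 20)

# same heuristic as son.py: k = round(sqrt(number of training faces))
n_neighbors = int(round(math.sqrt(len(embeddings))))

clf = KNeighborsClassifier(n_neighbors=n_neighbors, algorithm="ball_tree", weights="distance")
clf.fit(embeddings, labels)

# predict one query face and inspect the closest-neighbour distance,
# which son.py compares against a fixed cut-off to reject unknown faces
query = rng.normal(loc=3.0, scale=0.5, size=(1, 128))
distances, _ = clf.kneighbors(query, n_neighbors=1)
print(clf.predict(query)[0], float(distances[0, 0]))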
IGx89/scrypted
[ "577b00a090393f31aaa81de67f5fd4555995921a" ]
[ "plugins/opencv/src/opencv/__init__.py" ]
[ "from __future__ import annotations\nfrom time import sleep\nfrom detect import DetectionSession, DetectPlugin\nfrom typing import Any, List\nimport numpy as np\nimport cv2\nimport imutils\nfrom gi.repository import GLib, Gst\nfrom scrypted_sdk.types import ObjectDetectionModel, ObjectDetectionResult, ObjectsDetected\n\nclass OpenCVDetectionSession(DetectionSession):\n cap: cv2.VideoCapture\n previous_frame: Any\n\n def __init__(self) -> None:\n super().__init__()\n self.previous_frame = None\n self.cap = None\n\ndefaultThreshold = 25\ndefaultArea = 2000\ndefaultInterval = 250\n\nclass OpenCVPlugin(DetectPlugin):\n def __init__(self, nativeId: str | None = None):\n super().__init__(nativeId=nativeId)\n self.color2Gray = None\n self.pixelFormat = \"I420\"\n self.pixelFormatChannelCount = 1\n\n if True:\n self.retainAspectRatio = False\n self.color2Gray = None\n self.pixelFormat = \"I420\"\n self.pixelFormatChannelCount = 1\n else:\n self.retainAspectRatio = True\n self.color2Gray = cv2.COLOR_BGRA2GRAY\n self.pixelFormat = \"BGRA\"\n self.pixelFormatChannelCount = 4\n\n async def getDetectionModel(self) -> ObjectDetectionModel:\n d: ObjectDetectionModel = {\n 'name': '@scrypted/opencv',\n 'classes': ['motion'],\n }\n settings = [\n {\n 'title': \"Motion Area\",\n 'description': \"The area size required to trigger motion. Higher values (larger areas) are less sensitive. Setting this to 0 will output all matches into the console.\",\n 'value': defaultArea,\n 'key': 'area',\n 'placeholder': defaultArea,\n 'type': 'number',\n },\n {\n 'title': \"Motion Threshold\",\n 'description': \"The threshold required to consider a pixel changed. Higher values (larger changes) are less sensitive.\",\n 'value': defaultThreshold,\n 'key': 'threshold',\n 'placeholder': defaultThreshold,\n 'type': 'number',\n },\n {\n 'title': \"Frame Analysis Interval\",\n 'description': \"The number of milliseconds to wait between motion analysis.\",\n 'value': defaultInterval,\n 'key': 'interval',\n 'placeholder': defaultInterval,\n 'type': 'number',\n },\n ]\n d['settings'] = settings\n return d\n\n def get_pixel_format(self):\n return self.pixelFormat\n\n def parse_settings(self, settings: Any):\n area = defaultArea\n threshold = defaultThreshold\n interval = defaultInterval\n if settings:\n area = float(settings.get('area', area))\n threshold = int(settings.get('threshold', threshold))\n interval = float(settings.get('interval', interval))\n return area, threshold, interval\n\n def detect(self, detection_session: OpenCVDetectionSession, frame, settings: Any, src_size, convert_to_src_size) -> ObjectsDetected:\n area, threshold, interval = self.parse_settings(settings)\n\n # see get_detection_input_size on undocumented size requirements for GRAY8\n if self.color2Gray != None:\n gray = cv2.cvtColor(frame, self.color2Gray)\n else:\n gray = frame\n curFrame = cv2.GaussianBlur(gray, (21,21), 0)\n\n if detection_session.previous_frame is None:\n detection_session.previous_frame = curFrame\n return\n\n frameDelta = cv2.absdiff(detection_session.previous_frame, curFrame)\n detection_session.previous_frame = curFrame\n\n _, thresh = cv2.threshold(frameDelta, threshold, 255, cv2.THRESH_BINARY)\n dilated = cv2.dilate(thresh, None, iterations=2)\n fcontours = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n contours = imutils.grab_contours(fcontours)\n\n detections: List[ObjectDetectionResult] = []\n detection_result: ObjectsDetected = {}\n detection_result['detections'] = detections\n 
detection_result['inputDimensions'] = src_size\n \n for c in contours:\n x, y, w, h = cv2.boundingRect(c)\n # if w * h != contour_area:\n # print(\"mismatch w/h\", contour_area - w * h)\n\n x2, y2 = convert_to_src_size((x + w, y + h))\n x, y = convert_to_src_size((x, y))\n w = x2 - x + 1\n h = y2 - y + 1\n\n contour_area = w * h\n\n if not area or contour_area > area:\n detection: ObjectDetectionResult = {}\n detection['boundingBox'] = (x, y, w, h)\n detection['className'] = 'motion'\n detection['score'] = 1 if area else contour_area\n detections.append(detection)\n\n return detection_result \n\n def run_detection_jpeg(self, detection_session: DetectionSession, image_bytes: bytes, min_score: float) -> ObjectsDetected:\n raise Exception('can not run motion detection on jpeg')\n\n def get_detection_input_size(self, src_size):\n # The initial implementation of this plugin used BGRA\n # because it seemed impossible to pull the Y frame out of I420 without corruption.\n # This is because while 318x174 is aspect ratio correct,\n # it seems to cause strange issues with stride and the image is skewed.\n # By using 300x300, this seems to avoid some undocumented minimum size\n # reqiurement in gst-videoscale or opencv. Unclear which.\n\n # This is the same input size as tensorflow-lite. Allows for better pipelining.\n if not self.retainAspectRatio:\n return (300, 300)\n\n width, height = src_size\n if (width > height):\n if (width > 318):\n height = height / width * 318\n width = 318\n else:\n if (height > 318):\n width = width / height * 318\n height = 318\n\n width = int(np.floor(width / 6) * 6)\n height = int(np.floor(height / 6) * 6)\n\n return width, height\n\n def end_session(self, detection_session: OpenCVDetectionSession):\n if detection_session and detection_session.cap:\n detection_session.cap.release()\n detection_session.cap = None\n return super().end_session(detection_session)\n\n def run_detection_gstsample(self, detection_session: OpenCVDetectionSession, gst_sample, settings: Any, src_size, convert_to_src_size)-> ObjectsDetected:\n buf = gst_sample.get_buffer()\n caps = gst_sample.get_caps()\n # can't trust the width value, compute the stride\n height = caps.get_structure(0).get_value('height')\n width = caps.get_structure(0).get_value('width')\n result, info = buf.map(Gst.MapFlags.READ)\n if not result:\n return\n try:\n mat = np.ndarray(\n (height,\n width,\n self.pixelFormatChannelCount),\n buffer=info.data,\n dtype= np.uint8)\n return self.detect(detection_session, mat, settings, src_size, convert_to_src_size)\n finally:\n buf.unmap(info)\n\n def create_detection_session(self):\n return OpenCVDetectionSession()\n\n def detection_event_notified(self, settings: Any):\n area, threshold, interval = self.parse_settings(settings)\n # it is safe to block here because gstreamer creates a queue thread\n sleep(interval / 1000)\n return super().detection_event_notified(settings)\n" ]
[ [ "numpy.ndarray", "numpy.floor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
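The OpenCV plugin in the scrypted record above detects motion by blurring consecutive grayscale frames, differencing them, thresholding and dilating the delta, then keeping contours whose bounding box exceeds a configurable area. A minimal reproduction of that pipeline on two synthetic frames follows; it assumes OpenCV 4, where cv2.findContours returns two values (the plugin calls imutils.grab_contours to stay version-agnostic), and uses the plugin's default threshold and area settings.

import cv2
import numpy as np

# two synthetic 300x300 grayscale frames; the second contains a bright moving block
prev_frame = np.zeros((300, 300), dtype=np.uint8)
cur_frame = prev_frame.copy()
cur_frame[100:160, 120:200] = 255

thresh_val, min_area = 25, 2000  # defaultThreshold and defaultArea from the plugin

prev_blur = cv2.GaussianBlur(prev_frame, (21, 21), 0)
cur_blur = cv2.GaussianBlur(cur_frame, (21, 21), 0)

delta = cv2.absdiff(prev_blur, cur_blur)
_, thresh = cv2.threshold(delta, thresh_val, 255, cv2.THRESH_BINARY)
dilated = cv2.dilate(thresh, None, iterations=2)
contours, _ = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

for c in contours:
    x, y, w, h = cv2.boundingRect(c)
    if w * h > min_area:
        print("motion at", (x, y, w, h), "area", w * h)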
liqimai/GraphConvForSSL
[ "ef94a897292275680b1058685f2de9d4a8a6449c" ]
[ "gcn/lp.py" ]
[ "import numpy as np\nfrom gcn.graphconv import ap_approximate\n\n\ndef Model17(adj, alpha, y_train, y_test):\n k = int(np.ceil(4 * alpha))\n prediction, time = ap_approximate(adj, y_train, alpha, k)\n predicted_labels = np.argmax(prediction, axis=1)\n prediction = np.zeros(prediction.shape)\n prediction[np.arange(prediction.shape[0]), predicted_labels] = 1\n\n test_acc = np.sum(prediction * y_test) / np.sum(y_test)\n test_acc_of_class = np.sum(prediction * y_test, axis=0) / np.sum(y_test, axis=0)\n return test_acc, test_acc_of_class\n" ]
[ [ "numpy.arange", "numpy.ceil", "numpy.argmax", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
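Model17 in gcn/lp.py above turns the soft scores returned by ap_approximate into one-hot predictions and then measures accuracy only over the rows that carry test labels (y_test is zero elsewhere). The same post-processing is shown here on a hand-written score matrix, so the arithmetic can be seen without the gcn package; the numbers are purely illustrative.

import numpy as np

# soft class scores for 6 nodes over 3 classes
prediction = np.array([[0.7, 0.2, 0.1],
                       [0.1, 0.8, 0.1],
                       [0.3, 0.3, 0.4],
                       [0.6, 0.3, 0.1],
                       [0.2, 0.5, 0.3],
                       [0.1, 0.1, 0.8]])

# one-hot test labels; only the last three nodes belong to the test set
y_test = np.zeros_like(prediction)
y_test[np.arange(3, 6), [0, 1, 2]] = 1

# hard argmax predictions, exactly as in Model17
hard = np.zeros_like(prediction)
hard[np.arange(len(prediction)), np.argmax(prediction, axis=1)] = 1

# masked overall accuracy and per-class accuracy
test_acc = np.sum(hard * y_test) / np.sum(y_test)
test_acc_of_class = np.sum(hard * y_test, axis=0) / np.sum(y_test, axis=0)
print(test_acc, test_acc_of_class)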
onlyphantom/elangdev
[ "bdb80e10e98f98ef6510c313cda55daf9464d5c4", "bdb80e10e98f98ef6510c313cda55daf9464d5c4" ]
[ "build/lib/elang/plot/utils/embedding.py", "elang/word2vec/scraper/scrape_01.py" ]
[ "import sys, os.path\nimport gensim\nfrom gensim.models import Word2Vec\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.decomposition import PCA\n\n\ndef plot2d_demo(model, words=None):\n assert (\n model.vector_size >= 2\n ), \"This function expects a model of size 2 (2-dimension word vectors) or higher.\"\n\n if words is None:\n words = [words for words in model.wv.vocab]\n\n word_vec = np.array([model.wv[word] for word in words])\n\n if model.vector_size > 2:\n pca = PCA(2)\n word_vec = pca.fit_transform(word_vec)\n\n with plt.style.context(\"seaborn-pastel\"):\n plt.figure(figsize=(7, 5), dpi=180)\n plt.scatter(word_vec[:, 0], word_vec[:, 1], s=5, edgecolors=\"k\", c=\"c\")\n\n for word, (x, y) in zip(words, word_vec):\n plt.text(x - 0.02, y + 0.02, word, fontsize=5)\n\n plt.show()\n\n\nif __name__ == \"__main__\":\n MODEL_PATH = (\n os.path.abspath(os.path.join(os.path.dirname(__file__), \"../../\"))\n + \"/word2vec/model/demo2d.model\"\n # + \"/word2vec/model/demo500d.model\"\n )\n model = Word2Vec.load(MODEL_PATH)\n print(\"Loaded from Path:\", MODEL_PATH, \"\\n\", model)\n\n # plot2d_demo(model, words=[\"bca\", \"mandiri\", \"uob\", \"algoritma\", \"airbnb\"])\n plot2d_demo(model)\n", "import os\nimport re\nimport math\nimport pickle\nimport time\nimport sys\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nfrom tqdm import tqdm\n\nrealpath = os.path.dirname(os.path.realpath(__file__))\nfolderpath = realpath + \"\\\\scrape-results\"\n\n# main scraping function\ndef extract_wikipedia_random(num_articles):\n _make_folders()\n\n url_base = \"https://id.wikipedia.org/wiki/\"\n random_url = url_base + \"Istimewa:Halaman_sembarang\"\n\n if num_articles <= 0:\n raise ValueError(\"number of articles must be positive\")\n\n articles = []\n for page in tqdm(range(num_articles)):\n url = requests.request(\"GET\", random_url).url\n query = re.sub(url_base, '', url)\n\n articles.append(_get_wikipedia_article(query))\n\n filename = \"wikipedia_random_{}\".format(num_articles)\n _save_content2tsv(articles, filename + \".tsv\")\n _save_content2txt(articles, filename + \".txt\")\n print(\"Article contents successfully saved to\", filename + \".tsv\", \"and\", filename + \".txt\")\n\n return articles\n\n\ndef extract_wikipedia(query, levels = 5):\n _make_folders()\n \n # scrape article with query\n try:\n articles = []\n article = _get_wikipedia_article(query)\n articles.append(article)\n next_related_queries = list(set(article['related_queries']))\n all_queries = [query]\n except:\n raise Exception(\"no article found, try another query\")\n\n # scrape related queries\n for level in range(1, levels+1):\n print(\"Level {}\\n\".format(level))\n\n nextt = []\n for related_query in tqdm(set(next_related_queries)):\n if related_query not in set(all_queries) and related_query != \"\":\n current_article = _get_wikipedia_article(related_query)\n articles.append(current_article)\n all_queries.append(related_query)\n nextt.extend(current_article['related_queries'])\n next_related_queries = list(set(nextt))\n\n filename = \"wikipedia_{}_{}\".format(query, len(articles))\n _save_content2tsv(articles, filename + \".tsv\")\n _save_content2txt(articles, filename + \".txt\")\n print(\"Article contents successfully saved to\", filename + \".tsv\", \"and\", filename + \".txt\")\n\n return articles\n\n\ndef extract_tirtoid(query, batch_size = None, save_urls = True):\n _make_folders()\n \n # list of dictionary: {\"title\", \"url\"}\n tirtoid_urls = _get_tirtoid_urls(query, 
save_urls)\n\n # list of dictionary: {\"title\", \"url\", \"category\", \"content\"}\n articles = _get_tirtoid_contents(tirtoid_urls, batch_size, query)\n\n return articles\n\n\ndef extract_detikcom(query, batch_size = None, save_urls = True):\n _make_folders()\n \n # list of dictionary: {\"title\", \"url\"}\n detikcom_urls = _get_detikcom_urls(query, save_urls)\n\n # list of dictionary: {\"title\", \"url\", \"category\", \"content\"}\n articles = _get_detikcom_contents(detikcom_urls, batch_size, query)\n\n return articles\n\n\n# helper function\ndef _get_wikipedia_article(query):\n url_base = \"https://id.wikipedia.org\"\n url_query = url_base + \"/wiki/\" + str(query)\n req = requests.get(url_query)\n soup = BeautifulSoup(req.content, \"html.parser\")\n\n article = {}\n article['title'] = soup.find(\"h1\", attrs={\"class\": \"firstHeading\"}).text\n article['url'] = url_query\n\n find_div = soup.find(\"div\", attrs={\"class\": \"mw-parser-output\"})\n if find_div is None:\n return\n for s in find_div(['script', 'style', 'table', 'div']):\n s.decompose()\n\n find_content = find_div.findAll([\"p\", \"li\", \"h2.span.mw-headline\", \"h3.span.mw-headline\"])\n\n article['content'] = ' '.join([re.sub(r'\\s+', ' ', row.text) for row in find_content])\n\n find_redirect_link = find_div.findAll(\"a\", attrs={\"class\": \"mw-redirect\"})\n article['related_queries'] = [link['href'][6:] for link in find_redirect_link]\n\n return article\n\n\ndef _get_tirtoid_urls(query, save_urls = True):\n url_base = \"https://tirto.id\"\n url_query = url_base + \"/search?q=\" + query\n req = requests.get(url_query)\n soup = BeautifulSoup(req.content, \"html.parser\")\n\n # get total page number\n try:\n find_pagination = soup.findAll(\"li\", attrs = {\"class\": \"pagination-item\"})\n pagination_list = [row.a.text for row in find_pagination]\n total_page = int(pagination_list[-2])\n except:\n raise Exception(\"no article found, try another query\")\n \n # iterate each page number, to get the title and url\n articles = []\n print(\"Extracting article URLs from\", url_query)\n for page_num in tqdm(range(1, total_page+1)):\n url = url_query + \"&p=\" + str(page_num)\n r = requests.get(url)\n s = BeautifulSoup(r.content, \"html.parser\")\n\n find_article = s.findAll(\"div\", attrs = {\"class\": \"news-list-fade\"})\n for row in find_article:\n article = {}\n article['title'] = row.h1.text\n article['url'] = url_base + row.a['href']\n articles.append(article)\n\n if save_urls:\n path = \"{}\\\\pkl\\\\tirtoid_{}.pkl\".format(folderpath, query)\n _save2pickle(path, articles)\n print(\"URLs successfully saved to\", path)\n\n return articles\n\n\ndef _get_tirtoid_contents(articles, batch_size=None, query=None):\n if batch_size == None or batch_size <= 0:\n batch_size = len(articles) \n\n # loop through each stored url\n counter = 0\n print(\"Extracting article contents\")\n for article in tqdm(articles):\n counter += 1\n\n # access the article url\n req_article = requests.get(article['url'])\n soup_article = BeautifulSoup(req_article.content, \"html.parser\")\n\n # preprocessing html\n for s in soup_article(['script', 'style']):\n s.decompose()\n for br in soup_article.find_all(\"br\"):\n br.replace_with(\" \")\n\n # get article category\n find_category = soup_article.findAll(\"a\", attrs = {\"itemprop\": \"item\"})\n article['category'] = find_category[-1].text if len(find_category) else \"\"\n\n # get article content (but exclude the \"Baca juga\" section)\n find_baca_juga_section = soup_article.find(\"div\", attrs = 
{\"class\": \"baca-holder\"})\n try:\n if find_baca_juga_section is not None:\n row.decompose()\n except:\n pass\n \n article_table = soup_article.findAll(\"div\", attrs = {\"class\": \"content-text-editor\"})[:-1]\n article['content'] = \" \".join([re.sub(r'\\s+', ' ', row.text) for row in article_table])\n\n # save content to file, per batch\n # tsv: category, content, title, url\n # txt: content and title\n if 0 < batch_size < len(articles):\n if counter % batch_size == 0 or counter == len(articles):\n batch_num = (counter-1) // batch_size\n start_idx = batch_size * batch_num\n end_idx = min(start_idx + batch_size, len(articles))\n\n articles_batch = articles[start_idx:end_idx]\n\n filename = \"tirtoid_{}_#{}_{}\".format(query, batch_num+1, len(articles_batch))\n\n _save_content2tsv(articles_batch, filename + \".tsv\")\n _save_content2txt(articles_batch, filename + \".txt\")\n print(\"\\nArticle contents successfully saved to\", filename + \".tsv\", \"and\", filename + \".txt\")\n\n if batch_size >= len(articles) :\n filename = \"tirtoid_{}_{}\".format(query, len(articles))\n _save_content2tsv(articles, filename + \".tsv\")\n _save_content2txt(articles, filename + \".txt\")\n print(\"Article contents successfully saved to\", filename + \".tsv\", \"and\", filename + \".txt\")\n\n return articles\n\n\ndef _get_detikcom_urls(query, save_urls=True):\n url_base = \"https://www.detik.com\"\n url_query = url_base + \"/search/searchnews?query=\" + query\n req = requests.get(url_query)\n soup = BeautifulSoup(req.content, \"html.parser\")\n\n # get total page number\n try:\n find_total_article = soup.find(\"div\", attrs = {\"class\": \"search-result\"})\n total_article_match = re.search(\"\\\\d+\", find_total_article.span.text)\n total_article = int(total_article_match.group(0))\n\n total_page = int(math.ceil(total_article/9))\n total_page = min(1111, total_page) # detik only provides max. 
1111 pages\n except:\n raise Exception(\"no article found, try another query\")\n \n # iterate each page number\n articles = []\n print(\"Extracting article URLs from\", url_query)\n for page_num in tqdm(range(1, total_page+1)):\n url = url_query + \"&page=\" + str(page_num)\n r = requests.get(url)\n s = BeautifulSoup(r.content, \"html.parser\")\n\n find_article = s.findAll(\"article\")\n for row in find_article:\n article = {}\n\n # get url\n article['url'] = row.a['href']\n\n # get title\n article['title'] = row.h2.text\n\n # get category\n find_category = row.find(\"span\", attrs = {\"class\": \"category\"})\n article['category'] = find_category.text\n find_category.decompose()\n\n # get posted date\n # article['posted_date'] = row.find(\"span\", attrs = {\"class\": \"date\"}).text\n\n articles.append(article)\n\n if save_urls:\n path = \"{}\\\\pkl\\\\detikcom_{}.pkl\".format(folderpath, query)\n _save2pickle(path, articles)\n print(\"URLs successfully saved to\", path)\n \n return articles\n\n\ndef _get_detikcom_contents(articles, batch_size=None, query=None):\n if batch_size == None or batch_size <= 0:\n batch_size = len(articles)\n\n # loop through each stored url\n counter = 0\n print(\"Extracting article contents\")\n for article in tqdm(articles):\n counter += 1\n\n # access the article url\n try:\n req_article = requests.get(article['url'] + \"?single=1\")\n except:\n continue\n \n soup_article = BeautifulSoup(req_article.content, \"html.parser\")\n\n # preprocessing html\n for s in soup_article(['script', 'style']):\n s.decompose()\n for br in soup_article.find_all(\"br\"):\n br.replace_with(\" \")\n\n # get article content\n find_div = soup_article.find(\"div\", attrs = {\"class\": \"detail__body-text\"})\n if find_div is None:\n find_div = soup_article.find(\"div\", attrs = {\"class\": \"itp_bodycontent\"})\n if find_div is None:\n find_div = soup_article.find(\"div\", attrs = {\"class\": \"detail_text\"})\n \n if find_div is not None:\n article_content = find_div.findAll(\"p\")\n if len(article_content) == 0:\n article_content = [find_div]\n article['content'] = \" \".join([re.sub(r'\\s+', ' ', row.text) for row in article_content])\n else:\n article['content'] = \"\"\n\n # save content to file, per batch\n # tsv: category, content, title, url\n # txt: content and title\n if 0 < batch_size < len(articles):\n if counter % batch_size == 0 or counter == len(articles):\n batch_num = (counter-1) // batch_size\n start_idx = batch_size * batch_num\n end_idx = min(start_idx + batch_size, len(articles))\n\n articles_batch = articles[start_idx:end_idx]\n\n filename = \"detikcom_{}_#{}_{}\".format(query, batch_num+1, len(articles_batch))\n\n _save_content2tsv(articles_batch, filename + \".tsv\")\n _save_content2txt(articles_batch, filename + \".txt\")\n print(\"\\nArticle contents successfully saved to\", filename + \".tsv\", \"and\", filename + \".txt\")\n\n if batch_size >= len(articles):\n filename = \"detikcom_{}_{}\".format(query, len(articles))\n _save_content2tsv(articles, filename + \".tsv\")\n _save_content2txt(articles, filename + \".txt\")\n print(\"Article contents successfully saved to\", filename + \".tsv\", \"and\", filename + \".txt\")\n\n return articles\n\n\ndef _open_pickle(filename):\n with open(\"{}\\\\pkl\\\\{}\".format(folderpath, filename), \"rb\") as f:\n return pickle.load(f)\n \n\ndef _save2pickle(path, l):\n with open(path, 'wb') as f:\n pickle.dump(l, f)\n\n\ndef _save_content2tsv(dictionary, filename):\n df = pd.DataFrame(dictionary)\n 
df.to_csv(\"{}\\\\tsv\\\\{}\".format(folderpath, filename), sep = \"\\t\", index = False)\n\n\ndef _save_content2txt(dictionary, filename):\n title_content_list = [d['title'] + \"\\n\" + d['content'] for d in dictionary if 'content' in d.keys()]\n with open(\"{}\\\\txt\\\\{}\".format(folderpath, filename), \"w\", encoding = \"utf-8\") as f:\n f.write(\"\\n\".join(title_content_list))\n\n\ndef _convert_tsv2txt(source_filename, destination_filename):\n df = pd.read_csv(\"{}\\\\tsv\\\\{}\".format(folderpath, source_filename), sep = '\\t', encoding = \"utf-8\")\n\n title_content_series = df[\"title\"] + \"\\n\" + df[\"content\"]\n with open(\"{}\\\\txt\\\\{}\".format(folderpath, destination_filename), \"w\", encoding = \"utf-8\") as f:\n f.write(\"\\n\".join([str(row) for row in title_content_series]))\n\n\ndef _make_folders():\n for filetype in ['tsv', 'txt', 'pkl']:\n path = folderpath + \"\\\\\" + filetype\n if not os.path.exists(path):\n os.makedirs(path)\n" ]
[ [ "matplotlib.pyplot.scatter", "matplotlib.pyplot.style.context", "matplotlib.pyplot.text", "numpy.array", "sklearn.decomposition.PCA", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ], [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
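plot2d_demo in the elang record above reduces word vectors to two dimensions with PCA before scattering them. The sketch below shows the same projection without the matplotlib part; it assumes gensim 4.x (where model.wv.vocab from the record becomes model.wv.index_to_key and the size argument becomes vector_size), and the toy corpus is purely illustrative rather than the demo model shipped with the package.

import numpy as np
from gensim.models import Word2Vec
from sklearn.decomposition import PCA

# tiny illustrative corpus; the real demo model is trained on Indonesian text
sentences = [["bca", "mandiri", "bank"], ["algoritma", "data", "science"],
             ["airbnb", "startup", "booking"], ["bca", "bank", "uob"]]
model = Word2Vec(sentences, vector_size=50, min_count=1, seed=1)

words = list(model.wv.index_to_key)
word_vec = np.array([model.wv[w] for w in words])

# anything above 2 dimensions is projected with PCA, as in plot2d_demo
word_vec_2d = PCA(n_components=2).fit_transform(word_vec)
for word, (x, y) in zip(words, word_vec_2d):
    print(f"{word}: ({x:.3f}, {y:.3f})")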
weikhor/tensorflow
[ "ce047fc05c7b5ff54868ba53d724d9c171c4adbb", "17ac2bd078dcc8c4cf064c0e977b4e2dd061b011" ]
[ "tensorflow/python/data/experimental/kernel_tests/snapshot_test.py", "tensorflow/lite/python/convert.py" ]
[ "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for the `SnapshotDataset` transformation.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport multiprocessing\nimport os\nimport shutil\nimport time\n\nfrom absl.testing import parameterized\nimport numpy as np\n\nfrom tensorflow.python.data.experimental.ops import snapshot\nfrom tensorflow.python.data.kernel_tests import checkpoint_test_base\nfrom tensorflow.python.data.kernel_tests import test_base\nfrom tensorflow.python.data.kernel_tests import tf_record_test_base\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.data.ops import readers as core_readers\nfrom tensorflow.python.framework import combinations\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import gen_array_ops\nfrom tensorflow.python.ops import string_ops\nfrom tensorflow.python.platform import test\n\n\ndef is_graphdef_file(filename):\n return filename.endswith(\"-graph.pbtxt\")\n\n\ndef is_temp_file(filename):\n return \"-tmp-\" in filename\n\n\ndef listdir_and_filter(dirname, filter_fn):\n return [path for path in sorted(os.listdir(dirname)) if filter_fn(path)]\n\n\nclass SnapshotTest(tf_record_test_base.TFRecordTestBase,\n parameterized.TestCase):\n\n def setUp(self):\n super(SnapshotTest, self).setUp()\n tmpdir = self.get_temp_dir()\n tmpdir = os.path.join(tmpdir, \"snapshot\")\n os.mkdir(tmpdir)\n self._snapshot_dir = tmpdir\n\n def tearDown(self):\n super(SnapshotTest, self).tearDown()\n shutil.rmtree(self._snapshot_dir)\n\n def createTFRecords(self, num_files=10, num_records=100):\n self._num_files = num_files\n self._num_records = num_records\n self._filenames = self._createFiles()\n\n def removeTFRecords(self):\n for filename in self._filenames:\n os.remove(filename)\n self._filenames = []\n self._num_files = None\n self._num_records = None\n\n def assertDatasetProducesSet(self, dataset, expected):\n actual = []\n next_fn = self.getNext(dataset)\n for _ in range(len(expected)):\n elem = self.evaluate(next_fn())\n actual.append(elem)\n self.assertCountEqual(actual, expected)\n with self.assertRaises(errors.OutOfRangeError):\n self.evaluate(next_fn())\n\n def assertSnapshotDirectoryContains(self, directory, num_fingerprints,\n num_runs_per_fingerprint,\n num_snapshot_shards_per_run):\n\n # Ignore the graphdef pbtxts we write for debugging purposes and temporary\n # files that are an artifact of how TF writes files.\n dirlist = listdir_and_filter(\n directory,\n lambda p: not (is_graphdef_file(p) or is_temp_file(p)))\n self.assertLen(dirlist, num_fingerprints)\n\n for i in range(num_fingerprints):\n fingerprint_dir = os.path.join(directory, dirlist[i])\n fingerprint_dir_list = listdir_and_filter(\n fingerprint_dir, lambda p: not 
is_temp_file(p))\n self.assertLen(fingerprint_dir_list, num_runs_per_fingerprint + 1)\n self.assertEqual(fingerprint_dir_list[num_runs_per_fingerprint],\n \"snapshot.metadata\")\n\n for j in range(num_runs_per_fingerprint):\n run_dir = os.path.join(fingerprint_dir, fingerprint_dir_list[j])\n run_dirlist = sorted(os.listdir(run_dir))\n self.assertLen(run_dirlist, num_snapshot_shards_per_run)\n\n file_counter = 0\n for filename in run_dirlist:\n self.assertEqual(filename, \"%08d.shard\" % file_counter)\n file_counter += 1\n\n @combinations.generate(test_base.default_test_combinations())\n def testCreateSnapshotDataset(self):\n dataset = dataset_ops.Dataset.from_tensors([1, 2, 3])\n dataset.apply(snapshot.snapshot(self._snapshot_dir))\n\n @combinations.generate(test_base.default_test_combinations())\n def testReadSnapshotDatasetDefault(self):\n self.createTFRecords()\n filenames = self._filenames\n expected = [\n b\"Record %d of file %d\" % (r, f) # pylint:disable=g-complex-comprehension\n for f in range(0, 10)\n for r in range(0, 100)\n ]\n\n dataset = core_readers._TFRecordDataset(filenames)\n dataset = dataset.apply(snapshot.snapshot(self._snapshot_dir))\n self.assertDatasetProduces(dataset, expected)\n self.assertSnapshotDirectoryContains(\n self._snapshot_dir,\n num_fingerprints=1,\n num_runs_per_fingerprint=1,\n num_snapshot_shards_per_run=multiprocessing.cpu_count())\n\n self.removeTFRecords()\n dataset2 = core_readers._TFRecordDataset(filenames)\n dataset2 = dataset2.apply(snapshot.snapshot(self._snapshot_dir))\n self.assertDatasetProduces(dataset2, expected)\n\n @combinations.generate(test_base.default_test_combinations())\n def testReadSnapshotDatasetAutoWriteSnappyRead(self):\n self.createTFRecords()\n filenames = self._filenames\n expected = [\n b\"Record %d of file %d\" % (r, f) # pylint:disable=g-complex-comprehension\n for f in range(0, 10)\n for r in range(0, 100)\n ]\n\n dataset = core_readers._TFRecordDataset(filenames)\n dataset = dataset.apply(\n snapshot.snapshot(self._snapshot_dir, compression=\"AUTO\"))\n self.assertDatasetProduces(dataset, expected)\n\n self.removeTFRecords()\n dataset2 = core_readers._TFRecordDataset(filenames)\n dataset2 = dataset2.apply(\n snapshot.snapshot(self._snapshot_dir, compression=\"SNAPPY\"))\n self.assertDatasetProduces(dataset2, expected)\n\n @combinations.generate(test_base.default_test_combinations())\n def testReadSnapshotDatasetCustomShardFn(self):\n self.createTFRecords()\n filenames = self._filenames\n expected = [\n b\"Record %d of file %d\" % (r, f) # pylint:disable=g-complex-comprehension\n for f in range(0, 10)\n for r in range(0, 100)\n ]\n\n dataset = core_readers._TFRecordDataset(filenames)\n dataset = dataset.apply(\n snapshot.snapshot(self._snapshot_dir, shard_func=lambda _: np.int64(0)))\n self.assertDatasetProduces(dataset, expected)\n self.assertSnapshotDirectoryContains(\n self._snapshot_dir,\n num_fingerprints=1,\n num_runs_per_fingerprint=1,\n num_snapshot_shards_per_run=1)\n\n self.removeTFRecords()\n dataset2 = core_readers._TFRecordDataset(filenames)\n dataset2 = dataset2.apply(\n snapshot.snapshot(self._snapshot_dir, shard_func=lambda _: 0))\n self.assertDatasetProduces(dataset2, expected)\n\n @combinations.generate(test_base.default_test_combinations())\n def testReadSnapshotDatasetCustomReaderFn(self):\n self.createTFRecords()\n filenames = self._filenames\n expected = [\n b\"Record %d of file %d\" % (r, f) # pylint:disable=g-complex-comprehension\n for f in range(0, 10)\n for r in range(0, 100)\n ]\n\n dataset 
= core_readers._TFRecordDataset(filenames)\n dataset = dataset.apply(\n snapshot.snapshot(\n self._snapshot_dir,\n reader_func=(\n lambda ds: ds.interleave( # pylint:disable=g-long-lambda\n lambda x: x,\n cycle_length=4,\n num_parallel_calls=4))))\n self.assertDatasetProduces(dataset, expected)\n self.assertSnapshotDirectoryContains(\n self._snapshot_dir,\n num_fingerprints=1,\n num_runs_per_fingerprint=1,\n num_snapshot_shards_per_run=multiprocessing.cpu_count())\n\n self.removeTFRecords()\n dataset2 = core_readers._TFRecordDataset(filenames)\n dataset2 = dataset2.apply(\n snapshot.snapshot(\n self._snapshot_dir,\n reader_func=(\n lambda ds: ds.interleave( # pylint:disable=g-long-lambda\n lambda x: x,\n cycle_length=4,\n num_parallel_calls=4))))\n self.assertDatasetProducesSet(dataset2, expected)\n\n @combinations.generate(test_base.default_test_combinations())\n def testSnapshotDatasetInvalidShardFn(self):\n dataset = dataset_ops.Dataset.range(1000)\n with self.assertRaises(TypeError):\n dataset = dataset.apply(\n snapshot.snapshot(\n self._snapshot_dir, shard_func=lambda _: \"invalid_fn\"))\n next_fn = self.getNext(dataset)\n self.evaluate(next_fn())\n\n @combinations.generate(test_base.default_test_combinations())\n def testSnapshotDatasetInvalidReaderFn(self):\n dataset = dataset_ops.Dataset.range(1000)\n with self.assertRaises(TypeError):\n dataset = dataset.apply(\n snapshot.snapshot(self._snapshot_dir, reader_func=lambda x: x + 1))\n next_fn = self.getNext(dataset)\n self.evaluate(next_fn())\n\n @combinations.generate(test_base.default_test_combinations())\n def testRoundtripEmptySnapshot(self):\n dataset = dataset_ops.Dataset.range(0)\n dataset = dataset.apply(snapshot.snapshot(self._snapshot_dir))\n self.assertDatasetProduces(dataset, [])\n self.assertSnapshotDirectoryContains(\n self._snapshot_dir,\n num_fingerprints=1,\n num_runs_per_fingerprint=1,\n num_snapshot_shards_per_run=0)\n\n dataset2 = dataset_ops.Dataset.range(0)\n dataset2 = dataset.apply(snapshot.snapshot(self._snapshot_dir))\n self.assertDatasetProduces(dataset2, [])\n\n @combinations.generate(test_base.default_test_combinations())\n def testWriteSnapshotDatasetSimple(self):\n dataset = dataset_ops.Dataset.range(1000)\n dataset = dataset.apply(snapshot.snapshot(self._snapshot_dir))\n self.assertDatasetProduces(dataset, list(range(1000)))\n self.assertSnapshotDirectoryContains(\n self._snapshot_dir,\n num_fingerprints=1,\n num_runs_per_fingerprint=1,\n num_snapshot_shards_per_run=multiprocessing.cpu_count())\n\n @combinations.generate(test_base.default_test_combinations())\n def testWriteSnapshotDatasetMultipleFingerprints(self):\n dataset1 = dataset_ops.Dataset.range(1000)\n dataset1 = dataset1.apply(snapshot.snapshot(self._snapshot_dir))\n self.assertDatasetProduces(dataset1, list(range(1000)))\n\n dataset2 = dataset_ops.Dataset.range(2000)\n dataset2 = dataset2.apply(snapshot.snapshot(self._snapshot_dir))\n self.assertDatasetProduces(dataset2, list(range(2000)))\n\n self.assertSnapshotDirectoryContains(\n self._snapshot_dir,\n num_fingerprints=2,\n num_runs_per_fingerprint=1,\n num_snapshot_shards_per_run=multiprocessing.cpu_count())\n\n @combinations.generate(test_base.default_test_combinations())\n def testWriteSnapshotDatasetSameFingerprintMultipleCompleteRuns(self):\n dataset1 = dataset_ops.Dataset.range(1000)\n dataset1 = dataset1.apply(snapshot.snapshot(self._snapshot_dir))\n self.assertDatasetProduces(dataset1, list(range(1000)))\n dataset2 = dataset_ops.Dataset.range(1000)\n dataset2 = 
dataset2.apply(snapshot.snapshot(self._snapshot_dir))\n self.assertDatasetProduces(dataset2, list(range(1000)))\n\n self.assertSnapshotDirectoryContains(\n self._snapshot_dir,\n num_fingerprints=1,\n num_runs_per_fingerprint=1,\n num_snapshot_shards_per_run=multiprocessing.cpu_count())\n\n @combinations.generate(test_base.default_test_combinations())\n def testWriteSnapshotDatasetSameFingerprintIncompleteRunRestart(self):\n dataset1 = dataset_ops.Dataset.range(1000)\n dataset1 = dataset1.apply(snapshot.snapshot(self._snapshot_dir))\n next1 = self.getNext(dataset1)\n for i in range(500):\n self.assertEqual(i, self.evaluate(next1()))\n\n dataset2 = dataset_ops.Dataset.range(1000)\n dataset2 = dataset2.apply(snapshot.snapshot(self._snapshot_dir))\n next2 = self.getNext(dataset2)\n for i in range(500):\n self.assertEqual(i, self.evaluate(next2()))\n\n for i in range(500, 1000):\n self.assertEqual(i, self.evaluate(next1()))\n self.assertEqual(i, self.evaluate(next2()))\n\n self.assertSnapshotDirectoryContains(\n self._snapshot_dir,\n num_fingerprints=1,\n num_runs_per_fingerprint=2,\n num_snapshot_shards_per_run=multiprocessing.cpu_count())\n\n @combinations.generate(test_base.default_test_combinations())\n def testWriteSnapshotCustomShardFunction(self):\n dataset = dataset_ops.Dataset.range(1000)\n dataset = dataset.enumerate()\n dataset = dataset.apply(\n snapshot.snapshot(self._snapshot_dir, shard_func=lambda i, _: i % 2))\n dataset = dataset.map(lambda _, elem: elem)\n self.assertDatasetProduces(dataset, list(range(1000)))\n self.assertSnapshotDirectoryContains(\n self._snapshot_dir,\n num_fingerprints=1,\n num_runs_per_fingerprint=1,\n num_snapshot_shards_per_run=2)\n\n @combinations.generate(test_base.default_test_combinations())\n def testWriteSnapshotDatasetWithTuples(self):\n dataset1 = dataset_ops.Dataset.range(0, 1000)\n dataset2 = dataset_ops.Dataset.range(1000, 2000)\n dataset3 = dataset_ops.Dataset.range(2000, 3000)\n dataset4 = dataset_ops.Dataset.range(3000, 4000)\n\n dataset = dataset_ops.Dataset.zip((dataset1, dataset2, dataset3, dataset4))\n dataset = dataset.apply(snapshot.snapshot(self._snapshot_dir))\n\n expected = list(\n zip(\n range(0, 1000), range(1000, 2000), range(2000, 3000),\n range(3000, 4000)))\n self.assertDatasetProduces(dataset, expected)\n self.assertSnapshotDirectoryContains(\n self._snapshot_dir,\n num_fingerprints=1,\n num_runs_per_fingerprint=1,\n num_snapshot_shards_per_run=multiprocessing.cpu_count())\n\n @combinations.generate(test_base.default_test_combinations())\n def testWriteSnapshotShuffleSameFingerprint(self):\n\n def make_dataset():\n dataset = dataset_ops.Dataset.range(1000)\n dataset = dataset.shuffle(1000)\n dataset = dataset.apply(snapshot.snapshot(self._snapshot_dir))\n return dataset\n\n dataset1 = make_dataset()\n self.assertDatasetProducesSet(dataset1, list(range(1000)))\n dataset2 = make_dataset()\n self.assertDatasetProducesSet(dataset2, list(range(1000)))\n self.assertSnapshotDirectoryContains(\n self._snapshot_dir,\n num_fingerprints=1,\n num_runs_per_fingerprint=1,\n num_snapshot_shards_per_run=multiprocessing.cpu_count())\n\n @combinations.generate(test_base.default_test_combinations())\n def testReadUsingFlatMap(self):\n dataset = dataset_ops.Dataset.range(1000)\n dataset = dataset.apply(snapshot.snapshot(self._snapshot_dir))\n self.assertDatasetProduces(dataset, list(range(1000)))\n flat_map = dataset_ops.Dataset.from_tensors(dataset).flat_map(lambda x: x)\n self.assertDatasetProduces(flat_map, list(range(1000)))\n 
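    # The flat_map read above wraps the already-snapshotted dataset in
    # Dataset.from_tensors(...).flat_map(lambda x: x), so the same 1000 elements
    # are produced again through a nested-dataset path; the directory check that
    # follows verifies the re-read did not create a new fingerprint or run.
    # A minimal standalone form of the same pattern, assuming eager execution
    # and a placeholder path:
    #
    #   ds = dataset_ops.Dataset.range(1000)
    #   ds = ds.apply(snapshot.snapshot("/tmp/snapshot_sketch"))
    #   nested = dataset_ops.Dataset.from_tensors(ds).flat_map(lambda x: x)
    #   assert [int(x) for x in nested] == list(range(1000))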
self.assertSnapshotDirectoryContains(\n self._snapshot_dir,\n num_fingerprints=1,\n num_runs_per_fingerprint=1,\n num_snapshot_shards_per_run=multiprocessing.cpu_count())\n\n @combinations.generate(test_base.default_test_combinations())\n def testReadOptimizableUsingFlatMap(self):\n dataset = dataset_ops.Dataset.range(1000)\n # Will be optimized into ShuffleAndRepeat.\n dataset = dataset.shuffle(10)\n dataset = dataset.repeat(2)\n dataset = dataset.apply(snapshot.snapshot(self._snapshot_dir))\n self.assertDatasetProducesSet(dataset, 2 * list(range(1000)))\n flat_map = dataset_ops.Dataset.from_tensors(dataset).flat_map(lambda x: x)\n self.assertDatasetProducesSet(flat_map, 2 * list(range(1000)))\n self.assertSnapshotDirectoryContains(\n self._snapshot_dir,\n num_fingerprints=1,\n num_runs_per_fingerprint=1,\n num_snapshot_shards_per_run=multiprocessing.cpu_count())\n\n @combinations.generate(test_base.default_test_combinations())\n def testRepeatAndPrefetch(self):\n \"\"\"This test reproduces github.com/tensorflow/tensorflow/issues/48903.\"\"\"\n dataset = dataset_ops.Dataset.from_tensor_slices(np.random.rand(16, 32))\n dataset = dataset.apply(snapshot.snapshot(self._snapshot_dir))\n dataset = dataset.shuffle(buffer_size=16)\n dataset = dataset.batch(16)\n dataset = dataset.repeat()\n dataset = dataset.prefetch(1)\n next_element = self.getNext(dataset)\n for _ in range(30):\n self.evaluate(next_element())\n\n\nclass LegacySnapshotTest(tf_record_test_base.TFRecordTestBase,\n parameterized.TestCase):\n\n def setUp(self):\n super(LegacySnapshotTest, self).setUp()\n self.removeTFRecords()\n tmpdir = self.get_temp_dir()\n tmpdir = os.path.join(tmpdir, \"snapshot\")\n os.mkdir(tmpdir)\n self.snapshot_dir = tmpdir\n\n def tearDown(self):\n super(LegacySnapshotTest, self).tearDown()\n shutil.rmtree(self.snapshot_dir)\n\n def removeTFRecords(self):\n for filename in self._filenames:\n os.remove(filename)\n self._filenames = []\n\n def setUpTFRecord(self, num_files=10, num_records=10):\n self._num_files = num_files\n self._num_records = num_records\n self._filenames = self._createFiles()\n\n def makeSnapshotDirectory(self):\n return self.snapshot_dir\n\n def assertSnapshotDirectoryContains(self, directory, num_fingerprints,\n num_runs_per_fp, num_snapshot_files):\n # Ignore the graphdef pbtxts we write for debugging purposes and temporary\n # files that are an artifact of how TF writes files.\n dirlist = listdir_and_filter(\n directory,\n lambda p: not (is_graphdef_file(p) or is_temp_file(p)))\n self.assertLen(dirlist, num_fingerprints)\n\n for i in range(num_fingerprints):\n fingerprint_dir = os.path.join(directory, dirlist[i])\n fingerprint_dir_list = listdir_and_filter(\n fingerprint_dir, lambda p: not is_temp_file(p))\n self.assertLen(fingerprint_dir_list, num_runs_per_fp + 1)\n self.assertEqual(fingerprint_dir_list[num_runs_per_fp],\n \"snapshot.metadata\")\n\n for j in range(num_runs_per_fp):\n run_dir = os.path.join(fingerprint_dir, fingerprint_dir_list[j])\n run_dirlist = sorted(os.listdir(run_dir))\n self.assertLen(run_dirlist, num_snapshot_files)\n\n file_counter = 0\n for filename in run_dirlist:\n self.assertEqual(filename, \"%08d.snapshot\" % file_counter)\n file_counter += 1\n\n @combinations.generate(test_base.default_test_combinations())\n def testWriteDifferentPipelinesInOneDirectory(self):\n tmpdir = self.snapshot_dir\n\n dataset = dataset_ops.Dataset.range(1000)\n dataset = dataset.apply(snapshot.legacy_snapshot(tmpdir))\n self.assertDatasetProduces(dataset, list(range(1000)))\n\n 
dataset = dataset_ops.Dataset.range(1001)\n dataset = dataset.apply(snapshot.legacy_snapshot(tmpdir))\n self.assertDatasetProduces(dataset, list(range(1001)))\n\n self.assertSnapshotDirectoryContains(tmpdir, 2, 1, 1)\n\n @combinations.generate(test_base.default_test_combinations())\n def testWriteSnapshotMultipleSimultaneous(self):\n tmpdir = self.snapshot_dir\n\n dataset1 = dataset_ops.Dataset.range(1000)\n dataset1 = dataset1.apply(snapshot.legacy_snapshot(tmpdir))\n next1 = self.getNext(dataset1)\n\n dataset2 = dataset_ops.Dataset.range(1000)\n dataset2 = dataset2.apply(snapshot.legacy_snapshot(tmpdir))\n next2 = self.getNext(dataset2)\n\n for i in range(0, 1000):\n self.assertEqual(i, self.evaluate(next1()))\n self.assertEqual(i, self.evaluate(next2()))\n\n # we check that only one copy of the metadata has been written, and the\n # one that lost the race would be in passthrough mode.\n self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)\n\n @combinations.generate(test_base.default_test_combinations())\n def testGetNextCreatesDir(self):\n tmpdir = self.snapshot_dir\n\n # We create two iterators but call getNext on only one.\n dataset1 = dataset_ops.Dataset.range(1000)\n dataset1 = dataset1.apply(snapshot.legacy_snapshot(tmpdir))\n next1 = self.getNext(dataset1)\n\n dataset2 = dataset_ops.Dataset.range(1001)\n dataset2 = dataset2.apply(snapshot.legacy_snapshot(tmpdir))\n _ = self.getNext(dataset2)\n\n for _ in range(1000):\n self.evaluate(next1())\n\n # We check that only one directory is created.\n self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)\n\n @combinations.generate(\n combinations.times(\n test_base.default_test_combinations(),\n combinations.combine(compression=[\n snapshot.COMPRESSION_NONE, snapshot.COMPRESSION_GZIP,\n snapshot.COMPRESSION_SNAPPY\n ])))\n def testWriteSnapshotSimpleSuccessful(self, compression):\n tmpdir = self.snapshot_dir\n\n dataset = dataset_ops.Dataset.range(1000)\n dataset = dataset.apply(\n snapshot.legacy_snapshot(tmpdir, compression=compression))\n self.assertDatasetProduces(dataset, list(range(1000)))\n\n self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)\n\n @combinations.generate(\n combinations.times(\n test_base.default_test_combinations(),\n combinations.combine(compression=[\n snapshot.COMPRESSION_NONE, snapshot.COMPRESSION_GZIP,\n snapshot.COMPRESSION_SNAPPY\n ])))\n def testWriteSnapshotRepeatAfterwards(self, compression):\n tmpdir = self.snapshot_dir\n\n dataset = dataset_ops.Dataset.range(10)\n dataset = dataset.apply(\n snapshot.legacy_snapshot(tmpdir, compression=compression))\n dataset = dataset.repeat(10)\n self.assertDatasetProduces(dataset, list(range(10)) * 10)\n\n self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)\n\n @combinations.generate(\n combinations.times(\n test_base.default_test_combinations(),\n combinations.combine(compression=[\n snapshot.COMPRESSION_NONE, snapshot.COMPRESSION_GZIP,\n snapshot.COMPRESSION_SNAPPY\n ])))\n def testWriteSnapshotMixTypes(self, compression):\n tmpdir = self.snapshot_dir\n\n dataset = dataset_ops.Dataset.range(10)\n\n def map_fn(x):\n return (x, string_ops.as_string(x), string_ops.as_string(2 * x), 2 * x)\n\n dataset = dataset.map(map_fn)\n dataset = dataset.apply(\n snapshot.legacy_snapshot(tmpdir, compression=compression))\n dataset = dataset.repeat(10)\n\n expected = []\n for i in range(10):\n expected.append((i, str(i), str(2 * i), 2 * i))\n self.assertDatasetProduces(dataset, expected * 10)\n\n self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)\n\n 
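A standalone sketch of the write-then-reread flow that the compression-parameterized legacy snapshot tests above exercise; it assumes eager execution, uses a temporary directory in place of the test fixture, and picks GZIP from the three compression constants the tests sweep over.

import tempfile

import tensorflow as tf
from tensorflow.python.data.experimental.ops import snapshot

tmpdir = tempfile.mkdtemp()

# Mixed element types, as in testWriteSnapshotMixTypes.
ds = tf.data.Dataset.range(10)
ds = ds.map(lambda x: (x, tf.strings.as_string(x), 2 * x))
ds = ds.apply(
    snapshot.legacy_snapshot(tmpdir, compression=snapshot.COMPRESSION_GZIP))

# A first complete pass writes <tmpdir>/<fingerprint>/<run>/00000000.snapshot;
# a later pipeline with the same fingerprint reads that file back instead of
# recomputing the input.
for element in ds:
  print([t.numpy() for t in element])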
@combinations.generate(test_base.default_test_combinations())\n def testSpecifySnapshotNameWriteAndRead(self):\n tmpdir = self.snapshot_dir\n\n dataset = dataset_ops.Dataset.range(10)\n dataset = dataset.apply(\n snapshot.legacy_snapshot(tmpdir, snapshot_name=\"my_custom_snapshot\"))\n dataset = dataset.repeat(10)\n self.assertDatasetProduces(dataset, list(range(10)) * 10)\n\n self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)\n self.assertTrue(\n os.path.exists(os.path.join(tmpdir, \"custom-my_custom_snapshot\")))\n self.assertTrue(\n os.path.exists(\n os.path.join(tmpdir, \"custom-my_custom_snapshot\", \"custom\")))\n\n @combinations.generate(test_base.default_test_combinations())\n def testForcePassthroughMode(self):\n tmpdir = self.snapshot_dir\n\n dataset = dataset_ops.Dataset.range(10)\n dataset = dataset.apply(\n snapshot.legacy_snapshot(tmpdir, mode=\"passthrough\"))\n dataset = dataset.repeat(10)\n self.assertDatasetProduces(dataset, list(range(10)) * 10)\n\n self.assertSnapshotDirectoryContains(tmpdir, 0, 0, 0)\n\n @combinations.generate(test_base.default_test_combinations())\n def testForceWriteMode(self):\n tmpdir = self.snapshot_dir\n\n dataset = dataset_ops.Dataset.range(10)\n dataset = dataset.apply(snapshot.legacy_snapshot(tmpdir, mode=\"write\"))\n dataset = dataset.repeat(10)\n self.assertDatasetProduces(dataset, list(range(10)) * 10)\n\n # We will end up writing 10 different runs.\n self.assertSnapshotDirectoryContains(tmpdir, 1, 10, 1)\n\n @combinations.generate(test_base.default_test_combinations())\n def testForceReadMode(self):\n tmpdir = self.snapshot_dir\n\n # We write a copy of the snapshot first.\n dataset = dataset_ops.Dataset.range(10)\n dataset = dataset.apply(\n snapshot.legacy_snapshot(\n tmpdir, mode=\"write\", snapshot_name=\"my_custom_snapshot\"))\n self.assertDatasetProduces(dataset, list(range(10)))\n\n # We move the run to a new name.\n shutil.move(\n os.path.join(tmpdir, \"custom-my_custom_snapshot\"),\n os.path.join(tmpdir, \"custom-my_custom_snapshot_2\"))\n\n # Even though the snapshot.metadata is pointing to the old run that no\n # longer exists after we moved, we force it to read from the run we specify.\n dataset = dataset_ops.Dataset.range(10)\n dataset = dataset.apply(\n snapshot.legacy_snapshot(\n tmpdir, mode=\"read\", snapshot_name=\"my_custom_snapshot_2\"))\n self.assertDatasetProduces(dataset, list(range(10)))\n\n # We should still have one snapshot and one run.\n self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)\n\n @combinations.generate(test_base.default_test_combinations())\n def testForceReadNonexistentSnapshot(self):\n tmpdir = self.snapshot_dir\n dataset = dataset_ops.Dataset.range(10)\n with self.assertRaises(errors.NotFoundError):\n dataset = dataset.apply(snapshot.legacy_snapshot(tmpdir, mode=\"read\"))\n get_next = self.getNext(dataset)\n self.evaluate(get_next())\n\n @combinations.generate(test_base.default_test_combinations())\n def testForceReadNonexistentNamedSnapshot(self):\n tmpdir = self.snapshot_dir\n dataset = dataset_ops.Dataset.range(10)\n with self.assertRaises(errors.NotFoundError):\n dataset = dataset.apply(\n snapshot.legacy_snapshot(\n tmpdir, mode=\"read\", snapshot_name=\"my_nonexistent_snapshot\"))\n get_next = self.getNext(dataset)\n self.evaluate(get_next())\n\n @combinations.generate(\n combinations.times(\n test_base.default_test_combinations(),\n combinations.combine(compression=[\n snapshot.COMPRESSION_NONE, snapshot.COMPRESSION_GZIP,\n snapshot.COMPRESSION_SNAPPY\n ])))\n def 
testReadSnapshotBackAfterWrite(self, compression):\n self.setUpTFRecord()\n filenames = self._filenames\n\n expected = [\n b\"Record %d of file %d\" % (r, f) # pylint:disable=g-complex-comprehension\n for f in range(0, 10)\n for r in range(0, 10)\n ]\n\n tmpdir = self.snapshot_dir\n dataset = core_readers._TFRecordDataset(filenames)\n dataset = dataset.apply(\n snapshot.legacy_snapshot(tmpdir, compression=compression))\n self.assertDatasetProduces(dataset, expected)\n\n # remove the original files and try to read the data back only from snapshot\n self.removeTFRecords()\n\n dataset2 = core_readers._TFRecordDataset(filenames)\n dataset2 = dataset2.apply(\n snapshot.legacy_snapshot(tmpdir, compression=compression))\n self.assertDatasetProduces(dataset2, expected)\n\n @combinations.generate(test_base.default_test_combinations())\n def testReadShuffledSnapshotAfterWrite(self):\n self.setUpTFRecord(num_files=10, num_records=50)\n filenames = self._filenames\n\n expected = [\n b\"Record %d of file %d\" % (r, f) # pylint:disable=g-complex-comprehension\n for f in range(0, 10)\n for r in range(0, 50)\n ]\n\n tmpdir = self.snapshot_dir\n dataset = core_readers._TFRecordDataset(filenames)\n dataset = dataset.apply(\n snapshot.legacy_snapshot(tmpdir, shard_size_bytes=100))\n self.assertDatasetProduces(dataset, expected)\n\n # remove the original files and try to read the data back only from snapshot\n self.removeTFRecords()\n\n dataset2 = core_readers._TFRecordDataset(filenames)\n dataset2 = dataset2.apply(\n snapshot.legacy_snapshot(\n tmpdir, shard_size_bytes=100, shuffle_on_read=True))\n next2 = self.getNext(dataset2)\n\n res1 = self.evaluate(next2())\n res2 = self.evaluate(next2())\n res3 = self.evaluate(next2())\n res4 = self.evaluate(next2())\n res5 = self.evaluate(next2())\n\n # make sure that we don't read the file back in the same order.\n self.assertNotEqual([res1, res2, res3, res4, res5], expected[0:5])\n\n # make sure all the elements are still there\n dataset3 = core_readers._TFRecordDataset(filenames)\n dataset3 = dataset3.apply(\n snapshot.legacy_snapshot(\n tmpdir, shard_size_bytes=100, shuffle_on_read=True))\n self.assertDatasetProduces(dataset3, expected, assert_items_equal=True)\n\n @combinations.generate(test_base.default_test_combinations())\n def testReadShuffledSnapshotWithSeedAfterWrite(self):\n self.setUpTFRecord(num_files=10, num_records=50)\n filenames = self._filenames\n\n expected = [\n b\"Record %d of file %d\" % (r, f) # pylint:disable=g-complex-comprehension\n for f in range(0, 10)\n for r in range(0, 50)\n ]\n\n tmpdir = self.snapshot_dir\n dataset = core_readers._TFRecordDataset(filenames)\n dataset = dataset.apply(\n snapshot.legacy_snapshot(tmpdir, shard_size_bytes=10))\n self.assertDatasetProduces(dataset, expected)\n\n # remove the original files and try to read the data back only from snapshot\n self.removeTFRecords()\n\n dataset2 = core_readers._TFRecordDataset(filenames)\n dataset2 = dataset2.apply(\n snapshot.legacy_snapshot(\n tmpdir,\n shard_size_bytes=10,\n shuffle_on_read=True,\n shuffle_seed=123456))\n next2 = self.getNext(dataset2)\n\n dataset3 = core_readers._TFRecordDataset(filenames)\n dataset3 = dataset3.apply(\n snapshot.legacy_snapshot(\n tmpdir,\n shard_size_bytes=10,\n shuffle_on_read=True,\n shuffle_seed=123456))\n next3 = self.getNext(dataset3)\n\n # make sure that the items are read back in the same order for both datasets\n for _ in range(500):\n res2 = self.evaluate(next2())\n res3 = self.evaluate(next3())\n self.assertEqual(res2, res3)\n\n 
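A minimal sketch of the shuffled-read path the two tests above cover: write the snapshot once in small shards, then read it back with the shard order shuffled, optionally pinning the order with a seed. It assumes eager execution; the shard size and seed are simply the values the tests use, and the directory is a placeholder.

import tempfile

import tensorflow as tf
from tensorflow.python.data.experimental.ops import snapshot

tmpdir = tempfile.mkdtemp()

def base_pipeline():
  return tf.data.Dataset.range(500).map(tf.strings.as_string)

# Write pass: a full iteration finalizes the snapshot as many ~100-byte shards.
writer = base_pipeline().apply(
    snapshot.legacy_snapshot(tmpdir, shard_size_bytes=100))
for _ in writer:
  pass

# Read pass: shuffle_on_read randomizes the shard order, and a fixed
# shuffle_seed makes two readers observe the same shuffled order, which is
# what the seeded test asserts.
reader = base_pipeline().apply(
    snapshot.legacy_snapshot(tmpdir,
                             shard_size_bytes=100,
                             shuffle_on_read=True,
                             shuffle_seed=123456))
print(next(iter(reader)).numpy())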
@combinations.generate(\n combinations.times(\n test_base.default_test_combinations(),\n combinations.combine(compression=[\n snapshot.COMPRESSION_NONE, snapshot.COMPRESSION_GZIP,\n snapshot.COMPRESSION_SNAPPY\n ])))\n def testReadSnapshotParallelAfterWrite(self, compression):\n self.setUpTFRecord(5, 500)\n filenames = self._filenames\n\n expected = [\n b\"Record %d of file %d\" % (r, f) # pylint:disable=g-complex-comprehension\n for f in range(0, 5)\n for r in range(0, 500)\n ]\n\n tmpdir = self.snapshot_dir\n dataset = core_readers._TFRecordDataset(filenames)\n dataset = dataset.apply(\n snapshot.legacy_snapshot(\n tmpdir,\n shard_size_bytes=1024 * 1024,\n num_reader_threads=2,\n reader_buffer_size=10,\n compression=compression))\n self.assertDatasetProduces(dataset, expected, assert_items_equal=True)\n\n # remove the original files and try to read the data back only from\n # snapshot.\n self.removeTFRecords()\n\n dataset2 = core_readers._TFRecordDataset(filenames)\n dataset2 = dataset2.apply(\n snapshot.legacy_snapshot(\n tmpdir,\n shard_size_bytes=1024 * 1024,\n num_reader_threads=2,\n reader_buffer_size=10,\n compression=compression))\n self.assertDatasetProduces(dataset2, expected, assert_items_equal=True)\n\n # Not testing Snappy here because Snappy reads currently require a lot of\n # memory.\n @combinations.generate(\n combinations.times(\n test_base.default_test_combinations(),\n combinations.times(\n combinations.combine(compression=[\n snapshot.COMPRESSION_NONE, snapshot.COMPRESSION_GZIP\n ]),\n combinations.combine(threads=2, size=[1, 2]) +\n combinations.combine(threads=8, size=[1, 4, 8]))))\n def testReadSnapshotBackAfterMultiThreadedWrite(self, compression, threads,\n size):\n self.setUpTFRecord()\n filenames = self._filenames\n\n expected = [\n b\"Record %d of file %d\" % (r, f) # pylint:disable=g-complex-comprehension\n for f in range(0, 10)\n for r in range(0, 10)\n ]\n\n tmpdir = self.snapshot_dir\n dataset = core_readers._TFRecordDataset(filenames)\n dataset = dataset.apply(\n snapshot.legacy_snapshot(\n tmpdir,\n compression=compression,\n num_writer_threads=threads,\n writer_buffer_size=size))\n self.assertDatasetProduces(dataset, expected)\n\n # remove the original files and try to read the data back only from\n # snapshot\n self.removeTFRecords()\n\n dataset2 = core_readers._TFRecordDataset(filenames)\n dataset2 = dataset2.apply(\n snapshot.legacy_snapshot(tmpdir, compression=compression))\n self.assertDatasetProduces(dataset2, expected, assert_items_equal=True)\n\n @combinations.generate(test_base.default_test_combinations())\n def testSameFingerprintWithDifferentInitializationOrder(self):\n tmpdir = self.snapshot_dir\n\n dataset1 = dataset_ops.Dataset.range(0, 100)\n dataset2 = dataset_ops.Dataset.range(100, 200)\n dataset3 = dataset_ops.Dataset.range(200, 300)\n\n dataset = dataset1.concatenate(dataset2).concatenate(dataset3)\n dataset = dataset.apply(snapshot.legacy_snapshot(tmpdir))\n self.assertDatasetProduces(dataset, list(range(300)))\n\n dataset4 = dataset_ops.Dataset.range(200, 300)\n dataset5 = dataset_ops.Dataset.range(100, 200)\n dataset6 = dataset_ops.Dataset.range(0, 100)\n\n dataset = dataset6.concatenate(dataset5).concatenate(dataset4)\n dataset = dataset.apply(snapshot.legacy_snapshot(tmpdir))\n self.assertDatasetProduces(dataset, list(range(300)))\n\n self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)\n\n @combinations.generate(test_base.default_test_combinations())\n def testExpiredSnapshotRewrite(self):\n tmpdir = self.snapshot_dir\n\n 
dataset1 = dataset_ops.Dataset.range(1000)\n dataset1 = dataset1.apply(\n snapshot.legacy_snapshot(tmpdir, pending_snapshot_expiry_seconds=1))\n next1 = self.getNext(dataset1)\n\n # Don't finish reading dataset1, so it is never finalized\n for _ in range(500):\n self.evaluate(next1())\n self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)\n\n time.sleep(2)\n\n # Creating dataset2 after we run through dataset1 due to eager mode, where\n # the snapshot state is determined immediately upon dataset creation. We\n # only want to determine the snapshot state for dataset2 after the first\n # snapshot has expired.\n dataset2 = dataset_ops.Dataset.range(1000)\n dataset2 = dataset2.apply(\n snapshot.legacy_snapshot(tmpdir, pending_snapshot_expiry_seconds=1))\n next2 = self.getNext(dataset2)\n\n for _ in range(500):\n self.evaluate(next2())\n self.assertSnapshotDirectoryContains(tmpdir, 1, 2, 1)\n\n @combinations.generate(test_base.default_test_combinations())\n def testSnapshotArgsCreateNewSnapshot(self):\n tmpdir = self.snapshot_dir\n\n dataset1 = dataset_ops.Dataset.range(1000)\n dataset1 = dataset1.apply(\n snapshot.legacy_snapshot(tmpdir, shard_size_bytes=10000))\n next1 = self.getNext(dataset1)\n\n for _ in range(1000):\n self.evaluate(next1())\n self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)\n\n # Create second snapshot with a different shard_size_bytes\n dataset2 = dataset_ops.Dataset.range(1000)\n dataset2 = dataset1.apply(\n snapshot.legacy_snapshot(tmpdir, shard_size_bytes=20000))\n next2 = self.getNext(dataset2)\n\n for _ in range(1000):\n self.evaluate(next2())\n self.assertSnapshotDirectoryContains(tmpdir, 2, 1, 1)\n\n @combinations.generate(\n combinations.times(\n test_base.default_test_combinations(),\n combinations.combine(compression=[\n snapshot.COMPRESSION_NONE, snapshot.COMPRESSION_GZIP,\n snapshot.COMPRESSION_SNAPPY\n ])))\n def testSpecifyShardSize(self, compression):\n tmpdir = self.snapshot_dir\n\n dataset = dataset_ops.Dataset.from_tensor_slices([1.0])\n dataset = dataset.map(lambda x: gen_array_ops.broadcast_to(x, [1024, 1024]))\n dataset = dataset.repeat(10)\n dataset = dataset.apply(\n snapshot.legacy_snapshot(\n tmpdir, shard_size_bytes=10 * 1024 * 1024, compression=compression))\n next_fn = self.getNext(dataset)\n\n for _ in range(10):\n self.evaluate(next_fn())\n\n num_files = 1\n if compression == snapshot.COMPRESSION_NONE:\n num_files = 3\n self.assertSnapshotDirectoryContains(tmpdir, 1, 1, num_files)\n\n @combinations.generate(test_base.default_test_combinations())\n def testAdditionalOperationsAfterReadBack(self):\n self.setUpTFRecord()\n filenames = self._filenames\n\n expected = [\n b\"Record %d of file %d\" % (r, f) # pylint:disable=g-complex-comprehension\n for f in range(0, 10)\n for r in range(0, 10)\n ]\n\n tmpdir = self.snapshot_dir\n dataset = core_readers._TFRecordDataset(filenames)\n dataset = dataset.apply(snapshot.legacy_snapshot(tmpdir))\n self.assertDatasetProduces(dataset, expected)\n\n # remove the original files and try to read the data back only from snapshot\n self.removeTFRecords()\n\n dataset2 = core_readers._TFRecordDataset(filenames)\n dataset2 = dataset2.apply(snapshot.legacy_snapshot(tmpdir))\n self.assertDatasetProduces(dataset2, expected)\n\n expected_after = [\n b\"cord %d of file %d\" % (r, f) # pylint:disable=g-complex-comprehension\n for f in range(0, 10)\n for r in range(0, 10)\n ]\n\n dataset3 = core_readers._TFRecordDataset(filenames)\n dataset3 = dataset3.apply(snapshot.legacy_snapshot(tmpdir))\n dataset3 = 
dataset3.map(lambda x: string_ops.substr_v2(x, 2, 1000))\n self.assertDatasetProduces(dataset3, expected_after)\n\n\nclass SnapshotCheckpointTest(checkpoint_test_base.CheckpointTestBase,\n parameterized.TestCase):\n\n def _build_snapshot_dataset(self, repeat=False):\n\n def ds_fn():\n self._snapshot_dir = os.path.join(self.get_temp_dir(), \"snapshot\")\n if not os.path.exists(self._snapshot_dir):\n os.mkdir(self._snapshot_dir)\n\n dataset = dataset_ops.Dataset.range(100)\n dataset = dataset.apply(snapshot.snapshot(self._snapshot_dir))\n if repeat:\n dataset = dataset.repeat(2)\n return dataset\n\n return ds_fn\n\n @combinations.generate(test_base.default_test_combinations())\n def testCheckpointBeforeEpochEndNoRepeat(self):\n ds_fn = self._build_snapshot_dataset(repeat=False)\n outputs = self.gen_outputs(ds_fn, [], 50, verify_exhausted=False)\n self.assertSequenceEqual(outputs, range(50))\n outputs.extend(\n self.gen_outputs(ds_fn, [], 50, ckpt_saved=True, verify_exhausted=True))\n self.assertSequenceEqual(outputs, range(100))\n\n @combinations.generate(test_base.default_test_combinations())\n def testCheckpointBeforeOneEpochWithReading(self):\n ds_fn = self._build_snapshot_dataset(repeat=True)\n\n # Generate 50 entries from iterator and save checkpoint.\n outputs = self.gen_outputs(ds_fn, [], 50, verify_exhausted=False)\n self.assertSequenceEqual(outputs, list(range(50)))\n\n # Restore from checkpoint and produce the rest of the elements from the\n # iterator.\n t = self.gen_outputs(ds_fn, [], 150, ckpt_saved=True, verify_exhausted=True)\n outputs.extend(t)\n self.assertSequenceEqual(\n outputs,\n list(range(50)) + list(range(50, 100)) + list(range(100)))\n\n @combinations.generate(test_base.default_test_combinations())\n def testCheckpointBeforeOneEpochThenRunAFewSteps(self):\n ds_fn = self._build_snapshot_dataset(repeat=False)\n outputs = self.gen_outputs(\n ds_fn, [10], 20, verify_exhausted=False, save_checkpoint_at_end=False)\n self.assertSequenceEqual(outputs, range(20))\n\n outputs = outputs[:10]\n outputs.extend(\n self.gen_outputs(ds_fn, [], 90, ckpt_saved=True, verify_exhausted=True))\n self.assertSequenceEqual(outputs, range(100))\n\n @combinations.generate(test_base.default_test_combinations())\n def testCheckpointAfterOneEpoch(self):\n ds_fn = self._build_snapshot_dataset(repeat=True)\n\n # Generate 110 entries from iterator and save checkpoint.\n outputs = self.gen_outputs(ds_fn, [], 110, verify_exhausted=False)\n self.assertSequenceEqual(outputs, list(range(100)) + list(range(10)))\n\n # Restore from checkpoint and produce the rest of the elements from the\n # iterator.\n t = self.gen_outputs(ds_fn, [], 90, ckpt_saved=True, verify_exhausted=True)\n outputs.extend(t)\n self.assertSequenceEqual(\n outputs,\n list(range(100)) + list(range(10)) + list(range(10, 100)))\n\n @combinations.generate(test_base.default_test_combinations())\n def testCheckpointAfterOneEpochRunFewSteps(self):\n ds_fn = self._build_snapshot_dataset(repeat=True)\n\n # Generate 120 entries from iterator and save checkpoint at 110.\n outputs = self.gen_outputs(\n ds_fn, [110], 120, verify_exhausted=False, save_checkpoint_at_end=False)\n self.assertSequenceEqual(outputs, list(range(100)) + list(range(20)))\n\n # Restore from checkpoint and produce the rest of the elements from the\n # iterator.\n outputs = outputs[:110]\n t = self.gen_outputs(ds_fn, [], 90, ckpt_saved=True, verify_exhausted=True)\n outputs.extend(t)\n self.assertSequenceEqual(\n outputs,\n list(range(100)) + list(range(10)) + list(range(10, 
100)))\n\n\nclass LegacySnapshotCheckpointTest(\n checkpoint_test_base.CheckpointTestBase, parameterized.TestCase):\n\n def _build_snapshot_dataset(self,\n num_threads=1,\n repeat=False,\n pending_snapshot_expiry_seconds=-1,\n shard_size_bytes=None):\n\n def ds_fn():\n self.snapshot_dir = os.path.join(self.get_temp_dir(), \"snapshot\")\n if not os.path.exists(self.snapshot_dir):\n os.mkdir(self.snapshot_dir)\n dataset = dataset_ops.Dataset.range(1000)\n dataset = dataset.apply(\n snapshot.legacy_snapshot(\n self.snapshot_dir,\n num_writer_threads=num_threads,\n writer_buffer_size=2 * num_threads,\n num_reader_threads=num_threads,\n reader_buffer_size=2 * num_threads,\n pending_snapshot_expiry_seconds=pending_snapshot_expiry_seconds,\n shard_size_bytes=shard_size_bytes))\n if repeat:\n dataset = dataset.repeat(2)\n return dataset\n\n return ds_fn\n\n @combinations.generate(\n combinations.times(\n test_base.default_test_combinations(),\n combinations.combine(pending_snapshot_expiry_seconds=[None, 1])))\n def testSnapshotBeforeEpochEnd(self, pending_snapshot_expiry_seconds):\n ds_fn = self._build_snapshot_dataset(\n pending_snapshot_expiry_seconds=pending_snapshot_expiry_seconds)\n outputs = self.gen_outputs(ds_fn, [], 100, verify_exhausted=False)\n self.assertSequenceEqual(outputs, range(100))\n outputs.extend(\n self.gen_outputs(\n ds_fn, [], 900, ckpt_saved=True, verify_exhausted=False))\n self.assertSequenceEqual(outputs, range(1000))\n\n @combinations.generate(\n combinations.times(\n test_base.default_test_combinations(),\n combinations.combine(pending_snapshot_expiry_seconds=[None, 1])))\n def testCheckpointBeforeOneEpochThenRunFewStepsSmallShardMultiThread(\n self, pending_snapshot_expiry_seconds):\n ds_fn = self._build_snapshot_dataset(\n pending_snapshot_expiry_seconds=pending_snapshot_expiry_seconds,\n shard_size_bytes=100)\n\n outputs = []\n with ops.Graph().as_default() as g:\n init_op, get_next_op, saver = self._build_graph(ds_fn)\n with self.session(graph=g) as sess:\n self._initialize(init_op, sess)\n start = 0\n end = 100\n num_iters = end - start\n for _ in range(num_iters):\n outputs.append(sess.run(get_next_op))\n self._save(sess, saver)\n start = 100\n end = 400\n num_iters = end - start\n for _ in range(num_iters):\n outputs.append(sess.run(get_next_op))\n self.assertSequenceEqual(outputs, range(400))\n\n outputs = outputs[:100]\n outputs.extend(\n self.gen_outputs(\n ds_fn, [], 900, ckpt_saved=True, verify_exhausted=False))\n self.assertSequenceEqual(outputs, range(1000))\n fp_dir_list = os.listdir(self.snapshot_dir)\n self.assertLen(list(fp_dir_list), 2)\n for d in fp_dir_list:\n if not d.endswith(\"-graph.pbtxt\"):\n fp_dir = os.path.join(self.snapshot_dir, d)\n run_dir_list = os.listdir(fp_dir)\n self.assertLen(list(run_dir_list), 2)\n for e in run_dir_list:\n if e != \"snapshot.metadata\":\n run_dir = os.path.join(fp_dir, e)\n self.assertLen(list(os.listdir(run_dir)), 258)\n\n @combinations.generate(\n combinations.times(\n test_base.default_test_combinations(),\n combinations.combine(pending_snapshot_expiry_seconds=[None, 1])))\n def testCheckpointBeforeOneEpochThenRunFewSteps(\n self, pending_snapshot_expiry_seconds):\n ds_fn = self._build_snapshot_dataset(\n pending_snapshot_expiry_seconds=pending_snapshot_expiry_seconds)\n\n # Generate 200 entries from iterator but save checkpoint after producing\n # 100.\n outputs = self.gen_outputs(\n ds_fn, [100], 200, verify_exhausted=False, save_checkpoint_at_end=False)\n self.assertSequenceEqual(outputs, range(200))\n\n 
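    # The checkpoint captured after the first 100 elements is what gets restored
    # next: the extra 100 elements produced after the save are discarded
    # (outputs[:100]) and the restored iterator resumes the pipeline at element
    # 100, so the combined outputs still cover range(1000). A roughly equivalent
    # eager-mode sketch using the public checkpoint API (the path is a
    # placeholder, and ds_fn is the builder defined above):
    #
    #   iterator = iter(ds_fn())
    #   ckpt = tf.train.Checkpoint(iterator=iterator)
    #   for _ in range(100):
    #     next(iterator)
    #   save_path = ckpt.save("/tmp/iterator_ckpt")
    #
    #   restored = iter(ds_fn())
    #   tf.train.Checkpoint(iterator=restored).restore(save_path)
    #   next(restored)  # resumes at element 100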
outputs = outputs[:100]\n outputs.extend(\n self.gen_outputs(\n ds_fn, [], 900, ckpt_saved=True, verify_exhausted=False))\n self.assertSequenceEqual(outputs, range(1000))\n\n @combinations.generate(\n combinations.times(\n test_base.default_test_combinations(),\n combinations.combine(pending_snapshot_expiry_seconds=[None, 1])))\n def testCheckpointBeforeOneEpochThenRunFewStepsMultipleThreads(\n self, pending_snapshot_expiry_seconds):\n ds_fn = self._build_snapshot_dataset(\n num_threads=2,\n pending_snapshot_expiry_seconds=pending_snapshot_expiry_seconds)\n\n # Generate 200 entries from iterator but save checkpoint after producing\n # 100.\n outputs = self.gen_outputs(\n ds_fn, [100], 200, verify_exhausted=False, save_checkpoint_at_end=False)\n self.assertSequenceEqual(outputs, range(200))\n\n outputs = outputs[:100]\n outputs.extend(\n self.gen_outputs(\n ds_fn, [], 900, ckpt_saved=True, verify_exhausted=False))\n self.assertSequenceEqual(outputs, range(1000))\n\n @combinations.generate(\n combinations.times(\n test_base.default_test_combinations(),\n combinations.combine(pending_snapshot_expiry_seconds=[None, 1])))\n def testCheckpointAfterOneEpoch(self, pending_snapshot_expiry_seconds):\n ds_fn = self._build_snapshot_dataset(\n repeat=True,\n pending_snapshot_expiry_seconds=pending_snapshot_expiry_seconds)\n\n # Generate 1100 entries from iterator and save checkpoint.\n outputs = self.gen_outputs(ds_fn, [], 1100, verify_exhausted=False)\n self.assertSequenceEqual(outputs, list(range(1000)) + list(range(100)))\n\n # Restore from checkpoint and produce the rest of the elements from the\n # iterator.\n t = self.gen_outputs(\n ds_fn, [], 900, ckpt_saved=True, verify_exhausted=False)\n outputs.extend(t)\n self.assertSequenceEqual(\n outputs,\n list(range(1000)) + list(range(100)) + list(range(900)))\n\n @combinations.generate(\n combinations.times(\n test_base.default_test_combinations(),\n combinations.combine(pending_snapshot_expiry_seconds=[None, 1])))\n def testCheckpointAfterOneEpochThenRunFewSteps(\n self, pending_snapshot_expiry_seconds):\n ds_fn = self._build_snapshot_dataset(\n repeat=True,\n pending_snapshot_expiry_seconds=pending_snapshot_expiry_seconds)\n\n # Generate 200 entries from iterator but save checkpoint after producing\n # 100.\n outputs = self.gen_outputs(\n ds_fn, [1100],\n 1200,\n verify_exhausted=False,\n save_checkpoint_at_end=False)\n self.assertSequenceEqual(\n outputs,\n list(range(1000)) + list(range(100)) + list(range(100)))\n\n outputs = outputs[:1100]\n t = self.gen_outputs(\n ds_fn, [], 900, ckpt_saved=True, verify_exhausted=False)\n outputs.extend(t)\n self.assertSequenceEqual(\n outputs, (list(range(1000)) + list(range(100)) + list(range(900))))\n\n\nif __name__ == \"__main__\":\n test.main()\n", "# Lint as: python2, python3\n# Copyright 2018 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Converts a frozen graph into a TFLite FlatBuffer.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport distutils.spawn\nimport enum # pylint: disable=g-bad-import-order\nimport os as _os\nimport platform as _platform\nimport subprocess as _subprocess\nimport tempfile as _tempfile\n\nimport six\nfrom six.moves import map\n\nfrom tensorflow.lite.python import lite_constants\nfrom tensorflow.lite.python import util\nfrom tensorflow.lite.python import wrap_toco\nfrom tensorflow.lite.python.convert_phase import Component\nfrom tensorflow.lite.python.convert_phase import convert_phase\nfrom tensorflow.lite.python.convert_phase import ConverterError\nfrom tensorflow.lite.python.convert_phase import SubComponent\nfrom tensorflow.lite.python.metrics_wrapper import metrics_wrapper as _metrics_wrapper\nfrom tensorflow.lite.toco import model_flags_pb2 as _model_flags_pb2\nfrom tensorflow.lite.toco import toco_flags_pb2 as _toco_flags_pb2\nfrom tensorflow.lite.toco import types_pb2 as _types_pb2\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.platform import resource_loader as _resource_loader\nfrom tensorflow.python.util import deprecation\nfrom tensorflow.python.util.tf_export import tf_export as _tf_export\n\n\ndef _requires_input_stats(toco_flags: _toco_flags_pb2.TocoFlags()) -> bool:\n \"\"\"Checks if the `input_stats` flag is required for conversion.\n\n Args:\n toco_flags: A protocol buffer describing the conversion process.\n\n Returns:\n True, if the `inference_type` or the `inference_input_type` is a quantized\n type and it is not post training quantization, else False.\n \"\"\"\n quantized_inference_types = (\n [_types_pb2.QUANTIZED_UINT8, _types_pb2.QUANTIZED_INT8])\n return ((toco_flags.inference_type in quantized_inference_types or\n toco_flags.inference_input_type in quantized_inference_types) and\n not toco_flags.post_training_quantize)\n\n\ndef convert_tensor_tf_type_to_tflite_type(\n tf_type: dtypes.DType, usage: str = \"\") -> _types_pb2.IODataType:\n \"\"\"Convert tensor type from tf type to tflite type.\n\n Args:\n tf_type: TensorFlow type.\n usage: Text describing the reason for invoking this function.\n\n Raises:\n ValueError: If `tf_type` is unsupported.\n\n Returns:\n tflite_type: TFLite type. 
Refer to lite/toco/types.proto.\n \"\"\"\n mapping = {\n dtypes.float16: _types_pb2.FLOAT16,\n dtypes.float32: _types_pb2.FLOAT,\n dtypes.float64: _types_pb2.FLOAT64,\n dtypes.int8: _types_pb2.INT8,\n dtypes.int16: _types_pb2.INT16,\n dtypes.int32: _types_pb2.INT32,\n dtypes.int64: _types_pb2.INT64,\n dtypes.uint8: _types_pb2.UINT8,\n dtypes.uint32: _types_pb2.UINT32,\n dtypes.uint64: _types_pb2.UINT64,\n dtypes.string: _types_pb2.STRING,\n dtypes.bool: _types_pb2.BOOL,\n dtypes.complex64: _types_pb2.COMPLEX64,\n dtypes.complex128: _types_pb2.COMPLEX128,\n }\n tflite_type = mapping.get(tf_type)\n if tflite_type is None:\n raise ValueError(\"Unsupported TensorFlow type `{0}` provided for the {1}\"\n .format(tf_type, usage))\n return tflite_type\n\n\n# Only a few restricted tensor types are allowed for explicitly setting\n# inference/input/output types.\ndef convert_inference_tf_type_to_tflite_type(\n tf_type: dtypes.DType, usage: str = \"\") -> _types_pb2.IODataType:\n \"\"\"Convert inference type from tf type to tflite type.\n\n Args:\n tf_type: TensorFlow type.\n usage: Text describing the reason for invoking this function.\n\n Raises:\n ValueError: If `tf_type` is unsupported.\n\n Returns:\n tflite_type: TFLite type. Refer to lite/toco/types.proto.\n \"\"\"\n mapping = {\n dtypes.float32: _types_pb2.FLOAT,\n dtypes.uint8: _types_pb2.QUANTIZED_UINT8,\n dtypes.int8: _types_pb2.QUANTIZED_INT8,\n dtypes.int16: _types_pb2.QUANTIZED_INT16,\n }\n tflite_type = mapping.get(tf_type)\n if tflite_type is None:\n raise ValueError(\"Unsupported TensorFlow type `{0}` provided for the {1}\"\n .format(tf_type, usage))\n return tflite_type\n\n\n# Find the toco_from_protos binary using the resource loader if using from\n# bazel, otherwise we are in a pip where console_scripts already has\n# the toco_from_protos tool.\nif lite_constants.EXPERIMENTAL_USE_TOCO_API_DIRECTLY:\n _toco_from_proto_bin = \"\"\nelse:\n _toco_from_proto_bin = _resource_loader.get_path_to_datafile(\n \"../toco/python/toco_from_protos\")\n\nif _toco_from_proto_bin and not _os.path.exists(_toco_from_proto_bin):\n _toco_from_proto_bin = \"toco_from_protos\"\n\n\ndef _try_convert_to_unicode(output):\n if output is None:\n return u\"\"\n\n if isinstance(output, bytes):\n try:\n return six.ensure_text(output)\n except UnicodeDecodeError:\n pass\n return output\n\n\n@_tf_export(\"lite.OpsSet\")\nclass OpsSet(enum.Enum):\n \"\"\"Enum class defining the sets of ops available to generate TFLite models.\n\n WARNING: Experimental interface, subject to change.\n \"\"\"\n # Convert model using TensorFlow Lite builtin ops.\n TFLITE_BUILTINS = \"TFLITE_BUILTINS\"\n\n # Convert model using TensorFlow ops. Not all TensorFlow ops are available.\n # WARNING: Experimental interface, subject to change.\n SELECT_TF_OPS = \"SELECT_TF_OPS\"\n\n # Convert model using only TensorFlow Lite quantized int8 operations.\n # Specifying this will throw an error for operations that do not yet have\n # quantized implementations.\n TFLITE_BUILTINS_INT8 = \"TFLITE_BUILTINS_INT8\"\n\n # Convert model using only TensorFlow Lite operations with quantized int8\n # weights, int16 activations and int64 bias.\n # Specifying this will throw an error for operations that do not yet have\n # quantized implementations.\n # This quantization mode may be used in models for super-resolution,\n # audio signal processing or image de-noising. 
It improves accuracy\n # significantly, but only slightly increases the model size.\n # WARNING: These ops are currently experimental and have not yet been\n # finalized.\n # They are only compatible with CPU execution, and have not been optimized for\n # production.\n EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8 = (\n \"EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8\")\n\n def __str__(self):\n return str(self.value)\n\n @staticmethod\n def get_options():\n \"\"\"Returns a list of OpsSet options as a list of strings.\"\"\"\n return [str(option) for option in list(OpsSet)]\n\n\n@convert_phase(Component.OPTIMIZE_TFLITE_MODEL, SubComponent.QUANTIZE)\ndef mlir_quantize(input_data_str,\n disable_per_channel=False,\n fully_quantize=False,\n inference_type=_types_pb2.QUANTIZED_INT8,\n input_data_type=dtypes.float32,\n output_data_type=dtypes.float32,\n enable_numeric_verify=False):\n \"\"\"Quantize `input_data_str` with calibration results.\n\n Args:\n input_data_str: Input data in serialized form (e.g. a TFLITE model with\n calibration results).\n disable_per_channel: Bool indicating whether to do per-channel or per-tensor\n quantization\n fully_quantize: Bool indicating whether to fully quantize the model. Besides\n model body, the input/output will be quantized as well.\n inference_type: Data type for the activations. The default value is int8.\n input_data_type: Data type for the inputs. The default value is float32.\n output_data_type: Data type for the outputs. The default value is float32.\n enable_numeric_verify: Experimental. Subject to change. Bool indicating\n whether to add NumericVerify ops into the debug mode quantized model.\n\n Returns:\n Quantized model in serialized form (e.g. a TFLITE model) with floating-point\n inputs and outputs.\n \"\"\"\n return wrap_toco.wrapped_experimental_mlir_quantize(\n input_data_str, disable_per_channel, fully_quantize, inference_type,\n convert_tensor_tf_type_to_tflite_type(input_data_type),\n convert_tensor_tf_type_to_tflite_type(output_data_type),\n enable_numeric_verify)\n\n\n@convert_phase(Component.OPTIMIZE_TFLITE_MODEL, SubComponent.SPARSIFY)\ndef mlir_sparsify(input_data_str):\n \"\"\"Sparsify `input_data_str` to encode sparse tensor with proper format.\n\n Args:\n input_data_str: Input data in serialized form (e.g. a TFLITE model).\n\n Returns:\n Sparsified model in serialized form (e.g. a TFLITE model).\n \"\"\"\n return wrap_toco.wrapped_experimental_mlir_sparsify(input_data_str)\n\n\ndef register_custom_opdefs(custom_opdefs_list):\n \"\"\"Register the given custom opdefs to the TensorFlow global op registry.\n\n Args:\n custom_opdefs_list: String representing the custom ops OpDefs that are\n included in the GraphDef.\n\n Returns:\n True if the registration is successfully completed.\n \"\"\"\n return wrap_toco.wrapped_register_custom_opdefs(custom_opdefs_list)\n\n\ndef toco_convert_protos(model_flags_str,\n toco_flags_str,\n input_data_str,\n debug_info_str=None,\n enable_mlir_converter=False):\n \"\"\"Convert `input_data_str` according to model and toco parameters.\n\n Unless you know what you are doing consider using\n the more friendly `tf.compat.v1.lite.toco_convert`.\n\n Args:\n model_flags_str: Serialized proto describing model properties, see\n `toco/model_flags.proto`.\n toco_flags_str: Serialized proto describing conversion properties, see\n `toco/toco_flags.proto`.\n input_data_str: Input data in serialized form (e.g. 
a graphdef is common)\n debug_info_str: Serialized `GraphDebugInfo` proto describing logging\n information. (default None)\n enable_mlir_converter: Enables MLIR-based conversion instead of the default\n TOCO conversion. (default False)\n\n Returns:\n Converted model in serialized form (e.g. a TFLITE model is common).\n Raises:\n ConverterError: When conversion fails in TFLiteConverter, usually due to\n ops not being supported.\n RuntimeError: When conversion fails, an exception is raised with the error\n message embedded.\n \"\"\"\n # Historically, TOCO conversion failures would trigger a crash, so we would\n # attempt to run the converter out-of-process. The MLIR conversion pipeline\n # surfaces errors instead, and can be safely run in-process.\n if enable_mlir_converter or not _toco_from_proto_bin:\n try:\n model_str = wrap_toco.wrapped_toco_convert(model_flags_str,\n toco_flags_str, input_data_str,\n debug_info_str,\n enable_mlir_converter)\n return model_str\n except Exception as e:\n converter_error = ConverterError(str(e))\n for error_data in _metrics_wrapper.get_collected_errors():\n converter_error.append_error(error_data)\n raise converter_error\n\n return _run_toco_binary(model_flags_str, toco_flags_str, input_data_str,\n debug_info_str)\n\n\n@convert_phase(Component.CONVERT_TF_TO_TFLITE_MODEL,\n SubComponent.CONVERT_GRAPHDEF_USING_DEPRECATED_CONVERTER)\ndef _run_toco_binary(model_flags_str,\n toco_flags_str,\n input_data_str,\n debug_info_str=None):\n \"\"\"Convert `input_data_str` using TOCO converter binary.\n\n Args:\n model_flags_str: Serialized proto describing model properties, see\n `toco/model_flags.proto`.\n toco_flags_str: Serialized proto describing conversion properties, see\n `toco/toco_flags.proto`.\n input_data_str: Input data in serialized form (e.g. a graphdef is common)\n debug_info_str: Serialized `GraphDebugInfo` proto describing logging\n information. (default None)\n\n Returns:\n Converted model in serialized form (e.g. a TFLITE model is common).\n Raises:\n ConverterError: When cannot find the toco binary.\n RuntimeError: When conversion fails, an exception is raised with the error\n message embedded.\n \"\"\"\n if distutils.spawn.find_executable(_toco_from_proto_bin) is None:\n raise ConverterError(\"\"\"Could not find toco_from_protos binary, make sure\nyour virtualenv bin directory or pip local bin directory is in your path.\nIn particular, if you have installed TensorFlow with --user, make sure you\nadd the install directory to your path.\n\nFor example:\nLinux: export PATH=$PATH:~/.local/bin/\nMac: export PATH=$PATH:~/Library/Python/<version#>/bin\n\nAlternative, use virtualenv.\"\"\")\n # Windows and TemporaryFile are not that useful together,\n # since you cannot have two readers/writers. 
So we have to\n # make the temporaries and close and delete them explicitly.\n toco_filename, model_filename, input_filename, output_filename = (None, None,\n None, None)\n try:\n # Build all input files\n with _tempfile.NamedTemporaryFile(delete=False) as fp_toco, \\\n _tempfile.NamedTemporaryFile(delete=False) as fp_model, \\\n _tempfile.NamedTemporaryFile(delete=False) as fp_input, \\\n _tempfile.NamedTemporaryFile(delete=False) as fp_debug:\n toco_filename = fp_toco.name\n input_filename = fp_input.name\n model_filename = fp_model.name\n debug_filename = fp_debug.name\n\n fp_model.write(model_flags_str)\n fp_toco.write(toco_flags_str)\n fp_input.write(six.ensure_binary(input_data_str))\n debug_info_str = debug_info_str if debug_info_str else \"\"\n # if debug_info_str contains a \"string value\", then the call to\n # fp_debug.write(debug_info_str) will fail with the following error\n #\n # TypeError: a bytes-like object is required, not 'str'\n #\n # Some of the subtests within the \"convert_test\" unit-test fail\n # with the error shown above. So watch out for that scenario and\n # convert debug_info_str to bytes where needed\n if not isinstance(debug_info_str, bytes):\n fp_debug.write(debug_info_str.encode(\"utf-8\"))\n else:\n fp_debug.write(debug_info_str)\n\n # Reserve an output file\n with _tempfile.NamedTemporaryFile(delete=False) as fp:\n output_filename = fp.name\n\n # Run\n cmd = [\n _toco_from_proto_bin,\n model_filename,\n toco_filename,\n input_filename,\n output_filename,\n \"--debug_proto_file={}\".format(debug_filename),\n ]\n cmdline = \" \".join(cmd)\n is_windows = _platform.system() == \"Windows\"\n proc = _subprocess.Popen(\n cmdline,\n shell=True,\n stdout=_subprocess.PIPE,\n stderr=_subprocess.STDOUT,\n close_fds=not is_windows)\n stdout, stderr = proc.communicate()\n exitcode = proc.returncode\n if exitcode == 0:\n with open(output_filename, \"rb\") as fp:\n return fp.read()\n else:\n stdout = _try_convert_to_unicode(stdout)\n stderr = _try_convert_to_unicode(stderr)\n raise ConverterError(\"See console for info.\\n%s\\n%s\\n\" % (stdout, stderr))\n finally:\n # Must manually cleanup files.\n for filename in [\n toco_filename, input_filename, model_filename, output_filename\n ]:\n try:\n _os.unlink(filename)\n except (OSError, TypeError):\n pass\n\n\ndef build_toco_flags(inference_type=dtypes.float32,\n inference_input_type=None,\n input_format=lite_constants.TENSORFLOW_GRAPHDEF,\n output_format=lite_constants.TFLITE,\n default_ranges_stats=None,\n drop_control_dependency=True,\n reorder_across_fake_quant=False,\n allow_custom_ops=False,\n post_training_quantize=False,\n quantize_to_float16=False,\n dump_graphviz_dir=None,\n dump_graphviz_video=False,\n target_ops=None,\n conversion_summary_dir=None,\n select_user_tf_ops=None,\n enable_tflite_resource_variables=False,\n unfold_batchmatmul=True,\n lower_tensor_list_ops=True,\n accumulation_type=None,\n allow_bfloat16=False,\n **_):\n \"\"\"Build the TOCO flags object from params.\"\"\"\n toco = _toco_flags_pb2.TocoFlags()\n toco.input_format = input_format\n toco.output_format = output_format\n toco.inference_type = convert_inference_tf_type_to_tflite_type(\n inference_type, usage=\"inference_type flag\")\n if inference_input_type:\n toco.inference_input_type = convert_inference_tf_type_to_tflite_type(\n inference_input_type, usage=\"inference_input_type flag\")\n else:\n toco.inference_input_type = toco.inference_type\n toco.drop_control_dependency = drop_control_dependency\n toco.reorder_across_fake_quant = 
reorder_across_fake_quant\n toco.allow_custom_ops = allow_custom_ops\n if select_user_tf_ops:\n toco.select_user_tf_ops.extend(select_user_tf_ops)\n toco.post_training_quantize = post_training_quantize\n toco.quantize_to_float16 = quantize_to_float16\n if default_ranges_stats:\n toco.default_ranges_min = default_ranges_stats[0]\n toco.default_ranges_max = default_ranges_stats[1]\n if dump_graphviz_dir:\n toco.dump_graphviz_dir = dump_graphviz_dir\n toco.dump_graphviz_include_video = dump_graphviz_video\n if conversion_summary_dir:\n toco.conversion_summary_dir = conversion_summary_dir\n if target_ops:\n if OpsSet.SELECT_TF_OPS in set(target_ops):\n toco.enable_select_tf_ops = True\n if set(target_ops) == set([OpsSet.SELECT_TF_OPS]):\n toco.force_select_tf_ops = True\n toco.enable_tflite_resource_variables = enable_tflite_resource_variables\n toco.unfold_batchmatmul = unfold_batchmatmul\n toco.lower_tensor_list_ops = lower_tensor_list_ops\n if accumulation_type:\n toco.accumulation_type = convert_tensor_tf_type_to_tflite_type(\n accumulation_type, usage=\"accumulation_type flag\")\n toco.allow_bfloat16 = allow_bfloat16\n\n return toco\n\n\ndef build_toco_convert_protos(input_tensors,\n output_tensors,\n inference_type=dtypes.float32,\n inference_input_type=None,\n input_format=lite_constants.TENSORFLOW_GRAPHDEF,\n input_shapes=None,\n output_format=lite_constants.TFLITE,\n quantized_input_stats=None,\n default_ranges_stats=None,\n drop_control_dependency=True,\n reorder_across_fake_quant=False,\n allow_custom_ops=False,\n change_concat_input_ranges=False,\n post_training_quantize=False,\n quantize_to_float16=False,\n dump_graphviz_dir=None,\n dump_graphviz_video=False,\n target_ops=None,\n allow_nonexistent_arrays=False,\n debug_info=None,\n conversion_summary_dir=None,\n saved_model_dir=None,\n saved_model_version=0,\n saved_model_tags=None,\n saved_model_exported_names=None,\n select_user_tf_ops=None,\n unfold_batchmatmul=True,\n lower_tensor_list_ops=True,\n accumulation_type=None,\n allow_bfloat16=False):\n \"\"\"Builds protocol buffers describing a conversion of a model using TOCO.\n\n Typically this is to convert from TensorFlow GraphDef to TFLite, in which\n case the default `input_format` and `output_format` are sufficient.\n\n Args:\n input_tensors: List of input tensors. Type and shape are computed using\n `foo.shape` and `foo.dtype`.\n output_tensors: List of output tensors (only .name is used from this).\n inference_type: Data type of numeric arrays, excluding the input layer.\n (default tf.float32, must be in {tf.float32, tf.int8, tf.uint8})\n inference_input_type: Data type of the numeric arrays in the input layer. If\n `inference_input_type` is in {tf.int8, tf.uint8}, then\n `quantized_input_stats` must be provided. (default is the value assigned\n to `inference_type`, must be in {tf.float32, tf.int8, tf.uint8})\n input_format: Type of data to read.\n (default TENSORFLOW_GRAPHDEF, must be in {TENSORFLOW_GRAPHDEF})\n input_shapes: Input array shape. (default None, must be None or a list of\n the same length as `input_tensors`.)\n output_format: Output file format. (default TFLITE, must be in\n {TFLITE, GRAPHVIZ_DOT})\n quantized_input_stats: Map of input tensor names to a tuple of floats\n representing the mean and standard deviation of the training data.\n (e.g., {\"foo\" : (0., 1.)}). Required if `inference_input_type` is tf.int8\n or tf.uint8. 
(default None)\n default_ranges_stats: Tuple of integers representing (min, max) range values\n for all arrays without a specified range. Intended for experimenting with\n quantization via \"dummy quantization\". (default None)\n drop_control_dependency: Boolean indicating whether to drop control\n dependencies silently. This is due to TFLite not supporting control\n dependencies. (default True)\n reorder_across_fake_quant: Boolean indicating whether to reorder FakeQuant\n nodes in unexpected locations. Used when the location of the FakeQuant\n nodes is preventing graph transformations necessary to convert the graph.\n Results in a graph that differs from the quantized training graph,\n potentially causing differing arithmetic behavior. (default False)\n allow_custom_ops: Boolean indicating whether to allow custom operations.\n When false any unknown operation is an error. When true, custom ops are\n created for any op that is unknown. The developer will need to provide\n these to the TensorFlow Lite runtime with a custom resolver. (default\n False)\n change_concat_input_ranges: Boolean to change behavior of min/max ranges for\n inputs and outputs of the concat operator for quantized models. Changes\n the ranges of concat operator overlap when true. (default False)\n post_training_quantize: Boolean indicating whether to quantize the weights\n of the converted float model. Model size will be reduced and there will be\n latency improvements (at the cost of accuracy). (default False)\n quantize_to_float16: Boolean indicating whether to convert float buffers to\n float16. (default False)\n dump_graphviz_dir: Full filepath of folder to dump the graphs at various\n stages of processing GraphViz .dot files. Preferred over\n --output_format=GRAPHVIZ_DOT in order to keep the requirements of the\n output file. (default None)\n dump_graphviz_video: Boolean indicating whether to dump the graph after\n every graph transformation. (default False)\n target_ops: Experimental flag, subject to change. Set of OpsSet options\n indicating which converter to use. (default set([OpsSet.TFLITE_BUILTINS]))\n allow_nonexistent_arrays: Allow specifying array names that don't exist or\n are unused in the final graph. (default False)\n debug_info: `GraphDebugInfo` proto containing the stack traces for the\n original nodes referred by the converted graph.\n conversion_summary_dir: A string, the path to the generated conversion logs.\n saved_model_dir: Filepath of the saved model to be converted. This value\n will be non-empty only when the saved model import path will be used.\n Otherwises, the graph def-based conversion will be processed.\n saved_model_version: SavedModel file format version of The saved model file\n to be converted. This value will be set only when the SavedModel import\n path will be used.\n saved_model_tags: Set of string saved model tags, formatted in the\n comma-separated value. This value will be set only when the SavedModel\n import path will be used.\n saved_model_exported_names: Names to be exported (default: export all) when\n the saved model import path is on. This value will be set only when the\n SavedModel import path will be used.\n select_user_tf_ops: List of user's defined TensorFlow ops need to be\n supported in the TensorFlow Lite runtime. These ops will be supported as\n select TensorFlow ops.\n unfold_batchmatmul: Whether to unfold tf.BatchMatMul to a set of\n tfl.fully_connected ops. 
If not, translate to tfl.batch_matmul.\n lower_tensor_list_ops: Whether to lower tensor list ops to builtin ops. If\n not, use Flex tensor list ops.\n accumulation_type: Data type of the accumulators in quantized inference.\n Typically used for float16 quantization and is either fp16 or fp32.\n allow_bfloat16: Whether the converted model supports reduced precision\n inference with the bfloat16 type.\n\n Returns:\n model_flags, toco_flags, debug_info: three protocol buffers describing the\n conversion process and debug information.\n\n Raises:\n ValueError:\n If the input tensor type is unknown\n Missing mean_values or std_dev_values\n RuntimeError: If TOCO fails to convert (in which case the runtime error's\n error text will contain the TOCO error log)\n \"\"\"\n toco = build_toco_flags(\n inference_type=inference_type,\n inference_input_type=inference_input_type,\n input_format=input_format,\n output_format=output_format,\n default_ranges_stats=default_ranges_stats,\n drop_control_dependency=drop_control_dependency,\n reorder_across_fake_quant=reorder_across_fake_quant,\n allow_custom_ops=allow_custom_ops,\n post_training_quantize=post_training_quantize,\n quantize_to_float16=quantize_to_float16,\n dump_graphviz_dir=dump_graphviz_dir,\n dump_graphviz_video=dump_graphviz_video,\n target_ops=target_ops,\n conversion_summary_dir=conversion_summary_dir,\n select_user_tf_ops=select_user_tf_ops,\n unfold_batchmatmul=unfold_batchmatmul,\n lower_tensor_list_ops=lower_tensor_list_ops,\n accumulation_type=accumulation_type,\n allow_bfloat16=allow_bfloat16)\n model = _model_flags_pb2.ModelFlags()\n model.change_concat_input_ranges = change_concat_input_ranges\n for idx, input_tensor in enumerate(input_tensors):\n input_array = model.input_arrays.add()\n if saved_model_dir:\n input_array.name = input_tensor.name\n else:\n input_array.name = util.get_tensor_name(input_tensor)\n input_array.data_type = convert_tensor_tf_type_to_tflite_type(\n input_tensor.dtype, usage=\"input type of the TensorFlow model\")\n\n if _requires_input_stats(toco) and quantized_input_stats:\n input_array.mean_value, input_array.std_value = quantized_input_stats[idx]\n\n if input_shapes is None:\n shape = input_tensor.shape\n else:\n shape = input_shapes[idx]\n\n if shape.rank is not None:\n # Create shapes with -1 for unknown dimensions.\n dims = []\n for dim in shape:\n if (dim is None or\n (isinstance(dim, tensor_shape.Dimension) and dim.value is None)):\n dims.append(-1)\n else:\n dims.append(int(dim))\n input_array.shape.dims.extend(dims)\n input_array.shape.unknown_rank = False\n else:\n input_array.shape.unknown_rank = True\n\n for output_tensor in output_tensors:\n if saved_model_dir:\n model.output_arrays.append(output_tensor.name)\n else:\n model.output_arrays.append(util.get_tensor_name(output_tensor))\n\n model.allow_nonexistent_arrays = allow_nonexistent_arrays\n\n if saved_model_dir:\n model.saved_model_dir = saved_model_dir\n model.saved_model_version = saved_model_version\n if saved_model_tags:\n model.saved_model_tags.extend(saved_model_tags)\n if saved_model_exported_names:\n model.saved_model_exported_names.extend(saved_model_exported_names)\n\n return model, toco, debug_info\n\n\n@convert_phase(Component.CONVERT_TF_TO_TFLITE_MODEL,\n SubComponent.CONVERT_GRAPHDEF)\ndef toco_convert_graph_def(input_data, input_arrays_with_shape, output_arrays,\n enable_mlir_converter, control_output_arrays, *args,\n **kwargs):\n \"\"\"\"Convert a model using TOCO.\n\n This function is used to convert GraphDefs that cannot 
be loaded into\n TensorFlow to TFLite. Conversion can be customized by providing arguments\n that are forwarded to `build_toco_convert_protos` (see documentation for\n details).\n\n Args:\n input_data: Input data (i.e. often `sess.graph_def`),\n input_arrays_with_shape: Tuple of strings representing input tensor names\n and list of integers representing input shapes\n (e.g., [(\"foo\" : [1, 16, 16, 3])]). Use only when graph cannot be loaded\n into TensorFlow and when `input_tensors` is None.\n output_arrays: List of output tensors to freeze graph with. Use only when\n graph cannot be loaded into TensorFlow and when `output_tensors` is None.\n enable_mlir_converter: Enables MLIR-based conversion instead of TOCO\n conversion.\n control_output_arrays: Control output node names. This is used when\n converting a Graph with no output tensors. For example, if the\n graph's last operation is a Print op, just specify that op's name in\n this field. This can be used together with the `output_arrays`\n parameter.\n *args: See `build_toco_convert_protos`,\n **kwargs: See `build_toco_convert_protos`.\n\n Returns:\n The converted data. For example if TFLite was the destination, then\n this will be a tflite flatbuffer in a bytes array.\n\n Raises:\n Defined in `build_toco_convert_protos`.\n \"\"\"\n model_flags, toco_flags, _ = build_toco_convert_protos(\n input_tensors=[], output_tensors=[], *args, **kwargs)\n\n for idx, (name, shape) in enumerate(input_arrays_with_shape):\n input_array = model_flags.input_arrays.add()\n if _requires_input_stats(toco_flags):\n if ((\"quantized_input_stats\" not in kwargs) or\n (not kwargs[\"quantized_input_stats\"])):\n raise ValueError(\n \"The `quantized_input_stats` flag must be defined when either \"\n \"`inference_type` flag or `inference_input_type` flag is set to \"\n \"tf.int8 or tf.uint8.\")\n input_array.mean_value, input_array.std_value = kwargs[\n \"quantized_input_stats\"][idx]\n input_array.name = name\n input_array.shape.dims.extend(list(map(int, shape)))\n\n if output_arrays:\n for name in output_arrays:\n model_flags.output_arrays.append(name)\n if control_output_arrays:\n for name in control_output_arrays:\n model_flags.control_output_arrays.append(name)\n\n data = toco_convert_protos(\n model_flags.SerializeToString(),\n toco_flags.SerializeToString(),\n input_data.SerializeToString(),\n enable_mlir_converter=enable_mlir_converter)\n return data\n\n\n@convert_phase(Component.CONVERT_TF_TO_TFLITE_MODEL,\n SubComponent.CONVERT_GRAPHDEF)\ndef toco_convert_impl(input_data, input_tensors, output_tensors,\n enable_mlir_converter, *args, **kwargs):\n \"\"\"\"Convert a model using TOCO.\n\n Typically this function is used to convert from TensorFlow GraphDef to TFLite.\n Conversion can be customized by providing arguments that are forwarded to\n `build_toco_convert_protos` (see documentation for details).\n\n Args:\n input_data: Input data (i.e. often `sess.graph_def`),\n input_tensors: List of input tensors. Type and shape are computed using\n `foo.shape` and `foo.dtype`.\n output_tensors: List of output tensors (only .name is used from this).\n enable_mlir_converter: Enables MLIR-based conversion instead of TOCO\n conversion.\n *args: See `build_toco_convert_protos`,\n **kwargs: See `build_toco_convert_protos`.\n\n Returns:\n The converted data. 
For example if TFLite was the destination, then\n this will be a tflite flatbuffer in a bytes array.\n\n Raises:\n Defined in `build_toco_convert_protos`.\n \"\"\"\n model_flags, toco_flags, debug_info = build_toco_convert_protos(\n input_tensors, output_tensors, *args, **kwargs)\n debug_info_str = debug_info.SerializeToString() if debug_info else None\n data = toco_convert_protos(\n model_flags.SerializeToString(),\n toco_flags.SerializeToString(),\n input_data.SerializeToString(),\n debug_info_str=debug_info_str,\n enable_mlir_converter=enable_mlir_converter)\n return data\n\n\n@convert_phase(Component.CONVERT_TF_TO_TFLITE_MODEL,\n SubComponent.CONVERT_SAVED_MODEL)\ndef convert_saved_model(saved_model_dir=None,\n saved_model_version=0,\n saved_model_tags=None,\n saved_model_exported_names=None,\n **kwargs):\n \"\"\"Converts a saved_model using TF Lite converter.\"\"\"\n model_flags = _model_flags_pb2.ModelFlags()\n if saved_model_dir:\n model_flags.saved_model_dir = saved_model_dir\n model_flags.saved_model_version = saved_model_version\n if saved_model_tags:\n model_flags.saved_model_tags.extend(saved_model_tags)\n if saved_model_exported_names:\n model_flags.saved_model_exported_names.extend(saved_model_exported_names)\n toco_flags = build_toco_flags(**kwargs)\n data = toco_convert_protos(\n model_flags.SerializeToString(),\n toco_flags.SerializeToString(),\n None, # input_data, unused\n None, # debug_info_str, unused\n enable_mlir_converter=True)\n return data\n\n\n@_tf_export(v1=[\"lite.toco_convert\"])\[email protected](None, \"Use `lite.TFLiteConverter` instead.\")\ndef toco_convert(input_data, input_tensors, output_tensors, *args, **kwargs):\n \"\"\"Convert a model using TOCO.\n\n Typically this function is used to convert from TensorFlow GraphDef to TFLite.\n Conversion can be customized by providing arguments that are forwarded to\n `build_toco_convert_protos` (see documentation for details). This function has\n been deprecated. Please use `tf.lite.TFLiteConverter` instead.\n\n Args:\n input_data: Input data (i.e. often `sess.graph_def`),\n input_tensors: List of input tensors. Type and shape are computed using\n `foo.shape` and `foo.dtype`.\n output_tensors: List of output tensors (only .name is used from this).\n *args: See `build_toco_convert_protos`,\n **kwargs: See `build_toco_convert_protos`.\n\n Returns:\n The converted data. For example if TFLite was the destination, then\n this will be a tflite flatbuffer in a bytes array.\n\n Raises:\n Defined in `build_toco_convert_protos`.\n \"\"\"\n enable_mlir_converter = kwargs.get(\"enable_mlir_converter\", False)\n return toco_convert_impl(input_data, input_tensors, output_tensors,\n enable_mlir_converter, *args, **kwargs)\n" ]
[ [ "tensorflow.python.data.ops.dataset_ops.Dataset.from_tensors", "tensorflow.python.data.experimental.ops.snapshot.snapshot", "tensorflow.python.ops.string_ops.substr_v2", "tensorflow.python.data.ops.dataset_ops.Dataset.from_tensor_slices", "tensorflow.python.data.kernel_tests.test_base.default_test_combinations", "tensorflow.python.ops.gen_array_ops.broadcast_to", "tensorflow.python.ops.string_ops.as_string", "tensorflow.python.framework.ops.Graph", "tensorflow.python.data.ops.dataset_ops.Dataset.zip", "numpy.int64", "tensorflow.python.platform.test.main", "numpy.random.rand", "tensorflow.python.data.ops.dataset_ops.Dataset.range", "tensorflow.python.data.ops.readers._TFRecordDataset", "tensorflow.python.data.experimental.ops.snapshot.legacy_snapshot", "tensorflow.python.framework.combinations.combine" ], [ "tensorflow.python.platform.resource_loader.get_path_to_datafile", "tensorflow.lite.python.wrap_toco.wrapped_register_custom_opdefs", "tensorflow.lite.python.wrap_toco.wrapped_toco_convert", "tensorflow.lite.python.util.get_tensor_name", "tensorflow.python.util.tf_export.tf_export", "tensorflow.lite.python.wrap_toco.wrapped_experimental_mlir_sparsify", "tensorflow.lite.python.convert_phase.convert_phase", "tensorflow.lite.toco.model_flags_pb2.ModelFlags", "tensorflow.lite.toco.toco_flags_pb2.TocoFlags", "tensorflow.lite.python.metrics_wrapper.metrics_wrapper.get_collected_errors", "tensorflow.lite.python.convert_phase.ConverterError", "tensorflow.python.util.deprecation.deprecated" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "2.7", "2.9", "2.6", "2.10" ] } ]
kradical/cluster-analysis-udemy
[ "e2101bdb08ae3b9ed0ed8c4c1c488e3a75a1b7c5" ]
[ "src/prereq/exercise8.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\n\n# Plot a spiral dataset\n\ndef generateArm(rotation, step):\n theta = np.random.rand(500) * step\n r = np.exp(theta) - 1\n\n x = r * np.cos(theta) + (np.random.rand(500) - 0.5) / 7\n y = r * np.sin(theta) + (np.random.rand(500) - 0.5) / 7\n\n x, y = x * np.cos(rotation) - y * np.sin(rotation), x * np.sin(rotation) + y * np.cos(rotation)\n\n return (x, y)\n\ndef main():\n arms = 6\n step = 2 * np.pi / arms\n\n for i in range(arms):\n rotation = i * step\n\n x, y = generateArm(rotation, step)\n\n plt.scatter(x, y)\n\n plt.show()\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "matplotlib.pyplot.scatter", "numpy.cos", "numpy.sin", "numpy.random.rand", "numpy.exp", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
lisapm/mlpiper
[ "74ad5ae343d364682cc2f8aaa007f2e8a1d84929", "74ad5ae343d364682cc2f8aaa007f2e8a1d84929", "74ad5ae343d364682cc2f8aaa007f2e8a1d84929" ]
[ "mlops/parallelm/mlops/stats/health/categorical_hist_stat.py", "reflex-algos/components/Python/test-python-train/main.py", "mlops/parallelm/mlops/channels/python_channel_health.py" ]
[ "\"\"\"\nThe Code contains functions to calculate univariate statistics for categorical features, given a dataset.\n\n\"\"\"\n\nimport numpy as np\n\nfrom parallelm.mlops.stats.health.histogram_data_objects import CategoricalHistogramDataObject\n\n\nclass CategoricalHistogram(object):\n \"\"\"\n Class is responsible for providing fit and get_feature_histogram_rep functionality of categorical training dataset.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Class constructor to init variables.\n :rtype: object\n \"\"\"\n # holds array of tuple of bin and edges. order can be followed by features list.\n self._prob_dist_categorical = []\n\n self._features = []\n\n def fit(self, training_feature_values, training_feature_names, num_bins, pred_bins):\n \"\"\"\n Function is responsible for fitting training data and fill up _prob_dist_categorical containing tuples of bin and edges.\n :rtype: self\n \"\"\"\n if isinstance(training_feature_values, np.ndarray):\n\n prob_dist = self._cal_hist_params(training_feature_values, num_bins=num_bins, pred_bins=pred_bins)\n self._prob_dist_categorical = prob_dist\n self._features = training_feature_names\n\n else:\n raise Exception(\"categorical histograms are generated on numpy array only!\")\n\n return self\n\n def get_feature_histogram_rep(self):\n \"\"\"\n Function is responsible for creating formatted representation of categorical histogram. It will be used in forwarding stat through MLOps.\n :rtype: list containing CategoricalHistogramDataObject\n \"\"\"\n feature_histogram_rep = []\n\n for index in range(len(self._features)):\n edges = self._prob_dist_categorical[index][0]\n # Since, edges can be of type string, bins array in python tuple can be stored as string.\n # To make it strong, converting it to float on fly.\n bins_str = self._prob_dist_categorical[index][1]\n # Converting to float!\n bins = [float(i) for i in bins_str]\n\n normalized_bins = bins / np.sum(bins)\n edges_rep = []\n for each_edge_index in range(0, len(edges)):\n edges_rep.append(str(edges[each_edge_index]))\n\n categorical_histogram_data_object = CategoricalHistogramDataObject(feature_name=self._features[index],\n edges=edges_rep,\n bins=normalized_bins)\n feature_histogram_rep.append(categorical_histogram_data_object)\n return feature_histogram_rep\n\n @staticmethod\n def _cal_hist_params(sample, num_bins, pred_bins=None):\n \"\"\"\n Calculate the probability of each category in each column, assuming multi-nomial distribution.\n\n :param sample: A dataset that is a 2D numpy array\n :param num_bins: Number of bins to create. Although it is not used right now. But to make it scalable, passing now.\n :param pred_bins: pre-defined bins. Although it is not used right now. 
But to make it scalable, passing now.\n :rtype: prob_dist: A list containing the probability distribution of categories in each column of the sample.\n Order of arrays in the list is same as the order of columns in sample data\n \"\"\"\n\n # convert whole nd array to float!\n sample = sample.astype(str)\n\n prob_dist = []\n for a in range(0, sample.shape[1]):\n # Determine the frequency of unique values\n unique, counts = np.unique(sample[:, a], return_counts=True)\n\n prob_dist.append(np.asarray((unique, counts * 1.0)))\n return prob_dist\n", "from __future__ import print_function\n\nimport argparse\nimport os\nimport sys\nimport time\n\n\ndef parse_args():\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--arg1\", help=\"Test argument 1\")\n parser.add_argument(\"--output-model\", help=\"Path to store generated model\")\n parser.add_argument(\"--model-is-directory\", default=0, help=\"Whether model should be saved as a directory\")\n parser.add_argument(\"--import-tensorflow\", default=0, help=\"Whether to import tensorflow\")\n parser.add_argument(\"--exit-value\", type=int, default=0, help=\"Exit value\")\n parser.add_argument(\"--iter\", type=int, default=20, help=\"How many 1sec iterations to perform\")\n\n # TODO add model size as argument\n # TODO add mlops test as argument\n\n options = parser.parse_args()\n return options\n\n\ndef main():\n\n print(\"args: {}\".format(sys.argv))\n options = parse_args()\n print(\"- inside test-python-train.main.py Running main.py\")\n print(\"arg1: {}\".format(options.arg1))\n print(\"output_model: {}\".format(options.output_model))\n print(\"model_is_directory: {}\".format(options.model_is_directory))\n print(\"import_tensorflow: {}\".format(options.import_tensorflow))\n print(\"iter: {}\".format(options.iter))\n print(\"exit_value: {}\".format(options.exit_value))\n\n for idx in range(options.iter):\n print(\"stdout - Idx {}\".format(idx))\n print(\"stderr- Idx {}\".format(idx), file=sys.stderr)\n time.sleep(1)\n\n if options.import_tensorflow:\n import tensorflow as tf\n feature_configs = {'x': tf.FixedLenFeature(shape=[784], dtype=tf.float32),}\n print(\"feature_configs\".format(feature_configs))\n\n if options.output_model is not None:\n if options.model_is_directory == 0:\n with open(options.output_model, \"w\") as f:\n f.write(\"model-1234-test-train-python\")\n else:\n os.mkdir(options.output_model)\n filename = os.path.join(options.output_model, \"saved_model.pb\")\n with open(filename, \"a+\") as f:\n f.write(\"model-1234-test-train-tf\")\n\n if options.exit_value >= 0:\n print(\"About to exit with value: {}\".format(options.exit_value))\n sys.exit(options.exit_value)\n else:\n print(\"About to raise exception: {}\".format(options.exit_value))\n raise Exception(\"Exiting main using exception\")\n\n\nif __name__ == \"__main__\":\n main()\n", "import logging\n\nimport numpy as np\n\nfrom parallelm.mlops.constants import PyHealth\nfrom parallelm.mlops.data_analysis.categorical_data_analyst import CategoricalDataAnalyst\nfrom parallelm.mlops.data_analysis.continuous_data_analyst import ContinuousDataAnalyst\nfrom parallelm.mlops.stats.health.categorical_hist_stat import CategoricalHistogram\nfrom parallelm.mlops.stats.health.continuous_hist_stat import ContinuousHistogram\nfrom parallelm.mlops.stats.health.general_hist_stat import GeneralHistogramStat\nfrom parallelm.mlops.stats.heatmap_stat import _HeatMapStat\nfrom parallelm.mlops.stats.histogram_overlap_score_stat import _HistogramOverlapScoreStat\nfrom 
parallelm.mlops.stats.histogram_stat import _HistogramStat\nfrom parallelm.mlops.stats.table import Table\nfrom parallelm.protobuf.ReflexEvent_pb2 import ReflexEvent\n\n\nclass PythonChannelHealth(object):\n \"\"\"\n Class is responsible for generating health and heatmap given feature values, names and model_stat\n \"\"\"\n\n @staticmethod\n def _create_feature_subset(features_values, features_names, selection_features_subset):\n \"\"\"\n Feature selects features subset from values.\n For example, if user wants feature array of features - c0 and c2 from\n feature of c0--c10, then it will select column c0 and c2 and return np-array.\n \"\"\"\n\n features_values = np.array(features_values)\n features_names = list(features_names)\n selection_features_subset = list(selection_features_subset)\n\n # feature values in order of names\n subset_feature_values_array = []\n for each_selection_f_names in selection_features_subset:\n subset_feature_values_array.append(\n features_values[:, features_names.index(each_selection_f_names)])\n return np.array(subset_feature_values_array).T\n\n @staticmethod\n def _create_current_hist_rep(features_values,\n features_names,\n num_bins,\n pred_bins_hist,\n stat_object_method,\n name_of_stat,\n model_id):\n \"\"\"\n Method is responsible for creating histogram from given values and names and return histogram lists.\n Type of histogram - continuous/categorical - depends on name_of_stat.\n :param features_values: feature array\n :param features_names: feature names\n :param num_bins: max number of bins for continuous features.\n :param pred_bins_hist: if bins are provided, to use as default.\n :param stat_object_method: stat object method to output stat\n :param name_of_stat:\n :param model_id: id of the mlobject stat is related to\n :return: list of continuous histogram.\n \"\"\"\n current_histogram_representation = None\n\n # generating histogram if features_values exists.\n if len(features_values) > 0:\n\n if name_of_stat is PyHealth.CONTINUOUS_HISTOGRAM_KEY:\n current_histogram = ContinuousHistogram() \\\n .fit(features_values,\n features_names,\n num_bins=num_bins,\n pred_bins=pred_bins_hist)\n\n elif name_of_stat is PyHealth.CATEGORICAL_HISTOGRAM_KEY:\n current_histogram = CategoricalHistogram() \\\n .fit(features_values,\n features_names,\n num_bins=num_bins,\n pred_bins=pred_bins_hist)\n else:\n raise Exception(\"_create_current_hist_rep is not compatible with: {}\".format(name_of_stat))\n\n current_histogram_representation = \\\n current_histogram.get_feature_histogram_rep()\n\n mlops_histogram_stat = _HistogramStat() \\\n .name(name_of_stat)\n\n for each_features_rep in current_histogram_representation:\n mlops_histogram_stat.add_feature_data(feature=each_features_rep.get_feature_name(),\n edge=each_features_rep.get_edges(),\n bin=each_features_rep.get_bins())\n if stat_object_method is not None:\n stat_object_method(mlops_stat=mlops_histogram_stat.get_mlops_stat(model_id),\n reflex_event_message_type=ReflexEvent.MLHealthModel)\n\n return current_histogram_representation\n\n @staticmethod\n def _create_current_continuous_heatmap_rep(continuous_features_values,\n continuous_features_names,\n stat_object_method,\n model_id):\n \"\"\"\n Method is responsible for creating heatmap outof given feature values and report it using stat_object_method\n\n :param continuous_features_values: feature array\n :param continuous_features_names: feature names\n :param stat_object_method: stat object method to output stat\n :return:\n \"\"\"\n\n valid_feature_indexes = []\n 
valid_feature_values = []\n valid_feature_names = []\n non_valid_feature_indexes = []\n\n # convert whole nd array to float!\n for each_feature_index in range(continuous_features_values.shape[1]):\n try:\n\n valid_feature_values.append(np.array(continuous_features_values[:, each_feature_index], dtype=float))\n valid_feature_names.append(continuous_features_names[each_feature_index])\n valid_feature_indexes.append(each_feature_index)\n except:\n non_valid_feature_indexes.append(each_feature_index)\n\n valid_feature_values = np.array(valid_feature_values).T\n\n if len(non_valid_feature_indexes) > 0:\n logging.info(\"{} cannot be used for continuous heatmap creation.\"\n .format(np.array(continuous_features_names)[non_valid_feature_indexes]))\n\n # creating heatmap -- min-max normalization and then taking mean.\n heat_map_values = []\n if len(valid_feature_values) > 0:\n\n max_continuous_features_values = np.max(valid_feature_values, axis=0)\n min_continuous_features_values = np.min(valid_feature_values, axis=0)\n\n for each_feature_index in range(len(valid_feature_names)):\n\n # for constant values,\n # min value has to be 0\n # if max is also 0, then it needs to be changed to 1.0 to prevent 0/0\n if (max_continuous_features_values[each_feature_index]\n == min_continuous_features_values[each_feature_index]):\n min_continuous_features_values[each_feature_index] = 0.0\n if max_continuous_features_values[each_feature_index] == 0.0:\n max_continuous_features_values[each_feature_index] = 1.0\n\n max_min_diff = max_continuous_features_values - min_continuous_features_values\n\n normalized_continuous_features_values = (valid_feature_values -\n min_continuous_features_values) * 1.0 / \\\n max_min_diff\n\n heat_map_values = np.mean(normalized_continuous_features_values, axis=0)\n\n mlops_heatmap_stat = _HeatMapStat() \\\n .name(PyHealth.HEATMAP_KEY) \\\n .features(list(valid_feature_names)) \\\n .data(list(heat_map_values))\n\n if stat_object_method is not None:\n stat_object_method(mlops_stat=mlops_heatmap_stat.get_mlops_stat(model_id),\n reflex_event_message_type=ReflexEvent.StatsMessage)\n\n return valid_feature_names, heat_map_values\n\n @staticmethod\n def _compare_health(current_histogram_representation,\n contender_histogram_representation,\n stat_object_method,\n name_of_stat,\n model_id):\n \"\"\"\n Method is responsible for comparing two histogram representation and output score using stat_object_method\n\n :param current_histogram_representation: inferring histogram representation\n :param contender_histogram_representation: training histogram representation\n :param stat_object_method: stat object method to output stat\n :param name_of_stat: be it continuous or categorical\n :return:\n \"\"\"\n # If contender_histogram_representation is present then overlap can be calculated \"\"\"\n\n contender_histogram_present = False\n if isinstance(contender_histogram_representation, list):\n if len(contender_histogram_representation) > 0:\n contender_histogram_present = True\n\n elif contender_histogram_representation is not None:\n contender_histogram_present = True\n\n compared_feature_names = []\n compared_feature_score = []\n if contender_histogram_present:\n compared_feature_names, compared_feature_score = \\\n GeneralHistogramStat.calculate_overlap_score(\n contender_hist_rep=contender_histogram_representation,\n inferring_hist_rep=current_histogram_representation)\n\n mlops_stat_hist_score = _HistogramOverlapScoreStat() \\\n .name(name_of_stat) \\\n .features(list(compared_feature_names)) 
\\\n .data(list(compared_feature_score))\n\n if stat_object_method is not None:\n stat_object_method(mlops_stat=mlops_stat_hist_score.get_mlops_stat(model_id),\n reflex_event_message_type=ReflexEvent.MLHealthModel)\n\n return compared_feature_names, compared_feature_score\n\n @staticmethod\n def generate_health_and_heatmap_stat(stat_object_method,\n logger,\n features_values,\n features_names,\n model_stat,\n model_id,\n num_bins=13,\n # TODO: Have ability to get this argument from user!\n data_analysis=True):\n \"\"\"\n Method is highly responsible and creates continuous/categorical histograms. Also creates heatmap and compare two histogram if program is running on inference.\n\n :param stat_object_method: stat object method to output stat\n :param logger: logger to log\n :param features_values: feature array\n :param features_names: feature names\n :param model_stat: model stat\n :param num_bins: max number of bins for features.\n :return:\n \"\"\"\n # generating general stats like categorical/continuous features and contender histograms.\n general_hist_stat = GeneralHistogramStat()\n general_hist_stat \\\n .create_and_set_general_stat(set_of_features_values=features_values,\n set_of_features_names=features_names,\n model_stat=model_stat)\n\n # For Continuous Values\n # continuous feature names\n continuous_features_names = general_hist_stat.set_of_continuous_features\n # predefined bins of contender continuous hist\n pred_bins_continuous_hist = general_hist_stat.contender_continuous_hist_bins\n contender_continuous_histogram_representation = general_hist_stat.contender_continuous_histogram\n\n continuous_features_values = PythonChannelHealth. \\\n _create_feature_subset(features_values=features_values,\n features_names=features_names,\n selection_features_subset=continuous_features_names)\n current_continuous_histogram_representation = \\\n PythonChannelHealth._create_current_hist_rep(\n features_values=continuous_features_values,\n features_names=continuous_features_names,\n num_bins=num_bins,\n pred_bins_hist=pred_bins_continuous_hist,\n stat_object_method=stat_object_method,\n name_of_stat=PyHealth.CONTINUOUS_HISTOGRAM_KEY,\n model_id=model_id)\n\n # running data analysis for continuous dataset\n if data_analysis:\n continuous_data_analyst_result = ContinuousDataAnalyst \\\n .analyze(set_of_continuous_feature_names=continuous_features_names,\n set_of_continuous_feature_values=continuous_features_values)\n\n # outputting stat only if analysis result is there\n if len(continuous_data_analyst_result) > 0:\n cont_da = Table() \\\n .name(\"Continuous Data Analysis\") \\\n .cols([\"Count\",\n \"Missing\",\n \"Zeros\",\n \"Standard Deviation\",\n \"Min\",\n \"Mean\",\n \"Median\",\n \"Max\"])\n\n for f_n in continuous_data_analyst_result.keys():\n f_v = continuous_data_analyst_result[f_n]\n cont_da.add_row(str(f_v.feature_name),\n [f_v.count,\n f_v.NAs,\n f_v.zeros,\n f_v.std,\n f_v.min,\n f_v.mean,\n f_v.median,\n f_v.max])\n\n # outputting stat using stat object as stat message type\n stat_object_method(mlops_stat=cont_da.get_mlops_stat(model_id=model_id),\n reflex_event_message_type=ReflexEvent.StatsMessage)\n\n logger.debug(\"continuous features values: {}\".format(continuous_features_values))\n logger.debug(\"continuous features names: {}\".format(continuous_features_names))\n logger.debug(\n \"current histogram representation: {}\".format(current_continuous_histogram_representation))\n logger.debug(\n \"contender histogram representation: 
{}\".format(contender_continuous_histogram_representation))\n\n # For Categorical Values\n # categorical feature names\n categorical_features_names = general_hist_stat.set_of_categorical_features\n\n # predefined bins of contender categorical hist\n pred_bins_categorical_hist = general_hist_stat.contender_categorical_hist_bins\n contender_categorical_histogram_representation = general_hist_stat.contender_categorical_histogram\n\n categorical_features_values = PythonChannelHealth._create_feature_subset(features_values=features_values,\n features_names=features_names,\n selection_features_subset=categorical_features_names)\n current_categorical_histogram_representation = \\\n PythonChannelHealth._create_current_hist_rep(\n categorical_features_values,\n categorical_features_names,\n num_bins,\n pred_bins_categorical_hist,\n stat_object_method,\n name_of_stat=PyHealth.CATEGORICAL_HISTOGRAM_KEY,\n model_id=model_id)\n\n # running data analysis for categorical dataset\n if data_analysis:\n categorical_data_analyst_result = CategoricalDataAnalyst \\\n .analyze(set_of_categorical_feature_names=categorical_features_names,\n set_of_categorical_feature_values=categorical_features_values)\n\n # outputting stat only if analysis result is there\n if len(categorical_data_analyst_result) > 0:\n\n categ_da = Table() \\\n .name(\"Categorical Data Analysis\") \\\n .cols([\"Count\",\n \"Missing\",\n \"Uniques\",\n \"Top Frequently Occurring Category\",\n \"Top Frequency\",\n \"Average String Length\"])\n\n for f_n in categorical_data_analyst_result.keys():\n f_v = categorical_data_analyst_result[f_n]\n categ_da. \\\n add_row(str(f_v.feature_name),\n [f_v.count,\n f_v.NAs,\n f_v.unique,\n f_v.top,\n f_v.freq_top,\n f_v.avg_str_len])\n\n # outputting stat using stat object as stat message type\n stat_object_method(mlops_stat=categ_da.get_mlops_stat(model_id=model_id),\n reflex_event_message_type=ReflexEvent.StatsMessage)\n\n logger.debug(\"categorical features values: {}\".format(categorical_features_values))\n logger.debug(\"categorical features names: {}\".format(categorical_features_names))\n logger.debug(\n \"current histogram representation: {}\".format(current_categorical_histogram_representation))\n logger.debug(\n \"contender histogram representation: {}\".format(contender_categorical_histogram_representation))\n\n # If model_stat is given, it means it is inference program\n # so it needs to create heatmap and score too.\n if model_stat is not None:\n if continuous_features_values.shape[0] > 0:\n continuous_features_names, heat_map_values = PythonChannelHealth. \\\n _create_current_continuous_heatmap_rep(continuous_features_values=continuous_features_values,\n continuous_features_names=continuous_features_names,\n stat_object_method=stat_object_method,\n model_id=model_id)\n logger.debug(\"features: {}, heatmap values: {}\".format(continuous_features_names,\n heat_map_values))\n\n compared_continuous_feature_names, compared_continuous_feature_score = PythonChannelHealth. 
\\\n _compare_health(\n current_histogram_representation=current_continuous_histogram_representation,\n contender_histogram_representation=contender_continuous_histogram_representation,\n stat_object_method=stat_object_method,\n name_of_stat=PyHealth.CONTINUOUS_HISTOGRAM_OVERLAP_SCORE_KEY,\n model_id=model_id)\n logger.debug(\n \"continuous features: {}, overlap scores: {}\".format(compared_continuous_feature_names,\n compared_continuous_feature_score))\n\n if categorical_features_values.shape[0] > 0:\n compared_categorical_feature_names, compared_categorical_feature_names = PythonChannelHealth. \\\n _compare_health(\n current_histogram_representation=current_categorical_histogram_representation,\n contender_histogram_representation=contender_categorical_histogram_representation,\n stat_object_method=stat_object_method,\n name_of_stat=PyHealth.CATEGORICAL_HISTOGRAM_OVERLAP_SCORE_KEY,\n model_id=model_id)\n logger.debug(\n \"categorical features: {}, overlap scores: {}\".format(\n compared_categorical_feature_names, compared_categorical_feature_names))\n" ]
[ [ "numpy.asarray", "numpy.sum", "numpy.unique" ], [ "tensorflow.FixedLenFeature" ], [ "numpy.max", "numpy.array", "numpy.mean", "numpy.min" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jsdussanc/luminoth
[ "dc1c1203a40e1ecf2aaca9647f3008ab72b41438", "dc1c1203a40e1ecf2aaca9647f3008ab72b41438" ]
[ "luminoth/utils/bbox_transform_tf.py", "luminoth/models/fasterrcnn/rpn_test.py" ]
[ "import tensorflow as tf\n\n\ndef get_width_upright(bboxes):\n with tf.name_scope('BoundingBoxTransform/get_width_upright'):\n bboxes = tf.cast(bboxes, tf.float32)\n x1, y1, x2, y2 = tf.split(bboxes, 4, axis=1)\n width = x2 - x1 + 1.\n height = y2 - y1 + 1.\n\n # Calculate up right point of bbox (urx = up right x)\n urx = x1 + .5 * width\n ury = y1 + .5 * height\n\n return width, height, urx, ury\n\n\ndef encode(bboxes, gt_boxes, variances=None):\n with tf.name_scope('BoundingBoxTransform/encode'):\n (bboxes_width, bboxes_height,\n bboxes_urx, bboxes_ury) = get_width_upright(bboxes)\n\n (gt_boxes_width, gt_boxes_height,\n gt_boxes_urx, gt_boxes_ury) = get_width_upright(gt_boxes)\n\n if variances is None:\n variances = [1., 1.]\n\n targets_dx = (gt_boxes_urx - bboxes_urx)/(bboxes_width * variances[0])\n targets_dy = (gt_boxes_ury - bboxes_ury)/(bboxes_height * variances[0])\n\n targets_dw = tf.log(gt_boxes_width / bboxes_width) / variances[1]\n targets_dh = tf.log(gt_boxes_height / bboxes_height) / variances[1]\n\n targets = tf.concat(\n [targets_dx, targets_dy, targets_dw, targets_dh], axis=1)\n\n return targets\n\n\ndef decode(roi, deltas, variances=None):\n with tf.name_scope('BoundingBoxTransform/decode'):\n (roi_width, roi_height,\n roi_urx, roi_ury) = get_width_upright(roi)\n\n dx, dy, dw, dh = tf.split(deltas, 4, axis=1)\n\n if variances is None:\n variances = [1., 1.]\n\n pred_ur_x = dx * roi_width * variances[0] + roi_urx\n pred_ur_y = dy * roi_height * variances[0] + roi_ury\n pred_w = tf.exp(dw * variances[1]) * roi_width\n pred_h = tf.exp(dh * variances[1]) * roi_height\n\n bbox_x1 = pred_ur_x - 0.5 * pred_w\n bbox_y1 = pred_ur_y - 0.5 * pred_h\n\n # This -1. extra is different from reference implementation.\n bbox_x2 = pred_ur_x + 0.5 * pred_w - 1.\n bbox_y2 = pred_ur_y + 0.5 * pred_h - 1.\n\n bboxes = tf.concat(\n [bbox_x1, bbox_y1, bbox_x2, bbox_y2], axis=1)\n\n return bboxes\n\n\ndef clip_boxes(bboxes, imshape):\n \"\"\"\n Clips bounding boxes to image boundaries based on image shape.\n\n Args:\n bboxes: Tensor with shape (num_bboxes, 4)\n where point order is x1, y1, x2, y2.\n\n imshape: Tensor with shape (2, )\n where the first value is height and the next is width.\n\n Returns\n Tensor with same shape as bboxes but making sure that none\n of the bboxes are outside the image.\n \"\"\"\n with tf.name_scope('BoundingBoxTransform/clip_bboxes'):\n bboxes = tf.cast(bboxes, dtype=tf.float32)\n imshape = tf.cast(imshape, dtype=tf.float32)\n\n x1, y1, x2, y2 = tf.split(bboxes, 4, axis=1)\n width = imshape[1]\n height = imshape[0]\n x1 = tf.maximum(tf.minimum(x1, width - 1.0), 0.0)\n x2 = tf.maximum(tf.minimum(x2, width - 1.0), 0.0)\n\n y1 = tf.maximum(tf.minimum(y1, height - 1.0), 0.0)\n y2 = tf.maximum(tf.minimum(y2, height - 1.0), 0.0)\n\n bboxes = tf.concat([x1, y1, x2, y2], axis=1)\n\n return bboxes\n\n\ndef change_order(bboxes):\n \"\"\"Change bounding box encoding order.\n\n TensorFlow works with the (y_min, x_min, y_max, x_max) order while we work\n with the (x_min, y_min, x_max, y_min).\n\n While both encoding options have its advantages and disadvantages we\n decided to use the (x_min, y_min, x_max, y_min), forcing use to switch to\n TensorFlow's every time we want to use a std function that handles bounding\n boxes.\n\n Args:\n bboxes: A Tensor of shape (total_bboxes, 4)\n\n Returns:\n bboxes: A Tensor of shape (total_bboxes, 4) with the order swaped.\n \"\"\"\n with tf.name_scope('BoundingBoxTransform/change_order'):\n first_min, second_min, first_max, second_max = 
tf.unstack(\n bboxes, axis=1\n )\n bboxes = tf.stack(\n [second_min, first_min, second_max, first_max], axis=1\n )\n return bboxes\n\n\nif __name__ == '__main__':\n import numpy as np\n\n bboxes = tf.placeholder(tf.float32)\n bboxes_val = [[10, 10, 20, 22]]\n\n gt_boxes = tf.placeholder(tf.float32)\n gt_boxes_val = [[11, 13, 34, 31]]\n\n imshape = tf.placeholder(tf.int32)\n imshape_val = (100, 100)\n\n deltas = encode(bboxes, gt_boxes)\n decoded_bboxes = decode(bboxes, deltas)\n final_decoded_bboxes = clip_boxes(decoded_bboxes, imshape)\n\n with tf.Session() as sess:\n final_decoded_bboxes = sess.run(final_decoded_bboxes, feed_dict={\n bboxes: bboxes_val,\n gt_boxes: gt_boxes_val,\n imshape: imshape_val,\n })\n\n assert np.all(gt_boxes_val == final_decoded_bboxes)\n", "import numpy as np\nimport tensorflow as tf\n\nfrom easydict import EasyDict\nfrom luminoth.models.fasterrcnn.rpn import RPN\nfrom luminoth.utils.anchors import generate_anchors_reference\nfrom luminoth.utils.test import generate_gt_boxes, generate_anchors\n\n\nclass RPNTest(tf.test.TestCase):\n\n def setUp(self):\n super(RPNTest, self).setUp()\n self.num_anchors = 9\n # Use default settings.\n self.config = EasyDict({\n 'num_channels': 512,\n 'kernel_shape': [3, 3],\n 'rpn_initializer': {\n 'type': 'variance_scaling_initializer',\n 'factor': 1.0,\n 'mode': 'FAN_AVG',\n 'uniform': True,\n },\n 'cls_initializer': {\n 'type': 'truncated_normal_initializer',\n 'mean': 0.0,\n 'stddev': 0.01,\n },\n 'bbox_initializer': {\n 'type': 'truncated_normal_initializer',\n 'mean': 0.0,\n 'stddev': 0.01,\n },\n 'l2_regularization_scale': 0.0005,\n 'l1_sigma': 3.0,\n 'activation_function': 'relu6',\n 'proposals': {\n 'pre_nms_top_n': 12000,\n 'post_nms_top_n': 2000,\n 'nms_threshold': 0.6,\n 'min_size': 0,\n 'clip_after_nms': False,\n 'filter_outside_anchors': False,\n 'apply_nms': True,\n 'min_prob_threshold': 0.0,\n },\n 'target': {\n 'allowed_border': 0,\n 'clobber_positives': False,\n 'foreground_threshold': 0.7,\n 'background_threshold_high': 0.3,\n 'background_threshold_low': 0.,\n 'foreground_fraction': 0.5,\n 'minibatch_size': 256,\n }\n })\n\n # Use default anchor configuration values.\n self.base_size = 256\n self.scales = np.array([0.5, 1, 2])\n self.ratios = np.array([0.5, 1, 2])\n self.stride = 16\n tf.reset_default_graph()\n\n def testBasic(self):\n \"\"\"Tests shapes are consistent with anchor generation.\n \"\"\"\n model = RPN(\n self.num_anchors, self.config, debug=True\n )\n # (plus the batch number)\n pretrained_output_shape = (1, 32, 32, 512)\n pretrained_output = tf.placeholder(\n tf.float32, shape=pretrained_output_shape)\n\n # Estimate image shape from the pretrained output and the anchor stride\n image_shape_val = (\n int(pretrained_output_shape[1] * self.stride),\n int(pretrained_output_shape[2] * self.stride),\n )\n\n # Use 4 ground truth boxes.\n gt_boxes_shape = (4, 4)\n gt_boxes = tf.placeholder(tf.float32, shape=gt_boxes_shape)\n image_shape_shape = (2,)\n image_shape = tf.placeholder(tf.float32, shape=image_shape_shape)\n # Total anchors depends on the pretrained output shape and the total\n # number of anchors per point.\n total_anchors = (\n pretrained_output_shape[1] * pretrained_output_shape[2] *\n self.num_anchors\n )\n all_anchors_shape = (total_anchors, 4)\n all_anchors = tf.placeholder(tf.float32, shape=all_anchors_shape)\n layers = model(\n pretrained_output, image_shape, all_anchors, gt_boxes=gt_boxes\n )\n\n with self.test_session() as sess:\n # As in the case of a real session we need to initialize 
the\n # variables.\n sess.run(tf.global_variables_initializer())\n layers_inst = sess.run(layers, feed_dict={\n # We don't really care about the value of the pretrained output\n # only that has the correct shape.\n pretrained_output: np.random.rand(\n *pretrained_output_shape\n ),\n # Generate random but valid ground truth boxes.\n gt_boxes: generate_gt_boxes(\n gt_boxes_shape[0], image_shape_val\n ),\n # Generate anchors from a reference and the shape of the\n # pretrained_output.\n all_anchors: generate_anchors(\n generate_anchors_reference(\n self.base_size, self.ratios, self.scales\n ),\n 16,\n pretrained_output_shape[1:3]\n ),\n image_shape: image_shape_val,\n })\n\n # Class score generates 2 values per anchor.\n rpn_cls_score_shape = layers_inst['rpn_cls_score'].shape\n rpn_cls_score_true_shape = (total_anchors, 2)\n self.assertEqual(rpn_cls_score_shape, rpn_cls_score_true_shape)\n\n # Probs have the same shape as cls scores.\n rpn_cls_prob_shape = layers_inst['rpn_cls_prob'].shape\n self.assertEqual(rpn_cls_prob_shape, rpn_cls_score_true_shape)\n\n # We check softmax with the sum of the output.\n rpn_cls_prob_sum = layers_inst['rpn_cls_prob'].sum(axis=1)\n self.assertAllClose(rpn_cls_prob_sum, np.ones(total_anchors))\n\n # Proposals and scores are related to the output of the NMS with\n # limits.\n total_proposals = layers_inst['proposals'].shape[0]\n total_scores = layers_inst['scores'].shape[0]\n\n # Check we don't get more than top_n proposals.\n self.assertGreaterEqual(\n self.config.proposals.post_nms_top_n, total_proposals\n )\n\n # Check we get a score for each proposal.\n self.assertEqual(total_proposals, total_scores)\n\n # Check that we get a regression for each anchor.\n self.assertEqual(\n layers_inst['rpn_bbox_pred'].shape,\n (total_anchors, 4)\n )\n\n # Check that we get a target for each regression for each anchor.\n self.assertEqual(\n layers_inst['rpn_bbox_target'].shape,\n (total_anchors, 4)\n )\n\n # Check that we get a target class for each anchor.\n self.assertEqual(\n layers_inst['rpn_cls_target'].shape,\n (total_anchors,)\n )\n\n # Check that targets are composed of [-1, 0, 1] only.\n rpn_cls_target = layers_inst['rpn_cls_target']\n self.assertEqual(\n tuple(np.sort(np.unique(rpn_cls_target))),\n (-1, 0., 1.)\n )\n\n batch_cls_target = rpn_cls_target[\n (rpn_cls_target == 0.) 
| (rpn_cls_target == 1.)\n ]\n\n # Check that the non negative target class are exactly the size\n # as the minibatch\n self.assertEqual(\n batch_cls_target.shape,\n (self.config.target.minibatch_size, )\n )\n\n # Check that we get upto foreground_fraction of positive anchors.\n self.assertLessEqual(\n batch_cls_target[batch_cls_target == 1.].shape[0] /\n batch_cls_target.shape[0],\n self.config.target.foreground_fraction\n )\n\n def testTypes(self):\n \"\"\"Tests that return types are the expected ones.\n \"\"\"\n # We repeat testBasic's setup.\n model = RPN(\n self.num_anchors, self.config, debug=True\n )\n pretrained_output_shape = (1, 32, 32, 512)\n pretrained_output = tf.placeholder(\n tf.float32, shape=pretrained_output_shape)\n\n image_shape_val = (\n int(pretrained_output_shape[1] * self.stride),\n int(pretrained_output_shape[2] * self.stride),\n )\n\n gt_boxes_shape = (4, 4)\n gt_boxes = tf.placeholder(tf.float32, shape=gt_boxes_shape)\n image_shape_shape = (2,)\n image_shape = tf.placeholder(tf.float32, shape=image_shape_shape)\n\n total_anchors = (\n pretrained_output_shape[1] * pretrained_output_shape[2] *\n self.num_anchors\n )\n all_anchors_shape = (total_anchors, 4)\n all_anchors = tf.placeholder(tf.float32, shape=all_anchors_shape)\n layers = model(\n pretrained_output, image_shape, all_anchors, gt_boxes=gt_boxes\n )\n\n with self.test_session() as sess:\n sess.run(tf.global_variables_initializer())\n layers_inst = sess.run(layers, feed_dict={\n pretrained_output: np.random.rand(\n *pretrained_output_shape\n ),\n gt_boxes: generate_gt_boxes(\n gt_boxes_shape[0], image_shape_val\n ),\n all_anchors: generate_anchors(\n generate_anchors_reference(\n self.base_size, self.ratios, self.scales\n ),\n 16,\n pretrained_output_shape[1:3]\n ),\n image_shape: image_shape_val,\n })\n\n # Assertions\n proposals = layers_inst['proposals']\n scores = layers_inst['scores']\n rpn_cls_prob = layers_inst['rpn_cls_prob']\n rpn_cls_score = layers_inst['rpn_cls_score']\n rpn_bbox_pred = layers_inst['rpn_bbox_pred']\n rpn_cls_target = layers_inst['rpn_cls_target']\n rpn_bbox_target = layers_inst['rpn_bbox_target']\n # Everything should have dtype=tf.float32\n self.assertAllEqual(\n # We have 7 values we want to compare to tf.float32.\n [tf.float32] * 7,\n [\n proposals.dtype, scores.dtype, rpn_cls_prob.dtype,\n rpn_cls_score.dtype, rpn_bbox_pred.dtype,\n rpn_cls_target.dtype, rpn_bbox_target.dtype,\n ]\n\n )\n\n def testLoss(self):\n \"\"\"Tests that loss returns reasonable values in simple cases.\n \"\"\"\n model = RPN(\n self.num_anchors, self.config, debug=True\n )\n\n # Define placeholders that are used inside the loss method.\n rpn_cls_prob = tf.placeholder(tf.float32)\n rpn_cls_target = tf.placeholder(tf.float32)\n rpn_cls_score = tf.placeholder(tf.float32)\n rpn_bbox_target = tf.placeholder(tf.float32)\n rpn_bbox_pred = tf.placeholder(tf.float32)\n\n loss = model.loss({\n 'rpn_cls_prob': rpn_cls_prob,\n 'rpn_cls_target': rpn_cls_target,\n 'rpn_cls_score': rpn_cls_score,\n 'rpn_bbox_target': rpn_bbox_target,\n 'rpn_bbox_pred': rpn_bbox_pred,\n })\n\n # Test perfect score.\n with self.test_session() as sess:\n sess.run(tf.global_variables_initializer())\n loss_dict = sess.run(loss, feed_dict={\n # Probability is (background_prob, foreground_prob)\n rpn_cls_prob: [[0, 1], [1., 0]],\n # Target: 1 being foreground, 0 being background.\n rpn_cls_target: [1, 0],\n # Class scores before applying softmax. 
Since using cross\n # entropy, we need a big difference between values.\n rpn_cls_score: [[-100., 100.], [100., -100.]],\n # Targets and predictions are exactly equal.\n rpn_bbox_target: [[0.1, 0.1, 0.1, 0.1], [0.1, 0.1, 0.1, 0.1]],\n rpn_bbox_pred: [[0.1, 0.1, 0.1, 0.1], [0.1, 0.1, 0.1, 0.1]],\n })\n\n # Assert close since cross-entropy could return very small value.\n self.assertAllClose(tuple(loss_dict.values()), (0, 0))\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n" ]
[ [ "tensorflow.concat", "tensorflow.unstack", "tensorflow.stack", "tensorflow.cast", "tensorflow.minimum", "tensorflow.placeholder", "tensorflow.exp", "numpy.all", "tensorflow.name_scope", "tensorflow.Session", "tensorflow.log", "tensorflow.split" ], [ "numpy.unique", "tensorflow.placeholder", "tensorflow.test.main", "numpy.ones", "tensorflow.global_variables_initializer", "tensorflow.reset_default_graph", "numpy.random.rand", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
JayeshSukhija/ga-learner-dsmp-repo
[ "4c05d980462dde423b6be41cca1218d6d98e8e48" ]
[ "Numpy/code.py" ]
[ "# --------------\n# Importing header files\r\nimport numpy as np\r\n\r\n#New record\r\nnew_record=[[50, 9, 4, 1, 0, 0, 40, 0]]\r\n\r\n#Code starts here\r\n\r\n#Loading data file and saving it into a new numpy array \r\ndata = np.genfromtxt(path, delimiter=\",\", skip_header=1)\r\n\r\nprint(data.shape)\r\n\r\n#Concatenating the new record to the existing numpy array\r\ncensus=np.concatenate((data, new_record),axis = 0)\r\n\r\nprint(census.shape)\r\n\r\n#Code ends here\n\n\n# --------------\n#Code starts here\r\nimport numpy as np\r\nage=census[:,0]\r\nprint (age)\r\nprint ('='*50)\r\n\r\nmax_age=np.max(age)\r\nprint (max_age)\r\nprint ('='*50)\r\n\r\nmin_age=np.min(age)\r\nprint (min_age)\r\nprint ('='*50)\r\n\r\nage_mean=np.mean(age)\r\nprint (age_mean)\r\nprint ('='*50)\r\n\r\nage_std=np.std(age)\r\nprint (age_std)\r\nprint('='*50)\r\n\r\n\r\n\n\n\n# --------------\n#Code starts here\r\n\r\n#Creating new subsets based on 'Age'\r\nrace_0=census[census[:,2]==0]\r\nrace_1=census[census[:,2]==1]\r\nrace_2=census[census[:,2]==2]\r\nrace_3=census[census[:,2]==3]\r\nrace_4=census[census[:,2]==4]\r\n\r\n\r\n#Finding the length of the above created subsets\r\nlen_0=len(race_0)\r\nlen_1=len(race_1)\r\nlen_2=len(race_2)\r\nlen_3=len(race_3)\r\nlen_4=len(race_4)\r\n\r\n#Printing the length of the above created subsets\r\nprint('Race_0: ', len_0)\r\nprint('Race_1: ', len_1)\r\nprint('Race_2: ', len_2)\r\nprint('Race_3: ', len_3)\r\nprint('Race_4: ', len_4)\r\n\r\n#Storing the different race lengths with appropriate indexes\r\nrace_list=[len_0, len_1,len_2, len_3, len_4]\r\n\r\n#Storing the race with minimum length into a variable \r\nminority_race=race_list.index(min(race_list))\r\nprint ('minority_race:',minority_race)\r\n\r\n#Code ends here\n\n\n# --------------\n#Code starts here\r\nimport numpy as np\r\nsenior_citizens=census[census[:,0 ]>60]\r\nworking_hours_sum=senior_citizens.sum(axis=0)[6]\r\nsenior_citizens_len=len(senior_citizens)\r\navg_working_hours=(working_hours_sum/senior_citizens_len)\r\nprint (avg_working_hours)\n\n\n# --------------\n#Code starts here\r\nimport numpy as np\r\nhigh=census[census[:,1 ]>10]\r\nlow=census[census[:,1]<=10]\r\navg_pay_high=high.mean(axis=0)[7]\r\navg_pay_low=low.mean(axis=0)[7]\r\nif (avg_pay_high>avg_pay_low):\r\n print (\"Better Education leads to better pay\")\r\nelse:\r\n print (\"Better Education does not leads to better pay\")\n\n\n" ]
[ [ "numpy.min", "numpy.genfromtxt", "numpy.concatenate", "numpy.max", "numpy.std", "numpy.mean" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
NateLol/BAM_A_lightweight_but_efficient_Balanced_attention_mechanism_for_super_resolution
[ "4c977ea1586e7836248acb5cbd648e124b43aca3" ]
[ "EDSR/common.py" ]
[ "import math\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\nfrom torch.autograd import Variable\r\n\r\ndef default_conv(in_channels, out_channels, kernel_size, bias=True):\r\n return nn.Conv2d(\r\n in_channels, out_channels, kernel_size,\r\n padding=(kernel_size//2), bias=bias)\r\n\r\nclass MeanShift(nn.Conv2d):\r\n def __init__(self, rgb_range, rgb_mean=(0.4488, 0.4371, 0.4040), rgb_std=(1.0, 1.0, 1.0), sign=-1):\r\n super(MeanShift, self).__init__(3, 3, kernel_size=1)\r\n std = torch.Tensor(rgb_std)\r\n self.weight.data = torch.eye(3).view(3, 3, 1, 1)\r\n self.weight.data.div_(std.view(3, 1, 1, 1))\r\n self.bias.data = sign * rgb_range * torch.Tensor(rgb_mean)\r\n self.bias.data.div_(std)\r\n self.requires_grad = False\r\n\r\nclass BasicBlock(nn.Sequential):\r\n def __init__(\r\n self, in_channels, out_channels, kernel_size, stride=1, bias=False,\r\n bn=True, act=nn.ReLU(True)):\r\n\r\n m = [nn.Conv2d(\r\n in_channels, out_channels, kernel_size,\r\n padding=(kernel_size//2), stride=stride, bias=bias)\r\n ]\r\n if bn: m.append(nn.BatchNorm2d(out_channels))\r\n if act is not None: m.append(act)\r\n super(BasicBlock, self).__init__(*m)\r\n\r\nclass ResBlock(nn.Module):\r\n def __init__(\r\n self, conv, n_feats, kernel_size,\r\n bias=True, bn=False, act=nn.ReLU(True), res_scale=1):\r\n\r\n super(ResBlock, self).__init__()\r\n m = []\r\n for i in range(2):\r\n m.append(conv(n_feats, n_feats, kernel_size, bias=bias))\r\n if bn: m.append(nn.BatchNorm2d(n_feats))\r\n if i == 0: m.append(act)\r\n\r\n self.body = nn.Sequential(*m)\r\n self.res_scale = res_scale\r\n\r\n def forward(self, x):\r\n res = self.body(x).mul(self.res_scale)\r\n res += x\r\n\r\n return res\r\n\r\n\r\nclass Upsampler(nn.Sequential):\r\n def __init__(self, conv, scale, n_feats, bn=False, act=False, bias=True):\r\n\r\n m = []\r\n if (scale & (scale - 1)) == 0: # Is scale = 2^n?\r\n for _ in range(int(math.log(scale, 2))):\r\n m.append(conv(n_feats, 4 * n_feats, 3, bias))\r\n m.append(nn.PixelShuffle(2))\r\n if bn: m.append(nn.BatchNorm2d(n_feats))\r\n\r\n if act == 'relu':\r\n m.append(nn.ReLU(True))\r\n elif act == 'prelu':\r\n m.append(nn.PReLU(n_feats))\r\n\r\n elif scale == 3:\r\n m.append(conv(n_feats, 9 * n_feats, 3, bias))\r\n m.append(nn.PixelShuffle(3))\r\n if bn: m.append(nn.BatchNorm2d(n_feats))\r\n\r\n if act == 'relu':\r\n m.append(nn.ReLU(True))\r\n elif act == 'prelu':\r\n m.append(nn.PReLU(n_feats))\r\n else:\r\n raise NotImplementedError\r\n\r\n super(Upsampler, self).__init__(*m)" ]
[ [ "torch.nn.Sequential", "torch.Tensor", "torch.nn.PReLU", "torch.nn.Conv2d", "torch.eye", "torch.nn.PixelShuffle", "torch.nn.BatchNorm2d", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
siddgoel/ray
[ "7f3031f451de410b71a5fcb18e04452bfa7351d6" ]
[ "python/ray/ml/tests/test_preprocessors.py" ]
[ "import warnings\nfrom unittest.mock import patch\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nimport ray\nfrom ray.ml.preprocessor import PreprocessorNotFittedException\nfrom ray.ml.preprocessors import (\n BatchMapper,\n StandardScaler,\n MinMaxScaler,\n OrdinalEncoder,\n OneHotEncoder,\n LabelEncoder,\n SimpleImputer,\n Chain,\n)\n\n\ndef test_standard_scaler():\n \"\"\"Tests basic StandardScaler functionality.\"\"\"\n col_a = [-1, 0, 1, 2]\n col_b = [1, 1, 5, 5]\n col_c = [1, 1, 1, None]\n in_df = pd.DataFrame.from_dict({\"A\": col_a, \"B\": col_b, \"C\": col_c})\n ds = ray.data.from_pandas(in_df)\n\n scaler = StandardScaler([\"B\", \"C\"])\n\n # Transform with unfitted preprocessor.\n with pytest.raises(PreprocessorNotFittedException):\n scaler.transform(ds)\n\n # Fit data.\n scaler.fit(ds)\n assert scaler.stats_ == {\n \"mean(B)\": 3.0,\n \"mean(C)\": 1.0,\n \"std(B)\": 2.0,\n \"std(C)\": 0.0,\n }\n\n # Transform data.\n transformed = scaler.transform(ds)\n out_df = transformed.to_pandas()\n\n processed_col_a = col_a\n processed_col_b = [-1.0, -1.0, 1.0, 1.0]\n processed_col_c = [0.0, 0.0, 0.0, None]\n expected_df = pd.DataFrame.from_dict(\n {\"A\": processed_col_a, \"B\": processed_col_b, \"C\": processed_col_c}\n )\n\n assert out_df.equals(expected_df)\n\n # Transform batch.\n pred_col_a = [1, 2, 3]\n pred_col_b = [3, 5, 7]\n pred_col_c = [0, 1, 2]\n pred_in_df = pd.DataFrame.from_dict(\n {\"A\": pred_col_a, \"B\": pred_col_b, \"C\": pred_col_c}\n )\n\n pred_out_df = scaler.transform_batch(pred_in_df)\n\n pred_processed_col_a = pred_col_a\n pred_processed_col_b = [0.0, 1.0, 2.0]\n pred_processed_col_c = [-1.0, 0.0, 1.0]\n pred_expected_df = pd.DataFrame.from_dict(\n {\n \"A\": pred_processed_col_a,\n \"B\": pred_processed_col_b,\n \"C\": pred_processed_col_c,\n }\n )\n\n assert pred_out_df.equals(pred_expected_df)\n\n\[email protected](warnings, \"warn\")\ndef test_fit_twice(mocked_warn):\n \"\"\"Tests that a warning msg should be printed.\"\"\"\n col_a = [-1, 0, 1]\n col_b = [1, 3, 5]\n col_c = [1, 1, None]\n in_df = pd.DataFrame.from_dict({\"A\": col_a, \"B\": col_b, \"C\": col_c})\n ds = ray.data.from_pandas(in_df)\n\n scaler = MinMaxScaler([\"B\", \"C\"])\n\n # Fit data.\n scaler.fit(ds)\n assert scaler.stats_ == {\"min(B)\": 1, \"max(B)\": 5, \"min(C)\": 1, \"max(C)\": 1}\n\n ds = ds.map_batches(lambda x: x * 2)\n # Fit again\n scaler.fit(ds)\n # Assert that the fitted state is corresponding to the second ds.\n assert scaler.stats_ == {\"min(B)\": 2, \"max(B)\": 10, \"min(C)\": 2, \"max(C)\": 2}\n msg = (\n \"`fit` has already been called on the preprocessor (or at least one \"\n \"contained preprocessors if this is a chain). 
\"\n \"All previously fitted state will be overwritten!\"\n )\n mocked_warn.assert_called_once_with(msg)\n\n\ndef test_min_max_scaler():\n \"\"\"Tests basic MinMaxScaler functionality.\"\"\"\n col_a = [-1, 0, 1]\n col_b = [1, 3, 5]\n col_c = [1, 1, None]\n in_df = pd.DataFrame.from_dict({\"A\": col_a, \"B\": col_b, \"C\": col_c})\n ds = ray.data.from_pandas(in_df)\n\n scaler = MinMaxScaler([\"B\", \"C\"])\n\n # Transform with unfitted preprocessor.\n with pytest.raises(PreprocessorNotFittedException):\n scaler.transform(ds)\n\n # Fit data.\n scaler.fit(ds)\n assert scaler.stats_ == {\"min(B)\": 1, \"max(B)\": 5, \"min(C)\": 1, \"max(C)\": 1}\n\n transformed = scaler.transform(ds)\n out_df = transformed.to_pandas()\n\n processed_col_a = col_a\n processed_col_b = [0.0, 0.5, 1.0]\n processed_col_c = [0.0, 0.0, None]\n expected_df = pd.DataFrame.from_dict(\n {\"A\": processed_col_a, \"B\": processed_col_b, \"C\": processed_col_c}\n )\n\n assert out_df.equals(expected_df)\n\n # Transform batch.\n pred_col_a = [1, 2, 3]\n pred_col_b = [3, 5, 7]\n pred_col_c = [0, 1, 2]\n pred_in_df = pd.DataFrame.from_dict(\n {\"A\": pred_col_a, \"B\": pred_col_b, \"C\": pred_col_c}\n )\n\n pred_out_df = scaler.transform_batch(pred_in_df)\n\n pred_processed_col_a = pred_col_a\n pred_processed_col_b = [0.5, 1.0, 1.5]\n pred_processed_col_c = [-1.0, 0.0, 1.0]\n pred_expected_df = pd.DataFrame.from_dict(\n {\n \"A\": pred_processed_col_a,\n \"B\": pred_processed_col_b,\n \"C\": pred_processed_col_c,\n }\n )\n\n assert pred_out_df.equals(pred_expected_df)\n\n\ndef test_ordinal_encoder():\n \"\"\"Tests basic OrdinalEncoder functionality.\"\"\"\n col_a = [\"red\", \"green\", \"blue\", \"red\"]\n col_b = [\"warm\", \"cold\", \"hot\", \"cold\"]\n col_c = [1, 10, 5, 10]\n in_df = pd.DataFrame.from_dict({\"A\": col_a, \"B\": col_b, \"C\": col_c})\n ds = ray.data.from_pandas(in_df)\n\n encoder = OrdinalEncoder([\"B\", \"C\"])\n\n # Transform with unfitted preprocessor.\n with pytest.raises(PreprocessorNotFittedException):\n encoder.transform(ds)\n\n # Fit data.\n encoder.fit(ds)\n assert encoder.stats_ == {\n \"unique_values(B)\": {\"cold\": 0, \"hot\": 1, \"warm\": 2},\n \"unique_values(C)\": {1: 0, 5: 1, 10: 2},\n }\n\n # Transform data.\n transformed = encoder.transform(ds)\n out_df = transformed.to_pandas()\n\n processed_col_a = col_a\n processed_col_b = [2, 0, 1, 0]\n processed_col_c = [0, 2, 1, 2]\n expected_df = pd.DataFrame.from_dict(\n {\"A\": processed_col_a, \"B\": processed_col_b, \"C\": processed_col_c}\n )\n\n assert out_df.equals(expected_df)\n\n # Transform batch.\n pred_col_a = [\"blue\", \"yellow\", None]\n pred_col_b = [\"cold\", \"warm\", \"other\"]\n pred_col_c = [10, 1, 20]\n pred_in_df = pd.DataFrame.from_dict(\n {\"A\": pred_col_a, \"B\": pred_col_b, \"C\": pred_col_c}\n )\n\n pred_out_df = encoder.transform_batch(pred_in_df)\n\n pred_processed_col_a = pred_col_a\n pred_processed_col_b = [0, 2, None]\n pred_processed_col_c = [2, 0, None]\n pred_expected_df = pd.DataFrame.from_dict(\n {\n \"A\": pred_processed_col_a,\n \"B\": pred_processed_col_b,\n \"C\": pred_processed_col_c,\n }\n )\n\n assert pred_out_df.equals(pred_expected_df)\n\n # Test null behavior.\n null_col = [1, None]\n nonnull_col = [1, 1]\n null_df = pd.DataFrame.from_dict({\"A\": null_col})\n null_ds = ray.data.from_pandas(null_df)\n nonnull_df = pd.DataFrame.from_dict({\"A\": nonnull_col})\n nonnull_ds = ray.data.from_pandas(nonnull_df)\n null_encoder = OrdinalEncoder([\"A\"])\n\n # Verify fit fails for null values.\n with 
pytest.raises(ValueError):\n null_encoder.fit(null_ds)\n null_encoder.fit(nonnull_ds)\n\n # Verify transform fails for null values.\n with pytest.raises(ValueError):\n null_encoder.transform(null_ds)\n null_encoder.transform(nonnull_ds)\n\n # Verify transform_batch fails for null values.\n with pytest.raises(ValueError):\n null_encoder.transform_batch(null_df)\n null_encoder.transform_batch(nonnull_df)\n\n\ndef test_one_hot_encoder():\n \"\"\"Tests basic OneHotEncoder functionality.\"\"\"\n col_a = [\"red\", \"green\", \"blue\", \"red\"]\n col_b = [\"warm\", \"cold\", \"hot\", \"cold\"]\n col_c = [1, 10, 5, 10]\n in_df = pd.DataFrame.from_dict({\"A\": col_a, \"B\": col_b, \"C\": col_c})\n ds = ray.data.from_pandas(in_df)\n\n encoder = OneHotEncoder([\"B\", \"C\"])\n\n # Transform with unfitted preprocessor.\n with pytest.raises(PreprocessorNotFittedException):\n encoder.transform(ds)\n\n # Fit data.\n encoder.fit(ds)\n\n assert encoder.stats_ == {\n \"unique_values(B)\": {\"cold\": 0, \"hot\": 1, \"warm\": 2},\n \"unique_values(C)\": {1: 0, 5: 1, 10: 2},\n }\n\n # Transform data.\n transformed = encoder.transform(ds)\n out_df = transformed.to_pandas()\n\n processed_col_a = col_a\n processed_col_b_cold = [0, 1, 0, 1]\n processed_col_b_hot = [0, 0, 1, 0]\n processed_col_b_warm = [1, 0, 0, 0]\n processed_col_c_1 = [1, 0, 0, 0]\n processed_col_c_5 = [0, 0, 1, 0]\n processed_col_c_10 = [0, 1, 0, 1]\n expected_df = pd.DataFrame.from_dict(\n {\n \"A\": processed_col_a,\n \"B_cold\": processed_col_b_cold,\n \"B_hot\": processed_col_b_hot,\n \"B_warm\": processed_col_b_warm,\n \"C_1\": processed_col_c_1,\n \"C_5\": processed_col_c_5,\n \"C_10\": processed_col_c_10,\n }\n )\n\n assert out_df.equals(expected_df)\n\n # Transform batch.\n pred_col_a = [\"blue\", \"yellow\", None]\n pred_col_b = [\"cold\", \"warm\", \"other\"]\n pred_col_c = [10, 1, 20]\n pred_in_df = pd.DataFrame.from_dict(\n {\"A\": pred_col_a, \"B\": pred_col_b, \"C\": pred_col_c}\n )\n\n pred_out_df = encoder.transform_batch(pred_in_df)\n\n pred_processed_col_a = [\"blue\", \"yellow\", None]\n pred_processed_col_b_cold = [1, 0, 0]\n pred_processed_col_b_hot = [0, 0, 0]\n pred_processed_col_b_warm = [0, 1, 0]\n pred_processed_col_c_1 = [0, 1, 0]\n pred_processed_col_c_5 = [0, 0, 0]\n pred_processed_col_c_10 = [1, 0, 0]\n pred_expected_df = pd.DataFrame.from_dict(\n {\n \"A\": pred_processed_col_a,\n \"B_cold\": pred_processed_col_b_cold,\n \"B_hot\": pred_processed_col_b_hot,\n \"B_warm\": pred_processed_col_b_warm,\n \"C_1\": pred_processed_col_c_1,\n \"C_5\": pred_processed_col_c_5,\n \"C_10\": pred_processed_col_c_10,\n }\n )\n\n assert pred_out_df.equals(pred_expected_df)\n\n # Test null behavior.\n null_col = [1, None]\n nonnull_col = [1, 1]\n null_df = pd.DataFrame.from_dict({\"A\": null_col})\n null_ds = ray.data.from_pandas(null_df)\n nonnull_df = pd.DataFrame.from_dict({\"A\": nonnull_col})\n nonnull_ds = ray.data.from_pandas(nonnull_df)\n null_encoder = OneHotEncoder([\"A\"])\n\n # Verify fit fails for null values.\n with pytest.raises(ValueError):\n null_encoder.fit(null_ds)\n null_encoder.fit(nonnull_ds)\n\n # Verify transform fails for null values.\n with pytest.raises(ValueError):\n null_encoder.transform(null_ds)\n null_encoder.transform(nonnull_ds)\n\n # Verify transform_batch fails for null values.\n with pytest.raises(ValueError):\n null_encoder.transform_batch(null_df)\n null_encoder.transform_batch(nonnull_df)\n\n\ndef test_label_encoder():\n \"\"\"Tests basic LabelEncoder functionality.\"\"\"\n col_a = [\"red\", 
\"green\", \"blue\", \"red\"]\n col_b = [\"warm\", \"cold\", \"cold\", \"hot\"]\n col_c = [1, 2, 3, 4]\n in_df = pd.DataFrame.from_dict({\"A\": col_a, \"B\": col_b, \"C\": col_c})\n ds = ray.data.from_pandas(in_df)\n\n encoder = LabelEncoder(\"A\")\n\n # Transform with unfitted preprocessor.\n with pytest.raises(PreprocessorNotFittedException):\n encoder.transform(ds)\n\n # Fit data.\n encoder.fit(ds)\n\n assert encoder.stats_ == {\"unique_values(A)\": {\"blue\": 0, \"green\": 1, \"red\": 2}}\n\n # Transform data.\n transformed = encoder.transform(ds)\n out_df = transformed.to_pandas()\n\n processed_col_a = [2, 1, 0, 2]\n processed_col_b = col_b\n processed_col_c = col_c\n expected_df = pd.DataFrame.from_dict(\n {\"A\": processed_col_a, \"B\": processed_col_b, \"C\": processed_col_c}\n )\n assert out_df.equals(expected_df)\n\n # Transform batch.\n pred_col_a = [\"blue\", \"red\", \"yellow\"]\n pred_col_b = [\"cold\", \"unknown\", None]\n pred_col_c = [10, 20, None]\n pred_in_df = pd.DataFrame.from_dict(\n {\"A\": pred_col_a, \"B\": pred_col_b, \"C\": pred_col_c}\n )\n\n pred_out_df = encoder.transform_batch(pred_in_df)\n\n pred_processed_col_a = [0, 2, None]\n pred_processed_col_b = pred_col_b\n pred_processed_col_c = pred_col_c\n pred_expected_df = pd.DataFrame.from_dict(\n {\n \"A\": pred_processed_col_a,\n \"B\": pred_processed_col_b,\n \"C\": pred_processed_col_c,\n }\n )\n assert pred_out_df.equals(pred_expected_df)\n\n # Test null behavior.\n null_col = [1, None]\n nonnull_col = [1, 1]\n null_df = pd.DataFrame.from_dict({\"A\": null_col})\n null_ds = ray.data.from_pandas(null_df)\n nonnull_df = pd.DataFrame.from_dict({\"A\": nonnull_col})\n nonnull_ds = ray.data.from_pandas(nonnull_df)\n null_encoder = LabelEncoder(\"A\")\n\n # Verify fit fails for null values.\n with pytest.raises(ValueError):\n null_encoder.fit(null_ds)\n null_encoder.fit(nonnull_ds)\n\n # Verify transform fails for null values.\n with pytest.raises(ValueError):\n null_encoder.transform(null_ds)\n null_encoder.transform(nonnull_ds)\n\n # Verify transform_batch fails for null values.\n with pytest.raises(ValueError):\n null_encoder.transform_batch(null_df)\n null_encoder.transform_batch(nonnull_df)\n\n\ndef test_simple_imputer():\n col_a = [1, 1, 1, np.nan]\n col_b = [1, 3, None, np.nan]\n col_c = [1, 1, 1, 1]\n in_df = pd.DataFrame.from_dict({\"A\": col_a, \"B\": col_b, \"C\": col_c})\n\n ds = ray.data.from_pandas(in_df)\n\n imputer = SimpleImputer([\"B\", \"C\"])\n\n # Transform with unfitted preprocessor.\n with pytest.raises(PreprocessorNotFittedException):\n imputer.transform(ds)\n\n # Fit data.\n imputer.fit(ds)\n assert imputer.stats_ == {\"mean(B)\": 2.0, \"mean(C)\": 1.0}\n\n # Transform data.\n transformed = imputer.transform(ds)\n out_df = transformed.to_pandas()\n\n processed_col_a = col_a\n processed_col_b = [1.0, 3.0, 2.0, 2.0]\n processed_col_c = [1, 1, 1, 1]\n expected_df = pd.DataFrame.from_dict(\n {\"A\": processed_col_a, \"B\": processed_col_b, \"C\": processed_col_c}\n )\n\n assert out_df.equals(expected_df)\n\n # Transform batch.\n pred_col_a = [1, 2, np.nan]\n pred_col_b = [1, 2, np.nan]\n pred_col_c = [None, None, None]\n pred_in_df = pd.DataFrame.from_dict(\n {\"A\": pred_col_a, \"B\": pred_col_b, \"C\": pred_col_c}\n )\n\n pred_out_df = imputer.transform_batch(pred_in_df)\n\n pred_processed_col_a = pred_col_a\n pred_processed_col_b = [1.0, 2.0, 2.0]\n pred_processed_col_c = [1.0, 1.0, 1.0]\n pred_expected_df = pd.DataFrame.from_dict(\n {\n \"A\": pred_processed_col_a,\n \"B\": 
pred_processed_col_b,\n \"C\": pred_processed_col_c,\n }\n )\n\n assert pred_out_df.equals(pred_expected_df)\n\n # Test \"most_frequent\" strategy.\n most_frequent_col_a = [1, 2, 2, None, None, None]\n most_frequent_col_b = [None, \"c\", \"c\", \"b\", \"b\", \"a\"]\n most_frequent_df = pd.DataFrame.from_dict(\n {\"A\": most_frequent_col_a, \"B\": most_frequent_col_b}\n )\n most_frequent_ds = ray.data.from_pandas(most_frequent_df).repartition(3)\n\n most_frequent_imputer = SimpleImputer([\"A\", \"B\"], strategy=\"most_frequent\")\n most_frequent_imputer.fit(most_frequent_ds)\n assert most_frequent_imputer.stats_ == {\n \"most_frequent(A)\": 2.0,\n \"most_frequent(B)\": \"c\",\n }\n\n most_frequent_transformed = most_frequent_imputer.transform(most_frequent_ds)\n most_frequent_out_df = most_frequent_transformed.to_pandas()\n\n most_frequent_processed_col_a = [1.0, 2.0, 2.0, 2.0, 2.0, 2.0]\n most_frequent_processed_col_b = [\"c\", \"c\", \"c\", \"b\", \"b\", \"a\"]\n most_frequent_expected_df = pd.DataFrame.from_dict(\n {\"A\": most_frequent_processed_col_a, \"B\": most_frequent_processed_col_b}\n )\n\n assert most_frequent_out_df.equals(most_frequent_expected_df)\n\n # Test \"constant\" strategy.\n constant_col_a = [\"apple\", None]\n constant_df = pd.DataFrame.from_dict({\"A\": constant_col_a})\n constant_ds = ray.data.from_pandas(constant_df)\n\n with pytest.raises(ValueError):\n SimpleImputer([\"A\"], strategy=\"constant\")\n\n constant_imputer = SimpleImputer(\n [\"A\", \"B\"], strategy=\"constant\", fill_value=\"missing\"\n )\n constant_transformed = constant_imputer.transform(constant_ds)\n constant_out_df = constant_transformed.to_pandas()\n\n constant_processed_col_a = [\"apple\", \"missing\"]\n constant_expected_df = pd.DataFrame.from_dict({\"A\": constant_processed_col_a})\n\n assert constant_out_df.equals(constant_expected_df)\n\n\ndef test_chain():\n \"\"\"Tests basic Chain functionality.\"\"\"\n col_a = [-1, -1, 1, 1]\n col_b = [1, 1, 1, None]\n col_c = [\"sunday\", \"monday\", \"tuesday\", \"tuesday\"]\n in_df = pd.DataFrame.from_dict({\"A\": col_a, \"B\": col_b, \"C\": col_c})\n ds = ray.data.from_pandas(in_df)\n\n def udf(df):\n df[\"A\"] *= 2\n return df\n\n batch_mapper = BatchMapper(fn=udf)\n imputer = SimpleImputer([\"B\"])\n scaler = StandardScaler([\"A\", \"B\"])\n encoder = LabelEncoder(\"C\")\n chain = Chain(scaler, imputer, encoder, batch_mapper)\n\n # Fit data.\n chain.fit(ds)\n assert imputer.stats_ == {\n \"mean(B)\": 0.0,\n }\n assert scaler.stats_ == {\n \"mean(A)\": 0.0,\n \"mean(B)\": 1.0,\n \"std(A)\": 1.0,\n \"std(B)\": 0.0,\n }\n assert encoder.stats_ == {\n \"unique_values(C)\": {\"monday\": 0, \"sunday\": 1, \"tuesday\": 2}\n }\n\n # Transform data.\n transformed = chain.transform(ds)\n out_df = transformed.to_pandas()\n\n processed_col_a = [-2.0, -2.0, 2.0, 2.0]\n processed_col_b = [0.0, 0.0, 0.0, 0.0]\n processed_col_c = [1, 0, 2, 2]\n expected_df = pd.DataFrame.from_dict(\n {\"A\": processed_col_a, \"B\": processed_col_b, \"C\": processed_col_c}\n )\n\n assert out_df.equals(expected_df)\n\n # Transform batch.\n pred_col_a = [1, 2, None]\n pred_col_b = [0, None, 2]\n pred_col_c = [\"monday\", \"tuesday\", \"wednesday\"]\n pred_in_df = pd.DataFrame.from_dict(\n {\"A\": pred_col_a, \"B\": pred_col_b, \"C\": pred_col_c}\n )\n\n pred_out_df = chain.transform_batch(pred_in_df)\n\n pred_processed_col_a = [2, 4, None]\n pred_processed_col_b = [-1.0, 0.0, 1.0]\n pred_processed_col_c = [0, 2, None]\n pred_expected_df = pd.DataFrame.from_dict(\n {\n \"A\": 
pred_processed_col_a,\n \"B\": pred_processed_col_b,\n \"C\": pred_processed_col_c,\n }\n )\n\n assert pred_out_df.equals(pred_expected_df)\n\n\ndef test_batch_mapper():\n \"\"\"Tests batch mapper functionality.\"\"\"\n old_column = [1, 2, 3, 4]\n to_be_modified = [1, -1, 1, -1]\n in_df = pd.DataFrame.from_dict(\n {\"old_column\": old_column, \"to_be_modified\": to_be_modified}\n )\n ds = ray.data.from_pandas(in_df)\n\n def add_and_modify_udf(df: \"pd.DataFrame\"):\n df[\"new_col\"] = df[\"old_column\"] + 1\n df[\"to_be_modified\"] *= 2\n return df\n\n batch_mapper = BatchMapper(fn=add_and_modify_udf)\n batch_mapper.fit(ds)\n transformed = batch_mapper.transform(ds)\n out_df = transformed.to_pandas()\n\n expected_df = pd.DataFrame.from_dict(\n {\n \"old_column\": old_column,\n \"to_be_modified\": [2, -2, 2, -2],\n \"new_col\": [2, 3, 4, 5],\n }\n )\n\n assert out_df.equals(expected_df)\n\n\nif __name__ == \"__main__\":\n import sys\n\n sys.exit(pytest.main([\"-sv\", __file__]))\n" ]
[ [ "pandas.DataFrame.from_dict" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
alexanu/pyqstrat
[ "ec62a1a7b048df05e8d1058a37bfe2cf113d2815" ]
[ "pyqstrat/account.py" ]
[ "from collections import defaultdict\nfrom sortedcontainers import SortedDict\nimport math\nimport pandas as pd\nimport numpy as np\nfrom pyqstrat.pq_types import ContractGroup, Trade, Contract\nfrom types import SimpleNamespace\nfrom typing import Sequence, Any, Tuple, Callable, Union, MutableSet, MutableSequence, MutableMapping, List\n\n\ndef calc_trade_pnl(open_qtys: np.ndarray, \n open_prices: np.ndarray, \n new_qtys: np.ndarray, \n new_prices: np.ndarray, \n multiplier: float) -> Tuple[np.ndarray, np.ndarray, float, float, float]:\n '''\n >>> print(calc_trade_pnl(\n ... open_qtys = np.array([], dtype = np.float), open_prices = np.array([], dtype = np.float), \n ... new_qtys = np.array([-8, 9, -4]), new_prices = np.array([10, 11, 6]), multiplier = 100))\n (array([-3.]), array([6.]), -3.0, 6.0, -1300.0)\n >>> print(calc_trade_pnl(open_qtys = np.array([], dtype = np.float), open_prices = np.array([], dtype = np.float), new_qtys = np.array([3, 10, -5]), \n ... new_prices = np.array([51, 50, 45]), multiplier = 100))\n (array([8.]), array([50.]), 8.0, 50.0, -2800.0)\n >>> print(calc_trade_pnl(open_qtys = np.array([]), open_prices = np.array([]), \n ... new_qtys = np.array([-58, -5, -5, 6, -8, 5, 5, -5, 19, 7, 5, -5, 39]),\n ... new_prices = np.array([2080, 2075.25, 2070.75, 2076, 2066.75, 2069.25, 2074.75, 2069.75, 2087.25, 2097.25, 2106, 2088.25, 2085.25]),\n ... multiplier = 50))\n (array([], dtype=float64), array([], dtype=float64), 0.0, 0, -33762.5) '''\n # TODO: Cythonize this\n \n realized = 0.\n \n new_qtys = new_qtys.copy()\n new_prices = new_prices.copy()\n\n _open_prices = np.zeros(len(open_prices) + len(new_prices), dtype=np.float)\n _open_prices[:len(open_prices)] = open_prices\n \n _open_qtys = np.zeros(len(open_qtys) + len(new_qtys), dtype=np.float)\n _open_qtys[:len(open_qtys)] = open_qtys\n \n new_qty_indices = np.nonzero(new_qtys)[0]\n open_qty_indices = np.zeros(len(_open_qtys), dtype=np.int)\n nonzero_indices = np.nonzero(_open_qtys)[0]\n open_qty_indices[:len(nonzero_indices)] = nonzero_indices \n\n i = 0 # index into new_qty_indices to get idx of the new qty we are currently netting\n o = len(nonzero_indices) # virtual length of open_qty_indices\n j = 0 # index into open_qty_indices to get idx of the open qty we are currently netting\n k = len(open_qtys) # virtual length of _open_qtys\n \n # Try to net all new trades against existing non-netted trades.\n # Append any remaining non-netted new trades to end of existing trades\n while i < len(new_qty_indices):\n # Always try to net first non-zero new trade against first non-zero existing trade\n # FIFO acccounting\n new_idx = new_qty_indices[i]\n new_qty, new_price = new_qtys[new_idx], new_prices[new_idx]\n \n # print(f'i: {i} j: {j} k: {k} o: {o} oq: {_open_qtys} oqi: {open_qty_indices} op: {_open_prices} nq: {new_qtys} np: {new_prices}')\n \n if j < o: # while we still have open positions to net against\n open_idx = open_qty_indices[j]\n open_qty, open_price = _open_qtys[open_idx], _open_prices[open_idx]\n \n if math.copysign(1, open_qty) == math.copysign(1, new_qty):\n # Nothing to net against so add this trade to the array and wait for the next offsetting trade\n \n _open_qtys[k] = new_qty\n _open_prices[k] = new_price\n open_qty_indices[o] = k\n k += 1\n o += 1\n\n new_qtys[new_idx] = 0\n i += 1\n\n elif abs(new_qty) > abs(open_qty):\n # New trade has more qty than offsetting trade so:\n # a. net against offsetting trade\n # b. remove the offsetting trade\n # c. 
reduce qty of new trade\n open_qty, open_price = _open_qtys[open_idx], _open_prices[open_idx]\n realized += open_qty * (new_price - open_price)\n # print(f'open_qty: {open_qty} open_price: {open_price} open_idx: {open_idx} i: {i}\n # j: {j} k: {k} l: {l} oq: {_open_qtys} oqi: {open_qty_indices} op: {_open_prices} nq: {new_qtys} np: {new_prices}')\n _open_qtys[open_idx] = 0\n j += 1\n\n new_qtys[new_idx] += open_qty\n else:\n # New trade has less qty than offsetting trade so:\n # a. net against offsetting trade\n # b. remove new trade\n # c. reduce qty of offsetting trade\n realized += new_qty * (open_price - new_price)\n new_qtys[new_idx] = 0\n i += 1\n _open_qtys[open_idx] += new_qty\n else:\n # Nothing to net against so add this trade to the open trades array and wait for the next offsetting trade\n _open_qtys[k] = new_qty\n _open_prices[k] = new_price\n open_qty_indices[o] = k\n k += 1\n o += 1\n\n new_qtys[new_idx] = 0\n i += 1\n\n mask = _open_qtys != 0\n _open_qtys = _open_qtys[mask]\n _open_prices = _open_prices[mask]\n open_qty = np.sum(_open_qtys)\n if math.isclose(open_qty, 0):\n weighted_avg_price = 0\n else:\n weighted_avg_price = np.sum(_open_qtys * _open_prices) / open_qty\n \n return _open_qtys, _open_prices, open_qty, weighted_avg_price, realized * multiplier\n\n\ndef leading_nan_to_zero(df: pd.DataFrame, columns: Sequence[str]) -> pd.DataFrame:\n for column in columns:\n vals = df[column].values\n first_non_nan_index = np.ravel(np.nonzero(~np.isnan(vals)))\n if len(first_non_nan_index):\n first_non_nan_index = first_non_nan_index[0]\n else:\n first_non_nan_index = -1\n\n if first_non_nan_index > 0 and first_non_nan_index < len(vals):\n vals[:first_non_nan_index] = np.nan_to_num(vals[:first_non_nan_index])\n df[column] = vals\n return df\n\n\ndef find_last_non_nan_index(array: np.ndarray) -> int:\n i = np.nonzero(np.isfinite(array))[0]\n if len(i): return i[-1]\n return 0\n\n\ndef find_index_before(sorted_dict: SortedDict, key: Any) -> int:\n '''\n Find index of the first key in a sorted dict that is less than or equal to the key passed in.\n If the key is less than the first key in the dict, return -1\n '''\n size = len(sorted_dict)\n if not size: return -1\n i = sorted_dict.bisect_left(key)\n if i == size: return size - 1\n if sorted_dict.keys()[i] != key:\n return i - 1\n return i\n\n\nclass ContractPNL:\n '''Computes pnl for a single contract over time given trades and market data'''\n def __init__(self, \n contract: Contract, \n account_timestamps: np.ndarray, \n price_function: Callable[[Contract, np.ndarray, int, SimpleNamespace], float],\n strategy_context: SimpleNamespace) -> None:\n self.contract = contract\n self._price_function = price_function\n self.strategy_context = strategy_context\n self._account_timestamps = account_timestamps\n self._trade_pnl = SortedDict()\n self._net_pnl = SortedDict()\n # Store trades that are not offset so when new trades come in we can offset against these to calc pnl\n self.open_qtys = np.empty(0, dtype=np.int)\n self.open_prices = np.empty(0, dtype=np.float)\n self.first_trade_timestamp = None\n self.final_pnl = np.nan\n \n def _add_trades(self, trades: Sequence[Trade]) -> None:\n '''\n Args:\n trades: Must be sorted by timestamp\n '''\n if not len(trades): return\n timestamps = [trade.timestamp for trade in trades]\n if len(self._trade_pnl):\n k, v = self._trade_pnl.peekitem(0)\n if timestamps[0] <= k:\n raise Exception(f'Can only add a trade that is newer than last added current: {timestamps[0]} prev max timestamp: {k}')\n \n 
if self.first_trade_timestamp is None: self.first_trade_timestamp = timestamps[0]\n \n for i, timestamp in enumerate(timestamps):\n t_trades = [trade for trade in trades if trade.timestamp == timestamp]\n open_qtys, open_prices, open_qty, weighted_avg_price, realized_chg = calc_trade_pnl(\n self.open_qtys, self.open_prices, \n np.array([trade.qty for trade in t_trades]), \n np.array([trade.price for trade in t_trades]),\n self.contract.multiplier)\n self.open_qtys = open_qtys\n self.open_prices = open_prices\n position_chg = sum([trade.qty for trade in t_trades])\n commission_chg = sum([trade.commission for trade in t_trades])\n fee_chg = sum([trade.fee for trade in t_trades])\n index = find_index_before(self._trade_pnl, timestamp)\n if index == -1:\n self._trade_pnl[timestamp] = (position_chg, realized_chg, fee_chg, commission_chg, open_qty, weighted_avg_price)\n else:\n prev_timestamp, (prev_position, prev_realized, prev_fee, prev_commission, _, _) = self._trade_pnl.peekitem(index)\n self._trade_pnl[timestamp] = (prev_position + position_chg, prev_realized + realized_chg,\n prev_fee + fee_chg, prev_commission + commission_chg, open_qty, weighted_avg_price)\n self.calc_net_pnl(timestamp)\n \n def calc_net_pnl(self, timestamp: np.datetime64) -> None:\n if timestamp in self._net_pnl: return\n if timestamp < self.first_trade_timestamp: return\n # TODO: Option expiry should be a special case. If option expires at 3:00 pm, we put in an expiry order at 3 pm and the\n # trade comes in at 3:01 pm. In this case, the final pnl is recorded at 3:01 but should be at 3 pm.\n if self.contract.expiry is not None and timestamp > self.contract.expiry and not math.isnan(self.final_pnl): return\n i = np.searchsorted(self._account_timestamps, timestamp)\n assert(self._account_timestamps[i] == timestamp)\n\n # Find the index before or equal to current timestamp. 
If not found, set to 0's\n trade_pnl_index = find_index_before(self._trade_pnl, timestamp)\n if trade_pnl_index == -1:\n realized, fee, commission, open_qty, open_qty, weighted_avg_price = 0, 0, 0, 0, 0, 0\n else:\n _, (_, realized, fee, commission, open_qty, weighted_avg_price) = self._trade_pnl.peekitem(trade_pnl_index)\n\n price = np.nan\n\n if math.isclose(open_qty, 0):\n unrealized = 0\n else:\n price = self._price_function(self.contract, self._account_timestamps, i, self.strategy_context)\n assert np.isreal(price), \\\n f'Unexpected price type: {price} {type(price)} for contract: {self.contract} timestamp: {self._account_timestamps[i]}'\n\n if math.isnan(price):\n index = find_index_before(self._net_pnl, timestamp) # Last index we computed net pnl for\n if index == -1:\n prev_unrealized = 0\n else:\n _, (_, prev_unrealized, _) = self._net_pnl.peekitem(index)\n\n unrealized = prev_unrealized\n else:\n unrealized = open_qty * (price - weighted_avg_price) * self.contract.multiplier\n \n net_pnl = realized + unrealized - commission - fee\n\n self._net_pnl[timestamp] = (price, unrealized, net_pnl)\n if self.contract.expiry is not None and timestamp > self.contract.expiry:\n self.final_pnl = net_pnl\n \n def position(self, timestamp: np.datetime64) -> float:\n index = find_index_before(self._trade_pnl, timestamp)\n if index == -1: return 0.\n _, (position, _, _, _, _, _) = self._trade_pnl.peekitem(index) # Less than or equal to timestamp\n return position\n \n def net_pnl(self, timestamp: np.datetime64) -> float:\n if self.contract.expiry is not None and timestamp > self.contract.expiry and not math.isnan(self.final_pnl):\n return self.final_pnl\n index = find_index_before(self._net_pnl, timestamp)\n if index == -1: return 0.\n _, (_, _, net_pnl) = self._net_pnl.peekitem(index) # Less than or equal to timestamp\n return net_pnl\n \n def pnl(self, timestamp: np.datetime64) -> Tuple[float, float, float, float, float, float, float]:\n index = find_index_before(self._trade_pnl, timestamp)\n position, realized, fee, commission, price, unrealized, net_pnl = 0, 0, 0, 0, 0, 0, 0\n if index != -1:\n _, (position, realized, fee, commission, _, _) = self._trade_pnl.peekitem(index) # Less than or equal to timestamp\n \n index = find_index_before(self._net_pnl, timestamp)\n if index != -1:\n _, (price, unrealized, net_pnl) = self._net_pnl.peekitem(index) # Less than or equal to timestamp\n return position, price, realized, unrealized, fee, commission, net_pnl\n\n def df(self) -> pd.DataFrame:\n '''Returns a pandas dataframe with pnl data'''\n df_trade_pnl = pd.DataFrame.from_records([\n (k, v[0], v[1], v[2], v[3]) for k, v in self._trade_pnl.items()],\n columns=['timestamp', 'position', 'realized', 'fee', 'commission'])\n df_net_pnl = pd.DataFrame.from_records([\n (k, v[0], v[1], v[2]) for k, v in self._net_pnl.items()],\n columns=['timestamp', 'price', 'unrealized', 'net_pnl'])\n all_timestamps = np.unique(np.concatenate((df_trade_pnl.timestamp.values, df_net_pnl.timestamp.values)))\n df_trade_pnl = df_trade_pnl.set_index('timestamp').reindex(all_timestamps, method='ffill').reset_index()\n df_trade_pnl = leading_nan_to_zero(df_trade_pnl, ['position', 'realized', 'fee', 'commission'])\n df_net_pnl = df_net_pnl.set_index('timestamp').reindex(all_timestamps, method='ffill').reset_index()\n del df_net_pnl['timestamp']\n df = pd.concat([df_trade_pnl, df_net_pnl], axis=1)\n df['symbol'] = self.contract.symbol\n df = df[['symbol', 'timestamp', 'position', 'price', 'unrealized', 'realized', 'commission', 
'fee', 'net_pnl']]\n return df\n \n\ndef _get_calc_timestamps(timestamps: np.ndarray, pnl_calc_time: int) -> np.ndarray:\n time_delta = np.timedelta64(pnl_calc_time, 'm')\n calc_timestamps = np.unique(timestamps.astype('M8[D]')) + time_delta\n calc_indices = np.searchsorted(timestamps, calc_timestamps, side='left') - 1\n if calc_indices[0] == -1: calc_indices[0] = 0\n return np.unique(timestamps[calc_indices])\n\n\nclass Account:\n '''An Account calculates pnl for a set of contracts'''\n def __init__(self, \n contract_groups: Sequence[ContractGroup], \n timestamps: np.ndarray,\n price_function: Callable[[Contract, np.ndarray, int, SimpleNamespace], float],\n strategy_context: SimpleNamespace,\n starting_equity: float = 1.0e6, \n pnl_calc_time: int = 15 * 60) -> None:\n '''\n Args:\n contract_groups: Contract groups that we want to compute PNL for\n timestamps: Timestamps that we might compute PNL at\n price_function: Function that returns contract prices used to compute pnl\n strategy_context: This is passed into the price function so we can use current state of strategy to compute prices\n starting_equity: Starting equity in account currency. Default 1.e6\n pnl_calc_time: Number of minutes past midnight that we should calculate PNL at. Default 15 * 60, i.e. 3 pm\n '''\n self.starting_equity = starting_equity\n self._price_function = price_function\n self.strategy_context = strategy_context\n \n self.timestamps = timestamps\n self.calc_timestamps = _get_calc_timestamps(timestamps, pnl_calc_time)\n \n self.contracts: MutableSet[Contract] = set()\n self._trades: MutableSequence[Trade] = []\n self._pnl = SortedDict()\n self.symbol_pnls_by_contract_group: MutableMapping[str, MutableSequence[ContractPNL]] = defaultdict(list)\n \n self.symbol_pnls: MutableMapping[str, ContractPNL] = {}\n \n def symbols(self) -> MutableSequence[str]:\n return [contract.symbol for contract in self.contracts]\n \n def _add_contract(self, contract: Contract, timestamp: np.datetime64) -> None:\n if contract.symbol in self.symbol_pnls: \n raise Exception(f'Already have contract with symbol: {contract.symbol} {contract}')\n contract_pnl = ContractPNL(contract, self.timestamps, self._price_function, self.strategy_context)\n self.symbol_pnls[contract.symbol] = contract_pnl\n # For fast lookup in position function\n self.symbol_pnls_by_contract_group[contract.contract_group.name].append(contract_pnl)\n self.contracts.add(contract)\n \n def add_trades(self, trades: Sequence[Trade]) -> None:\n trades = sorted(trades, key=lambda x: getattr(x, 'timestamp'))\n # Break up trades by contract so we can add them in a batch\n trades_by_contract: MutableMapping[Contract, List[Trade]] = defaultdict(list)\n for trade in trades:\n contract = trade.contract\n if contract not in self.contracts: self._add_contract(contract, trade.timestamp)\n trades_by_contract[contract].append(trade)\n \n for contract, contract_trades in trades_by_contract.items():\n contract_trades.sort(key=lambda x: x.timestamp)\n self.symbol_pnls[contract.symbol]._add_trades(contract_trades)\n \n self._trades += trades\n \n def calc(self, timestamp: np.datetime64) -> None:\n '''\n Computes P&L and stores it internally for all contracts.\n \n Args:\n timestamp: timestamp to compute P&L at. Account remembers the last timestamp it computed P&L up to and will compute P&L\n between these and including timestamp. 
If there is more than one day between the last index and current index, we will \n include pnl for at the defined pnl_calc_time for those dates as well.\n '''\n if timestamp in self._pnl: return\n \n prev_idx = find_index_before(self._pnl, timestamp)\n prev_timestamp = None if prev_idx == -1 else self.timestamps[prev_idx]\n \n # Find the last timestamp per day that is between the previous index we computed and the current index,\n # so we can compute daily pnl in addition to the current index pnl\n calc_timestamps = self.calc_timestamps\n intermediate_calc_timestamps = calc_timestamps[calc_timestamps <= timestamp]\n if prev_timestamp is not None:\n intermediate_calc_timestamps = intermediate_calc_timestamps[intermediate_calc_timestamps > prev_timestamp]\n\n if not len(intermediate_calc_timestamps) or intermediate_calc_timestamps[-1] != timestamp: \n intermediate_calc_timestamps = np.append(intermediate_calc_timestamps, timestamp)\n \n for ts in intermediate_calc_timestamps:\n net_pnl = 0.\n for symbol_pnl in self.symbol_pnls.values():\n symbol_pnl.calc_net_pnl(ts)\n net_pnl += symbol_pnl.net_pnl(ts)\n self._pnl[ts] = net_pnl\n \n def position(self, contract_group: ContractGroup, timestamp: np.datetime64) -> float:\n '''Returns netted position for a contract_group at a given date in number of contracts or shares.'''\n position = 0.\n for symbol_pnl in self.symbol_pnls_by_contract_group[contract_group.name]:\n position += symbol_pnl.position(timestamp)\n return position\n \n def positions(self, contract_group: ContractGroup, timestamp: np.datetime64) -> MutableSequence[Tuple[Contract, float]]:\n '''\n Returns all non-zero positions in a contract group\n '''\n positions = []\n for contract in contract_group.contracts:\n symbol = contract.symbol\n if symbol not in self.symbol_pnls: continue\n position = self.symbol_pnls[symbol].position(timestamp)\n if not math.isclose(position, 0): positions.append((contract, position))\n return positions\n \n def equity(self, timestamp: np.datetime64) -> float:\n '''Returns equity in this account in Account currency. Will cause calculation if Account has not previously \n calculated up to this date'''\n pnl = self._pnl.get(timestamp)\n if pnl is None:\n self.calc(timestamp)\n pnl = self._pnl[timestamp]\n return self.starting_equity + pnl\n \n def trades(self,\n contract_group: ContractGroup = None, \n start_date: np.datetime64 = None, \n end_date: np.datetime64 = None) -> MutableSequence[Trade]:\n '''Returns a list of trades with the given symbol and with trade date between (and including) start date \n and end date if they are specified. If symbol is None trades for all symbols are returned'''\n # start_date, end_date = str2date(start_date), str2date(end_date)\n return [trade for trade in self._trades if (start_date is None or trade.timestamp >= start_date) and (\n end_date is None or trade.timestamp <= end_date) and (\n contract_group is None or trade.contract.contract_group == contract_group)]\n \n def df_pnl(self, contract_groups: Union[ContractGroup, Sequence[ContractGroup]] = None) -> pd.DataFrame:\n '''\n Returns a dataframe with P&L columns broken down by contract group and symbol\n \n Args:\n contract_group: Return PNL for this contract group. 
If None (default), include all contract groups\n '''\n if contract_groups is None: \n contract_groups = list(set([contract.contract_group for contract in self.contracts]))\n\n if isinstance(contract_groups, ContractGroup): contract_groups = [contract_groups]\n\n dfs = []\n for contract_group in contract_groups:\n for contract in contract_group.contracts:\n symbol = contract.symbol\n if symbol not in self.symbol_pnls: continue\n df = self.symbol_pnls[symbol].df()\n if len(df) > 1:\n net_pnl_diff = np.diff(df.net_pnl.values) # np.diff returns a vector one shorter than the original\n last_index = np.nonzero(net_pnl_diff)\n if len(last_index[0]): \n last_index = last_index[0][-1] + 1\n df = df.iloc[:last_index + 1]\n df['contract_group'] = contract_group.name\n dfs.append(df)\n ret_df = pd.concat(dfs)\n ret_df = ret_df.sort_values(by=['timestamp', 'contract_group', 'symbol'])\n ret_df = ret_df[['timestamp', 'contract_group', 'symbol', 'position', 'price', 'unrealized', 'realized', \n 'commission', 'fee', 'net_pnl']]\n return ret_df\n \n def df_account_pnl(self, contract_group: ContractGroup = None) -> pd.DataFrame:\n '''\n Returns PNL at the account level.\n \n Args:\n contract_group: If set, we only return pnl for this contract_group. Otherwise we return pnl for all contract groups\n '''\n\n if contract_group is not None:\n symbols = [contract.symbol for contract in contract_group.contracts if contract.symbol in self.symbol_pnls]\n symbol_pnls = [self.symbol_pnls[symbol] for symbol in symbols]\n else:\n symbol_pnls = list(self.symbol_pnls.values())\n\n timestamps = self.calc_timestamps\n position = np.full(len(timestamps), 0., dtype=np.float)\n realized = np.full(len(timestamps), 0., dtype=np.float)\n unrealized = np.full(len(timestamps), 0., dtype=np.float)\n fee = np.full(len(timestamps), 0., dtype=np.float)\n commission = np.full(len(timestamps), 0., dtype=np.float)\n net_pnl = np.full(len(timestamps), 0., dtype=np.float)\n\n for i, timestamp in enumerate(timestamps):\n for symbol_pnl in symbol_pnls:\n _position, _price, _realized, _unrealized, _fee, _commission, _net_pnl = symbol_pnl.pnl(timestamp)\n if math.isfinite(_position): position[i] += _position\n if math.isfinite(_realized): realized[i] += _realized\n if math.isfinite(_unrealized): unrealized[i] += _unrealized\n if math.isfinite(_fee): fee[i] += _fee\n if math.isfinite(_commission): commission[i] += _commission\n if math.isfinite(_net_pnl): net_pnl[i] += _net_pnl\n\n df = pd.DataFrame.from_records(zip(timestamps, position, unrealized, realized, commission, fee, net_pnl), \n columns=['timestamp', 'position', 'unrealized', 'realized', 'commission', 'fee', 'net_pnl'])\n df['equity'] = self.starting_equity + df.net_pnl\n return df[['timestamp', 'position', 'unrealized', 'realized', 'commission', 'fee', 'net_pnl', 'equity']]\n \n def df_trades(self, \n contract_group: ContractGroup = None, \n start_date: np.datetime64 = None, \n end_date: np.datetime64 = None) -> pd.DataFrame:\n '''\n Returns a dataframe of trades\n \n Args:\n contract_group: Return trades for this contract group. 
If None (default), include all contract groups\n start_date: Include trades with date greater than or equal to this timestamp.\n end_date: Include trades with date less than or equal to this timestamp.\n '''\n # start_date, end_date = str2date(start_date), str2date(end_date)\n trades = self.trades(contract_group, start_date, end_date)\n df = pd.DataFrame.from_records([(\n trade.contract.symbol, \n trade.timestamp, \n trade.qty, \n trade.price, \n trade.fee, \n trade.commission, \n trade.order.timestamp, \n trade.order.qty, \n trade.order.reason_code, \n (str(trade.order.properties.__dict__) if trade.order.properties.__dict__ else ''), \n (str(trade.contract.properties.__dict__) if trade.contract.properties.__dict__ else '')) for trade in trades],\n columns=['symbol', 'timestamp', 'qty', 'price', 'fee', 'commission', 'order_date', 'order_qty',\n 'reason_code', 'order_props', 'contract_props'])\n df = df.sort_values(by=['timestamp', 'symbol'])\n return df\n\n\ndef test_account():\n from pyqstrat.pq_types import MarketOrder\n\n def get_close_price(contract, timestamps, idx, strategy_context):\n if contract.symbol == \"IBM\":\n price = idx + 10.1\n elif contract.symbol == \"MSFT\":\n price = idx + 15.3\n else:\n raise Exception(f'unknown contract: {contract}')\n return price\n ContractGroup.clear()\n Contract.clear()\n ibm_cg = ContractGroup.create('IBM')\n msft_cg = ContractGroup.create('MSFT')\n \n ibm_contract = Contract.create('IBM', contract_group=ibm_cg)\n msft_contract = Contract.create('MSFT', contract_group=msft_cg)\n timestamps = np.array(['2018-01-01 09:00', '2018-01-02 08:00', '2018-01-02 09:00', '2018-01-05 13:35'], dtype='M8[m]')\n account = Account([ibm_cg, msft_cg], timestamps, get_close_price, None)\n # account = Account([Contract(symbol)], timestamps, get_close_price)\n trade_1 = Trade(ibm_contract, MarketOrder(ibm_contract, np.datetime64('2018-01-01 09:00'), 10), \n np.datetime64('2018-01-02 08:00'), 10, 10.1, commission=0.01)\n trade_2 = Trade(ibm_contract, MarketOrder(ibm_contract, np.datetime64('2018-01-01 09:00'), -20),\n np.datetime64('2018-01-02 09:00'), -20, 15.1, commission=0.02)\n trade_3 = Trade(msft_contract, MarketOrder(msft_contract, timestamps[1], 15), timestamps[1], 20, 13.2, commission=0.04)\n trade_4 = Trade(msft_contract, MarketOrder(msft_contract, timestamps[2], 20), timestamps[2], 20, 16.2, commission=0.05)\n\n account.add_trades([trade_1, trade_2, trade_3, trade_4])\n account.calc(np.datetime64('2018-01-05 13:35'))\n assert(len(account.df_trades()) == 4)\n assert(len(account.df_pnl()) == 6)\n assert(np.allclose(np.array([9.99, 61.96, 79.97, 103.91, 69.97, 143.91]), account.df_pnl().net_pnl.values, rtol=0))\n assert(np.allclose(np.array([10, 20, -10, 40, -10, 40]), account.df_pnl().position.values, rtol=0))\n assert(np.allclose(np.array([1000000., 1000183.88, 1000213.88]), account.df_account_pnl().equity.values, rtol=0))\n\n\nif __name__ == \"__main__\":\n test_account()\n import doctest\n doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)\n" ]
[ [ "pandas.concat", "numpy.nonzero", "numpy.unique", "numpy.isfinite", "numpy.isnan", "numpy.nan_to_num", "numpy.datetime64", "numpy.timedelta64", "numpy.concatenate", "numpy.append", "numpy.diff", "numpy.searchsorted", "numpy.array", "numpy.isreal", "numpy.sum", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
jjjkkkjjj/pytorch.dl
[ "d82aa1191c14f328c62de85e391ac6fa1b4c7ee3", "d82aa1191c14f328c62de85e391ac6fa1b4c7ee3", "d82aa1191c14f328c62de85e391ac6fa1b4c7ee3", "d82aa1191c14f328c62de85e391ac6fa1b4c7ee3" ]
[ "debug/ssd/test_ssd300.py", "debug/ssd/train_ssd300.py", "dl/data/utils/boxes.py", "dl/models/fots/modules/featextr.py" ]
[ "from dl.data.objdetn import datasets, utils, target_transforms\nfrom dl.data import transforms\n\nfrom dl.models.ssd.ssd300 import SSD300\nfrom dl.data.utils.converter import toVisualizeRectLabelRGBimg\nfrom torch.utils.data import DataLoader\nimport cv2\n\nif __name__ == '__main__':\n augmentation = None\n\n transform = transforms.Compose(\n [transforms.Resize((300, 300)),\n transforms.ToTensor(),\n transforms.Normalize(rgb_means=(0.485, 0.456, 0.406), rgb_stds=(0.229, 0.224, 0.225))]\n )\n target_transform = target_transforms.Compose(\n [target_transforms.Corners2Centroids(),\n target_transforms.OneHot(class_nums=datasets.VOC_class_nums, add_background=True),\n target_transforms.ToTensor()]\n )\n test_dataset = datasets.VOC2007_TestDataset(transform=transform, target_transform=target_transform, augmentation=augmentation)\n\n test_loader = DataLoader(test_dataset,\n batch_size=32,\n shuffle=True,\n collate_fn=utils.batch_ind_fn,\n num_workers=4,\n pin_memory=False)\n\n model = SSD300(class_labels=datasets.VOC_class_labels, batch_norm=False)\n model.load_weights('../../weights/ssd300-voc2007+12+coco/ssd300-voc2007+2012+coco_i-0025000_checkpoints20200611.pth')\n #model.load_for_finetune('./weights/ssd300-voc2007+12+coco/ssd300-voc2007+2012+coco_i-30000.pth')\n model.eval()\n print(model)\n\n #evaluator = VOC2007Evaluator(test_loader, iteration_interval=5000)\n #ap = evaluator(model)\n #print(ap)\n image = cv2.cvtColor(cv2.imread('../../scripts/ssd/assets/coco_testimg.jpg'), cv2.COLOR_BGR2RGB)\n infers, imgs, orig_imgs = model.infer(image, visualize=True, toNorm=True)\n for i, img in enumerate(imgs):\n cv2.imshow('result', cv2.cvtColor(img, cv2.COLOR_RGB2BGR))\n cv2.waitKey()\n\n images = [test_dataset[i][0] for i in range(20)]\n inf, ret_imgs, orig_imgs = model.infer(images, visualize=True, toNorm=False)\n for img in ret_imgs:\n cv2.imshow('result', cv2.cvtColor(img, cv2.COLOR_RGB2BGR))\n cv2.waitKey()", "from dl.data.objdetn import datasets, utils, target_transforms, augmentations\nfrom dl.data import transforms\nfrom dl.loss.ssd import SSDLoss\nfrom dl.models.ssd import SSD300\nfrom dl.optim.scheduler import IterStepLR\nfrom dl.log import *\n\n#from torchvision import transforms > not import!!\nfrom torch.utils.data import DataLoader\nfrom torch.optim.sgd import SGD\n\nif __name__ == '__main__':\n \"\"\"\n augmentation = augmentations.Compose(\n []\n )\"\"\"\n augmentation = augmentations.AugmentationOriginal()\n #augmentation = None\n\n transform = transforms.Compose(\n [transforms.Resize((300, 300)),\n transforms.ToTensor(),\n transforms.Normalize(rgb_means=(0.485, 0.456, 0.406), rgb_stds=(0.229, 0.224, 0.225))]\n )\n target_transform = target_transforms.Compose(\n [target_transforms.Corners2Centroids(),\n target_transforms.OneHot(class_nums=datasets.VOC_class_nums, add_background=True),\n target_transforms.ToTensor()]\n )\n\n train_dataset = datasets.Compose(datasets=(datasets.VOC2007Dataset,), #datasets.VOC2012_TrainValDataset),\n ignore=target_transforms.Ignore(difficult=True), transform=transform, target_transform=target_transform, augmentation=augmentation)\n val_dataset = datasets.VOC2007_TestDataset(ignore=target_transforms.Ignore(difficult=True), transform=transform, target_transform=target_transform)\n\n #train_dataset = datasets.VOC2007Dataset(transform=transform)\n train_loader = DataLoader(train_dataset,\n batch_size=32,\n shuffle=True,\n collate_fn=utils.batch_ind_fn,\n num_workers=4,\n pin_memory=True)\n\n model = SSD300(class_labels=train_dataset.class_labels, 
batch_norm=False).cuda()\n model.load_vgg_weights()\n #model = build_ssd('train')\n print(model)\n \"\"\"\n imgs, targets = utils.batch_ind_fn((train_dataset[2000],))\n p, d = model(imgs)\n from dl.modules.boxes import matching_strategy\n matching_strategy(targets, d, batch_num=1)\n \"\"\"\n optimizer = SGD(model.parameters(), lr=1e-3, momentum=0.9, weight_decay=5e-4)\n #optimizer = Adam(model.parameters(), lr=1e-3, weight_decay=5e-4)\n #iter_sheduler = IterMultiStepLR(optimizer, milestones=(10, 20, 30), gamma=0.1, verbose=True)\n iter_sheduler = IterStepLR(optimizer, step_size=60000, gamma=0.1, verbose=True)\n \"\"\"\n save_manager = SaveManager(modelname='ssd300', interval=10, max_checkpoints=3)\n log_manager = LogManager(interval=10, save_manager=save_manager, loss_interval=10, live_graph=None)\n trainer = TrainLogger(model, loss_func=SSDLoss(), optimizer=optimizer, scheduler=iter_sheduler, log_manager=log_manager, gpu=True)\n\n trainer.train(30, train_loader)\n \"\"\"\n #save_manager = SaveManager(modelname='ssd300', interval=100, max_checkpoints=3, plot_interval=10)\n\n #trainer = TrainObjectDetectionConsoleLogger(SSDLoss(), model, optimizer, iter_sheduler)\n #trainer.train_iter(save_manager, 80000, train_loader)\n\n save_manager = SaveManager(modelname='ssd300', interval=1, max_checkpoints=3, plot_interval=10)\n\n trainer = TrainObjectDetectionConsoleLogger(SSDLoss(), model, optimizer, iter_sheduler)\n trainer.train_epoch(save_manager, 2, train_loader)", "import torch\nimport numpy as np\n\ndef iou(a, b):\n \"\"\"\n :param a: Box Tensor, shape is (nums, 4)\n :param b: Box Tensor, shape is (nums, 4)\n IMPORTANT: Note that 4 means (xmin, ymin, xmax, ymax)\n :return:\n iou: Tensor, shape is (a_num, b_num)\n formula is\n iou = intersection / union = intersection / (A + B - intersection)\n \"\"\"\n\n # get intersection's xmin, ymin, xmax, ymax\n # xmin = max(a_xmin, b_xmin)\n # ymin = max(a_ymin, b_ymin)\n # xmax = min(a_xmax, b_xmax)\n # ymax = min(a_ymax, b_ymax)\n \"\"\"\n >>> b\n tensor([2., 6.])\n >>> c\n tensor([1., 5.])\n >>> torch.cat((b.unsqueeze(1),c.unsqueeze(1)),1)\n tensor([[2., 1.],\n [6., 5.]])\n \"\"\"\n # convert for broadcast\n # a's shape = (a_num, 1, 4), b's shape = (1, b_num, 4)\n a, b = a.unsqueeze(1), b.unsqueeze(0)\n intersection = torch.cat((torch.max(a[:, :, 0], b[:, :, 0]).unsqueeze(2),\n torch.max(a[:, :, 1], b[:, :, 1]).unsqueeze(2),\n torch.min(a[:, :, 2], b[:, :, 2]).unsqueeze(2),\n torch.min(a[:, :, 3], b[:, :, 3]).unsqueeze(2)), dim=2)\n # get intersection's area\n # (w, h) = (xmax - xmin, ymax - ymin)\n intersection_w, intersection_h = intersection[:, :, 2] - intersection[:, :, 0], intersection[:, :, 3] - intersection[:, :, 1]\n # if intersection's width or height is negative, those will be converted to zero\n intersection_w, intersection_h = torch.clamp(intersection_w, min=0), torch.clamp(intersection_h, min=0)\n\n intersectionArea = intersection_w * intersection_h\n\n # get a and b's area\n # area = (xmax - xmin) * (ymax - ymin)\n A, B = (a[:, :, 2] - a[:, :, 0]) * (a[:, :, 3] - a[:, :, 1]), (b[:, :, 2] - b[:, :, 0]) * (b[:, :, 3] - b[:, :, 1])\n\n return intersectionArea / (A + B - intersectionArea)\n\ndef iou_numpy(a, b):\n \"\"\"\n :param a: Box ndarray, shape is (nums, 4)\n :param b: Box ndarray, shape is (nums, 4)\n IMPORTANT: Note that 4 means (xmin, ymin, xmax, ymax)\n :return:\n iou: ndarray, shape is (a_num, b_num)\n formula is\n iou = intersection / union = intersection / (A + B - intersection)\n \"\"\"\n\n # get intersection's xmin, ymin, 
xmax, ymax\n # xmin = max(a_xmin, b_xmin)\n # ymin = max(a_ymin, b_ymin)\n # xmax = min(a_xmax, b_xmax)\n # ymax = min(a_ymax, b_ymax)\n\n # convert for broadcast\n # a's shape = (a_num, 1, 4), b's shape = (1, b_num, 4)\n a, b = np.expand_dims(a, 1), np.expand_dims(b, 0)\n intersection = np.concatenate((np.expand_dims(np.maximum(a[:, :, 0], b[:, :, 0]), 2),\n np.expand_dims(np.maximum(a[:, :, 1], b[:, :, 1]), 2),\n np.expand_dims(np.minimum(a[:, :, 2], b[:, :, 2]), 2),\n np.expand_dims(np.minimum(a[:, :, 3], b[:, :, 3]), 2)), axis=2)\n # get intersection's area\n # (w, h) = (xmax - xmin, ymax - ymin)\n intersection_w, intersection_h = intersection[:, :, 2] - intersection[:, :, 0], intersection[:, :, 3] - intersection[:, :, 1]\n # if intersection's width or height is negative, those will be converted to zero\n intersection_w, intersection_h = np.clip(intersection_w, a_min=0, a_max=None), np.clip(intersection_h, a_min=0, a_max=None)\n\n intersectionArea = intersection_w * intersection_h\n\n # get a and b's area\n # area = (xmax - xmin) * (ymax - ymin)\n A, B = (a[:, :, 2] - a[:, :, 0]) * (a[:, :, 3] - a[:, :, 1]), (b[:, :, 2] - b[:, :, 0]) * (b[:, :, 3] - b[:, :, 1])\n\n return intersectionArea / (A + B - intersectionArea)\n\ndef iou_dists(a, b):\n \"\"\"\n :param a: dists Tensor, shape is (*, 4=(t,r,b,l))\n :param b: dists Tensor, shape is (*, 4=(t,r,b,l))\n :return:\n iou: Tensor, shape is (*,)\n formula is\n iou = intersection / union = intersection / (A + B - intersection)\n \"\"\"\n assert a.shape == b.shape, \"must be same shape, but fot {} and {}\".format(a.shape, b.shape)\n A, B = (a[..., 0] + a[..., 2])*(a[..., 1] + a[..., 3]), (b[..., 0] + b[..., 2])*(b[..., 1] + b[..., 3])\n intersects = torch.min(a, b)\n intersectionArea = (intersects[..., 0] + intersects[..., 2])*(intersects[..., 1] + intersects[..., 3])\n\n return intersectionArea / (A + B - intersectionArea)\n\n\ndef iou_dists_numpy(a, b):\n \"\"\"\n :param a: dists ndarray, shape is (*, 4=(t,r,b,l))\n :param b: dists ndarray, shape is (*, 4=(t,r,b,l))\n :return:\n iou: ndarray, shape is (*,)\n formula is\n iou = intersection / union = intersection / (A + B - intersection)\n \"\"\"\n assert a.shape == b.shape, \"must be same shape, but fot {} and {}\".format(a.shape, b.shape)\n A, B = (a[..., 0] + a[..., 2]) * (a[..., 1] + a[..., 3]), (b[..., 0] + b[..., 2]) * (b[..., 1] + b[..., 3])\n intersects = np.minimum(a, b)\n intersectionArea = (intersects[..., 0] + intersects[..., 2]) * (intersects[..., 1] + intersects[..., 3])\n\n return intersectionArea / (A + B - intersectionArea)\n\ndef dice(a, b):\n \"\"\"\n :param a: Box Tensor, shape is (nums, 4)\n :param b: Box Tensor, shape is (nums, 4)\n IMPORTANT: Note that 4 means (xmin, ymin, xmax, ymax)\n :return:\n iou: Tensor, shape is (a_num, b_num)\n formula is\n iou = intersection / union = intersection / (A + B - intersection)\n \"\"\"\n\n # get intersection's xmin, ymin, xmax, ymax\n # xmin = max(a_xmin, b_xmin)\n # ymin = max(a_ymin, b_ymin)\n # xmax = min(a_xmax, b_xmax)\n # ymax = min(a_ymax, b_ymax)\n \"\"\"\n >>> b\n tensor([2., 6.])\n >>> c\n tensor([1., 5.])\n >>> torch.cat((b.unsqueeze(1),c.unsqueeze(1)),1)\n tensor([[2., 1.],\n [6., 5.]])\n \"\"\"\n # convert for broadcast\n # a's shape = (a_num, 1, 4), b's shape = (1, b_num, 4)\n a, b = a.unsqueeze(1), b.unsqueeze(0)\n intersection = torch.cat((torch.max(a[:, :, 0], b[:, :, 0]).unsqueeze(2),\n torch.max(a[:, :, 1], b[:, :, 1]).unsqueeze(2),\n torch.min(a[:, :, 2], b[:, :, 2]).unsqueeze(2),\n torch.min(a[:, :, 3], b[:, :, 
3]).unsqueeze(2)), dim=2)\n # get intersection's area\n # (w, h) = (xmax - xmin, ymax - ymin)\n intersection_w, intersection_h = intersection[:, :, 2] - intersection[:, :, 0], intersection[:, :, 3] - intersection[:, :, 1]\n # if intersection's width or height is negative, those will be converted to zero\n intersection_w, intersection_h = torch.clamp(intersection_w, min=0), torch.clamp(intersection_h, min=0)\n\n intersectionArea = intersection_w * intersection_h\n\n # get a and b's area\n # area = (xmax - xmin) * (ymax - ymin)\n A, B = (a[:, :, 2] - a[:, :, 0]) * (a[:, :, 3] - a[:, :, 1]), (b[:, :, 2] - b[:, :, 0]) * (b[:, :, 3] - b[:, :, 1])\n\n return 2*intersectionArea / (A + B)\n\ndef dice_numpy(a, b):\n \"\"\"\n :param a: Box ndarray, shape is (nums, 4)\n :param b: Box ndarray, shape is (nums, 4)\n IMPORTANT: Note that 4 means (xmin, ymin, xmax, ymax)\n :return:\n iou: ndarray, shape is (a_num, b_num)\n formula is\n iou = intersection / union = intersection / (A + B - intersection)\n \"\"\"\n\n # get intersection's xmin, ymin, xmax, ymax\n # xmin = max(a_xmin, b_xmin)\n # ymin = max(a_ymin, b_ymin)\n # xmax = min(a_xmax, b_xmax)\n # ymax = min(a_ymax, b_ymax)\n\n # convert for broadcast\n # a's shape = (a_num, 1, 4), b's shape = (1, b_num, 4)\n a, b = np.expand_dims(a, 1), np.expand_dims(b, 0)\n intersection = np.concatenate((np.expand_dims(np.maximum(a[:, :, 0], b[:, :, 0]), 2),\n np.expand_dims(np.maximum(a[:, :, 1], b[:, :, 1]), 2),\n np.expand_dims(np.minimum(a[:, :, 2], b[:, :, 2]), 2),\n np.expand_dims(np.minimum(a[:, :, 3], b[:, :, 3]), 2)), axis=2)\n # get intersection's area\n # (w, h) = (xmax - xmin, ymax - ymin)\n intersection_w, intersection_h = intersection[:, :, 2] - intersection[:, :, 0], intersection[:, :, 3] - intersection[:, :, 1]\n # if intersection's width or height is negative, those will be converted to zero\n intersection_w, intersection_h = np.clip(intersection_w, a_min=0, a_max=None), np.clip(intersection_h, a_min=0, a_max=None)\n\n intersectionArea = intersection_w * intersection_h\n\n # get a and b's area\n # area = (xmax - xmin) * (ymax - ymin)\n A, B = (a[:, :, 2] - a[:, :, 0]) * (a[:, :, 3] - a[:, :, 1]), (b[:, :, 2] - b[:, :, 0]) * (b[:, :, 3] - b[:, :, 1])\n\n return 2*intersectionArea / (A + B)\n\ndef sort_corners(a):\n \"\"\"\n Sort corners points (xmin, ymin, xmax, ymax)\n :param a: Box Tensor, shape is ([nums, ]*, 4=(x1,y1,x2,y2))\n :return a: Box Tensor, shape is ([nums, ]*, 4=(xmin, ymin, xmax, ymax))\n \"\"\"\n return torch.cat((a[:, ::2].min(dim=-1, keepdims=True),\n a[:, 1::2].min(dim=-1, keepdims=True),\n a[:, ::2].max(dim=-1, keepdims=True),\n a[:, 1::2].max(dim=-1, keepdims=True)), dim=-1)\n\ndef sort_corners_numpy(a):\n \"\"\"\n Sort corners points (xmin, ymin, xmax, ymax)\n :param a: Box ndarray, shape is ([nums, ]*, 4=(x1,y1,x2,y2))\n :return a: Box ndarray, shape is ([nums, ]*, 4=(xmin, ymin, xmax, ymax))\n \"\"\"\n return np.concatenate((a[:, ::2].min(axis=-1, keepdims=True),\n a[:, 1::2].min(axis=-1, keepdims=True),\n a[:, ::2].max(axis=-1, keepdims=True),\n a[:, 1::2].max(axis=-1, keepdims=True)), axis=-1)\n\ndef corners2centroids(a):\n \"\"\"\n :param a: Box Tensor, shape is ([nums, ]*, 4=(xmin, ymin, xmax, ymax))\n :return a: Box Tensor, shape is ([nums, ]*, 4=(cx, cy, w, h))\n \"\"\"\n return torch.cat(((a[..., 2:] + a[..., :2])/2, a[..., 2:] - a[..., :2]), dim=-1)\n\ndef corners2centroids_numpy(a):\n \"\"\"\n :param a: Box ndarray, shape is ([nums, ]*, 4=(xmin, ymin, xmax, ymax))\n :return a: Box ndarray, shape is ([nums, ]*, 
4=(cx, cy, w, h))\n \"\"\"\n return np.concatenate(((a[..., 2:] + a[..., :2])/2, a[..., 2:] - a[..., :2]), axis=-1)\n\ndef corners2minmax(a):\n \"\"\"\n :param a: Box Tensor, shape is ([nums, ]*, 4=(xmin, ymin, xmax, ymax))\n :return a: Box Tensor, shape is ([nums, ]*, 4=(xmin, xmax, ymin, ymax))\n \"\"\"\n return torch.index_select(a, dim=-1, index=torch.tensor([0, 2, 1, 3]))\n\ndef corners2minmax_numpy(a):\n \"\"\"\n :param a: Box ndarray, shape is ([nums, ]*, 4=(xmin, ymin, xmax, ymax))\n :return a: Box ndarray, shape is ([nums, ]*, 4=(xmin, xmax, ymin, ymax))\n \"\"\"\n return a[..., np.array((0, 2, 1, 3))]\n\ndef centroids2corners(a):\n \"\"\"\n :param a: Box Tensor, shape is ([nums, ]*, 4=(cx, cy, w, h))\n :return a: Box Tensor, shape is ([nums, ]*, 4=(xmin, ymin, xmax, ymax))\n \"\"\"\n return torch.cat((a[..., :2] - a[..., 2:]/2, a[..., :2] + a[..., 2:]/2), dim=-1)\n\ndef centroids2corners_numpy(a):\n \"\"\"\n :param a: Box ndarray, shape is ([nums, ]*, 4=(cx, cy, w, h))\n :return a: Box ndarray, shape is ([nums, ]*, 4=(xmin, ymin, xmax, ymax))\n \"\"\"\n return np.concatenate((a[..., :2] - a[..., 2:]/2, a[..., :2] + a[..., 2:]/2), axis=-1)\n\ndef centroids2minmax(a):\n \"\"\"\n :param a: Box Tensor, shape is ([nums, ]*, 4=(cx, cy, w, h))\n :return a: Box Tensor, shape is ([nums, ]*, 4=(xmin, xmax, ymin, ymax))\n \"\"\"\n return torch.cat((a[..., 0] - a[..., 2]/2,\n a[..., 0] + a[..., 2]/2,\n a[..., 1] - a[..., 3]/2,\n a[..., 1] + a[..., 3]/2), dim=-1)\n\ndef centroids2minmax_numpy(a):\n \"\"\"\n :param a: Box Tensor, shape is ([nums, ]*, 4=(cx, cy, w, h))\n :return a: Box Tensor, shape is ([nums, ]*, 4=(xmin, xmax, ymin, ymax))\n \"\"\"\n return np.concatenate((a[..., 0] - a[..., 2]/2,\n a[..., 0] + a[..., 2]/2,\n a[..., 1] - a[..., 3]/2,\n a[..., 1] + a[..., 3]/2), axis=-1)\n\ndef minmax2centroids(a):\n \"\"\"\n :param a: Box Tensor, shape is ([nums, ]*, 4=(xmin, xmax, ymin, ymax))\n :return a: Box Tensor, shape is ([nums, ]*, 4=(cx, cy, w, h))\n \"\"\"\n return torch.cat((a[..., 0] + (a[..., 1] - a[..., 0])/2,\n a[..., 2] + (a[..., 3] - a[..., 2])/2,\n a[..., 1] - a[..., 0],\n a[..., 3] - a[..., 2]), dim=-1)\n\ndef minmax2centroids_numpy(a):\n \"\"\"\n :param a: Box ndarray, shape is ([nums, ]*, 4=(xmin, xmax, ymin, ymax))\n :return a: Box ndarray, shape is ([nums, ]*, 4=(cx, cy, w, h))\n \"\"\"\n return np.concatenate((a[..., 0] + (a[..., 1] - a[..., 0])/2,\n a[..., 2] + (a[..., 3] - a[..., 2])/2,\n a[..., 1] - a[..., 0],\n a[..., 3] - a[..., 2]), axis=-1)\n\ndef minmax2corners(a):\n \"\"\"\n :param a: Box Tensor, shape is ([nums, ]*, 4=(xmin, xmax, ymin, ymax))\n :return a: Box Tensor, shape is ([nums, ]*, 4=(xmin, ymin, xmax, ymax))\n \"\"\"\n return torch.index_select(a, dim=-1, index=torch.tensor([0, 2, 1, 3]))\n\ndef minmax2corners_numpy(a):\n \"\"\"\n :param a: Box ndarray, shape is ([nums, ]*, 4=(xmin, xmax, ymin, ymax))\n :return a: Box ndarray, shape is ([nums, ]*, 4=(xmin, ymin, xmax, ymax))\n \"\"\"\n return a[..., np.array((0, 2, 1, 3))]\n\ndef dists2corners(a):\n \"\"\"\n :param a: dist Tensor, shape = (*, h, w, 4=(t, r, b, l))\n :return a: Box Tensor, shape is (*, h, w, 4=(xmin, ymin, xmax, ymax))\n \"\"\"\n assert a.ndim >= 3, 'must be greater than 3d'\n h, w, _ = a.shape[-3:]\n device = a.device\n # shape = (*, h, w, 4=(xmin, ymin, xmax, ymax))\n ret = torch.zeros_like(a, device=device, dtype=torch.float)\n\n heights, widths = torch.meshgrid(torch.arange(h), torch.arange(w))\n # shape = (h, w, 1)\n heights = heights.to(device)\n widths = widths.to(device)\n\n 
widths, rights, lefts = torch.broadcast_tensors(widths, a[..., 1], a[..., 3])\n heights, tops, bottoms = torch.broadcast_tensors(heights, a[..., 0], a[..., 2])\n xmin = (widths - lefts).unsqueeze(-1) # xmin\n ymin = (heights - tops).unsqueeze(-1) # ymin\n xmax = (widths + rights).unsqueeze(-1) # xmax\n ymax = (heights + bottoms).unsqueeze(-1) # ymax\n\n ret[..., ::2] = torch.clamp(torch.cat((xmin, xmax), dim=-1), 0, w)\n ret[..., 1::2] = torch.clamp(torch.cat((ymin, ymax), dim=-1), 0, h)\n\n return ret\n\ndef dists2corners_numpy(a):\n \"\"\"\n :param a: dist ndarray, shape = (*, h, w, 4=(t, r, b, l))\n :return a: Box ndarray, shape is (*, h, w, 4=(xmin, ymin, xmax, ymax))\n \"\"\"\n assert a.ndim >= 3, 'must be greater than 3d'\n h, w, _ = a.shape[-3:]\n\n # shape = (*, h, w, 4=(xmin, ymin, xmax, ymax))\n ret = np.zeros_like(a)\n\n widths, heights = np.meshgrid(np.arange(w), np.arange(h))\n # shape = (h, w, 1)\n widths, rights, lefts = np.broadcast_arrays(widths, a[..., 1], a[..., 3])\n heights, tops, bottoms = np.broadcast_arrays(heights, a[..., 0], a[..., 2])\n xmin = np.expand_dims(widths - lefts, axis=-1) # xmin\n ymin = np.expand_dims(heights - tops, axis=-1) # ymin\n xmax = np.expand_dims(widths + rights, axis=-1) # xmax\n ymax = np.expand_dims(heights + bottoms, axis=-1) # ymax\n\n ret[..., ::2] = np.clip(np.concatenate((xmin, xmax), axis=-1), 0, w)\n ret[..., 1::2] = np.clip(np.concatenate((ymin, ymax), axis=-1), 0, h)\n\n return ret\n\ndef dists2centroids(a):\n \"\"\"\n :param a: dist Tensor, shape = (*, h, w, 4=(t, r, b, l))\n :return a: Box Tensor, shape is (*, h, w, 4=(cx, cy, w, h))\n \"\"\"\n return corners2centroids(dists2corners(a))\n\ndef dists2centroids_numpy(a):\n \"\"\"\n :param a: dist ndarray, shape = (*, h, w, 4=(t, r, b, l))\n :return a: Box ndarray, shape is (*, h, w, 4=(cx, cy, w, h))\n \"\"\"\n return corners2centroids_numpy(dists2corners_numpy(a))\n\ndef dists2minmax(a):\n \"\"\"\n :param a: dist Tensor, shape = (*, h, w, 4=(t, r, b, l))\n :return a: Box Tensor, shape is (*, h, w, 4=(xmin, xmax, ymin, ymax))\n \"\"\"\n return corners2minmax(dists2corners(a))\n\ndef dists2minmax_numpy(a):\n \"\"\"\n :param a: dist ndarray, shape = (*, h, w, 4=(t, r, b, l))\n :return a: Box ndarray, shape is (*, h, w, 4=(xmin, xmax, ymin, ymax))\n \"\"\"\n return corners2minmax_numpy(dists2corners_numpy(a))\n\ndef dists_pt2line_numpy(line_pt1, line_pt2, pt):\n \"\"\"\n :param line_pt1: ndarray, shape = (*, 2)\n :param line_pt2: ndarray, shape = (*, 2)\n :param pt: ndarray, shape = (..., 2)\n :return: distances: ndarray, shape = (..., *)\n \"\"\"\n assert line_pt1.shape == line_pt2.shape, \"shape of line_pt1 and line_pt2 must be same, but got {} and {}\".format(line_pt1.shape, line_pt2.shape)\n assert line_pt1.shape[-1] == pt.shape[-1] == 2, \"last dimension must be 2\"\n\n # convert shape for broadcasting\n # >>> a=np.arange(216*2).reshape(2,3,6,3,2,2)\n # >>> b=np.arange(5*2).reshape(5,2)\n # >>> np.expand_dims(b, (0,1,2,3,4)).shape\n # (1, 1, 1, 1, 1, 5, 2)\n # >>> np.expand_dims(b, (-2,-3,-4,-5,-6)).shape\n # (5, 1, 1, 1, 1, 1, 2)\n\n line_dim = line_pt1.ndim - 1\n # shape = (..., (1,...,1)=line_dim, 2)\n broadcasted_pt = np.expand_dims(pt, axis=tuple(i for i in range(-2, -(2+line_dim), -1)))\n\n pt_dim = pt.ndim - 1\n # shape = ((1,...,1)=pt_dim, *, 2)\n broadcasted_line_pt1 = np.expand_dims(line_pt1, axis=tuple(i for i in range(0, pt_dim)))\n broadcasted_line_pt2 = np.expand_dims(line_pt2, axis=tuple(i for i in range(0, pt_dim)))\n\n # note that np.cross returns scalar value 
with shape = (..., *)\n return np.abs(np.cross(broadcasted_line_pt2 - broadcasted_line_pt1, broadcasted_line_pt1 - broadcasted_pt)) \\\n / np.linalg.norm(broadcasted_line_pt2 - broadcasted_line_pt1, axis=-1)\n\n\"\"\"\nrepeat_interleave is similar to numpy.repeat\n>>> a = torch.Tensor([[1,2,3,4],[5,6,7,8]])\n>>> a\ntensor([[1., 2., 3., 4.],\n [5., 6., 7., 8.]])\n>>> torch.repeat_interleave(a, 3, dim=0)\ntensor([[1., 2., 3., 4.],\n [1., 2., 3., 4.],\n [1., 2., 3., 4.],\n [5., 6., 7., 8.],\n [5., 6., 7., 8.],\n [5., 6., 7., 8.]])\n>>> torch.cat(3*[a])\ntensor([[1., 2., 3., 4.],\n [5., 6., 7., 8.],\n [1., 2., 3., 4.],\n [5., 6., 7., 8.],\n [1., 2., 3., 4.],\n [5., 6., 7., 8.]])\n\"\"\"\ndef tensor_tile(a, repeat, dim=0):\n return torch.cat([a]*repeat, dim=dim)\n\ndef coverage_numpy(a, b, divide_b=False):\n \"\"\"\n :param a: Box ndarray, shape is (nums, 4)\n :param b: Box ndarray, shape is (nums, 4)\n IMPORTANT: Note that 4 means (xmin, ymin, xmax, ymax)\n :param divide_b: bool, if true, |a ^ b| / |b|, otherwise, |a ^ b| / |a|\n :return:\n iou: ndarray, shape is (a_num, b_num)\n formula is\n iou = intersection / union = intersection / (A + B - intersection)\n \"\"\"\n\n # get intersection's xmin, ymin, xmax, ymax\n # xmin = max(a_xmin, b_xmin)\n # ymin = max(a_ymin, b_ymin)\n # xmax = min(a_xmax, b_xmax)\n # ymax = min(a_ymax, b_ymax)\n\n # convert for broadcast\n # a's shape = (a_num, 1, 4), b's shape = (1, b_num, 4)\n a, b = np.expand_dims(a, 1), np.expand_dims(b, 0)\n intersection = np.concatenate((np.expand_dims(np.maximum(a[:, :, 0], b[:, :, 0]), 2),\n np.expand_dims(np.maximum(a[:, :, 1], b[:, :, 1]), 2),\n np.expand_dims(np.minimum(a[:, :, 2], b[:, :, 2]), 2),\n np.expand_dims(np.minimum(a[:, :, 3], b[:, :, 3]), 2)), axis=2)\n # get intersection's area\n # (w, h) = (xmax - xmin, ymax - ymin)\n intersection_w, intersection_h = intersection[:, :, 2] - intersection[:, :, 0], intersection[:, :,\n 3] - intersection[:, :, 1]\n # if intersection's width or height is negative, those will be converted to zero\n intersection_w, intersection_h = np.clip(intersection_w, a_min=0, a_max=None), np.clip(intersection_h, a_min=0,\n a_max=None)\n\n intersectionArea = intersection_w * intersection_h\n\n # get a and b's area\n # area = (xmax - xmin) * (ymax - ymin)\n \"\"\"\n >>> a = np.array([-1, 0, 1, 2, 3], dtype=float)\n >>> b = np.array([ 0, 0, 0, 2, 2], dtype=float)\n\n # If you don't pass `out` the indices where (b == 0) will be uninitialized!\n >>> c = np.divide(a, b, out=np.zeros_like(a), where=b!=0)\n >>> print(c)\n [ 0. 0. 0. 1. 
1.5]\n \"\"\"\n if divide_b:\n B = (b[:, :, 2] - b[:, :, 0]) * (b[:, :, 3] - b[:, :, 1])\n # return intersectionArea / B\n return np.divide(intersectionArea, B, out=np.zeros_like(intersectionArea), where=B != 0)\n else:\n A = (a[:, :, 2] - a[:, :, 0]) * (a[:, :, 3] - a[:, :, 1])\n # return intersectionArea / A\n return np.divide(intersectionArea, A, out=np.zeros_like(intersectionArea), where=A != 0)\n", "import torch\nfrom torch import nn\nfrom torchvision import models\nfrom torch.nn import functional as F\n\nfrom ...layers import Conv2d\nfrom ..base import FeatureExtractorBase\n\nclass Deconv(nn.Module):\n def __init__(self, prev_channels, out_channels, shared_channels):\n super().__init__()\n in_channels = prev_channels + shared_channels\n\n self.conv = nn.Sequential(\n *Conv2d.relu_one('1', in_channels, out_channels, kernel_size=(1, 1), batch_norm=True, sequential=True), # reduce feature channels by 1x1 kernel\n *Conv2d.relu_one('1', out_channels, out_channels, kernel_size=(3, 3), padding=1, batch_norm=True, sequential=True)\n )\n self.prev_channels = prev_channels\n self.shared_channels = shared_channels\n\n def forward(self, x, shared_x):\n _, c, h, w = x.shape\n assert c == self.prev_channels, \"previous out_channels must be {}, but got {}\".format(self.prev_channels, c)\n\n _, c, h_shared, w_shared = shared_x.shape\n assert c == self.shared_channels, \"shared_x\\'s channels must be {}, but got {}\".format(self.shared_channels, c)\n\n # bilinear upsampling\n x = F.interpolate(x, size=(h_shared, w_shared), mode='bilinear', align_corners=True)\n assert shared_x.shape[2:] == x.shape[2:], \"height and width must be same, but got shared conv: {} and previous conv: {}\".format(shared_x.shape[2:], x.shape[2:])\n\n # share conv\n x = torch.cat((x, shared_x), dim=1)\n\n x = self.conv(x)\n\n return x\n\n\nclass SharedConvRes50(FeatureExtractorBase):\n def __init__(self, out_channels):\n super().__init__(out_channels)\n\n resnet50 = models.resnet50(pretrained=True, progress=True)\n self.conv1 = nn.Sequential(\n resnet50.conv1, resnet50.bn1, resnet50.relu\n )\n self.pool1 = resnet50.maxpool\n self.res2 = resnet50.layer1\n\n self.res3 = resnet50.layer2\n self.res4 = resnet50.layer3\n self.res5 = resnet50.layer4\n\n # Note that deconv args' formula is following;\n # prev_channel = previous out_channels\n # shared_channels = shared_conv's out_channels\n self.deconv_res4 = Deconv(2048, 128, shared_channels=1024)\n self.deconv_res3 = Deconv(128, 64, shared_channels=512)\n self.deconv_res2 = Deconv(64, 32, shared_channels=256)\n\n self.convlast = nn.Sequential(\n *Conv2d.relu_one('1', 32, out_channels, kernel_size=(3, 3), padding=1, batch_norm=True, sequential=True)\n )\n\n def forward(self, x):\n \"\"\"\n :param x: input img Tensor, shape = (b, c, h, w)\n :return: fmaps: output feature maps Tensor, shape = (b, out_channels, h/4, w/4)\n \"\"\"\n x = self.conv1(x)\n x = self.pool1(x)\n\n x = self.res2(x)\n shared_via_res2 = x.clone() # shape = (b, 256, h/4, w/4)\n\n x = self.res3(x)\n shared_via_res3 = x.clone() # shape = (b, 512, h/8, w/8)\n\n x = self.res4(x)\n shared_via_res4 = x.clone() # shape = (b, 1024, h/16, w/16)\n\n x = self.res5(x) # shape = (b, 2048, h/32, w/32)\n x = self.deconv_res4(x, shared_via_res4) # shape = (b, 128, h/16, w/16)\n x = self.deconv_res3(x, shared_via_res3) # shape = (b, 64, h/8, w/8)\n x = self.deconv_res2(x, shared_via_res2) # shape = (b, 32, h/4, w/4)\n\n fmaps = self.convlast(x) # shape = (b, out_channels, h/4, w/4)\n\n return fmaps\n\n\nclass 
SharedConvRes34(FeatureExtractorBase):\n def __init__(self, out_channels):\n super().__init__(out_channels)\n\n resnet34 = models.resnet34(pretrained=True, progress=True)\n self.conv1 = nn.Sequential(\n resnet34.conv1, resnet34.bn1, resnet34.relu\n )\n #self.pool1 = resnet34.maxpool\n self.pool1 = nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2))\n self.res2 = resnet34.layer1\n\n self.res3 = resnet34.layer2\n self.res4 = resnet34.layer3\n self.res5 = resnet34.layer4\n\n self.center = nn.Sequential(\n *Conv2d.relu_one('1', 512, 512, kernel_size=(3, 3), stride=(2, 2), padding=1, batch_norm=True, sequential=True),\n *Conv2d.relu_one('2', 512, 1024, kernel_size=(3, 3), padding=1, batch_norm=True, sequential=True)\n )\n\n # Note that deconv args' formula is following;\n # prev_channel = previous out_channels\n # shared_channels = shared_conv's out_channels\n self.deconv_res5 = Deconv(1024, 1024, shared_channels=512)\n self.deconv_res4 = Deconv(1024, 512, shared_channels=256)\n self.deconv_res3 = Deconv(512, 256, shared_channels=128)\n self.deconv_res2 = Deconv(256, 128, shared_channels=64)\n\n self.convlast = nn.Sequential(\n *Conv2d.relu_one('1', 128, out_channels, kernel_size=(3, 3), padding=1, batch_norm=True, sequential=True)\n )\n\n def forward(self, x):\n \"\"\"\n :param x: input img Tensor, shape = (b, c, h, w)\n :return: fmaps: output feature maps Tensor, shape = (b, out_channels, h/4, w/4)\n \"\"\"\n x = self.conv1(x)\n x = self.pool1(x)\n\n x = self.res2(x)\n shared_via_res2 = x.clone() # shape = (b, 64, h/4, w/4)\n\n x = self.res3(x)\n shared_via_res3 = x.clone() # shape = (b, 128, h/8, w/8)\n\n x = self.res4(x)\n shared_via_res4 = x.clone() # shape = (b, 256, h/16, w/16)\n\n x = self.res5(x)\n shared_via_res5 = x.clone() # shape = (b, 512, h/32, w/32)\n\n x = self.center(x) # shape = (b, 1024, h/64, w/64)\n\n x = self.deconv_res5(x, shared_via_res5) # shape = (b, 1024, h/32, w/32)\n x = self.deconv_res4(x, shared_via_res4) # shape = (b, 512, h/16, w/16)\n x = self.deconv_res3(x, shared_via_res3) # shape = (b, 256, h/8, w/8)\n x = self.deconv_res2(x, shared_via_res2) # shape = (b, 128, h/4, w/4)\n\n fmaps = self.convlast(x) # shape = (b, out_channels, h/4, w/4)\n\n return fmaps\n" ]
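Editor's aside (illustrative, not part of the dataset record above): the iou/dice/coverage helpers in the preceding code field all rely on the same broadcasting trick, expanding a to (a_num, 1, 4) and b to (1, b_num, 4) so every box pair is compared in one vectorized pass. A minimal, self-contained NumPy sketch of that pattern; the pairwise_iou name and the sample boxes are made up for the usage check:

import numpy as np

def pairwise_iou(a, b):
    # a: (a_num, 4), b: (b_num, 4); boxes are (xmin, ymin, xmax, ymax)
    a = np.expand_dims(a, 1)                           # (a_num, 1, 4)
    b = np.expand_dims(b, 0)                           # (1, b_num, 4)
    top_left = np.maximum(a[..., :2], b[..., :2])      # intersection xmin, ymin
    bottom_right = np.minimum(a[..., 2:], b[..., 2:])  # intersection xmax, ymax
    wh = np.clip(bottom_right - top_left, a_min=0, a_max=None)  # no overlap -> 0
    inter = wh[..., 0] * wh[..., 1]
    area_a = (a[..., 2] - a[..., 0]) * (a[..., 3] - a[..., 1])
    area_b = (b[..., 2] - b[..., 0]) * (b[..., 3] - b[..., 1])
    return inter / (area_a + area_b - inter)           # shape (a_num, b_num)

boxes_a = np.array([[0.0, 0.0, 2.0, 2.0]])
boxes_b = np.array([[1.0, 1.0, 3.0, 3.0], [4.0, 4.0, 5.0, 5.0]])
print(pairwise_iou(boxes_a, boxes_b))                  # approx [[0.1429, 0.0]]

The dice variants in the record reuse the same intersection computation but return 2*intersection / (A + B) instead of intersection / union.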
[ [ "torch.utils.data.DataLoader" ], [ "torch.utils.data.DataLoader" ], [ "numpy.expand_dims", "numpy.minimum", "torch.max", "torch.cat", "numpy.concatenate", "numpy.zeros_like", "numpy.cross", "numpy.clip", "numpy.arange", "torch.tensor", "torch.arange", "torch.min", "torch.zeros_like", "numpy.broadcast_arrays", "numpy.array", "numpy.maximum", "numpy.linalg.norm", "torch.broadcast_tensors", "torch.clamp" ], [ "torch.nn.Sequential", "torch.nn.MaxPool2d", "torch.nn.functional.interpolate", "torch.cat" ] ]
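Editor's aside (illustrative, not part of the record): the API list above includes numpy.meshgrid and numpy.broadcast_arrays, which the record's dists2corners_numpy uses to turn per-pixel (top, right, bottom, left) distances into corner boxes. A simplified single-map re-derivation, assuming no batch dimension; the dists_to_corners name and the toy input are invented:

import numpy as np

def dists_to_corners(d):
    # d: (h, w, 4) distances (t, r, b, l) from each pixel to its box edges
    h, w, _ = d.shape
    xs, ys = np.meshgrid(np.arange(w), np.arange(h))    # per-pixel x and y indices
    boxes = np.stack((xs - d[..., 3],                   # xmin = x - left
                      ys - d[..., 0],                   # ymin = y - top
                      xs + d[..., 1],                   # xmax = x + right
                      ys + d[..., 2]), axis=-1)         # ymax = y + bottom
    boxes[..., ::2] = np.clip(boxes[..., ::2], 0, w)    # clamp x to [0, w]
    boxes[..., 1::2] = np.clip(boxes[..., 1::2], 0, h)  # clamp y to [0, h]
    return boxes

d = np.zeros((2, 3, 4))
d[1, 2] = [1, 1, 1, 1]                # pixel (y=1, x=2) predicts a 2x2 box
print(dists_to_corners(d)[1, 2])      # [1. 0. 3. 2.]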
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [ "1.10", "1.12", "1.11", "1.19", "1.24", "1.13", "1.16", "1.9", "1.18", "1.23", "1.21", "1.22", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
lsieun/learn-AI
[ "0a164bc2e6317de3aa03c747c0e6f15d93e7f49a", "0a164bc2e6317de3aa03c747c0e6f15d93e7f49a", "0a164bc2e6317de3aa03c747c0e6f15d93e7f49a", "0a164bc2e6317de3aa03c747c0e6f15d93e7f49a" ]
[ "code/learn-AI/matplotlib/graph/sigmoid_function.py", "ML_Chinahadoop/05/code/lesson/5.3.stat05.py", "ML_Chinahadoop/04/code/lesson/4.1.intro18.py", "Tensorflow_InAction_Google/code/004/tensorflow/001.clip_by_value.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\n\ndef func(x):\n return 1 / (1 + np.exp(-x))\n\n# Return evenly spaced numbers over a specified interval.\nxdata = np.linspace(-8, 8, 960,endpoint=True)\nydata = func(xdata)\n\nplt.plot(xdata,ydata)\n\nplt.show()", "# coding:utf-8\n#\n\nimport math\nimport numpy as np\nfrom scipy import stats\nimport matplotlib as mpl\nfrom matplotlib import cm\nimport matplotlib.pyplot as plt\n\ndef calc_statistics(x):\n # 使用系统函数验证\n mu = np.mean(x, axis=0)\n sigma = np.std(x, axis=0)\n skew = stats.skew(x)\n kurtosis = stats.kurtosis(x)\n return mu, sigma, skew, kurtosis\n\nif __name__ == '__main__':\n d = np.random.randn(100000, 2)\n mu, sigma, skew, kurtosis = calc_statistics(d)\n print('函数库计算均值、标准差、偏度、峰度:', mu, sigma, skew, kurtosis)\n # 二维图像\n N = 20\n density, edges = np.histogramdd(d, bins=[N, N])\n print('样本总数:', np.sum(density))\n density /= density.max()\n x = y = np.arange(N)\n print('x = ', x)\n print('y = ', y)\n t = np.meshgrid(x, y)\n print(t)\n mpl.rcParams['font.sans-serif'] = 'SimHei'\n mpl.rcParams['axes.unicode_minus'] = False\n fig = plt.figure(facecolor='w')\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(t[0], t[1], density, c='r', s=50*density, marker='o', depthshade=True)\n ax.plot_surface(t[0], t[1], density, cmap=cm.Accent, rstride=1, cstride=1, alpha=0.9, lw=0.75)\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n ax.set_zlabel('Z')\n plt.title('二元高斯分布,样本个数:%d' % d.shape[0], fontsize=15)\n plt.tight_layout(0.1)\n plt.show()\n", "#coding:utf-8\n\nimport math\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\nnp.set_printoptions(linewidth=200,suppress=True)\n\nmpl.rcParams['font.sans-serif'] = [u'SimHei'] # FangSong/黑体 FangSong/KaiTi\nmpl.rcParams['axes.unicode_minus'] = False\n\n# x ** x x > 0\n# (-x) ** (-x) x < 0\ndef f(x):\n y = np.ones_like(x)\n i = x > 0\n y[i] = np.power(x[i], x[i])\n i = x < 0\n y[i] = np.power(-x[i], -x[i])\n return y\n\n# 5.3 x^x\nplt.figure(facecolor='w')\nx = np.linspace(-1.3, 1.3, 101)\ny = f(x)\nplt.plot(x, y, 'g-', label='x^x', linewidth=2)\nplt.grid()\nplt.legend(loc='upper left')\nplt.show()", "import tensorflow as tf\n\nv = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=tf.float32,shape=(2,3),name=\"v\")\nclip_op = tf.clip_by_value(v, clip_value_min=2.5, clip_value_max=4.5, name=\"clip_op\")\nprint(\"clip_op = \", clip_op)\n\nwith tf.Session() as sess:\n result = clip_op.eval()\n print(result)" ]
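Editor's aside (illustrative, not part of the record): the last snippet in the code field above evaluates tf.clip_by_value through the TensorFlow 1.x Session API, which matches the TF1 versions listed for this record below. Under TensorFlow 2.x eager execution the same values can be clipped and printed directly; the constants mirror the snippet, and this is a hedged sketch rather than a claim about how the original repository runs:

import tensorflow as tf

v = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=tf.float32)
clipped = tf.clip_by_value(v, clip_value_min=2.5, clip_value_max=4.5)
print(clipped.numpy())   # [[2.5 2.5 3. ]
                         #  [4.  4.5 4.5]]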
[ [ "matplotlib.pyplot.plot", "matplotlib.pyplot.show", "numpy.linspace", "numpy.exp" ], [ "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.title", "numpy.arange", "numpy.histogramdd", "numpy.std", "numpy.random.randn", "numpy.mean", "scipy.stats.kurtosis", "numpy.meshgrid", "scipy.stats.skew", "numpy.sum", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ], [ "matplotlib.pyplot.legend", "numpy.ones_like", "numpy.linspace", "numpy.power", "numpy.set_printoptions", "matplotlib.pyplot.plot", "matplotlib.pyplot.grid", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ], [ "tensorflow.clip_by_value", "tensorflow.constant", "tensorflow.Session" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
Max-astro/A2Project
[ "5d40263742133f214936b06b622d08092e694aed", "5d40263742133f214936b06b622d08092e694aed", "5d40263742133f214936b06b622d08092e694aed" ]
[ "DownData/Link_down.py", "history/il1_Frac_plot.py", "history/gitcode.py" ]
[ "import requests\r\nimport sys\r\nimport h5py\r\nimport numpy as np\r\nimport os\r\n\r\ndef get(path, params=None, savedir=None):\r\n # make HTTP GET request to path\r\n headers = {\"api-key\":\"27d44ba55cd115b10f2dd9153589aff0\"}\r\n r = requests.get(path, params=params, headers=headers)\r\n\r\n # raise exception if response code is not HTTP SUCCESS (200)\r\n r.raise_for_status()\r\n\r\n if r.headers['content-type'] == 'application/json':\r\n return r.json() # parse json responses automatically\r\n\r\n if 'content-disposition' in r.headers:\r\n filename = r.headers['content-disposition'].split(\"filename=\")[1]\r\n if savedir != None:\r\n filename = savedir + filename\r\n with open(filename, 'wb') as f:\r\n f.write(r.content)\r\n return filename # return the filename string\r\n\r\n return r\r\n\r\n\r\n\r\ndef HaloProgenitors(haloID):\r\n '''\r\n haloID is the subhalo's ID in snap_099\r\n return a dict = {'SnapNum' : SubfindID}\r\n '''\r\n url = \"http://www.tng-project.org/api/TNG100-1/snapshots/99/subhalos/%haloID/sublink/simple.json\"%haloID\r\n try:\r\n sublink = get(url, savedir='/home/sublink/')\r\n\r\n except:\r\n print(sys.exc_info()[0])\r\n return -1\r\n\r\n f = sublink\r\n \r\n #Find halo's Subfind ID with redshift(ie:SnapNum), and save the dict in '/Raid0/zhouzb/diskHalo_Sublink/'\r\n snap_num = np.array(f['SnapNum'])\r\n subfind_ID = np.array(f['SubfindID'])\r\n Progenitors_dict = {}\r\n for i in range(len(snap_num)):\r\n Progenitors_dict['%d'%snap_num[i]] = subfind_ID[i]\r\n\r\n f.close()\r\n return Progenitors_dict\r\n\r\n\r\n\r\n'''\r\nsnap_91 z=0.1\r\nsnap_84 z=0.2\r\nsnap_78 z=0.3\r\nsnap_72 z=0.4\r\nsnap_67 z=0.5\r\nsnap_59 z=0.7\r\nsnap_50 z=1.0\r\nsnap_40 z=1.5\r\nsnap_33 z=2.0\r\n'''\r\n\r\n\r\nbarred = np.load('F:/Linux/data/099fig/barredID.npy')\r\nsnap = [99, 91, 84, 78, 72, 67, 59, 50, 40, 33]\r\nerrorHalo = []\r\n\r\nfor haloID in barred:\r\n Prog_dict = HaloProgenitors(haloID)\r\n if Prog_dict == -1:\r\n print('halo: %d Network ERROR, Try next'%haloID)\r\n errorHalo.append(haloID)\r\n continue\r\n else: \r\n #Download stellar particles' information in all selected snapshot z\r\n for z in snap:\r\n print('Now download halo %d in snap_%d'%(haloID, z))\r\n try:\r\n subID = Prog_dict['%d'%z]\r\n cutoff_url = 'http://www.tng-project.org/api/TNG100-1/snapshots/%d/subhalos/%d/cutout.hdf5?stars=Masses,Coordinates,Velocities,GFM_StellarFormationTime'%(z, subID)\r\n if os.path.isfile('F:/Linux/data/TNG/cutoff/disk_%d/cutout_%d.hdf5'%(z, subID)) == False:\r\n get(cutoff_url, savedir='F:/Linux/data/TNG/cutoff/disk_%d/'%z)\r\n except:\r\n print(\"halo %d in snap_%d Fail:\"%(haloID, z), sys.exc_info()[0])\r\n print(\"You need to reload this halo.\")\r\n errorHalo.append(haloID)\r\n break\r\n else:\r\n print('halo %d in snap_%d downloaded'%(haloID, z))\r\n print('halo %d in all snapshot download Completed'%haloID) \r\n\r\nif len(errorHalo) == 0:\r\n print('All done.')\r\nelse:\r\n print('%d halo download faild'%len(errorHalo))\r\n print(\"Error halo's ID were saved in '/Raid0/zhouzb/downError.log.npy'.\")\r\n np.save('F:/Linux/data/TNG/errorID.npy', errorHalo)\r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n", "import numpy as np\r\nimport h5py\r\nimport matplotlib.pyplot as plt\r\nimport os\r\nimport sys\r\nsys.path.append('F:\\Linux')\r\nimport illustris_python as il\r\n\r\ndef FilepathList(path, suffix='.hdf5'):\r\n L = []\r\n for files in os.listdir(path):\r\n if os.path.splitext(files)[1] == '%s'%suffix:\r\n L.append(int(os.path.splitext(files)[0]))\r\n return L \r\n\r\n# 
particleNum = il.groupcat.loadSubhalos('f:/Linux/data/illustris-1',135,'SubhaloLenType')[:,4]\r\n\r\n# barID = np.load('F:/Linux/data/barID_il1.npy')\r\nbarID = FilepathList('F:/Linux/data/135_4WP_ALL','.png')\r\ndiskID = np.load('F:/Linux/data/diskID_il1.npy')\r\nStellarMass = il.groupcat.loadSubhalos('f:/Linux/data/illustris-1',135,'SubhaloMassType')[:,4]\r\n\r\n# minmass = 10.5\r\n#Disk halo's mass\r\ndiskmass = StellarMass[diskID]\r\ndiskmass = np.log10(diskmass*10**10)\r\n# diskmass = diskmass[diskmass > minmass]\r\n#Barred halo's mass\r\nbarmass = StellarMass[barID]\r\nbarmass = np.log10(barmass*10**10)\r\n# barmass = barmass[barmass > minmass]\r\n\r\n#\r\nfig = plt.figure()\r\nax1 = fig.add_subplot(111)\r\nax1.set_xlabel('Stellar Mass')\r\nax1.set_ylabel('Bar Fraction')\r\nax2 = ax1.twinx()\r\nax2.set_ylabel('Halo number N')\r\n\r\n#plot histogram\r\nn,bins,others = ax2.hist(diskmass, 17, rwidth=0.9, alpha = 0.5)\r\nax2.set_xlim(10.0,12)\r\n\r\nFraction = []\r\nx_point = []\r\nfor i in range(len(bins)-1):\r\n low = bins[i]\r\n high = bins[i+1]\r\n x_point.append((low + high)/2)\r\n\r\n disknum = len(diskmass[(diskmass >= low) & (diskmass < high)])\r\n barred = len(barmass[(barmass >= low) & (barmass < high)])\r\n if disknum == 0:\r\n Fraction.append(0)\r\n continue\r\n Barfraction = barred / disknum\r\n Fraction.append(Barfraction)\r\n\r\nax1.plot(x_point, Fraction, 'o', c = 'r')\r\nplt.savefig('F:/Linux/data/il1_DISK-BarFraction.png',dpi=300)\r\n\r\n", "basePath = '/Raid1/Illustris/TNG/'\r\nimport numpy as np\r\nfrom illustris_python.snapshot import loadSubhalo\r\nfrom illustris_python.groupcat import loadSubhalos\r\n\r\ndef specific_angular_momentum(x, v, m):\r\n \"\"\"\r\n specific angular momentum of a group of particles\r\n \r\n Parameters\r\n ----------\r\n x : array_like\r\n array particle positions of shape (Nptcl, ndim)\r\n v : array_like\r\n array of particle velcities wth shape (Nptcl, ndim)\r\n m : array_like\r\n array of particle masses of shape (Nptcl,)\r\n Returns\r\n -------\r\n L : nump.array\r\n specific angular momentum vector\r\n \"\"\"\r\n return (m[:,np.newaxis]*np.cross(x,v)).sum(axis=0)\r\n\r\ndef galaxy_ang_mom(gal_id, basePath, snapNum, reduced=True):\r\n \"\"\"\r\n Parameters\r\n ----------\r\n gal_id : int\r\n basepath : string\r\n snapNum : int\r\n Lbox : array_like\r\n reduced : bool\r\n Returns\r\n -------\r\n eig_vals, eig_vecs\r\n \"\"\"\r\n\r\n # load galaxy position (most bound particle)\r\n gal_positions = loadSubhalos(basePath, snapNum, fields=['SubhaloPos'])/1000.0\r\n gal_position = gal_positions[gal_id]\r\n\r\n # half mass radius\r\n gal_rhalfs = loadSubhalos(basePath, snapNum, fields=['SubhaloHalfmassRadType'])[:,4]/1000.0\r\n gal_rhalf = gal_rhalfs[gal_id]\r\n\r\n # load stellar particles\r\n ptcl_coords = loadSubhalo(basePath, snapNum, gal_id, 4, fields=['Coordinates'])/1000.0\r\n ptcl_masses = loadSubhalo(basePath, snapNum, gal_id, 4, fields=['Masses'])*10.0**10\r\n ptcl_vels = loadSubhalo(basePath, snapNum, gal_id, 4, fields=['Velocities'])\r\n sf_time = loadSubhalo(basePath, snapNum, gal_id, 4, fields=['GFM_StellarFormationTime'])\r\n is_a_star = (sf_time>=0.0) # don't use wind particles\r\n\r\n # account for PBCs\r\n dx = ptcl_coords[:,0] - gal_position[0]\r\n dy = ptcl_coords[:,1] - gal_position[1]\r\n dz = ptcl_coords[:,2] - gal_position[2]\r\n\r\n ptcl_coords = np.vstack((dx,dy,dz)).T\r\n\r\n r = np.sqrt(np.sum(ptcl_coords**2, axis=1))/gal_rhalf\r\n mask = (r<=10.0) & (is_a_star)\r\n\r\n L = 
specific_angular_momentum(ptcl_coords[mask], ptcl_vels[mask], ptcl_masses[mask])\r\n\r\n mag_L = np.sqrt(np.sum(L**2,axis=-1))\r\n\r\n return L, mag_L, L/mag_L" ]
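Editor's aside (illustrative, not part of the record): the gitcode.py entry above reduces a galaxy's stellar particles to an angular momentum vector via (m[:, np.newaxis] * np.cross(x, v)).sum(axis=0). A tiny self-contained check of that formula on two made-up particles in circular motion; only the formula comes from the record, the inputs are invented:

import numpy as np

def specific_angular_momentum(x, v, m):
    # x, v: (N, 3) positions and velocities; m: (N,) masses
    return (m[:, np.newaxis] * np.cross(x, v)).sum(axis=0)

x = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
v = np.array([[0.0, 1.0, 0.0], [-1.0, 0.0, 0.0]])
m = np.array([2.0, 3.0])
L = specific_angular_momentum(x, v, m)
print(L, np.linalg.norm(L))   # [0. 0. 5.] 5.0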
[ [ "numpy.load", "numpy.array", "numpy.save" ], [ "numpy.load", "numpy.log10", "matplotlib.pyplot.savefig", "matplotlib.pyplot.figure" ], [ "numpy.sum", "numpy.vstack", "numpy.cross" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
j-woz/Benchmarks
[ "d518162fdafb7cfa26071b6a30a3b456dad024f6", "d518162fdafb7cfa26071b6a30a3b456dad024f6", "d518162fdafb7cfa26071b6a30a3b456dad024f6" ]
[ "Pilot1/Combo/combo_dose.py", "Pilot2/P2B1/p2b1_baseline_keras2.py", "common/darts/modules/linear/mixed_layer.py" ]
[ "#! /usr/bin/env python\n\nfrom __future__ import division, print_function\n\nimport argparse\nimport collections\nimport logging\nimport os\nimport random\nimport threading\n\nimport numpy as np\nimport pandas as pd\n\nfrom itertools import cycle, islice\n\nimport keras\nfrom keras import backend as K\nfrom keras import optimizers\nfrom keras.models import Model\nfrom keras.layers import Input, Dense, Dropout\nfrom keras.callbacks import Callback, ModelCheckpoint, ReduceLROnPlateau, LearningRateScheduler, TensorBoard\nfrom keras.utils import get_custom_objects\nfrom keras.utils.vis_utils import plot_model\nfrom sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error\nfrom sklearn.model_selection import KFold, StratifiedKFold, GroupKFold\nfrom scipy.stats.stats import pearsonr\n\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\n\nimport combo\nimport candle\n\nimport NCI60\n\n\nlogger = logging.getLogger(__name__)\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\n\ndef set_seed(seed):\n os.environ['PYTHONHASHSEED'] = '0'\n np.random.seed(seed)\n\n random.seed(seed)\n\n if K.backend() == 'tensorflow':\n import tensorflow as tf\n tf.set_random_seed(seed)\n # session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)\n # sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)\n # K.set_session(sess)\n\n # Uncommit when running on an optimized tensorflow where NUM_INTER_THREADS and\n # NUM_INTRA_THREADS env vars are set.\n # session_conf = tf.ConfigProto(inter_op_parallelism_threads=int(os.environ['NUM_INTER_THREADS']),\n #\tintra_op_parallelism_threads=int(os.environ['NUM_INTRA_THREADS']))\n # sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)\n # K.set_session(sess)\n\n\ndef verify_path(path):\n folder = os.path.dirname(path)\n if folder and not os.path.exists(folder):\n os.makedirs(folder)\n\n\ndef set_up_logger(logfile, verbose):\n verify_path(logfile)\n fh = logging.FileHandler(logfile)\n fh.setFormatter(logging.Formatter(\"[%(asctime)s %(process)d] %(message)s\", datefmt=\"%Y-%m-%d %H:%M:%S\"))\n fh.setLevel(logging.DEBUG)\n\n sh = logging.StreamHandler()\n sh.setFormatter(logging.Formatter(''))\n sh.setLevel(logging.DEBUG if verbose else logging.INFO)\n\n logger.setLevel(logging.DEBUG)\n logger.addHandler(fh)\n logger.addHandler(sh)\n\n\ndef extension_from_parameters(args):\n \"\"\"Construct string for saving model with annotation of parameters\"\"\"\n ext = ''\n ext += '.A={}'.format(args.activation)\n ext += '.B={}'.format(args.batch_size)\n ext += '.E={}'.format(args.epochs)\n ext += '.O={}'.format(args.optimizer)\n # ext += '.LEN={}'.format(args.maxlen)\n ext += '.LR={}'.format(args.learning_rate)\n ext += '.CF={}'.format(''.join([x[0] for x in sorted(args.cell_features)]))\n ext += '.DF={}'.format(''.join([x[0] for x in sorted(args.drug_features)]))\n if args.feature_subsample > 0:\n ext += '.FS={}'.format(args.feature_subsample)\n if args.dropout > 0:\n ext += '.DR={}'.format(args.dropout)\n if args.warmup_lr:\n ext += '.wu_lr'\n if args.reduce_lr:\n ext += '.re_lr'\n if args.residual:\n ext += '.res'\n if args.use_landmark_genes:\n ext += '.L1000'\n if args.gen:\n ext += '.gen'\n if args.use_combo_score:\n ext += '.scr'\n for i, n in enumerate(args.dense):\n if n > 0:\n ext += '.D{}={}'.format(i+1, n)\n if args.dense_feature_layers != args.dense:\n for i, n in enumerate(args.dense):\n if n > 0:\n ext += '.FD{}={}'.format(i+1, n)\n\n return ext\n\n\ndef discretize(y, 
bins=5):\n percentiles = [100 / bins * (i + 1) for i in range(bins - 1)]\n thresholds = [np.percentile(y, x) for x in percentiles]\n classes = np.digitize(y, thresholds)\n return classes\n\n\nclass ComboDataLoader(object):\n \"\"\"Load merged drug response, drug descriptors and cell line essay data\n \"\"\"\n\n def __init__(self, seed, val_split=0.2, shuffle=True,\n cell_features=['expression'], drug_features=['descriptors'],\n response_url=None, use_landmark_genes=False, use_combo_score=False,\n preprocess_rnaseq=None, exclude_cells=[], exclude_drugs=[],\n feature_subsample=None, scaling='std', scramble=False,\n cv_partition='overlapping', cv=0):\n \"\"\"Initialize data merging drug response, drug descriptors and cell line essay.\n Shuffle and split training and validation set\n\n Parameters\n ----------\n seed: integer\n seed for random generation\n val_split : float, optional (default 0.2)\n fraction of data to use in validation\n cell_features: list of strings from 'expression', 'expression_5platform', 'mirna', 'proteome', 'all', 'categorical' (default ['expression'])\n use one or more cell line feature sets: gene expression, microRNA, proteome\n use 'all' for ['expression', 'mirna', 'proteome']\n use 'categorical' for one-hot encoded cell lines\n drug_features: list of strings from 'descriptors', 'latent', 'all', 'categorical', 'noise' (default ['descriptors'])\n use dragon7 descriptors, latent representations from Aspuru-Guzik's SMILES autoencoder\n trained on NSC drugs, or both; use random features if set to noise\n use 'categorical' for one-hot encoded drugs\n shuffle : True or False, optional (default True)\n if True shuffles the merged data before splitting training and validation sets\n scramble: True or False, optional (default False)\n if True randomly shuffle dose response data as a control\n feature_subsample: None or integer (default None)\n number of feature columns to use from cellline expressions and drug descriptors\n use_landmark_genes: True or False\n only use LINCS1000 landmark genes\n use_combo_score: bool (default False)\n use combination score in place of percent growth (stored in 'GROWTH' column)\n scaling: None, 'std', 'minmax' or 'maxabs' (default 'std')\n type of feature scaling: 'maxabs' to [-1,1], 'maxabs' to [-1, 1], 'std' for standard normalization\n \"\"\"\n\n self.cv_partition = cv_partition\n\n np.random.seed(seed)\n\n df = NCI60.load_combo_dose_response(response_url=response_url, use_combo_score=use_combo_score, fraction=True, exclude_cells=exclude_cells, exclude_drugs=exclude_drugs)\n logger.info('Loaded {} unique (CL, D1, D2) response sets.'.format(df.shape[0]))\n\n if 'all' in cell_features:\n self.cell_features = ['expression', 'mirna', 'proteome']\n else:\n self.cell_features = cell_features\n\n if 'all' in drug_features:\n self.drug_features = ['descriptors', 'latent']\n else:\n self.drug_features = drug_features\n\n for fea in self.cell_features:\n if fea == 'expression' or fea == 'rnaseq':\n self.df_cell_expr = NCI60.load_cell_expression_rnaseq(ncols=feature_subsample, scaling=scaling, use_landmark_genes=use_landmark_genes, preprocess_rnaseq=preprocess_rnaseq)\n df = df.merge(self.df_cell_expr[['CELLNAME']], on='CELLNAME')\n elif fea == 'expression_u133p2':\n self.df_cell_expr = NCI60.load_cell_expression_u133p2(ncols=feature_subsample, scaling=scaling, use_landmark_genes=use_landmark_genes)\n df = df.merge(self.df_cell_expr[['CELLNAME']], on='CELLNAME')\n elif fea == 'expression_5platform':\n self.df_cell_expr = 
NCI60.load_cell_expression_5platform(ncols=feature_subsample, scaling=scaling, use_landmark_genes=use_landmark_genes)\n df = df.merge(self.df_cell_expr[['CELLNAME']], on='CELLNAME')\n elif fea == 'mirna':\n self.df_cell_mirna = NCI60.load_cell_mirna(ncols=feature_subsample, scaling=scaling)\n df = df.merge(self.df_cell_mirna[['CELLNAME']], on='CELLNAME')\n elif fea == 'proteome':\n self.df_cell_prot = NCI60.load_cell_proteome(ncols=feature_subsample, scaling=scaling)\n df = df.merge(self.df_cell_prot[['CELLNAME']], on='CELLNAME')\n elif fea == 'categorical':\n df_cell_ids = df[['CELLNAME']].drop_duplicates()\n cell_ids = df_cell_ids['CELLNAME'].map(lambda x: x.replace(':', '.'))\n df_cell_cat = pd.get_dummies(cell_ids)\n df_cell_cat.index = df_cell_ids['CELLNAME']\n self.df_cell_cat = df_cell_cat.reset_index()\n\n for fea in self.drug_features:\n if fea == 'descriptors':\n self.df_drug_desc = NCI60.load_drug_descriptors(ncols=feature_subsample, scaling=scaling)\n df = df[df['NSC1'].isin(self.df_drug_desc['NSC']) & df['NSC2'].isin(self.df_drug_desc['NSC'])]\n elif fea == 'latent':\n self.df_drug_auen = NCI60.load_drug_autoencoded_AG(ncols=feature_subsample, scaling=scaling)\n df = df[df['NSC1'].isin(self.df_drug_auen['NSC']) & df['NSC2'].isin(self.df_drug_auen['NSC'])]\n elif fea == 'categorical':\n df_drug_ids = df[['NSC1']].drop_duplicates()\n df_drug_ids.columns = ['NSC']\n drug_ids = df_drug_ids['NSC']\n df_drug_cat = pd.get_dummies(drug_ids)\n df_drug_cat.index = df_drug_ids['NSC']\n self.df_drug_cat = df_drug_cat.reset_index()\n elif fea == 'noise':\n ids1 = df[['NSC1']].drop_duplicates().rename(columns={'NSC1':'NSC'})\n ids2 = df[['NSC2']].drop_duplicates().rename(columns={'NSC2':'NSC'})\n df_drug_ids = pd.concat([ids1, ids2]).drop_duplicates()\n noise = np.random.normal(size=(df_drug_ids.shape[0], 500))\n df_rand = pd.DataFrame(noise, index=df_drug_ids['NSC'],\n columns=['RAND-{:03d}'.format(x) for x in range(500)])\n self.df_drug_rand = df_rand.reset_index()\n\n logger.info('Filtered down to {} rows with matching information.'.format(df.shape[0]))\n\n ids1 = df[['NSC1']].drop_duplicates().rename(columns={'NSC1':'NSC'})\n ids2 = df[['NSC2']].drop_duplicates().rename(columns={'NSC2':'NSC'})\n df_drug_ids = pd.concat([ids1, ids2]).drop_duplicates().reset_index(drop=True)\n\n n_drugs = df_drug_ids.shape[0]\n n_val_drugs = int(n_drugs * val_split)\n n_train_drugs = n_drugs - n_val_drugs\n\n logger.info('Unique cell lines: {}'.format(df['CELLNAME'].nunique()))\n logger.info('Unique drugs: {}'.format(n_drugs))\n # df.to_csv('filtered.growth.min.tsv', sep='\\t', index=False, float_format='%.4g')\n # df.to_csv('filtered.score.max.tsv', sep='\\t', index=False, float_format='%.4g')\n\n if shuffle:\n df = df.sample(frac=1.0, random_state=seed).reset_index(drop=True)\n df_drug_ids = df_drug_ids.sample(frac=1.0, random_state=seed).reset_index(drop=True)\n\n self.df_response = df\n self.df_drug_ids = df_drug_ids\n\n self.train_drug_ids = df_drug_ids['NSC'][:n_train_drugs]\n self.val_drug_ids = df_drug_ids['NSC'][-n_val_drugs:]\n\n if scramble:\n growth = df[['GROWTH']]\n random_growth = growth.iloc[np.random.permutation(np.arange(growth.shape[0]))].reset_index()\n self.df_response[['GROWTH']] = random_growth['GROWTH']\n logger.warn('Randomly shuffled dose response growth values.')\n\n logger.info('Distribution of dose response:')\n logger.info(self.df_response[['GROWTH']].describe())\n\n self.total = df.shape[0]\n self.n_val = int(self.total * val_split)\n self.n_train = self.total - self.n_val\n 
logger.info('Rows in train: {}, val: {}'.format(self.n_train, self.n_val))\n\n self.cell_df_dict = {'expression': 'df_cell_expr',\n 'expression_5platform': 'df_cell_expr',\n 'expression_u133p2': 'df_cell_expr',\n 'rnaseq': 'df_cell_expr',\n 'mirna': 'df_cell_mirna',\n 'proteome': 'df_cell_prot',\n 'categorical': 'df_cell_cat'}\n\n self.drug_df_dict = {'descriptors': 'df_drug_desc',\n 'latent': 'df_drug_auen',\n 'categorical': 'df_drug_cat',\n 'noise': 'df_drug_rand'}\n\n self.input_features = collections.OrderedDict()\n self.feature_shapes = {}\n for fea in self.cell_features:\n feature_type = 'cell.' + fea\n feature_name = 'cell.' + fea\n df_cell = getattr(self, self.cell_df_dict[fea])\n self.input_features[feature_name] = feature_type\n self.feature_shapes[feature_type] = (df_cell.shape[1] - 1,)\n\n for drug in ['drug1', 'drug2']:\n for fea in self.drug_features:\n feature_type = 'drug.' + fea\n feature_name = drug + '.' + fea\n df_drug = getattr(self, self.drug_df_dict[fea])\n self.input_features[feature_name] = feature_type\n self.feature_shapes[feature_type] = (df_drug.shape[1] - 1,)\n\n self.feature_shapes['dose'] = (1,)\n for dose in ['dose1', 'dose2']:\n self.input_features[dose] = 'dose'\n\n logger.info('Input features shapes:')\n for k, v in self.input_features.items():\n logger.info(' {}: {}'.format(k, self.feature_shapes[v]))\n\n self.input_dim = sum([np.prod(self.feature_shapes[x]) for x in self.input_features.values()])\n logger.info('Total input dimensions: {}'.format(self.input_dim))\n\n if cv > 1:\n if cv_partition == 'disjoint':\n pass\n elif cv_partition == 'disjoint_cells':\n y = self.df_response['GROWTH'].values\n groups = self.df_response['CELLNAME'].values\n gkf = GroupKFold(n_splits=cv)\n splits = gkf.split(y, groups=groups)\n self.cv_train_indexes = []\n self.cv_val_indexes = []\n for index, (train_index, val_index) in enumerate(splits):\n print(index, train_index)\n self.cv_train_indexes.append(train_index)\n self.cv_val_indexes.append(val_index)\n else:\n y = self.df_response['GROWTH'].values\n # kf = KFold(n_splits=cv)\n # splits = kf.split(y)\n skf = StratifiedKFold(n_splits=cv, random_state=seed)\n splits = skf.split(y, discretize(y, bins=cv))\n self.cv_train_indexes = []\n self.cv_val_indexes = []\n for index, (train_index, val_index) in enumerate(splits):\n print(index, train_index)\n self.cv_train_indexes.append(train_index)\n self.cv_val_indexes.append(val_index)\n\n def load_data_all(self, switch_drugs=False):\n df_all = self.df_response\n y_all = df_all['GROWTH'].values\n x_all_list = []\n\n for fea in self.cell_features:\n df_cell = getattr(self, self.cell_df_dict[fea])\n df_x_all = pd.merge(df_all[['CELLNAME']], df_cell, on='CELLNAME', how='left')\n x_all_list.append(df_x_all.drop(['CELLNAME'], axis=1).values)\n\n # for fea in loader.cell_features:\n # df_cell = getattr(loader, loader.cell_df_dict[fea])\n # df_x_all = pd.merge(df_all[['CELLNAME']], df_cell, on='CELLNAME', how='left')\n # df_x_all[:1000].to_csv('df.{}.1k.csv'.format(fea), index=False, float_format=\"%g\")\n\n drugs = ['NSC1', 'NSC2']\n doses = ['pCONC1', 'pCONC2']\n if switch_drugs:\n drugs = ['NSC2', 'NSC1']\n doses = ['pCONC2', 'pCONC1']\n\n for drug in drugs:\n for fea in self.drug_features:\n df_drug = getattr(self, self.drug_df_dict[fea])\n df_x_all = pd.merge(df_all[[drug]], df_drug, left_on=drug, right_on='NSC', how='left')\n x_all_list.append(df_x_all.drop([drug, 'NSC'], axis=1).values)\n\n for dose in doses:\n x_all_list.append(df_all[dose].values)\n\n # for drug in drugs:\n # 
for fea in loader.drug_features:\n # df_drug = getattr(loader, loader.drug_df_dict[fea])\n # df_x_all = pd.merge(df_all[[drug]], df_drug, left_on=drug, right_on='NSC', how='left')\n # print(df_x_all.shape)\n # df_x_all[:1000].drop([drug], axis=1).to_csv('df.{}.{}.1k.csv'.format(drug, fea), index=False, float_format=\"%g\")\n\n # df_all[:1000].to_csv('df.growth.1k.csv', index=False, float_format=\"%g\")\n\n return x_all_list, y_all, df_all\n\n def load_data_by_index(self, train_index, val_index):\n x_all_list, y_all, df_all = self.load_data_all()\n x_train_list = [x[train_index] for x in x_all_list]\n x_val_list = [x[val_index] for x in x_all_list]\n y_train = y_all[train_index]\n y_val = y_all[val_index]\n df_train = df_all.iloc[train_index, :]\n df_val = df_all.iloc[val_index, :]\n if self.cv_partition == 'disjoint':\n logger.info('Training drugs: {}'.format(set(df_train['NSC1'])))\n logger.info('Validation drugs: {}'.format(set(df_val['NSC1'])))\n elif self.cv_partition == 'disjoint_cells':\n logger.info('Training cells: {}'.format(set(df_train['CELLNAME'])))\n logger.info('Validation cells: {}'.format(set(df_val['CELLNAME'])))\n return x_train_list, y_train, x_val_list, y_val, df_train, df_val\n\n def load_data_cv(self, fold):\n train_index = self.cv_train_indexes[fold]\n val_index = self.cv_val_indexes[fold]\n # print('fold', fold)\n # print(train_index[:5])\n return self.load_data_by_index(train_index, val_index)\n\n def load_data(self):\n if self.cv_partition == 'disjoint':\n train_index = self.df_response[(self.df_response['NSC1'].isin(self.train_drug_ids)) & (self.df_response['NSC2'].isin(self.train_drug_ids))].index\n val_index = self.df_response[(self.df_response['NSC1'].isin(self.val_drug_ids)) & (self.df_response['NSC2'].isin(self.val_drug_ids))].index\n else:\n train_index = range(self.n_train)\n val_index = range(self.n_train, self.total)\n return self.load_data_by_index(train_index, val_index)\n\n def load_data_old(self):\n # bad performance (4x slow) possibly due to incontiguous data\n df_train = self.df_response.iloc[:self.n_train, :]\n df_val = self.df_response.iloc[self.n_train:, :]\n\n y_train = df_train['GROWTH'].values\n y_val = df_val['GROWTH'].values\n\n x_train_list = []\n x_val_list = []\n\n for fea in self.cell_features:\n df_cell = getattr(self, self.cell_df_dict[fea])\n df_x_train = pd.merge(df_train[['CELLNAME']], df_cell, on='CELLNAME', how='left')\n df_x_val = pd.merge(df_val[['CELLNAME']], df_cell, on='CELLNAME', how='left')\n x_train_list.append(df_x_train.drop(['CELLNAME'], axis=1).values)\n x_val_list.append(df_x_val.drop(['CELLNAME'], axis=1).values)\n\n for drug in ['NSC1', 'NSC2']:\n for fea in self.drug_features:\n df_drug = getattr(self, self.drug_df_dict[fea])\n df_x_train = pd.merge(df_train[[drug]], df_drug, left_on=drug, right_on='NSC', how='left')\n df_x_val = pd.merge(df_val[[drug]], df_drug, left_on=drug, right_on='NSC', how='left')\n x_train_list.append(df_x_train.drop([drug, 'NSC'], axis=1).values)\n x_val_list.append(df_x_val.drop([drug, 'NSC'], axis=1).values)\n\n return x_train_list, y_train, x_val_list, y_val, df_train, df_val\n\n\nclass ComboDataGenerator(object):\n \"\"\"Generate training, validation or testing batches from loaded data\n \"\"\"\n def __init__(self, data, partition='train', batch_size=32):\n self.lock = threading.Lock()\n self.data = data\n self.partition = partition\n self.batch_size = batch_size\n\n if partition == 'train':\n self.cycle = cycle(range(data.n_train))\n self.num_data = data.n_train\n elif partition == 
'val':\n self.cycle = cycle(range(data.total)[-data.n_val:])\n self.num_data = data.n_val\n else:\n raise Exception('Data partition \"{}\" not recognized.'.format(partition))\n\n def flow(self):\n \"\"\"Keep generating data batches\n \"\"\"\n while 1:\n self.lock.acquire()\n indices = list(islice(self.cycle, self.batch_size))\n self.lock.release()\n\n df = self.data.df_response.iloc[indices, :]\n y = df['GROWTH'].values\n\n x_list = []\n\n for fea in self.data.cell_features:\n df_cell = getattr(self.data, self.data.cell_df_dict[fea])\n df_x = pd.merge(df[['CELLNAME']], df_cell, on='CELLNAME', how='left')\n x_list.append(df_x.drop(['CELLNAME'], axis=1).values)\n\n for drug in ['NSC1', 'NSC2']:\n for fea in self.data.drug_features:\n df_drug = getattr(self.data, self.data.drug_df_dict[fea])\n df_x = pd.merge(df[[drug]], df_drug, left_on=drug, right_on='NSC', how='left')\n x_list.append(df_x.drop([drug, 'NSC'], axis=1).values)\n\n yield x_list, y\n\n\ndef test_generator(loader):\n gen = ComboDataGenerator(loader).flow()\n x_list, y = next(gen)\n for x in x_list:\n print(x.shape)\n print(y.shape)\n\n\ndef test_loader(loader):\n x_train_list, y_train, x_val_list, y_val = loader.load_data()\n print('x_train shapes:')\n for x in x_train_list:\n print(x.shape)\n print('y_train shape:', y_train.shape)\n\n print('x_val shapes:')\n for x in x_val_list:\n print(x.shape)\n print('y_val shape:', y_val.shape)\n\n\ndef r2(y_true, y_pred):\n SS_res = K.sum(K.square(y_true - y_pred))\n SS_tot = K.sum(K.square(y_true - K.mean(y_true)))\n return (1 - SS_res/(SS_tot + K.epsilon()))\n\n\ndef mae(y_true, y_pred):\n return keras.metrics.mean_absolute_error(y_true, y_pred)\n\n\ndef evaluate_prediction(y_true, y_pred):\n mse = mean_squared_error(y_true, y_pred)\n mae = mean_absolute_error(y_true, y_pred)\n r2 = r2_score(y_true, y_pred)\n corr, _ = pearsonr(y_true, y_pred)\n return {'mse': mse, 'mae': mae, 'r2': r2, 'corr': corr}\n\n\ndef log_evaluation(metric_outputs, description='Comparing y_true and y_pred:'):\n logger.info(description)\n for metric, value in metric_outputs.items():\n logger.info(' {}: {:.4f}'.format(metric, value))\n\n\ndef plot_history(out, history, metric='loss', title=None):\n title = title or 'model {}'.format(metric)\n val_metric = 'val_{}'.format(metric)\n plt.figure(figsize=(8, 6))\n plt.plot(history.history[metric], marker='o')\n plt.plot(history.history[val_metric], marker='d')\n plt.title(title)\n plt.ylabel(metric)\n plt.xlabel('epoch')\n plt.legend(['train_{}'.format(metric), 'val_{}'.format(metric)], loc='upper center')\n png = '{}.plot.{}.png'.format(out, metric)\n plt.savefig(png, bbox_inches='tight')\n\n\nclass LoggingCallback(Callback):\n def __init__(self, print_fcn=print):\n Callback.__init__(self)\n self.print_fcn = print_fcn\n\n def on_epoch_end(self, epoch, logs={}):\n msg = \"[Epoch: %i] %s\" % (epoch, \", \".join(\"%s: %f\" % (k, v) for k, v in sorted(logs.items())))\n self.print_fcn(msg)\n\n\nclass PermanentDropout(Dropout):\n def __init__(self, rate, **kwargs):\n super(PermanentDropout, self).__init__(rate, **kwargs)\n self.uses_learning_phase = False\n\n def call(self, x, mask=None):\n if 0. 
< self.rate < 1.:\n noise_shape = self._get_noise_shape(x)\n x = K.dropout(x, self.rate, noise_shape)\n return x\n\n\nclass ModelRecorder(Callback):\n def __init__(self, save_all_models=False):\n Callback.__init__(self)\n self.save_all_models = save_all_models\n get_custom_objects()['PermanentDropout'] = PermanentDropout\n\n def on_train_begin(self, logs={}):\n self.val_losses = []\n self.best_val_loss = np.Inf\n self.best_model = None\n\n def on_epoch_end(self, epoch, logs={}):\n val_loss = logs.get('val_loss')\n self.val_losses.append(val_loss)\n if val_loss < self.best_val_loss:\n self.best_model = keras.models.clone_model(self.model)\n self.best_val_loss = val_loss\n\n\ndef build_feature_model(input_shape, name='', dense_layers=[1000, 1000],\n activation='relu', residual=False,\n dropout_rate=0, permanent_dropout=True):\n x_input = Input(shape=input_shape)\n h = x_input\n for i, layer in enumerate(dense_layers):\n x = h\n h = Dense(layer, activation=activation)(h)\n if dropout_rate > 0:\n if permanent_dropout:\n h = PermanentDropout(dropout_rate)(h)\n else:\n h = Dropout(dropout_rate)(h)\n if residual:\n try:\n h = keras.layers.add([h, x])\n except ValueError:\n pass\n model = Model(x_input, h, name=name)\n return model\n\n\ndef build_model(loader, args, verbose=False):\n input_models = {}\n dropout_rate = args.dropout\n permanent_dropout = True\n for fea_type, shape in loader.feature_shapes.items():\n box = build_feature_model(input_shape=shape, name=fea_type,\n dense_layers=args.dense_feature_layers,\n dropout_rate=dropout_rate, permanent_dropout=permanent_dropout)\n if verbose:\n box.summary()\n input_models[fea_type] = box\n\n inputs = []\n encoded_inputs = []\n for fea_name, fea_type in loader.input_features.items():\n shape = loader.feature_shapes[fea_type]\n fea_input = Input(shape, name='input.'+fea_name)\n inputs.append(fea_input)\n input_model = input_models[fea_type]\n encoded = input_model(fea_input)\n encoded_inputs.append(encoded)\n\n merged = keras.layers.concatenate(encoded_inputs)\n\n h = merged\n for i, layer in enumerate(args.dense):\n x = h\n h = Dense(layer, activation=args.activation)(h)\n if dropout_rate > 0:\n if permanent_dropout:\n h = PermanentDropout(dropout_rate)(h)\n else:\n h = Dropout(dropout_rate)(h)\n if args.residual:\n try:\n h = keras.layers.add([h, x])\n except ValueError:\n pass\n output = Dense(1)(h)\n\n return Model(inputs, output)\n\n\n\ndef get_combo_parser():\n description = 'Build neural network based models to predict tumor response to drug pairs.'\n parser = argparse.ArgumentParser(prog='combo_baseline', formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description=description)\n return combo.common_parser(parser)\n\n\n# def initialize_parameters():\n# # Get command-line parameters\n# parser = get_combo_parser()\n# args = parser.parse_args()\n# # Get parameters from configuration file\n# file_params = combo.read_config_file(args.config_file)\n# # Consolidate parameter set. 
Command-line parameters overwrite file configuration\n# params = p1_common.args_overwrite_config(args, file_params)\n# # print(params)\n# return params\n\n\ndef initialize_parameters():\n\n # Build benchmark object\n comboBmk = combo.BenchmarkCombo(combo.file_path, 'combo_default_model.txt', 'keras',\n prog='combo_baseline',\n desc = 'Build neural network based models to predict tumor response to drug pairs.')\n\n # Initialize parameters\n gParameters = candle.finalize_parameters(comboBmk)\n #combo.logger.info('Params: {}'.format(gParameters))\n\n return gParameters\n\n\nclass Struct:\n def __init__(self, **entries):\n self.__dict__.update(entries)\n\n\ndef run(params):\n args = Struct(**params)\n set_seed(args.rng_seed)\n ext = extension_from_parameters(args)\n prefix = args.save + ext\n logfile = args.logfile if args.logfile else prefix+'.log'\n set_up_logger(logfile, args.verbose)\n logger.info('Params: {}'.format(params))\n\n loader = ComboDataLoader(seed=args.rng_seed,\n val_split=args.validation_split,\n cell_features=args.cell_features,\n drug_features=args.drug_features,\n response_url=args.response_url,\n use_landmark_genes=args.use_landmark_genes,\n preprocess_rnaseq=args.preprocess_rnaseq,\n exclude_cells=args.exclude_cells,\n exclude_drugs=args.exclude_drugs,\n use_combo_score=args.use_combo_score,\n cv_partition=args.cv_partition, cv=args.cv)\n # test_loader(loader)\n # test_generator(loader)\n\n train_gen = ComboDataGenerator(loader, batch_size=args.batch_size).flow()\n val_gen = ComboDataGenerator(loader, partition='val', batch_size=args.batch_size).flow()\n\n train_steps = int(loader.n_train / args.batch_size)\n val_steps = int(loader.n_val / args.batch_size)\n\n model = build_model(loader, args, verbose=True)\n model.summary()\n # plot_model(model, to_file=prefix+'.model.png', show_shapes=True)\n\n if args.cp:\n model_json = model.to_json()\n with open(prefix+'.model.json', 'w') as f:\n print(model_json, file=f)\n\n def warmup_scheduler(epoch):\n lr = args.learning_rate or base_lr * args.batch_size/100\n if epoch <= 5:\n K.set_value(model.optimizer.lr, (base_lr * (5-epoch) + lr * epoch) / 5)\n logger.debug('Epoch {}: lr={}'.format(epoch, K.get_value(model.optimizer.lr)))\n return K.get_value(model.optimizer.lr)\n\n df_pred_list = []\n\n cv_ext = ''\n cv = args.cv if args.cv > 1 else 1\n\n fold = 0\n while fold < cv:\n if args.cv > 1:\n logger.info('Cross validation fold {}/{}:'.format(fold+1, cv))\n cv_ext = '.cv{}'.format(fold+1)\n\n model = build_model(loader, args)\n\n optimizer = optimizers.deserialize({'class_name': args.optimizer, 'config': {}})\n base_lr = args.base_lr or K.get_value(optimizer.lr)\n if args.learning_rate:\n K.set_value(optimizer.lr, args.learning_rate)\n\n model.compile(loss=args.loss, optimizer=optimizer, metrics=[mae, r2])\n\n # calculate trainable and non-trainable params\n # params.update(compute_trainable_params(model))\n\n # candle_monitor = CandleRemoteMonitor(params=params)\n # timeout_monitor = TerminateOnTimeOut(params['timeout'])\n\n reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=5, min_lr=0.00001)\n warmup_lr = LearningRateScheduler(warmup_scheduler)\n checkpointer = ModelCheckpoint(prefix+cv_ext+'.weights.h5', save_best_only=True, save_weights_only=True)\n tensorboard = TensorBoard(log_dir=\"tb/tb{}{}\".format(ext, cv_ext))\n history_logger = LoggingCallback(logger.debug)\n model_recorder = ModelRecorder()\n\n callbacks = [history_logger, model_recorder]\n # callbacks = [candle_monitor, timeout_monitor, 
history_logger, model_recorder]\n if args.reduce_lr:\n callbacks.append(reduce_lr)\n if args.warmup_lr:\n callbacks.append(warmup_lr)\n if args.cp:\n callbacks.append(checkpointer)\n if args.tb:\n callbacks.append(tensorboard)\n\n if args.gen:\n history = model.fit_generator(train_gen, train_steps,\n epochs=args.epochs,\n callbacks=callbacks,\n validation_data=val_gen, validation_steps=val_steps)\n else:\n if args.cv > 1:\n x_train_list, y_train, x_val_list, y_val, df_train, df_val = loader.load_data_cv(fold)\n else:\n x_train_list, y_train, x_val_list, y_val, df_train, df_val = loader.load_data()\n\n y_shuf = np.random.permutation(y_val)\n log_evaluation(evaluate_prediction(y_val, y_shuf),\n description='Between random pairs in y_val:')\n history = model.fit(x_train_list, y_train,\n batch_size=args.batch_size,\n shuffle=args.shuffle,\n epochs=args.epochs,\n callbacks=callbacks,\n validation_data=(x_val_list, y_val))\n\n if args.cp:\n model.load_weights(prefix+cv_ext+'.weights.h5')\n\n if not args.gen:\n y_val_pred = model.predict(x_val_list, batch_size=args.batch_size).flatten()\n scores = evaluate_prediction(y_val, y_val_pred)\n if args.cv > 1 and scores[args.loss] > args.max_val_loss:\n logger.warn('Best val_loss {} is greater than {}; retrain the model...'.format(scores[args.loss], args.max_val_loss))\n continue\n else:\n fold += 1\n log_evaluation(scores)\n df_val.is_copy = False\n df_val['GROWTH_PRED'] = y_val_pred\n df_val['GROWTH_ERROR'] = y_val_pred - y_val\n df_pred_list.append(df_val)\n\n if args.cp:\n # model.save(prefix+'.model.h5')\n model_recorder.best_model.save(prefix+'.model.h5')\n\n # test reloadded model prediction\n new_model = keras.models.load_model(prefix+'.model.h5')\n new_model.load_weights(prefix+cv_ext+'.weights.h5')\n new_pred = new_model.predict(x_val_list, batch_size=args.batch_size).flatten()\n # print('y_val:', y_val[:10])\n # print('old_pred:', y_val_pred[:10])\n # print('new_pred:', new_pred[:10])\n\n plot_history(prefix, history, 'loss')\n plot_history(prefix, history, 'r2')\n\n if K.backend() == 'tensorflow':\n K.clear_session()\n\n pred_fname = prefix + '.predicted.growth.tsv'\n if args.use_combo_score:\n pred_fname = prefix + '.predicted.score.tsv'\n df_pred = pd.concat(df_pred_list)\n df_pred.to_csv(pred_fname, sep='\\t', index=False, float_format='%.4g')\n\n logger.handlers = []\n\n return history\n\n\ndef main():\n params = initialize_parameters()\n run(params)\n\n\nif __name__ == '__main__':\n main()\n if K.backend() == 'tensorflow':\n K.clear_session()\n", "import numpy as np\nimport scipy as sp\nimport pickle\nimport sys, os, json\nimport argparse\nimport h5py\nimport logging\ntry:\n reload # Python 2.7\nexcept NameError:\n try:\n from importlib import reload # Python 3.4+\n except ImportError:\n from imp import reload # Python 3.0 - 3.3\n\nTIMEOUT=3600 # in sec; set this to -1 for no timeout\nfile_path = os.path.dirname(os.path.realpath(__file__))\n#lib_path = os.path.abspath(os.path.join(file_path, '..', 'common'))\n#sys.path.append(lib_path)\nlib_path2 = os.path.abspath(os.path.join(file_path, '..','..', 'common'))\nsys.path.append(lib_path2)\n\nfrom keras import backend as K\n\nimport p2b1\nimport candle\n\nimport p2b1_AE_models as AE_models\n\nHOME = os.environ['HOME']\n\nlogger = logging.getLogger(__name__)\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\ndef parse_list(option, opt, value, parser):\n setattr(parser.values, option.dest, value.split(','))\n\n\ndef str2bool(v):\n return v.lower() in (\"yes\", \"true\", \"t\", \"1\")\n\n\ndef 
initialize_parameters(default_model = 'p2b1_default_model.txt'):\n\n # Build benchmark object\n p2b1Bmk = p2b1.BenchmarkP2B1(p2b1.file_path, default_model, 'keras',\n prog='p2b1_baseline', desc='Train Molecular Frame Autoencoder - Pilot 2 Benchmark 1')\n\n # Initialize parameters\n GP = candle.finalize_parameters(p2b1Bmk)\n #p2b1.logger.info('Params: {}'.format(gParameters))\n\n print ('\\nTraining parameters:')\n for key in sorted(GP):\n print (\"\\t%s: %s\" % (key, GP[key]))\n\n # print json.dumps(GP, indent=4, skipkeys=True, sort_keys=True)\n\n if GP['backend'] != 'theano' and GP['backend'] != 'tensorflow':\n sys.exit('Invalid backend selected: %s' % GP['backend'])\n\n os.environ['KERAS_BACKEND'] = GP['backend']\n reload(K)\n '''\n if GP['backend'] == 'theano':\n K.set_image_dim_ordering('th')\n elif GP['backend'] == 'tensorflow':\n K.set_image_dim_ordering('tf')\n '''\n K.set_image_data_format('channels_last')\n#\"th\" format means that the convolutional kernels will have the shape (depth, input_depth, rows, cols)\n\n#\"tf\" format means that the convolutional kernels will have the shape (rows, cols, input_depth, depth)\n print (\"Image data format: \", K.image_data_format())\n# print \"Image ordering: \", K.image_dim_ordering()\n return GP\n\n\ndef run(GP):\n\n # set the seed\n if GP['rng_seed']:\n np.random.seed(GP['rng_seed'])\n else:\n np.random.seed(np.random.randint(10000))\n\n # Set paths\n if not os.path.isdir(GP['home_dir']):\n print ('Keras home directory not set')\n sys.exit(0)\n sys.path.append(GP['home_dir'])\n\n # Setup loggin\n args = candle.ArgumentStruct(**GP)\n# set_seed(args.rng_seed)\n# ext = extension_from_parameters(args)\n candle.verify_path(args.save_path)\n prefix = args.save_path # + ext\n logfile = args.logfile if args.logfile else prefix+'.log'\n candle.set_up_logger(logfile, logger, False) #args.verbose\n logger.info('Params: {}'.format(GP))\n\n import p2b1 as hf\n reload(hf)\n\n #import keras_model_utils as KEU\n #reload(KEU)\n #reload(p2ck)\n #reload(p2ck.optimizers)\n maps = hf.autoencoder_preprocess()\n\n from keras.optimizers import SGD, RMSprop, Adam\n from keras.datasets import mnist\n from keras.callbacks import LearningRateScheduler, ModelCheckpoint\n from keras import callbacks\n from keras.layers.advanced_activations import ELU\n from keras.preprocessing.image import ImageDataGenerator\n\n# GP=hf.ReadConfig(opts.config_file)\n batch_size = GP['batch_size']\n learning_rate = GP['learning_rate']\n kerasDefaults = candle.keras_default_config()\n\n##### Read Data ########\n import helper\n (data_files, fields)=p2b1.get_list_of_data_files(GP)\n # Read from local directoy\n #(data_files, fields) = helper.get_local_files('/p/gscratchr/brainusr/datasets/cancer/pilot2/3k_run16_10us.35fs-DPPC.20-DIPC.60-CHOL.20.dir/')\n #(data_files, fields) = helper.get_local_files('3k_run16', '/p/lscratchf/brainusr/datasets/cancer/pilot2/')\n\n # Define datagenerator\n datagen = hf.ImageNoiseDataGenerator(corruption_level=GP['noise_factor'])\n\n # get data dimension ##\n num_samples = 0\n for f in data_files:\n\n # Seperate different arrays from the data\n (X, nbrs, resnums) = helper.get_data_arrays(f)\n\n num_samples += X.shape[0]\n\n (X, nbrs, resnums) = helper.get_data_arrays(data_files[0])\n print ('\\nData chunk shape: ', X.shape)\n\n molecular_hidden_layers = GP['molecular_num_hidden']\n if not molecular_hidden_layers:\n X_train = hf.get_data(X, case=GP['case'])\n input_dim = X_train.shape[1]\n else:\n # computing input dimension for outer AE\n input_dim = 
X.shape[1]*molecular_hidden_layers[-1]\n\n print ('\\nState AE input/output dimension: ', input_dim)\n\n # get data dimension for molecular autoencoder\n molecular_nbrs = np.int(GP['molecular_nbrs'])\n num_molecules = X.shape[1]\n num_beads = X.shape[2]\n\n if GP['nbr_type'] == 'relative':\n # relative x, y, z positions\n num_loc_features = 3\n loc_feat_vect = ['rel_x', 'rel_y', 'rel_z']\n elif GP['nbr_type'] == 'invariant':\n # relative distance and angle\n num_loc_features = 2\n loc_feat_vect = ['rel_dist', 'rel_angle']\n else:\n print ('Invalid nbr_type!!')\n exit()\n\n if not GP['type_bool']:\n # only consider molecular location coordinates\n num_type_features = 0\n type_feat_vect = []\n else:\n num_type_features = 5\n type_feat_vect = list(fields.keys())[3:8]\n\n num_features = num_loc_features + num_type_features + num_beads\n dim = np.prod([num_beads, num_features, molecular_nbrs+1])\n bead_kernel_size = num_features\n molecular_input_dim = dim\n mol_kernel_size = num_beads\n\n feature_vector = loc_feat_vect + type_feat_vect + list(fields.keys())[8:]\n\n print ('\\nMolecular AE input/output dimension: ', molecular_input_dim)\n\n print ('\\nData Format:\\n[Frames (%s), Molecules (%s), Beads (%s), %s (%s)]' % (\n num_samples, num_molecules, num_beads, feature_vector, num_features))\n\n### Define Model, Solver and Compile ##########\n print ('\\nDefine the model and compile')\n opt = candle.build_optimizer(GP['optimizer'], learning_rate, kerasDefaults)\n model_type = 'mlp'\n memo = '%s_%s' % (GP['base_memo'], model_type)\n\n######## Define Molecular Model, Solver and Compile #########\n molecular_nonlinearity = GP['molecular_nonlinearity']\n\n len_molecular_hidden_layers = len(molecular_hidden_layers)\n conv_bool = GP['conv_bool']\n full_conv_bool = GP['full_conv_bool']\n if conv_bool:\n molecular_model, molecular_encoder = AE_models.conv_dense_mol_auto(bead_k_size=bead_kernel_size,\n mol_k_size=mol_kernel_size,\n weights_path=None,\n input_shape=(1, molecular_input_dim, 1),\n nonlinearity=molecular_nonlinearity,\n hidden_layers=molecular_hidden_layers,\n l2_reg=GP['l2_reg'],\n drop=float(GP['dropout']))\n elif full_conv_bool:\n molecular_model, molecular_encoder = AE_models.full_conv_mol_auto(bead_k_size=bead_kernel_size,\n mol_k_size=mol_kernel_size,\n weights_path=None,\n input_shape=(1, molecular_input_dim, 1),\n nonlinearity=molecular_nonlinearity,\n hidden_layers=molecular_hidden_layers,\n l2_reg=GP['l2_reg'],\n drop=float(GP['dropout']))\n\n else:\n molecular_model, molecular_encoder = AE_models.dense_auto(weights_path=None, input_shape=(molecular_input_dim,),\n nonlinearity=molecular_nonlinearity,\n hidden_layers=molecular_hidden_layers,\n l2_reg=GP['l2_reg'],\n drop=float(GP['dropout']))\n\n if GP['loss'] == 'mse':\n loss_func = 'mse'\n elif GP['loss'] == 'custom':\n loss_func = helper.combined_loss\n\n molecular_model.compile(optimizer=opt, loss=loss_func, metrics=['mean_squared_error', 'mean_absolute_error'])\n print ('\\nModel Summary: \\n')\n molecular_model.summary()\n ##### set up callbacks and cooling for the molecular_model ##########\n drop = GP['dropout']\n mb_epochs = GP['epochs']\n initial_lrate = GP['learning_rate']\n epochs_drop = 1+int(np.floor(mb_epochs/3))\n\n def step_decay(epoch):\n global initial_lrate, epochs_drop, drop\n lrate = initial_lrate * np.power(drop, np.floor((1+epoch)/epochs_drop))\n return lrate\n\n lr_scheduler = LearningRateScheduler(step_decay)\n history = callbacks.History()\n # callbacks=[history,lr_scheduler]\n\n history_logger = 
candle.LoggingCallback(logger.debug)\n candleRemoteMonitor = candle.CandleRemoteMonitor(params=GP)\n timeoutMonitor = candle.TerminateOnTimeOut(TIMEOUT)\n callbacks = [history, history_logger, candleRemoteMonitor, timeoutMonitor]\n loss = 0.\n\n#### Save the Model to disk\n if GP['save_path'] != None:\n save_path = GP['save_path']\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n else:\n save_path = '.'\n\n model_json = molecular_model.to_json()\n with open(save_path + '/model.json', \"w\") as json_file:\n json_file.write(model_json)\n\n encoder_json = molecular_encoder.to_json()\n with open(save_path + '/encoder.json', \"w\") as json_file:\n json_file.write(encoder_json)\n\n print('Saved model to disk')\n\n#### Train the Model\n if GP['train_bool']:\n ct = hf.Candle_Molecular_Train(molecular_model, molecular_encoder, data_files, mb_epochs, callbacks,\n batch_size=batch_size, nbr_type=GP['nbr_type'], save_path=GP['save_path'],\n len_molecular_hidden_layers=len_molecular_hidden_layers,\n molecular_nbrs=molecular_nbrs,\n conv_bool=conv_bool,\n full_conv_bool=full_conv_bool,\n type_bool=GP['type_bool'],\n sampling_density=GP['sampling_density'])\n frame_loss, frame_mse = ct.train_ac()\n else:\n frame_mse = []\n frame_loss = []\n\n return frame_loss, frame_mse\n\ndef main():\n\n gParameters = initialize_parameters()\n run(gParameters)\n\nif __name__ == '__main__':\n main()\n try:\n K.clear_session()\n except AttributeError: # theano does not have this function\n pass\n", "import torch\nimport torch.nn as nn\n\nfrom darts.api import Model\nfrom darts.genotypes import LINEAR_PRIMITIVES\nfrom darts.modules.operations.linear import OPS\n\n\nclass MixedLayer(Model):\n \"\"\" A mixture of 8 unit types\n\n We use weights to aggregate these outputs while training.\n and softmax to select the strongest edges while inference.\n \"\"\"\n def __init__(self, c, stride):\n super(MixedLayer, self).__init__()\n self.reset(c, stride)\n\n def reset(self, c, stride):\n self.layers = nn.ModuleList()\n\n for primitive in LINEAR_PRIMITIVES:\n layer = OPS[primitive](c, stride, False)\n\n if 'pool' in primitive:\n layer = nn.Sequential(layer, nn.BatchNorm1d(c, affine=False))\n\n self.layers.append(layer)\n\n def forward(self, x, weights):\n \"\"\"\n Parameters\n ----------\n x : torch.tensor\n Data\n\n Weights : torch.tensor\n alpha, [op_num:8], the output = sum of alpha * op(x)\n \"\"\"\n x = [w * layer(x) for w, layer in zip(weights, self.layers)]\n return sum(x)\n" ]
[ [ "pandas.merge", "sklearn.metrics.r2_score", "sklearn.metrics.mean_absolute_error", "sklearn.metrics.mean_squared_error", "matplotlib.pyplot.plot", "numpy.digitize", "numpy.arange", "scipy.stats.stats.pearsonr", "sklearn.model_selection.StratifiedKFold", "matplotlib.pyplot.figure", "pandas.concat", "matplotlib.pyplot.title", "matplotlib.pyplot.savefig", "tensorflow.set_random_seed", "sklearn.model_selection.GroupKFold", "matplotlib.pyplot.ylabel", "numpy.random.seed", "matplotlib.use", "numpy.percentile", "numpy.random.normal", "numpy.random.permutation", "numpy.prod", "matplotlib.pyplot.xlabel", "pandas.get_dummies" ], [ "numpy.random.seed", "numpy.int", "numpy.floor", "numpy.prod", "numpy.random.randint" ], [ "torch.nn.BatchNorm1d", "torch.nn.ModuleList" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [ "0.13", "1.6", "0.14", "0.15", "1.4", "0.16", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3" ], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
salvacarrion/nmt-continual-learning
[ "302147ac9c270f3341a68a72c803c457f05ff37b" ]
[ "mt/preprocess/1_process_raw.py" ]
[ "import os\nimport pandas as pd\nfrom pathlib import Path\nimport numpy as np\n\nfrom mt import RAW_PATH\nfrom mt import utils\n\nSUFFLE = True\nCONSTRAINED = True\n\nTR_DATA_PATH = \"/home/salva/Documents/Programming/Datasets/scielo/originals/scielo-gma/scielo-gma\"\nTR_RAW_FILES = [\"es-en-gma-biological.csv\", \"es-en-gma-health.csv\", \"fr-en-gma-health.csv\",\n \"pt-en-gma-biological.csv\", \"pt-en-gma-health.csv\"]\n\nTS_DATA_PATH = \"/home/salva/Documents/Programming/Datasets/scielo/originals/testset-gma/testset_gma\"\nTS_RAW_FILES = [\"test-gma-en2es-biological.csv\", \"test-gma-en2es-health.csv\", \"test-gma-en2fr-health.csv\",\n \"test-gma-en2pt-biological.csv\", \"test-gma-en2pt-health.csv\", \"test-gma-es2en-biological.csv\",\n \"test-gma-es2en-health.csv\", \"test-gma-fr2en-health.csv\", \"test-gma-pt2en-biological.csv\",\n \"test-gma-pt2en-health.csv\"]\n\n\n# Create path if doesn't exists\npath = Path(RAW_PATH)\npath.mkdir(parents=True, exist_ok=True)\n\n# Process splits train/test files\nfor split in [\"train\", \"test\"]:\n\n # Select split to process\n if split == \"train\":\n print(\"Processing training files...\")\n DATA_PATH = TR_DATA_PATH\n RAW_FILES = TR_RAW_FILES\n istrain = True\n\n elif split == \"test\":\n print(\"Processing test files...\")\n DATA_PATH = TS_DATA_PATH\n RAW_FILES = TS_RAW_FILES\n istrain = False\n\n else:\n raise ValueError(\"Invalid split name\")\n\n # Process raw files\n for fname in RAW_FILES:\n # Read file\n print(f\"Reading file... ({fname})\")\n filename = os.path.join(DATA_PATH, fname)\n df = pd.read_csv(filename)\n\n # Limit dataset\n domain = utils.get_domain(fname)\n SRC_LANG, TRG_LANG = utils.get_langs(fname, istrain=istrain)\n\n # Clean dataset (basic)\n total_old = len(df)\n df = utils.preprocess_dataset(df, src_col=SRC_LANG, trg_col=TRG_LANG)\n\n # Shuffle dataset\n if SUFFLE:\n np.random.seed(123)\n np.random.shuffle(df.values)\n\n if CONSTRAINED and istrain:\n if domain == \"health\" and \"es\" in {SRC_LANG, TRG_LANG}:\n max_size = 123597 # Biological rows\n print(f\"Limiting size to {max_size}\")\n df = df[:max_size]\n elif domain == \"health\" and \"pt\" in {SRC_LANG, TRG_LANG}:\n max_size = 120301 # Biological rows\n print(f\"Limiting size to {max_size}\")\n df = df[:max_size]\n\n # Stats\n total_doctypes = df['doctype'].value_counts()\n removed = total_old - len(df)\n print(f\"Stats for: {fname} **************************\")\n print(f\"\\t- Documents: {len(set(df['docid']))}\")\n print(f\"\\t- Sentences: {len(df)}\")\n print(\"\\t\\t- Removed: {} ({:.2f}%)\".format(removed, removed / total_old * 100))\n print(\"\\t- Titles/Abstracts: {}/{} ({:.2f}%)\".format(total_doctypes['title'], total_doctypes['text'],\n total_doctypes['title'] / total_doctypes['text'] * 100))\n\n # Save data\n df.to_csv(os.path.join(RAW_PATH, fname), index=False)\n print(\"File saved!\")\n print(\"\")\n\nprint(\"Done!\")\n" ]
[ [ "numpy.random.shuffle", "pandas.read_csv", "numpy.random.seed" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
JakobHavtorn/es-rl
[ "30d81ad908a30e78d03c83d37454dbe8e05d1452" ]
[ "data-analysis/analyze_E017+020.py" ]
[ "import os\nfrom distutils.dir_util import copy_tree\nimport warnings\n\nimport IPython\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport scipy as sp\nimport torch\n\nfrom context import utils\nimport utils.filesystem as fs\nimport utils.plotting as plot\nfrom utils.data_analysis import invert_signs, load_stats\nfrom utils.misc import get_equal_dicts, length_of_longest\n\n\ndef create_plots(stats_list, keys_to_plot, groups, result_dir, include_val=True):\n n_keys = len(keys_to_plot)\n n_chars = len(str(n_keys))\n f = ' {:' + str(n_chars) + 'd}/{:' + str(n_chars) + 'd} monitored keys plotted'\n groups_org = groups.copy()\n for i_key, k in enumerate(keys_to_plot):\n # Get data and subset only those series that are done (or the one that is the longest)\n groups = groups_org.copy()\n list_of_series = [s[k].tolist() for s in stats_list if k in s]\n list_of_genera = [s['generations'].tolist() for s in stats_list if k in s] \n l = length_of_longest(list_of_series)\n indices = [i for i, series in enumerate(list_of_series) if len(series) == l]\n groups = groups[indices]\n list_of_series = [list_of_series[i] for i in indices]\n list_of_genera = [list_of_genera[i] for i in indices]\n\n # Validation series\n if include_val:\n val_k = k[:-4] + '_val'\n list_of_series_val = [s[val_k].tolist() for i, s in enumerate(stats_list) if val_k in s and i in indices]\n if include_val and not len(list_of_series_val) == 0:\n list_of_genera_val = [np.where(~np.isnan(l))[0].tolist() for l in list_of_series_val]\n list_of_genera.extend(list_of_genera_val)\n list_of_series_val = [np.array(l) for l in list_of_series_val]\n list_of_series_val = [l[~np.isnan(l)].tolist() for l in list_of_series_val]\n list_of_series.extend(list_of_series_val)\n groups_val = np.array([g + ', validation' for g in groups])\n groups = np.append(groups, groups_val)\n\n if k is 'return_val':\n IPython.embed()\n # Sort\n list_of_genera = [x for _,x in sorted(zip(groups.tolist(), list_of_genera))]\n list_of_series = [x for _,x in sorted(zip(groups.tolist(), list_of_series))]\n groups.sort()\n\n # Plot\n plot.timeseries_mean_grouped(list_of_genera, list_of_series, groups, xlabel='generations', ylabel=k, map_labels='supervised')\n if 'return' in k:\n plt.gca().set_ylim(0, 1.5)\n elif 'accuracy' in k:\n plt.gca().set_ylim(0.4, 1)\n plt.savefig(os.path.join(result_dir, k + '-all-series-mean-sd' + '.pdf'), bbox_inches='tight')\n plt.close()\n # Progress\n if i_key + 1 == n_keys:\n print(f.format(i_key+1, n_keys), end='\\n')\n else:\n print(f.format(i_key+1, n_keys), end='\\r')\n\n\ndef get_directories(experiment_id):\n # Get directories to analyze\n this_file_dir_local = os.path.dirname(os.path.abspath(__file__))\n package_root_this_file = fs.get_parent(this_file_dir_local, 'es-rl')\n d = os.path.join(package_root_this_file, 'experiments', 'checkpoints', experiment_id)\n directories = [os.path.join(d, di) for di in os.listdir(d) if os.path.isdir(os.path.join(d, di))]\n directories = [d for d in directories if 'monitoring' not in d and 'analysis' not in d]\n # Create result directory\n result_dir = os.path.join(d, str(experiment_id[:4]))\n dst_dir = '/home/jakob/Dropbox/Apps/ShareLaTeX/Master\\'s Thesis/graphics/' + experiment_id[:4]\n if not os.path.exists(result_dir + '-bn-analysis'):\n os.mkdir(result_dir + '-bn-analysis'),\n if not os.path.exists(result_dir + '-init-analysis'):\n os.mkdir(result_dir + '-init-analysis')\n return directories, result_dir, dst_dir\n\n\ndef load(experiment_id, 
optimizer):\n stats_init = []\n stats_bn = []\n groups_init = np.array([])\n groups_bn = np.array([])\n for d in directories:\n try:\n st = pd.read_csv(os.path.join(d, 'stats.csv'))\n with open(os.path.join(d, 'init.log'), 'r') as f:\n s = f.read()\n if 'MNISTNetNoInit' in s:\n groups_init = np.append(groups_init, 'Default init' + optimizer) # Has BN\n stats_init.append(st)\n elif 'MNISTNetNoBN' in s:\n groups_bn = np.append(groups_bn, 'No Batchnorm' + optimizer) # Has Xavier Glorot\n stats_bn.append(st)\n else:\n groups_bn = np.append(groups_bn, 'Batchnorm' + optimizer) # Has Xavier Glorot\n groups_init = np.append(groups_init, 'Xavier-Glorot' + optimizer) # Has BN\n stats_init.append(st)\n stats_bn.append(st)\n except:\n print(\"None in: \" + d)\n return stats_init, stats_bn, groups_init, groups_bn\n\n\nif __name__ == '__main__':\n # Ignore warnings from matplotlib\n warnings.filterwarnings(\"ignore\", module=\"matplotlib\")\n # Font setting\n matplotlib.rcParams.update({'font.size': 12})\n # Experiment IDs\n experiment_ids = ['E017-bn-init', 'E020-bn-init']\n # Optimizer labels\n # optimizers = [', SGD', ', ADAM']\n optimizers = ['', '']\n # Keys to analyze\n keys_to_plot = {'return_unp', 'return_avg', 'accuracy_unp', 'accuracy_avg', 'sigma'}\n # Analyze\n for experiment_id, optimizer in zip(experiment_ids, optimizers):\n # Get directories\n directories, result_dir, dst_dir = get_directories(experiment_id)\n if len(directories) == 0:\n print('No results for {}'.format(experiment_id))\n continue\n\n # Load data\n stats_init, stats_bn, groups_init, groups_bn = load(experiment_id, optimizer)\n\n # Plot\n invert_signs(stats_init)\n invert_signs(stats_bn)\n create_plots(stats_init, keys_to_plot, groups_init, result_dir + '-init-analysis', include_val=True)\n create_plots(stats_bn, keys_to_plot, groups_bn, result_dir + '-bn-analysis', include_val=True)\n \n copy_tree(result_dir + '-init-analysis', dst_dir + '-init-analysis')\n copy_tree(result_dir + '-bn-analysis', dst_dir + '-bn-analysis')\n " ]
[ [ "matplotlib.pyplot.gca", "numpy.isnan", "numpy.append", "matplotlib.rcParams.update", "matplotlib.pyplot.close", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
RosettaCommons/RFDesign
[ "b404b8b2c57f89c047529c30259aeeb8f6012b61", "b404b8b2c57f89c047529c30259aeeb8f6012b61", "9fea2bafbbb7cbf702c9884e8b3ec69ed50ff2f5" ]
[ "se3_transformer/model/layers/linear.py", "inpainting/model/ss_features.py", "scripts/RosettaTR/utils.py" ]
[ "# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n#\n# SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES\n# SPDX-License-Identifier: MIT\n\n\nfrom typing import Dict\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch import Tensor\n\nfrom se3_transformer.model.fiber import Fiber\n\n\nclass LinearSE3(nn.Module):\n \"\"\"\n Graph Linear SE(3)-equivariant layer, equivalent to a 1x1 convolution.\n Maps a fiber to a fiber with the same degrees (channels may be different).\n No interaction between degrees, but interaction between channels.\n\n type-0 features (C_0 channels) ────> Linear(bias=False) ────> type-0 features (C'_0 channels)\n type-1 features (C_1 channels) ────> Linear(bias=False) ────> type-1 features (C'_1 channels)\n :\n type-k features (C_k channels) ────> Linear(bias=False) ────> type-k features (C'_k channels)\n \"\"\"\n\n def __init__(self, fiber_in: Fiber, fiber_out: Fiber):\n super().__init__()\n self.weights = nn.ParameterDict({\n str(degree_out): nn.Parameter(\n torch.randn(channels_out, fiber_in[degree_out]) / np.sqrt(fiber_in[degree_out]))\n for degree_out, channels_out in fiber_out\n })\n\n def forward(self, features: Dict[str, Tensor], *args, **kwargs) -> Dict[str, Tensor]:\n return {\n degree: self.weights[degree] @ features[degree]\n for degree, weight in self.weights.items()\n }\n", "import sys, os\nimport time\nimport numpy as np\nimport torch\nimport torch.nn as nn\n\n# distributed data parallel\nfrom icecream import ic\nimport util\n\n\nsys.path.append('./model')\nimport RoseTTAFoldModel\n\nSS2IDX = {'L':0, # change SS id to integer \n 'E':1,\n 'H':2,\n 'U':3}\n\nIDX2SS = {val:key for key,val in SS2IDX.items()}\n\ndef ss_to_tensor(ss):\n \"\"\"\n Turn secondary structure annotations into integers \n\n Parameters:\n ss (numpy.ndarray): Array with dtype'<U1' (strings?)\n\n Returns:\n ss_out: torch.tensor \n \"\"\"\n ss_out = []\n for i_ss, assignment in enumerate(ss):\n\n ss_out.append(SS2IDX[assignment]) # convert ss assignment to integer \n \n return nn.functional.one_hot( torch.tensor(ss_out), num_classes=4)\n\n\ndef zero_ss_information(ss_feats, zero_inf_mask, zero_inf_state):\n \"\"\"\n Apply the zero information state to any SS tokens with True in zero_inf_mask\n\n \"\"\"\n #NOTE: This function broken if num ss dimensions is 3 instead of 4 \n\n N = zero_inf_mask.sum() # total number of positions that need zero information \n \n 
if zero_inf_state == 'unknown':\n # apply the unknown token (4) to all positions which are True \n replacement = nn.functional.one_hot( torch.full((N,), 3), num_classes=4).float()\n\n elif zero_inf_state == 'uniform':\n # apply uniform distribution over SS where True \n a = 1/3\n replacement = torch.tensor([a,a,a,0.])[None].repeat(N,1)\n\n ss_feats[zero_inf_mask] = replacement \n\n return ss_feats \n\n\ndef get_chunk_boundaries(L, max_chunks, min_chunks=1):\n \"\"\"\n Get indices of the boundaries that would divide an array into chunks of random size \n \n Parameters:\n L (int, required): Length of protein (or crop)\n \n max_chunks (int, required): Maximum number of chunks to make \n \n min_chunks (int, optional): Min number of chunks \n \"\"\"\n assert (max_chunks > 0) and (min_chunks > 0)\n \n N = np.random.randint(min_chunks, max_chunks) # random number of chunks \n \n # number of boundaries is 1 less than number of chunks\n # L-1 so that when we add 1 later we don't have boundaries at non-existent indices\n boundaries = torch.randperm(L-1)[:N] \n sorted_, indices = torch.sort(boundaries)\n \n # add 1 to ensure we don't have a boundary at 0\n return sorted_ + 1\n\n\ndef smear_ss_chunks(ss_hot, boundaries):\n \"\"\"\n Smear secondary structure content according to boundaries\n \"\"\"\n assert len(ss_hot.shape) == 2 # (L,?)\n ss_smeared = ss_hot.clone()\n N = len(boundaries)\n\n # fence post bug 1: end can never be 0, else error \n start=0\n for i,end in enumerate(boundaries):\n end = int(end)\n \n chunk = ss_hot[start:end]\n avg_chunk = torch.mean(chunk, dim=0) # mean ss features along the chunk\n\n \n ss_smeared[start:end] = avg_chunk \n \n start = end \n \n # fence post bug 2: do the last remaining chunk \n chunk = ss_hot[end:]\n avg_chunk = torch.mean(chunk, dim=0)\n ss_smeared[end:] = avg_chunk\n \n return ss_smeared\n\n\ndef sample_swap_ss(orig_ss_hot, use_unknown=False, frac_mut=0.25):\n \"\"\"\n Given the original one-hot encoding of the secondary structure, \"mutate\" certain positions\n to have RANDOM secondary structure\n\n Parameters:\n orig_ss_hot (torch.tensor, required): Original one hot encoding of secondary sructure\n\n use_unkown (bool, optional): If true, allowed to sample \"uknown\" ss tokens, else just helix/sheet/loop\n\n frac_mut (float, optional): Fraction of residues to randomly sample SS for\n \"\"\"\n new_ss_hot = torch.clone(orig_ss_hot)\n\n classes = 4 if use_unknown else 3\n L = new_ss_hot.shape[0]\n\n # create the uniform distribution over L residues\n dist = torch.ones(L,classes)\n div = dist.sum(dim=-1)[:,None]\n dist /= div\n\n # take a single sample at each position and one hot encode\n sampled_ss_hot = nn.functional.one_hot( torch.multinomial(dist, 1).squeeze(), num_classes=4).float()\n\n mask = torch.rand(L) < frac_mut # bool mask\n\n # swap the sampled ss at positions where mask is true\n new_ss_hot[mask] = sampled_ss_hot[mask]\n\n return new_ss_hot\n\n\ndef maskAndCatExtra1D(t1d, extra_t1d, mask_dict, chosen_task):\n \"\"\"\n Takes extra 1D features, masks them, and concatenates them to existing 1D features \n\n Parameters:\n t1d (torch.tensor, required): Traditional 22 dim 1D template features \n\n extra_t1d (dict, required): Dict of extra 1d feature tensors\n\n mask_dict (dict, required): Dictionary of masks \n\n chosen_task (str, required): current task, in case masking should be conditioned on it \n \"\"\"\n t1d_out = [t1d] # list of tensors to append to and then concat \n \n extra_keys = list(extra_t1d.keys())\n for key in extra_keys:\n\n\n # 
(1) mask the features \n feats = extra_t1d[key]\n mask = mask_dict[key]\n \n # Mask must come in a form which can be added \n # to perform the desired operation \n feats = feats + mask \n\n # a bit hacky: check if there is a mismatch in the second dimension\n # This occurs in seq2str mode, \n if (t1d.shape[1] != feats[None,...].shape[1]):\n repeats = t1d.shape[1]\n feats = feats.repeat(repeats,1,1)\n\n\n t1d_out.append(feats[None,...]) # expand first dim to be able to concat \n\n \n t1d_out = torch.cat(t1d_out, dim=-1)\n\n return t1d_out \n\n\ndef init_lecun_normal(shape, scale=1.0):\n def truncated_normal(uniform, mu=0.0, sigma=1.0, a=-2, b=2):\n normal = torch.distributions.normal.Normal(0, 1)\n\n alpha = (a - mu) / sigma\n beta = (b - mu) / sigma\n\n alpha_normal_cdf = normal.cdf(torch.tensor(alpha))\n p = alpha_normal_cdf + (normal.cdf(torch.tensor(beta)) - alpha_normal_cdf) * uniform\n\n v = torch.clamp(2 * p - 1, -1 + 1e-8, 1 - 1e-8)\n x = mu + sigma * np.sqrt(2) * torch.erfinv(v)\n x = torch.clamp(x, a, b)\n\n return x\n\n def sample_truncated_normal(shape, scale=1.0):\n stddev = np.sqrt(scale/shape[-1])/.87962566103423978 # shape[-1] = fan_in\n return stddev * truncated_normal(torch.rand(shape))\n\n out_param = torch.nn.Parameter( (sample_truncated_normal(shape)) )\n return out_param\n\ndef init_zeros(shape):\n\n out_param = torch.nn.Parameter( torch.zeros(shape) )\n return out_param\n\n\ndef custom_ckpt_load(model, ckpt, special_keys=[], init_method=init_lecun_normal, init_kwargs={}):\n\n \"\"\"\n Manually loads parameters into model with support for mismatch in tensor sizes \n for specified modules within modules in module_keys\n\n Parameters:\n model (torch.nn.module, required): RoseTTAFold model\n\n ckpt (dict, required): Loaded torch checkpoint dict \n\n\n \"\"\"\n print('From inside custom ckpt load, special keys: ',special_keys)\n pretrained_state = ckpt['model_state_dict'] # state from pretrained model \n model_state = model.state_dict() # state from initialized model with different architecture \n\n param_keys = list(model_state.keys()) # iter through all keys in the model which is being initialized \n # to ensure we don't miss any keys \n\n for i_key,key in enumerate(param_keys):\n\n if 'module.' 
in key:\n key_safe = key.replace('module.','')\n else:\n key_safe = key \n \n # if the key isn't one of the special ones, load like normal \n if key_safe not in special_keys:\n\n model_state[key] = pretrained_state[key_safe]\n \n # key is special, support the different shapes of the params \n else:\n print(f'Found special parameter {key}, accomodating shape mismatch')\n\n shape_pretrained = pretrained_state[key_safe].shape\n shape_model = model_state[key].shape\n\n # for now, only allow model to be larger than pretrained \n for i_dim,_ in enumerate(shape_pretrained):\n assert shape_pretrained[i_dim] <= shape_model[i_dim]\n \n # create a new tensor whose first entries in \n # every dimension are the pretrained ones\n pretrained_param = pretrained_state[key_safe]\n new_param = init_method(shape_model,**init_kwargs) \n\n \n # replace params in new tensor with pretrained ones \n if len(shape_model) == 2: # rank 2 (matrix)\n a,b = shape_pretrained\n new_param.data[:a,:b] = pretrained_param\n\n\n elif len(shape_model) == 1: # rank 1 (vector)\n a = shape_pretrained[0]\n new_param.data[:a] = pretrained_param\n\n\n else:\n raise RuntimeError('Cannot currently custom load parameters with number of dims = {len(shape_model)}')\n\n # put new tensor into pretrained model \n model_state[key] = new_param\n\n model.load_state_dict(model_state)\n print('Successful custom checkpoint load')\n\ndef count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\n \n \n\nMODEL_PARAM = {'SE3_param': {'div': 4,\n 'l0_in_features': 32,\n 'l0_out_features': 32,\n 'l1_in_features': 3,\n 'l1_out_features': 2,\n 'n_heads': 4,\n 'num_channels': 32,\n 'num_degrees': 2,\n 'num_edge_features': 32,\n 'num_layers': 3},\n 'd_hidden': 32,\n 'd_hidden_templ': 64,\n 'd_msa': 256,\n 'd_msa_full': 64,\n 'd_pair': 128,\n 'd_templ': 64,\n 'n_head_msa': 8,\n 'n_head_pair': 4,\n 'n_head_templ': 4,\n 'n_module_2track': 24,\n 'n_module_3track': 8,\n 'p_drop': 0.15}\n\nif __name__ == '__main__':\n # ADD T1D features here:\n standard_1d = 21+1\n ss_1d = 3\n bonus_1d = 1\n dim_t1d = standard_1d + ss_1d + bonus_1d\n\n MODEL_PARAM['d_t1d'] = dim_t1d\n\n model_extra = RoseTTAFoldModel.RoseTTAFoldModule(**MODEL_PARAM)\n print('Number of model parameters is ',count_parameters(model_extra))\n\n #ckpt_path = '/home/minkbaek/for/jue/RoseTTAFold.Nov05/models/BFF_last.pt'\n ckpt_path = 'models/BFF_last.pt'\n has_gpu = torch.cuda.is_available()\n if not has_gpu:\n kwargs={'map_location':'cpu'}\n else:\n kwargs={}\n ckpt = torch.load(ckpt_path,**kwargs)\n\n\n custom_ckpt_load(model_extra, ckpt, special_keys=['templ_emb.emb.weight', 'templ_emb.emb.bias'], init_method=init_zeros)\n", "import numpy as np\nimport random\nimport scipy\nfrom scipy.signal import *\nfrom pyrosetta import *\n\neps = 1e-9\nP_ADD_OMEGA = 0.5\nP_ADD_THETA = 0.5\nP_ADD_PHI = 0.6\n\ndef gen_rst(params, L1):\n\n npz = np.load(params['NPZ'])\n\n dist,omega,theta,phi = npz['dist'],npz['omega'],npz['theta'],npz['phi']\n\n if params['ROLL']==True:\n print(\"Apply circular shift...\")\n dist = np.roll(dist,1,axis=-1)\n omega = np.roll(omega,1,axis=-1)\n theta = np.roll(theta,1,axis=-1)\n phi = np.roll(phi,1,axis=-1)\n\n dist = dist.astype(np.float32) + eps\n omega = omega.astype(np.float32) + eps\n theta = theta.astype(np.float32) + eps\n phi = phi.astype(np.float32) + eps\n\n # dictionary to store Rosetta restraints\n rst = {'dist' : [], 'omega' : [], 'theta' : [], 'phi' : []}\n\n ########################################################\n # assign 
parameters\n ########################################################\n PCUT = 0.05 #params['PCUT']\n EBASE = params['EBASE']\n EREP = params['EREP']\n DREP = params['DREP']\n PREP = params['PREP']\n SIGD = params['SIGD']\n SIGM = params['SIGM']\n MEFF = params['MEFF']\n DCUT = params['DCUT']\n ALPHA = params['ALPHA']\n BBWGHT = params['BBWGHT']\n\n DSTEP = params['DSTEP']\n ASTEP = np.deg2rad(params['ASTEP'])\n\n seq = params['seq']\n\n sg_flag = False\n if params['SG'] != '':\n sg_flag = True\n sg_w,sg_n = [int(v) for v in params['SG'].split(\",\")]\n print(\"Savitzky-Golay: %d,%d\"%(sg_w,sg_n))\n\n ########################################################\n # dist: 0..20A\n ########################################################\n nres = dist.shape[0]\n bins = np.array([4.25+DSTEP*i for i in range(32)])\n prob = np.sum(dist[:,:,5:], axis=-1) # prob of dist within 20A\n prob_12 = np.sum(dist[:,:,5:21], axis=-1) # prob of dist within 12A\n bkgr = np.array((bins/DCUT)**ALPHA)\n attr = -np.log((dist[:,:,5:]+MEFF)/(dist[:,:,-1][:,:,None]*bkgr[None,None,:]))+EBASE\n repul = np.maximum(attr[:,:,0],np.zeros((nres,nres)))[:,:,None]+np.array(EREP)[None,None,:]\n dist = np.concatenate([repul,attr], axis=-1)\n bins = np.concatenate([DREP,bins])\n x = pyrosetta.rosetta.utility.vector1_double()\n _ = [x.append(v) for v in bins]\n #\n prob = np.triu(prob, k=1) # fill zeros to diagonal and lower (for speed-up)\n i,j = np.where(prob>PCUT)\n prob = prob[i,j]\n prob_12 = prob_12[i,j]\n #nbins = 35\n step = 0.5\n for a,b,p,p_12 in zip(i,j,prob,prob_12):\n y = pyrosetta.rosetta.utility.vector1_double()\n if sg_flag == True:\n _ = [y.append(v) for v in savgol_filter(dist[a,b],sg_w,sg_n)]\n else:\n _ = [y.append(v) for v in dist[a,b]]\n if L1 is not None and (a < L1 and b > L1):\n w = 2.0\n else:\n w = 1.0\n spline = rosetta.core.scoring.func.SplineFunc(\"\", w, 0.0, step, x,y)\n ida = rosetta.core.id.AtomID(5,a+1)\n idb = rosetta.core.id.AtomID(5,b+1)\n rst['dist'].append([a,b,p,p_12,rosetta.core.scoring.constraints.AtomPairConstraint(ida, idb, spline)])\n print(\"dist restraints: %d\"%(len(rst['dist'])))\n\n\n ########################################################\n # omega: -pi..pi\n ########################################################\n nbins = omega.shape[2]-1\n ASTEP = 2.0*np.pi/nbins\n nbins += 4\n bins = np.linspace(-np.pi-1.5*ASTEP, np.pi+1.5*ASTEP, nbins)\n x = pyrosetta.rosetta.utility.vector1_double()\n _ = [x.append(v) for v in bins]\n prob = np.sum(omega[:,:,1:], axis=-1)\n prob = np.triu(prob, k=1) # fill zeros to diagonal and lower (for speed-up)\n i,j = np.where(prob>PCUT+P_ADD_OMEGA)\n prob = prob[i,j]\n omega = -np.log((omega+MEFF)/(omega[:,:,-1]+MEFF)[:,:,None])\n #if sg_flag == True:\n # omega = savgol_filter(omega,sg_w,sg_n,axis=-1,mode='wrap')\n omega = np.concatenate([omega[:,:,-2:],omega[:,:,1:],omega[:,:,1:3]],axis=-1)\n for a,b,p in zip(i,j,prob):\n y = pyrosetta.rosetta.utility.vector1_double()\n _ = [y.append(v) for v in omega[a,b]]\n spline = rosetta.core.scoring.func.SplineFunc(\"\", 1.0, 0.0, ASTEP, x,y)\n id1 = rosetta.core.id.AtomID(2,a+1) # CA-i\n id2 = rosetta.core.id.AtomID(5,a+1) # CB-i\n id3 = rosetta.core.id.AtomID(5,b+1) # CB-j\n id4 = rosetta.core.id.AtomID(2,b+1) # CA-j\n rst['omega'].append([a,b,p,rosetta.core.scoring.constraints.DihedralConstraint(id1,id2,id3,id4, spline)])\n print(\"omega restraints: %d\"%(len(rst['omega'])))\n\n\n ########################################################\n # theta: -pi..pi\n 
########################################################\n prob = np.sum(theta[:,:,1:], axis=-1)\n np.fill_diagonal(prob, 0.0)\n i,j = np.where(prob>PCUT+P_ADD_THETA)\n prob = prob[i,j]\n theta = -np.log((theta+MEFF)/(theta[:,:,-1]+MEFF)[:,:,None])\n #if sg_flag == True:\n # theta = savgol_filter(theta,sg_w,sg_n,axis=-1,mode='wrap')\n theta = np.concatenate([theta[:,:,-2:],theta[:,:,1:],theta[:,:,1:3]],axis=-1)\n for a,b,p in zip(i,j,prob):\n y = pyrosetta.rosetta.utility.vector1_double()\n _ = [y.append(v) for v in theta[a,b]]\n spline = rosetta.core.scoring.func.SplineFunc(\"\", 1.0, 0.0, ASTEP, x,y)\n id1 = rosetta.core.id.AtomID(1,a+1) # N-i\n id2 = rosetta.core.id.AtomID(2,a+1) # CA-i\n id3 = rosetta.core.id.AtomID(5,a+1) # CB-i\n id4 = rosetta.core.id.AtomID(5,b+1) # CB-j\n rst['theta'].append([a,b,p,rosetta.core.scoring.constraints.DihedralConstraint(id1,id2,id3,id4, spline)])\n\n print(\"theta restraints: %d\"%(len(rst['theta'])))\n\n\n ########################################################\n # phi: 0..pi\n ########################################################\n nbins = phi.shape[2]-1+4\n bins = np.linspace(-1.5*ASTEP, np.pi+1.5*ASTEP, nbins)\n x = pyrosetta.rosetta.utility.vector1_double()\n _ = [x.append(v) for v in bins]\n prob = np.sum(phi[:,:,1:], axis=-1)\n np.fill_diagonal(prob, 0.0)\n i,j = np.where(prob>PCUT+P_ADD_PHI)\n prob = prob[i,j]\n phi = -np.log((phi+MEFF)/(phi[:,:,-1]+MEFF)[:,:,None])\n #if sg_flag == True:\n # phi = savgol_filter(phi,sg_w,sg_n,axis=-1,mode='mirror')\n phi = np.concatenate([np.flip(phi[:,:,1:3],axis=-1),phi[:,:,1:],np.flip(phi[:,:,-2:],axis=-1)], axis=-1)\n for a,b,p in zip(i,j,prob):\n y = pyrosetta.rosetta.utility.vector1_double()\n _ = [y.append(v) for v in phi[a,b]]\n spline = rosetta.core.scoring.func.SplineFunc(\"\", 1.0, 0.0, ASTEP, x,y)\n id1 = rosetta.core.id.AtomID(2,a+1) # CA-i\n id2 = rosetta.core.id.AtomID(5,a+1) # CB-i\n id3 = rosetta.core.id.AtomID(5,b+1) # CB-j\n rst['phi'].append([a,b,p,rosetta.core.scoring.constraints.AngleConstraint(id1,id2,id3, spline)])\n print(\"phi restraints: %d\"%(len(rst['phi'])))\n\n ########################################################\n # backbone torsions\n ########################################################\n if (params['BB'] != ''):\n bbnpz = np.load(params['BB'])\n bbphi,bbpsi = bbnpz['phi'],bbnpz['psi']\n rst['bbphi'] = []\n rst['bbpsi'] = []\n nbins = bbphi.shape[1]+4\n step = 2.*np.pi/bbphi.shape[1]\n bins = np.linspace(-1.5*step-np.pi, np.pi+1.5*step, nbins)\n x = pyrosetta.rosetta.utility.vector1_double()\n _ = [x.append(v) for v in bins]\n\n bbphi = -np.log(bbphi)\n bbphi = np.concatenate([bbphi[:,-2:],bbphi,bbphi[:,:2]],axis=-1).copy()\n\n bbpsi = -np.log(bbpsi)\n bbpsi = np.concatenate([bbpsi[:,-2:],bbpsi,bbpsi[:,:2]],axis=-1).copy()\n\n for i in range(1,nres):\n N1 = rosetta.core.id.AtomID(1,i)\n Ca1 = rosetta.core.id.AtomID(2,i)\n C1 = rosetta.core.id.AtomID(3,i)\n N2 = rosetta.core.id.AtomID(1,i+1)\n Ca2 = rosetta.core.id.AtomID(2,i+1)\n C2 = rosetta.core.id.AtomID(3,i+1)\n\n # psi(i)\n ypsi = pyrosetta.rosetta.utility.vector1_double()\n _ = [ypsi.append(v) for v in bbpsi[i-1]]\n spsi = rosetta.core.scoring.func.SplineFunc(\"\", BBWGHT, 0.0, step, x,ypsi)\n rst['bbpsi'].append(rosetta.core.scoring.constraints.DihedralConstraint(N1,Ca1,C1,N2, spsi))\n\n # phi(i+1)\n yphi = pyrosetta.rosetta.utility.vector1_double()\n _ = [yphi.append(v) for v in bbphi[i]]\n sphi = rosetta.core.scoring.func.SplineFunc(\"\", BBWGHT, 0.0, step, x,yphi)\n 
rst['bbphi'].append(rosetta.core.scoring.constraints.DihedralConstraint(C1,N2,Ca2,C2, sphi))\n\n print(\"bbbtor restraints: %d\"%(len(rst['bbphi'])+len(rst['bbpsi'])))\n\n return rst\n\ndef set_predicted_dihedral(pose, phi, psi, omega):\n\n nbins = phi.shape[1]\n bins = np.linspace(-180.,180.,nbins+1)[:-1] + 180./nbins\n\n nres = pose.total_residue()\n for i in range(nres):\n pose.set_phi(i+1,np.random.choice(bins,p=phi[i]))\n pose.set_psi(i+1,np.random.choice(bins,p=psi[i]))\n\n if np.random.uniform() < omega[i,0]:\n pose.set_omega(i+1,0)\n else:\n pose.set_omega(i+1,180)\n\ndef set_random_dihedral(pose):\n nres = pose.total_residue()\n for i in range(1, nres+1):\n phi,psi=random_dihedral()\n pose.set_phi(i,phi)\n pose.set_psi(i,psi)\n pose.set_omega(i,180)\n\n return(pose)\n\n\n#pick phi/psi randomly from:\n#-140 153 180 0.135 B\n# -72 145 180 0.155 B\n#-122 117 180 0.073 B\n# -82 -14 180 0.122 A\n# -61 -41 180 0.497 A\n# 57 39 180 0.018 L\ndef random_dihedral():\n phi=0\n psi=0\n r=random.random()\n if(r<=0.135):\n phi=-140\n psi=153\n elif(r>0.135 and r<=0.29):\n phi=-72\n psi=145\n elif(r>0.29 and r<=0.363):\n phi=-122\n psi=117\n elif(r>0.363 and r<=0.485):\n phi=-82\n psi=-14\n elif(r>0.485 and r<=0.982):\n phi=-61\n psi=-41\n else:\n phi=57\n psi=39\n return(phi, psi)\n\n\ndef read_fasta(file):\n fasta=\"\"\n first = True\n with open(file, \"r\") as f:\n for line in f:\n if(line[0] == \">\"):\n if first:\n first = False\n continue\n else:\n break\n else:\n line=line.rstrip()\n fasta = fasta + line;\n return fasta\n\n\ndef remove_clash(scorefxn, mover, pose):\n for _ in range(0, 5):\n if float(scorefxn(pose)) < 10:\n break\n mover.apply(pose)\n\n\ndef add_rst(pose, rst, sep1, sep2, params, nogly=False, use_orient=None, pcut=None, p12_cut=0.0):\n if use_orient == None:\n use_orient = params['USE_ORIENT']\n if pcut == None:\n pcut=params['PCUT']\n \n seq = params['seq']\n\n # collect restraints\n array = []\n\n if nogly==True:\n dist_r = [r for a,b,p,p_12,r in rst['dist'] if abs(a-b)>=sep1 and abs(a-b)<sep2 and seq[a]!='G' and seq[b]!='G' and p>=pcut and p_12>=p12_cut]\n if use_orient:\n omega_r = [r for a,b,p,r in rst['omega'] if abs(a-b)>=sep1 and abs(a-b)<sep2 and seq[a]!='G' and seq[b]!='G' and p>=pcut+P_ADD_OMEGA] #0.5\n theta_r = [r for a,b,p,r in rst['theta'] if abs(a-b)>=sep1 and abs(a-b)<sep2 and seq[a]!='G' and seq[b]!='G' and p>=pcut+P_ADD_THETA] #0.5\n phi_r = [r for a,b,p,r in rst['phi'] if abs(a-b)>=sep1 and abs(a-b)<sep2 and seq[a]!='G' and seq[b]!='G' and p>=pcut+P_ADD_PHI] #0.6\n else:\n dist_r = [r for a,b,p,p_12,r in rst['dist'] if abs(a-b)>=sep1 and abs(a-b)<sep2 and p>=pcut and p_12>=p12_cut]\n if use_orient:\n omega_r = [r for a,b,p,r in rst['omega'] if abs(a-b)>=sep1 and abs(a-b)<sep2 and p>=pcut+P_ADD_OMEGA]\n theta_r = [r for a,b,p,r in rst['theta'] if abs(a-b)>=sep1 and abs(a-b)<sep2 and p>=pcut+P_ADD_THETA]\n phi_r = [r for a,b,p,r in rst['phi'] if abs(a-b)>=sep1 and abs(a-b)<sep2 and p>=pcut+P_ADD_PHI] #0.6\n\n #if params['BB'] != '':\n # array += [r for r in rst['bbphi']]\n # array += [r for r in rst['bbpsi']]\n array += dist_r\n if use_orient:\n array += omega_r\n array += theta_r\n array += phi_r\n\n if len(array) < 1:\n return\n\n print (\"Number of applied pair restraints:\", len(array))\n print (\" - Distance restraints:\", len(dist_r))\n if use_orient:\n print (\" - Omega restraints:\", len(omega_r))\n print (\" - Theta restraints:\", len(theta_r))\n print (\" - Phi restraints: \", len(phi_r))\n\n #random.shuffle(array)\n\n cset = 
rosetta.core.scoring.constraints.ConstraintSet()\n [cset.add_constraint(a) for a in array]\n\n # add to pose\n constraints = rosetta.protocols.constraint_movers.ConstraintSetMover()\n constraints.constraint_set(cset)\n constraints.add_constraints(True)\n constraints.apply(pose)\n\ndef add_crd_rst(pose, nres, std=1.0, tol=1.0):\n flat_har = rosetta.core.scoring.func.FlatHarmonicFunc(0.0, std, tol)\n rst = list()\n for i in range(1, nres+1):\n xyz = pose.residue(i).atom(\"CA\").xyz() # xyz coord of CA atom\n ida = rosetta.core.id.AtomID(2,i) # CA idx for residue i\n rst.append(rosetta.core.scoring.constraints.CoordinateConstraint(ida, ida, xyz, flat_har)) \n\n if len(rst) < 1:\n return\n \n print (\"Number of applied coordinate restraints:\", len(rst))\n #random.shuffle(rst)\n\n cset = rosetta.core.scoring.constraints.ConstraintSet()\n [cset.add_constraint(a) for a in rst]\n\n # add to pose\n constraints = rosetta.protocols.constraint_movers.ConstraintSetMover()\n constraints.constraint_set(cset)\n constraints.add_constraints(True)\n constraints.apply(pose)\n\n" ]
[ [ "torch.randn", "numpy.sqrt" ], [ "torch.mean", "torch.erfinv", "torch.ones", "numpy.sqrt", "torch.full", "torch.cat", "torch.load", "torch.randperm", "torch.clone", "torch.zeros", "torch.multinomial", "torch.tensor", "torch.sort", "torch.cuda.is_available", "torch.rand", "torch.distributions.normal.Normal", "torch.clamp", "numpy.random.randint" ], [ "numpy.log", "numpy.linspace", "numpy.random.choice", "numpy.concatenate", "numpy.random.uniform", "numpy.deg2rad", "numpy.fill_diagonal", "numpy.flip", "numpy.roll", "numpy.load", "numpy.triu", "numpy.array", "numpy.where", "numpy.sum", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
chrisluedtke/divvy-data-analysis
[ "441fa9028ed4bb77ad47e8109a8be749ea1d30b1", "441fa9028ed4bb77ad47e8109a8be749ea1d30b1" ]
[ "divvydata/historical_data.py", "nb_utils/data_processing.py" ]
[ "\"\"\"\nPulls data from:\nhttps://www.divvybikes.com/system-data\nhttps://s3.amazonaws.com/divvy-data/tripdata\n\"\"\"\nfrom io import BytesIO\nimport os\nimport re\nimport requests\nfrom zipfile import ZipFile\nfrom typing import List\n\nfrom lxml import html\nimport pandas as pd\n\nfrom .stations_feed import StationsFeed\n\n\nSTN_DT_FORM = {\n '2013': \"%m/%d/%Y\", # Not labeled for quarters\n '2014_Q1Q2': None, # xlsx file\n '2014_Q3Q4': \"%m/%d/%Y %H:%M\",\n '2015': None, # no date column and not labeled for quarters\n '2016_Q1Q2': \"%m/%d/%Y\",\n '2016_Q3': \"%m/%d/%Y\",\n '2016_Q4': \"%m/%d/%Y\",\n '2017_Q1Q2': \"%m/%d/%Y %H:%M:%S\",\n '2017_Q3Q4': \"%m/%d/%Y %H:%M\",\n}\n\nSTN_COL_MAP = {\n 'latitude': 'lat',\n 'longitude': 'lon',\n 'dateCreated': 'online_date',\n 'online date': 'online_date',\n}\n\nRD_DT_FORM = {\n '2013': \"%Y-%m-%d %H:%M\", # Not labeled for quarters\n '2014_Q1Q2': \"%m/%d/%Y %H:%M\",\n '2014_Q3': \"%m/%d/%Y %H:%M\",\n '2014_Q4': \"%m/%d/%Y %H:%M\",\n '2015_Q1': \"%m/%d/%Y %H:%M\",\n '2015_Q2': \"%m/%d/%Y %H:%M\",\n '2015': \"%m/%d/%Y %H:%M\", # Q3 labeled as month integer\n '2015_Q4': \"%m/%d/%Y %H:%M\",\n '2016_Q1': \"%m/%d/%Y %H:%M\",\n '2016': \"%m/%d/%Y %H:%M\", # Q2 labeled as month integer\n '2016_Q3': \"%m/%d/%Y %H:%M:%S\",\n '2016_Q4': \"%m/%d/%Y %H:%M:%S\",\n '2017_Q1': \"%m/%d/%Y %H:%M:%S\",\n '2017_Q2': \"%m/%d/%Y %H:%M:%S\",\n '2017_Q3': \"%m/%d/%Y %H:%M:%S\",\n '2017_Q4': \"%m/%d/%Y %H:%M\",\n '2018_Q1': \"%Y-%m-%d %H:%M:%S\",\n '2018_Q2': \"%Y-%m-%d %H:%M:%S\",\n '2018_Q3': \"%Y-%m-%d %H:%M:%S\",\n '2018_Q4': \"%Y-%m-%d %H:%M:%S\",\n}\n\nRD_COL_MAP = {\n '01 - Rental Details Rental ID': 'trip_id',\n '01 - Rental Details Local Start Time': 'start_time',\n '01 - Rental Details Local End Time': 'end_time',\n '01 - Rental Details Bike ID': 'bikeid',\n '01 - Rental Details Duration In Seconds Uncapped': 'tripduration',\n '03 - Rental Start Station ID': 'from_station_id',\n '03 - Rental Start Station Name': 'from_station_name',\n '02 - Rental End Station ID': 'to_station_id',\n '02 - Rental End Station Name': 'to_station_name',\n 'User Type': 'usertype',\n 'Member Gender': 'gender',\n '05 - Member Details Member Birthday Year': 'birthyear',\n 'stoptime': 'end_time',\n 'starttime': 'start_time',\n 'birthday': 'birthyear',\n}\n\n\ndef parse_zip_urls_from_url(url):\n r = requests.get(url)\n webpage = html.fromstring(r.content)\n\n base_source = 'https://s3.amazonaws.com/divvy-data/tripdata/'\n urls = [url for url in set(webpage.xpath('//a/@href'))\n if (base_source in url and url.endswith('.zip'))]\n\n return urls\n\n\ndef year_lookup_to_date(yr_lookup: str) -> str:\n q_map = {\n 'Q1': '03-31',\n 'Q2': '06-30',\n 'Q3': '09-30',\n 'Q4': '12-31',\n }\n\n yr_l_splt = yr_lookup.split('_')\n q = yr_l_splt[-1][-2:]\n date = q_map.get(q, '12-31')\n date = f'{yr_l_splt[0]}-{date}'\n\n return date\n\n\ndef get_current_stations():\n \"\"\"Pulls most recent data from Divvy JSON feed.\n\n Necessar because Divvy did not provide 2018 station data.\n \"\"\"\n df = StationsFeed().get_current_data()\n cols = ['id', 'stationName', 'latitude', 'longitude',\n 'totalDocks', 'lastCommunicationTime']\n df = df[cols].rename(columns={\n 'stationName': 'name',\n 'lastCommunicationTime': 'as_of_date',\n 'totalDocks': 'dpcapacity'\n })\n df = df.rename(columns=STN_COL_MAP)\n\n return df\n\n\ndef process_ride_df(z, fpath, year_lookup):\n df = (pd.read_csv(z.open(fpath))\n .rename(columns=RD_COL_MAP))\n\n df['start_time'] = pd.to_datetime(\n df['start_time'],\n 
format=RD_DT_FORM.get(year_lookup, None),\n errors='coerce'\n )\n df['end_time'] = pd.to_datetime(\n df['end_time'],\n format=RD_DT_FORM.get(year_lookup, None),\n errors='coerce'\n )\n\n return df\n\n\ndef process_station_df(z, fpath, year_lookup):\n if fpath.endswith('.csv'):\n df = pd.read_csv(z.open(fpath))\n else: # must be '.xlsx'\n df = pd.read_excel(z.open(fpath))\n\n df = df.rename(columns=STN_COL_MAP)\n df['as_of_date'] = year_lookup_to_date(year_lookup)\n df['as_of_date'] = pd.to_datetime(df['as_of_date'])\n\n if 'online_date' in df:\n df['online_date'] = pd.to_datetime(\n df['online_date'],\n format=STN_DT_FORM.get(year_lookup, None),\n errors='coerce'\n )\n\n return df\n\n\ndef combine_ride_dfs(dfs: List[pd.DataFrame]) -> pd.DataFrame:\n dfs = (pd.concat(dfs, ignore_index=True, sort=True)\n .sort_values('start_time')\n .reset_index(drop=True))\n dfs['tripduration'] = (\n dfs.tripduration.astype(str).str.replace(',', '').astype(float)\n )\n\n cols = ['trip_id', 'bikeid', 'start_time', 'end_time', 'tripduration',\n 'from_station_id', 'from_station_name', 'to_station_id',\n 'to_station_name', 'usertype', 'gender', 'birthyear']\n dfs = dfs[[col for col in cols if col in dfs]]\n\n return dfs\n\n\ndef combine_station_dfs(dfs: List[pd.DataFrame]) -> pd.DataFrame:\n dfs = (pd.concat(dfs, ignore_index=True, sort=True)\n .sort_values(['id', 'as_of_date'])\n .reset_index(drop=True))\n\n # excludes ['city', 'Unnamed: 7']\n cols = ['id', 'name', 'as_of_date', 'lat', 'lon', 'dpcapacity',\n 'online_date', 'landmark']\n dfs = dfs[[col for col in cols if col in dfs]]\n\n return dfs\n\n\ndef get_historical_data(years: List[str], write_to: str = '', rides=True,\n stations=True):\n \"\"\"Gathers and cleans historical Divvy data\n\n write_to: optional local folder path to extract zip files to\n returns: (pandas.DataFrame of rides, pandas.DataFrame of stations)\n \"\"\"\n\n if isinstance(years, str):\n years = [years]\n\n ride_dfs = []\n station_dfs = []\n\n if not (rides or stations):\n return ride_dfs, station_dfs\n\n urls = parse_zip_urls_from_url('https://www.divvybikes.com/system-data')\n\n for url in sorted(urls):\n z_fn = url.split('/')[-1]\n z_year = re.findall(r'20\\d{2}', z_fn)[0]\n if z_year not in years:\n continue\n\n print(url)\n\n r = requests.get(url)\n with ZipFile(BytesIO(r.content)) as z:\n if write_to:\n write_path = os.path.join(write_to, z_fn.replace('.zip', ''))\n z.extractall(write_path)\n\n for fpath in z.namelist():\n fn = fpath.split('/')[-1]\n if fn.endswith(('.csv', '.xlsx')) and not fn.startswith('.'):\n quarter = re.findall('Q[1-4]', fn)\n if quarter:\n year_lookup = f\"{z_year}_{''.join(quarter)}\"\n else:\n year_lookup = z_year\n else:\n continue\n\n if rides and '_trips_' in fn.lower():\n print(fn, year_lookup)\n df = process_ride_df(z, fpath, year_lookup)\n ride_dfs.append(df)\n\n elif stations and '_stations_' in fn.lower():\n print(fn, year_lookup)\n df = process_station_df(z, fpath, year_lookup)\n station_dfs.append(df)\n\n if rides:\n ride_dfs = combine_ride_dfs(ride_dfs)\n\n if stations:\n if '2018' in years:\n df = get_current_stations()\n station_dfs.append(df)\n\n station_dfs = combine_station_dfs(station_dfs)\n\n return ride_dfs, station_dfs\n", "import numpy as np\nimport pandas as pd\n\n\ndef my_melt(df, keep_cols=[]):\n \"\"\"Reshapes DataFrame so each row represents a station interaction\"\"\"\n # standardize language\n df = df.rename(columns={col: (col.replace('from_', 'start_')\n .replace('to_', 'end_'))\n for col in df})\n\n start_cols = [col for 
col in df if col.startswith('start_')]\n end_cols = [col for col in df if col.startswith('end_')]\n\n df = pd.concat([(df.loc[:,keep_cols + start_cols]\n .rename(columns={col: col.replace('start_','')\n for col in start_cols})\n .assign(type='departure')),\n (df.loc[:,keep_cols + end_cols]\n .rename(columns={col: col.replace('end_','')\n for col in end_cols})\n .assign(type='arrival'))],\n sort=False, axis=0)\n\n return df\n\n\ndef add_empty_rows(df, fill_series, constants=['station_id', 'lat','lon']):\n \"\"\"Add empty rows to DataFrame. Cols other than constants fill with NaN\n\n fill_series: pd.Series representing unique values that should be represented\n in the returned DataFrame\n \"\"\"\n if fill_series.name not in df:\n raise ValueError('fill_series name must be column of DataFrame')\n\n fill_df = pd.merge(pd.DataFrame(fill_series).assign(key=1),\n df[constants].drop_duplicates().assign(key=1),\n on='key', how='left')\n fill_df = fill_df.drop(columns=['key'])\n\n fill_df = fill_df.merge(df, on=([fill_series.name] + constants),\n how='left')\n\n fill_df = fill_df.sort_values((constants + [fill_series.name]))\n\n return fill_df\n" ]
[ [ "pandas.concat", "pandas.to_datetime" ], [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
hoangdzung/dgl
[ "738b75f41e5d3229e5ccda52d76e1297d7b0520d" ]
[ "python/dgl/distributed/graph_partition_book.py" ]
[ "\"\"\"Define graph partition book.\"\"\"\n\nimport pickle\nfrom abc import ABC\nimport numpy as np\n\nfrom .. import backend as F\nfrom ..base import NID, EID\nfrom .. import utils\nfrom .shared_mem_utils import _to_shared_mem, _get_ndata_path, _get_edata_path, DTYPE_DICT\nfrom .._ffi.ndarray import empty_shared_mem\nfrom ..ndarray import exist_shared_mem_array\nfrom .id_map import IdMap\n\ndef _move_metadata_to_shared_mem(graph_name, num_nodes, num_edges, part_id,\n num_partitions, node_map, edge_map, is_range_part):\n ''' Move all metadata of the partition book to the shared memory.\n\n These metadata will be used to construct graph partition book.\n\n Parameters\n ----------\n graph_name : str\n The name of the graph\n num_nodes : int\n The total number of nodes\n num_edges : int\n The total number of edges\n part_id : int\n The partition ID.\n num_partitions : int\n The number of physical partitions generated for the graph.\n node_map : Tensor\n It stores the mapping information from node IDs to partitions. With range partitioning,\n the tensor stores the serialized result of partition ranges.\n edge_map : Tensor\n It stores the mapping information from edge IDs to partitions. With range partitioning,\n the tensor stores the serialized result of partition ranges.\n is_range_part : bool\n Indicate that we use a range partition. This is important for us to deserialize data\n in node_map and edge_map.\n\n Returns\n -------\n (Tensor, Tensor, Tensor)\n The first tensor stores the serialized metadata, the second tensor stores the serialized\n node map and the third tensor stores the serialized edge map. All tensors are stored in\n shared memory.\n '''\n meta = _to_shared_mem(F.tensor([int(is_range_part), num_nodes, num_edges,\n num_partitions, part_id,\n len(node_map), len(edge_map)]),\n _get_ndata_path(graph_name, 'meta'))\n node_map = _to_shared_mem(node_map, _get_ndata_path(graph_name, 'node_map'))\n edge_map = _to_shared_mem(edge_map, _get_edata_path(graph_name, 'edge_map'))\n return meta, node_map, edge_map\n\ndef _get_shared_mem_metadata(graph_name):\n ''' Get the metadata of the graph from shared memory.\n\n The server serializes the metadata of a graph and store them in shared memory.\n The client needs to deserialize the data in shared memory and get the metadata\n of the graph.\n\n Parameters\n ----------\n graph_name : str\n The name of the graph. 
We can use the graph name to find the shared memory name.\n\n Returns\n -------\n (bool, int, int, Tensor, Tensor)\n The first element indicates whether it is range partitioning;\n the second element is the partition ID;\n the third element is the number of partitions;\n the fourth element is the tensor that stores the serialized result of node maps;\n the fifth element is the tensor that stores the serialized result of edge maps.\n '''\n # The metadata has 7 elements: is_range_part, num_nodes, num_edges, num_partitions, part_id,\n # the length of node map and the length of the edge map.\n shape = (7,)\n dtype = F.int64\n dtype = DTYPE_DICT[dtype]\n data = empty_shared_mem(_get_ndata_path(graph_name, 'meta'), False, shape, dtype)\n dlpack = data.to_dlpack()\n meta = F.asnumpy(F.zerocopy_from_dlpack(dlpack))\n is_range_part, _, _, num_partitions, part_id, node_map_len, edge_map_len = meta\n\n # Load node map\n data = empty_shared_mem(_get_ndata_path(graph_name, 'node_map'), False, (node_map_len,), dtype)\n dlpack = data.to_dlpack()\n node_map = F.zerocopy_from_dlpack(dlpack)\n\n # Load edge_map\n data = empty_shared_mem(_get_edata_path(graph_name, 'edge_map'), False, (edge_map_len,), dtype)\n dlpack = data.to_dlpack()\n edge_map = F.zerocopy_from_dlpack(dlpack)\n\n return is_range_part, part_id, num_partitions, node_map, edge_map\n\n\ndef get_shared_mem_partition_book(graph_name, graph_part):\n '''Get a graph partition book from shared memory.\n\n A graph partition book of a specific graph can be serialized to shared memory.\n We can reconstruct a graph partition book from shared memory.\n\n Parameters\n ----------\n graph_name : str\n The name of the graph.\n graph_part : DGLGraph\n The graph structure of a partition.\n\n Returns\n -------\n GraphPartitionBook\n A graph partition book for a particular partition.\n '''\n if not exist_shared_mem_array(_get_ndata_path(graph_name, 'meta')):\n return None\n is_range_part, part_id, num_parts, node_map_data, edge_map_data = \\\n _get_shared_mem_metadata(graph_name)\n if is_range_part == 1:\n # node ID ranges and edge ID ranges are stored in the order of node type IDs\n # and edge type IDs.\n node_map = {}\n ntypes = {}\n # node_map_data and edge_map_data were serialized with pickle and converted into\n # a list of bytes and then stored in a numpy array before being placed in shared\n # memory. To deserialize, we need to reverse the process.\n node_map_data = pickle.loads(bytes(F.asnumpy(node_map_data).tolist()))\n for i, (ntype, nid_range) in enumerate(node_map_data):\n ntypes[ntype] = i\n node_map[ntype] = nid_range\n\n edge_map = {}\n etypes = {}\n edge_map_data = pickle.loads(bytes(F.asnumpy(edge_map_data).tolist()))\n for i, (etype, eid_range) in enumerate(edge_map_data):\n etypes[etype] = i\n edge_map[etype] = eid_range\n return RangePartitionBook(part_id, num_parts, node_map, edge_map, ntypes, etypes)\n else:\n return BasicPartitionBook(part_id, num_parts, node_map_data, edge_map_data, graph_part)\n\nclass GraphPartitionBook(ABC):\n \"\"\" The base class of the graph partition book.\n\n For distributed training, a graph is partitioned into multiple parts and is loaded\n in multiple machines. 
The partition book contains all necessary information to locate\n nodes and edges in the cluster.\n\n The partition book contains various partition information, including\n\n * the number of partitions,\n * the partition ID that a node or edge belongs to,\n * the node IDs and the edge IDs that a partition has.\n * the local IDs of nodes and edges in a partition.\n\n Currently, there are two classes that implement ``GraphPartitionBook``:\n ``BasicGraphPartitionBook`` and ``RangePartitionBook``. ``BasicGraphPartitionBook``\n stores the mappings between every individual node/edge ID and partition ID on\n every machine, which usually consumes a lot of memory, while ``RangePartitionBook``\n calculates the mapping between node/edge IDs and partition IDs based on some small\n metadata because nodes/edges have been relabeled to have IDs in the same partition\n fall in a contiguous ID range. ``RangePartitionBook`` is usually a preferred way to\n provide mappings between node/edge IDs and partition IDs.\n\n A graph partition book is constructed automatically when a graph is partitioned.\n When a graph partition is loaded, a graph partition book is loaded as well.\n Please see :py:meth:`~dgl.distributed.partition.partition_graph`,\n :py:meth:`~dgl.distributed.partition.load_partition` and\n :py:meth:`~dgl.distributed.partition.load_partition_book` for more details.\n \"\"\"\n\n def shared_memory(self, graph_name):\n \"\"\"Move the partition book to shared memory.\n\n Parameters\n ----------\n graph_name : str\n The graph name. This name will be used to read the partition book from shared\n memory in another process.\n \"\"\"\n\n def num_partitions(self):\n \"\"\"Return the number of partitions.\n\n Returns\n -------\n int\n number of partitions\n \"\"\"\n\n def metadata(self):\n \"\"\"Return the partition meta data.\n\n The meta data includes:\n\n * The machine ID.\n * Number of nodes and edges of each partition.\n\n Examples\n --------\n >>> print(g.get_partition_book().metadata())\n >>> [{'machine_id' : 0, 'num_nodes' : 3000, 'num_edges' : 5000},\n ... {'machine_id' : 1, 'num_nodes' : 2000, 'num_edges' : 4888},\n ... 
...]\n\n Returns\n -------\n list[dict[str, any]]\n Meta data of each partition.\n \"\"\"\n\n def nid2partid(self, nids, ntype):\n \"\"\"From global node IDs to partition IDs\n\n Parameters\n ----------\n nids : tensor\n global node IDs\n ntype : str\n The node type\n\n Returns\n -------\n tensor\n partition IDs\n \"\"\"\n\n def eid2partid(self, eids, etype):\n \"\"\"From global edge IDs to partition IDs\n\n Parameters\n ----------\n eids : tensor\n global edge IDs\n etype : str\n The edge type\n\n Returns\n -------\n tensor\n partition IDs\n \"\"\"\n\n def partid2nids(self, partid, ntype):\n \"\"\"From partition id to global node IDs\n\n Parameters\n ----------\n partid : int\n partition id\n ntype : str\n The node type\n\n Returns\n -------\n tensor\n node IDs\n \"\"\"\n\n def partid2eids(self, partid, etype):\n \"\"\"From partition id to global edge IDs\n\n Parameters\n ----------\n partid : int\n partition id\n etype : str\n The edge type\n\n Returns\n -------\n tensor\n edge IDs\n \"\"\"\n\n def nid2localnid(self, nids, partid, ntype):\n \"\"\"Get local node IDs within the given partition.\n\n Parameters\n ----------\n nids : tensor\n global node IDs\n partid : int\n partition ID\n ntype : str\n The node type\n\n Returns\n -------\n tensor\n local node IDs\n \"\"\"\n\n def eid2localeid(self, eids, partid, etype):\n \"\"\"Get the local edge ids within the given partition.\n\n Parameters\n ----------\n eids : tensor\n global edge IDs\n partid : int\n partition ID\n etype : str\n The edge type\n\n Returns\n -------\n tensor\n local edge IDs\n \"\"\"\n\n @property\n def partid(self):\n \"\"\"Get the current partition ID\n\n Return\n ------\n int\n The partition ID of current machine\n \"\"\"\n\n @property\n def ntypes(self):\n \"\"\"Get the list of node types\n \"\"\"\n\n @property\n def etypes(self):\n \"\"\"Get the list of edge types\n \"\"\"\n\n def map_to_per_ntype(self, ids):\n \"\"\"Map homogeneous node IDs to type-wise IDs and node types.\n\n Parameters\n ----------\n ids : tensor\n Homogeneous node IDs.\n\n Returns\n -------\n (tensor, tensor)\n node type IDs and type-wise node IDs.\n \"\"\"\n\n def map_to_per_etype(self, ids):\n \"\"\"Map homogeneous edge IDs to type-wise IDs and edge types.\n\n Parameters\n ----------\n ids : tensor\n Homogeneous edge IDs.\n\n Returns\n -------\n (tensor, tensor)\n edge type IDs and type-wise edge IDs.\n \"\"\"\n\n def map_to_homo_nid(self, ids, ntype):\n \"\"\"Map type-wise node IDs and type IDs to homogeneous node IDs.\n\n Parameters\n ----------\n ids : tensor\n Type-wise node Ids\n ntype : str\n node type\n\n Returns\n -------\n Tensor\n Homogeneous node IDs.\n \"\"\"\n\n def map_to_homo_eid(self, ids, etype):\n \"\"\"Map type-wise edge IDs and type IDs to homogeneous edge IDs.\n\n Parameters\n ----------\n ids : tensor\n Type-wise edge Ids\n etype : str\n edge type\n\n Returns\n -------\n Tensor\n Homogeneous edge IDs.\n \"\"\"\n\nclass BasicPartitionBook(GraphPartitionBook):\n \"\"\"This provides the most flexible way to store parition information.\n\n The partition book maintains the mapping of every single node IDs and edge IDs to\n partition IDs. 
This is very flexible at the coast of large memory consumption.\n On a large graph, the mapping consumes significant memory and this partition book\n is not recommended.\n\n Parameters\n ----------\n part_id : int\n partition ID of current partition book\n num_parts : int\n number of total partitions\n node_map : tensor\n global node ID mapping to partition ID\n edge_map : tensor\n global edge ID mapping to partition ID\n part_graph : DGLGraph\n The graph partition structure.\n \"\"\"\n def __init__(self, part_id, num_parts, node_map, edge_map, part_graph):\n assert part_id >= 0, 'part_id cannot be a negative number.'\n assert num_parts > 0, 'num_parts must be greater than zero.'\n self._part_id = int(part_id)\n self._num_partitions = int(num_parts)\n self._nid2partid = F.tensor(node_map)\n assert F.dtype(self._nid2partid) == F.int64, \\\n 'the node map must be stored in an integer array'\n self._eid2partid = F.tensor(edge_map)\n assert F.dtype(self._eid2partid) == F.int64, \\\n 'the edge map must be stored in an integer array'\n # Get meta data of the partition book.\n self._partition_meta_data = []\n _, nid_count = np.unique(F.asnumpy(self._nid2partid), return_counts=True)\n _, eid_count = np.unique(F.asnumpy(self._eid2partid), return_counts=True)\n for partid in range(self._num_partitions):\n part_info = {}\n part_info['machine_id'] = partid\n part_info['num_nodes'] = int(nid_count[partid])\n part_info['num_edges'] = int(eid_count[partid])\n self._partition_meta_data.append(part_info)\n # Get partid2nids\n self._partid2nids = []\n sorted_nid = F.tensor(np.argsort(F.asnumpy(self._nid2partid)))\n start = 0\n for offset in nid_count:\n part_nids = sorted_nid[start:start+offset]\n start += offset\n self._partid2nids.append(part_nids)\n # Get partid2eids\n self._partid2eids = []\n sorted_eid = F.tensor(np.argsort(F.asnumpy(self._eid2partid)))\n start = 0\n for offset in eid_count:\n part_eids = sorted_eid[start:start+offset]\n start += offset\n self._partid2eids.append(part_eids)\n # Get nidg2l\n self._nidg2l = [None] * self._num_partitions\n global_id = part_graph.ndata[NID]\n max_global_id = np.amax(F.asnumpy(global_id))\n # TODO(chao): support int32 index\n g2l = F.zeros((max_global_id+1), F.int64, F.context(global_id))\n g2l = F.scatter_row(g2l, global_id, F.arange(0, len(global_id)))\n self._nidg2l[self._part_id] = g2l\n # Get eidg2l\n self._eidg2l = [None] * self._num_partitions\n global_id = part_graph.edata[EID]\n max_global_id = np.amax(F.asnumpy(global_id))\n # TODO(chao): support int32 index\n g2l = F.zeros((max_global_id+1), F.int64, F.context(global_id))\n g2l = F.scatter_row(g2l, global_id, F.arange(0, len(global_id)))\n self._eidg2l[self._part_id] = g2l\n # node size and edge size\n self._edge_size = len(self.partid2eids(self._part_id))\n self._node_size = len(self.partid2nids(self._part_id))\n\n def shared_memory(self, graph_name):\n \"\"\"Move data to shared memory.\n \"\"\"\n self._meta, self._nid2partid, self._eid2partid = _move_metadata_to_shared_mem(\n graph_name, self._num_nodes(), self._num_edges(), self._part_id, self._num_partitions,\n self._nid2partid, self._eid2partid, False)\n\n def num_partitions(self):\n \"\"\"Return the number of partitions.\n \"\"\"\n return self._num_partitions\n\n def metadata(self):\n \"\"\"Return the partition meta data.\n \"\"\"\n return self._partition_meta_data\n\n def _num_nodes(self, ntype='_N'):\n \"\"\" The total number of nodes\n \"\"\"\n assert ntype == '_N', 'Base partition book only supports homogeneous graph.'\n return 
len(self._nid2partid)\n\n def _num_edges(self, etype='_E'):\n \"\"\" The total number of edges\n \"\"\"\n assert etype == '_E', 'Base partition book only supports homogeneous graph.'\n return len(self._eid2partid)\n\n def map_to_per_ntype(self, ids):\n \"\"\"Map global homogeneous node IDs to node type IDs.\n Returns\n type_ids, per_type_ids\n \"\"\"\n return F.zeros((len(ids),), F.int32, F.cpu()), ids\n\n def map_to_per_etype(self, ids):\n \"\"\"Map global homogeneous edge IDs to edge type IDs.\n Returns\n type_ids, per_type_ids\n \"\"\"\n return F.zeros((len(ids),), F.int32, F.cpu()), ids\n\n def map_to_homo_nid(self, ids, ntype):\n \"\"\"Map per-node-type IDs to global node IDs in the homogeneous format.\n \"\"\"\n assert ntype == '_N', 'Base partition book only supports homogeneous graph.'\n return ids\n\n def map_to_homo_eid(self, ids, etype):\n \"\"\"Map per-edge-type IDs to global edge IDs in the homoenegeous format.\n \"\"\"\n assert etype == '_E', 'Base partition book only supports homogeneous graph.'\n return ids\n\n def nid2partid(self, nids, ntype='_N'):\n \"\"\"From global node IDs to partition IDs\n \"\"\"\n assert ntype == '_N', 'Base partition book only supports homogeneous graph.'\n return F.gather_row(self._nid2partid, nids)\n\n def eid2partid(self, eids, etype='_E'):\n \"\"\"From global edge IDs to partition IDs\n \"\"\"\n assert etype == '_E', 'Base partition book only supports homogeneous graph.'\n return F.gather_row(self._eid2partid, eids)\n\n def partid2nids(self, partid, ntype='_N'):\n \"\"\"From partition id to global node IDs\n \"\"\"\n assert ntype == '_N', 'Base partition book only supports homogeneous graph.'\n return self._partid2nids[partid]\n\n def partid2eids(self, partid, etype='_E'):\n \"\"\"From partition id to global edge IDs\n \"\"\"\n assert etype == '_E', 'Base partition book only supports homogeneous graph.'\n return self._partid2eids[partid]\n\n def nid2localnid(self, nids, partid, ntype='_N'):\n \"\"\"Get local node IDs within the given partition.\n \"\"\"\n assert ntype == '_N', 'Base partition book only supports homogeneous graph.'\n if partid != self._part_id:\n raise RuntimeError('Now GraphPartitionBook does not support \\\n getting remote tensor of nid2localnid.')\n return F.gather_row(self._nidg2l[partid], nids)\n\n def eid2localeid(self, eids, partid, etype='_E'):\n \"\"\"Get the local edge ids within the given partition.\n \"\"\"\n assert etype == '_E', 'Base partition book only supports homogeneous graph.'\n if partid != self._part_id:\n raise RuntimeError('Now GraphPartitionBook does not support \\\n getting remote tensor of eid2localeid.')\n return F.gather_row(self._eidg2l[partid], eids)\n\n @property\n def partid(self):\n \"\"\"Get the current partition ID\n \"\"\"\n return self._part_id\n\n @property\n def ntypes(self):\n \"\"\"Get the list of node types\n \"\"\"\n return ['_N']\n\n @property\n def etypes(self):\n \"\"\"Get the list of edge types\n \"\"\"\n return ['_E']\n\n\nclass RangePartitionBook(GraphPartitionBook):\n \"\"\"This partition book supports more efficient storage of partition information.\n\n This partition book is used if the nodes and edges of a graph partition are assigned\n with contiguous IDs. It uses very small amount of memory to store the partition\n information.\n\n Parameters\n ----------\n part_id : int\n partition ID of current partition book\n num_parts : int\n number of total partitions\n node_map : dict[str, Tensor]\n Global node ID ranges within partitions for each node type. 
The key is the node type\n name in string. The value is a tensor of shape :math:`(K, 2)`, where :math:`K` is\n the number of partitions. Each row has two integers: the starting and the ending IDs\n for a particular node type in a partition. For example, all nodes of type ``\"T\"`` in\n partition ``i`` has ID range ``node_map[\"T\"][i][0]`` to ``node_map[\"T\"][i][1]``.\n edge_map : dict[str, Tensor]\n Global edge ID ranges within partitions for each edge type. The key is the edge type\n name in string. The value is a tensor of shape :math:`(K, 2)`, where :math:`K` is\n the number of partitions. Each row has two integers: the starting and the ending IDs\n for a particular edge type in a partition. For example, all edges of type ``\"T\"`` in\n partition ``i`` has ID range ``edge_map[\"T\"][i][0]`` to ``edge_map[\"T\"][i][1]``.\n ntypes : dict[str, int]\n map ntype strings to ntype IDs.\n etypes : dict[str, int]\n map etype strings to etype IDs.\n \"\"\"\n def __init__(self, part_id, num_parts, node_map, edge_map, ntypes, etypes):\n assert part_id >= 0, 'part_id cannot be a negative number.'\n assert num_parts > 0, 'num_parts must be greater than zero.'\n self._partid = part_id\n self._num_partitions = num_parts\n self._ntypes = [None] * len(ntypes)\n self._etypes = [None] * len(etypes)\n for ntype in ntypes:\n ntype_id = ntypes[ntype]\n self._ntypes[ntype_id] = ntype\n assert all([ntype is not None for ntype in self._ntypes]), \\\n \"The node types have invalid IDs.\"\n for etype in etypes:\n etype_id = etypes[etype]\n self._etypes[etype_id] = etype\n assert all([etype is not None for etype in self._etypes]), \\\n \"The edge types have invalid IDs.\"\n\n # This stores the node ID ranges for each node type in each partition.\n # The key is the node type, the value is a NumPy matrix with two columns, in which\n # each row indicates the start and the end of the node ID range in a partition.\n # The node IDs are global node IDs in the homogeneous representation.\n self._typed_nid_range = {}\n # This stores the node ID map for per-node-type IDs in each partition.\n # The key is the node type, the value is a NumPy vector which indicates\n # the last node ID in a partition.\n self._typed_max_node_ids = {}\n max_node_map = np.zeros((num_parts,), dtype=np.int64)\n for key in node_map:\n if not isinstance(node_map[key], np.ndarray):\n node_map[key] = F.asnumpy(node_map[key])\n assert node_map[key].shape == (num_parts, 2)\n self._typed_nid_range[key] = node_map[key]\n # This is used for per-node-type lookup.\n self._typed_max_node_ids[key] = np.cumsum(self._typed_nid_range[key][:, 1]\n - self._typed_nid_range[key][:, 0])\n # This is used for homogeneous node ID lookup.\n max_node_map = np.maximum(self._typed_nid_range[key][:, 1], max_node_map)\n # This is a vector that indicates the last node ID in each partition.\n # The ID is the global ID in the homogeneous representation.\n self._max_node_ids = max_node_map\n\n # Similar to _typed_nid_range.\n self._typed_eid_range = {}\n # similar to _typed_max_node_ids.\n self._typed_max_edge_ids = {}\n max_edge_map = np.zeros((num_parts,), dtype=np.int64)\n for key in edge_map:\n if not isinstance(edge_map[key], np.ndarray):\n edge_map[key] = F.asnumpy(edge_map[key])\n assert edge_map[key].shape == (num_parts, 2)\n self._typed_eid_range[key] = edge_map[key]\n # This is used for per-edge-type lookup.\n self._typed_max_edge_ids[key] = np.cumsum(self._typed_eid_range[key][:, 1]\n - self._typed_eid_range[key][:, 0])\n # This is used for homogeneous edge ID lookup.\n 
max_edge_map = np.maximum(self._typed_eid_range[key][:, 1], max_edge_map)\n # Similar to _max_node_ids\n self._max_edge_ids = max_edge_map\n\n # These two are map functions that map node/edge IDs to node/edge type IDs.\n self._nid_map = IdMap(self._typed_nid_range)\n self._eid_map = IdMap(self._typed_eid_range)\n\n # Get meta data of the partition book\n self._partition_meta_data = []\n for partid in range(self._num_partitions):\n nrange_start = max_node_map[partid - 1] if partid > 0 else 0\n nrange_end = max_node_map[partid]\n num_nodes = nrange_end - nrange_start\n\n erange_start = max_edge_map[partid - 1] if partid > 0 else 0\n erange_end = max_edge_map[partid]\n num_edges = erange_end - erange_start\n\n part_info = {}\n part_info['machine_id'] = partid\n part_info['num_nodes'] = int(num_nodes)\n part_info['num_edges'] = int(num_edges)\n self._partition_meta_data.append(part_info)\n\n def shared_memory(self, graph_name):\n \"\"\"Move data to shared memory.\n \"\"\"\n # we need to store the nid ranges and eid ranges of different types in the order defined\n # by type IDs.\n nid_range = [None] * len(self.ntypes)\n for i, ntype in enumerate(self.ntypes):\n nid_range[i] = (ntype, self._typed_nid_range[ntype])\n nid_range_pickle = pickle.dumps(nid_range)\n nid_range_pickle = [e for e in nid_range_pickle]\n\n eid_range = [None] * len(self.etypes)\n for i, etype in enumerate(self.etypes):\n eid_range[i] = (etype, self._typed_eid_range[etype])\n eid_range_pickle = pickle.dumps(eid_range)\n eid_range_pickle = [e for e in eid_range_pickle]\n\n self._meta = _move_metadata_to_shared_mem(graph_name,\n 0, # We don't need to provide the number of nodes\n 0, # We don't need to provide the number of edges\n self._partid, self._num_partitions,\n F.tensor(nid_range_pickle),\n F.tensor(eid_range_pickle),\n True)\n\n def num_partitions(self):\n \"\"\"Return the number of partitions.\n \"\"\"\n return self._num_partitions\n\n\n def _num_nodes(self, ntype='_N'):\n \"\"\" The total number of nodes\n \"\"\"\n if ntype == '_N':\n return int(self._max_node_ids[-1])\n else:\n return int(self._typed_max_node_ids[ntype][-1])\n\n def _num_edges(self, etype='_E'):\n \"\"\" The total number of edges\n \"\"\"\n if etype == '_E':\n return int(self._max_edge_ids[-1])\n else:\n return int(self._typed_max_edge_ids[etype][-1])\n\n def metadata(self):\n \"\"\"Return the partition meta data.\n \"\"\"\n return self._partition_meta_data\n\n def map_to_per_ntype(self, ids):\n \"\"\"Map global homogeneous node IDs to node type IDs.\n Returns\n type_ids, per_type_ids\n \"\"\"\n return self._nid_map(ids)\n\n def map_to_per_etype(self, ids):\n \"\"\"Map global homogeneous edge IDs to edge type IDs.\n Returns\n type_ids, per_type_ids\n \"\"\"\n return self._eid_map(ids)\n\n def map_to_homo_nid(self, ids, ntype):\n \"\"\"Map per-node-type IDs to global node IDs in the homogeneous format.\n \"\"\"\n ids = utils.toindex(ids).tousertensor()\n partids = self.nid2partid(ids, ntype)\n typed_max_nids = F.zerocopy_from_numpy(self._typed_max_node_ids[ntype])\n end_diff = F.gather_row(typed_max_nids, partids) - ids\n typed_nid_range = F.zerocopy_from_numpy(self._typed_nid_range[ntype][:, 1])\n return F.gather_row(typed_nid_range, partids) - end_diff\n\n def map_to_homo_eid(self, ids, etype):\n \"\"\"Map per-edge-type IDs to global edge IDs in the homoenegeous format.\n \"\"\"\n ids = utils.toindex(ids).tousertensor()\n partids = self.eid2partid(ids, etype)\n typed_max_eids = F.zerocopy_from_numpy(self._typed_max_edge_ids[etype])\n end_diff = 
F.gather_row(typed_max_eids, partids) - ids\n typed_eid_range = F.zerocopy_from_numpy(self._typed_eid_range[etype][:, 1])\n return F.gather_row(typed_eid_range, partids) - end_diff\n\n def nid2partid(self, nids, ntype='_N'):\n \"\"\"From global node IDs to partition IDs\n \"\"\"\n nids = utils.toindex(nids)\n if ntype == '_N':\n ret = np.searchsorted(self._max_node_ids, nids.tonumpy(), side='right')\n else:\n ret = np.searchsorted(self._typed_max_node_ids[ntype], nids.tonumpy(), side='right')\n ret = utils.toindex(ret)\n return ret.tousertensor()\n\n def eid2partid(self, eids, etype='_E'):\n \"\"\"From global edge IDs to partition IDs\n \"\"\"\n eids = utils.toindex(eids)\n if etype == '_E':\n ret = np.searchsorted(self._max_edge_ids, eids.tonumpy(), side='right')\n else:\n ret = np.searchsorted(self._typed_max_edge_ids[etype], eids.tonumpy(), side='right')\n ret = utils.toindex(ret)\n return ret.tousertensor()\n\n\n def partid2nids(self, partid, ntype='_N'):\n \"\"\"From partition ID to global node IDs\n \"\"\"\n # TODO do we need to cache it?\n if ntype == '_N':\n start = self._max_node_ids[partid - 1] if partid > 0 else 0\n end = self._max_node_ids[partid]\n return F.arange(start, end)\n else:\n start = self._typed_max_node_ids[ntype][partid - 1] if partid > 0 else 0\n end = self._typed_max_node_ids[ntype][partid]\n return F.arange(start, end)\n\n\n def partid2eids(self, partid, etype='_E'):\n \"\"\"From partition ID to global edge IDs\n \"\"\"\n # TODO do we need to cache it?\n if etype == '_E':\n start = self._max_edge_ids[partid - 1] if partid > 0 else 0\n end = self._max_edge_ids[partid]\n return F.arange(start, end)\n else:\n start = self._typed_max_edge_ids[etype][partid - 1] if partid > 0 else 0\n end = self._typed_max_edge_ids[etype][partid]\n return F.arange(start, end)\n\n\n def nid2localnid(self, nids, partid, ntype='_N'):\n \"\"\"Get local node IDs within the given partition.\n \"\"\"\n if partid != self._partid:\n raise RuntimeError('Now RangePartitionBook does not support \\\n getting remote tensor of nid2localnid.')\n\n nids = utils.toindex(nids)\n nids = nids.tousertensor()\n if ntype == '_N':\n start = self._max_node_ids[partid - 1] if partid > 0 else 0\n else:\n start = self._typed_max_node_ids[ntype][partid - 1] if partid > 0 else 0\n return nids - int(start)\n\n\n def eid2localeid(self, eids, partid, etype='_E'):\n \"\"\"Get the local edge IDs within the given partition.\n \"\"\"\n if partid != self._partid:\n raise RuntimeError('Now RangePartitionBook does not support \\\n getting remote tensor of eid2localeid.')\n\n eids = utils.toindex(eids)\n eids = eids.tousertensor()\n if etype == '_E':\n start = self._max_edge_ids[partid - 1] if partid > 0 else 0\n else:\n start = self._typed_max_edge_ids[etype][partid - 1] if partid > 0 else 0\n return eids - int(start)\n\n\n @property\n def partid(self):\n \"\"\"Get the current partition ID.\n \"\"\"\n return self._partid\n\n @property\n def ntypes(self):\n \"\"\"Get the list of node types\n \"\"\"\n return self._ntypes\n\n @property\n def etypes(self):\n \"\"\"Get the list of edge types\n \"\"\"\n return self._etypes\n\nNODE_PART_POLICY = 'node'\nEDGE_PART_POLICY = 'edge'\n\nclass PartitionPolicy(object):\n \"\"\"This defines a partition policy for a distributed tensor or distributed embedding.\n\n When DGL shards tensors and stores them in a cluster of machines, it requires\n partition policies that map rows of the tensors to machines in the cluster.\n\n Although an arbitrary partition policy can be defined, DGL currently 
supports\n two partition policies for mapping nodes and edges to machines. To define a partition\n policy from a graph partition book, users need to specify the policy name ('node' or 'edge').\n\n Parameters\n ----------\n policy_str : str\n Partition policy name, e.g., 'edge:_E' or 'node:_N'.\n partition_book : GraphPartitionBook\n A graph partition book\n \"\"\"\n def __init__(self, policy_str, partition_book):\n splits = policy_str.split(':')\n if len(splits) == 1:\n assert policy_str in (EDGE_PART_POLICY, NODE_PART_POLICY), \\\n 'policy_str must contain \\'edge\\' or \\'node\\'.'\n if NODE_PART_POLICY == policy_str:\n policy_str = NODE_PART_POLICY + \":_N\"\n else:\n policy_str = EDGE_PART_POLICY + \":_E\"\n self._policy_str = policy_str\n self._part_id = partition_book.partid\n self._partition_book = partition_book\n\n @property\n def policy_str(self):\n \"\"\"Get the policy name\n\n Returns\n -------\n str\n The name of the partition policy.\n \"\"\"\n return self._policy_str\n\n @property\n def part_id(self):\n \"\"\"Get partition ID\n\n Returns\n -------\n int\n The partition ID\n \"\"\"\n return self._part_id\n\n @property\n def partition_book(self):\n \"\"\"Get partition book\n\n Returns\n -------\n GraphPartitionBook\n The graph partition book\n \"\"\"\n return self._partition_book\n\n def get_data_name(self, name):\n \"\"\"Get HeteroDataName\n \"\"\"\n is_node = NODE_PART_POLICY in self._policy_str\n return HeteroDataName(is_node, self._policy_str[5:], name)\n\n def to_local(self, id_tensor):\n \"\"\"Mapping global ID to local ID.\n\n Parameters\n ----------\n id_tensor : tensor\n Gloabl ID tensor\n\n Return\n ------\n tensor\n local ID tensor\n \"\"\"\n if EDGE_PART_POLICY in self._policy_str:\n return self._partition_book.eid2localeid(id_tensor, self._part_id, self._policy_str[5:])\n elif NODE_PART_POLICY in self._policy_str:\n return self._partition_book.nid2localnid(id_tensor, self._part_id, self._policy_str[5:])\n else:\n raise RuntimeError('Cannot support policy: %s ' % self._policy_str)\n\n def to_partid(self, id_tensor):\n \"\"\"Mapping global ID to partition ID.\n\n Parameters\n ----------\n id_tensor : tensor\n Global ID tensor\n\n Return\n ------\n tensor\n partition ID\n \"\"\"\n if EDGE_PART_POLICY in self._policy_str:\n return self._partition_book.eid2partid(id_tensor, self._policy_str[5:])\n elif NODE_PART_POLICY in self._policy_str:\n return self._partition_book.nid2partid(id_tensor, self._policy_str[5:])\n else:\n raise RuntimeError('Cannot support policy: %s ' % self._policy_str)\n\n def get_part_size(self):\n \"\"\"Get data size of current partition.\n\n Returns\n -------\n int\n data size\n \"\"\"\n if EDGE_PART_POLICY in self._policy_str:\n return len(self._partition_book.partid2eids(self._part_id, self._policy_str[5:]))\n elif NODE_PART_POLICY in self._policy_str:\n return len(self._partition_book.partid2nids(self._part_id, self._policy_str[5:]))\n else:\n raise RuntimeError('Cannot support policy: %s ' % self._policy_str)\n\n def get_size(self):\n \"\"\"Get the full size of the data.\n\n Returns\n -------\n int\n data size\n \"\"\"\n if EDGE_PART_POLICY in self._policy_str:\n return self._partition_book._num_edges(self._policy_str[5:])\n elif NODE_PART_POLICY in self._policy_str:\n return self._partition_book._num_nodes(self._policy_str[5:])\n else:\n raise RuntimeError('Cannot support policy: %s ' % self._policy_str)\n\nclass NodePartitionPolicy(PartitionPolicy):\n '''Partition policy for nodes.\n '''\n def __init__(self, partition_book, ntype='_N'):\n 
super(NodePartitionPolicy, self).__init__(NODE_PART_POLICY + ':' + ntype, partition_book)\n\nclass EdgePartitionPolicy(PartitionPolicy):\n '''Partition policy for edges.\n '''\n def __init__(self, partition_book, etype='_E'):\n super(EdgePartitionPolicy, self).__init__(EDGE_PART_POLICY + ':' + etype, partition_book)\n\nclass HeteroDataName(object):\n ''' The data name in a heterogeneous graph.\n\n A unique data name has three components:\n * indicate it's node data or edge data.\n * indicate the node/edge type.\n * the name of the data.\n\n Parameters\n ----------\n is_node : bool\n Indicate whether it's node data or edge data.\n entity_type : str\n The type of the node/edge.\n data_name : str\n The name of the data.\n '''\n def __init__(self, is_node, entity_type, data_name):\n self.policy_str = NODE_PART_POLICY if is_node else EDGE_PART_POLICY\n self.policy_str = self.policy_str + ':' + entity_type\n self.data_name = data_name\n\n def is_node(self):\n ''' Is this the name of node data\n '''\n return NODE_PART_POLICY in self.policy_str\n\n def is_edge(self):\n ''' Is this the name of edge data\n '''\n return EDGE_PART_POLICY in self.policy_str\n\n def get_type(self):\n ''' The type of the node/edge.\n This is only meaningful in a heterogeneous graph.\n In homogeneous graph, type is '_N' for a node and '_E' for an edge.\n '''\n return self.policy_str[5:]\n\n def get_name(self):\n ''' The name of the data.\n '''\n return self.data_name\n\n def __str__(self):\n ''' The full name of the data.\n\n The full name is used as the key in the KVStore.\n '''\n return self.policy_str + ':' + self.data_name\n\ndef parse_hetero_data_name(name):\n '''Parse data name and create HeteroDataName.\n\n The data name has a specialized format. We can parse the name to determine if\n it's node data or edge data, node/edge type and its actual name. The data name\n has three fields and they are separated by \":\".\n\n Parameters\n ----------\n name : str\n The data name\n\n Returns\n -------\n HeteroDataName\n '''\n names = name.split(':')\n assert len(names) == 3, '{} is not a valid heterograph data name'.format(name)\n assert names[0] in (NODE_PART_POLICY, EDGE_PART_POLICY), \\\n '{} is not a valid heterograph data name'.format(name)\n return HeteroDataName(names[0] == NODE_PART_POLICY, names[1], names[2])\n" ]
[ [ "numpy.maximum", "numpy.zeros", "numpy.cumsum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
zpwithme/zzzzpppp
[ "0f5df647f1e9d6cb8c01b3fc7df25ee543714af3", "0f5df647f1e9d6cb8c01b3fc7df25ee543714af3", "0f5df647f1e9d6cb8c01b3fc7df25ee543714af3", "0f5df647f1e9d6cb8c01b3fc7df25ee543714af3", "0f5df647f1e9d6cb8c01b3fc7df25ee543714af3" ]
[ "deep-learning-for-image-processing-master/pytorch_object_detection/train_coco_dataset/network_files/boxes.py", "deep-learning-for-image-processing-master/others_project/readPbFile/readPb.py", "deep-learning-for-image-processing-master/pytorch_object_detection/train_coco_dataset/backbone/feature_pyramid_network.py", "deep-learning-for-image-processing-master/pytorch_classification/analyze_weights_featuremap/alexnet_model.py", "deep-learning-for-image-processing-master/pytorch_classification/Test1_official_demo/model.py" ]
[ "import torch\nfrom typing import Tuple\nfrom torch import Tensor\nimport torchvision\n\n\ndef nms(boxes, scores, iou_threshold):\n # type: (Tensor, Tensor, float) -> Tensor\n \"\"\"\n Performs non-maximum suppression (NMS) on the boxes according\n to their intersection-over-union (IoU).\n\n NMS iteratively removes lower scoring boxes which have an\n IoU greater than iou_threshold with another (higher scoring)\n box.\n\n Parameters\n ----------\n boxes : Tensor[N, 4])\n boxes to perform NMS on. They\n are expected to be in (x1, y1, x2, y2) format\n scores : Tensor[N]\n scores for each one of the boxes\n iou_threshold : float\n discards all overlapping\n boxes with IoU < iou_threshold\n\n Returns\n -------\n keep : Tensor\n int64 tensor with the indices\n of the elements that have been kept\n by NMS, sorted in decreasing order of scores\n \"\"\"\n return torch.ops.torchvision.nms(boxes, scores, iou_threshold)\n\n\ndef batched_nms(boxes, scores, idxs, iou_threshold):\n # type: (Tensor, Tensor, Tensor, float) -> Tensor\n \"\"\"\n Performs non-maximum suppression in a batched fashion.\n\n Each index value correspond to a category, and NMS\n will not be applied between elements of different categories.\n\n Parameters\n ----------\n boxes : Tensor[N, 4]\n boxes where NMS will be performed. They\n are expected to be in (x1, y1, x2, y2) format\n scores : Tensor[N]\n scores for each one of the boxes\n idxs : Tensor[N]\n indices of the categories for each one of the boxes.\n iou_threshold : float\n discards all overlapping boxes\n with IoU < iou_threshold\n\n Returns\n -------\n keep : Tensor\n int64 tensor with the indices of\n the elements that have been kept by NMS, sorted\n in decreasing order of scores\n \"\"\"\n if boxes.numel() == 0:\n return torch.empty((0,), dtype=torch.int64, device=boxes.device)\n\n # strategy: in order to perform NMS independently per class.\n # we add an offset to all the boxes. 
The offset is dependent\n # only on the class idx, and is large enough so that boxes\n # from different classes do not overlap\n # 获取所有boxes中最大的坐标值(xmin, ymin, xmax, ymax)\n max_coordinate = boxes.max()\n\n # to(): Performs Tensor dtype and/or device conversion\n # 为每一个类别/每一层生成一个很大的偏移量\n # 这里的to只是让生成tensor的dytpe和device与boxes保持一致\n offsets = idxs.to(boxes) * (max_coordinate + 1)\n # boxes加上对应层的偏移量后,保证不同类别/层之间boxes不会有重合的现象\n boxes_for_nms = boxes + offsets[:, None]\n keep = nms(boxes_for_nms, scores, iou_threshold)\n return keep\n\n\ndef remove_small_boxes(boxes, min_size):\n # type: (Tensor, float) -> Tensor\n \"\"\"\n Remove boxes which contains at least one side smaller than min_size.\n 移除宽高小于指定阈值的索引\n Arguments:\n boxes (Tensor[N, 4]): boxes in (x1, y1, x2, y2) format\n min_size (float): minimum size\n\n Returns:\n keep (Tensor[K]): indices of the boxes that have both sides\n larger than min_size\n \"\"\"\n ws, hs = boxes[:, 2] - boxes[:, 0], boxes[:, 3] - boxes[:, 1] # 预测boxes的宽和高\n # keep = (ws >= min_size) & (hs >= min_size) # 当满足宽,高都大于给定阈值时为True\n keep = torch.logical_and(torch.ge(ws, min_size), torch.ge(hs, min_size))\n # nonzero(): Returns a tensor containing the indices of all non-zero elements of input\n # keep = keep.nonzero().squeeze(1)\n keep = torch.where(keep)[0]\n return keep\n\n\ndef clip_boxes_to_image(boxes, size):\n # type: (Tensor, Tuple[int, int]) -> Tensor\n \"\"\"\n Clip boxes so that they lie inside an image of size `size`.\n 裁剪预测的boxes信息,将越界的坐标调整到图片边界上\n\n Arguments:\n boxes (Tensor[N, 4]): boxes in (x1, y1, x2, y2) format\n size (Tuple[height, width]): size of the image\n\n Returns:\n clipped_boxes (Tensor[N, 4])\n \"\"\"\n dim = boxes.dim()\n boxes_x = boxes[..., 0::2] # x1, x2\n boxes_y = boxes[..., 1::2] # y1, y2\n height, width = size\n\n if torchvision._is_tracing():\n boxes_x = torch.max(boxes_x, torch.tensor(0, dtype=boxes.dtype, device=boxes.device))\n boxes_x = torch.min(boxes_x, torch.tensor(width, dtype=boxes.dtype, device=boxes.device))\n boxes_y = torch.max(boxes_y, torch.tensor(0, dtype=boxes.dtype, device=boxes.device))\n boxes_y = torch.min(boxes_y, torch.tensor(height, dtype=boxes.dtype, device=boxes.device))\n else:\n boxes_x = boxes_x.clamp(min=0, max=width) # 限制x坐标范围在[0,width]之间\n boxes_y = boxes_y.clamp(min=0, max=height) # 限制y坐标范围在[0,height]之间\n\n clipped_boxes = torch.stack((boxes_x, boxes_y), dim=dim)\n return clipped_boxes.reshape(boxes.shape)\n\n\ndef box_area(boxes):\n \"\"\"\n Computes the area of a set of bounding boxes, which are specified by its\n (x1, y1, x2, y2) coordinates.\n\n Arguments:\n boxes (Tensor[N, 4]): boxes for which the area will be computed. 
They\n are expected to be in (x1, y1, x2, y2) format\n\n Returns:\n area (Tensor[N]): area for each box\n \"\"\"\n return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])\n\n\ndef box_iou(boxes1, boxes2):\n \"\"\"\n Return intersection-over-union (Jaccard index) of boxes.\n\n Both sets of boxes are expected to be in (x1, y1, x2, y2) format.\n\n Arguments:\n boxes1 (Tensor[N, 4])\n boxes2 (Tensor[M, 4])\n\n Returns:\n iou (Tensor[N, M]): the NxM matrix containing the pairwise\n IoU values for every element in boxes1 and boxes2\n \"\"\"\n area1 = box_area(boxes1)\n area2 = box_area(boxes2)\n\n # When the shapes do not match,\n # the shape of the returned output tensor follows the broadcasting rules\n lt = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # left-top [N,M,2]\n rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # right-bottom [N,M,2]\n\n wh = (rb - lt).clamp(min=0) # [N,M,2]\n inter = wh[:, :, 0] * wh[:, :, 1] # [N,M]\n\n iou = inter / (area1[:, None] + area2 - inter)\n return iou\n\n", "import tensorflow as tf\nimport configparser\nfrom distutils.version import StrictVersion\nimport cv2\nimport glob\nfrom using_function import draw_box, read_pbtxt, get_inAndout_tensor, convert_type, read_image\n\nif StrictVersion(tf.__version__) < StrictVersion('1.12.0'):\n raise ImportError('Please upgrade your TensorFlow installation to v1.12.*.')\n\n# 读取参数配置文件\nconf = configparser.ConfigParser()\nconf.read('info.config')\npath_to_frozen_graph = conf.get('tensorflow', 'path_to_frozen_graph')\npath_to_labels = conf.get('tensorflow', 'path_to_labels')\npath_to_images = conf.get('tensorflow', 'path_to_images')\nprobability_thresh = float(conf.get('tensorflow', 'probability_thresh'))\n\n# 读取pbtxt标签信息\ncategory_index = read_pbtxt(path_to_labels)\n\ndetection_graph = tf.Graph()\nwith detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(path_to_frozen_graph, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n\nwith detection_graph.as_default():\n with tf.Session() as sess:\n # Get handles to input and output tensors\n tensor_dict, image_tensor = get_inAndout_tensor()\n test_image_paths = glob.glob(path_to_images)\n for image_path in test_image_paths:\n image_BGR, image_np_expanded = read_image(image_path)\n\n # Run inference\n output_dict = sess.run(tensor_dict,\n feed_dict={image_tensor: image_np_expanded})\n # all outputs are float32 numpy arrays, so convert types as appropriate\n convert_type(output_dict)\n\n draw_box(image_BGR,\n output_dict['detection_boxes'],\n output_dict['detection_classes'],\n output_dict['detection_scores'],\n category_index,\n thresh=probability_thresh,\n line_thickness=5)\n cv2.namedWindow(\"prediction\", cv2.WINDOW_AUTOSIZE)\n cv2.imshow(\"prediction\", image_BGR)\n cv2.waitKey(0)\n", "from collections import OrderedDict\n\nimport torch.nn as nn\nimport torch\nfrom torch import Tensor\nimport torch.nn.functional as F\n\nfrom torch.jit.annotations import Tuple, List, Dict\n\n\nclass FeaturePyramidNetwork(nn.Module):\n \"\"\"\n Module that adds a FPN from on top of a set of feature maps. 
This is based on\n `\"Feature Pyramid Network for Object Detection\" <https://arxiv.org/abs/1612.03144>`_.\n The feature maps are currently supposed to be in increasing depth\n order.\n The input to the model is expected to be an OrderedDict[Tensor], containing\n the feature maps on top of which the FPN will be added.\n Arguments:\n in_channels_list (list[int]): number of channels for each feature map that\n is passed to the module\n out_channels (int): number of channels of the FPN representation\n extra_blocks (ExtraFPNBlock or None): if provided, extra operations will\n be performed. It is expected to take the fpn features, the original\n features and the names of the original features as input, and returns\n a new list of feature maps and their corresponding names\n \"\"\"\n\n def __init__(self, in_channels_list, out_channels, extra_blocks=None):\n super(FeaturePyramidNetwork, self).__init__()\n # 用来调整resnet特征矩阵(layer1,2,3,4)的channel(kernel_size=1)\n self.inner_blocks = nn.ModuleList()\n # 对调整后的特征矩阵使用3x3的卷积核来得到对应的预测特征矩阵\n self.layer_blocks = nn.ModuleList()\n for in_channels in in_channels_list:\n if in_channels == 0:\n continue\n inner_block_module = nn.Conv2d(in_channels, out_channels, 1)\n layer_block_module = nn.Conv2d(out_channels, out_channels, 3, padding=1)\n self.inner_blocks.append(inner_block_module)\n self.layer_blocks.append(layer_block_module)\n\n # initialize parameters now to avoid modifying the initialization of top_blocks\n for m in self.children():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_uniform_(m.weight, a=1)\n nn.init.constant_(m.bias, 0)\n\n self.extra_blocks = extra_blocks\n\n def get_result_from_inner_blocks(self, x, idx):\n # type: (Tensor, int) -> Tensor\n \"\"\"\n This is equivalent to self.inner_blocks[idx](x),\n but torchscript doesn't support this yet\n \"\"\"\n num_blocks = len(self.inner_blocks)\n if idx < 0:\n idx += num_blocks\n i = 0\n out = x\n for module in self.inner_blocks:\n if i == idx:\n out = module(x)\n i += 1\n return out\n\n def get_result_from_layer_blocks(self, x, idx):\n # type: (Tensor, int) -> Tensor\n \"\"\"\n This is equivalent to self.layer_blocks[idx](x),\n but torchscript doesn't support this yet\n \"\"\"\n num_blocks = len(self.layer_blocks)\n if idx < 0:\n idx += num_blocks\n i = 0\n out = x\n for module in self.layer_blocks:\n if i == idx:\n out = module(x)\n i += 1\n return out\n\n def forward(self, x):\n # type: (Dict[str, Tensor]) -> Dict[str, Tensor]\n \"\"\"\n Computes the FPN for a set of feature maps.\n Arguments:\n x (OrderedDict[Tensor]): feature maps for each feature level.\n Returns:\n results (OrderedDict[Tensor]): feature maps after FPN layers.\n They are ordered from highest resolution first.\n \"\"\"\n # unpack OrderedDict into two lists for easier handling\n names = list(x.keys())\n x = list(x.values())\n\n # 将resnet layer4的channel调整到指定的out_channels\n # last_inner = self.inner_blocks[-1](x[-1])\n last_inner = self.get_result_from_inner_blocks(x[-1], -1)\n # result中保存着每个预测特征层\n results = []\n # 将layer4调整channel后的特征矩阵,通过3x3卷积后得到对应的预测特征矩阵\n # results.append(self.layer_blocks[-1](last_inner))\n results.append(self.get_result_from_layer_blocks(last_inner, -1))\n\n for idx in range(len(x) - 2, -1, -1):\n inner_lateral = self.get_result_from_inner_blocks(x[idx], idx)\n feat_shape = inner_lateral.shape[-2:]\n inner_top_down = F.interpolate(last_inner, size=feat_shape, mode=\"nearest\")\n last_inner = inner_lateral + inner_top_down\n results.insert(0, self.get_result_from_layer_blocks(last_inner, idx))\n\n # 
在layer4对应的预测特征层基础上生成预测特征矩阵5\n if self.extra_blocks is not None:\n results, names = self.extra_blocks(results, x, names)\n\n # make it back an OrderedDict\n out = OrderedDict([(k, v) for k, v in zip(names, results)])\n\n return out\n\n\nclass LastLevelMaxPool(torch.nn.Module):\n \"\"\"\n Applies a max_pool2d on top of the last feature map\n \"\"\"\n\n def forward(self, x, y, names):\n # type: (List[Tensor], List[Tensor], List[str]) -> Tuple[List[Tensor], List[str]]\n names.append(\"pool\")\n x.append(F.max_pool2d(x[-1], 1, 2, 0))\n return x, names\n", "import torch.nn as nn\nimport torch\n\n\nclass AlexNet(nn.Module):\n def __init__(self, num_classes=1000, init_weights=False):\n super(AlexNet, self).__init__()\n self.features = nn.Sequential(\n nn.Conv2d(3, 48, kernel_size=11, stride=4, padding=2), # input[3, 224, 224] output[48, 55, 55]\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2), # output[48, 27, 27]\n nn.Conv2d(48, 128, kernel_size=5, padding=2), # output[128, 27, 27]\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2), # output[128, 13, 13]\n nn.Conv2d(128, 192, kernel_size=3, padding=1), # output[192, 13, 13]\n nn.ReLU(inplace=True),\n nn.Conv2d(192, 192, kernel_size=3, padding=1), # output[192, 13, 13]\n nn.ReLU(inplace=True),\n nn.Conv2d(192, 128, kernel_size=3, padding=1), # output[128, 13, 13]\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2), # output[128, 6, 6]\n )\n self.classifier = nn.Sequential(\n nn.Dropout(p=0.5),\n nn.Linear(128 * 6 * 6, 2048),\n nn.ReLU(inplace=True),\n nn.Dropout(p=0.5),\n nn.Linear(2048, 2048),\n nn.ReLU(inplace=True),\n nn.Linear(2048, num_classes),\n )\n if init_weights:\n self._initialize_weights()\n\n def forward(self, x):\n outputs = []\n for name, module in self.features.named_children():\n x = module(x)\n if name in [\"0\", \"3\", \"6\"]:\n outputs.append(x)\n\n return outputs\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.Linear):\n nn.init.normal_(m.weight, 0, 0.01)\n nn.init.constant_(m.bias, 0)\n", "import torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass LeNet(nn.Module):\n def __init__(self):\n super(LeNet, self).__init__()\n self.conv1 = nn.Conv2d(3, 16, 5)\n self.pool1 = nn.MaxPool2d(2, 2)\n self.conv2 = nn.Conv2d(16, 32, 5)\n self.pool2 = nn.MaxPool2d(2, 2)\n self.fc1 = nn.Linear(32*5*5, 120)\n self.fc2 = nn.Linear(120, 84)\n self.fc3 = nn.Linear(84, 10)\n\n def forward(self, x):\n x = F.relu(self.conv1(x)) # input(3, 32, 32) output(16, 28, 28)\n x = self.pool1(x) # output(16, 14, 14)\n x = F.relu(self.conv2(x)) # output(32, 10, 10)\n x = self.pool2(x) # output(32, 5, 5)\n x = x.view(-1, 32*5*5) # output(32*5*5)\n x = F.relu(self.fc1(x)) # output(120)\n x = F.relu(self.fc2(x)) # output(84)\n x = self.fc3(x) # output(10)\n return x\n\n\n" ]
[ [ "torch.ge", "torch.max", "torch.empty", "torch.min", "torch.ops.torchvision.nms", "torch.tensor", "torch.where", "torch.stack" ], [ "tensorflow.Graph", "tensorflow.import_graph_def", "tensorflow.gfile.GFile", "tensorflow.Session", "tensorflow.GraphDef" ], [ "torch.nn.init.constant_", "torch.nn.ModuleList", "torch.nn.Conv2d", "torch.nn.init.kaiming_uniform_", "torch.nn.functional.interpolate", "torch.nn.functional.max_pool2d" ], [ "torch.nn.Dropout", "torch.nn.init.constant_", "torch.nn.Conv2d", "torch.nn.MaxPool2d", "torch.nn.Linear", "torch.nn.init.normal_", "torch.nn.ReLU", "torch.nn.init.kaiming_normal_" ], [ "torch.nn.Linear", "torch.nn.MaxPool2d", "torch.nn.Conv2d" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
talk2sunil83/UpgradLearning
[ "70c4f993c68ce5030e9df0edd15004bbb9fc71e7", "70c4f993c68ce5030e9df0edd15004bbb9fc71e7", "70c4f993c68ce5030e9df0edd15004bbb9fc71e7", "70c4f993c68ce5030e9df0edd15004bbb9fc71e7", "70c4f993c68ce5030e9df0edd15004bbb9fc71e7" ]
[ "zExtraLearning/MLPrep/tf2.0/NbExtracts/23tf2_0_mirrored_strategy.py", "03Machine Learning 1/02Logistic Regression/04Logistic Regression - Industry Applications - Part II/temp.py", "02Statistics Essentials/03Hypothesis Testing/02Concepts of Hypothesis Testing - II/Concepts of Hypothesis Testing - II.py", "06Deep Learning/01Introduction to Neural Networks/OnlineClass/nn_from_scratch.py", "carrier/clustering/src/utils/eda.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"TF2.0 Mirrored Strategy.ipynb\n\nAutomatically generated by Colaboratory.\n\nOriginal file is located at\n https://colab.research.google.com/drive/1e7_N_vVQGyfa3Wz9ND0smWnnsHsQUs_k\n\"\"\"\n\n# Commented out IPython magic to ensure Python compatibility.\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Input, Conv2D, Dense, Flatten, Dropout, GlobalMaxPooling2D, MaxPooling2D, BatchNormalization\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\nprint(tf.__version__)\n\n# additional imports\n\n\n# Load in the data\ncifar10 = tf.keras.datasets.cifar10\n\n(x_train, y_train), (x_test, y_test) = cifar10.load_data()\nx_train, x_test = x_train / 255.0, x_test / 255.0\ny_train, y_test = y_train.flatten(), y_test.flatten()\nprint(\"x_train.shape:\", x_train.shape)\nprint(\"y_train.shape\", y_train.shape)\n\n# number of classes\nK = len(set(y_train))\nprint(\"number of classes:\", K)\n\n# Build the model using the functional API\n\n\ndef create_model():\n i = Input(shape=x_train[0].shape)\n\n x = Conv2D(32, (3, 3), activation='relu', padding='same')(i)\n x = BatchNormalization()(x)\n x = Conv2D(32, (3, 3), activation='relu', padding='same')(x)\n x = BatchNormalization()(x)\n x = MaxPooling2D((2, 2))(x)\n x = Conv2D(64, (3, 3), activation='relu', padding='same')(x)\n x = BatchNormalization()(x)\n x = Conv2D(64, (3, 3), activation='relu', padding='same')(x)\n x = BatchNormalization()(x)\n x = MaxPooling2D((2, 2))(x)\n x = Conv2D(128, (3, 3), activation='relu', padding='same')(x)\n x = BatchNormalization()(x)\n x = Conv2D(128, (3, 3), activation='relu', padding='same')(x)\n x = BatchNormalization()(x)\n x = MaxPooling2D((2, 2))(x)\n\n x = Flatten()(x)\n x = Dropout(0.2)(x)\n x = Dense(1024, activation='relu')(x)\n x = Dropout(0.2)(x)\n x = Dense(K, activation='softmax')(x)\n\n model = Model(i, x)\n return model\n\n\nstrategy = tf.distribute.MirroredStrategy()\n# strategy = tf.distribute.experimental.CentralStorageStrategy()\n\nprint(f'Number of devices: {strategy.num_replicas_in_sync}')\n\nwith strategy.scope():\n model = create_model()\n\n model.compile(loss='sparse_categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\n# Fit\nr = model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=5)\n\n50000/391\n\n10000/79\n\n# Compare this to non-distributed training\nmodel2 = create_model()\nmodel2.compile(loss='sparse_categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\nr = model2.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=5)\n", "# %%\r\nimport pandas as pd\r\nfrom math import radians, sqrt, ceil\r\n# %%\r\n\r\nn = 10\r\n\r\n\r\ndef fib(n: int) -> None:\r\n if n <= 0:\r\n raise ValueError(\"n cannot be negative\")\r\n elif n == 1:\r\n print(0)\r\n elif n == 2:\r\n print(0)\r\n print(1)\r\n else:\r\n pre_pre, pre = 0, 1\r\n print(pre_pre)\r\n print(pre)\r\n for i in range(n-2):\r\n next_v = pre_pre+pre\r\n print(next_v)\r\n pre_pre, pre = pre, next_v\r\n\r\n\r\nfib(n)\r\n# %%\r\nn = 10\r\n\r\n\r\ndef is_prime(n: int) -> str:\r\n if n > 1:\r\n for i in range(2, ceil(sqrt(n))):\r\n if n % i == 0:\r\n return \"number entered is not prime\"\r\n return \"number entered is prime\"\r\n else:\r\n raise ValueError(\"n must be greater than one\")\r\n\r\n\r\nis_prime(n)\r\n\r\n# %%\r\n\r\nn = 152\r\n\r\n\r\ndef is_armstrong(n: int) -> bool:\r\n return sum([int(d)**3 for d in str(n)]) == n\r\n\r\n\r\nprint(is_armstrong(n))\r\n# %%\r\nn = 153\r\n\r\n\r\ndef f(n: 
int) -> str:\r\n pass\r\n\r\n\r\nprint(f(n))\r\n\r\n# %%\r\n\r\ndf = pd.read_csv(\r\n \"https://media-doselect.s3.amazonaws.com/generic/X0kvr3wEYXRzONE5W37xWWYYA/test.csv\")\r\n\r\nto_omit = ['PassengerId', 'Pclass', 'Name', 'Sex', 'Embarked']\r\n# %%\r\n\r\n# cols_to_select = sorted(set(df.columns) - set(to_omit))\r\n# cols_to_select\r\n\r\n# print(df[sorted(set(df.columns) - set(to_omit))].head())\r\n\r\nnew_df = df.drop(to_omit, axis=1)\r\nnew_df = new_df[sorted(new_df.columns)]\r\nprint(new_df.head(5))\r\n# %%\r\ndf = df[df.columns[~df.columns.isin(to_omit)]]\r\nprint(df.loc[:, sorted(list(df.columns))].head())\r\n# %%\r\ninput_list = [[1, 2, 3, 4, 5, 6, 7], [1, 3, 7]]\r\nseries1 = pd.Series(input_list[0])\r\nseries2 = pd.Series(input_list[1])\r\nout_list = series1[series1.isin(series2)].index # store your output here\r\n# do not alter this step, list must be int type for evaluation purposes\r\nprint(list(map(int, out_list)))\r\n# %%\r\ndf = pd.read_csv(\r\n \"https://media-doselect.s3.amazonaws.com/generic/8NMooe4G0ENEe8z9q5ZvaZA7/googleplaystore.csv\")\r\n# %%\r\ndf.shape\r\n# %%\r\ndf.columns\r\n# %%\r\ndf['Installs'].unique()\r\n# %%\r\ndf = df[df['Installs'] != 'Free']\r\ndf['Installs'] = df['Installs'].apply(\r\n lambda ins: int(ins.replace(\"+\", \"\").replace(\",\", \"\")))\r\nprint(df.corr())\r\n# %%\r\nprint(df.corr())\r\n# %%\r\n\"I am now a master of Logistic regression\".title()\r\n# %%\r\ninput_list = [7, 2, 0, 9, -1, 8]\r\n# input_list = [6, 6, 6, 6, 6]\r\n# input_list = [3, 1, 4, 4, 5, 5, 5, 0, 2, 2]\r\n\r\n\r\ndef get_second_largest(values):\r\n if values is not None and len(values) > 2:\r\n values = set(values)\r\n if len(values) == 1:\r\n return \"not present\"\r\n\r\n values = sorted(values, reverse=True)\r\n return values[1] if values[0] > values[1] else \"not present\"\r\n else:\r\n raise ValueError(\"list must have at least two elements\")\r\n\r\n\r\nprint(get_second_largest(input_list))\r\n\r\n# %%\r\ninput_list = [7, 2, 0, 9, -1, 8]\r\ninput_list = [6, 6, 6, 6, 6]\r\ninput_list = [3, 1, 4, 4, 5, 5, 5, 0, 2, 2]\r\ninput_list = [7, 7]\r\n\r\n\r\ndef get_second_largest(values):\r\n if values is not None and len(values) >= 2:\r\n values = set(values)\r\n if len(values) == 1:\r\n return \"not present\"\r\n return sorted(values, reverse=True)[1]\r\n else:\r\n raise ValueError(\"list must have at least two elements\")\r\n\r\n\r\nprint(get_second_largest(input_list))\r\n# %%\r\nwholesale = pd.read_csv(\r\n 'https://media-doselect.s3.amazonaws.com/generic/OkbnaOBqrBXZOpRQw1JGMgaM9/Wholesale_Data.csv')\r\nwholesale.columns\r\n# %%\r\nwholesale['Channel'].unique()\r\n# %%\r\n# 'Hotel', 'Restaurant', and 'Cafe'.\r\n\r\n\r\n# wholesale['Channel'] = wholesale['Channel'].replace({\r\n# \"Hot\":\"Hotel\",\r\n# \"H\":\"Hotel\"\r\n# \"Hote\"\r\n# })\r\n\r\nwholesale['Channel'] = wholesale['Channel'].apply(lambda c: \"Hotel\" if c.lower(\r\n).startswith(\"h\") else \"Restaurant\" if c.lower().startswith(\"r\") else \"Cafe\")\r\nwholesale['Channel'].unique()\r\n# %%\r\nwholesale = pd.read_csv(\r\n 'https://media-doselect.s3.amazonaws.com/generic/OqwpypRKN09x5GYej2LvVrprn/Wholesale_Data_Cleaned.csv')\r\n# %%\r\nprint(list(wholesale.groupby(\"Channel\").sum().sum(\r\n axis=1).nsmallest(1).index)[0])\r\n# %%\r\n", "# %%\r\nimport re\r\n\r\nfrom pandas.core.algorithms import value_counts\r\n\r\n\r\ndef checkmail(email):\r\n # complete the function\r\n # the function should return the strings \"invalid\" or \"valid\" based on the email ID entered\r\n # 
https://www.w3schools.com/python/python_regex.asp\r\n email_re = \"^[0-9a-zA-Z]+[@]+[a-z]+[\\._]?[a-z]{2,3}$\"\r\n return \"valid\" if re.search(email_re, email) else \"invalid\"\r\n\r\n\r\nemail = \"a#[email protected]\" # \"[email protected]\" # input()\r\nprint(checkmail(email))\r\n\r\n# %%\r\n\r\ninput_list = [[1, 2, 3], [4, 5], [6, 7, 8, 9]]\r\nop = []\r\nfor lst in input_list:\r\n op.extend(lst)\r\n\r\nop\r\n# %%\r\nn = 9\r\n\r\n\r\ndef weirdsum(n, dig_count=4):\r\n return sum([int(x * str(n)) for x in range(1, (dig_count + 1))])\r\n\r\n\r\nprint(weirdsum(9))\r\n# %%\r\nfrom collections import Counter\r\n\r\nvalue = \"ddddaacccb\"\r\nsorted([x[0] for x in Counter(value).most_common(3)])\r\n\r\n\r\n# %%\r\nx = 3\r\ny = 4\r\nimport numpy as np\r\n\r\nnested_lst = [[(i + j) / 2 for j in range(y)] for i in range(x)]\r\nprint(np.matrix(nested_lst))\r\n\r\n\r\n# %% [markdown]\r\n\"\"\"\r\nSELECT FORMAT(SUM(ABS(dist_other_from_origin - emp_dist_from_origin)) / COUNT(dist_other_from_origin), 2) AS average\r\nFROM (\r\n SELECT CAST(address AS SIGNED) AS dist_other_from_origin\r\n FROM employee\r\n WHERE ssn != '123456789'\r\n ) other_employees\r\nCROSS JOIN (\r\n SELECT CAST(address AS SIGNED) AS emp_dist_from_origin\r\n FROM employee\r\n WHERE ssn = '123456789'\r\n ) employee\r\n\"\"\"\r\n\r\n# %% [markdown]\r\n\"\"\"\r\nSELECT \r\n student_id\r\nFROM (\r\n SELECT \r\n student_id, \r\n (SUBSTRING(marks,1,2) + SUBSTRING(marks,4,2) + SUBSTRING(marks,7,2))/3 as avg_marks \r\n FROM \r\n upgrad.marks \r\n ORDER BY \r\n avg_marks \r\n DESC limit 1) avg_pcm_student\r\n\"\"\"\r\n", "# %%\nimport math\nimport copy\nimport numpy as np\nimport pandas as pd\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom scipy.special import expit as sigmoid, logit\n# %%\nnp.random.seed(0)\n# %%\n\n\ndef generate_dataset(N_points):\n # 1 class\n radiuses = np.random.uniform(0, 0.5, size=N_points//2)\n angles = np.random.uniform(0, 2*math.pi, size=N_points//2)\n\n x_1 = np.multiply(radiuses, np.cos(angles)).reshape(N_points//2, 1)\n x_2 = np.multiply(radiuses, np.sin(angles)).reshape(N_points//2, 1)\n X_class_1 = np.concatenate((x_1, x_2), axis=1)\n Y_class_1 = np.full((N_points//2,), 1)\n\n # 0 class\n radiuses = np.random.uniform(0.6, 1, size=N_points//2)\n angles = np.random.uniform(0, 2*math.pi, size=N_points//2)\n\n x_1 = np.multiply(radiuses, np.cos(angles)).reshape(N_points//2, 1)\n x_2 = np.multiply(radiuses, np.sin(angles)).reshape(N_points//2, 1)\n X_class_0 = np.concatenate((x_1, x_2), axis=1)\n Y_class_0 = np.full((N_points//2,), 0)\n\n X = np.concatenate((X_class_1, X_class_0), axis=0)\n Y = np.concatenate((Y_class_1, Y_class_0), axis=0)\n return X, Y\n\n\nN_points = 1000\nX, Y = generate_dataset(N_points)\n\nplt.scatter(X[:N_points//2, 0], X[:N_points//2, 1], color='red', label='class 1')\nplt.scatter(X[N_points//2:, 0], X[N_points//2:, 1], color='blue', label='class 0')\nplt.legend(loc=9, bbox_to_anchor=(0.5, -0.1), ncol=2)\nplt.show()\n# %%\nX.shape, Y.shape\n# %%\nweights = dict(\n W1=np.random.randn(3, 2),\n b1=np.zeros(3),\n W2=np.random.randn(3),\n b2=0\n)\n# weights = {'W1': np.array([[-0.1049797, 1.36741498],\n# [-1.65534404, 0.15364446],\n# [-1.58447356, 0.84445431]]),\n# 'b1': np.array([0., 0., 0.]),\n# 'W2': np.array([-1.21286782, 0.28376955, -0.28219588]),\n# 'b2': 0}\ninitial_weights = copy.deepcopy(weights)\n\n# %%\n\n\ndef forward_propagation(X, weights):\n Z1 = X@weights['W1'].T + weights['b1']\n H = sigmoid(Z1)\n\n Z2 = H@weights['W2']+weights['b2']\n Y = sigmoid(Z2)\n\n return Y, 
Z2, H, Z1\n\n\n# %%\nforward_propagation(X, weights)\n\n# %%\n", "from re import T\nimport set_base_path\nimport numpy as np\nimport pandas as pd\nfrom IPython.display import display\nimport plotly.figure_factory as ff\nfrom enum import Enum, auto\nfrom typing import Tuple\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport plotly.express as px\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\n\ndef print_null_percents(frame: pd.DataFrame, full: bool = False, display_cols=True) -> pd.Series:\n \"\"\"Prints null columns perdent and count\n\n Args:\n frame (pd.DataFrame):Dataframe where null needs to be counted\n full (bool, optional): show all columns. Defaults to False.\n display_cols (bool, optional): show columns or not. Defaults to True.\n \"\"\"\n null_counts = frame.isna().sum()\n if not full:\n null_counts = null_counts[null_counts > 0]\n if display_cols:\n display(round((null_counts/frame.shape[0])*100, 2).sort_values(ascending=False))\n print(f\"Columns count with null: {len(null_counts)}\")\n return null_counts\n\n\nclass GraphType(Enum):\n \"\"\"Graph Type Enum\n\n Args:\n Enum ([type]): Built-in Enum Class\n \"\"\"\n BAR = auto()\n LINE = auto()\n DIST = auto()\n\n\ndef __plot_univariate_series__(\n series: pd.Series,\n title: str,\n xlabel: str,\n ylabel: str,\n graph_type: GraphType = None,\n showlegend: bool = False,\n log_x: bool = False,\n log_y: bool = False,\n interactive: bool = False,\n x_rotation: int = None,\n y_rotation: int = None,\n **kwargs) -> None:\n \"\"\"Bar plots a interger series\n\n Args:\n series (pd.Series): series to be plotted\n title (str): graph title\n xlabel (str): x-axis label\n ylabel (str): y-axis label\n graph_type (GraphType, optional): graph type\n showlegend (bool, optional): default False\n log_x (bool, optional): default False\n log_y (bool, optional): default False\n \"\"\"\n labels = {\"x\": xlabel, \"y\": ylabel}\n\n if interactive:\n fig = None\n if graph_type is None or graph_type == GraphType.BAR:\n fig = px.bar(x=series.index, y=series, color=series.index,\n title=title, labels=labels, log_x=log_x, log_y=log_y, **kwargs)\n\n if graph_type == GraphType.LINE:\n px.scatter(x=series.index, y=series, title=title, labels=labels, color=series.index, **kwargs)\n\n fig.update_layout(showlegend=showlegend)\n fig.show()\n else:\n plt.figure(figsize=(12, 10))\n ax = None\n if graph_type is None or graph_type == GraphType.BAR:\n ax = sns.barplot(x=series.index, y=series, palette=\"deep\", **kwargs)\n\n if graph_type == GraphType.LINE:\n ax = sns.lineplot(x=series.index, y=series, **kwargs)\n\n ax.set(xlabel=xlabel, ylabel=ylabel, **kwargs)\n\n if x_rotation:\n plt.xticks(rotation=x_rotation)\n if y_rotation:\n plt.yticks(rotation=y_rotation)\n plt.show()\n\n\ndef get_univariate_cat_plot_strs(value: str, **kwargs) -> Tuple[str, str, str]:\n \"\"\"Creates graph title, x-axis text and y-axis text for given value\n\n Args:\n value (str): column name\n\n Returns:\n Tuple[str, str, str]: title, x-axis text and y-axis text\n \"\"\"\n full_name = value.replace(\"_\", \" \").replace(\"-\", \" \").replace(\".\", \" \").title() # TODO: write logic to make name\n if len(full_name) > 30:\n full_name = value\n count_str = full_name + ' Count' + \" - Log Scale\" if kwargs.get(\"log_y\") else \"\"\n return count_str + ' Plot', full_name, count_str\n\n\ndef __plot_cat_data__(col_name: str, value_counts_ser: pd.Series, x_rotation=None, y_rotation=None, interactive=False, **kwargs):\n \"\"\"Plots the value count series\n\n Args:\n c ([str]): 
column name\n value_counts_ser ([pd.Series]): value counts series\n \"\"\"\n t, xl, yl = get_univariate_cat_plot_strs(col_name, **kwargs)\n __plot_univariate_series__(value_counts_ser, t, xl, yl, x_rotation=x_rotation, y_rotation=y_rotation, interactive=interactive, **kwargs)\n\n\ndef plot_univariate_categorical_columns(dataframe: pd.DataFrame, plot_limit: int = 30, print_value_counts=False, x_rotation=None, y_rotation=None, interactive=False, ** kwargs) -> None:\n \"\"\"plots categorical variable bars\n\n Args:\n dataframe (pd.DataFrame): data frame with all categorical columns\n plot_limit (int, optional): plot if category count is less than. Defaults to 30.\n print_value_counts (bool, optional): print value counts or not. Defaults to False.\n x_rotation ([type], optional): x-axis text rotation angle (in degrees with non-interactive module). Defaults to None.\n y_rotation ([type], optional): y-axis text rotation angle (in degrees with non-interactive module). Defaults to None.\n interactive (bool, optional): if plot to be interactive (slow and make notebook more in size). Defaults to False.\n \"\"\"\n for c in dataframe.columns:\n value_counts_ser = dataframe[c].value_counts().sort_values(ascending=False)\n if print_value_counts:\n print(value_counts_ser)\n cnt_len = len(value_counts_ser)\n if cnt_len > plot_limit:\n value_counts_ser = value_counts_ser[:plot_limit]\n print(f\"Plotting only top(in decending order) {plot_limit} categories\")\n __plot_cat_data__(c, value_counts_ser, x_rotation=x_rotation, y_rotation=y_rotation, interactive=interactive, ** kwargs)\n\n\ndef plot_dist(data_frame: pd.DataFrame, merge_all: bool = False, width=800, interactive: bool = False, **kwargs) -> None:\n cols_to_plot = data_frame.columns\n if interactive:\n if merge_all:\n fig = ff.create_distplot(hist_data=data_frame, group_labels=cols_to_plot, **kwargs)\n fig.update_layout(title_text=f\"Dist plot for Numeric Columns\", width=width)\n fig.show()\n else:\n for c in cols_to_plot:\n fig = ff.create_distplot(hist_data=[data_frame[c].values], group_labels=[c], **kwargs)\n fig.update_layout(title_text=f\"Distribution plot for {c}\", width=width)\n fig.show()\n else:\n if merge_all:\n sns.displot(data=data_frame, y=cols_to_plot, hue=cols_to_plot, **kwargs)\n plt.show()\n else:\n for c in cols_to_plot:\n sns.displot(data=data_frame, x=c, kind='kde', **kwargs)\n plt.show()\n\n\nclass TwoVarPlotType(Enum):\n BOX = auto()\n SCATTER = auto()\n\n\ndef __plot_two_features__(df: pd.DataFrame, x: str, y: str, plot_type: TwoVarPlotType, **kwargs):\n x_rotation = kwargs.get('x_rotation', 0)\n y_rotation = kwargs.get('y_rotation', 0)\n legend = kwargs.get('legend', None)\n _ = [kwargs.pop(key, None) for key in ['x_rotation', 'y_rotation', 'legend']]\n ax = None\n if plot_type == TwoVarPlotType.BOX:\n ax = sns.boxplot(data=df, x=x, y=y, hue=x, **kwargs)\n if plot_type == TwoVarPlotType.SCATTER:\n ax = sns.scatterplot(data=df, x=x, y=y, **kwargs)\n if ax is not None:\n plt.xticks(rotation=int(x_rotation))\n plt.yticks(rotation=int(y_rotation))\n if legend is not None and not legend:\n lgnd = ax.get_legend()\n if lgnd is not None:\n lgnd.remove()\n plt.show()\n\n\ndef plot_box(df: pd.DataFrame, x: str, y: str, interactive: bool = False, **kwargs) -> None:\n if interactive:\n fig = px.box(df, x=x, y=y, color=x, **kwargs)\n fig.show()\n else:\n __plot_two_features__(df, x, y, TwoVarPlotType.BOX, **kwargs)\n\n\ndef __getdtype__(col_data: pd.Series):\n str_dtype = str(col_data.dtype)\n if str_dtype in 'iufc' or col_data.dtype in 
[np.int64, np.float64]:\n return 'num'\n elif str_dtype in 'OSUb' or col_data.dtype in ['object']:\n return 'cat'\n elif str_dtype in 'mM':\n return 'date'\n else:\n return None\n\n# REFACTOR: Make it consistent\n\n\ndef plot_two_variables(df, x, y, interactive: bool = False, **kwargs):\n if __getdtype__(df[x]) == 'num' and __getdtype__(df[y]) == 'num':\n if interactive:\n fig = px.scatter(df, x=x, y=y, trendline=\"ols\", **kwargs)\n fig.show()\n else:\n __plot_two_features__(df, x, y, TwoVarPlotType.BOX, **kwargs)\n\n elif (__getdtype__(df[x]) == 'cat' and __getdtype__(df[y]) == 'num'):\n plot_box(df, x, y, interactive, **kwargs)\n elif (__getdtype__(df[x]) == 'num' and __getdtype__(df[y]) == 'cat'):\n plot_box(df, y, x, interactive, **kwargs)\n\n\ndef __set_value_count_color__(value):\n return \"background-color: rgba(217, 38, 38, 0.2)\" if value < 100 else ''\n\n\ndef __set_value_count_percent_color__(value):\n return \"background-color: rgba(221, 207, 155, 0.2)\" if value <= 5. else ''\n\n\ndef print_value_count_percents(dataframe: pd.DataFrame) -> None:\n total_recs = dataframe.shape[0]\n # ret_values = {}\n for c in dataframe.columns:\n value_counts_ser = dataframe[c].value_counts()\n value_counts_per = round(dataframe[c].value_counts()*100/total_recs, 2)\n df = pd.DataFrame({\"Value\": value_counts_ser.index, \"Value Counts\": value_counts_ser.values, \"Percent\": value_counts_per.values})\n df.sort_values(by=\"Percent\", ascending=False)\n # ret_values[c] = df\n print(f\"\\nValue Counts for {c}\")\n # styled_df = df.style.apply(lambda row: highlight_other_group(row, col_count, 5), axis=1)\n styled_df = df.style.format({\n \"Percent\": \"{:.2f}%\"\n }). \\\n applymap(__set_value_count_percent_color__, subset=[\"Percent\"]). \\\n applymap(__set_value_count_color__, subset=[\"Value Counts\"]). 
\\\n hide_index()\n\n display(styled_df)\n\n # return ret_values\n\n\ndef count_of_uniques(dataframe: pd.DataFrame, display_res=False) -> pd.DataFrame:\n cols = dataframe.columns\n unique_values = []\n unique_len = []\n for c in cols:\n uniques = dataframe[c].unique()\n # unique_values.append(sorted(uniques))\n unique_values.append(uniques)\n unique_len.append(len(uniques))\n\n frame = pd.DataFrame({\n \"Column\": cols,\n \"Unique Values\": unique_values,\n \"Column Unique Count\": unique_len})\n frame.sort_values(by=[\"Column Unique Count\", \"Column\"], ascending=[False, True], inplace=True)\n if display_res:\n display(frame.style.hide_index())\n return frame\n\n\ndef get_data_frame_overview(df: pd.DataFrame, data_sample_size: int = 5) -> None:\n # Shape\n print(f\"Shape:\\n{df.shape}\")\n print(\"-\"*50)\n # DTypes\n print(f\"\\nDTypes:\\n{df.dtypes}\")\n print(\"-\"*50)\n # Total Nulls\n print(f\"\\nTotal Nulls:\\n{df.isnull().sum().sum()}\")\n print(\"-\"*50)\n # Nulls\n null_percent = round(df.isnull().mean()*100, 2)\n print(f\"\\nNulls Percentage:\\n{null_percent[null_percent > 0].sort_values(ascending=False)}\")\n print(\"-\"*50)\n # Duplicate\n print(f\"\\nDuplicate Rows count:\\n{len(df) - len(df.drop_duplicates())}\")\n print(\"-\"*50)\n # Sample\n print(f\"\\nSample:\\n\")\n display(df.sample(data_sample_size))\n print(\"-\"*50)\n # Head\n print(f\"\\nHead:\\n\")\n display(df.head(data_sample_size))\n print(\"-\"*50)\n # tail\n print(f\"\\nTail:\\n\")\n display(df.tail(data_sample_size))\n print(\"-\"*50)\n # Describe\n print(f\"\\nDescribe:\\n\")\n display(df.describe(include='all', ).T)\n print(\"-\"*50)\n # info\n print(f\"\\nInfo:\\n\")\n display(df.info(verbose=1))\n print(\"-\"*50)\n # Count of data types\n print(f\"\\nCount of data types:\\n\")\n display(df.dtypes.value_counts())\n print(\"-\"*50)\n # Column Names\n print(f\"\\nColumn Names:\\n\")\n display(df.columns)\n" ]
[ [ "tensorflow.keras.models.Model", "tensorflow.distribute.MirroredStrategy", "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.Conv2D", "tensorflow.keras.layers.BatchNormalization", "tensorflow.keras.layers.Dropout", "tensorflow.keras.layers.MaxPooling2D", "tensorflow.keras.layers.Flatten", "tensorflow.keras.layers.Input" ], [ "pandas.read_csv", "pandas.Series" ], [ "numpy.matrix" ], [ "matplotlib.pyplot.legend", "matplotlib.pyplot.scatter", "numpy.random.seed", "scipy.special.expit", "numpy.cos", "numpy.full", "numpy.concatenate", "numpy.sin", "numpy.random.randn", "numpy.random.uniform", "matplotlib.pyplot.show", "numpy.zeros" ], [ "pandas.DataFrame", "matplotlib.pyplot.yticks", "matplotlib.pyplot.show", "matplotlib.pyplot.xticks", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
a1rb4Ck/auto-sklearn
[ "cdf48b82632927ec56c8c14258c0bfc4c6b2e7d1" ]
[ "autosklearn/smbo.py" ]
[ "import json\nimport os\nimport time\nimport traceback\nimport warnings\n\nimport numpy as np\nimport pynisher\n\nfrom smac.facade.smac_facade import SMAC\nfrom smac.optimizer.objective import average_cost\nfrom smac.runhistory.runhistory import RunHistory\nfrom smac.runhistory.runhistory2epm import RunHistory2EPM4Cost\nfrom smac.scenario.scenario import Scenario\nfrom smac.tae.execute_ta_run import StatusType\nfrom smac.optimizer import pSMAC\n\n\nimport autosklearn.metalearning\nfrom autosklearn.constants import MULTILABEL_CLASSIFICATION, \\\n BINARY_CLASSIFICATION, TASK_TYPES_TO_STRING, CLASSIFICATION_TASKS, \\\n REGRESSION_TASKS, MULTICLASS_CLASSIFICATION, REGRESSION\nfrom autosklearn.metalearning.mismbo import suggest_via_metalearning\nfrom autosklearn.data.abstract_data_manager import AbstractDataManager\nfrom autosklearn.data.competition_data_manager import CompetitionDataManager\nfrom autosklearn.evaluation import ExecuteTaFuncWithQueue, WORST_POSSIBLE_RESULT\nfrom autosklearn.util import get_logger\nfrom autosklearn.metalearning.metalearning.meta_base import MetaBase\nfrom autosklearn.metalearning.metafeatures.metafeatures import \\\n calculate_all_metafeatures_with_labels, calculate_all_metafeatures_encoded_labels\n\nEXCLUDE_META_FEATURES_CLASSIFICATION = {\n 'Landmark1NN',\n 'LandmarkDecisionNodeLearner',\n 'LandmarkDecisionTree',\n 'LandmarkLDA',\n 'LandmarkNaiveBayes',\n 'PCAFractionOfComponentsFor95PercentVariance',\n 'PCAKurtosisFirstPC',\n 'PCASkewnessFirstPC',\n 'PCA'\n}\n\nEXCLUDE_META_FEATURES_REGRESSION = {\n 'Landmark1NN',\n 'LandmarkDecisionNodeLearner',\n 'LandmarkDecisionTree',\n 'LandmarkLDA',\n 'LandmarkNaiveBayes',\n 'PCAFractionOfComponentsFor95PercentVariance',\n 'PCAKurtosisFirstPC',\n 'PCASkewnessFirstPC',\n 'NumberOfClasses',\n 'ClassOccurences',\n 'ClassProbabilityMin',\n 'ClassProbabilityMax',\n 'ClassProbabilityMean',\n 'ClassProbabilitySTD',\n 'ClassEntropy',\n 'LandmarkRandomNodeLearner',\n 'PCA',\n}\n\n\n# dataset helpers\ndef load_data(dataset_info, backend, max_mem=None):\n try:\n D = backend.load_datamanager()\n except IOError:\n D = None\n\n # Datamanager probably doesn't exist\n if D is None:\n if max_mem is None:\n D = CompetitionDataManager(dataset_info)\n else:\n D = CompetitionDataManager(dataset_info, max_memory_in_mb=max_mem)\n return D\n\n\n# metalearning helpers\ndef _calculate_metafeatures(data_feat_type, data_info_task, basename,\n x_train, y_train, watcher, logger):\n # == Calculate metafeatures\n task_name = 'CalculateMetafeatures'\n watcher.start_task(task_name)\n categorical = [True if feat_type.lower() in ['categorical'] else False\n for feat_type in data_feat_type]\n\n EXCLUDE_META_FEATURES = EXCLUDE_META_FEATURES_CLASSIFICATION \\\n if data_info_task in CLASSIFICATION_TASKS else EXCLUDE_META_FEATURES_REGRESSION\n\n if data_info_task in [MULTICLASS_CLASSIFICATION, BINARY_CLASSIFICATION,\n MULTILABEL_CLASSIFICATION, REGRESSION]:\n logger.info('Start calculating metafeatures for %s', basename)\n result = calculate_all_metafeatures_with_labels(\n x_train, y_train, categorical=categorical,\n dataset_name=basename,\n dont_calculate=EXCLUDE_META_FEATURES, )\n for key in list(result.metafeature_values.keys()):\n if result.metafeature_values[key].type_ != 'METAFEATURE':\n del result.metafeature_values[key]\n\n else:\n result = None\n logger.info('Metafeatures not calculated')\n watcher.stop_task(task_name)\n logger.info(\n 'Calculating Metafeatures (categorical attributes) took %5.2f',\n watcher.wall_elapsed(task_name))\n return 
result\n\ndef _calculate_metafeatures_encoded(basename, x_train, y_train, watcher,\n task, logger):\n EXCLUDE_META_FEATURES = EXCLUDE_META_FEATURES_CLASSIFICATION \\\n if task in CLASSIFICATION_TASKS else EXCLUDE_META_FEATURES_REGRESSION\n\n task_name = 'CalculateMetafeaturesEncoded'\n watcher.start_task(task_name)\n result = calculate_all_metafeatures_encoded_labels(\n x_train, y_train, categorical=[False] * x_train.shape[1],\n dataset_name=basename, dont_calculate=EXCLUDE_META_FEATURES)\n for key in list(result.metafeature_values.keys()):\n if result.metafeature_values[key].type_ != 'METAFEATURE':\n del result.metafeature_values[key]\n watcher.stop_task(task_name)\n logger.info(\n 'Calculating Metafeatures (encoded attributes) took %5.2fsec',\n watcher.wall_elapsed(task_name))\n return result\n\ndef _get_metalearning_configurations(meta_base, basename, metric,\n configuration_space,\n task,\n initial_configurations_via_metalearning,\n is_sparse,\n watcher, logger):\n task_name = 'InitialConfigurations'\n watcher.start_task(task_name)\n try:\n metalearning_configurations = suggest_via_metalearning(\n meta_base, basename, metric,\n task,\n is_sparse == 1,\n initial_configurations_via_metalearning\n )\n except Exception as e:\n logger.error(\"Error getting metalearning configurations!\")\n logger.error(str(e))\n logger.error(traceback.format_exc())\n metalearning_configurations = []\n watcher.stop_task(task_name)\n return metalearning_configurations\n\ndef _print_debug_info_of_init_configuration(initial_configurations, basename,\n time_for_task, logger, watcher):\n logger.debug('Initial Configurations: (%d)' % len(initial_configurations))\n for initial_configuration in initial_configurations:\n logger.debug(initial_configuration)\n logger.debug('Looking for initial configurations took %5.2fsec',\n watcher.wall_elapsed('InitialConfigurations'))\n logger.info(\n 'Time left for %s after finding initial configurations: %5.2fsec',\n basename, time_for_task - watcher.wall_elapsed(basename))\n\n\ndef get_smac_object(\n scenario_dict,\n seed,\n ta,\n backend,\n metalearning_configurations,\n runhistory,\n):\n scenario_dict['input_psmac_dirs'] = backend.get_smac_output_glob(\n smac_run_id=seed if not scenario_dict['shared-model'] else '*',\n )\n scenario = Scenario(scenario_dict)\n if len(metalearning_configurations) > 0:\n default_config = scenario.cs.get_default_configuration()\n initial_configurations = [default_config] + metalearning_configurations\n else:\n initial_configurations = None\n rh2EPM = RunHistory2EPM4Cost(\n num_params=len(scenario.cs.get_hyperparameters()),\n scenario=scenario,\n success_states=[\n StatusType.SUCCESS,\n StatusType.MEMOUT,\n StatusType.TIMEOUT,\n # As long as we don't have a model for crashes yet!\n StatusType.CRASHED,\n ],\n impute_censored_data=False,\n impute_state=None,\n )\n return SMAC(\n scenario=scenario,\n rng=seed,\n runhistory2epm=rh2EPM,\n tae_runner=ta,\n initial_configurations=initial_configurations,\n runhistory=runhistory,\n run_id=seed,\n )\n\n\nclass AutoMLSMBO(object):\n\n def __init__(self, config_space, dataset_name,\n backend,\n total_walltime_limit,\n func_eval_time_limit,\n memory_limit,\n metric,\n watcher, start_num_run=1,\n data_memory_limit=None,\n num_metalearning_cfgs=25,\n config_file=None,\n seed=1,\n metadata_directory=None,\n resampling_strategy='holdout',\n resampling_strategy_args=None,\n shared_mode=False,\n include_estimators=None,\n exclude_estimators=None,\n include_preprocessors=None,\n exclude_preprocessors=None,\n 
disable_file_output=False,\n std_scores=False,\n smac_scenario_args=None,\n get_smac_object_callback=None):\n super(AutoMLSMBO, self).__init__()\n # data related\n self.dataset_name = dataset_name\n self.datamanager = None\n self.metric = metric\n self.task = None\n self.backend = backend\n\n # the configuration space\n self.config_space = config_space\n\n # Evaluation\n self.resampling_strategy = resampling_strategy\n if resampling_strategy_args is None:\n resampling_strategy_args = {}\n self.resampling_strategy_args = resampling_strategy_args\n\n # and a bunch of useful limits\n self.total_walltime_limit = int(total_walltime_limit)\n self.func_eval_time_limit = int(func_eval_time_limit)\n self.memory_limit = memory_limit\n self.data_memory_limit = data_memory_limit\n self.watcher = watcher\n self.num_metalearning_cfgs = num_metalearning_cfgs\n self.config_file = config_file\n self.seed = seed\n self.metadata_directory = metadata_directory\n self.start_num_run = start_num_run\n self.shared_mode = shared_mode\n self.include_estimators = include_estimators\n self.exclude_estimators = exclude_estimators\n self.include_preprocessors = include_preprocessors\n self.exclude_preprocessors = exclude_preprocessors\n self.disable_file_output = disable_file_output\n self.std_scores = std_scores\n self.smac_scenario_args = smac_scenario_args\n self.get_smac_object_callback = get_smac_object_callback\n\n logger_name = '%s(%d):%s' % (self.__class__.__name__, self.seed,\n \":\" + dataset_name if dataset_name is\n not None else \"\")\n self.logger = get_logger(logger_name)\n\n def _send_warnings_to_log(self, message, category, filename, lineno,\n file=None, line=None):\n self.logger.debug('%s:%s: %s:%s', filename, lineno, category.__name__,\n message)\n\n def reset_data_manager(self, max_mem=None):\n if max_mem is None:\n max_mem = self.data_memory_limit\n if self.datamanager is not None:\n del self.datamanager\n if isinstance(self.dataset_name, AbstractDataManager):\n self.datamanager = self.dataset_name\n else:\n self.datamanager = load_data(self.dataset_name,\n self.backend,\n max_mem=max_mem)\n\n self.task = self.datamanager.info['task']\n\n def collect_metalearning_suggestions(self, meta_base):\n metalearning_configurations = _get_metalearning_configurations(\n meta_base=meta_base,\n basename=self.dataset_name,\n metric=self.metric,\n configuration_space=self.config_space,\n task=self.task,\n is_sparse=self.datamanager.info['is_sparse'],\n initial_configurations_via_metalearning=self.num_metalearning_cfgs,\n watcher=self.watcher,\n logger=self.logger)\n _print_debug_info_of_init_configuration(\n metalearning_configurations,\n self.dataset_name,\n self.total_walltime_limit,\n self.logger,\n self.watcher)\n\n return metalearning_configurations\n\n def _calculate_metafeatures(self):\n with warnings.catch_warnings():\n warnings.showwarning = self._send_warnings_to_log\n\n meta_features = _calculate_metafeatures(\n data_feat_type=self.datamanager.feat_type,\n data_info_task=self.datamanager.info['task'],\n x_train=self.datamanager.data['X_train'],\n y_train=self.datamanager.data['Y_train'],\n basename=self.dataset_name,\n watcher=self.watcher,\n logger=self.logger)\n return meta_features\n\n def _calculate_metafeatures_with_limits(self, time_limit):\n res = None\n time_limit = max(time_limit, 1)\n try:\n safe_mf = pynisher.enforce_limits(mem_in_mb=self.memory_limit,\n wall_time_in_s=int(time_limit),\n grace_period_in_s=30,\n logger=self.logger)(\n self._calculate_metafeatures)\n res = safe_mf()\n except 
Exception as e:\n self.logger.error('Error getting metafeatures: %s', str(e))\n\n return res\n\n def _calculate_metafeatures_encoded(self):\n with warnings.catch_warnings():\n warnings.showwarning = self._send_warnings_to_log\n\n meta_features_encoded = _calculate_metafeatures_encoded(\n self.dataset_name,\n self.datamanager.data['X_train'],\n self.datamanager.data['Y_train'],\n self.watcher,\n self.datamanager.info['task'],\n self.logger)\n return meta_features_encoded\n\n def _calculate_metafeatures_encoded_with_limits(self, time_limit):\n res = None\n time_limit = max(time_limit, 1)\n try:\n safe_mf = pynisher.enforce_limits(mem_in_mb=self.memory_limit,\n wall_time_in_s=int(time_limit),\n grace_period_in_s=30,\n logger=self.logger)(\n self._calculate_metafeatures_encoded)\n res = safe_mf()\n except Exception as e:\n self.logger.error('Error getting metafeatures (encoded) : %s',\n str(e))\n\n return res\n\n def run_smbo(self):\n\n self.watcher.start_task('SMBO')\n\n # == first things first: load the datamanager\n self.reset_data_manager()\n\n # == Initialize non-SMBO stuff\n # first create a scenario\n seed = self.seed\n self.config_space.seed(seed)\n num_params = len(self.config_space.get_hyperparameters())\n # allocate a run history\n num_run = self.start_num_run\n\n # Initialize some SMAC dependencies\n\n metalearning_configurations = self.get_metalearning_suggestions()\n\n if self.resampling_strategy in ['partial-cv',\n 'partial-cv-iterative-fit']:\n num_folds = self.resampling_strategy_args['folds']\n instances = [[json.dumps({'task_id': self.dataset_name,\n 'fold': fold_number})]\n for fold_number in range(num_folds)]\n else:\n instances = [[json.dumps({'task_id': self.dataset_name})]]\n\n # TODO rebuild target algorithm to be it's own target algorithm\n # evaluator, which takes into account that a run can be killed prior\n # to the model being fully fitted; thus putting intermediate results\n # into a queue and querying them once the time is over\n exclude = dict()\n include = dict()\n if self.include_preprocessors is not None and \\\n self.exclude_preprocessors is not None:\n raise ValueError('Cannot specify include_preprocessors and '\n 'exclude_preprocessors.')\n elif self.include_preprocessors is not None:\n include['preprocessor'] = self.include_preprocessors\n elif self.exclude_preprocessors is not None:\n exclude['preprocessor'] = self.exclude_preprocessors\n\n if self.include_estimators is not None and \\\n self.exclude_estimators is not None:\n raise ValueError('Cannot specify include_estimators and '\n 'exclude_estimators.')\n elif self.include_estimators is not None:\n if self.task in CLASSIFICATION_TASKS:\n include['classifier'] = self.include_estimators\n elif self.task in REGRESSION_TASKS:\n include['regressor'] = self.include_estimators\n else:\n raise ValueError(self.task)\n elif self.exclude_estimators is not None:\n if self.task in CLASSIFICATION_TASKS:\n exclude['classifier'] = self.exclude_estimators\n elif self.task in REGRESSION_TASKS:\n exclude['regressor'] = self.exclude_estimators\n else:\n raise ValueError(self.task)\n\n ta = ExecuteTaFuncWithQueue(backend=self.backend,\n autosklearn_seed=seed,\n resampling_strategy=self.resampling_strategy,\n initial_num_run=num_run,\n logger=self.logger,\n include=include,\n exclude=exclude,\n metric=self.metric,\n memory_limit=self.memory_limit,\n disable_file_output=self.disable_file_output,\n std_scores=self.std_scores,\n **self.resampling_strategy_args)\n\n startup_time = 
self.watcher.wall_elapsed(self.dataset_name)\n total_walltime_limit = self.total_walltime_limit - startup_time - 5\n scenario_dict = {\n 'abort_on_first_run_crash': False,\n 'cs': self.config_space,\n 'cutoff_time': self.func_eval_time_limit,\n 'deterministic': 'true',\n 'instances': instances,\n 'memory_limit': self.memory_limit,\n 'output-dir':\n self.backend.get_smac_output_directory(),\n 'run_obj': 'quality',\n 'shared-model': self.shared_mode,\n 'wallclock_limit': total_walltime_limit,\n 'cost_for_crash': WORST_POSSIBLE_RESULT,\n }\n if self.smac_scenario_args is not None:\n for arg in [\n 'abort_on_first_run_crash',\n 'cs',\n 'deterministic',\n 'instances',\n 'output-dir',\n 'run_obj',\n 'shared-model',\n 'cost_for_crash',\n ]:\n if arg in self.smac_scenario_args:\n self.logger.warning('Cannot override scenario argument %s, '\n 'will ignore this.', arg)\n del self.smac_scenario_args[arg]\n for arg in [\n 'cutoff_time',\n 'memory_limit',\n 'wallclock_limit',\n ]:\n if arg in self.smac_scenario_args:\n self.logger.warning(\n 'Overriding scenario argument %s: %s with value %s',\n arg,\n scenario_dict[arg],\n self.smac_scenario_args[arg]\n )\n scenario_dict.update(self.smac_scenario_args)\n\n runhistory = RunHistory(aggregate_func=average_cost)\n smac_args = {\n 'scenario_dict': scenario_dict,\n 'seed': seed,\n 'ta': ta,\n 'backend': self.backend,\n 'metalearning_configurations': metalearning_configurations,\n 'runhistory': runhistory,\n }\n if self.get_smac_object_callback is not None:\n smac = self.get_smac_object_callback(**smac_args)\n else:\n smac = get_smac_object(**smac_args)\n\n smac.optimize()\n\n # Patch SMAC to read in data from parallel runs after the last\n # function evaluation\n if self.shared_mode:\n pSMAC.read(\n run_history=smac.solver.runhistory,\n output_dirs=smac.solver.scenario.input_psmac_dirs,\n configuration_space=smac.solver.config_space,\n logger=smac.solver.logger,\n )\n\n self.runhistory = smac.solver.runhistory\n self.trajectory = smac.solver.intensifier.traj_logger.trajectory\n\n return self.runhistory, self.trajectory\n\n def get_metalearning_suggestions(self):\n # == METALEARNING suggestions\n # we start by evaluating the defaults on the full dataset again\n # and add the suggestions from metalearning behind it\n if self.num_metalearning_cfgs > 0:\n # If metadata directory is None, use default\n if self.metadata_directory is None:\n metalearning_directory = os.path.dirname(\n autosklearn.metalearning.__file__)\n # There is no multilabel data in OpenML\n if self.task == MULTILABEL_CLASSIFICATION:\n meta_task = BINARY_CLASSIFICATION\n else:\n meta_task = self.task\n metadata_directory = os.path.join(\n metalearning_directory, 'files',\n '%s_%s_%s' % (self.metric, TASK_TYPES_TO_STRING[meta_task],\n 'sparse' if self.datamanager.info['is_sparse']\n else 'dense'))\n self.metadata_directory = metadata_directory\n\n # If metadata directory is specified by user,\n # then verify that it exists.\n else:\n if not os.path.exists(self.metadata_directory):\n raise ValueError('The specified metadata directory \\'%s\\' '\n 'does not exist!' 
% self.metadata_directory)\n\n else:\n # There is no multilabel data in OpenML\n if self.task == MULTILABEL_CLASSIFICATION:\n meta_task = BINARY_CLASSIFICATION\n else:\n meta_task = self.task\n\n metadata_directory = os.path.join(\n self.metadata_directory,\n '%s_%s_%s' % (self.metric, TASK_TYPES_TO_STRING[meta_task],\n 'sparse' if self.datamanager.info['is_sparse']\n else 'dense'))\n # Check that the metadata directory has the correct\n # subdirectory needed for this dataset.\n if os.path.basename(metadata_directory) not in \\\n os.listdir(self.metadata_directory):\n raise ValueError('The specified metadata directory '\n '\\'%s\\' does not have the correct '\n 'subdirectory \\'%s\\'' %\n (self.metadata_directory,\n os.path.basename(metadata_directory))\n )\n self.metadata_directory = metadata_directory\n\n if os.path.exists(self.metadata_directory):\n\n self.logger.info('Metadata directory: %s',\n self.metadata_directory)\n meta_base = MetaBase(self.config_space, self.metadata_directory)\n\n metafeature_calculation_time_limit = int(\n self.total_walltime_limit / 4)\n metafeature_calculation_start_time = time.time()\n meta_features = self._calculate_metafeatures_with_limits(\n metafeature_calculation_time_limit)\n metafeature_calculation_end_time = time.time()\n metafeature_calculation_time_limit = \\\n metafeature_calculation_time_limit - (\n metafeature_calculation_end_time -\n metafeature_calculation_start_time)\n\n if metafeature_calculation_time_limit < 1:\n self.logger.warning(\n 'Time limit for metafeature calculation less '\n 'than 1 seconds (%f). Skipping calculation '\n 'of metafeatures for encoded dataset.',\n metafeature_calculation_time_limit)\n meta_features_encoded = None\n else:\n with warnings.catch_warnings():\n warnings.showwarning = self._send_warnings_to_log\n self.datamanager.perform1HotEncoding()\n meta_features_encoded = \\\n self._calculate_metafeatures_encoded_with_limits(\n metafeature_calculation_time_limit)\n\n # In case there is a problem calculating the encoded meta-features\n if meta_features is None:\n if meta_features_encoded is not None:\n meta_features = meta_features_encoded\n else:\n if meta_features_encoded is not None:\n meta_features.metafeature_values.update(\n meta_features_encoded.metafeature_values)\n\n if meta_features is not None:\n meta_base.add_dataset(self.dataset_name, meta_features)\n # Do mean imputation of the meta-features - should be done specific\n # for each prediction model!\n all_metafeatures = meta_base.get_metafeatures(\n features=list(meta_features.keys()))\n all_metafeatures.fillna(all_metafeatures.mean(),\n inplace=True)\n\n with warnings.catch_warnings():\n warnings.showwarning = self._send_warnings_to_log\n metalearning_configurations = self.collect_metalearning_suggestions(\n meta_base)\n if metalearning_configurations is None:\n metalearning_configurations = []\n self.reset_data_manager()\n\n self.logger.info('%s', meta_features)\n\n # Convert meta-features into a dictionary because the scenario\n # expects a dictionary\n meta_features_dict = {}\n for dataset, series in all_metafeatures.iterrows():\n meta_features_dict[dataset] = series.values\n meta_features_list = []\n for meta_feature_name in all_metafeatures.columns:\n meta_features_list.append(\n meta_features[meta_feature_name].value)\n meta_features_list = np.array(meta_features_list).reshape(\n (1, -1))\n self.logger.info(list(meta_features_dict.keys()))\n\n else:\n meta_features = None\n self.logger.warning('Could not find meta-data directory %s' %\n 
metadata_directory)\n\n else:\n meta_features = None\n if meta_features is None:\n meta_features_list = []\n metalearning_configurations = []\n return metalearning_configurations\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
suchir/passenger_screening_algorithm_challenge
[ "65e3e3ce1889e9a100f6b9b6a53fe5c785a84612" ]
[ "model_v2/synthetic_data.py" ]
[ "from common.caching import read_input_dir, cached, read_log_dir\nfrom common.dataio import get_aps_data_hdf5, get_passenger_clusters, get_data\n\nfrom . import dataio\n\nfrom collections import defaultdict\nimport numpy as np\nimport skimage.transform\nimport skimage.io\nimport skimage.color\nimport glob\nimport os\nimport tqdm\nimport h5py\nimport pickle\nimport imageio\nimport math\nimport time\nimport subprocess\nimport json\n\n\n@cached(version=0)\ndef generate_random_models(n_models):\n with read_input_dir('makehuman/passengers'):\n ranges = defaultdict(lambda: [float('inf'), float('-inf')])\n for file in glob.glob('*.mhm'):\n with open(file, 'r') as f:\n modifiers = f.readlines()[4:-5]\n for modifier in modifiers:\n _, m, x = modifier.split(' ')\n x = float(x)\n r = ranges[m]\n r[0], r[1] = min(r[0], x), max(r[1], x)\n\n np.random.seed(0)\n for i in range(n_models):\n lines = ['version v1.1.1']\n for modifier in ranges:\n val = np.random.uniform(*ranges[modifier])\n lines.append('modifier %s %s' % (modifier, val))\n lines.append('skeleton game_engine.mhskel')\n with open('%s.mhm' % i, 'w') as f:\n f.write('\\n'.join(lines))\n\n\nBODY_ZONE_COLORS = np.array([\n [255, 255, 255],\n [255, 115, 35],\n [55, 64, 197],\n [32, 168, 67],\n [116, 116, 116],\n [255, 193, 17],\n [255, 164, 194],\n [172, 226, 28],\n [193, 183, 227],\n [142, 212, 231],\n [255, 240, 3],\n [234, 25, 33],\n [176, 110, 77],\n [232, 219, 164],\n [101, 135, 182],\n [255, 3, 255],\n [125, 0, 21],\n [153, 64, 154]\n])\n\n\ndef _convert_colors_to_label(image):\n highlight = lambda color: np.sum(np.abs(image-color), axis=-1)\n dist = np.stack([highlight(color) for color in BODY_ZONE_COLORS], axis=-1)\n return np.argmin(dist, axis=-1)\n\n\n@cached(generate_random_models, subdir='ssd', version=0)\ndef render_synthetic_zone_data(mode):\n assert mode in ('all', 'sample_large', 'sample')\n if not os.path.exists('done'):\n with read_input_dir('makehuman/generated'):\n mesh_paths = sorted(['%s/%s' % (os.getcwd(), x) for x in glob.glob('*.mhx2')])\n if mode == 'sample_large':\n mesh_paths = mesh_paths[:100]\n elif mode == 'sample':\n mesh_paths = mesh_paths[:10]\n\n with read_input_dir('hand_labeling/blender'):\n texture_path = os.getcwd() + '/zones.png'\n with read_input_dir('scripts/blender'):\n script_path = os.getcwd() + '/render_synthetic_data.py'\n\n angles = 16\n with open('config.json', 'w') as f:\n json.dump({\n 'num_angles': angles,\n 'texture_path': texture_path,\n 'mesh_paths': mesh_paths\n }, f)\n subprocess.check_call(['blender', '--python', script_path, '--background'])\n\n f = h5py.File('data.hdf5', 'w')\n dset = f.create_dataset('dset', (len(mesh_paths), angles, 330, 256, 2))\n\n for i, file in enumerate(tqdm.tqdm(glob.glob('*_depth.png'))):\n zones_file = file.replace('depth', 'zones')\n angle = int(file.split('_')[-2])\n dset[i//angles, angle, ..., 0] = skimage.color.rgb2gray(skimage.io.imread(file))\n zones = skimage.io.imread(zones_file)\n labels = _convert_colors_to_label(zones[..., :3])\n dset[i//angles, angle, ..., 1] = labels\n\n open('done', 'w').close()\n else:\n f = h5py.File('data.hdf5', 'r')\n dset = f['dset']\n return dset" ]
[ [ "numpy.abs", "numpy.random.seed", "numpy.argmin", "numpy.random.uniform", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
laurallu/imbalanced-learn
[ "321b751f90ef8faaec6b39218f8c531893e9e79f", "9a1191e1369f688903649b4342b24e0041c6cf33", "321b751f90ef8faaec6b39218f8c531893e9e79f" ]
[ "imblearn/under_sampling/_prototype_selection/tests/test_instance_hardness_threshold.py", "imblearn/over_sampling/tests/test_adasyn.py", "imblearn/under_sampling/_prototype_generation/_cluster_centroids.py" ]
[ "\"\"\"Test the module .\"\"\"\n# Authors: Guillaume Lemaitre <[email protected]>\n# Christos Aridas\n# License: MIT\n\nimport pytest\nimport numpy as np\n\nfrom sklearn.ensemble import GradientBoostingClassifier\n\nfrom imblearn.under_sampling import InstanceHardnessThreshold\n\nRND_SEED = 0\nX = np.array(\n [\n [-0.3879569, 0.6894251],\n [-0.09322739, 1.28177189],\n [-0.77740357, 0.74097941],\n [0.91542919, -0.65453327],\n [-0.03852113, 0.40910479],\n [-0.43877303, 1.07366684],\n [-0.85795321, 0.82980738],\n [-0.18430329, 0.52328473],\n [-0.30126957, -0.66268378],\n [-0.65571327, 0.42412021],\n [-0.28305528, 0.30284991],\n [0.20246714, -0.34727125],\n [1.06446472, -1.09279772],\n [0.30543283, -0.02589502],\n [-0.00717161, 0.00318087],\n ]\n)\nY = np.array([0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0])\nESTIMATOR = GradientBoostingClassifier(random_state=RND_SEED)\n\n\ndef test_iht_init():\n sampling_strategy = \"auto\"\n iht = InstanceHardnessThreshold(\n ESTIMATOR, sampling_strategy=sampling_strategy, random_state=RND_SEED\n )\n\n assert iht.sampling_strategy == sampling_strategy\n assert iht.random_state == RND_SEED\n\n\ndef test_iht_fit_resample():\n iht = InstanceHardnessThreshold(ESTIMATOR, random_state=RND_SEED)\n X_resampled, y_resampled = iht.fit_resample(X, Y)\n assert X_resampled.shape == (12, 2)\n assert y_resampled.shape == (12,)\n\n\ndef test_iht_fit_resample_half():\n sampling_strategy = {0: 6, 1: 8}\n iht = InstanceHardnessThreshold(\n ESTIMATOR, sampling_strategy=sampling_strategy, random_state=RND_SEED\n )\n X_resampled, y_resampled = iht.fit_resample(X, Y)\n assert X_resampled.shape == (14, 2)\n assert y_resampled.shape == (14,)\n\n\ndef test_iht_fit_resample_class_obj():\n est = GradientBoostingClassifier(random_state=RND_SEED)\n iht = InstanceHardnessThreshold(estimator=est, random_state=RND_SEED)\n X_resampled, y_resampled = iht.fit_resample(X, Y)\n assert X_resampled.shape == (12, 2)\n assert y_resampled.shape == (12,)\n\n\ndef test_iht_fit_resample_wrong_class_obj():\n from sklearn.cluster import KMeans\n\n est = KMeans()\n iht = InstanceHardnessThreshold(estimator=est, random_state=RND_SEED)\n with pytest.raises(ValueError, match=\"Invalid parameter `estimator`\"):\n iht.fit_resample(X, Y)\n", "\"\"\"Test the module under sampler.\"\"\"\n# Authors: Guillaume Lemaitre <[email protected]>\n# Christos Aridas\n# License: MIT\n\nimport pytest\nimport numpy as np\n\nfrom sklearn.utils._testing import assert_allclose\nfrom sklearn.utils._testing import assert_array_equal\nfrom sklearn.neighbors import NearestNeighbors\n\nfrom imblearn.over_sampling import ADASYN\n\nRND_SEED = 0\nX = np.array(\n [\n [0.11622591, -0.0317206],\n [0.77481731, 0.60935141],\n [1.25192108, -0.22367336],\n [0.53366841, -0.30312976],\n [1.52091956, -0.49283504],\n [-0.28162401, -2.10400981],\n [0.83680821, 1.72827342],\n [0.3084254, 0.33299982],\n [0.70472253, -0.73309052],\n [0.28893132, -0.38761769],\n [1.15514042, 0.0129463],\n [0.88407872, 0.35454207],\n [1.31301027, -0.92648734],\n [-1.11515198, -0.93689695],\n [-0.18410027, -0.45194484],\n [0.9281014, 0.53085498],\n [-0.14374509, 0.27370049],\n [-0.41635887, -0.38299653],\n [0.08711622, 0.93259929],\n [1.70580611, -0.11219234],\n ]\n)\nY = np.array([0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0])\nR_TOL = 1e-4\n\n\ndef test_ada_init():\n sampling_strategy = \"auto\"\n ada = ADASYN(sampling_strategy=sampling_strategy, random_state=RND_SEED)\n assert ada.random_state == RND_SEED\n\n\ndef test_ada_fit_resample():\n ada = 
ADASYN(random_state=RND_SEED)\n X_resampled, y_resampled = ada.fit_resample(X, Y)\n X_gt = np.array(\n [\n [0.11622591, -0.0317206],\n [0.77481731, 0.60935141],\n [1.25192108, -0.22367336],\n [0.53366841, -0.30312976],\n [1.52091956, -0.49283504],\n [-0.28162401, -2.10400981],\n [0.83680821, 1.72827342],\n [0.3084254, 0.33299982],\n [0.70472253, -0.73309052],\n [0.28893132, -0.38761769],\n [1.15514042, 0.0129463],\n [0.88407872, 0.35454207],\n [1.31301027, -0.92648734],\n [-1.11515198, -0.93689695],\n [-0.18410027, -0.45194484],\n [0.9281014, 0.53085498],\n [-0.14374509, 0.27370049],\n [-0.41635887, -0.38299653],\n [0.08711622, 0.93259929],\n [1.70580611, -0.11219234],\n [0.94899098, -0.30508981],\n [0.28204936, -0.13953426],\n [1.58028868, -0.04089947],\n [0.66117333, -0.28009063],\n ]\n )\n y_gt = np.array(\n [\n 0,\n 1,\n 0,\n 0,\n 0,\n 1,\n 1,\n 1,\n 1,\n 1,\n 1,\n 0,\n 0,\n 1,\n 1,\n 1,\n 1,\n 0,\n 1,\n 0,\n 0,\n 0,\n 0,\n 0,\n ]\n )\n assert_allclose(X_resampled, X_gt, rtol=R_TOL)\n assert_array_equal(y_resampled, y_gt)\n\n\ndef test_ada_fit_resample_nn_obj():\n nn = NearestNeighbors(n_neighbors=6)\n ada = ADASYN(random_state=RND_SEED, n_neighbors=nn)\n X_resampled, y_resampled = ada.fit_resample(X, Y)\n X_gt = np.array(\n [\n [0.11622591, -0.0317206],\n [0.77481731, 0.60935141],\n [1.25192108, -0.22367336],\n [0.53366841, -0.30312976],\n [1.52091956, -0.49283504],\n [-0.28162401, -2.10400981],\n [0.83680821, 1.72827342],\n [0.3084254, 0.33299982],\n [0.70472253, -0.73309052],\n [0.28893132, -0.38761769],\n [1.15514042, 0.0129463],\n [0.88407872, 0.35454207],\n [1.31301027, -0.92648734],\n [-1.11515198, -0.93689695],\n [-0.18410027, -0.45194484],\n [0.9281014, 0.53085498],\n [-0.14374509, 0.27370049],\n [-0.41635887, -0.38299653],\n [0.08711622, 0.93259929],\n [1.70580611, -0.11219234],\n [0.94899098, -0.30508981],\n [0.28204936, -0.13953426],\n [1.58028868, -0.04089947],\n [0.66117333, -0.28009063],\n ]\n )\n y_gt = np.array(\n [\n 0,\n 1,\n 0,\n 0,\n 0,\n 1,\n 1,\n 1,\n 1,\n 1,\n 1,\n 0,\n 0,\n 1,\n 1,\n 1,\n 1,\n 0,\n 1,\n 0,\n 0,\n 0,\n 0,\n 0,\n ]\n )\n assert_allclose(X_resampled, X_gt, rtol=R_TOL)\n assert_array_equal(y_resampled, y_gt)\n\n\[email protected](\n \"adasyn_params, err_msg\",\n [\n (\n {\"sampling_strategy\": {0: 9, 1: 12}},\n \"No samples will be generated.\",\n ),\n ({\"n_neighbors\": \"rnd\"}, \"has to be one of\"),\n ],\n)\ndef test_adasyn_error(adasyn_params, err_msg):\n adasyn = ADASYN(**adasyn_params)\n with pytest.raises(ValueError, match=err_msg):\n adasyn.fit_resample(X, Y)\n", "\"\"\"Class to perform under-sampling by generating centroids based on\nclustering.\"\"\"\n\n# Authors: Guillaume Lemaitre <[email protected]>\n# Fernando Nogueira\n# Christos Aridas\n# License: MIT\n\nimport numpy as np\nfrom scipy import sparse\n\nfrom sklearn.base import clone\nfrom sklearn.cluster import KMeans\nfrom sklearn.neighbors import NearestNeighbors\nfrom sklearn.utils import _safe_indexing\n\nfrom ..base import BaseUnderSampler\nfrom ...utils import Substitution\nfrom ...utils._docstring import _random_state_docstring\n\nVOTING_KIND = (\"auto\", \"hard\", \"soft\")\n\n\n@Substitution(\n sampling_strategy=BaseUnderSampler._sampling_strategy_docstring,\n random_state=_random_state_docstring,\n)\nclass ClusterCentroids(BaseUnderSampler):\n \"\"\"Perform under-sampling by generating centroids based on\n clustering methods.\n\n Method that under samples the majority class by replacing a\n cluster of majority samples by the cluster centroid of a KMeans\n algorithm. 
This algorithm keeps N majority samples by fitting the\n KMeans algorithm with N cluster to the majority class and using\n the coordinates of the N cluster centroids as the new majority\n samples.\n\n Read more in the :ref:`User Guide <cluster_centroids>`.\n\n Parameters\n ----------\n {sampling_strategy}\n\n {random_state}\n\n estimator : object, optional(default=KMeans())\n Pass a :class:`sklearn.cluster.KMeans` estimator.\n\n voting : str, optional (default='auto')\n Voting strategy to generate the new samples:\n\n - If ``'hard'``, the nearest-neighbors of the centroids found using the\n clustering algorithm will be used.\n - If ``'soft'``, the centroids found by the clustering algorithm will\n be used.\n - If ``'auto'``, if the input is sparse, it will default on ``'hard'``\n otherwise, ``'soft'`` will be used.\n\n .. versionadded:: 0.3.0\n\n n_jobs : int or None, optional (default=None)\n Number of CPU cores used during the cross-validation loop.\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. See\n `Glossary <https://scikit-learn.org/stable/glossary.html#term-n-jobs>`_\n for more details.\n\n Notes\n -----\n Supports multi-class resampling by sampling each class independently.\n\n Examples\n --------\n\n >>> from collections import Counter\n >>> from sklearn.datasets import make_classification\n >>> from imblearn.under_sampling import \\\nClusterCentroids # doctest: +NORMALIZE_WHITESPACE\n >>> X, y = make_classification(n_classes=2, class_sep=2,\n ... weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0,\n ... n_features=20, n_clusters_per_class=1, n_samples=1000, random_state=10)\n >>> print('Original dataset shape %s' % Counter(y))\n Original dataset shape Counter({{1: 900, 0: 100}})\n >>> cc = ClusterCentroids(random_state=42)\n >>> X_res, y_res = cc.fit_resample(X, y)\n >>> print('Resampled dataset shape %s' % Counter(y_res))\n ... 
# doctest: +ELLIPSIS\n Resampled dataset shape Counter({{...}})\n\n \"\"\"\n\n def __init__(\n self,\n sampling_strategy=\"auto\",\n random_state=None,\n estimator=None,\n voting=\"auto\",\n n_jobs=None,\n ):\n super().__init__(sampling_strategy=sampling_strategy)\n self.random_state = random_state\n self.estimator = estimator\n self.voting = voting\n self.n_jobs = n_jobs\n\n def _validate_estimator(self):\n \"\"\"Private function to create the KMeans estimator\"\"\"\n if self.estimator is None:\n self.estimator_ = KMeans(\n random_state=self.random_state, n_jobs=self.n_jobs\n )\n elif isinstance(self.estimator, KMeans):\n self.estimator_ = clone(self.estimator)\n else:\n raise ValueError(\n \"`estimator` has to be a KMeans clustering.\"\n \" Got {} instead.\".format(type(self.estimator))\n )\n\n def _generate_sample(self, X, y, centroids, target_class):\n if self.voting_ == \"hard\":\n nearest_neighbors = NearestNeighbors(n_neighbors=1)\n nearest_neighbors.fit(X, y)\n indices = nearest_neighbors.kneighbors(\n centroids, return_distance=False\n )\n X_new = _safe_indexing(X, np.squeeze(indices))\n else:\n if sparse.issparse(X):\n X_new = sparse.csr_matrix(centroids, dtype=X.dtype)\n else:\n X_new = centroids\n y_new = np.array([target_class] * centroids.shape[0], dtype=y.dtype)\n\n return X_new, y_new\n\n def _fit_resample(self, X, y):\n self._validate_estimator()\n\n if self.voting == \"auto\":\n if sparse.issparse(X):\n self.voting_ = \"hard\"\n else:\n self.voting_ = \"soft\"\n else:\n if self.voting in VOTING_KIND:\n self.voting_ = self.voting\n else:\n raise ValueError(\n \"'voting' needs to be one of {}. Got {}\"\n \" instead.\".format(VOTING_KIND, self.voting)\n )\n\n X_resampled, y_resampled = [], []\n for target_class in np.unique(y):\n if target_class in self.sampling_strategy_.keys():\n n_samples = self.sampling_strategy_[target_class]\n self.estimator_.set_params(**{\"n_clusters\": n_samples})\n self.estimator_.fit(X[y == target_class])\n X_new, y_new = self._generate_sample(\n X, y, self.estimator_.cluster_centers_, target_class\n )\n X_resampled.append(X_new)\n y_resampled.append(y_new)\n else:\n target_class_indices = np.flatnonzero(y == target_class)\n X_resampled.append(_safe_indexing(X, target_class_indices))\n y_resampled.append(_safe_indexing(y, target_class_indices))\n\n if sparse.issparse(X):\n X_resampled = sparse.vstack(X_resampled)\n else:\n X_resampled = np.vstack(X_resampled)\n y_resampled = np.hstack(y_resampled)\n\n return X_resampled, np.array(y_resampled, dtype=y.dtype)\n\n def _more_tags(self):\n return {\"sample_indices\": False}\n" ]
[ [ "numpy.array", "sklearn.cluster.KMeans", "sklearn.ensemble.GradientBoostingClassifier" ], [ "sklearn.utils._testing.assert_array_equal", "numpy.array", "sklearn.utils._testing.assert_allclose", "sklearn.neighbors.NearestNeighbors" ], [ "numpy.hstack", "sklearn.utils._safe_indexing", "scipy.sparse.issparse", "sklearn.cluster.KMeans", "numpy.unique", "numpy.squeeze", "scipy.sparse.csr_matrix", "numpy.flatnonzero", "sklearn.base.clone", "sklearn.neighbors.NearestNeighbors", "scipy.sparse.vstack", "numpy.array", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
christian-jacobsen/hypernet
[ "9f62e1531eb152cc08af0b0c6b09d6fde8d42400" ]
[ "hypernet/src/thermophysicalModels/chemistry/reactions/reactionRate/arrhenius.py" ]
[ "import numpy as np\n\nfrom hypernet.src.thermophysicalModels.chemistry.reactions.reactionRate import Basic\n\n\nclass Arrhenius(Basic):\n\n # Initialization\n ###########################################################################\n def __init__(\n self,\n reactionsDatabase,\n *args,\n **kwargs\n ):\n super(Arrhenius, self).__init__(\n reactionsDatabase,\n *args,\n **kwargs\n )\n self.A = self.reacDB['A'].to_numpy()\n self.beta = self.reacDB['beta'].to_numpy()\n self.Ta = self.reacDB['Ta'].to_numpy()\n\n # Methods\n ###########################################################################\n # Forward reaction rates --------------------------------------------------\n def k_(self, T):\n return self.A * np.power(T, self.beta) * np.exp(-self.Ta / T)\n\n def dkdT_(self, T):\n return (self.beta + self.Ta / T) * self.k / T\n" ]
[ [ "numpy.exp", "numpy.power" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
lopa23/flim_optcrf
[ "2d9a1dba37a7e5e6beae66c536b07bb7ae4bdfe9", "2d9a1dba37a7e5e6beae66c536b07bb7ae4bdfe9" ]
[ "qpth/qp.py", "qpth/solvers/pdipm/batch.py" ]
[ "import torch\nfrom torch.autograd import Function\n\nfrom .util import bger, expandParam, extract_nBatch\nfrom . import solvers\nfrom .solvers.pdipm import batch as pdipm_b\nfrom .solvers.pdipm import spbatch as pdipm_spb\n# from .solvers.pdipm import single as pdipm_s\n\nfrom enum import Enum\n\n\nclass QPSolvers(Enum):\n PDIPM_BATCHED = 1\n CVXPY = 2\n\n\ndef QPFunction(eps=1e-12, verbose=1, notImprovedLim=3,\n maxIter=20, solver=QPSolvers.PDIPM_BATCHED,\n check_Q_spd=False):\n class QPFunctionFn(Function):\n @staticmethod\n def forward(ctx, Q_, p_, G_, h_, A_, b_):\n \"\"\"Solve a batch of QPs.\n\n This function solves a batch of QPs, each optimizing over\n `nz` variables and having `nineq` inequality constraints\n and `neq` equality constraints.\n The optimization problem for each instance in the batch\n (dropping indexing from the notation) is of the form\n\n \\hat z = argmin_z 1/2 z^T Q z + p^T z\n subject to Gz <= h\n Az = b\n\n where Q \\in S^{nz,nz},\n S^{nz,nz} is the set of all positive semi-definite matrices,\n p \\in R^{nz}\n G \\in R^{nineq,nz}\n h \\in R^{nineq}\n A \\in R^{neq,nz}\n b \\in R^{neq}\n\n These parameters should all be passed to this function as\n Variable- or Parameter-wrapped Tensors.\n (See torch.autograd.Variable and torch.nn.parameter.Parameter)\n\n If you want to solve a batch of QPs where `nz`, `nineq` and `neq`\n are the same, but some of the contents differ across the\n minibatch, you can pass in tensors in the standard way\n where the first dimension indicates the batch example.\n This can be done with some or all of the coefficients.\n\n You do not need to add an extra dimension to coefficients\n that will not change across all of the minibatch examples.\n This function is able to infer such cases.\n\n If you don't want to use any equality or inequality constraints,\n you can set the appropriate values to:\n\n e = Variable(torch.Tensor())\n\n Parameters:\n Q: A (nBatch, nz, nz) or (nz, nz) Tensor.\n p: A (nBatch, nz) or (nz) Tensor.\n G: A (nBatch, nineq, nz) or (nineq, nz) Tensor.\n h: A (nBatch, nineq) or (nineq) Tensor.\n A: A (nBatch, neq, nz) or (neq, nz) Tensor.\n b: A (nBatch, neq) or (neq) Tensor.\n\n Returns: \\hat z: a (nBatch, nz) Tensor.\n \"\"\"\n nBatch = extract_nBatch(Q_, p_, G_, h_, A_, b_)\n Q, _ = expandParam(Q_, nBatch, 3)\n p, _ = expandParam(p_, nBatch, 2)\n G, _ = expandParam(G_, nBatch, 3)\n h, _ = expandParam(h_, nBatch, 2)\n A, _ = expandParam(A_, nBatch, 3)\n b, _ = expandParam(b_, nBatch, 2)\n\n if check_Q_spd:\n for i in range(nBatch):\n e, _ = torch.eig(Q[i])\n \n if not torch.all(e[:,0] > 0):\n raise RuntimeError('Q is not SPD.')\n\n _, nineq, nz = G.size()\n print(\"In constructor QP\", G.size())\n neq = A.size(1) if A.nelement() > 0 else 0\n assert(neq > 0 or nineq > 0)\n ctx.neq, ctx.nineq, ctx.nz = neq, nineq, nz\n\n if solver == QPSolvers.PDIPM_BATCHED:\n ctx.Q_LU, ctx.S_LU, ctx.R = pdipm_b.pre_factor_kkt(Q, G, A)\n zhats, ctx.nus, ctx.lams, ctx.slacks = pdipm_b.forward(\n Q, p, G, h, A, b, ctx.Q_LU, ctx.S_LU, ctx.R,\n eps, verbose, notImprovedLim, maxIter)\n elif solver == QPSolvers.CVXPY:\n vals = torch.Tensor(nBatch).type_as(Q)\n zhats = torch.Tensor(nBatch, ctx.nz).type_as(Q)\n lams = torch.Tensor(nBatch, ctx.nineq).type_as(Q)\n nus = torch.Tensor(nBatch, ctx.neq).type_as(Q) \\\n if ctx.neq > 0 else torch.Tensor()\n slacks = torch.Tensor(nBatch, ctx.nineq).type_as(Q)\n for i in range(nBatch):\n Ai, bi = (A[i], b[i]) if neq > 0 else (None, None)\n vals[i], zhati, nui, lami, si = 
solvers.cvxpy.forward_single_np(\n *[x.cpu().numpy() if x is not None else None\n for x in (Q[i], p[i], G[i], h[i], Ai, bi)])\n # if zhati[0] is None:\n # import IPython, sys; IPython.embed(); sys.exit(-1)\n zhats[i] = torch.Tensor(zhati)\n lams[i] = torch.Tensor(lami)\n slacks[i] = torch.Tensor(si)\n if neq > 0:\n nus[i] = torch.Tensor(nui)\n\n ctx.vals = vals\n ctx.lams = lams\n ctx.nus = nus\n ctx.slacks = slacks\n else:\n assert False\n\n ctx.save_for_backward(zhats, Q_, p_, G_, h_, A_, b_)\n return zhats\n\n @staticmethod\n def backward(ctx, dl_dzhat):\n zhats, Q, p, G, h, A, b = ctx.saved_tensors\n nBatch = extract_nBatch(Q, p, G, h, A, b)\n Q, Q_e = expandParam(Q, nBatch, 3)\n p, p_e = expandParam(p, nBatch, 2)\n G, G_e = expandParam(G, nBatch, 3)\n h, h_e = expandParam(h, nBatch, 2)\n A, A_e = expandParam(A, nBatch, 3)\n b, b_e = expandParam(b, nBatch, 2)\n\n # neq, nineq, nz = ctx.neq, ctx.nineq, ctx.nz\n neq, nineq = ctx.neq, ctx.nineq\n #print(\"Here in backward\")\n\n if solver == QPSolvers.CVXPY:\n ctx.Q_LU, ctx.S_LU, ctx.R = pdipm_b.pre_factor_kkt(Q, G, A)\n\n # Clamp here to avoid issues coming up when the slacks are too small.\n # TODO: A better fix would be to get lams and slacks from the\n # solver that don't have this issue.\n d = torch.clamp(ctx.lams, min=1e-8) / torch.clamp(ctx.slacks, min=1e-8)\n\n pdipm_b.factor_kkt(ctx.S_LU, ctx.R, d)\n dx, _, dlam, dnu = pdipm_b.solve_kkt(\n ctx.Q_LU, d, G, A, ctx.S_LU,\n dl_dzhat, torch.zeros(nBatch, nineq).type_as(G),\n torch.zeros(nBatch, nineq).type_as(G),\n torch.zeros(nBatch, neq).type_as(G) if neq > 0 else torch.Tensor())\n\n print(\"In backwards,aftersolve_kkt\")\n dps = dx\n dGs = bger(dlam, zhats) + bger(ctx.lams, dx)\n if G_e:\n dGs = dGs.mean(0)\n dhs = -dlam\n if h_e:\n dhs = dhs.mean(0)\n if neq > 0:\n dAs = bger(dnu, zhats) + bger(ctx.nus, dx)\n dbs = -dnu\n if A_e:\n dAs = dAs.mean(0)\n if b_e:\n dbs = dbs.mean(0)\n else:\n dAs, dbs = None, None\n dQs = 0.5 * (bger(dx, zhats) + bger(zhats, dx))\n if Q_e:\n dQs = dQs.mean(0)\n if p_e:\n dps = dps.mean(0)\n\n\n grads = (dQs, dps, dGs, dhs, dAs, dbs)\n\n return grads\n return QPFunctionFn.apply\n\n\nclass SpQPFunction(Function):\n def __init__(self, Qi, Qsz, Gi, Gsz, Ai, Asz,\n eps=1e-12, verbose=0, notImprovedLim=3, maxIter=20):\n self.Qi, self.Qsz = Qi, Qsz\n self.Gi, self.Gsz = Gi, Gsz\n self.Ai, self.Asz = Ai, Asz\n\n self.eps = eps\n self.verbose = verbose\n self.notImprovedLim = notImprovedLim\n self.maxIter = maxIter\n\n self.nineq, self.nz = Gsz\n self.neq, _ = Asz\n\n def forward(self, Qv, p, Gv, h, Av, b):\n self.nBatch = Qv.size(0)\n\n zhats, self.nus, self.lams, self.slacks = pdipm_spb.forward(\n self.Qi, Qv, self.Qsz, p, self.Gi, Gv, self.Gsz, h,\n self.Ai, Av, self.Asz, b, self.eps, self.verbose,\n self.notImprovedLim, self.maxIter)\n\n self.save_for_backward(zhats, Qv, p, Gv, h, Av, b)\n return zhats\n\n def backward(self, dl_dzhat):\n zhats, Qv, p, Gv, h, Av, b = self.saved_tensors\n\n Di = type(self.Qi)([range(self.nineq), range(self.nineq)])\n Dv = self.lams / self.slacks\n Dsz = torch.Size([self.nineq, self.nineq])\n dx, _, dlam, dnu = pdipm_spb.solve_kkt(\n self.Qi, Qv, self.Qsz, Di, Dv, Dsz,\n self.Gi, Gv, self.Gsz,\n self.Ai, Av, self.Asz, dl_dzhat,\n type(p)(self.nBatch, self.nineq).zero_(),\n type(p)(self.nBatch, self.nineq).zero_(),\n type(p)(self.nBatch, self.neq).zero_())\n\n dps = dx\n\n dGs = bger(dlam, zhats) + bger(self.lams, dx)\n GM = torch.cuda.sparse.DoubleTensor(\n self.Gi, Gv[0].clone().fill_(1.0), self.Gsz\n 
).to_dense().byte().expand_as(dGs)\n dGs = dGs[GM].view_as(Gv)\n\n dhs = -dlam\n\n dAs = bger(dnu, zhats) + bger(self.nus, dx)\n AM = torch.cuda.sparse.DoubleTensor(\n self.Ai, Av[0].clone().fill_(1.0), self.Asz\n ).to_dense().byte().expand_as(dAs)\n dAs = dAs[AM].view_as(Av)\n\n dbs = -dnu\n\n dQs = 0.5 * (bger(dx, zhats) + bger(zhats, dx))\n QM = torch.cuda.sparse.DoubleTensor(\n self.Qi, Qv[0].clone().fill_(1.0), self.Qsz\n ).to_dense().byte().expand_as(dQs)\n dQs = dQs[QM].view_as(Qv)\n\n grads = (dQs, dps, dGs, dhs, dAs, dbs)\n\n return grads\n", "import torch\nfrom enum import Enum\n# from block import block\n\nfrom qpth.util import get_sizes, bdiag\n\n\ndef lu_hack(x):\n \n data, pivots = x.lu(pivot=not x.is_cuda)\n if x.is_cuda:\n if x.ndimension() == 2:\n pivots = torch.arange(1, 1+x.size(0)).int().cuda()\n elif x.ndimension() == 3:\n pivots = torch.arange(\n 1, 1+x.size(1),\n ).unsqueeze(0).repeat(x.size(0), 1).int().cuda()\n else:\n assert False\n return (data, pivots)\n\n\nINACC_ERR = \"\"\"\n--------\nqpth warning: Returning an inaccurate and potentially incorrect solution.\n\nSome residual is large.\nYour problem may be infeasible or difficult.\n\nYou can try using the CVXPY solver to see if your problem is feasible\nand you can use the verbose option to check the convergence status of\nour solver while increasing the number of iterations.\n\nAdvanced users:\nYou can also try to enable iterative refinement in the solver:\nhttps://github.com/locuslab/qpth/issues/6\n--------\n\"\"\"\n\n\nclass KKTSolvers(Enum):\n LU_FULL = 1\n LU_PARTIAL = 2\n IR_UNOPT = 3\n\n\ndef forward(Q, p, G, h, A, b, Q_LU, S_LU, R, eps=1e-12, verbose=1, notImprovedLim=10,\n maxIter=20, solver=KKTSolvers.LU_PARTIAL):\n \"\"\"\n Q_LU, S_LU, R = pre_factor_kkt(Q, G, A)\n \"\"\"\n nineq, nz, neq, nBatch = get_sizes(G, A)\n qlu, pivot=Q_LU\n print(\" Sizes nineq, nz, neq, nBatch\", nineq, nz, neq, nBatch)\n # print(\"Residual\",best['resids'].max())print(\"size of qlu\",qlu.size())\n # Find initial values\n if solver == KKTSolvers.LU_FULL:\n D = torch.eye(nineq).repeat(nBatch, 1, 1).type_as(Q)\n x, s, z, y = factor_solve_kkt(\n Q, D, G, A, p,\n torch.zeros(nBatch, nineq).type_as(Q),\n -h, -b if b is not None else None)\n elif solver == KKTSolvers.LU_PARTIAL:\n d = torch.ones(nBatch, nineq).type_as(Q)\n factor_kkt(S_LU, R, d)\n x, s, z, y = solve_kkt(\n Q_LU, d, G, A, S_LU,\n p, torch.zeros(nBatch, nineq).type_as(Q),\n -h, -b if neq > 0 else None)\n elif solver == KKTSolvers.IR_UNOPT:\n D = torch.eye(nineq).repeat(nBatch, 1, 1).type_as(Q)\n x, s, z, y = solve_kkt_ir(\n Q, D, G, A, p,\n torch.zeros(nBatch, nineq).type_as(Q),\n -h, -b if b is not None else None)\n else:\n assert False\n\n # Make all of the slack variables >= 1.\n #print(\"Done with 1st solve_KKT\");\n \n M = torch.min(s, 1)[0]\n #print(s,M.size())\n if(M.dim()>= 2):\n M = M.view(M.size(0), 2).repeat(1, nineq)\n s=s.view(-1,M.size(1))\n #M=M.view(1,M.size(1)*M.size(2))\n else:\n M = M.view(M.size(0), 1).repeat(1, nineq)\n I = M < 0\n #print(\"In forward\",I.size(),M.size(),s.size())\n \n s[I] -= M[I] - 1\n\n # Make all of the inequality dual variables >= 1.\n \n M = torch.min(z, 1)[0]\n if(G.size(0)>= 2):\n M = M.view(M.size(0), 2).repeat(1, nineq)\n z=z.view(-1,M.size(1))\n else:\n M = M.view(M.size(0), 1).repeat(1, nineq)\n I = M < 0\n z[I] -= M[I] - 1\n\n best = {'resids': None, 'x': None, 'z': None, 's': None, 'y': None}\n nNotImproved = 0\n \n #if(G.size(1)==2):\n #x=x.squeeze(0)\n 
#print(y,A.size(),z.size(),G.size(),x.size(),Q.size(),p.size(),torch.bmm(x.unsqueeze(1), Q.transpose(1, 2)).squeeze(1).size())\n \n for i in range(maxIter):\n # affine scaling direction\n rx = (torch.bmm(y.unsqueeze(1), A).squeeze(1) if neq > 0 else 0.) + \\\n torch.bmm(z.unsqueeze(1), G).squeeze(1) + \\\n torch.bmm(x.unsqueeze(1), Q.transpose(1, 2)).squeeze(1) + \\\n p\n rs = z\n rz = torch.bmm(x.unsqueeze(1), G.transpose(1, 2)).squeeze(1) + s - h\n ry = torch.bmm(x.unsqueeze(1), A.transpose(\n 1, 2)).squeeze(1) - b if neq > 0 else 0.0\n mu = torch.abs((s * z).sum(1).squeeze() / nineq)\n z_resid = torch.norm(rz, 2, 1).squeeze()\n y_resid = torch.norm(ry, 2, 1).squeeze() if neq > 0 else 0\n pri_resid = y_resid + z_resid\n dual_resid = torch.norm(rx, 2, 1).squeeze()\n resids = pri_resid + dual_resid + nineq * mu\n\n d = z / s\n try:\n factor_kkt(S_LU, R, d)\n except:\n return best['x'], best['y'], best['z'], best['s']\n\n if verbose == 1:\n print('iter: {}, pri_resid: {:.5e}, dual_resid: {:.5e}, mu: {:.5e}'.format(\n i, pri_resid.mean(), dual_resid.mean(), mu.mean()))\n if best['resids'] is None:\n best['resids'] = resids\n best['x'] = x.clone()\n best['z'] = z.clone()\n best['s'] = s.clone()\n best['y'] = y.clone() if y is not None else None\n nNotImproved = 0\n else:\n I = resids < best['resids']\n if I.sum() > 0:\n nNotImproved = 0\n else:\n nNotImproved += 1\n\n if(nineq>1) and I.size(0)==nineq:\n I=I[0]\n \n I_nz = I.repeat(nz, 1).t()\n \n \n I_nineq = I.repeat(nineq, 1).t()\n \n #print(\"Best values\",best['x'].size(), best['z'].size(),x.size(),I_nz.size(),I_nineq.size())\n best['resids'][I] = resids[I]\n best['x'][I_nz] = x[I_nz]\n best['z'][I_nineq] = z[I_nineq]\n best['s'][I_nineq] = s[I_nineq]\n if neq > 0:\n I_neq = I.repeat(neq, 1).t()\n best['y'][I_neq] = y[I_neq]\n\n print(\"Residual\",best['resids'].max(), nNotImproved,notImprovedLim)\n if nNotImproved == notImprovedLim or best['resids'].max() < eps or mu.min() > 1e32:\n if best['resids'].max() > 100. 
and verbose >= 0:\n print(INACC_ERR)\n return best['x'], best['y'], best['z'], best['s']\n\n if solver == KKTSolvers.LU_FULL:\n D = bdiag(d)\n dx_aff, ds_aff, dz_aff, dy_aff = factor_solve_kkt(\n Q, D, G, A, rx, rs, rz, ry)\n elif solver == KKTSolvers.LU_PARTIAL:\n dx_aff, ds_aff, dz_aff, dy_aff = solve_kkt(\n Q_LU, d, G, A, S_LU, rx, rs, rz, ry)\n elif solver == KKTSolvers.IR_UNOPT:\n D = bdiag(d)\n dx_aff, ds_aff, dz_aff, dy_aff = solve_kkt_ir(\n Q, D, G, A, rx, rs, rz, ry)\n else:\n assert False\n\n # compute centering directions\n alpha = torch.min(torch.min(get_step(z, dz_aff),\n get_step(s, ds_aff)),\n torch.ones(nBatch).type_as(Q))\n alpha_nineq = alpha.repeat(nineq, 1).t()\n t1 = s + alpha_nineq * ds_aff\n t2 = z + alpha_nineq * dz_aff\n t3 = torch.sum(t1 * t2, 1).squeeze()\n t4 = torch.sum(s * z, 1).squeeze()\n sig = (t3 / t4)**3\n\n rx = torch.zeros(nBatch, nz).type_as(Q)\n rs = ((-mu * sig).repeat(nineq, 1).t() + ds_aff * dz_aff) / s\n #print(\"Rz size before\",rz.size())\n rz = torch.zeros(nBatch, nineq).type_as(Q)\n #print(\"Rz size after\",rz.size())\n ry = torch.zeros(nBatch, neq).type_as(Q) if neq > 0 else torch.Tensor()\n\n if solver == KKTSolvers.LU_FULL:\n D = bdiag(d)\n dx_cor, ds_cor, dz_cor, dy_cor = factor_solve_kkt(\n Q, D, G, A, rx, rs, rz, ry)\n elif solver == KKTSolvers.LU_PARTIAL:\n dx_cor, ds_cor, dz_cor, dy_cor = solve_kkt(\n Q_LU, d, G, A, S_LU, rx, rs, rz, ry)\n elif solver == KKTSolvers.IR_UNOPT:\n D = bdiag(d)\n dx_cor, ds_cor, dz_cor, dy_cor = solve_kkt_ir(\n Q, D, G, A, rx, rs, rz, ry)\n else:\n assert False\n\n dx = dx_aff + dx_cor\n ds = ds_aff + ds_cor\n dz = dz_aff + dz_cor\n dy = dy_aff + dy_cor if neq > 0 else None\n alpha = torch.min(0.999 * torch.min(get_step(z, dz),\n get_step(s, ds)),\n torch.ones(nBatch).type_as(Q))\n alpha_nineq = alpha.repeat(nineq, 1).t()\n alpha_neq = alpha.repeat(neq, 1).t() if neq > 0 else None\n alpha_nz = alpha.repeat(nz, 1).t()\n\n x += alpha_nz * dx\n s += alpha_nineq * ds\n z += alpha_nineq * dz\n y = y + alpha_neq * dy if neq > 0 else None\n\n print(\"Residual\",best['resids'].max())\n if best['resids'].max() > 1. 
and verbose >= 0:\n print(INACC_ERR)\n return best['x'], best['y'], best['z'], best['s']\n\n\ndef get_step(v, dv):\n a = -v / dv\n a[dv > 0] = max(1.0, a.max())\n return a.min(1)[0].squeeze()\n\n\ndef unpack_kkt(v, nz, nineq, neq):\n i = 0\n x = v[:, i:i + nz]\n i += nz\n s = v[:, i:i + nineq]\n i += nineq\n z = v[:, i:i + nineq]\n i += nineq\n y = v[:, i:i + neq]\n return x, s, z, y\n\n\ndef kkt_resid_reg(Q_tilde, D_tilde, G, A, eps, dx, ds, dz, dy, rx, rs, rz, ry):\n dx, ds, dz, dy, rx, rs, rz, ry = [\n x.unsqueeze(2) if x is not None else None for x in\n [dx, ds, dz, dy, rx, rs, rz, ry]\n ]\n resx = Q_tilde.bmm(dx) + G.transpose(1, 2).bmm(dz) + rx\n if dy is not None:\n resx += A.transpose(1, 2).bmm(dy)\n ress = D_tilde.bmm(ds) + dz + rs\n resz = G.bmm(dx) + ds - eps * dz + rz\n resy = A.bmm(dx) - eps * dy + ry if dy is not None else None\n resx, ress, resz, resy = (\n v.squeeze(2) if v is not None else None for v in (resx, ress, resz, resy))\n return resx, ress, resz, resy\n\n\ndef solve_kkt_ir(Q, D, G, A, rx, rs, rz, ry, niter=1):\n \"\"\"Inefficient iterative refinement.\"\"\"\n nineq, nz, neq, nBatch = get_sizes(G, A)\n\n eps = 1e-7\n Q_tilde = Q + eps * torch.eye(nz).type_as(Q).repeat(nBatch, 1, 1)\n D_tilde = D + eps * torch.eye(nineq).type_as(Q).repeat(nBatch, 1, 1)\n\n dx, ds, dz, dy = factor_solve_kkt_reg(\n Q_tilde, D_tilde, G, A, rx, rs, rz, ry, eps)\n res = kkt_resid_reg(Q, D, G, A, eps,\n dx, ds, dz, dy, rx, rs, rz, ry)\n resx, ress, resz, resy = res\n res = resx\n for k in range(niter):\n ddx, dds, ddz, ddy = factor_solve_kkt_reg(Q_tilde, D_tilde, G, A, -resx, -ress, -resz,\n -resy if resy is not None else None,\n eps)\n dx, ds, dz, dy = [v + dv if v is not None else None\n for v, dv in zip((dx, ds, dz, dy), (ddx, dds, ddz, ddy))]\n res = kkt_resid_reg(Q, D, G, A, eps,\n dx, ds, dz, dy, rx, rs, rz, ry)\n resx, ress, resz, resy = res\n # res = torch.cat(resx)\n res = resx\n\n return dx, ds, dz, dy\n\n\ndef factor_solve_kkt_reg(Q_tilde, D, G, A, rx, rs, rz, ry, eps):\n nineq, nz, neq, nBatch = get_sizes(G, A)\n\n H_ = torch.zeros(nBatch, nz + nineq, nz + nineq).type_as(Q_tilde)\n H_[:, :nz, :nz] = Q_tilde\n H_[:, -nineq:, -nineq:] = D\n if neq > 0:\n # H_ = torch.cat([torch.cat([Q, torch.zeros(nz,nineq).type_as(Q)], 1),\n # torch.cat([torch.zeros(nineq, nz).type_as(Q), D], 1)], 0)\n A_ = torch.cat([torch.cat([G, torch.eye(nineq).type_as(Q_tilde).repeat(nBatch, 1, 1)], 2),\n torch.cat([A, torch.zeros(nBatch, neq, nineq).type_as(Q_tilde)], 2)], 1)\n g_ = torch.cat([rx, rs], 1)\n h_ = torch.cat([rz, ry], 1)\n else:\n A_ = torch.cat(\n [G, torch.eye(nineq).type_as(Q_tilde).repeat(nBatch, 1, 1)], 2)\n g_ = torch.cat([rx, rs], 1)\n h_ = rz\n\n H_LU = lu_hack(H_)\n\n invH_A_ = A_.transpose(1, 2).lu_solve(*H_LU)\n invH_g_ = g_.unsqueeze(2).lu_solve(*H_LU).squeeze(2)\n\n S_ = torch.bmm(A_, invH_A_)\n S_ -= eps * torch.eye(neq + nineq).type_as(Q_tilde).repeat(nBatch, 1, 1)\n S_LU = lu_hack(S_)\n t_ = torch.bmm(invH_g_.unsqueeze(1), A_.transpose(1, 2)).squeeze(1) - h_\n w_ = -t_.unsqueeze(2).lu_solve(*S_LU).squeeze(2)\n t_ = -g_ - w_.unsqueeze(1).bmm(A_).squeeze()\n v_ = t_.unsqueeze(2).lu_solve(*H_LU).squeeze(2)\n\n dx = v_[:, :nz]\n ds = v_[:, nz:]\n dz = w_[:, :nineq]\n dy = w_[:, nineq:] if neq > 0 else None\n\n return dx, ds, dz, dy\n\n\ndef factor_solve_kkt(Q, D, G, A, rx, rs, rz, ry):\n nineq, nz, neq, nBatch = get_sizes(G, A)\n\n H_ = torch.zeros(nBatch, nz + nineq, nz + nineq).type_as(Q)\n H_[:, :nz, :nz] = Q\n H_[:, -nineq:, -nineq:] = D\n G=G.squeeze(0)#added\n if neq > 0:\n 
A_ = torch.cat([torch.cat([G, torch.eye(nineq).type_as(Q).repeat(nBatch, 1, 1)], 2),\n torch.cat([A, torch.zeros(nBatch, neq, nineq).type_as(Q)], 2)], 1)\n g_ = torch.cat([rx, rs], 1)\n h_ = torch.cat([rz, ry], 1)\n else:\n #print(G.size(),torch.eye(nineq).type_as(Q).size())\n A_ = torch.cat([G, torch.eye(nineq).type_as(Q)], 1)\n g_ = torch.cat([rx, rs], 1)\n h_ = rz\n\n H_LU = lu_hack(H_)\n lu, pv=H_LU\n #\n # print(A_.size())\n invH_A_ = A_.lu_solve(*H_LU).unsqueeze(1)#changed from A_.transpose(1, 2).lu_solve(*H_LU)\n invH_g_ = g_.unsqueeze(2).lu_solve(*H_LU).squeeze(2)\n\n \n \n A_=A_.unsqueeze(0).transpose(1,2)#added\n S_ = torch.bmm(A_, invH_A_)\n S_LU = lu_hack(S_)\n slu, pv=S_LU\n\n #print(A_.size(),invH_g_.unsqueeze(1).size(), torch.bmm(invH_g_.unsqueeze(1), A_).size(),h_.size())\n t_ = torch.bmm(A_,invH_g_.unsqueeze(1)).squeeze(1) - h_#changed from torch.bmm(invH_g_.unsqueeze(1), A_.transpose(1, 2)).squeeze(1) - h_\n \n w_ = -t_.lu_solve(*S_LU).squeeze(2)#changed\n #print(g_.size(),w_.size(),A_.size())\n t_ = -g_ - w_.bmm(A_).squeeze()\n v_ = t_.unsqueeze(2).lu_solve(*H_LU).squeeze(2)\n\n dx = v_[:, :nz]\n ds = v_[:, nz:]\n dz = w_[:, :nineq]\n dy = w_[:, nineq:] if neq > 0 else None\n\n return dx, ds, dz, dy\n\n\ndef solve_kkt(Q_LU, d, G, A, S_LU, rx, rs, rz, ry):\n \"\"\" Solve KKT equations for the affine step\"\"\"\n nineq, nz, neq, nBatch = get_sizes(G, A)\n qlu, pivots=Q_LU\n #print(\"InEq cons, nz, eq cons, btch\",nineq, nz, neq, nBatch) #how is G changing dim to 3D\n #print(\"Now in solve KKt\", G.size(),rx.size(), qlu.size())\n if G.size(1)>=2:\n invQ_rx = rx.unsqueeze(2).lu_solve(*Q_LU).squeeze(2)\n else:\n invQ_rx = rx.unsqueeze(2).lu_solve(*Q_LU).squeeze(2)\n\n #print(\"Size after\",invQ_rx.size(),G.size(),rs.size(),rz.size())\n \n if neq > 0:\n h = torch.cat((invQ_rx.unsqueeze(1).bmm(A.transpose(1, 2)).squeeze(1) - ry,\n invQ_rx.unsqueeze(1).bmm(G.transpose(1, 2)).squeeze(1) + rs / d - rz), 1)\n else:\n \n h = invQ_rx.unsqueeze(1).bmm(G.transpose(1, 2)).squeeze(1) + rs / d - rz\n #if(rz.size(0)==1):\n # h=torch.cat(h,h,1)\n \n slu, pv=S_LU\n\n #print(\"W getting generated\",h.size(),slu.size())\n if G.size(1)>=2 and h.size(0)>1:\n w = -(h.unsqueeze(0).lu_solve(*S_LU)).squeeze(2)\n w=w[:,0,:]\n else:\n w = -(h.unsqueeze(2).lu_solve(*S_LU)).squeeze(2)\n\n #print(w.size())\n if G.size(1)>=2:\n g1 = -rx - w[:, neq:].matmul(G).squeeze(1)\n else:\n g1 = -rx - w[:, neq:].unsqueeze(1).bmm(G).squeeze(1)\n if neq > 0:\n g1 -= w[:, :neq].unsqueeze(1).bmm(A).squeeze(1)\n \n g2 = -rs - w[:, neq:]\n #print(g1.size(),qlu.size()) \n if g1.dim()>2:\n dx=g1.transpose(1,2).lu_solve(*Q_LU).transpose(1,2).squeeze(2)\n else:\n dx = g1.unsqueeze(2).lu_solve(*Q_LU).squeeze(2)\n ds = g2 / d\n dz = w[:, neq:]\n dy = w[:, :neq] if neq > 0 else None\n # print(\"Done Solve KKT\");\n return dx, ds, dz, dy\n\n\ndef pre_factor_kkt(Q, G, A):\n \"\"\" Perform all one-time factorizations and cache relevant matrix products\"\"\"\n nineq, nz, neq, nBatch = get_sizes(G, A)\n \n \n try:\n Q_LU = lu_hack(Q)\n except:\n raise RuntimeError(\"\"\"\nqpth Error: Cannot perform LU factorization on Q.\nPlease make sure that your Q matrix is PSD and has\na non-zero diagonal.\n\"\"\")\n\n # S = [ A Q^{-1} A^T A Q^{-1} G^T ]\n # [ G Q^{-1} A^T G Q^{-1} G^T + D^{-1} ]\n #\n # We compute a partial LU decomposition of the S matrix\n # that can be completed once D^{-1} is known.\n # See the 'Block LU factorization' part of our website\n # for more details.\n qlu, pivots=Q_LU\n ##put in a condition for m>1\n if 
G.size(2)==2: ##something funny here\n print(G.size(),qlu.size())\n G_invQ_GT = torch.matmul(G, G.lu_solve(*Q_LU).transpose(1,2))\n else:\n print(G.size(2),G.transpose(1,2).size(),qlu.size())\n G_invQ_GT = torch.bmm(G, G.transpose(1, 2).lu_solve(*Q_LU))\n \n R = G_invQ_GT.clone()\n S_LU_pivots = torch.IntTensor(range(1, 1 + neq + nineq)).unsqueeze(0) \\\n .repeat(nBatch, 1).type_as(Q).int()\n if neq > 0:\n invQ_AT = A.transpose(1, 2).lu_solve(*Q_LU)\n A_invQ_AT = torch.bmm(A, invQ_AT)\n G_invQ_AT = torch.bmm(G, invQ_AT)\n\n LU_A_invQ_AT = lu_hack(A_invQ_AT)\n P_A_invQ_AT, L_A_invQ_AT, U_A_invQ_AT = torch.lu_unpack(*LU_A_invQ_AT)\n P_A_invQ_AT = P_A_invQ_AT.type_as(A_invQ_AT)\n\n S_LU_11 = LU_A_invQ_AT[0]\n U_A_invQ_AT_inv = (P_A_invQ_AT.bmm(L_A_invQ_AT)\n ).lu_solve(*LU_A_invQ_AT)\n S_LU_21 = G_invQ_AT.bmm(U_A_invQ_AT_inv)\n T = G_invQ_AT.transpose(1, 2).lu_solve(*LU_A_invQ_AT)\n S_LU_12 = U_A_invQ_AT.bmm(T)\n S_LU_22 = torch.zeros(nBatch, nineq, nineq).type_as(Q)\n S_LU_data = torch.cat((torch.cat((S_LU_11, S_LU_12), 2),\n torch.cat((S_LU_21, S_LU_22), 2)),\n 1)\n S_LU_pivots[:, :neq] = LU_A_invQ_AT[1]\n\n R -= G_invQ_AT.bmm(T)\n else:\n S_LU_data = torch.zeros(nBatch, nineq, nineq).type_as(Q)\n\n S_LU = [S_LU_data, S_LU_pivots]\n return Q_LU, S_LU, R\n\n\nfactor_kkt_eye = None\n\n\ndef factor_kkt(S_LU, R, d):\n \"\"\" Factor the U22 block that we can only do after we know D. \"\"\"\n nBatch, nineq = d.size()\n neq = S_LU[1].size(1) - nineq\n # TODO: There's probably a better way to add a batched diagonal.\n global factor_kkt_eye\n if factor_kkt_eye is None or factor_kkt_eye.size() != d.size():\n # print('Updating batchedEye size.')\n factor_kkt_eye = torch.eye(nineq).repeat(\n nBatch, 1, 1).type_as(R)\n factor_kkt_eye = factor_kkt_eye.type(torch.ByteTensor)\n T = R.clone()\n T[factor_kkt_eye] += (1. / d).squeeze().view(-1)\n\n T_LU = lu_hack(T)\n\n if not T.is_cuda:\n # TODO: Don't use pivoting in most cases because\n # torch.lu_unpack is inefficient here:\n oldPivotsPacked = S_LU[1][:, -nineq:] - neq\n oldPivots, _, _ = torch.lu_unpack(\n T_LU[0], oldPivotsPacked, unpack_data=False)\n newPivotsPacked = T_LU[1]\n newPivots, _, _ = torch.lu_unpack(\n T_LU[0], newPivotsPacked, unpack_data=False)\n\n # Re-pivot the S_LU_21 block.\n if neq > 0:\n S_LU_21 = S_LU[0][:, -nineq:, :neq]\n S_LU[0][:, -nineq:,\n :neq] = newPivots.transpose(1, 2).bmm(oldPivots.bmm(S_LU_21))\n\n # Add the new S_LU_22 block pivots.\n S_LU[1][:, -nineq:] = newPivotsPacked + neq\n\n # Add the new S_LU_22 block.\n S_LU[0][:, -nineq:, -nineq:] = T_LU[0]\n" ]
[ [ "torch.all", "torch.Size", "torch.Tensor", "torch.zeros", "torch.eig", "torch.clamp" ], [ "torch.norm", "torch.lu_unpack", "torch.ones", "torch.Tensor", "torch.cat", "torch.zeros", "torch.min", "torch.sum", "torch.eye", "torch.bmm" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
intact-solutions/pysparse
[ "f3dca3ae9d02ab3f49486fbae5d9d68059a318ab" ]
[ "examples/poisson_test.py" ]
[ "import numpy as np\nimport math\nfrom pysparse.sparse import spmatrix\nfrom pysparse.itsolvers.krylov import pcg, qmrs\nfrom pysparse.precon import precon\nimport time\n\ndef poisson2d(n):\n L = spmatrix.ll_mat(n*n, n*n)\n for i in range(n):\n for j in range(n):\n k = i + n*j\n L[k,k] = 4\n if i > 0:\n L[k,k-1] = -1\n if i < n-1:\n L[k,k+1] = -1\n if j > 0:\n L[k,k-n] = -1\n if j < n-1:\n L[k,k+n] = -1\n return L\n\ndef poisson2d_sym(n):\n L = spmatrix.ll_mat_sym(n*n)\n for i in range(n):\n for j in range(n):\n k = i + n*j\n L[k,k] = 4\n if i > 0:\n L[k,k-1] = -1\n if j > 0:\n L[k,k-n] = -1\n return L\n\ndef poisson2d_sym_blk(n):\n L = spmatrix.ll_mat_sym(n*n)\n I = spmatrix.ll_mat_sym(n)\n P = spmatrix.ll_mat_sym(n)\n for i in range(n):\n I[i,i] = -1\n for i in range(n):\n P[i,i] = 4\n if i > 0: P[i,i-1] = -1\n for i in range(0, n*n, n):\n L[i:i+n,i:i+n] = P\n if i > 0: L[i:i+n,i-n:i] = I\n return L\n\ntol = 1e-8\nn = 100\n\nt1 = time.clock()\nL = poisson2d_sym_blk(n)\nprint('Time for constructing the matrix using poisson2d_sym_blk: %8.2f sec' % (time.clock() - t1, ))\n\nt1 = time.clock()\nL = poisson2d_sym(n)\nprint('Time for constructing the matrix using poisson2d_sym : %8.2f sec' % (time.clock() - t1, ))\n\nt1 = time.clock()\nL = poisson2d(n)\nprint('Time for constructing the matrix using poisson2d : %8.2f sec' % (time.clock() - t1, ))\n\n\nA = L.to_csr()\nS = L.to_sss()\nprint(L.nnz)\nprint(S.nnz)\nprint(A.nnz)\nb = np.ones(n*n, 'd')\n\n# -----------------------------------------------------------------------------\n\nt1 = time.clock()\n\nx = np.empty(n*n, 'd')\ninfo, iter, relres = pcg(S, b, x, tol, 2000)\nprint('info=%d, iter=%d, relres=%e' % (info, iter, relres))\n\nprint('Solve time using SSS matrix: %8.2f s' % (time.clock() - t1))\n\nprint('norm(x) = %g' % np.linalg.norm(x))\n\nr = np.empty(n*n, 'd')\nS.matvec(x, r)\nr = b - r\nprint('norm(b - A*x) = %g' % np.linalg.norm(r))\n\nprint(x[0:10])\n\n# -----------------------------------------------------------------------------\n\nt1 = time.clock()\n\nx = np.empty(n*n, 'd')\ninfo, iter, relres = pcg(A, b, x, tol, 2000)\nprint('info=%d, iter=%d, relres=%e' % (info, iter, relres))\n\nprint('Solve time using CSR matrix: %8.2f sec' % (time.clock() - t1))\n\nprint('norm(x) = %g' % np.linalg.norm(x))\n\nr = np.empty(n*n, 'd')\nA.matvec(x, r)\nr = b - r\nprint('norm(b - A*x) = %g' % np.linalg.norm(r))\n\n# -----------------------------------------------------------------------------\n\nt1 = time.clock()\n\nx = np.empty(n*n, 'd')\ninfo, iter, relres = pcg(L, b, x, tol, 2000)\nprint('info=%d, iter=%d, relres=%e' % (info, iter, relres))\n\nprint('Solve time using LL matrix: %8.2f sec' % (time.clock() - t1))\n\nprint('norm(x) = %g' % np.linalg.norm(x))\n\nr = np.empty(n*n, 'd')\nA.matvec(x, r)\nr = b - r\nprint('norm(b - A*x) = %g' % np.linalg.norm(r))\n\n# -----------------------------------------------------------------------------\n\nK_ssor = precon.ssor(S, 1.9)\nt1 = time.clock()\n\nx = np.empty(n*n, 'd')\ninfo, iter, relres = pcg(S, b, x, tol, 2000, K_ssor)\nprint('info=%d, iter=%d, relres=%e' % (info, iter, relres))\n\nprint('Solve time using SSS matrix and SSOR preconditioner: %8.2f sec' % (time.clock() - t1))\n\nprint('norm(x) = %g' % np.linalg.norm(x))\n\nr = np.empty(n*n, 'd')\nS.matvec(x, r)\nr = b - r\nprint('norm(b - A*x) = %g' % np.linalg.norm(r))\n\n# -----------------------------------------------------------------------------\n\nfrom pysparse.eigen import jdsym\njdsym.jdsym(S, None, None, 5, 0.0, 1e-8, 100, qmrs, clvl=1)\n" ]
[ [ "numpy.linalg.norm", "numpy.empty", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ryuzakyl/data-bloodhound
[ "ae0413e748e55a0d2dbae35bbe96a672f313a64b", "ae0413e748e55a0d2dbae35bbe96a672f313a64b", "ae0413e748e55a0d2dbae35bbe96a672f313a64b", "ae0413e748e55a0d2dbae35bbe96a672f313a64b" ]
[ "datasets/raman_tablets/__init__.py", "measures/corr_shape_dissimilarity.py", "measures/correlation_coefficient.py", "measures/minkowski_distance.py" ]
[ "#!/usr/bin/env\n# -*- coding: utf-8 -*-\n# Copyright (C) Victor M. Mendiola Lau - All Rights Reserved\n# Unauthorized copying of this file, via any medium is strictly prohibited\n# Proprietary and confidential\n# Written by Victor M. Mendiola Lau <[email protected]>, February 2017\n\nimport os\n\nimport scipy.io as sio\n\nimport utils.datasets as utils\n\n# ---------------------------------------------------------------\n\n# data set paths\n__data_set_path = \"{}/data/Ramandata_tablets.mat\".format(os.path.split(__file__)[0])\n\n__pickle_path = \"{}/cache/raman_tablets.pickle\".format(os.path.split(__file__)[0])\n\n# ---------------------------------------------------------------\n\n\n# TODO: Add docstring with usage examples (see 'uv_fuel' data set)\n\[email protected]_data_from_pickle(__pickle_path)\ndef load_raman_tablets():\n # loading matlab data set\n raw_data = sio.loadmat(__data_set_path)\n\n # getting samples labels\n samples_labels = raw_data['ObjLabels'].tolist()\n\n # getting features labels\n raw_features = raw_data['VarLabels'].tolist()\n features_labels = list(map(float, raw_features[2:]))\n\n # getting data\n raw_data = raw_data['Matrix']\n data = raw_data[:, 2:]\n\n # creating the extra columns\n other_cols = {\n 'active (% w/w)': raw_data[:, 0].tolist(),\n 'Type': raw_data[:, 1].astype(int).tolist(),\n }\n\n # returning the built data set\n return utils.build_data_set(data, samples_labels, features_labels, extra_cols=other_cols)\n", "#!/usr/bin/env\n# -*- coding: utf-8 -*-\n# Copyright (C) Victor M. Mendiola Lau - All Rights Reserved\n# Unauthorized copying of this file, via any medium is strictly prohibited\n# Proprietary and confidential\n# Written by Victor M. Mendiola Lau <[email protected]>, June 2017\n\nfrom math import ceil, floor\nimport numpy as np\nfrom scipy.ndimage.filters import gaussian_filter1d as scipy_gauss1d\nfrom scipy.spatial.distance import correlation\n\ndef derfilter(data, sigma=2.0):\n # computing x boundaries\n x_lb = int(floor(-3 * sigma))\n x_ub = int(ceil(3 * sigma)) + 1 # accounting for the upper exclusive boundary in python\n\n # computing x and g\n x = np.arange(x_lb, x_ub)\n g = np.exp(-0.5 * (x ** 2) / sigma ** 2) # parenthesis only for clarity\n\n # computing kernel\n dg = -x * g / sigma ** 2\n kernel = dg\n\n # computing data and sizes\n sc, fc = data.shape\n kernel_length = kernel.shape[0]\n kc = kernel_length + 10 # amount of columns of kernel\n\n # output data\n out = np.zeros(data.shape)\n\n # extend the data by mirroring the tails\n data2 = np.hstack([np.fliplr(data[:, 0:kc]), data, np.fliplr(data[:, -kc:])])\n\n # size of the convolution output\n outc = fc + 2*kc + kernel_length - 1\n\n # ind = kc+ceil(length(kernel)/2):outc-kc-floor(length(kernel)/2);\n ind = slice(kc + int(ceil(kernel_length / 2)), outc - kc - int(floor(kernel_length / 2)) + 1) # +1 because Python excludes upper bound while MATLAB does not\n\n # TODO: Optimization here!: 1-list comprehension, 2-out=np.vstack(list_comprehension)\n for i in range(sc):\n t = np.convolve(data2[i, :], kernel)\n out[i, :] = t[ind]\n\n # data convolved with a gaussian kernel\n return out\n\n\ndef corr_dshape(x, y, sigma=2.0):\n # creating a data array from the two samples\n data = np.array([x, y])\n\n # computing the dissimilarity representation via shape measure\n dr_data = corr_dspec_shape(data, data, sigma)\n\n # dr_data should have shape (2, 2) and dshape(x, y) = dr_data[0, 1] = dr_data[1, 0]\n return dr_data[0, 1]\n\n\ndef corr_dspec_shape(data, proto, sigma=2.0):\n # validating 
feature sizes\n if data.shape[1] != proto.shape[1]:\n raise Exception('Both \"data\" and \"prototypes\" must have the same feature sizes.')\n\n # getting samples and prototypes count\n sc = data.shape[0]\n pc = proto.shape[0]\n\n # resulting dissimilarity representation\n d = np.zeros((sc, pc))\n\n # derivative filter for both data and prototypes\n data2 = derfilter(data, sigma)\n proto2 = derfilter(proto, sigma)\n\n # normalizing each row by its maximum value\n data2 = np.apply_along_axis(lambda row: row / row.max(), 1, data2)\n proto2 = np.apply_along_axis(lambda row: row / row.max(), 1, proto2)\n\n # change here!!!!!!\n # TODO: Optimization here!: 1-list comprehension, 2-out=np.vstack(list_comprehension)\n for i in range(pc):\n t = np.apply_along_axis(lambda row: correlation(row, proto2[i, :]), 1, data2)\n d[:, i] = t\n\n # the dissimilarity representation\n return d\n\n\ndef corr_shape_measure(x, y, sigma=2.0):\n \"\"\"Computes the shape dissimilarity value.\n\n Args:\n x (list): The first vector.\n y (list): The second vector.\n sigma (float): The smoothing parameter\n\n Returns:\n float: The shape dissimilarity value between vectors x and y.\n\n \"\"\"\n\n # getting the length of the vectors\n x_length = len(x)\n y_length = len(y)\n\n # validating parameters\n if x_length != y_length:\n raise Exception('Vectors with different sizes')\n\n # TODO: Here it is assumed that x and y are lists. Analyze the possibility for them to be tuples or numpy arrays\n\n # converting x and y to numpy arrays\n x_arr = np.array(x, np.float32)\n y_arr = np.array(y, np.float32)\n\n # applying a first gaussian derivative filter to both\n x_gauss = scipy_gauss1d(x_arr, sigma, order=1)\n y_gauss = scipy_gauss1d(y_arr, sigma, order=1)\n\n # computing the shape dissimilarity\n return correlation(x_gauss, y_gauss)\n", "#!/usr/bin/env\n# -*- coding: utf-8 -*-\n# Copyright (C) Victor M. Mendiola Lau - All Rights Reserved\n# Unauthorized copying of this file, via any medium is strictly prohibited\n# Proprietary and confidential\n# Written by Victor M. 
Mendiola Lau <[email protected]>, August 2016\n\nfrom math import sqrt\n\nfrom scipy.spatial.distance import correlation as scy_correlation\n\n\ndef correlation(x, y):\n \"\"\"Computes the correlation coefficient.\n\n Args:\n x (list): The first vector.\n y (list): The second vector.\n\n Returns:\n float: The correlation coefficient between vectors x and y.\n\n \"\"\"\n\n # getting the length of the vectors\n x_length = len(x)\n y_length = len(y)\n\n # validating parameters\n if x_length != y_length:\n raise Exception('Vectors with different sizes')\n\n # computing the means of both vectors\n mean_x = 0.0\n mean_y = 0.0\n for i in range(x_length):\n mean_x += x[i]\n mean_y += y[i]\n\n # dividing by the length of the vectors\n mean_x /= x_length\n mean_y /= y_length\n\n # computing the values k, f1 and f2\n k = 0.0\n f1 = 0.0\n f2 = 0.0\n for i in range(x_length):\n # computing the offsets\n offset_x = x[i] - mean_x\n offset_y = y[i] - mean_y\n\n # updating k, f1 and f2\n k += offset_x * offset_y\n f1 += offset_x * offset_x\n f2 += offset_y * offset_y\n\n # returning the computed correlation distance\n return k / (sqrt(f1) * sqrt(f2))\n\n\ndef probabilistic_correlation(x, y):\n \"\"\"Computes the correlation coefficient as a probability value.\n\n Args:\n x (list): The first vector.\n y (list): The second vector.\n\n Returns:\n float: The correlation probability value between vectors x and y.\n\n \"\"\"\n\n # computing the correlation value\n value = correlation(x, y)\n\n # moving value to interval [0, 2]\n value += 1.0\n\n # stretching value to the interval [0, 1]\n return 0.5 * value\n\n\ndef dis_correlation_manual(x, y):\n \"\"\"Computes the correlation coefficient as a dissimilarity measure.\n\n Args:\n x (list): The first vector.\n y (list): The second vector.\n\n Returns:\n float: The dissimilarity value between vectors x and y.\n\n Examples:\n >>> dis_correlation_manual([1.0, 2.0, 3.0], [4.0, 5.0, 6.0])\n 2.220446049250313e-16\n >>> dis_correlation_manual([1.0, 2.0, 3.0], [6.0, 5.0, 4.0])\n 1.9999999999999998\n >>> dis_correlation_manual([1.0, 2.0, 3.0, 20.0, 150.0, 3.0], [6.0, 5.0, 4.0, 3.0, 2.0, 1.0])\n 1.4245486433123256\n >>> dis_correlation_manual([1.0, 2.0, 3.0, 20.0, 150.0, 3.0], [1.0, 2.0, 3.0, 4.0, 5.0, 6.0])\n 0.5754513566876744\n\n \"\"\"\n\n # computing the correlation value\n value = correlation(x, y)\n\n # setting dissimilarity value to the interval [0, 2]\n return 1 - value\n\n\ndef dis_correlation_scipy(x, y):\n \"\"\"Computes the correlation distance between `x` and `y`.\n\n Args:\n x (list): The first vector.\n y (list): The second vector.\n\n Returns:\n double: The correlation distance between vectors `x` and `y`.\n\n References:\n * https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.correlation.html\n\n Examples:\n >>> dis_correlation_scipy([1.0, 2.0, 3.0], [4.0, 5.0, 6.0])\n 2.2204460492503131e-16\n >>> dis_correlation_scipy([1.0, 2.0, 3.0], [6.0, 5.0, 4.0])\n 1.9999999999999998\n >>> dis_correlation_scipy([1.0, 2.0, 3.0, 20.0, 150.0, 3.0], [6.0, 5.0, 4.0, 3.0, 2.0, 1.0])\n 1.4245486433123256\n >>> dis_correlation_scipy([1.0, 2.0, 3.0, 20.0, 150.0, 3.0], [1.0, 2.0, 3.0, 4.0, 5.0, 6.0])\n 0.57545135668767444\n\n \"\"\"\n\n # returning the correlation distance\n return scy_correlation(x, y)\n", "#!/usr/bin/env\n# -*- coding: utf-8 -*-\n# Copyright (C) Victor M. Mendiola Lau - All Rights Reserved\n# Unauthorized copying of this file, via any medium is strictly prohibited\n# Proprietary and confidential\n# Written by Victor M. 
Mendiola Lau <[email protected]>, August 2016\n\nimport numpy as np\nfrom scipy.spatial.distance import minkowski as scipy_minkowski\n\n\ndef minkowski(x, y, p=5):\n \"\"\"Computes Minkowski distance.\n\n Args:\n x (list): The first vector.\n y (list): The second vector.\n p (int): Parameter p of Minkowski.\n\n Returns:\n float: The minkowski distance value between vectors x and y.\n\n \"\"\"\n\n # getting the length of the vectors\n x_length = len(x)\n y_length = len(y)\n\n # validating parameters\n if x_length != y_length:\n raise Exception('Vectors with different sizes')\n\n # TODO: Here it is assumed that x and y are lists. Analyze the possibility for them to be tuples or numpy arrays\n\n # converting x and y to numpy arrays\n x_arr = np.array(x, np.float32)\n y_arr = np.array(y, np.float32)\n\n # returning minkowski distance from scipy\n return scipy_minkowski(x_arr, y_arr, p)\n" ]
[ [ "scipy.io.loadmat" ], [ "numpy.convolve", "scipy.spatial.distance.correlation", "numpy.fliplr", "numpy.arange", "numpy.exp", "scipy.ndimage.filters.gaussian_filter1d", "numpy.array", "numpy.zeros" ], [ "scipy.spatial.distance.correlation" ], [ "numpy.array", "scipy.spatial.distance.minkowski" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "0.15", "1.4", "1.3", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "0.16" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
ethanabrooks/oyster
[ "08b758b15ca19c50c43a137cba733b79be55654a" ]
[ "rlkit/core/eval_util.py" ]
[ "\"\"\"\nCommon evaluation utilities.\n\"\"\"\n\nfrom collections import OrderedDict\nfrom numbers import Number\nimport os\nimport numpy as np\n\n\ndef dprint(*args):\n # hacky, but will do for now\n if int(os.environ[\"DEBUG\"]) == 1:\n print(args)\n\n\ndef get_generic_path_information(paths, stat_prefix=\"\"):\n \"\"\"\n Get an OrderedDict with a bunch of statistic names and values.\n \"\"\"\n statistics = OrderedDict()\n returns = [sum(path[\"rewards\"]) for path in paths]\n\n rewards = np.vstack([path[\"rewards\"] for path in paths])\n statistics.update(\n create_stats_ordered_dict(\"Rewards\", rewards, stat_prefix=stat_prefix)\n )\n statistics.update(\n create_stats_ordered_dict(\"Returns\", returns, stat_prefix=stat_prefix)\n )\n actions = [path[\"actions\"] for path in paths]\n if len(actions[0].shape) == 1:\n actions = np.hstack([path[\"actions\"] for path in paths])\n else:\n actions = np.vstack([path[\"actions\"] for path in paths])\n statistics.update(\n create_stats_ordered_dict(\"Actions\", actions, stat_prefix=stat_prefix)\n )\n statistics[\"Num Paths\"] = len(paths)\n\n return statistics\n\n\ndef get_average_returns(paths):\n returns = [sum(path[\"rewards\"]) for path in paths]\n return np.mean(returns)\n\n\ndef create_stats_ordered_dict(\n name, data, stat_prefix=None, always_show_all_stats=True, exclude_max_min=False,\n):\n if stat_prefix is not None:\n name = \"{} {}\".format(stat_prefix, name)\n if isinstance(data, Number):\n return OrderedDict({name: data})\n\n if len(data) == 0:\n return OrderedDict()\n\n if isinstance(data, tuple):\n ordered_dict = OrderedDict()\n for number, d in enumerate(data):\n sub_dict = create_stats_ordered_dict(\"{0}_{1}\".format(name, number), d,)\n ordered_dict.update(sub_dict)\n return ordered_dict\n\n if isinstance(data, list):\n try:\n iter(data[0])\n except TypeError:\n pass\n else:\n data = np.concatenate(data)\n\n if isinstance(data, np.ndarray) and data.size == 1 and not always_show_all_stats:\n return OrderedDict({name: float(data)})\n\n stats = OrderedDict(\n [(name + \" Mean\", np.mean(data)), (name + \" Std\", np.std(data)),]\n )\n if not exclude_max_min:\n stats[name + \" Max\"] = np.max(data)\n stats[name + \" Min\"] = np.min(data)\n return stats\n" ]
[ [ "numpy.hstack", "numpy.min", "numpy.concatenate", "numpy.max", "numpy.std", "numpy.mean", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
qzlvyh/sassoftware-python-dlpy
[ "9bf8cc4ffd5ae235e377004644ef70398431e09c" ]
[ "dlpy/timeseries.py" ]
[ "#!/usr/bin/env python\n# encoding: utf-8\n#\n# Copyright SAS Institute\n#\n# Licensed under the Apache License, Version 2.0 (the License);\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n''' Timeseries related classes and functions '''\n\nfrom __future__ import (print_function, division, absolute_import, unicode_literals)\nfrom swat.cas.table import CASTable\nfrom .utils import random_name, get_cas_host_type, char_to_double, int_to_double\nfrom dlpy.utils import DLPyError\nfrom swat.cas import datamsghandlers\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport warnings\nimport datetime\nimport numbers\nimport re\nimport swat\n\n\ndef plot_timeseries(tbl, timeid, timeseries, figure=None, \n groupid=None, start_time=None, end_time=None, xlim=None, \n ylim=None, xlabel=None, ylabel=None, xdate_format=None,\n title=None, figsize=None, \n fontsize_spec=None, **kwargs):\n '''\n Create an timeseries line plot from a CASTable or pandas DataFrame\n\n Parameters\n ----------\n tbl : :class:`CASTable` or :class:`pandas.DataFrame` or :class:`pandas.Series`\n The input table for the plot. If it is CASTable, it will be fetched to \n the client. If it is pandas.Series, the index name will become timeid, \n the series name will become timeseries. \n timeid : str\n The name of the timeid variable. It will be the value to be used in the \n x-axis.\n timeseries : str\n The name of the column contains the timeseries value. It will be the\n value to be used in the y-axis.\n figure : two-element-tuple, optional\n The tuple must be in the form (:class:`matplotlib.figure.Figure`,\n :class:`matplotlib.axes.Axes`). These are the figure and axes that the\n user wants to plot on. It can be used to plot new timeseries plot on\n pre-existing figures.\n Default: None\n groupid : dict, optional\n It is in the format {column1 : value1, column2 : value2, ...}.\n It is used to plot subset of the data where column1 = value1 and \n column2 = value2, etc.\n Default: None, which means do not subset the data.\n start_time : :class:`datetime.datetime` or :class:`datetime.date`, optional\n The start time of the plotted timeseries. \n Default: None, which means the plot starts at the beginning of the\n timeseries. \n end_time : :class:`datetime.datetime` or :class:`datetime.date`, optional\n The end time of the plotted timeseries.\n Default: None, which means the plot ends at the end of the timeseries.\n xlim : tuple, optional\n Set the data limits for the x-axis.\n Default: None\n ylim : tuple, optional\n Set the data limits for the y-axis.\n Default: None\n xlabel : string, optional\n Set the label for the x-axis.\n ylabel : string, optional\n Set the label for the y-axis.\n xdate_format : string, optional\n If the x-axis represents date or datetime, this is the date or datetime \n format string. (e.g. 
'%Y-%m-%d' is the format of 2000-03-10, \n refer to documentation for :meth:`datetime.datetime.strftime`)\n Default: None\n title : string, optional\n Set the title of the figure.\n Default: None\n figsize : tuple, optional\n The size of the figure.\n Default: None\n fontsize_spec : dict, optional\n It specifies the fontsize for 'xlabel', 'ylabel', 'xtick', 'ytick', \n 'legend' and 'title'. (e.g. {'xlabel':14, 'ylabel':14}).\n If None, and figure is specified, then it will take from provided\n figure object. Otherwise, it will take the default fontsize, which are\n {'xlabel':16, 'ylabel':16, 'xtick':14, 'ytick':14, 'legend':14, 'title':20}\n Default: None\n `**kwargs` : keyword arguments, optional\n Options to pass to matplotlib plotting method. \n\n Returns\n -------\n (:class:`matplotlib.figure.Figure`, :class:`matplotlib.axes.Axes`)\n\n '''\n default_fontsize_spec = {'xlabel':16, 'ylabel':16, 'xtick':14,\n 'ytick':14, 'legend':14, 'title':20}\n \n if figure is None:\n fig, ax = plt.subplots(1, 1, figsize=figsize)\n \n if fontsize_spec is not None:\n default_fontsize_spec.update(fontsize_spec)\n \n fontsize_spec = default_fontsize_spec\n else:\n fig, ax = figure\n if fontsize_spec is None:\n fontsize_spec = {}\n \n if 'legend' not in fontsize_spec.keys():\n fontsize_spec['legend'] = default_fontsize_spec['legend']\n \n if isinstance(tbl, CASTable):\n if groupid is None:\n tbl = tbl.to_frame()\n else:\n where_clause_list = []\n for gid in groupid.keys():\n where_clause_list.append(gid + '=' + str(groupid[gid]))\n \n where_clause = ' and '.join(where_clause_list)\n tbl = tbl.query(where_clause)\n tbl = tbl.to_frame()\n else:\n if isinstance(tbl, pd.Series):\n timeseries = tbl.name\n tbl = tbl.reset_index()\n timeid = [colname for colname in tbl.columns if colname != timeseries][0]\n \n if groupid is not None:\n for gid in groupid.keys():\n tbl = tbl.loc[tbl[gid]==groupid[gid]]\n\n if not (np.issubdtype(tbl[timeid].dtype, np.integer) or\n np.issubdtype(tbl[timeid].dtype, np.floating)):\n tbl[timeid] = pd.to_datetime(tbl[timeid])\n fig.autofmt_xdate()\n if xdate_format is not None:\n import matplotlib.dates as mdates\n xfmt = mdates.DateFormatter(xdate_format)\n ax.xaxis.set_major_formatter(xfmt)\n \n if start_time is not None:\n if isinstance(start_time, datetime.date):\n start_time = pd.Timestamp(start_time)\n \n tbl = tbl.loc[tbl[timeid]>=start_time]\n \n if end_time is not None:\n if isinstance(start_time, datetime.date):\n end_time = pd.Timestamp(end_time)\n \n tbl = tbl.loc[tbl[timeid]<=end_time]\n \n tbl = tbl.sort_values(timeid)\n \n ax.plot(tbl[timeid], tbl[timeseries], **kwargs)\n \n if xlabel is not None: \n if 'xlabel' in fontsize_spec.keys():\n ax.set_xlabel(xlabel, fontsize=fontsize_spec['xlabel'])\n else:\n ax.set_xlabel(xlabel)\n elif figure is not None:\n if 'xlabel' in fontsize_spec.keys():\n ax.set_xlabel(ax.get_xlabel(), fontsize=fontsize_spec['xlabel'])\n else:\n ax.set_xlabel(timeid, fontsize=fontsize_spec['xlabel'])\n \n \n if ylabel is not None:\n if 'ylabel' in fontsize_spec.keys():\n ax.set_ylabel(ylabel, fontsize=fontsize_spec['ylabel'])\n else:\n ax.set_ylabel(ylabel)\n elif figure is not None:\n if 'ylabel' in fontsize_spec.keys():\n ax.set_ylabel(ax.get_ylabel(), fontsize=fontsize_spec['ylabel'])\n else:\n ax.set_ylabel(timeseries, fontsize=fontsize_spec['ylabel'])\n \n if xlim is not None: \n ax.set_xlim(xlim)\n \n if ylim is not None:\n ax.set_ylim(ylim)\n \n if title is not None:\n if 'title' in fontsize_spec.keys():\n ax.set_title(title, 
fontsize=fontsize_spec['title'])\n else:\n ax.set_title(title)\n elif figure is not None:\n if 'title' in fontsize_spec.keys():\n ax.set_title(ax.get_title(), fontsize=fontsize_spec['title'])\n \n ax.legend(loc='best', bbox_to_anchor=(1, 1), prop={'size': fontsize_spec['legend']})\n if 'xtick' in fontsize_spec.keys():\n ax.get_xaxis().set_tick_params(direction='out', labelsize=fontsize_spec['xtick'])\n else:\n ax.get_xaxis().set_tick_params(direction='out')\n \n if 'ytick' in fontsize_spec.keys():\n ax.get_yaxis().set_tick_params(direction='out', labelsize=fontsize_spec['ytick'])\n else:\n ax.get_yaxis().set_tick_params(direction='out') \n \n \n return (fig, ax)\n \n\nclass TimeseriesTable(CASTable):\n '''\n Table for preprocessing timeseries\n\n It creates an instance of :class:`TimeseriesTable` by loading from\n files on the server side, or files on the client side, or in\n memory :class:`CASTable`, :class:`pandas.DataFrame` or\n :class:`pandas.Series. It then performs inplace timeseries formatting,\n timeseries accumulation, timeseries subsequence generation, and\n timeseries partitioning to prepare the timeseries into a format that\n can be followed by subsequent deep learning models.\n \n Parameters\n ----------\n name : string, optional\n Name of the CAS table\n timeid : string, optional\n Specifies the column name for the timeid. \n Default: None\n groupby_var : string or list-of-strings, optional\n The groupby variables. \n Default: None.\n sequence_opt : dict, optional\n Dictionary with keys: 'input_length', 'target_length' and 'token_size'.\n It will be created by the prepare_subsequences method.\n Default: None\n inputs_target : dict, optional\n Dictionary with keys: 'inputs', 'target'.\n It will be created by the prepare_subsequences method.\n Default: None\n\n Returns\n -------\n :class:`TimeseriesTable`\n\n '''\n running_caslib = None\n\n def __init__(self, name, timeid=None, groupby_var=None, \n sequence_opt=None, inputs_target=None, **table_params):\n CASTable.__init__(self, name, **table_params)\n self.timeid = timeid\n self.groupby_var = groupby_var \n self.sequence_opt = sequence_opt\n self.inputs_target = inputs_target\n\n @classmethod\n def from_table(cls, tbl, columns=None, casout=None):\n '''\n Create an TimeseriesTable from a CASTable\n\n Parameters\n ----------\n tbl : :class:`CASTable`\n The CASTable object to use as the source.\n columns : list-of-strings, optional\n Columns to keep when loading the data.\n None means it will include all the columns from the source.\n Empty list means include no column, which will generate empty data.\n Default: None\n casout : dict or :class:`CASTable`, optional\n if it is dict, it specifies the output CASTable parameters.\n if it is CASTable, it is the CASTable that will be overwritten. 
\n None means a new CASTable with random name will be generated.\n Default: None\n\n Returns\n -------\n :class:`TimeseriesTable`\n\n '''\n input_tbl_params = tbl.to_outtable_params()\n input_tbl_name = input_tbl_params['name']\n\n conn = tbl.get_connection()\n\n if casout is None:\n casout_params = {}\n elif isinstance(casout, CASTable):\n casout_params = casout.to_outtable_params()\n elif isinstance(casout, dict):\n casout_params = casout\n\n if 'name' not in casout_params:\n casout_params['name'] = random_name('Timeseries', 6)\n \n output_tbl_name = casout_params['name']\n \n if columns is None:\n keep_col_sascode = '''\n data {0};\n set {1};\n run;\n '''.format(output_tbl_name, input_tbl_name)\n \n conn.retrieve('dataStep.runCode', _messagelevel='error',\n code=keep_col_sascode)\n else:\n if not isinstance(columns, list):\n columns = [columns]\n \n keepcol = ' '.join(columns)\n \n keep_col_sascode = '''\n data {0};\n set {1};\n keep {2};\n run;\n '''.format(output_tbl_name, input_tbl_name, keepcol)\n \n conn.retrieve('dataStep.runCode', _messagelevel='error',\n code=keep_col_sascode)\n \n \n out = cls(**casout_params)\n out.set_connection(conn)\n\n return out\n\n @classmethod\n def from_pandas(cls, conn, pandas_df, casout=None):\n '''\n Create an TimeseriesTable from a pandas DataFrame or Series\n\n Parameters\n ----------\n conn : CAS\n The CAS connection object\n pandas_df : :class:`pandas.DataFrame` or :class:`pandas.Series`\n The pandas dataframe or series to use as the source.\n casout : dict or :class:`CASTable`, optional\n if it is dict, it specifies the output CASTable parameters.\n if it is CASTable, it is the CASTable that will be overwritten. \n None means a new CASTable with random name will be generated.\n Default: None\n\n Returns\n -------\n :class:`TimeseriesTable`\n\n '''\n if isinstance(pandas_df, pd.Series):\n pandas_df = pandas_df.reset_index()\n\n if casout is None:\n casout_params = {}\n elif isinstance(casout, CASTable):\n casout_params = casout.to_outtable_params()\n elif isinstance(casout, dict):\n casout_params = casout\n\n if 'name' not in casout_params:\n casout_params['name'] = random_name('Timeseries', 6)\n \n output_tbl_name = casout_params['name']\n \n handler = datamsghandlers.PandasDataFrame(pandas_df)\n \n conn.addtable(table=output_tbl_name, replace=True, **handler.args.addtable)\n \n tbl = conn.CASTable(name=output_tbl_name)\n \n return cls.from_table(tbl, columns=None, casout=casout_params)\n\n @classmethod\n def from_localfile(cls, conn, path, columns=None, importoptions=None,\n casout=None):\n '''\n Create an TimeseriesTable from a file on the client side.\n\n Parameters\n ----------\n conn : CAS\n The CAS connection object\n path : string\n The full path to the local file that will be uploaded to the server.\n columns : list-of-strings, optional\n Columns to keep when loading the data.\n None means it will include all the columns from the source.\n Empty list means to include no column, which will generate empty data.\n Default: None\n importoptions : dict, optional\n Options to import data and upload to the server, such as filetype,\n delimiter, etc. 
None means use the default 'auto' method in the\n importoptions from CAS.upload.\n Default: None\n casout : dict or :class:`CASTable`, optional\n If it is dict, it specifies the output CASTable parameters.\n If it is CASTable, it is the CASTable that will be overwritten.\n None means a new CASTable with random name will be generated.\n Default: None\n\n Returns\n -------\n :class:`TimeseriesTable`\n\n '''\n if casout is None:\n casout_params = {}\n elif isinstance(casout, CASTable):\n casout_params = casout.to_outtable_params()\n elif isinstance(casout, dict):\n casout_params = casout\n\n if 'name' not in casout_params:\n casout_params['name'] = random_name('Timeseries', 6)\n \n if importoptions is None:\n importoptions = {}\n \n upload_result = conn.upload(path, \n importoptions=importoptions, \n casout=casout_params)\n \n tbl = conn.CASTable(**casout_params)\n \n return cls.from_table(tbl, columns=columns, casout=casout_params)\n \n @classmethod\n def from_serverfile(cls, conn, path, columns=None, caslib=None,\n importoptions=None, casout=None):\n '''\n Create an TimeseriesTable from a file on the server side\n\n Parameters\n ----------\n conn : CAS\n The CAS connection object\n path : string\n The path that the server can access. If the caslib is specified,\n it is relative path to the file with respect to the caslib.\n otherwise, it is the full path to the file.\n columns : list-of-strings, optional\n columns to keep when loading the data.\n None means it will include all the columns from the source.\n Empty list means include no column, which will generate empty data.\n Default: None\n caslib : string, optional\n The name of the caslib which contains the file to be uploaded.\n Default: None\n importoptions : dict, optional\n Options to import data and upload to the server, such as filetype,\n delimiter, etc. 
None means use the default 'auto' method in the\n importoptions from CAS.upload.\n Default: None\n casout : dict or :class:`CASTable`, optional\n If it is dict, it specifies the output CASTable parameters.\n If it is CASTable, it is the CASTable that will be overwritten.\n None means a new CASTable with random name will be generated.\n Default: None\n\n Returns\n -------\n :class:`TimeseriesTable`\n\n '''\n if casout is None:\n casout_params = {}\n elif isinstance(casout, CASTable):\n casout_params = casout.to_outtable_params()\n elif isinstance(casout, dict):\n casout_params = casout\n\n if 'name' not in casout_params:\n casout_params['name'] = random_name('Timeseries', 6)\n \n if importoptions is None:\n importoptions = {}\n \n if caslib is None:\n caslib, rest_path = cls.find_file_caslib(conn, path)\n if caslib is None:\n server_type = get_cas_host_type(conn).lower()\n if server_type.startswith(\"lin\") or server_type.startswith(\"osx\"):\n path_split = path.rsplit(\"/\", 1)\n else:\n path_split = path.rsplit(\"\\\\\", 1)\n \n caslib = random_name('Caslib', 6)\n rt1 = conn.retrieve('addcaslib', _messagelevel='error', \n name=caslib, path=path_split[0],\n activeonadd=False, subdirectories=False, \n datasource={'srctype':'path'})\n \n if rt1.severity < 2:\n rt2 = conn.retrieve('table.loadTable', \n _messagelevel='error', \n casout=casout_params,\n caslib=caslib,\n importoptions=importoptions,\n path=path_split[1])\n if rt2.severity > 1:\n for msg in rt2.messages:\n print(msg)\n raise DLPyError('cannot load files, something is wrong!')\n else:\n for msg in rt1.messages:\n print(msg)\n raise DLPyError('''cannot create caslib with path:{}, \n something is wrong!'''.format(path_split[0]))\n else:\n rt3 = conn.retrieve('table.loadTable', \n _messagelevel='error', \n casout=casout_params,\n caslib=caslib, \n importoptions=importoptions,\n path=rest_path)\n if rt3.severity > 1:\n for msg in rt3.messages:\n print(msg)\n raise DLPyError('cannot load files, something is wrong!')\n else:\n rt4 = conn.retrieve('table.loadTable', \n _messagelevel='error', \n casout=casout_params,\n caslib=caslib, \n importoptions=importoptions,\n path=path)\n if rt4.severity > 1:\n for msg in rt4.messages:\n print(msg)\n raise DLPyError('cannot load files, something is wrong!')\n \n \n tbl = conn.CASTable(**casout_params)\n \n return cls.from_table(tbl, columns=columns, casout=casout_params)\n \n \n def timeseries_formatting(self, timeid, timeseries, \n timeid_informat=None, timeid_format=None,\n extra_columns=None):\n '''\n Format the TimeseriesTable\n\n Format timeid into appropriate format and check and format\n timeseries columns into numeric columns.\n\n Parameters\n ----------\n timeid : string\n Specifies the column name for the timeid. \n timeseries : string or list-of-strings\n Specifies the column name for the timeseries, that will be part of \n the input or output of the RNN. If str, then it is univariate \n time series. If list of strings, then it is multivariate timeseries. \n timeid_informat : string, optional\n if timeid is in the string format, this is required to parse the \n timeid column. \n Default: None\n timeid_format : string, optional\n Specifies the SAS format that the timeid column will be stored in\n after parsing.\n None means it will be stored in numeric form, not a specific date or datetime format.\n Default: None\n extra_columns : string or list-of-strings, optional\n Specifies the addtional columns to be included. 
\n Empty list means to include no extra columns other than timeid and timeseries.\n if None, all columns are included.\n Default: None\n\n '''\n self.timeid = timeid\n self.timeseries = timeseries\n self.timeid_format = timeid_format\n self.timeid_informat = timeid_informat \n self.extra_columns = extra_columns\n \n input_tbl_params = self.to_outtable_params()\n input_tbl_name = input_tbl_params['name']\n\n conn = self.get_connection()\n \n tbl_colinfo = self.columninfo().ColumnInfo\n \n if self.timeid_format is None:\n if self.timeid_informat is None:\n self.timeid_format = self.timeid_informat\n elif self.timeid_informat.lower().startswith('anydtdtm'):\n self.timeid_format = 'DATETIME19.'\n else:\n self.timeid_format = self.timeid_informat\n \n\n if (((self.timeid_type not in ['double', 'date', 'datetime']) \n and (not self.timeid_type.startswith('int'))) \n and (self.timeid_informat is not None)):\n fmt_code = '''\n data {0}; \n set {0}(rename=({1}=c_{1})); \n {1} = input(c_{1},{2});\n drop c_{1};\n format {1} {3};\n run;\n '''.format(input_tbl_name, self.timeid, \n self.timeid_informat, self.timeid_format) \n \n conn.retrieve('dataStep.runCode', _messagelevel='error', code=fmt_code)\n \n elif (((self.timeid_type not in ['double', 'date', 'datetime']) \n and (not self.timeid_type.startswith('int'))) \n and (self.timeid_informat is None)):\n raise ValueError('''timeid variable is not in the numeric format, \n so timeid_informat is required for parsing the timeid variable. \n ''')\n elif (self.timeid_format is not None):\n fmt_code = '''\n data {0}; \n set {0}; \n format {1} {2};\n run;\n '''.format(input_tbl_name, self.timeid, self.timeid_format) \n conn.retrieve('dataStep.runCode', _messagelevel='error', code=fmt_code) \n else:\n fmt_code = '''\n data {0}; \n set {0}; \n run;\n '''.format(input_tbl_name) \n conn.retrieve('dataStep.runCode', _messagelevel='error', code=fmt_code)\n \n tbl_colinfo = self.columninfo().ColumnInfo\n \n if not isinstance(self.timeseries, list):\n self.timeseries = [self.timeseries]\n \n if set(self.timeseries).issubset(tbl_colinfo.Column):\n char_to_double(conn, tbl_colinfo, input_tbl_name, \n input_tbl_name, self.timeseries)\n else:\n raise ValueError('''One or more variables specified in 'timeseries' \n do not exist in the input table.\n ''')\n \n if self.extra_columns is not None:\n if not isinstance(self.extra_columns, list):\n self.extra_columns = [self.extra_columns] \n \n keepcol = [self.timeid]\n keepcol.extend(self.timeseries + self.extra_columns)\n keepcol = ' '.join(keepcol)\n \n keep_col_sascode = '''\n data {0};\n set {0};\n keep {1};\n run;\n '''.format(input_tbl_name, keepcol)\n \n conn.retrieve('dataStep.runCode', _messagelevel='error', code=keep_col_sascode)\n \n print('NOTE: Timeseries formatting is completed.')\n \n def timeseries_accumlation(self, acc_interval='day',timeid=None,\n timeseries=None, groupby=None,\n extra_num_columns=None, default_ts_acc='sum',\n default_col_acc = 'avg',\n acc_method_byvar=None):\n '''\n Accumulate the TimeseriesTable into regular consecutive intervals\n\n Parameters\n ----------\n acc_interval : string, optional\n The accumulation interval, such as 'year', 'qtr', 'month', 'week',\n 'day', 'hour', 'minute', 'second'. \n timeid : string, optional\n Specifies the column name for the timeid. 
\n If None, it will take the timeid specified in timeseries_formatting.\n Default: None\n timeseries : string or list-of-strings, optional\n Specifies the column name for the timeseries, that will be part of \n the input or output of the RNN. If str, then it is univariate \n time series. If list of strings, then it is multivariate timeseries. \n If None, it will take the timeseries specified in timeseries_formatting.\n Default: None\n groupby : string or list-of-strings, optional\n The groupby variables. \n Default: None\n extra_num_columns : string or list-of-strings, optional\n Specifies the addtional numeric columns to be included for\n accumulation. These columns can include static feature, and might\n be accumulated differently than the timeseries that will be used\n in RNN. if None, it means no additional numeric columns will be\n accumulated for later processing and modeling.\n Default: None\n default_ts_acc : string, optional\n Default accumulation method for timeseries.\n Default: sum\n default_col_acc : string, optional\n Default accumulation method for additional numeric columns\n Default: avg\n acc_method_byvar : dict, optional\n It specifies specific accumulation method for individual columns,\n if the method is different from the default.\n It has following structure: {'column1 name': 'accumulation method1',\n 'column2 name': 'accumulation method2', ...}\n Default: None\n\n '''\n if (timeid is None) and (self.timeid is None):\n raise DLPyError('''timeid is not specified, consider specifying \n and formatting it with timeseries_formatting''')\n elif (timeid is not None) and (timeid != self.timeid):\n warnings.warn('''timeid has not been formatted by timeseries_formatting,\n consider reload the data and use timeseries_formatting to format the data,\n unless the data has already been pre-formatted.''')\n self.timeid = timeid\n\n if timeseries is None:\n if ((hasattr(self, 'timeseries') and self.timeseries is None) or \n (not hasattr(self, 'timeseries'))): \n raise DLPyError('''timeseries is not specified, consider specifying \n and formatting it with timeseries_formatting''')\n else:\n if not isinstance(timeseries, list):\n timeseries = [timeseries]\n \n if ((hasattr(self, 'timeseries') and (self.timeseries is None)) or \n (not hasattr(self, 'timeseries'))): \n warnings.warn('''timeseries has not been formatted by timeseries_formatting,\n consider reload the data and use timeseries_formatting to format the data,\n unless the data has already been pre-formatted.''')\n elif not set(timeseries).issubset(self.timeseries):\n warnings.warn('''timeseries contains variable(s) that has not been\n formatted by timeseries_formatting, consider reload the data and use \n timeseries_formatting to format the data,\n unless the data has already been pre-formatted.''')\n\n self.timeseries = timeseries\n\n self.groupby_var = groupby\n self.extra_num_columns = extra_num_columns\n \n input_tbl_params = self.to_outtable_params()\n input_tbl_name = input_tbl_params['name']\n\n conn = self.get_connection()\n conn.loadactionset('timeData')\n \n tbl_colinfo = self.columninfo().ColumnInfo\n \n if self.groupby_var is None:\n self.groupby_var = []\n elif not isinstance(self.groupby_var, list):\n self.groupby_var = [self.groupby_var]\n \n if set(self.groupby_var).issubset(tbl_colinfo.Column):\n int_to_double(conn, tbl_colinfo, input_tbl_name, \n input_tbl_name, self.groupby_var)\n else:\n raise ValueError('''One or more variables specified in 'groupby' \n do not exist in the input table.\n ''')\n \n 
tbl_colinfo = self.columninfo().ColumnInfo \n \n #Check timeid is in the input columns\n if self.timeid not in tbl_colinfo.Column.values:\n raise ValueError('''variable 'timeid' does not exist in input table.\n ''')\n \n #Check timeseries is in the input columns\n if not isinstance(self.timeseries, list):\n self.timeseries = [self.timeseries]\n \n if not set(self.timeseries).issubset(tbl_colinfo.Column):\n raise ValueError('''One or more variables specified in 'timeseries' \n do not exist in the input table.\n ''')\n \n #Check extra_num_columns is in the input columns \n if self.extra_num_columns is None:\n self.extra_num_columns = []\n elif not isinstance(self.extra_num_columns, list):\n self.extra_num_columns = [self.extra_num_columns]\n \n if not set(self.extra_num_columns).issubset(tbl_colinfo.Column):\n raise ValueError('''One or more variables specified in 'extra_num_columns' \n do not exist in the input table.\n ''')\n\n if self.timeid_type == 'datetime':\n acc_interval = 'dt' + acc_interval\n elif ((self.timeid_type == 'date') \n and (acc_interval.lower() in ['hour', 'minute', 'second'])):\n raise ValueError('''the acc_interval has higher frequency than day, \n yet the timeid variable is in the date format. \n ''') \n\n if acc_method_byvar is None:\n acc_method_byvar = {}\n \n serieslist = []\n for ts in self.timeseries:\n if ts in acc_method_byvar.keys():\n method_dict = {'acc':acc_method_byvar[ts],'name':ts}\n serieslist.append(method_dict)\n else:\n method_dict = {'acc':default_ts_acc,'name':ts}\n serieslist.append(method_dict)\n \n for extra_col in self.extra_num_columns:\n if extra_col in self.timeseries:\n warnings.warn('''\n columns in extra_num_columns are also found in \n timeseries, and will be ignored.\n ''')\n continue\n \n elif extra_col in acc_method_byvar.keys():\n method_dict = {'acc':acc_method_byvar[extra_col],'name':extra_col}\n serieslist.append(method_dict)\n else:\n method_dict = {'acc':default_col_acc,'name':extra_col}\n serieslist.append(method_dict)\n \n acc_result = conn.retrieve('timedata.timeseries', _messagelevel='error',\n table={'groupby':self.groupby_var,'name': input_tbl_name},\n series=serieslist,\n timeid=self.timeid,\n interval=acc_interval,\n trimid='BOTH',\n sumout=dict(name=input_tbl_name + '_summary', replace=True),\n casout=dict(name=input_tbl_name, replace=True))\n \n if acc_interval.startswith('dt'): \n print('NOTE: Timeseries are accumulated to the frequency of {}'.format(acc_interval[2:]))\n else:\n print('NOTE: Timeseries are accumulated to the frequency of {}'.format(acc_interval))\n \n def prepare_subsequences(self, seq_len, target, predictor_timeseries=None,\n timeid=None, groupby=None,\n input_length_name='xlen', target_length_name='ylen',\n missing_handling='drop'):\n '''\n Prepare the subsequences that will be pass into RNN\n\n Parameters\n ----------\n seq_len : int\n subsequence length that will be passed onto RNN.\n target : string\n the target variable for RNN. Currenly only support univariate target,\n so only string is accepted here, not list of strings.\n predictor_timeseries : string or list-of-strings, optional\n Timeseries that will be used to predict target. They will be preprocessed\n into subsequences as well. If None, it will take the target timeseries\n as the predictor, which corresponds to auto-regressive models.\n Default: None\n timeid : string, optional\n Specifies the column name for the timeid. 
\n If None, it will take the timeid specified in timeseries_accumlation.\n Default: None\n groupby : string or list-of-strings, optional\n The groupby variables. if None, it will take the groupby specified\n in timeseries_accumlation.\n Default: None\n input_length_name : string, optional\n The column name in the CASTable specifying input sequence length. \n Default: xlen\n target_length_name : string, optional\n The column name in the CASTable specifying target sequence length. \n currently target length only support length 1 for numeric sequence.\n Default: ylen\n missing_handling : string, optional\n How to handle missing value in the subsequences. \n default: drop\n\n ''' \n tbl_colinfo = self.columninfo().ColumnInfo\n input_tbl_params = self.to_outtable_params()\n input_tbl_name = input_tbl_params['name']\n\n conn = self.get_connection()\n \n if timeid is not None:\n self.timeid = timeid\n elif self.timeid is None:\n raise ValueError('''timeid is not specified''')\n \n if self.timeid not in tbl_colinfo.Column.values:\n raise ValueError('''timeid does not exist in the input table''')\n \n if groupby is not None:\n self.groupby_var = groupby\n \n if self.groupby_var is None:\n self.groupby_var = []\n elif not isinstance(self.groupby_var, list):\n self.groupby_var = [self.groupby_var]\n \n if set(self.groupby_var).issubset(tbl_colinfo.Column):\n int_to_double(conn, tbl_colinfo, input_tbl_name, \n input_tbl_name, self.groupby_var)\n else:\n raise ValueError('''One or more variables specified in 'groupby' \n do not exist in the input table.\n ''')\n \n if isinstance(target, list):\n if len(target) > 1:\n raise DLPyError('''currently only support univariate target''')\n else:\n target = [target]\n \n if predictor_timeseries is None:\n predictor_timeseries = target\n elif not isinstance(predictor_timeseries, list):\n predictor_timeseries = [predictor_timeseries]\n \n if set(target).issubset(predictor_timeseries):\n independent_pred = [var for var in predictor_timeseries \n if var not in target] \n self.auto_regressive = True\n else:\n independent_pred = predictor_timeseries\n self.auto_regressive = False\n \n if not set(target).issubset(tbl_colinfo.Column):\n raise ValueError('''invalid target variable''')\n \n if len(independent_pred) > 0:\n if not set(independent_pred).issubset(tbl_colinfo.Column):\n raise ValueError('''columns in predictor_timeseries are absent from\n the accumulated timeseriest table.''') \n \n if self.timeseries is None:\n warnings.warn('''timeseries has not been formatted by timeseries_formatting,\n consider reload the data and use timeseries_formatting to format the data,\n unless the data has already been pre-formatted.''')\n else:\n if not set(target).issubset(self.timeseries):\n warnings.warn('''target is not in pre-formatted timeseries,\n consider reload the data and use timeseries_formatting to format the data,\n unless the data has already been pre-formatted.''')\n \n if len(independent_pred) > 0:\n if not set(independent_pred).issubset(self.timeseries):\n warnings.warn('''\n some of predictor_timeseries are not in pre-accumulated timeseries,\\n\n consider reload the data and use timeseries_accumulation to accumulate the data,\\n\n unless the data has already been pre-formatted.\n ''')\n \n self.target = target[0]\n self.independent_pred = independent_pred\n self.seq_len = seq_len\n \n if self.seq_len < 1:\n raise ValueError('''RNN sequence length at least need to be 1''') \n \n sasCode = 'data {0}; set {0}; by {1} {2};'.format(\n input_tbl_name, ' 
'.join(self.groupby_var), self.timeid)\n \n if self.seq_len > 1:\n for var in self.independent_pred:\n sasCode += self.create_lags(var, self.seq_len - 1, self.groupby_var)\n \n if self.auto_regressive:\n sasCode += self.create_lags(self.target, self.seq_len, self.groupby_var)\n \n sasCode += '{0} = {1};'.format(input_length_name, self.seq_len)\n sasCode += '{} = 1;'.format(target_length_name) # Currently only support one timestep numeric output.\n if missing_handling == 'drop':\n sasCode += 'if not cmiss(of _all_) then output {};'.format(input_tbl_name)\n sasCode += 'run;'\n if len(self.groupby_var) == 0:\n conn.retrieve('dataStep.runCode', _messagelevel='error', code=sasCode, \n single='Yes')\n else:\n conn.retrieve('dataStep.runCode', _messagelevel='error', code=sasCode)\n \n self.input_vars = []\n \n for i in range(self.seq_len):\n if self.auto_regressive:\n self.input_vars.append('{0}_lag{1}'.format(self.target, i+1))\n \n for var in self.independent_pred:\n if i == 0:\n self.input_vars.append(var)\n else:\n self.input_vars.append('{0}_lag{1}'.format(var, i))\n \n self.input_vars.reverse()\n \n self.tokensize = len(predictor_timeseries)\n \n self.sequence_opt = dict(input_length=input_length_name, \n target_length=target_length_name,\n token_size=self.tokensize)\n \n self.inputs_target = dict(inputs=self.input_vars, \n target=self.target)\n \n print('NOTE: timeseries subsequences are prepared with subsequence length = {}'.format(seq_len))\n \n @property\n def timeid_type(self):\n tbl_colinfo = self.columninfo().ColumnInfo\n timeid_type = self.identify_coltype(self.timeid, tbl_colinfo)\n return timeid_type\n \n @staticmethod\n def identify_coltype(col, tbl_colinfo):\n if col not in tbl_colinfo.Column.values:\n raise ValueError('''variable {} does not exist in input table.\n '''.format(col))\n \n if 'Format' in tbl_colinfo.columns:\n cas_timeid_fmt = tbl_colinfo.Format[tbl_colinfo.Column == col].values[0]\n else:\n cas_timeid_fmt = None\n \n col_type = tbl_colinfo.Type[tbl_colinfo.Column == col].values[0]\n if cas_timeid_fmt:\n for pattern in swat.options.cas.dataset.date_formats:\n if re.match(r'{}\\Z'.format(pattern), cas_timeid_fmt):\n col_type = 'date'\n break\n \n for pattern in swat.options.cas.dataset.datetime_formats:\n if re.match(r'{}\\Z'.format(pattern), cas_timeid_fmt):\n if col_type == 'date':\n raise DLPyError('''{} format in CASTable is ambiguous,\n and can match both sas date and sas datetime format'''.format(col))\n else: \n col_type = 'datetime'\n break\n \n return col_type \n\n def timeseries_partition(self, training_start=None, validation_start=None, \n testing_start=None, end_time=None, \n partition_var_name='split_id', \n traintbl_suffix='train',\n validtbl_suffix='valid',\n testtbl_suffix='test'):\n '''\n Split the dataset into training, validation and testing set\n\n Parameters\n ----------\n training_start : float or :class:`datetime.datetime` or :class:`datetime.date`, optional\n The training set starting time stamp. if None, the training set\n start at the earliest observation record in the table.\n Default: None\n validation_start : float or :class:`datetime.datetime` or :class:`datetime.date`, optional\n The validation set starting time stamp. The training set\n ends right before it. If None, there is no validation set,\n and the training set ends right before the start of\n testing set.\n Default: None\n testing_start : float or :class:`datetime.datetime` or :class:`datetime.date`, optional\n The testing set starting time stamp. 
The validation set \n (or training set if validation set is not specified) ends\n right before it. If None, there is no testing set, and\n the validation set (or training set if validation set is\n not set) ends at the end_time.\n Default: None\n end_time : float or :class:`datetime.datetime` or :class:`datetime.date`, optional\n The end time for the table.\n partition_var_name : string, optional\n The name of the indicator column that indicates training,\n testing and validation.\n Default: 'split_id'.\n traintbl_suffix : string, optional\n The suffix name of the CASTable for the training set.\n Default: 'train'\n validtbl_suffix : string, optional\n The suffix name of the CASTable for the validation set.\n Default: 'valid'\n testtbl_suffix : string, optional\n The suffix name of the CASTable for the testing set.\n Default: 'test'\n\n Returns\n -------\n ( training TimeseriesTable, validation TimeseriesTable, testing TimeseriesTable )\n\n ''' \n self.partition_var_name = partition_var_name\n conn = self.get_connection()\n \n training_start = self.convert_to_sas_time_format(training_start, self.timeid_type)\n validation_start = self.convert_to_sas_time_format(validation_start, self.timeid_type)\n testing_start = self.convert_to_sas_time_format(testing_start, self.timeid_type)\n end_time = self.convert_to_sas_time_format(end_time, self.timeid_type)\n\n if testing_start is None:\n testing_start = end_time\n test_statement = ';'\n else:\n test_statement = self.generate_splitting_code(\n self.timeid, testing_start, end_time, \n True, self.partition_var_name, 'test')\n\n if validation_start is None:\n validation_start = testing_start\n valid_statement = ';'\n else:\n if testing_start == end_time:\n valid_statement = self.generate_splitting_code(\n self.timeid, validation_start, testing_start, \n True, self.partition_var_name, 'valid') \n else:\n valid_statement = self.generate_splitting_code(\n self.timeid, validation_start, testing_start, \n False, self.partition_var_name, 'valid')\n\n if validation_start == end_time:\n train_statement = self.generate_splitting_code(\n self.timeid, training_start, validation_start, \n True, self.partition_var_name, 'train')\n else:\n train_statement = self.generate_splitting_code(\n self.timeid, training_start, validation_start, \n False, self.partition_var_name, 'train')\n \n input_tbl_params = self.to_outtable_params()\n input_tbl_name = input_tbl_params['name']\n \n traintbl_name = '_'.join([input_tbl_name, traintbl_suffix])\n validtbl_name = '_'.join([input_tbl_name, validtbl_suffix])\n testtbl_name = '_'.join([input_tbl_name, testtbl_suffix])\n \n splitting_code = '''\n data {4} {5} {6};\n set {0};\n {1}\n {2}\n {3}\n if {7} = 'train' then output {4};\n if {7} = 'valid' then output {5};\n if {7} = 'test' then output {6};\n run;\n '''.format(input_tbl_name, train_statement, valid_statement, test_statement,\n traintbl_name, validtbl_name, testtbl_name, self.partition_var_name)\n \n conn.retrieve('dataStep.runCode', _messagelevel='error', code=splitting_code)\n \n train_out = dict(name=traintbl_name, timeid=self.timeid, groupby_var=self.groupby_var,\n sequence_opt=self.sequence_opt, inputs_target=self.inputs_target)\n \n valid_out = dict(name=validtbl_name, timeid=self.timeid, groupby_var=self.groupby_var,\n sequence_opt=self.sequence_opt, inputs_target=self.inputs_target)\n \n test_out = dict(name=testtbl_name, timeid=self.timeid, groupby_var=self.groupby_var,\n sequence_opt=self.sequence_opt, inputs_target=self.inputs_target) \n \n train_out_tbl = 
TimeseriesTable(**train_out)\n train_out_tbl.set_connection(conn)\n \n valid_out_tbl = TimeseriesTable(**valid_out)\n valid_out_tbl.set_connection(conn)\n \n test_out_tbl = TimeseriesTable(**test_out)\n test_out_tbl.set_connection(conn)\n \n print('NOTE: Training set has {} observations'.format(train_out_tbl.shape[0]))\n print('NOTE: Validation set has {} observations'.format(valid_out_tbl.shape[0]))\n print('NOTE: Testing set has {} observations'.format(test_out_tbl.shape[0]))\n\n return train_out_tbl, valid_out_tbl, test_out_tbl\n\n @staticmethod\n def generate_splitting_code(timeid, start, end, right_inclusive, \n partition_var_name, partition_val):\n if (start is None) and (end is not None):\n if right_inclusive:\n statement = '''if {0} <= {1} then {2} = '{3}';'''.format(\n timeid, end, partition_var_name, partition_val)\n else:\n statement = '''if {0} < {1} then {2} = '{3}';'''.format(\n timeid, end, partition_var_name, partition_val)\n elif (start is not None) and (end is None):\n statement = '''if {0} >= {1} then {2} = '{3}';'''.format(\n timeid, start, partition_var_name, partition_val)\n elif (start is not None) and (end is not None):\n if right_inclusive:\n statement = '''if {0} >= {1} and {0} <= {2} then {3} = '{4}';'''.format(\n timeid, start, end, partition_var_name, partition_val)\n else:\n statement = '''if {0} >= {1} and {0} < {2} then {3} = '{4}';'''.format(\n timeid, start, end, partition_var_name, partition_val)\n else:\n statement = '''{0} = '{1}';'''.format(partition_var_name, partition_val)\n \n \n return statement\n\n @staticmethod\n def convert_to_sas_time_format(python_time, sas_format_type):\n if sas_format_type == 'date':\n if isinstance(python_time, datetime.date):\n sas_time_str = 'mdy({0},{1},{2})'.format(python_time.month,\n python_time.day, python_time.year)\n return sas_time_str\n elif python_time is None:\n return None\n else:\n raise ValueError('''The timeid type is date format, so the input \n python time variable should be date or datetime format''') \n elif sas_format_type == 'datetime':\n if isinstance(python_time, datetime.datetime):\n sas_time_str = 'dhms(mdy({0},{1},{2}), {3}, {4}, {5})'.format(\n python_time.month, python_time.day, python_time.year, \n python_time.hour, python_time.minute, python_time.second)\n return sas_time_str\n elif isinstance(python_time, datetime.date):\n sas_time_str = 'dhms(mdy({0},{1},{2}), 0, 0, 0)'.format(\n python_time.month, python_time.day, python_time.year)\n return sas_time_str\n elif python_time is None:\n return None\n else:\n raise ValueError('''The timeid type is datetime format, so the input \n python time variable should be date or datetime format''') \n elif sas_format_type == 'double':\n if isinstance(python_time, numbers.Real):\n return python_time\n elif python_time is None:\n return None\n else:\n raise ValueError('''The timeid type is double, so the input \n python time variable should be int or float''') \n else:\n raise DLPyError('''timeid format in CASTable is wrong, consider reload \n the table and formatting it with timeseries_formatting''')\n \n @staticmethod\n def create_lags(varname, nlags, byvar):\n if not isinstance(byvar, list):\n byvar = [byvar]\n \n byvar_strlist = ['first.{}'.format(var) for var in byvar] \n \n sasCode = ''\n for i in range(nlags):\n if i == 0:\n sasCode += '{0}_lag{1} = lag({0});'.format(varname, i+1)\n else:\n sasCode += '{0}_lag{1} = lag({0}_lag{2});'.format(varname, i+1, i)\n \n if len(byvar) > 0:\n sasCode += 'if ' + ' or '.join(byvar_strlist)\n sasCode += ' then 
{0}_lag{1} = .;'.format(varname, i+1)\n \n return sasCode \n\n @staticmethod\n def find_file_caslib(conn, path):\n '''\n Check whether the specified path is in the caslibs of the current session\n \n Parameters\n ----------\n conn : CAS\n Specifies the CAS connection object\n path : string\n Specifies the name of the path.\n \n Returns\n -------\n ( flag, caslib_name )\n flag specifies if path exist in session.\n caslib_name specifies the name of the caslib that contains the path.\n \n '''\n paths = conn.caslibinfo().CASLibInfo.Path.tolist()\n caslibs = conn.caslibinfo().CASLibInfo.Name.tolist()\n subdirs = conn.caslibinfo().CASLibInfo.Subdirs.tolist()\n \n server_type = get_cas_host_type(conn).lower()\n \n if server_type.startswith(\"lin\") or server_type.startswith(\"osx\"):\n sep = '/'\n else:\n sep = '\\\\'\n \n for i, directory in enumerate(paths):\n if path.startswith(directory) and (subdirs[i]==1):\n rest_path = path[len(directory):]\n caslibname = caslibs[i]\n return (caslibname, rest_path)\n elif path.startswith(directory) and (subdirs[i]==0):\n rest_path = path[len(directory):]\n if sep in rest_path:\n continue\n else:\n caslibname = caslibs[i]\n return (caslibname, rest_path)\n \n return (None, None)\n\n\n\n" ]
[ [ "matplotlib.dates.DateFormatter", "pandas.to_datetime", "numpy.issubdtype", "matplotlib.pyplot.subplots", "pandas.Timestamp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
GBillotey/Fractalshades
[ "e100b12db031f016bf1a8a1f4fad9ca1c64a0302", "e100b12db031f016bf1a8a1f4fad9ca1c64a0302" ]
[ "examples/batch_mode/14-burning_ship-deeper_DEM.py", "examples/interactive_deepzoom/D02_run_BS_interactive.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\n============================\n14 - Burning ship deeper DEM\n============================\n\nPlotting of a distance estimation for the Burning ship (power-2).\nThis zoom is deeper, featuring a miniship at 1.e-101\n\nReference:\n`fractalshades.models.Perturbation_burning_ship`\n\"\"\"\n\nimport os\nimport numpy as np\n\nimport fractalshades as fs\nimport fractalshades.models as fsm\n\nimport fractalshades.colors as fscolors\nfrom fractalshades.postproc import (\n Postproc_batch,\n Continuous_iter_pp,\n DEM_normal_pp,\n DEM_pp,\n Raw_pp,\n)\nfrom fractalshades.colors.layers import (\n Color_layer,\n Bool_layer,\n Normal_map_layer,\n Virtual_layer,\n Blinn_lighting,\n)\n\n\ndef plot(plot_dir):\n fs.settings.enable_multithreading = True\n fs.settings.inspect_calc = True\n\n # A simple showcase using perturbation technique\n x = '0.533551593577038561769721161491702555962775680136595415306315189524970818968817900068355227861158570104764433694'\n y = '1.26175074578870311547721223871955368990255513054155186351034363459852900933566891849764050954410207620093433856'\n dx = '7.072814368784043e-101'\n precision = 150\n nx = 2400\n xy_ratio = 1.8\n \n sign = 1.0\n DEM_min = 5.e-5\n zmin = 0.0\n zmax = 1.0\n \n # As this formula is non-analytic, we will 'unskew' based on the \n # influencing miniship \"size estimate\" matrix.\n has_skew = True\n skew_00 = 1.3141410612942215\n skew_01 = 0.8651590600810832\n skew_10 = 0.6372176654581702\n skew_11 = 1.1804627997751416\n\n calc_name=\"Burning_ship\"\n colormap = fscolors.cmap_register[\"dawn\"]\n\n # Run the calculation\n f = fsm.Perturbation_burning_ship(plot_dir)\n # f.clean_up()\n\n f.zoom(\n precision=precision,\n x=x,\n y=y,\n dx=dx,\n nx=nx,\n xy_ratio=xy_ratio,\n theta_deg=-2., \n projection=\"cartesian\",\n antialiasing=False,\n has_skew=has_skew,\n skew_00=skew_00,\n skew_01=skew_01,\n skew_10=skew_10,\n skew_11=skew_11\n )\n\n f.calc_std_div(\n calc_name=calc_name,\n subset=None,\n max_iter=50000,\n M_divergence=1.e3,\n BLA_params={\"eps\": 1.e-6},\n )\n\n f.run()\n print(\"has been run\")\n # Plot the image\n pp = Postproc_batch(f, calc_name)\n pp.add_postproc(\"continuous_iter\", Continuous_iter_pp())\n pp.add_postproc(\"distance_estimation\", DEM_pp())\n pp.add_postproc(\"interior\", Raw_pp(\"stop_reason\", func=\"x != 1.\"))\n pp.add_postproc(\"DEM_map\", DEM_normal_pp(kind=\"potential\"))\n\n plotter = fs.Fractal_plotter(pp) \n plotter.add_layer(Bool_layer(\"interior\", output=False))\n plotter.add_layer(Normal_map_layer(\"DEM_map\", max_slope=50, output=False))\n plotter.add_layer(\n Virtual_layer(\"continuous_iter\", func=None, output=False)\n )\n \n cmap_func = lambda x: sign * np.where(\n np.isinf(x),\n np.log(DEM_min),\n np.log(np.clip(x, DEM_min, None))\n )\n plotter.add_layer(Color_layer(\n \"distance_estimation\",\n func=cmap_func,\n colormap=colormap,\n probes_z=[zmin, zmax],\n probes_kind=\"relative\",\n output=True\n ))\n\n plotter[\"distance_estimation\"].set_mask(plotter[\"interior\"],\n mask_color=(0.0, 0.22745098173618317, 0.9803921580314636))\n plotter[\"DEM_map\"].set_mask(plotter[\"interior\"], mask_color=(0., 0., 0.))\n\n # define the lighting and apply the shading\n light = Blinn_lighting(0.4, np.array([1., 1., 1.]))\n light.add_light_source(\n k_diffuse=0.4,\n k_specular=3.,\n shininess=100.,\n angles=(45., 40.),\n coords=None,\n color=np.array([1.0, 1.0, 0.98]))\n# light.add_light_source(\n# k_diffuse=0.8,\n# k_specular=1.,\n# shininess=40.,\n# angles=(90., 20.),\n# coords=None,\n# 
color=np.array([1., 1., 1.]))\n plotter[\"distance_estimation\"].shade(plotter[\"DEM_map\"], light)\n\n plotter.plot()\n\n\nif __name__ == \"__main__\":\n # Some magic to get the directory for plotting: with a name that matches\n # the file or a temporary dir if we are building the documentation\n try:\n realpath = os.path.realpath(__file__)\n plot_dir = os.path.splitext(realpath)[0]\n plot(plot_dir)\n except NameError:\n import tempfile\n with tempfile.TemporaryDirectory() as plot_dir:\n fs.utils.exec_no_output(plot, plot_dir)\n", "# -*- coding: utf-8 -*-\n\"\"\"\n===============================================\nD02 - Burning Ship arbitrary-precision explorer\n===============================================\n\nThis is a template to explore the Burning Ship set with\narbitrary precision through a GUI.\nIt features the main postprocessing options (continuous\niteration, distance estimation based shading)\n\nAs the Burning ship is a non-holomorphic fractal, some areas can exibit a heavy\nskew. This explorer allows you to use an unskewing matrice and continue\nthe exploration.\nA suitable unskew matrice is usually given by the influencing mini-ship, which\nyou can get as part of a Newton search results : right click on the image and \nselect \"Newton search\".\nWhen the skew parameters are changed, hit rerun to continue the exploration.\n\nGood exploration !\n\nReference:\n`fractalshades.models.Perturbation_burning_ship`\n\"\"\"\nimport typing\nimport os\n\nimport numpy as np\nfrom PyQt6 import QtGui\n\nimport fractalshades as fs\nimport fractalshades.models as fsm\nimport fractalshades.settings as settings\nimport fractalshades.colors as fscolors\nimport fractalshades.gui as fsgui\n\nfrom fractalshades.postproc import (\n Postproc_batch,\n Continuous_iter_pp,\n DEM_pp,\n DEM_normal_pp,\n Raw_pp,\n)\nfrom fractalshades.colors.layers import (\n Color_layer,\n Bool_layer,\n Normal_map_layer,\n Virtual_layer,\n Blinn_lighting\n)\n\n\ndef plot(plot_dir):\n \"\"\"\n Example interactive\n \"\"\"\n import mpmath\n\n x = '-0.5'\n y = '0.5'\n dx = '5.0'\n calc_name = 'test'\n \n xy_ratio = 1.0\n dps = 16\n max_iter = 1500\n nx = 800\n theta_deg = 0.\n has_skew = False\n eps = 1.e-6\n\n base_layer = \"continuous_iter\"\n colormap = fscolors.cmap_register[\"classic\"]\n cmap_z_kind = \"relative\"\n zmin = 0.30\n zmax = 0.60\n \n shade_kind=\"glossy\"\n\n # Set to True to enable multi-threading\n settings.enable_multithreading = True\n\n directory = plot_dir\n fractal = fsm.Perturbation_burning_ship(directory)\n\n def func(\n fractal: fsm.Perturbation_burning_ship=fractal,\n calc_name: str=calc_name,\n\n _1: fsgui.separator=\"Zoom parameters\",\n x: mpmath.mpf=x,\n y: mpmath.mpf=y,\n dx: mpmath.mpf=dx,\n xy_ratio: float=xy_ratio,\n theta_deg: float=theta_deg,\n dps: int=dps,\n nx: int=nx,\n\n _1b: fsgui.separator=\"Skew parameters /!\\ Re-run when modified!\",\n has_skew: bool=has_skew,\n skew_00: float=1.,\n skew_01: float=0.,\n skew_10: float=0.,\n skew_11: float=1.,\n\n _2: fsgui.separator=\"Calculation parameters\",\n max_iter: int=max_iter,\n\n _3: fsgui.separator=\"Bilinear series parameters\",\n eps: float=eps,\n\n _4: fsgui.separator=\"Plotting parameters: base field\",\n base_layer: typing.Literal[\n \"continuous_iter\",\n \"distance_estimation\"\n ]=base_layer,\n interior_color: QtGui.QColor=(0.1, 0.1, 0.1),\n colormap: fscolors.Fractal_colormap=colormap,\n invert_cmap: bool=False,\n DEM_min: float=1.e-6,\n cmap_z_kind: typing.Literal[\"relative\", \"absolute\"]=cmap_z_kind,\n zmin: 
float=zmin,\n zmax: float=zmax,\n\n _5: fsgui.separator=\"Plotting parameters: shading\",\n shade_kind: typing.Literal[\"None\", \"standard\", \"glossy\"]=shade_kind,\n gloss_intensity: float=10.,\n light_angle_deg: float=65.,\n light_color: QtGui.QColor=(1.0, 1.0, 1.0),\n gloss_light_color: QtGui.QColor=(1.0, 1.0, 1.0),\n ):\n\n\n fractal.zoom(precision=dps, x=x, y=y, dx=dx, nx=nx, xy_ratio=xy_ratio,\n theta_deg=theta_deg, projection=\"cartesian\", antialiasing=False,\n has_skew=has_skew, skew_00=skew_00, skew_01=skew_01,\n skew_10=skew_10, skew_11=skew_11\n )\n\n fractal.calc_std_div(\n calc_name=calc_name,\n subset=None,\n max_iter=max_iter,\n M_divergence=1.e3,\n BLA_params={\"eps\": eps},\n )\n\n if fractal.res_available():\n print(\"RES AVAILABLE, no compute\")\n else:\n print(\"RES NOT AVAILABLE, clean-up\")\n fractal.clean_up(calc_name)\n\n fractal.run()\n\n pp = Postproc_batch(fractal, calc_name)\n \n if base_layer == \"continuous_iter\":\n pp.add_postproc(base_layer, Continuous_iter_pp())\n elif base_layer == \"distance_estimation\":\n pp.add_postproc(\"continuous_iter\", Continuous_iter_pp())\n pp.add_postproc(base_layer, DEM_pp())\n\n pp.add_postproc(\"interior\", Raw_pp(\"stop_reason\",\n func=lambda x: x != 1))\n if shade_kind != \"None\":\n pp.add_postproc(\"DEM_map\", DEM_normal_pp(kind=\"potential\"))\n\n plotter = fs.Fractal_plotter(pp) \n plotter.add_layer(Bool_layer(\"interior\", output=False))\n\n if shade_kind != \"None\":\n plotter.add_layer(Normal_map_layer(\n \"DEM_map\", max_slope=60, output=True\n ))\n\n if base_layer != 'continuous_iter':\n plotter.add_layer(\n Virtual_layer(\"continuous_iter\", func=None, output=False)\n )\n\n sign = {False: 1., True: -1.}[invert_cmap]\n if base_layer == 'distance_estimation':\n cmap_func = lambda x: sign * np.where(\n np.isinf(x),\n np.log(DEM_min),\n np.log(np.clip(x, DEM_min, None))\n )\n else:\n cmap_func = lambda x: sign * np.log(x)\n\n plotter.add_layer(Color_layer(\n base_layer,\n func=cmap_func,\n colormap=colormap,\n probes_z=[zmin, zmax],\n probes_kind=cmap_z_kind,\n output=True))\n plotter[base_layer].set_mask(\n plotter[\"interior\"], mask_color=interior_color\n )\n if shade_kind != \"None\":\n light = Blinn_lighting(0.4, np.array([1., 1., 1.]))\n light.add_light_source(\n k_diffuse=0.8,\n k_specular=.0,\n shininess=350.,\n angles=(light_angle_deg, 20.),\n coords=None,\n color=np.array(light_color))\n\n if shade_kind == \"glossy\":\n light.add_light_source(\n k_diffuse=0.2,\n k_specular=gloss_intensity,\n shininess=400.,\n angles=(light_angle_deg, 20.),\n coords=None,\n color=np.array(gloss_light_color))\n\n plotter[base_layer].shade(plotter[\"DEM_map\"], light)\n\n plotter.plot()\n \n # Renaming output to match expected from the Fractal GUI\n layer = plotter[base_layer]\n file_name = \"{}_{}\".format(type(layer).__name__, layer.postname)\n src_path = os.path.join(fractal.directory, file_name + \".png\")\n dest_path = os.path.join(fractal.directory, calc_name + \".png\")\n if os.path.isfile(dest_path):\n os.unlink(dest_path)\n os.link(src_path, dest_path)\n\n\n gui = fsgui.Fractal_GUI(func)\n gui.connect_image(image_param=\"calc_name\")\n gui.connect_mouse(\n x=\"x\", y=\"y\", dx=\"dx\", xy_ratio=\"xy_ratio\", dps=\"dps\",\n has_skew=\"has_skew\", skew_00=\"skew_00\", skew_01=\"skew_01\",\n skew_10=\"skew_10\", skew_11=\"skew_11\"\n )\n gui.show()\n\n\nif __name__ == \"__main__\":\n # Some magic to get the directory for plotting: with a name that matches\n # the file or a temporary dir if we are building the 
documentation\n try:\n realpath = os.path.realpath(__file__)\n plot_dir = os.path.splitext(realpath)[0]\n plot(plot_dir)\n except NameError:\n import tempfile\n with tempfile.TemporaryDirectory() as plot_dir:\n fs.utils.exec_no_output(plot, plot_dir)\n\n" ]
[ [ "numpy.log", "numpy.array", "numpy.isinf", "numpy.clip" ], [ "numpy.log", "numpy.array", "numpy.isinf", "numpy.clip" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jmacdonald2010/mean-variance-standard-deviation-calculator
[ "badae42c099081610fd55ea5a788867c352da6c0" ]
[ "mean_var_std.py" ]
[ "import numpy as np\n\ndef calculate(list):\n if len(list) != 9:\n raise ValueError('List must contain nine numbers.')\n input_array = np.array([[list[0], list[1], list[2]], [list[3], list[4], list[5]], [list[6], list[7], list[8]]])\n calculations = dict()\n print(input_array)\n\n # calc mean\n c_mean = np.mean(input_array, axis=0) # axis 0 is column\n r_mean = np.mean(input_array, axis=1)\n f_mean = np.mean(input_array)\n calculations['mean'] = [c_mean.tolist(), r_mean.tolist(), f_mean]\n\n # variance\n c_var = np.var(input_array, axis=0)\n r_var = np.var(input_array, axis=1)\n f_var = np.var(input_array)\n calculations['variance'] = [c_var.tolist(), r_var.tolist(), f_var]\n\n # standard dev\n c_std = np.std(input_array, axis=0)\n r_std = np.std(input_array, axis=1)\n f_std = np.std(input_array)\n calculations['standard deviation'] = [c_std.tolist(), r_std.tolist(), f_std]\n\n # max\n c_max = np.amax(input_array, axis=0)\n r_max = np.amax(input_array, axis=1)\n f_max = np.amax(input_array)\n calculations['max'] = [c_max.tolist(), r_max.tolist(), f_max]\n\n # min\n c_min = np.amin(input_array, axis=0)\n r_min = np.amin(input_array, axis=1)\n f_min = np.amin(input_array)\n calculations['min'] = [c_min.tolist(), r_min.tolist(), f_min]\n\n # sum\n c_sum = np.sum(input_array, axis=0)\n r_sum = np.sum(input_array, axis=1)\n f_sum = np.sum(input_array)\n calculations['sum'] = [c_sum.tolist(), r_sum.tolist(), f_sum]\n\n return calculations\n\n# this code below is for testing the function, and what the dict should look like when outputting data\n# test calculations\nprint(calculate([0,1,2,3,4,5,6,7,8]))\n# should return:\n'''\n{\n 'mean': [[3.0, 4.0, 5.0], [1.0, 4.0, 7.0], 4.0], \n 'variance': [[6.0, 6.0, 6.0], [0.6666666666666666, 0.6666666666666666, 0.6666666666666666], 6.666666666666667], \n 'standard deviation': [[2.449489742783178, 2.449489742783178, 2.449489742783178], [0.816496580927726, 0.816496580927726, 0.816496580927726], 2.581988897471611],\n 'max': [[6, 7, 8], [2, 5, 8], 8],\n 'min': [[0, 1, 2], [0, 3, 6], 0],\n 'sum': [[9, 12, 15], [3, 12, 21], 36]\n}'''" ]
[ [ "numpy.amax", "numpy.amin", "numpy.std", "numpy.mean", "numpy.var", "numpy.array", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
semohr/pymc3
[ "198d13e2ed6f32b33fd8f4b591a47dc8dd8fe2df", "198d13e2ed6f32b33fd8f4b591a47dc8dd8fe2df" ]
[ "pymc3/tests/test_distributions.py", "pymc3/sampling_jax.py" ]
[ "# Copyright 2020 The PyMC Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport itertools\nimport sys\n\nfrom .helpers import SeededTest, select_by_precision\nfrom ..vartypes import continuous_types\nfrom ..model import Model, Point, Deterministic\nfrom ..blocking import DictToVarBijection\nfrom ..distributions import (\n DensityDist,\n Categorical,\n Multinomial,\n VonMises,\n Dirichlet,\n MvStudentT,\n MvNormal,\n MatrixNormal,\n ZeroInflatedPoisson,\n ZeroInflatedNegativeBinomial,\n Constant,\n Poisson,\n Bernoulli,\n Beta,\n BetaBinomial,\n HalfStudentT,\n StudentT,\n Weibull,\n Pareto,\n InverseGamma,\n Gamma,\n Cauchy,\n HalfCauchy,\n Lognormal,\n Laplace,\n NegativeBinomial,\n Geometric,\n Exponential,\n ExGaussian,\n Normal,\n TruncatedNormal,\n Flat,\n LKJCorr,\n Wald,\n ChiSquared,\n HalfNormal,\n DiscreteUniform,\n Bound,\n Uniform,\n Triangular,\n Binomial,\n SkewNormal,\n DiscreteWeibull,\n Gumbel,\n Logistic,\n OrderedLogistic,\n LogitNormal,\n Interpolated,\n ZeroInflatedBinomial,\n HalfFlat,\n AR1,\n KroneckerNormal,\n Rice,\n Kumaraswamy,\n Moyal,\n HyperGeometric,\n)\n\nfrom ..distributions import continuous\nfrom pymc3.theanof import floatX\nimport pymc3 as pm\nfrom numpy import array, inf, log, exp\nfrom numpy.testing import assert_almost_equal, assert_allclose, assert_equal\nimport numpy.random as nr\nimport numpy as np\nimport pytest\n\nfrom scipy import integrate\nimport scipy.stats.distributions as sp\nimport scipy.stats\nfrom scipy.special import logit\nimport theano\nimport theano.tensor as tt\nfrom ..math import kronecker\n\n\ndef get_lkj_cases():\n \"\"\"\n Log probabilities calculated using the formulas in:\n http://www.sciencedirect.com/science/article/pii/S0047259X09000876\n \"\"\"\n tri = np.array([0.7, 0.0, -0.7])\n return [\n (tri, 1, 3, 1.5963125911388549),\n (tri, 3, 3, -7.7963493376312742),\n (tri, 0, 3, -np.inf),\n (np.array([1.1, 0.0, -0.7]), 1, 3, -np.inf),\n (np.array([0.7, 0.0, -1.1]), 1, 3, -np.inf),\n ]\n\n\nLKJ_CASES = get_lkj_cases()\n\n\nclass Domain:\n def __init__(self, vals, dtype=None, edges=None, shape=None):\n avals = array(vals, dtype=dtype)\n if dtype is None and not str(avals.dtype).startswith(\"int\"):\n avals = avals.astype(theano.config.floatX)\n vals = [array(v, dtype=avals.dtype) for v in vals]\n\n if edges is None:\n edges = array(vals[0]), array(vals[-1])\n vals = vals[1:-1]\n if shape is None:\n shape = avals[0].shape\n\n self.vals = vals\n self.shape = shape\n\n self.lower, self.upper = edges\n self.dtype = avals.dtype\n\n def __add__(self, other):\n return Domain(\n [v + other for v in self.vals],\n self.dtype,\n (self.lower + other, self.upper + other),\n self.shape,\n )\n\n def __mul__(self, other):\n try:\n return Domain(\n [v * other for v in self.vals],\n self.dtype,\n (self.lower * other, self.upper * other),\n self.shape,\n )\n except TypeError:\n return Domain(\n [v * other for v in self.vals],\n self.dtype,\n (self.lower, self.upper),\n self.shape,\n )\n\n def __neg__(self):\n return Domain([-v for 
v in self.vals], self.dtype, (-self.lower, -self.upper), self.shape)\n\n\ndef product(domains, n_samples=-1):\n \"\"\"Get an iterator over a product of domains.\n\n Args:\n domains: a dictionary of (name, object) pairs, where the objects\n must be \"domain-like\", as in, have a `.vals` property\n n_samples: int, maximum samples to return. -1 to return whole product\n\n Returns:\n list of the cartesian product of the domains\n \"\"\"\n try:\n names, domains = zip(*domains.items())\n except ValueError: # domains.items() is empty\n return [{}]\n all_vals = [zip(names, val) for val in itertools.product(*[d.vals for d in domains])]\n if n_samples > 0 and len(all_vals) > n_samples:\n return (all_vals[j] for j in nr.choice(len(all_vals), n_samples, replace=False))\n return all_vals\n\n\nR = Domain([-inf, -2.1, -1, -0.01, 0.0, 0.01, 1, 2.1, inf])\nRplus = Domain([0, 0.01, 0.1, 0.9, 0.99, 1, 1.5, 2, 100, inf])\nRplusbig = Domain([0, 0.5, 0.9, 0.99, 1, 1.5, 2, 20, inf])\nRminusbig = Domain([-inf, -2, -1.5, -1, -0.99, -0.9, -0.5, -0.01, 0])\nUnit = Domain([0, 0.001, 0.1, 0.5, 0.75, 0.99, 1])\n\nCirc = Domain([-np.pi, -2.1, -1, -0.01, 0.0, 0.01, 1, 2.1, np.pi])\n\nRunif = Domain([-1, -0.4, 0, 0.4, 1])\nRdunif = Domain([-10, 0, 10.0])\nRplusunif = Domain([0, 0.5, inf])\nRplusdunif = Domain([2, 10, 100], \"int64\")\n\nI = Domain([-1000, -3, -2, -1, 0, 1, 2, 3, 1000], \"int64\")\n\nNatSmall = Domain([0, 3, 4, 5, 1000], \"int64\")\nNat = Domain([0, 1, 2, 3, 2000], \"int64\")\nNatBig = Domain([0, 1, 2, 3, 5000, 50000], \"int64\")\nPosNat = Domain([1, 2, 3, 2000], \"int64\")\n\nBool = Domain([0, 0, 1, 1], \"int64\")\n\n\ndef build_model(distfam, valuedomain, vardomains, extra_args=None):\n if extra_args is None:\n extra_args = {}\n with Model() as m:\n vals = {}\n for v, dom in vardomains.items():\n vals[v] = Flat(v, dtype=dom.dtype, shape=dom.shape, testval=dom.vals[0])\n vals.update(extra_args)\n distfam(\"value\", shape=valuedomain.shape, transform=None, **vals)\n return m\n\n\ndef integrate_nd(f, domain, shape, dtype):\n if shape == () or shape == (1,):\n if dtype in continuous_types:\n return integrate.quad(f, domain.lower, domain.upper, epsabs=1e-8)[0]\n else:\n return sum(f(j) for j in range(domain.lower, domain.upper + 1))\n elif shape == (2,):\n\n def f2(a, b):\n return f([a, b])\n\n return integrate.dblquad(\n f2,\n domain.lower[0],\n domain.upper[0],\n lambda _: domain.lower[1],\n lambda _: domain.upper[1],\n )[0]\n elif shape == (3,):\n\n def f3(a, b, c):\n return f([a, b, c])\n\n return integrate.tplquad(\n f3,\n domain.lower[0],\n domain.upper[0],\n lambda _: domain.lower[1],\n lambda _: domain.upper[1],\n lambda _, __: domain.lower[2],\n lambda _, __: domain.upper[2],\n )[0]\n else:\n raise ValueError(\"Dont know how to integrate shape: \" + str(shape))\n\n\ndef multinomial_logpdf(value, n, p):\n if value.sum() == n and (0 <= value).all() and (value <= n).all():\n logpdf = scipy.special.gammaln(n + 1)\n logpdf -= scipy.special.gammaln(value + 1).sum()\n logpdf += logpow(p, value).sum()\n return logpdf\n else:\n return -inf\n\n\ndef beta_mu_sigma(value, mu, sigma):\n kappa = mu * (1 - mu) / sigma ** 2 - 1\n if kappa > 0:\n return sp.beta.logpdf(value, mu * kappa, (1 - mu) * kappa)\n else:\n return -inf\n\n\nclass ProductDomain:\n def __init__(self, domains):\n self.vals = list(itertools.product(*[d.vals for d in domains]))\n self.shape = (len(domains),) + domains[0].shape\n self.lower = [d.lower for d in domains]\n self.upper = [d.upper for d in domains]\n self.dtype = 
domains[0].dtype\n\n\ndef Vector(D, n):\n return ProductDomain([D] * n)\n\n\ndef SortedVector(n):\n vals = []\n np.random.seed(42)\n for _ in range(10):\n vals.append(np.sort(np.random.randn(n)))\n return Domain(vals, edges=(None, None))\n\n\ndef UnitSortedVector(n):\n vals = []\n np.random.seed(42)\n for _ in range(10):\n vals.append(np.sort(np.random.rand(n)))\n return Domain(vals, edges=(None, None))\n\n\ndef RealMatrix(n, m):\n vals = []\n np.random.seed(42)\n for _ in range(10):\n vals.append(np.random.randn(n, m))\n return Domain(vals, edges=(None, None))\n\n\ndef simplex_values(n):\n if n == 1:\n yield array([1.0])\n else:\n for v in Unit.vals:\n for vals in simplex_values(n - 1):\n yield np.concatenate([[v], (1 - v) * vals])\n\n\ndef normal_logpdf_tau(value, mu, tau):\n return normal_logpdf_cov(value, mu, np.linalg.inv(tau)).sum()\n\n\ndef normal_logpdf_cov(value, mu, cov):\n return scipy.stats.multivariate_normal.logpdf(value, mu, cov).sum()\n\n\ndef normal_logpdf_chol(value, mu, chol):\n return normal_logpdf_cov(value, mu, np.dot(chol, chol.T)).sum()\n\n\ndef normal_logpdf_chol_upper(value, mu, chol):\n return normal_logpdf_cov(value, mu, np.dot(chol.T, chol)).sum()\n\n\ndef matrix_normal_logpdf_cov(value, mu, rowcov, colcov):\n return scipy.stats.matrix_normal.logpdf(value, mu, rowcov, colcov)\n\n\ndef matrix_normal_logpdf_chol(value, mu, rowchol, colchol):\n return matrix_normal_logpdf_cov(\n value, mu, np.dot(rowchol, rowchol.T), np.dot(colchol, colchol.T)\n )\n\n\ndef kron_normal_logpdf_cov(value, mu, covs, sigma):\n cov = kronecker(*covs).eval()\n if sigma is not None:\n cov += sigma ** 2 * np.eye(*cov.shape)\n return scipy.stats.multivariate_normal.logpdf(value, mu, cov).sum()\n\n\ndef kron_normal_logpdf_chol(value, mu, chols, sigma):\n covs = [np.dot(chol, chol.T) for chol in chols]\n return kron_normal_logpdf_cov(value, mu, covs, sigma=sigma)\n\n\ndef kron_normal_logpdf_evd(value, mu, evds, sigma):\n covs = []\n for eigs, Q in evds:\n try:\n eigs = eigs.eval()\n except AttributeError:\n pass\n try:\n Q = Q.eval()\n except AttributeError:\n pass\n covs.append(np.dot(Q, np.dot(np.diag(eigs), Q.T)))\n return kron_normal_logpdf_cov(value, mu, covs, sigma)\n\n\ndef betafn(a):\n return floatX(scipy.special.gammaln(a).sum(-1) - scipy.special.gammaln(a.sum(-1)))\n\n\ndef logpow(v, p):\n return np.choose(v == 0, [p * np.log(v), 0])\n\n\ndef discrete_weibull_logpmf(value, q, beta):\n return floatX(\n np.log(\n np.power(floatX(q), np.power(floatX(value), floatX(beta)))\n - np.power(floatX(q), np.power(floatX(value + 1), floatX(beta)))\n )\n )\n\n\ndef dirichlet_logpdf(value, a):\n return floatX((-betafn(a) + logpow(value, a - 1).sum(-1)).sum())\n\n\ndef categorical_logpdf(value, p):\n if value >= 0 and value <= len(p):\n return floatX(np.log(np.moveaxis(p, -1, 0)[value]))\n else:\n return -inf\n\n\ndef mvt_logpdf(value, nu, Sigma, mu=0):\n d = len(Sigma)\n dist = np.atleast_2d(value) - mu\n chol = np.linalg.cholesky(Sigma)\n trafo = np.linalg.solve(chol, dist.T).T\n logdet = np.log(np.diag(chol)).sum()\n\n lgamma = scipy.special.gammaln\n norm = lgamma((nu + d) / 2.0) - 0.5 * d * np.log(nu * np.pi) - lgamma(nu / 2.0)\n logp = norm - logdet - (nu + d) / 2.0 * np.log1p((trafo * trafo).sum(-1) / nu)\n return logp.sum()\n\n\ndef AR1_logpdf(value, k, tau_e):\n tau = tau_e * (1 - k ** 2)\n return (\n sp.norm(loc=0, scale=1 / np.sqrt(tau)).logpdf(value[0])\n + sp.norm(loc=k * value[:-1], scale=1 / np.sqrt(tau_e)).logpdf(value[1:]).sum()\n )\n\n\ndef invlogit(x, 
eps=sys.float_info.epsilon):\n return (1.0 - 2.0 * eps) / (1.0 + np.exp(-x)) + eps\n\n\ndef orderedlogistic_logpdf(value, eta, cutpoints):\n c = np.concatenate(([-np.inf], cutpoints, [np.inf]))\n ps = np.array([invlogit(eta - cc) - invlogit(eta - cc1) for cc, cc1 in zip(c[:-1], c[1:])])\n p = ps[value]\n return np.where(np.all(ps >= 0), np.log(p), -np.inf)\n\n\nclass Simplex:\n def __init__(self, n):\n self.vals = list(simplex_values(n))\n self.shape = (n,)\n self.dtype = Unit.dtype\n\n\nclass MultiSimplex:\n def __init__(self, n_dependent, n_independent):\n self.vals = []\n for simplex_value in itertools.product(simplex_values(n_dependent), repeat=n_independent):\n self.vals.append(np.vstack(simplex_value))\n self.shape = (n_independent, n_dependent)\n self.dtype = Unit.dtype\n\n\ndef PdMatrix(n):\n if n == 1:\n return PdMatrix1\n elif n == 2:\n return PdMatrix2\n elif n == 3:\n return PdMatrix3\n else:\n raise ValueError(\"n out of bounds\")\n\n\nPdMatrix1 = Domain([np.eye(1), [[0.5]]], edges=(None, None))\n\nPdMatrix2 = Domain([np.eye(2), [[0.5, 0.05], [0.05, 4.5]]], edges=(None, None))\n\nPdMatrix3 = Domain([np.eye(3), [[0.5, 0.1, 0], [0.1, 1, 0], [0, 0, 2.5]]], edges=(None, None))\n\n\nPdMatrixChol1 = Domain([np.eye(1), [[0.001]]], edges=(None, None))\nPdMatrixChol2 = Domain([np.eye(2), [[0.1, 0], [10, 1]]], edges=(None, None))\nPdMatrixChol3 = Domain([np.eye(3), [[0.1, 0, 0], [10, 100, 0], [0, 1, 10]]], edges=(None, None))\n\n\ndef PdMatrixChol(n):\n if n == 1:\n return PdMatrixChol1\n elif n == 2:\n return PdMatrixChol2\n elif n == 3:\n return PdMatrixChol3\n else:\n raise ValueError(\"n out of bounds\")\n\n\nPdMatrixCholUpper1 = Domain([np.eye(1), [[0.001]]], edges=(None, None))\nPdMatrixCholUpper2 = Domain([np.eye(2), [[0.1, 10], [0, 1]]], edges=(None, None))\nPdMatrixCholUpper3 = Domain(\n [np.eye(3), [[0.1, 10, 0], [0, 100, 1], [0, 0, 10]]], edges=(None, None)\n)\n\n\ndef PdMatrixCholUpper(n):\n if n == 1:\n return PdMatrixCholUpper1\n elif n == 2:\n return PdMatrixCholUpper2\n elif n == 3:\n return PdMatrixCholUpper3\n else:\n raise ValueError(\"n out of bounds\")\n\n\ndef RandomPdMatrix(n):\n A = np.random.rand(n, n)\n return np.dot(A, A.T) + n * np.identity(n)\n\n\nclass TestMatchesScipy(SeededTest):\n def pymc3_matches_scipy(\n self,\n pymc3_dist,\n domain,\n paramdomains,\n scipy_dist,\n decimal=None,\n extra_args=None,\n scipy_args=None,\n ):\n if extra_args is None:\n extra_args = {}\n if scipy_args is None:\n scipy_args = {}\n model = build_model(pymc3_dist, domain, paramdomains, extra_args)\n value = model.named_vars[\"value\"]\n\n def logp(args):\n args.update(scipy_args)\n return scipy_dist(**args)\n\n self.check_logp(model, value, domain, paramdomains, logp, decimal=decimal)\n\n def check_logp(self, model, value, domain, paramdomains, logp_reference, decimal=None):\n domains = paramdomains.copy()\n domains[\"value\"] = domain\n logp = model.fastlogp\n for pt in product(domains, n_samples=100):\n pt = Point(pt, model=model)\n if decimal is None:\n decimal = select_by_precision(float64=6, float32=3)\n assert_almost_equal(logp(pt), logp_reference(pt), decimal=decimal, err_msg=str(pt))\n\n def check_logcdf(\n self,\n pymc3_dist,\n domain,\n paramdomains,\n scipy_logcdf,\n decimal=None,\n n_samples=100,\n ):\n domains = paramdomains.copy()\n domains[\"value\"] = domain\n if decimal is None:\n decimal = select_by_precision(float64=6, float32=3)\n for pt in product(domains, n_samples=n_samples):\n params = dict(pt)\n scipy_cdf = scipy_logcdf(**params)\n value = 
params.pop(\"value\")\n dist = pymc3_dist.dist(**params)\n assert_almost_equal(\n dist.logcdf(value).tag.test_value,\n scipy_cdf,\n decimal=decimal,\n err_msg=str(pt),\n )\n\n def check_int_to_1(self, model, value, domain, paramdomains):\n pdf = model.fastfn(exp(model.logpt))\n for pt in product(paramdomains, n_samples=10):\n pt = Point(pt, value=value.tag.test_value, model=model)\n bij = DictToVarBijection(value, (), pt)\n pdfx = bij.mapf(pdf)\n area = integrate_nd(pdfx, domain, value.dshape, value.dtype)\n assert_almost_equal(area, 1, err_msg=str(pt))\n\n def checkd(self, distfam, valuedomain, vardomains, checks=None, extra_args=None):\n if checks is None:\n checks = (self.check_int_to_1,)\n\n if extra_args is None:\n extra_args = {}\n m = build_model(distfam, valuedomain, vardomains, extra_args=extra_args)\n for check in checks:\n check(m, m.named_vars[\"value\"], valuedomain, vardomains)\n\n def test_uniform(self):\n self.pymc3_matches_scipy(\n Uniform,\n Runif,\n {\"lower\": -Rplusunif, \"upper\": Rplusunif},\n lambda value, lower, upper: sp.uniform.logpdf(value, lower, upper - lower),\n )\n self.check_logcdf(\n Uniform,\n Runif,\n {\"lower\": -Rplusunif, \"upper\": Rplusunif},\n lambda value, lower, upper: sp.uniform.logcdf(value, lower, upper - lower),\n )\n\n def test_triangular(self):\n self.pymc3_matches_scipy(\n Triangular,\n Runif,\n {\"lower\": -Rplusunif, \"c\": Runif, \"upper\": Rplusunif},\n lambda value, c, lower, upper: sp.triang.logpdf(value, c - lower, lower, upper - lower),\n )\n self.check_logcdf(\n Triangular,\n Runif,\n {\"lower\": -Rplusunif, \"c\": Runif, \"upper\": Rplusunif},\n lambda value, c, lower, upper: sp.triang.logcdf(value, c - lower, lower, upper - lower),\n )\n\n def test_bound_normal(self):\n PositiveNormal = Bound(Normal, lower=0.0)\n self.pymc3_matches_scipy(\n PositiveNormal,\n Rplus,\n {\"mu\": Rplus, \"sigma\": Rplus},\n lambda value, mu, sigma: sp.norm.logpdf(value, mu, sigma),\n decimal=select_by_precision(float64=6, float32=-1),\n )\n with Model():\n x = PositiveNormal(\"x\", mu=0, sigma=1, transform=None)\n assert np.isinf(x.logp({\"x\": -1}))\n\n def test_discrete_unif(self):\n self.pymc3_matches_scipy(\n DiscreteUniform,\n Rdunif,\n {\"lower\": -Rplusdunif, \"upper\": Rplusdunif},\n lambda value, lower, upper: sp.randint.logpmf(value, lower, upper + 1),\n )\n\n def test_flat(self):\n self.pymc3_matches_scipy(Flat, Runif, {}, lambda value: 0)\n with Model():\n x = Flat(\"a\")\n assert_allclose(x.tag.test_value, 0)\n self.check_logcdf(Flat, Runif, {}, lambda value: np.log(0.5))\n # Check infinite cases individually.\n assert 0.0 == Flat.dist().logcdf(np.inf).tag.test_value\n assert -np.inf == Flat.dist().logcdf(-np.inf).tag.test_value\n\n def test_half_flat(self):\n self.pymc3_matches_scipy(HalfFlat, Rplus, {}, lambda value: 0)\n with Model():\n x = HalfFlat(\"a\", shape=2)\n assert_allclose(x.tag.test_value, 1)\n assert x.tag.test_value.shape == (2,)\n self.check_logcdf(HalfFlat, Runif, {}, lambda value: -np.inf)\n # Check infinite cases individually.\n assert 0.0 == HalfFlat.dist().logcdf(np.inf).tag.test_value\n assert -np.inf == HalfFlat.dist().logcdf(-np.inf).tag.test_value\n\n def test_normal(self):\n self.pymc3_matches_scipy(\n Normal,\n R,\n {\"mu\": R, \"sigma\": Rplus},\n lambda value, mu, sigma: sp.norm.logpdf(value, mu, sigma),\n decimal=select_by_precision(float64=6, float32=1),\n )\n self.check_logcdf(\n Normal,\n R,\n {\"mu\": R, \"sigma\": Rplus},\n lambda value, mu, sigma: sp.norm.logcdf(value, mu, sigma),\n )\n\n def 
test_truncated_normal(self):\n def scipy_logp(value, mu, sigma, lower, upper):\n return sp.truncnorm.logpdf(\n value, (lower - mu) / sigma, (upper - mu) / sigma, loc=mu, scale=sigma\n )\n\n self.pymc3_matches_scipy(\n TruncatedNormal,\n R,\n {\"mu\": R, \"sigma\": Rplusbig, \"lower\": -Rplusbig, \"upper\": Rplusbig},\n scipy_logp,\n decimal=select_by_precision(float64=6, float32=1),\n )\n\n def test_half_normal(self):\n self.pymc3_matches_scipy(\n HalfNormal,\n Rplus,\n {\"sigma\": Rplus},\n lambda value, sigma: sp.halfnorm.logpdf(value, scale=sigma),\n decimal=select_by_precision(float64=6, float32=-1),\n )\n self.check_logcdf(\n HalfNormal,\n Rplus,\n {\"sigma\": Rplus},\n lambda value, sigma: sp.halfnorm.logcdf(value, scale=sigma),\n )\n\n def test_chi_squared(self):\n self.pymc3_matches_scipy(\n ChiSquared,\n Rplus,\n {\"nu\": Rplusdunif},\n lambda value, nu: sp.chi2.logpdf(value, df=nu),\n )\n\n @pytest.mark.xfail(reason=\"Poor CDF in SciPy. See scipy/scipy#869 for details.\")\n def test_wald_scipy(self):\n self.pymc3_matches_scipy(\n Wald,\n Rplus,\n {\"mu\": Rplus, \"alpha\": Rplus},\n lambda value, mu, alpha: sp.invgauss.logpdf(value, mu=mu, loc=alpha),\n decimal=select_by_precision(float64=6, float32=1),\n )\n self.check_logcdf(\n Wald,\n Rplus,\n {\"mu\": Rplus, \"alpha\": Rplus},\n lambda value, mu, alpha: sp.invgauss.logcdf(value, mu=mu, loc=alpha),\n )\n\n @pytest.mark.parametrize(\n \"value,mu,lam,phi,alpha,logp\",\n [\n (0.5, 0.001, 0.5, None, 0.0, -124500.7257914),\n (1.0, 0.5, 0.001, None, 0.0, -4.3733162),\n (2.0, 1.0, None, None, 0.0, -2.2086593),\n (5.0, 2.0, 2.5, None, 0.0, -3.4374500),\n (7.5, 5.0, None, 1.0, 0.0, -3.2199074),\n (15.0, 10.0, None, 0.75, 0.0, -4.0360623),\n (50.0, 15.0, None, 0.66666, 0.0, -6.1801249),\n (0.5, 0.001, 0.5, None, 0.0, -124500.7257914),\n (1.0, 0.5, 0.001, None, 0.5, -3.3330954),\n (2.0, 1.0, None, None, 1.0, -0.9189385),\n (5.0, 2.0, 2.5, None, 2.0, -2.2128783),\n (7.5, 5.0, None, 1.0, 2.5, -2.5283764),\n (15.0, 10.0, None, 0.75, 5.0, -3.3653647),\n (50.0, 15.0, None, 0.666666, 10.0, -5.6481874),\n ],\n )\n def test_wald(self, value, mu, lam, phi, alpha, logp):\n # Log probabilities calculated using the dIG function from the R package gamlss.\n # See e.g., doi: 10.1111/j.1467-9876.2005.00510.x, or\n # http://www.gamlss.org/.\n with Model() as model:\n Wald(\"wald\", mu=mu, lam=lam, phi=phi, alpha=alpha, transform=None)\n pt = {\"wald\": value}\n decimals = select_by_precision(float64=6, float32=1)\n assert_almost_equal(model.fastlogp(pt), logp, decimal=decimals, err_msg=str(pt))\n\n def test_beta(self):\n self.pymc3_matches_scipy(\n Beta,\n Unit,\n {\"alpha\": Rplus, \"beta\": Rplus},\n lambda value, alpha, beta: sp.beta.logpdf(value, alpha, beta),\n )\n self.pymc3_matches_scipy(Beta, Unit, {\"mu\": Unit, \"sigma\": Rplus}, beta_mu_sigma)\n self.check_logcdf(\n Beta,\n Unit,\n {\"alpha\": Rplus, \"beta\": Rplus},\n lambda value, alpha, beta: sp.beta.logcdf(value, alpha, beta),\n )\n\n def test_kumaraswamy(self):\n # Scipy does not have a built-in Kumaraswamy pdf\n def scipy_log_pdf(value, a, b):\n return (\n np.log(a) + np.log(b) + (a - 1) * np.log(value) + (b - 1) * np.log(1 - value ** a)\n )\n\n self.pymc3_matches_scipy(Kumaraswamy, Unit, {\"a\": Rplus, \"b\": Rplus}, scipy_log_pdf)\n\n def test_exponential(self):\n self.pymc3_matches_scipy(\n Exponential,\n Rplus,\n {\"lam\": Rplus},\n lambda value, lam: sp.expon.logpdf(value, 0, 1 / lam),\n )\n self.check_logcdf(\n Exponential,\n Rplus,\n {\"lam\": Rplus},\n lambda value, lam: 
sp.expon.logcdf(value, 0, 1 / lam),\n )\n\n def test_geometric(self):\n self.pymc3_matches_scipy(\n Geometric, Nat, {\"p\": Unit}, lambda value, p: np.log(sp.geom.pmf(value, p))\n )\n\n def test_hypergeometric(self):\n self.pymc3_matches_scipy(\n HyperGeometric,\n Nat,\n {\"N\": NatSmall, \"k\": NatSmall, \"n\": NatSmall},\n lambda value, N, k, n: sp.hypergeom.logpmf(value, N, k, n),\n )\n\n def test_negative_binomial(self):\n def test_fun(value, mu, alpha):\n return sp.nbinom.logpmf(value, alpha, 1 - mu / (mu + alpha))\n\n self.pymc3_matches_scipy(NegativeBinomial, Nat, {\"mu\": Rplus, \"alpha\": Rplus}, test_fun)\n self.pymc3_matches_scipy(\n NegativeBinomial,\n Nat,\n {\"p\": Unit, \"n\": Rplus},\n lambda value, p, n: sp.nbinom.logpmf(value, n, p),\n )\n\n @pytest.mark.parametrize(\n \"mu, p, alpha, n, expected\",\n [\n (5, None, None, None, \"Must specify either alpha or n.\"),\n (None, 0.5, None, None, \"Must specify either alpha or n.\"),\n (None, None, None, None, \"Must specify either alpha or n.\"),\n (5, None, 2, 2, \"Can't specify both alpha and n.\"),\n (None, 0.5, 2, 2, \"Can't specify both alpha and n.\"),\n (None, None, 2, 2, \"Can't specify both alpha and n.\"),\n (None, None, 2, None, \"Must specify either mu or p.\"),\n (None, None, None, 2, \"Must specify either mu or p.\"),\n (5, 0.5, 2, None, \"Can't specify both mu and p.\"),\n (5, 0.5, None, 2, \"Can't specify both mu and p.\"),\n ],\n )\n def test_negative_binomial_init_fail(self, mu, p, alpha, n, expected):\n with Model():\n with pytest.raises(ValueError, match=f\"Incompatible parametrization. {expected}\"):\n NegativeBinomial(\"x\", mu=mu, p=p, alpha=alpha, n=n)\n\n def test_laplace(self):\n self.pymc3_matches_scipy(\n Laplace,\n R,\n {\"mu\": R, \"b\": Rplus},\n lambda value, mu, b: sp.laplace.logpdf(value, mu, b),\n )\n self.check_logcdf(\n Laplace,\n R,\n {\"mu\": R, \"b\": Rplus},\n lambda value, mu, b: sp.laplace.logcdf(value, mu, b),\n )\n\n def test_lognormal(self):\n self.pymc3_matches_scipy(\n Lognormal,\n Rplus,\n {\"mu\": R, \"tau\": Rplusbig},\n lambda value, mu, tau: floatX(sp.lognorm.logpdf(value, tau ** -0.5, 0, np.exp(mu))),\n )\n self.check_logcdf(\n Lognormal,\n Rplus,\n {\"mu\": R, \"tau\": Rplusbig},\n lambda value, mu, tau: sp.lognorm.logcdf(value, tau ** -0.5, 0, np.exp(mu)),\n )\n\n def test_t(self):\n self.pymc3_matches_scipy(\n StudentT,\n R,\n {\"nu\": Rplus, \"mu\": R, \"lam\": Rplus},\n lambda value, nu, mu, lam: sp.t.logpdf(value, nu, mu, lam ** -0.5),\n )\n self.check_logcdf(\n StudentT,\n R,\n {\"nu\": Rplus, \"mu\": R, \"lam\": Rplus},\n lambda value, nu, mu, lam: sp.t.logcdf(value, nu, mu, lam ** -0.5),\n n_samples=10,\n )\n\n def test_cauchy(self):\n self.pymc3_matches_scipy(\n Cauchy,\n R,\n {\"alpha\": R, \"beta\": Rplusbig},\n lambda value, alpha, beta: sp.cauchy.logpdf(value, alpha, beta),\n )\n self.check_logcdf(\n Cauchy,\n R,\n {\"alpha\": R, \"beta\": Rplusbig},\n lambda value, alpha, beta: sp.cauchy.logcdf(value, alpha, beta),\n )\n\n def test_half_cauchy(self):\n self.pymc3_matches_scipy(\n HalfCauchy,\n Rplus,\n {\"beta\": Rplusbig},\n lambda value, beta: sp.halfcauchy.logpdf(value, scale=beta),\n )\n self.check_logcdf(\n HalfCauchy,\n Rplus,\n {\"beta\": Rplusbig},\n lambda value, beta: sp.halfcauchy.logcdf(value, scale=beta),\n )\n\n def test_gamma(self):\n self.pymc3_matches_scipy(\n Gamma,\n Rplus,\n {\"alpha\": Rplusbig, \"beta\": Rplusbig},\n lambda value, alpha, beta: sp.gamma.logpdf(value, alpha, scale=1.0 / beta),\n )\n\n def test_fun(value, mu, sigma):\n 
return sp.gamma.logpdf(value, mu ** 2 / sigma ** 2, scale=1.0 / (mu / sigma ** 2))\n\n self.pymc3_matches_scipy(Gamma, Rplus, {\"mu\": Rplusbig, \"sigma\": Rplusbig}, test_fun)\n\n self.check_logcdf(\n Gamma,\n Rplus,\n {\"alpha\": Rplusbig, \"beta\": Rplusbig},\n lambda value, alpha, beta: sp.gamma.logcdf(value, alpha, scale=1.0 / beta),\n )\n\n @pytest.mark.xfail(\n condition=(theano.config.floatX == \"float32\"),\n reason=\"Fails on float32 due to numerical issues\",\n )\n def test_inverse_gamma(self):\n self.pymc3_matches_scipy(\n InverseGamma,\n Rplus,\n {\"alpha\": Rplus, \"beta\": Rplus},\n lambda value, alpha, beta: sp.invgamma.logpdf(value, alpha, scale=beta),\n )\n self.check_logcdf(\n InverseGamma,\n Rplus,\n {\"alpha\": Rplus, \"beta\": Rplus},\n lambda value, alpha, beta: sp.invgamma.logcdf(value, alpha, scale=beta),\n )\n\n @pytest.mark.xfail(\n condition=(theano.config.floatX == \"float32\"),\n reason=\"Fails on float32 due to scaling issues\",\n )\n def test_inverse_gamma_alt_params(self):\n def test_fun(value, mu, sigma):\n alpha, beta = InverseGamma._get_alpha_beta(None, None, mu, sigma)\n return sp.invgamma.logpdf(value, alpha, scale=beta)\n\n self.pymc3_matches_scipy(InverseGamma, Rplus, {\"mu\": Rplus, \"sigma\": Rplus}, test_fun)\n\n def test_pareto(self):\n self.pymc3_matches_scipy(\n Pareto,\n Rplus,\n {\"alpha\": Rplusbig, \"m\": Rplusbig},\n lambda value, alpha, m: sp.pareto.logpdf(value, alpha, scale=m),\n )\n self.check_logcdf(\n Pareto,\n Rplus,\n {\"alpha\": Rplusbig, \"m\": Rplusbig},\n lambda value, alpha, m: sp.pareto.logcdf(value, alpha, scale=m),\n )\n\n @pytest.mark.xfail(\n condition=(theano.config.floatX == \"float32\"),\n reason=\"Fails on float32 due to inf issues\",\n )\n def test_weibull(self):\n self.pymc3_matches_scipy(\n Weibull,\n Rplus,\n {\"alpha\": Rplusbig, \"beta\": Rplusbig},\n lambda value, alpha, beta: sp.exponweib.logpdf(value, 1, alpha, scale=beta),\n )\n self.check_logcdf(\n Weibull,\n Rplus,\n {\"alpha\": Rplusbig, \"beta\": Rplusbig},\n lambda value, alpha, beta: sp.exponweib.logcdf(value, 1, alpha, scale=beta),\n )\n\n def test_half_studentt(self):\n # this is only testing for nu=1 (halfcauchy)\n self.pymc3_matches_scipy(\n HalfStudentT,\n Rplus,\n {\"sigma\": Rplus},\n lambda value, sigma: sp.halfcauchy.logpdf(value, 0, sigma),\n )\n\n def test_skew_normal(self):\n self.pymc3_matches_scipy(\n SkewNormal,\n R,\n {\"mu\": R, \"sigma\": Rplusbig, \"alpha\": R},\n lambda value, alpha, mu, sigma: sp.skewnorm.logpdf(value, alpha, mu, sigma),\n )\n\n def test_binomial(self):\n self.pymc3_matches_scipy(\n Binomial,\n Nat,\n {\"n\": NatSmall, \"p\": Unit},\n lambda value, n, p: sp.binom.logpmf(value, n, p),\n )\n\n # Too lazy to propagate decimal parameter through the whole chain of deps\n @pytest.mark.xfail(condition=(theano.config.floatX == \"float32\"), reason=\"Fails on float32\")\n def test_beta_binomial(self):\n self.checkd(BetaBinomial, Nat, {\"alpha\": Rplus, \"beta\": Rplus, \"n\": NatSmall})\n\n def test_bernoulli(self):\n self.pymc3_matches_scipy(\n Bernoulli,\n Bool,\n {\"logit_p\": R},\n lambda value, logit_p: sp.bernoulli.logpmf(value, scipy.special.expit(logit_p)),\n )\n self.pymc3_matches_scipy(\n Bernoulli, Bool, {\"p\": Unit}, lambda value, p: sp.bernoulli.logpmf(value, p)\n )\n\n def test_discrete_weibull(self):\n self.pymc3_matches_scipy(\n DiscreteWeibull,\n Nat,\n {\"q\": Unit, \"beta\": Rplusdunif},\n discrete_weibull_logpmf,\n )\n\n def test_poisson(self):\n self.pymc3_matches_scipy(\n Poisson, Nat, {\"mu\": 
Rplus}, lambda value, mu: sp.poisson.logpmf(value, mu)\n )\n\n def test_bound_poisson(self):\n NonZeroPoisson = Bound(Poisson, lower=1.0)\n self.pymc3_matches_scipy(\n NonZeroPoisson,\n PosNat,\n {\"mu\": Rplus},\n lambda value, mu: sp.poisson.logpmf(value, mu),\n )\n\n with Model():\n x = NonZeroPoisson(\"x\", mu=4)\n assert np.isinf(x.logp({\"x\": 0}))\n\n def test_constantdist(self):\n self.pymc3_matches_scipy(Constant, I, {\"c\": I}, lambda value, c: np.log(c == value))\n\n # Too lazy to propagate decimal parameter through the whole chain of deps\n @pytest.mark.xfail(condition=(theano.config.floatX == \"float32\"), reason=\"Fails on float32\")\n def test_zeroinflatedpoisson(self):\n self.checkd(ZeroInflatedPoisson, Nat, {\"theta\": Rplus, \"psi\": Unit})\n\n # Too lazy to propagate decimal parameter through the whole chain of deps\n @pytest.mark.xfail(condition=(theano.config.floatX == \"float32\"), reason=\"Fails on float32\")\n def test_zeroinflatednegativebinomial(self):\n self.checkd(\n ZeroInflatedNegativeBinomial,\n Nat,\n {\"mu\": Rplusbig, \"alpha\": Rplusbig, \"psi\": Unit},\n )\n\n # Too lazy to propagate decimal parameter through the whole chain of deps\n @pytest.mark.xfail(condition=(theano.config.floatX == \"float32\"), reason=\"Fails on float32\")\n def test_zeroinflatedbinomial(self):\n self.checkd(ZeroInflatedBinomial, Nat, {\"n\": NatSmall, \"p\": Unit, \"psi\": Unit})\n\n @pytest.mark.parametrize(\"n\", [1, 2, 3])\n def test_mvnormal(self, n):\n self.pymc3_matches_scipy(\n MvNormal,\n RealMatrix(5, n),\n {\"mu\": Vector(R, n), \"tau\": PdMatrix(n)},\n normal_logpdf_tau,\n )\n self.pymc3_matches_scipy(\n MvNormal,\n Vector(R, n),\n {\"mu\": Vector(R, n), \"tau\": PdMatrix(n)},\n normal_logpdf_tau,\n )\n self.pymc3_matches_scipy(\n MvNormal,\n RealMatrix(5, n),\n {\"mu\": Vector(R, n), \"cov\": PdMatrix(n)},\n normal_logpdf_cov,\n )\n self.pymc3_matches_scipy(\n MvNormal,\n Vector(R, n),\n {\"mu\": Vector(R, n), \"cov\": PdMatrix(n)},\n normal_logpdf_cov,\n )\n self.pymc3_matches_scipy(\n MvNormal,\n RealMatrix(5, n),\n {\"mu\": Vector(R, n), \"chol\": PdMatrixChol(n)},\n normal_logpdf_chol,\n decimal=select_by_precision(float64=6, float32=-1),\n )\n self.pymc3_matches_scipy(\n MvNormal,\n Vector(R, n),\n {\"mu\": Vector(R, n), \"chol\": PdMatrixChol(n)},\n normal_logpdf_chol,\n decimal=select_by_precision(float64=6, float32=0),\n )\n\n def MvNormalUpper(*args, **kwargs):\n return MvNormal(lower=False, *args, **kwargs)\n\n self.pymc3_matches_scipy(\n MvNormalUpper,\n Vector(R, n),\n {\"mu\": Vector(R, n), \"chol\": PdMatrixCholUpper(n)},\n normal_logpdf_chol_upper,\n decimal=select_by_precision(float64=6, float32=0),\n )\n\n @pytest.mark.xfail(\n condition=(theano.config.floatX == \"float32\"),\n reason=\"Fails on float32 due to inf issues\",\n )\n def test_mvnormal_indef(self):\n cov_val = np.array([[1, 0.5], [0.5, -2]])\n cov = tt.matrix(\"cov\")\n cov.tag.test_value = np.eye(2)\n mu = floatX(np.zeros(2))\n x = tt.vector(\"x\")\n x.tag.test_value = np.zeros(2)\n logp = MvNormal.dist(mu=mu, cov=cov).logp(x)\n f_logp = theano.function([cov, x], logp)\n assert f_logp(cov_val, np.ones(2)) == -np.inf\n dlogp = tt.grad(logp, cov)\n f_dlogp = theano.function([cov, x], dlogp)\n assert not np.all(np.isfinite(f_dlogp(cov_val, np.ones(2))))\n\n logp = MvNormal.dist(mu=mu, tau=cov).logp(x)\n f_logp = theano.function([cov, x], logp)\n assert f_logp(cov_val, np.ones(2)) == -np.inf\n dlogp = tt.grad(logp, cov)\n f_dlogp = theano.function([cov, x], dlogp)\n assert not 
np.all(np.isfinite(f_dlogp(cov_val, np.ones(2))))\n\n def test_mvnormal_init_fail(self):\n with Model():\n with pytest.raises(ValueError):\n x = MvNormal(\"x\", mu=np.zeros(3), shape=3)\n with pytest.raises(ValueError):\n x = MvNormal(\"x\", mu=np.zeros(3), cov=np.eye(3), tau=np.eye(3), shape=3)\n\n @pytest.mark.parametrize(\"n\", [1, 2, 3])\n def test_matrixnormal(self, n):\n mat_scale = 1e3 # To reduce logp magnitude\n mean_scale = 0.1\n self.pymc3_matches_scipy(\n MatrixNormal,\n RealMatrix(n, n),\n {\n \"mu\": RealMatrix(n, n) * mean_scale,\n \"rowcov\": PdMatrix(n) * mat_scale,\n \"colcov\": PdMatrix(n) * mat_scale,\n },\n matrix_normal_logpdf_cov,\n )\n self.pymc3_matches_scipy(\n MatrixNormal,\n RealMatrix(2, n),\n {\n \"mu\": RealMatrix(2, n) * mean_scale,\n \"rowcov\": PdMatrix(2) * mat_scale,\n \"colcov\": PdMatrix(n) * mat_scale,\n },\n matrix_normal_logpdf_cov,\n )\n self.pymc3_matches_scipy(\n MatrixNormal,\n RealMatrix(3, n),\n {\n \"mu\": RealMatrix(3, n) * mean_scale,\n \"rowchol\": PdMatrixChol(3) * mat_scale,\n \"colchol\": PdMatrixChol(n) * mat_scale,\n },\n matrix_normal_logpdf_chol,\n decimal=select_by_precision(float64=6, float32=-1),\n )\n self.pymc3_matches_scipy(\n MatrixNormal,\n RealMatrix(n, 3),\n {\n \"mu\": RealMatrix(n, 3) * mean_scale,\n \"rowchol\": PdMatrixChol(n) * mat_scale,\n \"colchol\": PdMatrixChol(3) * mat_scale,\n },\n matrix_normal_logpdf_chol,\n decimal=select_by_precision(float64=6, float32=0),\n )\n\n @pytest.mark.parametrize(\"n\", [2, 3])\n @pytest.mark.parametrize(\"m\", [3])\n @pytest.mark.parametrize(\"sigma\", [None, 1.0])\n def test_kroneckernormal(self, n, m, sigma):\n np.random.seed(5)\n N = n * m\n covs = [RandomPdMatrix(n), RandomPdMatrix(m)]\n chols = list(map(np.linalg.cholesky, covs))\n evds = list(map(np.linalg.eigh, covs))\n dom = Domain([np.random.randn(N) * 0.1], edges=(None, None), shape=N)\n mu = Domain([np.random.randn(N) * 0.1], edges=(None, None), shape=N)\n\n std_args = {\"mu\": mu}\n cov_args = {\"covs\": covs}\n chol_args = {\"chols\": chols}\n evd_args = {\"evds\": evds}\n if sigma is not None and sigma != 0:\n std_args[\"sigma\"] = Domain([sigma], edges=(None, None))\n else:\n for args in [cov_args, chol_args, evd_args]:\n args[\"sigma\"] = sigma\n\n self.pymc3_matches_scipy(\n KroneckerNormal,\n dom,\n std_args,\n kron_normal_logpdf_cov,\n extra_args=cov_args,\n scipy_args=cov_args,\n )\n self.pymc3_matches_scipy(\n KroneckerNormal,\n dom,\n std_args,\n kron_normal_logpdf_chol,\n extra_args=chol_args,\n scipy_args=chol_args,\n )\n self.pymc3_matches_scipy(\n KroneckerNormal,\n dom,\n std_args,\n kron_normal_logpdf_evd,\n extra_args=evd_args,\n scipy_args=evd_args,\n )\n\n dom = Domain([np.random.randn(2, N) * 0.1], edges=(None, None), shape=(2, N))\n\n self.pymc3_matches_scipy(\n KroneckerNormal,\n dom,\n std_args,\n kron_normal_logpdf_cov,\n extra_args=cov_args,\n scipy_args=cov_args,\n )\n self.pymc3_matches_scipy(\n KroneckerNormal,\n dom,\n std_args,\n kron_normal_logpdf_chol,\n extra_args=chol_args,\n scipy_args=chol_args,\n )\n self.pymc3_matches_scipy(\n KroneckerNormal,\n dom,\n std_args,\n kron_normal_logpdf_evd,\n extra_args=evd_args,\n scipy_args=evd_args,\n )\n\n @pytest.mark.parametrize(\"n\", [1, 2])\n def test_mvt(self, n):\n self.pymc3_matches_scipy(\n MvStudentT,\n Vector(R, n),\n {\"nu\": Rplus, \"Sigma\": PdMatrix(n), \"mu\": Vector(R, n)},\n mvt_logpdf,\n )\n self.pymc3_matches_scipy(\n MvStudentT,\n RealMatrix(2, n),\n {\"nu\": Rplus, \"Sigma\": PdMatrix(n), \"mu\": Vector(R, n)},\n 
mvt_logpdf,\n )\n\n @pytest.mark.parametrize(\"n\", [2, 3, 4])\n def test_AR1(self, n):\n self.pymc3_matches_scipy(AR1, Vector(R, n), {\"k\": Unit, \"tau_e\": Rplus}, AR1_logpdf)\n\n @pytest.mark.parametrize(\"n\", [2, 3])\n def test_wishart(self, n):\n # This check compares the autodiff gradient to the numdiff gradient.\n # However, due to the strict constraints of the wishart,\n # it is impossible to numerically determine the gradient as a small\n # pertubation breaks the symmetry. Thus disabling. Also, numdifftools was\n # removed in June 2019, so an alternative would be needed.\n #\n # self.checkd(Wishart, PdMatrix(n), {'n': Domain([2, 3, 4, 2000]), 'V': PdMatrix(n)},\n # checks=[self.check_dlogp])\n pass\n\n @pytest.mark.parametrize(\"x,eta,n,lp\", LKJ_CASES)\n def test_lkj(self, x, eta, n, lp):\n with Model() as model:\n LKJCorr(\"lkj\", eta=eta, n=n, transform=None)\n\n pt = {\"lkj\": x}\n decimals = select_by_precision(float64=6, float32=4)\n assert_almost_equal(model.fastlogp(pt), lp, decimal=decimals, err_msg=str(pt))\n\n @pytest.mark.parametrize(\"n\", [2, 3])\n def test_dirichlet(self, n):\n self.pymc3_matches_scipy(Dirichlet, Simplex(n), {\"a\": Vector(Rplus, n)}, dirichlet_logpdf)\n\n def test_dirichlet_shape(self):\n a = tt.as_tensor_variable(np.r_[1, 2])\n with pytest.warns(DeprecationWarning):\n dir_rv = Dirichlet.dist(a)\n assert dir_rv.shape == (2,)\n\n with pytest.warns(DeprecationWarning), theano.change_flags(compute_test_value=\"ignore\"):\n dir_rv = Dirichlet.dist(tt.vector())\n\n def test_dirichlet_2D(self):\n self.pymc3_matches_scipy(\n Dirichlet,\n MultiSimplex(2, 2),\n {\"a\": Vector(Vector(Rplus, 2), 2)},\n dirichlet_logpdf,\n )\n\n @pytest.mark.parametrize(\"n\", [2, 3])\n def test_multinomial(self, n):\n self.pymc3_matches_scipy(\n Multinomial, Vector(Nat, n), {\"p\": Simplex(n), \"n\": Nat}, multinomial_logpdf\n )\n\n @pytest.mark.parametrize(\n \"p,n\",\n [\n [[0.25, 0.25, 0.25, 0.25], 1],\n [[0.3, 0.6, 0.05, 0.05], 2],\n [[0.3, 0.6, 0.05, 0.05], 10],\n ],\n )\n def test_multinomial_mode(self, p, n):\n _p = np.array(p)\n with Model() as model:\n m = Multinomial(\"m\", n, _p, _p.shape)\n assert_allclose(m.distribution.mode.eval().sum(), n)\n _p = np.array([p, p])\n with Model() as model:\n m = Multinomial(\"m\", n, _p, _p.shape)\n assert_allclose(m.distribution.mode.eval().sum(axis=-1), n)\n\n @pytest.mark.parametrize(\n \"p, shape, n\",\n [\n [[0.25, 0.25, 0.25, 0.25], 4, 2],\n [[0.25, 0.25, 0.25, 0.25], (1, 4), 3],\n # 3: expect to fail\n # [[.25, .25, .25, .25], (10, 4)],\n [[0.25, 0.25, 0.25, 0.25], (10, 1, 4), 5],\n # 5: expect to fail\n # [[[.25, .25, .25, .25]], (2, 4), [7, 11]],\n [[[0.25, 0.25, 0.25, 0.25], [0.25, 0.25, 0.25, 0.25]], (2, 4), 13],\n [[[0.25, 0.25, 0.25, 0.25], [0.25, 0.25, 0.25, 0.25]], (1, 2, 4), [23, 29]],\n [\n [[0.25, 0.25, 0.25, 0.25], [0.25, 0.25, 0.25, 0.25]],\n (10, 2, 4),\n [31, 37],\n ],\n [[[0.25, 0.25, 0.25, 0.25], [0.25, 0.25, 0.25, 0.25]], (2, 4), [17, 19]],\n ],\n )\n def test_multinomial_random(self, p, shape, n):\n p = np.asarray(p)\n with Model() as model:\n m = Multinomial(\"m\", n=n, p=p, shape=shape)\n m.random()\n\n def test_multinomial_mode_with_shape(self):\n n = [1, 10]\n p = np.asarray([[0.25, 0.25, 0.25, 0.25], [0.26, 0.26, 0.26, 0.22]])\n with Model() as model:\n m = Multinomial(\"m\", n=n, p=p, shape=(2, 4))\n assert_allclose(m.distribution.mode.eval().sum(axis=-1), n)\n\n def test_multinomial_vec(self):\n vals = np.array([[2, 4, 4], [3, 3, 4]])\n p = np.array([0.2, 0.3, 0.5])\n n = 10\n\n with Model() as 
model_single:\n Multinomial(\"m\", n=n, p=p, shape=len(p))\n\n with Model() as model_many:\n Multinomial(\"m\", n=n, p=p, shape=vals.shape)\n\n assert_almost_equal(\n scipy.stats.multinomial.logpmf(vals, n, p),\n np.asarray([model_single.fastlogp({\"m\": val}) for val in vals]),\n decimal=4,\n )\n\n assert_almost_equal(\n scipy.stats.multinomial.logpmf(vals, n, p),\n model_many.free_RVs[0].logp_elemwise({\"m\": vals}).squeeze(),\n decimal=4,\n )\n\n assert_almost_equal(\n sum([model_single.fastlogp({\"m\": val}) for val in vals]),\n model_many.fastlogp({\"m\": vals}),\n decimal=4,\n )\n\n def test_multinomial_vec_1d_n(self):\n vals = np.array([[2, 4, 4], [4, 3, 4]])\n p = np.array([0.2, 0.3, 0.5])\n ns = np.array([10, 11])\n\n with Model() as model:\n Multinomial(\"m\", n=ns, p=p, shape=vals.shape)\n\n assert_almost_equal(\n sum([multinomial_logpdf(val, n, p) for val, n in zip(vals, ns)]),\n model.fastlogp({\"m\": vals}),\n decimal=4,\n )\n\n def test_multinomial_vec_1d_n_2d_p(self):\n vals = np.array([[2, 4, 4], [4, 3, 4]])\n ps = np.array([[0.2, 0.3, 0.5], [0.9, 0.09, 0.01]])\n ns = np.array([10, 11])\n\n with Model() as model:\n Multinomial(\"m\", n=ns, p=ps, shape=vals.shape)\n\n assert_almost_equal(\n sum([multinomial_logpdf(val, n, p) for val, n, p in zip(vals, ns, ps)]),\n model.fastlogp({\"m\": vals}),\n decimal=4,\n )\n\n def test_multinomial_vec_2d_p(self):\n vals = np.array([[2, 4, 4], [3, 3, 4]])\n ps = np.array([[0.2, 0.3, 0.5], [0.3, 0.3, 0.4]])\n n = 10\n\n with Model() as model:\n Multinomial(\"m\", n=n, p=ps, shape=vals.shape)\n\n assert_almost_equal(\n sum([multinomial_logpdf(val, n, p) for val, p in zip(vals, ps)]),\n model.fastlogp({\"m\": vals}),\n decimal=4,\n )\n\n def test_batch_multinomial(self):\n n = 10\n vals = np.zeros((4, 5, 3), dtype=\"int32\")\n p = np.zeros_like(vals, dtype=theano.config.floatX)\n inds = np.random.randint(vals.shape[-1], size=vals.shape[:-1])[..., None]\n np.put_along_axis(vals, inds, n, axis=-1)\n np.put_along_axis(p, inds, 1, axis=-1)\n\n dist = Multinomial.dist(n=n, p=p, shape=vals.shape)\n value = tt.tensor3(dtype=\"int32\")\n value.tag.test_value = np.zeros_like(vals, dtype=\"int32\")\n logp = tt.exp(dist.logp(value))\n f = theano.function(inputs=[value], outputs=logp)\n assert_almost_equal(\n f(vals),\n np.ones(vals.shape[:-1] + (1,)),\n decimal=select_by_precision(float64=6, float32=3),\n )\n\n sample = dist.random(size=2)\n assert_allclose(sample, np.stack([vals, vals], axis=0))\n\n def test_categorical_bounds(self):\n with Model():\n x = Categorical(\"x\", p=np.array([0.2, 0.3, 0.5]))\n assert np.isinf(x.logp({\"x\": -1}))\n assert np.isinf(x.logp({\"x\": 3}))\n\n def test_categorical_valid_p(self):\n with Model():\n x = Categorical(\"x\", p=np.array([-0.2, 0.3, 0.5]))\n assert np.isinf(x.logp({\"x\": 0}))\n assert np.isinf(x.logp({\"x\": 1}))\n assert np.isinf(x.logp({\"x\": 2}))\n with Model():\n # A model where p sums to 1 but contains negative values\n x = Categorical(\"x\", p=np.array([-0.2, 0.7, 0.5]))\n assert np.isinf(x.logp({\"x\": 0}))\n assert np.isinf(x.logp({\"x\": 1}))\n assert np.isinf(x.logp({\"x\": 2}))\n with Model():\n # Hard edge case from #2082\n # Early automatic normalization of p's sum would hide the negative\n # entries if there is a single or pair number of negative values\n # and the rest are zero\n x = Categorical(\"x\", p=np.array([-1, -1, 0, 0]))\n assert np.isinf(x.logp({\"x\": 0}))\n assert np.isinf(x.logp({\"x\": 1}))\n assert np.isinf(x.logp({\"x\": 2}))\n assert np.isinf(x.logp({\"x\": 3}))\n\n 
@pytest.mark.parametrize(\"n\", [2, 3, 4])\n def test_categorical(self, n):\n self.pymc3_matches_scipy(\n Categorical,\n Domain(range(n), \"int64\"),\n {\"p\": Simplex(n)},\n lambda value, p: categorical_logpdf(value, p),\n )\n\n @pytest.mark.parametrize(\"n\", [2, 3, 4])\n def test_orderedlogistic(self, n):\n self.pymc3_matches_scipy(\n OrderedLogistic,\n Domain(range(n), \"int64\"),\n {\"eta\": R, \"cutpoints\": Vector(R, n - 1)},\n lambda value, eta, cutpoints: orderedlogistic_logpdf(value, eta, cutpoints),\n )\n\n def test_densitydist(self):\n def logp(x):\n return -log(2 * 0.5) - abs(x - 0.5) / 0.5\n\n self.checkd(DensityDist, R, {}, extra_args={\"logp\": logp})\n\n def test_get_tau_sigma(self):\n sigma = np.array([2])\n assert_almost_equal(continuous.get_tau_sigma(sigma=sigma), [1.0 / sigma ** 2, sigma])\n\n @pytest.mark.parametrize(\n \"value,mu,sigma,nu,logp\",\n [\n (0.5, -50.000, 0.500, 0.500, -99.8068528),\n (1.0, -1.000, 0.001, 0.001, -1992.5922447),\n (2.0, 0.001, 1.000, 1.000, -1.6720416),\n (5.0, 0.500, 2.500, 2.500, -2.4543644),\n (7.5, 2.000, 5.000, 5.000, -2.8259429),\n (15.0, 5.000, 7.500, 7.500, -3.3093854),\n (50.0, 50.000, 10.000, 10.000, -3.6436067),\n (1000.0, 500.000, 10.000, 20.000, -27.8707323),\n ],\n )\n def test_ex_gaussian(self, value, mu, sigma, nu, logp):\n \"\"\"Log probabilities calculated using the dexGAUS function from the R package gamlss.\n See e.g., doi: 10.1111/j.1467-9876.2005.00510.x, or http://www.gamlss.org/.\"\"\"\n with Model() as model:\n ExGaussian(\"eg\", mu=mu, sigma=sigma, nu=nu)\n pt = {\"eg\": value}\n assert_almost_equal(\n model.fastlogp(pt),\n logp,\n decimal=select_by_precision(float64=6, float32=2),\n err_msg=str(pt),\n )\n\n @pytest.mark.parametrize(\n \"value,mu,sigma,nu,logcdf\",\n [\n (0.5, -50.000, 0.500, 0.500, 0.0000000),\n (1.0, -1.000, 0.001, 0.001, 0.0000000),\n (2.0, 0.001, 1.000, 1.000, -0.2365674),\n (5.0, 0.500, 2.500, 2.500, -0.2886489),\n (7.5, 2.000, 5.000, 5.000, -0.5655104),\n (15.0, 5.000, 7.500, 7.500, -0.4545255),\n (50.0, 50.000, 10.000, 10.000, -1.433714),\n (1000.0, 500.000, 10.000, 20.000, -1.573708e-11),\n ],\n )\n def test_ex_gaussian_cdf(self, value, mu, sigma, nu, logcdf):\n \"\"\"Log probabilities calculated using the pexGAUS function from the R package gamlss.\n See e.g., doi: 10.1111/j.1467-9876.2005.00510.x, or http://www.gamlss.org/.\"\"\"\n assert_almost_equal(\n ExGaussian.dist(mu=mu, sigma=sigma, nu=nu).logcdf(value).tag.test_value,\n logcdf,\n decimal=select_by_precision(float64=6, float32=2),\n err_msg=str((value, mu, sigma, nu, logcdf)),\n )\n\n @pytest.mark.xfail(condition=(theano.config.floatX == \"float32\"), reason=\"Fails on float32\")\n def test_vonmises(self):\n self.pymc3_matches_scipy(\n VonMises,\n R,\n {\"mu\": Circ, \"kappa\": Rplus},\n lambda value, mu, kappa: floatX(sp.vonmises.logpdf(value, kappa, loc=mu)),\n )\n\n def test_gumbel(self):\n def gumbel(value, mu, beta):\n return floatX(sp.gumbel_r.logpdf(value, loc=mu, scale=beta))\n\n self.pymc3_matches_scipy(Gumbel, R, {\"mu\": R, \"beta\": Rplusbig}, gumbel)\n\n def gumbellcdf(value, mu, beta):\n return floatX(sp.gumbel_r.logcdf(value, loc=mu, scale=beta))\n\n self.check_logcdf(Gumbel, R, {\"mu\": R, \"beta\": Rplusbig}, gumbellcdf)\n\n def test_logistic(self):\n self.pymc3_matches_scipy(\n Logistic,\n R,\n {\"mu\": R, \"s\": Rplus},\n lambda value, mu, s: sp.logistic.logpdf(value, mu, s),\n decimal=select_by_precision(float64=6, float32=1),\n )\n self.check_logcdf(\n Logistic,\n R,\n {\"mu\": R, \"s\": Rplus},\n lambda 
value, mu, s: sp.logistic.logcdf(value, mu, s),\n decimal=select_by_precision(float64=6, float32=1),\n )\n\n def test_logitnormal(self):\n self.pymc3_matches_scipy(\n LogitNormal,\n Unit,\n {\"mu\": R, \"sigma\": Rplus},\n lambda value, mu, sigma: (\n sp.norm.logpdf(logit(value), mu, sigma) - (np.log(value) + np.log1p(-value))\n ),\n decimal=select_by_precision(float64=6, float32=1),\n )\n\n def test_multidimensional_beta_construction(self):\n with Model():\n Beta(\"beta\", alpha=1.0, beta=1.0, shape=(10, 20))\n\n def test_rice(self):\n self.pymc3_matches_scipy(\n Rice,\n Rplus,\n {\"nu\": Rplus, \"sigma\": Rplusbig},\n lambda value, nu, sigma: sp.rice.logpdf(value, b=nu / sigma, loc=0, scale=sigma),\n )\n self.pymc3_matches_scipy(\n Rice,\n Rplus,\n {\"b\": Rplus, \"sigma\": Rplusbig},\n lambda value, b, sigma: sp.rice.logpdf(value, b=b, loc=0, scale=sigma),\n )\n\n @pytest.mark.xfail(condition=(theano.config.floatX == \"float32\"), reason=\"Fails on float32\")\n def test_moyal(self):\n self.pymc3_matches_scipy(\n Moyal,\n R,\n {\"mu\": R, \"sigma\": Rplusbig},\n lambda value, mu, sigma: floatX(sp.moyal.logpdf(value, mu, sigma)),\n )\n self.check_logcdf(\n Moyal,\n R,\n {\"mu\": R, \"sigma\": Rplusbig},\n lambda value, mu, sigma: floatX(sp.moyal.logcdf(value, mu, sigma)),\n )\n\n @pytest.mark.xfail(condition=(theano.config.floatX == \"float32\"), reason=\"Fails on float32\")\n def test_interpolated(self):\n for mu in R.vals:\n for sigma in Rplus.vals:\n # pylint: disable=cell-var-from-loop\n xmin = mu - 5 * sigma\n xmax = mu + 5 * sigma\n\n class TestedInterpolated(Interpolated):\n def __init__(self, **kwargs):\n x_points = np.linspace(xmin, xmax, 100000)\n pdf_points = sp.norm.pdf(x_points, loc=mu, scale=sigma)\n super().__init__(x_points=x_points, pdf_points=pdf_points, **kwargs)\n\n def ref_pdf(value):\n return np.where(\n np.logical_and(value >= xmin, value <= xmax),\n sp.norm.logpdf(value, mu, sigma),\n -np.inf * np.ones(value.shape),\n )\n\n self.pymc3_matches_scipy(TestedInterpolated, R, {}, ref_pdf)\n\n\ndef test_bound():\n np.random.seed(42)\n UnboundNormal = Bound(Normal)\n dist = UnboundNormal.dist(mu=0, sigma=1)\n assert dist.transform is None\n assert dist.default() == 0.0\n assert isinstance(dist.random(), np.ndarray)\n\n LowerNormal = Bound(Normal, lower=1)\n dist = LowerNormal.dist(mu=0, sigma=1)\n assert dist.logp(0).eval() == -np.inf\n assert dist.default() > 1\n assert dist.transform is not None\n assert np.all(dist.random() > 1)\n\n UpperNormal = Bound(Normal, upper=-1)\n dist = UpperNormal.dist(mu=0, sigma=1)\n assert dist.logp(-0.5).eval() == -np.inf\n assert dist.default() < -1\n assert dist.transform is not None\n assert np.all(dist.random() < -1)\n\n ArrayNormal = Bound(Normal, lower=[1, 2], upper=[2, 3])\n dist = ArrayNormal.dist(mu=0, sigma=1, shape=2)\n assert_equal(dist.logp([0.5, 3.5]).eval(), -np.array([np.inf, np.inf]))\n assert_equal(dist.default(), np.array([1.5, 2.5]))\n assert dist.transform is not None\n with pytest.raises(ValueError) as err:\n dist.random()\n err.match(\"Drawing samples from distributions with array-valued\")\n\n with Model():\n a = ArrayNormal(\"c\", shape=2)\n assert_equal(a.tag.test_value, np.array([1.5, 2.5]))\n\n lower = tt.vector(\"lower\")\n lower.tag.test_value = np.array([1, 2]).astype(theano.config.floatX)\n upper = 3\n ArrayNormal = Bound(Normal, lower=lower, upper=upper)\n dist = ArrayNormal.dist(mu=0, sigma=1, shape=2)\n logp = dist.logp([0.5, 3.5]).eval({lower: lower.tag.test_value})\n assert_equal(logp, 
-np.array([np.inf, np.inf]))\n assert_equal(dist.default(), np.array([2, 2.5]))\n assert dist.transform is not None\n\n with Model():\n a = ArrayNormal(\"c\", shape=2)\n assert_equal(a.tag.test_value, np.array([2, 2.5]))\n\n rand = Bound(Binomial, lower=10).dist(n=20, p=0.3).random()\n assert rand.dtype in [np.int16, np.int32, np.int64]\n assert rand >= 10\n\n rand = Bound(Binomial, upper=10).dist(n=20, p=0.8).random()\n assert rand.dtype in [np.int16, np.int32, np.int64]\n assert rand <= 10\n\n rand = Bound(Binomial, lower=5, upper=8).dist(n=10, p=0.6).random()\n assert rand.dtype in [np.int16, np.int32, np.int64]\n assert rand >= 5 and rand <= 8\n\n with Model():\n BoundPoisson = Bound(Poisson, upper=6)\n BoundPoisson(name=\"y\", mu=1)\n\n with Model():\n BoundNormalNamedArgs = Bound(Normal, upper=6)(\"y\", mu=2.0, sd=1.0)\n BoundNormalPositionalArgs = Bound(Normal, upper=6)(\"x\", 2.0, 1.0)\n\n with Model():\n BoundPoissonNamedArgs = Bound(Poisson, upper=6)(\"y\", mu=2.0)\n BoundPoissonPositionalArgs = Bound(Poisson, upper=6)(\"x\", 2.0)\n\n\nclass TestStrAndLatexRepr:\n def setup_class(self):\n # True parameter values\n alpha, sigma = 1, 1\n beta = [1, 2.5]\n\n # Size of dataset\n size = 100\n\n # Predictor variable\n X = np.random.normal(size=(size, 2)).dot(np.array([[1, 0], [0, 0.2]]))\n\n # Simulate outcome variable\n Y = alpha + X.dot(beta) + np.random.randn(size) * sigma\n with Model() as self.model:\n # Priors for unknown model parameters\n alpha = Normal(\"alpha\", mu=0, sigma=10)\n b = Normal(\"beta\", mu=0, sigma=10, shape=(2,), observed=beta)\n sigma = HalfNormal(\"sigma\", sigma=1)\n\n # Test Cholesky parameterization\n Z = MvNormal(\"Z\", mu=np.zeros(2), chol=np.eye(2), shape=(2,))\n\n # NegativeBinomial representations to test issue 4186\n nb1 = pm.NegativeBinomial(\n \"nb_with_mu_alpha\", mu=pm.Normal(\"nbmu\"), alpha=pm.Gamma(\"nbalpha\", mu=6, sigma=1)\n )\n nb2 = pm.NegativeBinomial(\"nb_with_p_n\", p=pm.Uniform(\"nbp\"), n=10)\n\n # Expected value of outcome\n mu = Deterministic(\"mu\", floatX(alpha + tt.dot(X, b)))\n\n # add a bounded variable as well\n bound_var = Bound(Normal, lower=1.0)(\"bound_var\", mu=0, sigma=10)\n\n # KroneckerNormal\n n, m = 3, 4\n covs = [np.eye(n), np.eye(m)]\n kron_normal = KroneckerNormal(\"kron_normal\", mu=np.zeros(n * m), covs=covs, shape=n * m)\n\n # MatrixNormal\n matrix_normal = MatrixNormal(\n \"mat_normal\",\n mu=np.random.normal(size=n),\n rowcov=np.eye(n),\n colchol=np.linalg.cholesky(np.eye(n)),\n shape=(n, n),\n )\n\n # Likelihood (sampling distribution) of observations\n Y_obs = Normal(\"Y_obs\", mu=mu, sigma=sigma, observed=Y)\n\n self.distributions = [alpha, sigma, mu, b, Z, nb1, nb2, Y_obs, bound_var]\n self.expected = {\n \"latex\": (\n r\"$\\text{alpha} \\sim \\text{Normal}$\",\n r\"$\\text{sigma} \\sim \\text{HalfNormal}$\",\n r\"$\\text{mu} \\sim \\text{Deterministic}$\",\n r\"$\\text{beta} \\sim \\text{Normal}$\",\n r\"$\\text{Z} \\sim \\text{MvNormal}$\",\n r\"$\\text{nb_with_mu_alpha} \\sim \\text{NegativeBinomial}$\",\n r\"$\\text{nb_with_p_n} \\sim \\text{NegativeBinomial}$\",\n r\"$\\text{Y_obs} \\sim \\text{Normal}$\",\n r\"$\\text{bound_var} \\sim \\text{Bound}$ -- \\text{Normal}$\",\n r\"$\\text{kron_normal} \\sim \\text{KroneckerNormal}$\",\n r\"$\\text{mat_normal} \\sim \\text{MatrixNormal}$\",\n ),\n \"plain\": (\n r\"alpha ~ Normal\",\n r\"sigma ~ HalfNormal\",\n r\"mu ~ Deterministic\",\n r\"beta ~ Normal\",\n r\"Z ~ MvNormal\",\n r\"nb_with_mu_alpha ~ NegativeBinomial\",\n r\"nb_with_p_n ~ 
NegativeBinomial\",\n r\"Y_obs ~ Normal\",\n r\"bound_var ~ Bound-Normal\",\n r\"kron_normal ~ KroneckerNormal\",\n r\"mat_normal ~ MatrixNormal\",\n ),\n \"latex_with_params\": (\n r\"$\\text{alpha} \\sim \\text{Normal}(\\mathit{mu}=0.0,~\\mathit{sigma}=10.0)$\",\n r\"$\\text{sigma} \\sim \\text{HalfNormal}(\\mathit{sigma}=1.0)$\",\n r\"$\\text{mu} \\sim \\text{Deterministic}(\\text{alpha},~\\text{Constant},~\\text{beta})$\",\n r\"$\\text{beta} \\sim \\text{Normal}(\\mathit{mu}=0.0,~\\mathit{sigma}=10.0)$\",\n r\"$\\text{Z} \\sim \\text{MvNormal}(\\mathit{mu}=array,~\\mathit{chol_cov}=array)$\",\n r\"$\\text{nb_with_mu_alpha} \\sim \\text{NegativeBinomial}(\\mathit{mu}=\\text{nbmu},~\\mathit{alpha}=\\text{nbalpha})$\",\n r\"$\\text{nb_with_p_n} \\sim \\text{NegativeBinomial}(\\mathit{p}=\\text{nbp},~\\mathit{n}=10)$\",\n r\"$\\text{Y_obs} \\sim \\text{Normal}(\\mathit{mu}=\\text{mu},~\\mathit{sigma}=f(\\text{sigma}))$\",\n r\"$\\text{bound_var} \\sim \\text{Bound}(\\mathit{lower}=1.0,~\\mathit{upper}=\\text{None})$ -- \\text{Normal}(\\mathit{mu}=0.0,~\\mathit{sigma}=10.0)$\",\n r\"$\\text{kron_normal} \\sim \\text{KroneckerNormal}(\\mathit{mu}=array)$\",\n r\"$\\text{mat_normal} \\sim \\text{MatrixNormal}(\\mathit{mu}=array,~\\mathit{rowcov}=array,~\\mathit{colchol_cov}=array)$\",\n ),\n \"plain_with_params\": (\n r\"alpha ~ Normal(mu=0.0, sigma=10.0)\",\n r\"sigma ~ HalfNormal(sigma=1.0)\",\n r\"mu ~ Deterministic(alpha, Constant, beta)\",\n r\"beta ~ Normal(mu=0.0, sigma=10.0)\",\n r\"Z ~ MvNormal(mu=array, chol_cov=array)\",\n r\"nb_with_mu_alpha ~ NegativeBinomial(mu=nbmu, alpha=nbalpha)\",\n r\"nb_with_p_n ~ NegativeBinomial(p=nbp, n=10)\",\n r\"Y_obs ~ Normal(mu=mu, sigma=f(sigma))\",\n r\"bound_var ~ Bound(lower=1.0, upper=None)-Normal(mu=0.0, sigma=10.0)\",\n r\"kron_normal ~ KroneckerNormal(mu=array)\",\n r\"mat_normal ~ MatrixNormal(mu=array, rowcov=array, colchol_cov=array)\",\n ),\n }\n\n def test__repr_latex_(self):\n for distribution, tex in zip(self.distributions, self.expected[\"latex_with_params\"]):\n assert distribution._repr_latex_() == tex\n\n model_tex = self.model._repr_latex_()\n\n # make sure each variable is in the model\n for tex in self.expected[\"latex\"]:\n for segment in tex.strip(\"$\").split(r\"\\sim\"):\n assert segment in model_tex\n\n def test___latex__(self):\n for distribution, tex in zip(self.distributions, self.expected[\"latex_with_params\"]):\n assert distribution._repr_latex_() == distribution.__latex__()\n assert self.model._repr_latex_() == self.model.__latex__()\n\n def test___str__(self):\n for distribution, str_repr in zip(self.distributions, self.expected[\"plain\"]):\n assert distribution.__str__() == str_repr\n\n model_str = self.model.__str__()\n for str_repr in self.expected[\"plain\"]:\n assert str_repr in model_str\n\n def test_str(self):\n for distribution, str_repr in zip(self.distributions, self.expected[\"plain\"]):\n assert str(distribution) == str_repr\n\n model_str = str(self.model)\n for str_repr in self.expected[\"plain\"]:\n assert str_repr in model_str\n\n\ndef test_discrete_trafo():\n with pytest.raises(ValueError) as err:\n Binomial.dist(n=5, p=0.5, transform=\"log\")\n err.match(\"Transformations for discrete distributions\")\n with Model():\n with pytest.raises(ValueError) as err:\n Binomial(\"a\", n=5, p=0.5, transform=\"log\")\n err.match(\"Transformations for discrete distributions\")\n\n\[email protected](\"shape\", [tuple(), (1,), (3, 1), (3, 2)], ids=str)\ndef test_orderedlogistic_dimensions(shape):\n # Test for 
issue #3535\n loge = np.log10(np.exp(1))\n size = 7\n p = np.ones(shape + (10,)) / 10\n cutpoints = np.tile(logit(np.linspace(0, 1, 11)[1:-1]), shape + (1,))\n obs = np.random.randint(0, 1, size=(size,) + shape)\n with Model():\n ol = OrderedLogistic(\n \"ol\", eta=np.zeros(shape), cutpoints=cutpoints, shape=shape, observed=obs\n )\n c = Categorical(\"c\", p=p, shape=shape, observed=obs)\n ologp = ol.logp({\"ol\": 1}) * loge\n clogp = c.logp({\"c\": 1}) * loge\n expected = -np.prod((size,) + shape)\n\n assert c.distribution.p.ndim == (len(shape) + 1)\n assert np.allclose(clogp, expected)\n assert ol.distribution.p.ndim == (len(shape) + 1)\n assert np.allclose(ologp, expected)\n\n\nclass TestBugfixes:\n @pytest.mark.parametrize(\n \"dist_cls,kwargs\", [(MvNormal, dict(mu=0)), (MvStudentT, dict(mu=0, nu=2))]\n )\n @pytest.mark.parametrize(\"dims\", [1, 2, 4])\n def test_issue_3051(self, dims, dist_cls, kwargs):\n d = dist_cls.dist(**kwargs, cov=np.eye(dims), shape=(dims,))\n\n X = np.random.normal(size=(20, dims))\n actual_t = d.logp(X)\n assert isinstance(actual_t, tt.TensorVariable)\n actual_a = actual_t.eval()\n assert isinstance(actual_a, np.ndarray)\n assert actual_a.shape == (X.shape[0],)\n pass\n\n\ndef test_serialize_density_dist():\n def func(x):\n return -2 * (x ** 2).sum()\n\n with pm.Model():\n pm.Normal(\"x\")\n y = pm.DensityDist(\"y\", func)\n pm.sample(draws=5, tune=1, mp_ctx=\"spawn\")\n\n import pickle\n\n pickle.loads(pickle.dumps(y))\n", "# pylint: skip-file\nimport os\nimport re\nimport warnings\n\nxla_flags = os.getenv(\"XLA_FLAGS\", \"\").lstrip(\"--\")\nxla_flags = re.sub(r\"xla_force_host_platform_device_count=.+\\s\", \"\", xla_flags).split()\nos.environ[\"XLA_FLAGS\"] = \" \".join([\"--xla_force_host_platform_device_count={}\".format(100)])\n\nimport arviz as az\nimport jax\nimport numpy as np\nimport pandas as pd\nimport theano\nimport theano.sandbox.jax_linker\nimport theano.sandbox.jaxify\n\nimport pymc3 as pm\n\nfrom pymc3 import modelcontext\n\nwarnings.warn(\"This module is experimental.\")\n\n# Disable C compilation by default\n# theano.config.cxx = \"\"\n# This will make the JAX Linker the default\n# theano.config.mode = \"JAX\"\n\n\ndef sample_tfp_nuts(\n draws=1000,\n tune=1000,\n chains=4,\n target_accept=0.8,\n random_seed=10,\n model=None,\n num_tuning_epoch=2,\n num_compute_step_size=500,\n):\n from tensorflow_probability.substrates import jax as tfp\n import jax\n\n model = modelcontext(model)\n\n seed = jax.random.PRNGKey(random_seed)\n\n fgraph = theano.gof.FunctionGraph(model.free_RVs, [model.logpt])\n fns = theano.sandbox.jaxify.jax_funcify(fgraph)\n logp_fn_jax = fns[0]\n\n rv_names = [rv.name for rv in model.free_RVs]\n init_state = [model.test_point[rv_name] for rv_name in rv_names]\n init_state_batched = jax.tree_map(lambda x: np.repeat(x[None, ...], chains, axis=0), init_state)\n\n @jax.pmap\n def _sample(init_state, seed):\n def gen_kernel(step_size):\n hmc = tfp.mcmc.NoUTurnSampler(target_log_prob_fn=logp_fn_jax, step_size=step_size)\n return tfp.mcmc.DualAveragingStepSizeAdaptation(\n hmc, tune // num_tuning_epoch, target_accept_prob=target_accept\n )\n\n def trace_fn(_, pkr):\n return pkr.new_step_size\n\n def get_tuned_stepsize(samples, step_size):\n return step_size[-1] * jax.numpy.std(samples[-num_compute_step_size:])\n\n step_size = jax.tree_map(jax.numpy.ones_like, init_state)\n for i in range(num_tuning_epoch - 1):\n tuning_hmc = gen_kernel(step_size)\n init_samples, tuning_result, kernel_results = tfp.mcmc.sample_chain(\n 
num_results=tune // num_tuning_epoch,\n current_state=init_state,\n kernel=tuning_hmc,\n trace_fn=trace_fn,\n return_final_kernel_results=True,\n seed=seed,\n )\n\n step_size = jax.tree_multimap(get_tuned_stepsize, list(init_samples), tuning_result)\n init_state = [x[-1] for x in init_samples]\n\n # Run inference\n sample_kernel = gen_kernel(step_size)\n mcmc_samples, leapfrog_num = tfp.mcmc.sample_chain(\n num_results=draws,\n num_burnin_steps=tune // num_tuning_epoch,\n current_state=init_state,\n kernel=sample_kernel,\n trace_fn=lambda _, pkr: pkr.inner_results.leapfrogs_taken,\n seed=seed,\n )\n\n return mcmc_samples, leapfrog_num\n\n print(\"Compiling...\")\n tic2 = pd.Timestamp.now()\n map_seed = jax.random.split(seed, chains)\n mcmc_samples, leapfrog_num = _sample(init_state_batched, map_seed)\n\n # map_seed = jax.random.split(seed, chains)\n # mcmc_samples = _sample(init_state_batched, map_seed)\n # tic4 = pd.Timestamp.now()\n # print(\"Sampling time = \", tic4 - tic3)\n\n posterior = {k: v for k, v in zip(rv_names, mcmc_samples)}\n\n az_trace = az.from_dict(posterior=posterior)\n tic3 = pd.Timestamp.now()\n print(\"Compilation + sampling time = \", tic3 - tic2)\n return az_trace # , leapfrog_num, tic3 - tic2\n\n\ndef sample_numpyro_nuts(\n draws=1000,\n tune=1000,\n chains=4,\n target_accept=0.8,\n random_seed=10,\n model=None,\n progress_bar=True,\n):\n from numpyro.infer import MCMC, NUTS\n\n from pymc3 import modelcontext\n\n model = modelcontext(model)\n\n seed = jax.random.PRNGKey(random_seed)\n\n fgraph = theano.gof.FunctionGraph(model.free_RVs, [model.logpt])\n fns = theano.sandbox.jaxify.jax_funcify(fgraph)\n logp_fn_jax = fns[0]\n\n rv_names = [rv.name for rv in model.free_RVs]\n init_state = [model.test_point[rv_name] for rv_name in rv_names]\n init_state_batched = jax.tree_map(lambda x: np.repeat(x[None, ...], chains, axis=0), init_state)\n\n @jax.jit\n def _sample(current_state, seed):\n step_size = jax.tree_map(jax.numpy.ones_like, init_state)\n nuts_kernel = NUTS(\n potential_fn=lambda x: -logp_fn_jax(*x),\n # model=model,\n target_accept_prob=target_accept,\n adapt_step_size=True,\n adapt_mass_matrix=True,\n dense_mass=False,\n )\n\n pmap_numpyro = MCMC(\n nuts_kernel,\n num_warmup=tune,\n num_samples=draws,\n num_chains=chains,\n postprocess_fn=None,\n chain_method=\"parallel\",\n progress_bar=progress_bar,\n )\n\n pmap_numpyro.run(seed, init_params=current_state, extra_fields=(\"num_steps\",))\n samples = pmap_numpyro.get_samples(group_by_chain=True)\n leapfrogs_taken = pmap_numpyro.get_extra_fields(group_by_chain=True)[\"num_steps\"]\n return samples, leapfrogs_taken\n\n print(\"Compiling...\")\n tic2 = pd.Timestamp.now()\n map_seed = jax.random.split(seed, chains)\n mcmc_samples, leapfrogs_taken = _sample(init_state_batched, map_seed)\n # map_seed = jax.random.split(seed, chains)\n # mcmc_samples = _sample(init_state_batched, map_seed)\n # tic4 = pd.Timestamp.now()\n # print(\"Sampling time = \", tic4 - tic3)\n\n posterior = {k: v for k, v in zip(rv_names, mcmc_samples)}\n\n az_trace = az.from_dict(posterior=posterior)\n tic3 = pd.Timestamp.now()\n print(\"Compilation + sampling time = \", tic3 - tic2)\n return az_trace # , leapfrogs_taken, tic3 - tic2\n" ]
[ [ "numpy.sqrt", "numpy.all", "numpy.exp", "scipy.stats.distributions.triang.logcdf", "scipy.stats.distributions.invgauss.logpdf", "scipy.stats.distributions.geom.pmf", "scipy.stats.distributions.nbinom.logpmf", "numpy.log1p", "scipy.stats.distributions.norm.logcdf", "numpy.zeros", "numpy.log", "scipy.stats.distributions.halfcauchy.logcdf", "scipy.stats.distributions.invgamma.logcdf", "scipy.stats.distributions.uniform.logpdf", "numpy.array", "scipy.integrate.dblquad", "scipy.stats.distributions.pareto.logpdf", "scipy.stats.distributions.bernoulli.logpmf", "scipy.stats.distributions.invgamma.logpdf", "numpy.vstack", "scipy.stats.distributions.rice.logpdf", "numpy.asarray", "numpy.concatenate", "scipy.stats.distributions.t.logpdf", "scipy.stats.distributions.logistic.logcdf", "numpy.allclose", "scipy.stats.distributions.laplace.logpdf", "scipy.stats.distributions.norm.pdf", "scipy.stats.distributions.beta.logpdf", "numpy.atleast_2d", "numpy.random.rand", "scipy.integrate.quad", "numpy.linalg.solve", "scipy.stats.distributions.gumbel_r.logpdf", "numpy.ones", "scipy.stats.distributions.laplace.logcdf", "scipy.stats.distributions.vonmises.logpdf", "numpy.diag", "scipy.stats.distributions.expon.logcdf", "numpy.linspace", "scipy.stats.distributions.gamma.logcdf", "numpy.zeros_like", "numpy.moveaxis", "scipy.special.logit", "scipy.stats.distributions.moyal.logcdf", "numpy.random.randint", "scipy.stats.distributions.pareto.logcdf", "numpy.eye", "scipy.stats.distributions.cauchy.logpdf", "scipy.stats.distributions.poisson.logpmf", "scipy.stats.distributions.randint.logpmf", "numpy.linalg.inv", "scipy.stats.distributions.chi2.logpdf", "scipy.stats.distributions.gamma.logpdf", "scipy.stats.distributions.triang.logpdf", "scipy.stats.distributions.logistic.logpdf", "numpy.testing.assert_allclose", "scipy.stats.distributions.norm.logpdf", "scipy.stats.distributions.uniform.logcdf", "scipy.stats.distributions.halfcauchy.logpdf", "scipy.stats.distributions.moyal.logpdf", "scipy.stats.distributions.truncnorm.logpdf", "scipy.stats.distributions.exponweib.logcdf", "numpy.dot", "scipy.stats.distributions.expon.logpdf", "scipy.stats.distributions.halfnorm.logcdf", "scipy.stats.distributions.t.logcdf", "numpy.random.randn", "scipy.stats.distributions.skewnorm.logpdf", "scipy.stats.distributions.exponweib.logpdf", "numpy.stack", "scipy.stats.distributions.invgauss.logcdf", "scipy.stats.distributions.gumbel_r.logcdf", "scipy.stats.distributions.binom.logpmf", "numpy.put_along_axis", "scipy.stats.distributions.hypergeom.logpmf", "scipy.stats.distributions.cauchy.logcdf", "numpy.identity", "scipy.integrate.tplquad", "numpy.linalg.cholesky", "numpy.logical_and", "scipy.stats.distributions.halfnorm.logpdf", "numpy.random.seed", "numpy.random.normal", "numpy.prod", "scipy.stats.distributions.beta.logcdf" ], [ "numpy.repeat", "pandas.Timestamp.now" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Naghipourfar/TraderBot
[ "2604c9df7af7394dfab6a54ea9a65a1b0df6a0ce" ]
[ "Code/finance.py" ]
[ "import numpy as np\nimport pandas as pd\nfrom pandas_datareader import data\n\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport keras\n\nfrom keras.layers import Input, Dense, Dropout, BatchNormalization\nfrom keras.models import Model\nfrom keras.callbacks import History, CSVLogger\n\n\"\"\"\n Created by Mohsen Naghipourfar on 7/23/18.\n Email : [email protected] or [email protected]\n Website: http://ce.sharif.edu/~naghipourfar\n Github: https://github.com/naghipourfar\n Skype: mn7697np\n\"\"\"\n\ntickers = ['AAPL', 'MSFT', '^GSPC'] # Apple, Microsoft and S&P500 index\n\n# We would like all available data from 01/01/2000 until 12/31/2016.\nstart_date = '2010-01-01'\nend_date = '2016-12-31'\n\npanel_data = data.DataReader('INPX', 'google', start_date, end_date)\n''' returns a panel object (3D Object)\n 1st dim: various fields of finance -> open, close, high, low, ...\n 2nd dim: date\n 3rd dim: instrument identifiers \n'''\n\n# df_data = panel_data.to_frame()\nall_weekdays = pd.date_range(start_date, end_date, freq='B')\n\nclose = panel_data['close']\nclose = close.reindex(all_weekdays)\nclose = close.fillna(method='ffill')\n\nshort_rolling = close.rolling(window=20).mean()\nlong_rolling = close.rolling(window=100).mean()\n\nfig, ax = plt.subplots(figsize=(16,9))\n\nax.plot(close.index, close, label='close')\nax.plot(short_rolling.index, short_rolling, label='20 days rolling')\nax.plot(long_rolling.index, long_rolling, label='100 days rolling')\n\nax.set_xlabel('Date')\nax.set_ylabel('Adjusted closing price ($)')\nax.legend()\nplt.show()\n\n" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.subplots", "pandas.date_range" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
linkserendipity/deep-person-reid
[ "564ccf307336af1b3343fa42c55f9d53df0fa20a" ]
[ "samplers.py" ]
[ "from __future__ import absolute_import\nfrom collections import defaultdict\nimport numpy as np\n\nimport torch\nfrom torch.utils.data.sampler import Sampler\n\nclass RandomIdentitySampler(Sampler):\n \"\"\" \n Randomly sample N identities, then for each identity,\n randomly sample K instances, therefore batch size is N*K.\n\n Code imported from https://github.com/Cysu/open-reid/blob/master/reid/utils/data/sampler.py.\n\n Args:\n data_source (Dataset): dataset to sample from.\n num_instances (int): number of instances per identity.\n \"\"\"\n def __init__(self, data_source, num_instances=4):\n self.data_source = data_source\n self.num_instances = num_instances\n self.index_dic = defaultdict(list)\n for index, (_, pid, _) in enumerate(data_source):\n self.index_dic[pid].append(index)\n self.pids = list(self.index_dic.keys())\n self.num_identities = len(self.pids)\n\n def __iter__(self):\n # 3004 pictures list 32 batch_size [aaaaaaaaaaaaaaaaaa]\n indices = torch.randperm(self.num_identities) # shuffle for 751 ids\n ret = [] # [1111 2222 3333 4444 5555 6666 7777 ... 751 751 751 751] len(ret)=3004\n for i in indices:\n pid = self.pids[i]\n t = self.index_dic[pid]\n replace = False if len(t) >= self.num_instances else True\n t = np.random.choice(t, size=self.num_instances, replace=replace) # choose 4 pictures from t pictures\n ret.extend(t)\n # from IPython import embed\n # embed()\n return iter(ret)\n\n def __len__(self):\n return self.num_identities * self.num_instances\n\n\n# if __name__ == \"__main__\":\n# from util.data_manager import Market1501\n# dataset = Market1501(root='/home/ls')\n# sampler = RandomIdentitySampler(dataset.train)\n# a = sampler.__iter__()" ]
[ [ "torch.randperm", "numpy.random.choice" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
woutergins/satlas2
[ "51afdc445c8c603372bb26abe19d1eb7bd3f3f24" ]
[ "src/satlas2/models/hfsModel.py" ]
[ "from satlas2.core import Model, Parameter\n\nimport numpy as np\nfrom scipy.special import wofz\nfrom sympy.physics.wigner import wigner_6j, wigner_3j\n\n__all__ = ['HFS']\n\nsqrt2 = 2 ** 0.5\nsqrt2log2t2 = 2 * np.sqrt(2 * np.log(2))\nlog2 = np.log(2)\n\nclass HFS(Model):\n def __init__(self, I, J, A=[0, 0], B=[0, 0], C=[0, 0], df=0, fwhm=50, bkg=1, name=None, N=None, offset=0, poisson=0, scale=1.0, racah=True, prefunc=None):\n super().__init__(name=name, prefunc=prefunc)\n J1, J2 = J\n lower_F = np.arange(abs(I - J1), I+J1+1, 1)\n upper_F = np.arange(abs(I - J2), I+J2+1, 1)\n\n self.lines = []\n self.intensities = {}\n self.scaling_Al = {}\n self.scaling_Bl = {}\n self.scaling_Cl = {}\n self.scaling_Au = {}\n self.scaling_Bu = {}\n self.scaling_Cu = {}\n\n for i, F1 in enumerate(lower_F):\n for j, F2 in enumerate(upper_F):\n if abs(F2 - F1) <= 1 and not F2 == F1 == 0.0:\n if F1 % 1 == 0:\n F1_str = '{:.0f}'.format(F1)\n else:\n F1_str = '{:.0f}_2'.format(2*F1)\n\n if F2 % 1 == 0:\n F2_str = '{:.0f}'.format(F2)\n else:\n F2_str = '{:.0f}_2'.format(2*F2)\n\n line = '{}to{}'.format(F1_str, F2_str)\n self.lines.append(line)\n\n C1, D1, E1 = self.calcShift(I, J1, F1)\n C2, D2, E2 = self.calcShift(I, J2, F2)\n\n self.scaling_Al[line] = C1\n self.scaling_Bl[line] = D1\n self.scaling_Cl[line] = E1\n self.scaling_Au[line] = C2\n self.scaling_Bu[line] = D2\n self.scaling_Cu[line] = E2\n\n intens = float((2 * F1 + 1) * (2 * F2 + 1) * \\\n wigner_6j(J2, F2, I, F1, J1, 1.0) ** 2) # DO NOT REMOVE CAST TO FLOAT!!!\n self.intensities['Amp'+line] = Parameter(value=intens, min=0, vary=not racah)\n\n norm = max([p.value for p in self.intensities.values()])\n for n, v in self.intensities.items():\n v.value /= norm\n\n pars = {'centroid': Parameter(value=df),\n 'Al': Parameter(value=A[0]),\n 'Au': Parameter(value=A[1]),\n 'Bl': Parameter(value=B[0]),\n 'Bu': Parameter(value=B[1]),\n 'Cl': Parameter(value=C[0], vary=False),\n 'Cu': Parameter(value=C[1], vary=False),\n 'bkg': Parameter(value=bkg),\n 'FWHMG': Parameter(value=fwhm, min=0.01),\n 'FWHML': Parameter(value=fwhm, min=0.01),\n 'scale': Parameter(value=scale, min=0, vary=racah)}\n if N is not None:\n pars['N'] = Parameter(value=N, vary=False)\n pars['Offset'] = Parameter(value=offset)\n pars['Poisson'] = Parameter(value=poisson, min=0, max=1)\n self.f = self.fShifted\n else:\n self.f = self.fUnshifted\n pars = {**pars, **self.intensities}\n\n self.params = pars\n\n if I < 2 or J1 < 2:\n self.params['Cl'].vary = False\n if I < 2 or J2 < 2:\n self.params['Cu'].vary = False\n if I < 1 or J1 < 1:\n self.params['Bl'].vary = False\n if I < 1 or J2 < 1:\n self.params['Bu'].vary = False\n if I == 0 or J1 == 0: \n self.params['Al'].vary = False\n if I == 0 or J2 == 0:\n self.params['Au'].vary = False\n self.xtransformed = None\n self.xhashed = None\n\n def fUnshifted(self, x):\n centroid = self.params['centroid'].value\n Al = self.params['Al'].value\n Au = self.params['Au'].value\n Bl = self.params['Bl'].value\n Bu = self.params['Bu'].value\n Cl = self.params['Cl'].value\n Cu = self.params['Cu'].value\n FWHMG = self.params['FWHMG'].value\n FWHML = self.params['FWHML'].value\n scale = self.params['scale'].value\n bkg = self.params['bkg'].value\n\n result = np.zeros(len(x))\n x = self.transform(x)\n for line in self.lines:\n pos = centroid + Au * self.scaling_Au[line] + Bu * self.scaling_Bu[line] + Cu * self.scaling_Cu[line] - Al * self.scaling_Al[line] - Bl * self.scaling_Bl[line] - Cl * self.scaling_Cl[line]\n result += self.params['Amp' + line].value * 
self.peak(x - pos, FWHMG, FWHML)\n\n return scale * result + bkg\n\n def fShifted(self, x):\n centroid = self.params['centroid'].value\n Al = self.params['Al'].value\n Au = self.params['Au'].value\n Bl = self.params['Bl'].value\n Bu = self.params['Bu'].value\n FWHMG = self.params['FWHMG'].value\n FWHML = self.params['FWHML'].value\n scale = self.params['scale'].value\n N = self.params['N'].value\n offset = self.params['Offset'].value\n poisson = self.params['Poisson'].value\n bkg = self.params['bkg'].value\n\n result = np.zeros(len(x)) \n for line in self.lines:\n pos = centroid + Au * self.scaling_Au[line] + Bu * self.scaling_Bu[line] + Cu * self.scaling_Cu[line] - Al * self.scaling_Al[line] - Bl * self.scaling_Bl[line] - Cl * self.scaling_Cl[line]\n for i in range(N + 1):\n if self.prefunc:\n result += self.params['Amp' + line].value * self.peak(self.prefunc(x - i * offset) - pos, FWHMG, FWHML) * (poisson**i)/np.math.factorial(i)\n else:\n result += self.params['Amp' + line].value * self.peak(x - pos - i * offset, FWHMG, FWHML) * (poisson**i)/np.math.factorial(i)\n\n return scale * result + bkg\n\n def peak(self, x, FWHMG, FWHML):\n z = self.preparePeak(x, FWHMG, FWHML)\n n = self.norm(FWHML, FWHMG)\n ret = wofz(z).real\n return ret/n\n\n def norm(self, FWHML, FWHMG):\n return wofz(1j * FWHML / (FWHMG * sqrt2)).real\n\n def preparePeak(self, x, FWHMG, FWHML):\n sigma, gamma = FWHMG / sqrt2log2t2, FWHML / 2\n z = (x + 1j * gamma) / (sigma * sqrt2)\n return z\n\n def calcShift(self, I, J, F):\n phase = (-1)**(I+J+F)\n contrib = []\n for k in range(1, 4):\n n = float(wigner_6j(I, J, F, J, I, k))\n d = float(wigner_3j(I, k, I, -I, 0, I) * wigner_3j(J, k, J, -J, 0, J))\n shift = phase * n / d\n if not np.isfinite(shift):\n contrib.append(0)\n else:\n if k == 1:\n shift = shift * (I*J)\n elif k == 2:\n shift = shift / 4\n contrib.append(shift)\n return contrib\n\n def pos(self):\n centroid = self.params['centroid'].value\n Al = self.params['Al'].value\n Au = self.params['Au'].value\n Bl = self.params['Bl'].value\n Bu = self.params['Bu'].value\n Cl = self.params['Cl'].value\n Cu = self.params['Cu'].value\n pos = []\n for line in self.lines:\n pos.append(centroid + Au * self.scaling_Au[line] + Bu * self.scaling_Bu[line] + Cu * self.scaling_Cu[line] - Al * self.scaling_Al[line] - Bl * self.scaling_Bl[line] - Cl * self.scaling_Cl[line])\n return pos\n" ]
[ [ "numpy.log", "scipy.special.wofz", "numpy.isfinite", "numpy.math.factorial" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mcuiteallen/stock
[ "06c56db6c712ab88fabdc67a8812869ad4180f6f" ]
[ "collect/TwHistory.py" ]
[ "import calendar\nimport math\nimport pandas as pd\nimport time\nimport twstock\nimport requests\nfrom datetime import datetime, timedelta\nfrom dateutil import relativedelta\nfrom db.Connection import session\nfrom enum import Enum\nfrom model.StockHistory import StockHistory\nfrom sys import float_info\nfrom talib import abstract\n\nclass HistoryType(Enum):\n DAY = (\"0\", \"日\", \"短線\")\n WEEK = (\"1\", \"週\", \"中短線\")\n MONTH = (\"2\", \"月\", \"中長線\")\n\nclass HistoryTypeTo(Enum):\n DB = 0\n HUMAN = 1\n EXPLAIN = 2\n\nclass TwHistory:\n \"\"\"TwHistory class\"\"\"\n dateFormatForTwStock = None\n dateFormat = None\n rsiDict = None\n williamsDict = None\n macdDict = None\n bbandDict = None\n \n def __init__(self):\n self.dateFormatForTwStock = \"%Y/%m/%d\"\n self.dateFormat = \"%Y-%m-%d\"\n\n def transformStrToDateTimeForTwStock(self, targetStr):\n return datetime.strptime(targetStr, self.dateFormatForTwStock)\n\n def transformStrToDateTime(self, targetStr):\n return datetime.strptime(targetStr, self.dateFormat)\n \n def transformDateTimeToStr(self, date):\n return date.strftime(self.dateFormat)\n \n def retIfNaN(self, num):\n if math.isnan(num):\n return None\n else:\n return num\n \n def createDataFrame(self, history):\n df = pd.DataFrame([h.as_simple_dict() for h in history])\n df['date'] = pd.to_datetime(df['date'])\n df.set_index('date', inplace=True)\n return df\n \n def deleteHistory(self, code, type, startDate, endDate):\n session.query(StockHistory).\\\n filter(StockHistory.code == code).\\\n filter(StockHistory.type == type).\\\n filter(StockHistory.date >= self.transformDateTimeToStr(startDate)).\\\n filter(StockHistory.date <= self.transformDateTimeToStr(endDate)).\\\n delete()\n session.commit()\n\n def calculateRSI(self, df):\n rsi = abstract.RSI(df, timeperiod=5)\n self.rsiDict = {}\n for index, number in rsi.iteritems():\n self.rsiDict[self.transformDateTimeToStr(index)] = number\n\n def calculateWilliams(self, df):\n williams = abstract.WILLR(df, timeperiod=5)\n self.williamsDict = {}\n for index, number in williams.iteritems():\n self.williamsDict[self.transformDateTimeToStr(index)] = number\n\n def calculateMACD(self, df):\n macd = abstract.MACD(df)\n self.macdDict = {}\n for index, row in macd.iterrows():\n self.macdDict[self.transformDateTimeToStr(index)] = row\n\n def calculateBBAND(self, df):\n bband = abstract.BBANDS(df, timeperiod=22)\n self.bbandDict = {}\n for index, row in bband.iterrows():\n self.bbandDict[self.transformDateTimeToStr(index)] = row\n\n def updateHistoryTechnicalIndicator(self, history):\n date = history.date\n updateFlag = False\n if history.rsi is None:\n history.rsi = self.retIfNaN(self.rsiDict[date])\n updateFlag = updateFlag or history.rsi is not None\n if history.williams is None:\n history.williams = self.retIfNaN(self.williamsDict[date])\n updateFlag = updateFlag or history.williams is not None\n if history.macd is None:\n history.macd = self.retIfNaN(self.macdDict[date].macd)\n updateFlag = updateFlag or history.macd is not None\n if history.macdsignal is None:\n history.macdsignal = self.retIfNaN(self.macdDict[date].macdsignal)\n updateFlag = updateFlag or history.macdsignal is not None\n if history.macdhist is None:\n history.macdhist = self.retIfNaN(self.macdDict[date].macdhist)\n updateFlag = updateFlag or history.macdhist is not None\n if history.upperband is None:\n history.upperband = self.retIfNaN(self.bbandDict[date].upperband)\n updateFlag = updateFlag or history.upperband is not None\n if history.middleband is None:\n 
history.middleband = self.retIfNaN(self.bbandDict[date].middleband)\n updateFlag = updateFlag or history.middleband is not None\n if history.lowerband is None:\n history.lowerband = self.retIfNaN(self.bbandDict[date].lowerband)\n updateFlag = updateFlag or history.lowerband is not None\n if updateFlag:\n session.merge(history)\n\n def dayHistory(self):\n for k, v in twstock.codes.items():\n if self.isStockOrETF(v.type) and k == '3707':\n print(\"dayHistory code: \" + k)\n dayType = self.translate(HistoryType.DAY, HistoryTypeTo.DB) #get type value for db\n history = session.query(StockHistory).\\\n filter(StockHistory.code == k).\\\n filter(StockHistory.type == dayType).\\\n order_by(StockHistory.date.desc()).\\\n first()\n nowDate = datetime.now()\n endDateStr = self.transformDateTimeToStr(nowDate)\n startDateStr = self.transformDateTimeToStr(self.transformStrToDateTimeForTwStock(v.start)) if history is None else history.date #如果DB撈的到相對應條件的資料,就只抓最後一天\n self.finmindtrade(k, startDateStr, endDateStr, dayType)\n\n def weekHistory(self):\n today = self.transformStrToDateTime(self.transformDateTimeToStr(datetime.now()))\n weekStart = today - timedelta(days=today.weekday())\n\n for k, v in twstock.codes.items():\n if self.isStockOrETF(v.type) and self.isHistoryExist(k):\n print(\"weekHistory code: \" + k)\n latestHistoryWeek = session.query(StockHistory).\\\n filter(StockHistory.code == k).\\\n filter(StockHistory.type == self.translate(HistoryType.WEEK, HistoryTypeTo.DB)).\\\n order_by(StockHistory.date.desc()).\\\n first()\n\n startdate = self.transformStrToDateTimeForTwStock(v.start) if latestHistoryWeek is None else self.transformStrToDateTime(latestHistoryWeek.date)\n weekStartPast = startdate - timedelta(days=startdate.weekday())\n weekEndPast = weekStartPast + timedelta(days=6)\n\n while weekStartPast <= weekStart:\n self.deleteHistory(k, self.translate(HistoryType.WEEK, HistoryTypeTo.DB), weekStartPast, weekEndPast)\n historyWeek = StockHistory(code=k, type=self.translate(HistoryType.WEEK, HistoryTypeTo.DB),\n capacity=0, turnover=0, high=0, low=float_info.max, close=0)\n firstFlag = True\n for historyDay in session.query(StockHistory).\\\n filter(StockHistory.code == k).\\\n filter(StockHistory.type == self.translate(HistoryType.DAY, HistoryTypeTo.DB)).\\\n filter(StockHistory.date >= self.transformDateTimeToStr(weekStartPast)).\\\n filter(StockHistory.date <= self.transformDateTimeToStr(weekEndPast)).\\\n order_by(StockHistory.date.asc()).\\\n all():\n historyWeek.date = self.transformDateTimeToStr(weekStartPast)\n historyWeek.close = historyDay.close\n historyWeek.capacity += historyDay.capacity\n historyWeek.turnover += historyDay.turnover\n if firstFlag:\n historyWeek.open = historyDay.open\n firstFlag = False\n historyWeek.high = max(historyWeek.high, historyDay.high)\n historyWeek.low = min(historyWeek.low, historyDay.low)\n if not firstFlag:\n session.merge(historyWeek)\n weekStartPast += timedelta(days=7)\n weekEndPast += timedelta(days=7)\n\n session.commit()\n\n def monthHistory(self):\n today = self.transformStrToDateTime(self.transformDateTimeToStr(datetime.now()))\n monthStart = today.replace(day=1)\n for k, v in twstock.codes.items():\n if self.isStockOrETF(v.type) and self.isHistoryExist(k):\n print(\"monthHistory code: \" + k)\n latestHistoryMonth = session.query(StockHistory).\\\n filter(StockHistory.code == k).\\\n filter(StockHistory.type == self.translate(HistoryType.MONTH, HistoryTypeTo.DB)).\\\n order_by(StockHistory.date.desc()).\\\n first()\n\n startdate = 
self.transformStrToDateTimeForTwStock(v.start) if latestHistoryMonth is None else self.transformStrToDateTime(latestHistoryMonth.date)\n monthStartPast = startdate.replace(day=1)\n monthEndPast = monthStartPast.replace(day=calendar.monthrange(monthStartPast.year, monthStartPast.month)[1])\n\n while monthStartPast <= monthStart:\n self.deleteHistory(k, self.translate(HistoryType.MONTH, HistoryTypeTo.DB), monthStartPast, monthEndPast)\n historyMonth = StockHistory(code=k, type=self.translate(HistoryType.MONTH, HistoryTypeTo.DB),\n capacity=0, turnover=0, high=0, low=float_info.max, close=0)\n firstFlag = True\n for historyDay in session.query(StockHistory).\\\n filter(StockHistory.code == k).\\\n filter(StockHistory.type == self.translate(HistoryType.DAY, HistoryTypeTo.DB)).\\\n filter(StockHistory.date >= self.transformDateTimeToStr(monthStartPast)).\\\n filter(StockHistory.date <= self.transformDateTimeToStr(monthEndPast)).\\\n order_by(StockHistory.date.asc()).\\\n all():\n historyMonth.date = self.transformDateTimeToStr(monthStartPast)\n historyMonth.close = historyDay.close\n historyMonth.capacity += historyDay.capacity\n historyMonth.turnover += historyDay.turnover\n if firstFlag:\n historyMonth.open = historyDay.open\n firstFlag = False\n historyMonth.high = max(historyMonth.high, historyDay.high)\n historyMonth.low = min(historyMonth.low, historyDay.low)\n if not firstFlag:\n session.merge(historyMonth)\n monthStartPast = monthStartPast + relativedelta.relativedelta(months=1)\n monthEndPast = monthStartPast.replace(day=calendar.monthrange(monthStartPast.year, monthStartPast.month)[1])\n\n session.commit()\n\n def technicalIndicator(self):\n for k, v in twstock.codes.items():\n if self.isStockOrETF(v.type) and self.isHistoryExist(k):\n for historyType in HistoryType:\n print(\"technicalIndicator code: \" + k + \", type: \" + self.translate(historyType, HistoryTypeTo.HUMAN))\n historyList = session.query(StockHistory).\\\n filter(StockHistory.code == k).\\\n filter(StockHistory.type == self.translate(historyType, HistoryTypeTo.DB)).\\\n order_by(StockHistory.date.asc()).\\\n all()\n if len(historyList) == 0:\n continue\n df = self.createDataFrame(historyList)\n\n self.calculateRSI(df)\n self.calculateWilliams(df)\n self.calculateMACD(df)\n self.calculateBBAND(df)\n\n for history in historyList:\n self.updateHistoryTechnicalIndicator(history)\n session.commit()\n\n def diverge(self, highRsi, lowRsi, highWilliams, lowWilliams):\n turnoverDict = {}\n nameDict = {}\n\n for k, v in twstock.codes.items():\n if self.isStockOrETF(v.type) and self.isHistoryExist(k):\n history = session.query(StockHistory).\\\n filter(StockHistory.code == k).\\\n filter(StockHistory.type == self.translate(HistoryType.DAY, HistoryTypeTo.DB)).\\\n order_by(StockHistory.date.desc()).\\\n first()\n turnoverDict[k] = history.turnover\n nameDict[k] = v.name\n\n rankDict = {k: v for k, v in sorted(turnoverDict.items(), key=lambda item: item[1], reverse=True)}\n\n print(\"按當日成交值由大至小排名,背離條件: rsi > \" + str(highRsi) + \" or rsi < \" + str(lowRsi))\n for rankIdx, code in enumerate(rankDict.keys()):\n closePrice = None\n divergeDict = {}\n for historyType in HistoryType:\n historyTypeHuman = self.translate(historyType, HistoryTypeTo.HUMAN)\n historyTypeExplain = self.translate(historyType, HistoryTypeTo.EXPLAIN)\n historyList = session.query(StockHistory).\\\n filter(StockHistory.code == code).\\\n filter(StockHistory.type == self.translate(historyType, HistoryTypeTo.DB)).\\\n filter(StockHistory.rsi.isnot(None)).\\\n 
order_by(StockHistory.date.desc()).\\\n limit(self.recentHistoryLimit(historyType)).\\\n all()\n historyListLength = len(historyList)\n if historyListLength > 0:\n closePrice = historyList[0].close\n if historyListLength > 1:\n if self.isHighRsi(highRsi, historyList) and historyList[0].rsi > historyList[1].rsi and historyList[0].williams < historyList[1].williams:\n divergeDict[historyTypeHuman + \" 相鄰背離 \" + historyTypeExplain + \"看空\"] = \"rsi up williams down\"\n elif self.isLowRsi(lowRsi, historyList) and historyList[0].rsi < historyList[1].rsi and historyList[0].williams > historyList[1].williams:\n divergeDict[historyTypeHuman + \" 相鄰背離 \" + historyTypeExplain + \"看多\"] = \"rsi down williams up\"\n if historyListLength > 2:\n highPeak = []\n lowPeak = []\n for i, history in enumerate(historyList):\n if i == 0 or i == historyListLength - 1:\n continue\n if len(highPeak) < 2 and historyList[i-1].rsi < history.rsi and history.rsi > historyList[i+1].rsi:\n highPeak.append(history)\n if len(lowPeak) < 2 and historyList[i-1].rsi > history.rsi and history.rsi < historyList[i+1].rsi:\n lowPeak.append(history)\n if len(highPeak) == 2 and len(lowPeak) == 2:\n break\n if len(highPeak) == 2 and self.isHighRsi(highRsi, highPeak):\n if highPeak[0].rsi > highPeak[1].rsi and highPeak[0].williams < highPeak[1].williams:\n divergeDict[historyTypeHuman + \" 波峰背離 \" + historyTypeExplain + \"看空: \" + highPeak[1].date + \" and \" + highPeak[0].date] = \"rsi up williams down\"\n elif highPeak[0].rsi < highPeak[1].rsi and highPeak[0].williams > highPeak[1].williams and highPeak[0].williams >= highWilliams:\n for low in lowPeak:\n if highPeak[0].date > low.date and highPeak[1].date < low.date and low.williams <= lowWilliams:\n divergeDict[historyTypeHuman + \" 波峰背離 反彈不過前高 \" + historyTypeExplain + \"看空: \" + highPeak[1].date + \" and \" + highPeak[0].date] = \"rsi down williams fast up\"\n break\n if len(lowPeak) == 2 and self.isLowRsi(lowRsi, lowPeak):\n if lowPeak[0].rsi < lowPeak[1].rsi and lowPeak[0].williams > lowPeak[1].williams:\n divergeDict[historyTypeHuman + \" 波谷背離 \" + historyTypeExplain + \"看多: \" + lowPeak[1].date + \" and \" + lowPeak[0].date] = \"rsi down williams up\"\n elif lowPeak[0].rsi > lowPeak[1].rsi and lowPeak[0].williams < lowPeak[1].williams and lowPeak[0].williams <= lowWilliams:\n for high in highPeak:\n if lowPeak[0].date > high.date and lowPeak[1].date < high.date and high.williams >= highWilliams:\n divergeDict[historyTypeHuman + \" 波谷背離 回測不過前低 \" + historyTypeExplain + \"看多: \" + lowPeak[1].date + \" and \" + lowPeak[0].date] = \"rsi up williams fast down\"\n break\n\n if len(divergeDict) > 0:\n print(\"code: \" + code + \", name: \" + nameDict[code] + \", rank: \" + str(rankIdx+1) + \"/\" + str(len(rankDict)) + \", close price: \" + str(closePrice))\n for k, v in divergeDict.items():\n print(k + \" => \" + v)\n print(\"\")\n print(\"========================================================================================\")\n\n def isStockOrETF(self, type):\n return type == \"股票\" or type == \"ETF\"\n\n def isHistoryExist(self, code):\n if code=='3707':\n return session.query(StockHistory).\\\n filter(StockHistory.code == code).\\\n filter(StockHistory.type == self.translate(HistoryType.DAY, HistoryTypeTo.DB)).\\\n filter(StockHistory.date == self.transformDateTimeToStr(datetime.now())).\\\n first() is not None\n return False \n\n def isHighRsi(self, highRsi, historyList):\n for i, history in enumerate(historyList):\n if i < 2 and history.rsi < highRsi:\n return False\n elif 
i == 2:\n break\n return True\n\n def isLowRsi(self, lowRsi, historyList):\n for i, history in enumerate(historyList):\n if i < 2 and history.rsi > lowRsi:\n return False\n elif i == 2:\n break\n return True\n\n def recentHistoryLimit(self, historyType):\n if historyType == HistoryType.DAY:\n return 40\n elif historyType == HistoryType.WEEK:\n return 16\n else:\n return 6\n\n def translate(self, historyType, historyTypeTo):\n return historyType.value[historyTypeTo.value]\n\n def finmindtrade(self, code, start, end, dayType):\n url = \"https://api.finmindtrade.com/api/v4/data\"\n parameter = {\n \"dataset\": \"TaiwanStockPrice\",\n \"data_id\": code,\n \"start_date\": start,\n \"end_date\": end,\n \"token\": \"eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJkYXRlIjoiMjAyMS0xMC0wMSAxNjoyMzoyNSIsInVzZXJfaWQiOiJtY3VpdGVhbGxlbiIsImlwIjoiMTE4LjE2My4xNDcuMTgyIn0.vXMykagq4kOKGrKOusgfAR3yhgcri0N_Wpe1Nb4DOiA\"\n }\n resp = requests.get(url, params=parameter)\n json = resp.json()\n if json is not None:\n for data in resp.json()[\"data\"]:\n history = StockHistory(code=code, type=dayType, date=data[\"date\"],\n capacity=data[\"Trading_Volume\"], turnover=data[\"Trading_money\"],\n open=data[\"open\"], high=data[\"max\"], low=data[\"min\"], close=data[\"close\"])\n session.merge(history)\n session.commit()\n time.sleep(6.1)\n\ntwHistory = TwHistory()\ntwHistory.dayHistory()\ntwHistory.weekHistory()\ntwHistory.monthHistory()\ntwHistory.technicalIndicator()\n#twHistory.diverge(90, 10, -20, -80)\n#twHistory.diverge(80, 20, -20, -80)\ntwHistory.diverge(70, 30, -20, -80)" ]
[ [ "pandas.to_datetime" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
naomi172839/pandas
[ "c5f11ab79e5553a28a91fc7036c8dcbfc8cbc697", "c5f11ab79e5553a28a91fc7036c8dcbfc8cbc697" ]
[ "pandas/tests/arithmetic/test_datetime64.py", "pandas/tests/frame/conftest.py" ]
[ "# Arithmetic tests for DataFrame/Series/Index/Array classes that should\n# behave identically.\n# Specifically for datetime64 and datetime64tz dtypes\nfrom datetime import datetime, timedelta\nfrom itertools import product, starmap\nimport operator\nimport warnings\n\nimport numpy as np\nimport pytest\nimport pytz\n\nfrom pandas._libs.tslibs.conversion import localize_pydatetime\nfrom pandas._libs.tslibs.offsets import shift_months\nfrom pandas.compat.numpy import np_datetime64_compat\nfrom pandas.errors import PerformanceWarning\n\nimport pandas as pd\nfrom pandas import (\n DatetimeIndex,\n NaT,\n Period,\n Series,\n Timedelta,\n TimedeltaIndex,\n Timestamp,\n date_range,\n)\nimport pandas._testing as tm\nfrom pandas.core.arrays import DatetimeArray, TimedeltaArray\nfrom pandas.core.ops import roperator\nfrom pandas.tests.arithmetic.common import (\n assert_invalid_addsub_type,\n assert_invalid_comparison,\n get_upcast_box,\n)\n\n# ------------------------------------------------------------------\n# Comparisons\n\n\nclass TestDatetime64ArrayLikeComparisons:\n # Comparison tests for datetime64 vectors fully parametrized over\n # DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison\n # tests will eventually end up here.\n\n def test_compare_zerodim(self, tz_naive_fixture, box_with_array):\n # Test comparison with zero-dimensional array is unboxed\n tz = tz_naive_fixture\n box = box_with_array\n xbox = box_with_array if box_with_array is not pd.Index else np.ndarray\n dti = date_range(\"20130101\", periods=3, tz=tz)\n\n other = np.array(dti.to_numpy()[0])\n\n dtarr = tm.box_expected(dti, box)\n result = dtarr <= other\n expected = np.array([True, False, False])\n expected = tm.box_expected(expected, xbox)\n tm.assert_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"other\",\n [\n \"foo\",\n -1,\n 99,\n 4.0,\n object(),\n timedelta(days=2),\n # GH#19800, GH#19301 datetime.date comparison raises to\n # match DatetimeIndex/Timestamp. 
This also matches the behavior\n # of stdlib datetime.datetime\n datetime(2001, 1, 1).date(),\n # GH#19301 None and NaN are *not* cast to NaT for comparisons\n None,\n np.nan,\n ],\n )\n def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):\n # GH#22074, GH#15966\n tz = tz_naive_fixture\n\n rng = date_range(\"1/1/2000\", periods=10, tz=tz)\n dtarr = tm.box_expected(rng, box_with_array)\n assert_invalid_comparison(dtarr, other, box_with_array)\n\n @pytest.mark.parametrize(\n \"other\",\n [\n list(range(10)),\n np.arange(10),\n np.arange(10).astype(np.float32),\n np.arange(10).astype(object),\n pd.timedelta_range(\"1ns\", periods=10).array,\n np.array(pd.timedelta_range(\"1ns\", periods=10)),\n list(pd.timedelta_range(\"1ns\", periods=10)),\n pd.timedelta_range(\"1 Day\", periods=10).astype(object),\n pd.period_range(\"1971-01-01\", freq=\"D\", periods=10).array,\n pd.period_range(\"1971-01-01\", freq=\"D\", periods=10).astype(object),\n ],\n )\n def test_dt64arr_cmp_arraylike_invalid(self, other, tz_naive_fixture):\n # We don't parametrize this over box_with_array because listlike\n # other plays poorly with assert_invalid_comparison reversed checks\n tz = tz_naive_fixture\n\n dta = date_range(\"1970-01-01\", freq=\"ns\", periods=10, tz=tz)._data\n assert_invalid_comparison(dta, other, tm.to_array)\n\n def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):\n tz = tz_naive_fixture\n\n dta = date_range(\"1970-01-01\", freq=\"h\", periods=5, tz=tz)._data\n\n other = np.array([0, 1, 2, dta[3], pd.Timedelta(days=1)])\n result = dta == other\n expected = np.array([False, False, False, True, False])\n tm.assert_numpy_array_equal(result, expected)\n\n result = dta != other\n tm.assert_numpy_array_equal(result, ~expected)\n\n msg = \"Invalid comparison between|Cannot compare type|not supported between\"\n with pytest.raises(TypeError, match=msg):\n dta < other\n with pytest.raises(TypeError, match=msg):\n dta > other\n with pytest.raises(TypeError, match=msg):\n dta <= other\n with pytest.raises(TypeError, match=msg):\n dta >= other\n\n def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):\n # GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly\n tz = tz_naive_fixture\n box = box_with_array\n xbox = box if box is not pd.Index else np.ndarray\n\n ts = pd.Timestamp.now(tz)\n ser = pd.Series([ts, pd.NaT])\n\n # FIXME: Can't transpose because that loses the tz dtype on\n # the NaT column\n obj = tm.box_expected(ser, box, transpose=False)\n\n expected = pd.Series([True, False], dtype=np.bool_)\n expected = tm.box_expected(expected, xbox, transpose=False)\n\n result = obj == ts\n tm.assert_equal(result, expected)\n\n\nclass TestDatetime64SeriesComparison:\n # TODO: moved from tests.series.test_operators; needs cleanup\n\n @pytest.mark.parametrize(\n \"pair\",\n [\n (\n [pd.Timestamp(\"2011-01-01\"), NaT, pd.Timestamp(\"2011-01-03\")],\n [NaT, NaT, pd.Timestamp(\"2011-01-03\")],\n ),\n (\n [pd.Timedelta(\"1 days\"), NaT, pd.Timedelta(\"3 days\")],\n [NaT, NaT, pd.Timedelta(\"3 days\")],\n ),\n (\n [pd.Period(\"2011-01\", freq=\"M\"), NaT, pd.Period(\"2011-03\", freq=\"M\")],\n [NaT, NaT, pd.Period(\"2011-03\", freq=\"M\")],\n ),\n ],\n )\n @pytest.mark.parametrize(\"reverse\", [True, False])\n @pytest.mark.parametrize(\"dtype\", [None, object])\n def test_nat_comparisons(self, dtype, index_or_series, reverse, pair):\n box = index_or_series\n l, r = pair\n if reverse:\n # add lhs / rhs switched data\n l, r = r, l\n\n left = Series(l, 
dtype=dtype)\n right = box(r, dtype=dtype)\n # Series, Index\n\n expected = Series([False, False, True])\n tm.assert_series_equal(left == right, expected)\n\n expected = Series([True, True, False])\n tm.assert_series_equal(left != right, expected)\n\n expected = Series([False, False, False])\n tm.assert_series_equal(left < right, expected)\n\n expected = Series([False, False, False])\n tm.assert_series_equal(left > right, expected)\n\n expected = Series([False, False, True])\n tm.assert_series_equal(left >= right, expected)\n\n expected = Series([False, False, True])\n tm.assert_series_equal(left <= right, expected)\n\n def test_comparison_invalid(self, tz_naive_fixture, box_with_array):\n # GH#4968\n # invalid date/int comparisons\n tz = tz_naive_fixture\n ser = Series(range(5))\n ser2 = Series(pd.date_range(\"20010101\", periods=5, tz=tz))\n\n ser = tm.box_expected(ser, box_with_array)\n ser2 = tm.box_expected(ser2, box_with_array)\n\n assert_invalid_comparison(ser, ser2, box_with_array)\n\n @pytest.mark.parametrize(\n \"data\",\n [\n [Timestamp(\"2011-01-01\"), NaT, Timestamp(\"2011-01-03\")],\n [Timedelta(\"1 days\"), NaT, Timedelta(\"3 days\")],\n [Period(\"2011-01\", freq=\"M\"), NaT, Period(\"2011-03\", freq=\"M\")],\n ],\n )\n @pytest.mark.parametrize(\"dtype\", [None, object])\n def test_nat_comparisons_scalar(self, dtype, data, box_with_array):\n if box_with_array is tm.to_array and dtype is object:\n # dont bother testing ndarray comparison methods as this fails\n # on older numpys (since they check object identity)\n return\n\n xbox = box_with_array if box_with_array is not pd.Index else np.ndarray\n\n left = Series(data, dtype=dtype)\n left = tm.box_expected(left, box_with_array)\n\n expected = [False, False, False]\n expected = tm.box_expected(expected, xbox)\n tm.assert_equal(left == NaT, expected)\n tm.assert_equal(NaT == left, expected)\n\n expected = [True, True, True]\n expected = tm.box_expected(expected, xbox)\n tm.assert_equal(left != NaT, expected)\n tm.assert_equal(NaT != left, expected)\n\n expected = [False, False, False]\n expected = tm.box_expected(expected, xbox)\n tm.assert_equal(left < NaT, expected)\n tm.assert_equal(NaT > left, expected)\n tm.assert_equal(left <= NaT, expected)\n tm.assert_equal(NaT >= left, expected)\n\n tm.assert_equal(left > NaT, expected)\n tm.assert_equal(NaT < left, expected)\n tm.assert_equal(left >= NaT, expected)\n tm.assert_equal(NaT <= left, expected)\n\n @pytest.mark.parametrize(\"val\", [datetime(2000, 1, 4), datetime(2000, 1, 5)])\n def test_series_comparison_scalars(self, val):\n series = Series(date_range(\"1/1/2000\", periods=10))\n\n result = series > val\n expected = Series([x > val for x in series])\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"left,right\", [(\"lt\", \"gt\"), (\"le\", \"ge\"), (\"eq\", \"eq\"), (\"ne\", \"ne\")]\n )\n def test_timestamp_compare_series(self, left, right):\n # see gh-4982\n # Make sure we can compare Timestamps on the right AND left hand side.\n ser = pd.Series(pd.date_range(\"20010101\", periods=10), name=\"dates\")\n s_nat = ser.copy(deep=True)\n\n ser[0] = pd.Timestamp(\"nat\")\n ser[3] = pd.Timestamp(\"nat\")\n\n left_f = getattr(operator, left)\n right_f = getattr(operator, right)\n\n # No NaT\n expected = left_f(ser, pd.Timestamp(\"20010109\"))\n result = right_f(pd.Timestamp(\"20010109\"), ser)\n tm.assert_series_equal(result, expected)\n\n # NaT\n expected = left_f(ser, pd.Timestamp(\"nat\"))\n result = right_f(pd.Timestamp(\"nat\"), ser)\n 
tm.assert_series_equal(result, expected)\n\n # Compare to Timestamp with series containing NaT\n expected = left_f(s_nat, pd.Timestamp(\"20010109\"))\n result = right_f(pd.Timestamp(\"20010109\"), s_nat)\n tm.assert_series_equal(result, expected)\n\n # Compare to NaT with series containing NaT\n expected = left_f(s_nat, pd.Timestamp(\"nat\"))\n result = right_f(pd.Timestamp(\"nat\"), s_nat)\n tm.assert_series_equal(result, expected)\n\n def test_dt64arr_timestamp_equality(self, box_with_array):\n # GH#11034\n xbox = box_with_array if box_with_array is not pd.Index else np.ndarray\n\n ser = pd.Series([pd.Timestamp(\"2000-01-29 01:59:00\"), \"NaT\"])\n ser = tm.box_expected(ser, box_with_array)\n\n result = ser != ser\n expected = tm.box_expected([False, True], xbox)\n tm.assert_equal(result, expected)\n\n result = ser != ser[0]\n expected = tm.box_expected([False, True], xbox)\n tm.assert_equal(result, expected)\n\n result = ser != ser[1]\n expected = tm.box_expected([True, True], xbox)\n tm.assert_equal(result, expected)\n\n result = ser == ser\n expected = tm.box_expected([True, False], xbox)\n tm.assert_equal(result, expected)\n\n result = ser == ser[0]\n expected = tm.box_expected([True, False], xbox)\n tm.assert_equal(result, expected)\n\n result = ser == ser[1]\n expected = tm.box_expected([False, False], xbox)\n tm.assert_equal(result, expected)\n\n\nclass TestDatetimeIndexComparisons:\n\n # TODO: moved from tests.indexes.test_base; parametrize and de-duplicate\n @pytest.mark.parametrize(\n \"op\",\n [operator.eq, operator.ne, operator.gt, operator.lt, operator.ge, operator.le],\n )\n def test_comparators(self, op):\n index = tm.makeDateIndex(100)\n element = index[len(index) // 2]\n element = Timestamp(element).to_datetime64()\n\n arr = np.array(index)\n arr_result = op(arr, element)\n index_result = op(index, element)\n\n assert isinstance(index_result, np.ndarray)\n tm.assert_numpy_array_equal(arr_result, index_result)\n\n @pytest.mark.parametrize(\n \"other\",\n [datetime(2016, 1, 1), Timestamp(\"2016-01-01\"), np.datetime64(\"2016-01-01\")],\n )\n def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):\n tz = tz_naive_fixture\n dti = pd.date_range(\"2016-01-01\", periods=2, tz=tz)\n if tz is not None:\n if isinstance(other, np.datetime64):\n # no tzaware version available\n return\n other = localize_pydatetime(other, dti.tzinfo)\n\n result = dti == other\n expected = np.array([True, False])\n tm.assert_numpy_array_equal(result, expected)\n\n result = dti > other\n expected = np.array([False, True])\n tm.assert_numpy_array_equal(result, expected)\n\n result = dti >= other\n expected = np.array([True, True])\n tm.assert_numpy_array_equal(result, expected)\n\n result = dti < other\n expected = np.array([False, False])\n tm.assert_numpy_array_equal(result, expected)\n\n result = dti <= other\n expected = np.array([True, False])\n tm.assert_numpy_array_equal(result, expected)\n\n @pytest.mark.parametrize(\"dtype\", [None, object])\n def test_dti_cmp_nat(self, dtype, box_with_array):\n if box_with_array is tm.to_array and dtype is object:\n # dont bother testing ndarray comparison methods as this fails\n # on older numpys (since they check object identity)\n return\n\n xbox = box_with_array if box_with_array is not pd.Index else np.ndarray\n\n left = pd.DatetimeIndex(\n [pd.Timestamp(\"2011-01-01\"), pd.NaT, pd.Timestamp(\"2011-01-03\")]\n )\n right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp(\"2011-01-03\")])\n\n left = tm.box_expected(left, box_with_array)\n right = 
tm.box_expected(right, box_with_array)\n\n lhs, rhs = left, right\n if dtype is object:\n lhs, rhs = left.astype(object), right.astype(object)\n\n result = rhs == lhs\n expected = np.array([False, False, True])\n expected = tm.box_expected(expected, xbox)\n tm.assert_equal(result, expected)\n\n result = lhs != rhs\n expected = np.array([True, True, False])\n expected = tm.box_expected(expected, xbox)\n tm.assert_equal(result, expected)\n\n expected = np.array([False, False, False])\n expected = tm.box_expected(expected, xbox)\n tm.assert_equal(lhs == pd.NaT, expected)\n tm.assert_equal(pd.NaT == rhs, expected)\n\n expected = np.array([True, True, True])\n expected = tm.box_expected(expected, xbox)\n tm.assert_equal(lhs != pd.NaT, expected)\n tm.assert_equal(pd.NaT != lhs, expected)\n\n expected = np.array([False, False, False])\n expected = tm.box_expected(expected, xbox)\n tm.assert_equal(lhs < pd.NaT, expected)\n tm.assert_equal(pd.NaT > lhs, expected)\n\n def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):\n fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])\n fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])\n\n didx1 = pd.DatetimeIndex(\n [\"2014-01-01\", pd.NaT, \"2014-03-01\", pd.NaT, \"2014-05-01\", \"2014-07-01\"]\n )\n didx2 = pd.DatetimeIndex(\n [\"2014-02-01\", \"2014-03-01\", pd.NaT, pd.NaT, \"2014-06-01\", \"2014-07-01\"]\n )\n darr = np.array(\n [\n np_datetime64_compat(\"2014-02-01 00:00Z\"),\n np_datetime64_compat(\"2014-03-01 00:00Z\"),\n np_datetime64_compat(\"nat\"),\n np.datetime64(\"nat\"),\n np_datetime64_compat(\"2014-06-01 00:00Z\"),\n np_datetime64_compat(\"2014-07-01 00:00Z\"),\n ]\n )\n\n cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]\n\n # Check pd.NaT is handles as the same as np.nan\n with tm.assert_produces_warning(None):\n for idx1, idx2 in cases:\n\n result = idx1 < idx2\n expected = np.array([True, False, False, False, True, False])\n tm.assert_numpy_array_equal(result, expected)\n\n result = idx2 > idx1\n expected = np.array([True, False, False, False, True, False])\n tm.assert_numpy_array_equal(result, expected)\n\n result = idx1 <= idx2\n expected = np.array([True, False, False, False, True, True])\n tm.assert_numpy_array_equal(result, expected)\n\n result = idx2 >= idx1\n expected = np.array([True, False, False, False, True, True])\n tm.assert_numpy_array_equal(result, expected)\n\n result = idx1 == idx2\n expected = np.array([False, False, False, False, False, True])\n tm.assert_numpy_array_equal(result, expected)\n\n result = idx1 != idx2\n expected = np.array([True, True, True, True, True, False])\n tm.assert_numpy_array_equal(result, expected)\n\n with tm.assert_produces_warning(None):\n for idx1, val in [(fidx1, np.nan), (didx1, pd.NaT)]:\n result = idx1 < val\n expected = np.array([False, False, False, False, False, False])\n tm.assert_numpy_array_equal(result, expected)\n result = idx1 > val\n tm.assert_numpy_array_equal(result, expected)\n\n result = idx1 <= val\n tm.assert_numpy_array_equal(result, expected)\n result = idx1 >= val\n tm.assert_numpy_array_equal(result, expected)\n\n result = idx1 == val\n tm.assert_numpy_array_equal(result, expected)\n\n result = idx1 != val\n expected = np.array([True, True, True, True, True, True])\n tm.assert_numpy_array_equal(result, expected)\n\n # Check pd.NaT is handles as the same as np.nan\n with tm.assert_produces_warning(None):\n for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:\n result = idx1 < val\n expected = np.array([True, False, False, False, False, False])\n 
tm.assert_numpy_array_equal(result, expected)\n result = idx1 > val\n expected = np.array([False, False, False, False, True, True])\n tm.assert_numpy_array_equal(result, expected)\n\n result = idx1 <= val\n expected = np.array([True, False, True, False, False, False])\n tm.assert_numpy_array_equal(result, expected)\n result = idx1 >= val\n expected = np.array([False, False, True, False, True, True])\n tm.assert_numpy_array_equal(result, expected)\n\n result = idx1 == val\n expected = np.array([False, False, True, False, False, False])\n tm.assert_numpy_array_equal(result, expected)\n\n result = idx1 != val\n expected = np.array([True, True, False, True, True, True])\n tm.assert_numpy_array_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"op\",\n [operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le],\n )\n def test_comparison_tzawareness_compat(self, op, box_df_fail):\n # GH#18162\n box = box_df_fail\n\n dr = pd.date_range(\"2016-01-01\", periods=6)\n dz = dr.tz_localize(\"US/Pacific\")\n\n dr = tm.box_expected(dr, box)\n dz = tm.box_expected(dz, box)\n\n msg = \"Cannot compare tz-naive and tz-aware\"\n with pytest.raises(TypeError, match=msg):\n op(dr, dz)\n\n # FIXME: DataFrame case fails to raise for == and !=, wrong\n # message for inequalities\n with pytest.raises(TypeError, match=msg):\n op(dr, list(dz))\n with pytest.raises(TypeError, match=msg):\n op(dr, np.array(list(dz), dtype=object))\n with pytest.raises(TypeError, match=msg):\n op(dz, dr)\n\n # FIXME: DataFrame case fails to raise for == and !=, wrong\n # message for inequalities\n with pytest.raises(TypeError, match=msg):\n op(dz, list(dr))\n with pytest.raises(TypeError, match=msg):\n op(dz, np.array(list(dr), dtype=object))\n\n # The aware==aware and naive==naive comparisons should *not* raise\n assert np.all(dr == dr)\n assert np.all(dr == list(dr))\n assert np.all(list(dr) == dr)\n assert np.all(np.array(list(dr), dtype=object) == dr)\n assert np.all(dr == np.array(list(dr), dtype=object))\n\n assert np.all(dz == dz)\n assert np.all(dz == list(dz))\n assert np.all(list(dz) == dz)\n assert np.all(np.array(list(dz), dtype=object) == dz)\n assert np.all(dz == np.array(list(dz), dtype=object))\n\n @pytest.mark.parametrize(\n \"op\",\n [operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le],\n )\n def test_comparison_tzawareness_compat_scalars(self, op, box_with_array):\n # GH#18162\n dr = pd.date_range(\"2016-01-01\", periods=6)\n dz = dr.tz_localize(\"US/Pacific\")\n\n dr = tm.box_expected(dr, box_with_array)\n dz = tm.box_expected(dz, box_with_array)\n\n # Check comparisons against scalar Timestamps\n ts = pd.Timestamp(\"2000-03-14 01:59\")\n ts_tz = pd.Timestamp(\"2000-03-14 01:59\", tz=\"Europe/Amsterdam\")\n\n assert np.all(dr > ts)\n msg = \"Cannot compare tz-naive and tz-aware\"\n with pytest.raises(TypeError, match=msg):\n op(dr, ts_tz)\n\n assert np.all(dz > ts_tz)\n with pytest.raises(TypeError, match=msg):\n op(dz, ts)\n\n # GH#12601: Check comparison against Timestamps and DatetimeIndex\n with pytest.raises(TypeError, match=msg):\n op(ts, dz)\n\n @pytest.mark.parametrize(\n \"op\",\n [operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le],\n )\n @pytest.mark.parametrize(\n \"other\",\n [datetime(2016, 1, 1), Timestamp(\"2016-01-01\"), np.datetime64(\"2016-01-01\")],\n )\n # Bug in NumPy? 
https://github.com/numpy/numpy/issues/13841\n # Raising in __eq__ will fallback to NumPy, which warns, fails,\n # then re-raises the original exception. So we just need to ignore.\n @pytest.mark.filterwarnings(\"ignore:elementwise comp:DeprecationWarning\")\n @pytest.mark.filterwarnings(\"ignore:Converting timezone-aware:FutureWarning\")\n def test_scalar_comparison_tzawareness(\n self, op, other, tz_aware_fixture, box_with_array\n ):\n tz = tz_aware_fixture\n dti = pd.date_range(\"2016-01-01\", periods=2, tz=tz)\n\n dtarr = tm.box_expected(dti, box_with_array)\n msg = \"Cannot compare tz-naive and tz-aware\"\n with pytest.raises(TypeError, match=msg):\n op(dtarr, other)\n with pytest.raises(TypeError, match=msg):\n op(other, dtarr)\n\n @pytest.mark.parametrize(\n \"op\",\n [operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le],\n )\n def test_nat_comparison_tzawareness(self, op):\n # GH#19276\n # tzaware DatetimeIndex should not raise when compared to NaT\n dti = pd.DatetimeIndex(\n [\"2014-01-01\", pd.NaT, \"2014-03-01\", pd.NaT, \"2014-05-01\", \"2014-07-01\"]\n )\n expected = np.array([op == operator.ne] * len(dti))\n result = op(dti, pd.NaT)\n tm.assert_numpy_array_equal(result, expected)\n\n result = op(dti.tz_localize(\"US/Pacific\"), pd.NaT)\n tm.assert_numpy_array_equal(result, expected)\n\n def test_dti_cmp_str(self, tz_naive_fixture):\n # GH#22074\n # regardless of tz, we expect these comparisons are valid\n tz = tz_naive_fixture\n rng = date_range(\"1/1/2000\", periods=10, tz=tz)\n other = \"1/1/2000\"\n\n result = rng == other\n expected = np.array([True] + [False] * 9)\n tm.assert_numpy_array_equal(result, expected)\n\n result = rng != other\n expected = np.array([False] + [True] * 9)\n tm.assert_numpy_array_equal(result, expected)\n\n result = rng < other\n expected = np.array([False] * 10)\n tm.assert_numpy_array_equal(result, expected)\n\n result = rng <= other\n expected = np.array([True] + [False] * 9)\n tm.assert_numpy_array_equal(result, expected)\n\n result = rng > other\n expected = np.array([False] + [True] * 9)\n tm.assert_numpy_array_equal(result, expected)\n\n result = rng >= other\n expected = np.array([True] * 10)\n tm.assert_numpy_array_equal(result, expected)\n\n def test_dti_cmp_list(self):\n rng = date_range(\"1/1/2000\", periods=10)\n\n result = rng == list(rng)\n expected = rng == rng\n tm.assert_numpy_array_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"other\",\n [\n pd.timedelta_range(\"1D\", periods=10),\n pd.timedelta_range(\"1D\", periods=10).to_series(),\n pd.timedelta_range(\"1D\", periods=10).asi8.view(\"m8[ns]\"),\n ],\n ids=lambda x: type(x).__name__,\n )\n def test_dti_cmp_tdi_tzawareness(self, other):\n # GH#22074\n # reversion test that we _don't_ call _assert_tzawareness_compat\n # when comparing against TimedeltaIndex\n dti = date_range(\"2000-01-01\", periods=10, tz=\"Asia/Tokyo\")\n\n result = dti == other\n expected = np.array([False] * 10)\n tm.assert_numpy_array_equal(result, expected)\n\n result = dti != other\n expected = np.array([True] * 10)\n tm.assert_numpy_array_equal(result, expected)\n msg = \"Invalid comparison between\"\n with pytest.raises(TypeError, match=msg):\n dti < other\n with pytest.raises(TypeError, match=msg):\n dti <= other\n with pytest.raises(TypeError, match=msg):\n dti > other\n with pytest.raises(TypeError, match=msg):\n dti >= other\n\n def test_dti_cmp_object_dtype(self):\n # GH#22074\n dti = date_range(\"2000-01-01\", periods=10, tz=\"Asia/Tokyo\")\n\n other = 
dti.astype(\"O\")\n\n result = dti == other\n expected = np.array([True] * 10)\n tm.assert_numpy_array_equal(result, expected)\n\n other = dti.tz_localize(None)\n msg = \"Cannot compare tz-naive and tz-aware\"\n with pytest.raises(TypeError, match=msg):\n # tzawareness failure\n dti != other\n\n other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)\n result = dti == other\n expected = np.array([True] * 5 + [False] * 5)\n tm.assert_numpy_array_equal(result, expected)\n msg = \"Cannot compare type\"\n with pytest.raises(TypeError, match=msg):\n dti >= other\n\n\n# ------------------------------------------------------------------\n# Arithmetic\n\n\nclass TestDatetime64Arithmetic:\n # This class is intended for \"finished\" tests that are fully parametrized\n # over DataFrame/Series/Index/DatetimeArray\n\n # -------------------------------------------------------------\n # Addition/Subtraction of timedelta-like\n\n def test_dt64arr_add_timedeltalike_scalar(\n self, tz_naive_fixture, two_hours, box_with_array\n ):\n # GH#22005, GH#22163 check DataFrame doesn't raise TypeError\n tz = tz_naive_fixture\n\n rng = pd.date_range(\"2000-01-01\", \"2000-02-01\", tz=tz)\n expected = pd.date_range(\"2000-01-01 02:00\", \"2000-02-01 02:00\", tz=tz)\n\n rng = tm.box_expected(rng, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n result = rng + two_hours\n tm.assert_equal(result, expected)\n\n def test_dt64arr_iadd_timedeltalike_scalar(\n self, tz_naive_fixture, two_hours, box_with_array\n ):\n tz = tz_naive_fixture\n\n rng = pd.date_range(\"2000-01-01\", \"2000-02-01\", tz=tz)\n expected = pd.date_range(\"2000-01-01 02:00\", \"2000-02-01 02:00\", tz=tz)\n\n rng = tm.box_expected(rng, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n rng += two_hours\n tm.assert_equal(rng, expected)\n\n def test_dt64arr_sub_timedeltalike_scalar(\n self, tz_naive_fixture, two_hours, box_with_array\n ):\n tz = tz_naive_fixture\n\n rng = pd.date_range(\"2000-01-01\", \"2000-02-01\", tz=tz)\n expected = pd.date_range(\"1999-12-31 22:00\", \"2000-01-31 22:00\", tz=tz)\n\n rng = tm.box_expected(rng, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n result = rng - two_hours\n tm.assert_equal(result, expected)\n\n def test_dt64arr_isub_timedeltalike_scalar(\n self, tz_naive_fixture, two_hours, box_with_array\n ):\n tz = tz_naive_fixture\n\n rng = pd.date_range(\"2000-01-01\", \"2000-02-01\", tz=tz)\n expected = pd.date_range(\"1999-12-31 22:00\", \"2000-01-31 22:00\", tz=tz)\n\n rng = tm.box_expected(rng, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n rng -= two_hours\n tm.assert_equal(rng, expected)\n\n # TODO: redundant with test_dt64arr_add_timedeltalike_scalar\n def test_dt64arr_add_td64_scalar(self, box_with_array):\n # scalar timedeltas/np.timedelta64 objects\n # operate with np.timedelta64 correctly\n ser = Series([Timestamp(\"20130101 9:01\"), Timestamp(\"20130101 9:02\")])\n\n expected = Series(\n [Timestamp(\"20130101 9:01:01\"), Timestamp(\"20130101 9:02:01\")]\n )\n\n dtarr = tm.box_expected(ser, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n result = dtarr + np.timedelta64(1, \"s\")\n tm.assert_equal(result, expected)\n result = np.timedelta64(1, \"s\") + dtarr\n tm.assert_equal(result, expected)\n\n expected = Series(\n [Timestamp(\"20130101 9:01:00.005\"), Timestamp(\"20130101 9:02:00.005\")]\n )\n expected = tm.box_expected(expected, box_with_array)\n\n result = dtarr + 
np.timedelta64(5, \"ms\")\n tm.assert_equal(result, expected)\n result = np.timedelta64(5, \"ms\") + dtarr\n tm.assert_equal(result, expected)\n\n def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):\n # GH#23320 special handling for timedelta64(\"NaT\")\n tz = tz_naive_fixture\n\n dti = pd.date_range(\"1994-04-01\", periods=9, tz=tz, freq=\"QS\")\n other = np.timedelta64(\"NaT\")\n expected = pd.DatetimeIndex([\"NaT\"] * 9, tz=tz)\n\n # FIXME: fails with transpose=True due to tz-aware DataFrame\n # transpose bug\n obj = tm.box_expected(dti, box_with_array, transpose=False)\n expected = tm.box_expected(expected, box_with_array, transpose=False)\n\n result = obj + other\n tm.assert_equal(result, expected)\n result = other + obj\n tm.assert_equal(result, expected)\n result = obj - other\n tm.assert_equal(result, expected)\n msg = \"cannot subtract\"\n with pytest.raises(TypeError, match=msg):\n other - obj\n\n def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, box_with_array):\n\n tz = tz_naive_fixture\n dti = pd.date_range(\"2016-01-01\", periods=3, tz=tz)\n tdi = pd.TimedeltaIndex([\"-1 Day\", \"-1 Day\", \"-1 Day\"])\n tdarr = tdi.values\n\n expected = pd.date_range(\"2015-12-31\", periods=3, tz=tz)\n\n dtarr = tm.box_expected(dti, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n result = dtarr + tdarr\n tm.assert_equal(result, expected)\n result = tdarr + dtarr\n tm.assert_equal(result, expected)\n\n expected = pd.date_range(\"2016-01-02\", periods=3, tz=tz)\n expected = tm.box_expected(expected, box_with_array)\n\n result = dtarr - tdarr\n tm.assert_equal(result, expected)\n msg = \"cannot subtract|(bad|unsupported) operand type for unary\"\n with pytest.raises(TypeError, match=msg):\n tdarr - dtarr\n\n # -----------------------------------------------------------------\n # Subtraction of datetime-like scalars\n\n @pytest.mark.parametrize(\n \"ts\",\n [\n pd.Timestamp(\"2013-01-01\"),\n pd.Timestamp(\"2013-01-01\").to_pydatetime(),\n pd.Timestamp(\"2013-01-01\").to_datetime64(),\n ],\n )\n def test_dt64arr_sub_dtscalar(self, box_with_array, ts):\n # GH#8554, GH#22163 DataFrame op should _not_ return dt64 dtype\n idx = pd.date_range(\"2013-01-01\", periods=3)\n idx = tm.box_expected(idx, box_with_array)\n\n expected = pd.TimedeltaIndex([\"0 Days\", \"1 Day\", \"2 Days\"])\n expected = tm.box_expected(expected, box_with_array)\n\n result = idx - ts\n tm.assert_equal(result, expected)\n\n def test_dt64arr_sub_datetime64_not_ns(self, box_with_array):\n # GH#7996, GH#22163 ensure non-nano datetime64 is converted to nano\n # for DataFrame operation\n dt64 = np.datetime64(\"2013-01-01\")\n assert dt64.dtype == \"datetime64[D]\"\n\n dti = pd.date_range(\"20130101\", periods=3)\n dtarr = tm.box_expected(dti, box_with_array)\n\n expected = pd.TimedeltaIndex([\"0 Days\", \"1 Day\", \"2 Days\"])\n expected = tm.box_expected(expected, box_with_array)\n\n result = dtarr - dt64\n tm.assert_equal(result, expected)\n\n result = dt64 - dtarr\n tm.assert_equal(result, -expected)\n\n def test_dt64arr_sub_timestamp(self, box_with_array):\n ser = pd.date_range(\"2014-03-17\", periods=2, freq=\"D\", tz=\"US/Eastern\")\n ts = ser[0]\n\n ser = tm.box_expected(ser, box_with_array)\n\n delta_series = pd.Series([np.timedelta64(0, \"D\"), np.timedelta64(1, \"D\")])\n expected = tm.box_expected(delta_series, box_with_array)\n\n tm.assert_equal(ser - ts, expected)\n tm.assert_equal(ts - ser, -expected)\n\n def test_dt64arr_sub_NaT(self, box_with_array):\n # 
GH#18808\n dti = pd.DatetimeIndex([pd.NaT, pd.Timestamp(\"19900315\")])\n ser = tm.box_expected(dti, box_with_array)\n\n result = ser - pd.NaT\n expected = pd.Series([pd.NaT, pd.NaT], dtype=\"timedelta64[ns]\")\n expected = tm.box_expected(expected, box_with_array)\n tm.assert_equal(result, expected)\n\n dti_tz = dti.tz_localize(\"Asia/Tokyo\")\n ser_tz = tm.box_expected(dti_tz, box_with_array)\n\n result = ser_tz - pd.NaT\n expected = pd.Series([pd.NaT, pd.NaT], dtype=\"timedelta64[ns]\")\n expected = tm.box_expected(expected, box_with_array)\n tm.assert_equal(result, expected)\n\n # -------------------------------------------------------------\n # Subtraction of datetime-like array-like\n\n def test_dt64arr_sub_dt64object_array(self, box_with_array, tz_naive_fixture):\n dti = pd.date_range(\"2016-01-01\", periods=3, tz=tz_naive_fixture)\n expected = dti - dti\n\n obj = tm.box_expected(dti, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n warn = PerformanceWarning if box_with_array is not pd.DataFrame else None\n with tm.assert_produces_warning(warn):\n result = obj - obj.astype(object)\n tm.assert_equal(result, expected)\n\n def test_dt64arr_naive_sub_dt64ndarray(self, box_with_array):\n dti = pd.date_range(\"2016-01-01\", periods=3, tz=None)\n dt64vals = dti.values\n\n dtarr = tm.box_expected(dti, box_with_array)\n\n expected = dtarr - dtarr\n result = dtarr - dt64vals\n tm.assert_equal(result, expected)\n result = dt64vals - dtarr\n tm.assert_equal(result, expected)\n\n def test_dt64arr_aware_sub_dt64ndarray_raises(\n self, tz_aware_fixture, box_with_array\n ):\n\n tz = tz_aware_fixture\n dti = pd.date_range(\"2016-01-01\", periods=3, tz=tz)\n dt64vals = dti.values\n\n dtarr = tm.box_expected(dti, box_with_array)\n msg = \"subtraction must have the same timezones or\"\n with pytest.raises(TypeError, match=msg):\n dtarr - dt64vals\n with pytest.raises(TypeError, match=msg):\n dt64vals - dtarr\n\n # -------------------------------------------------------------\n # Addition of datetime-like others (invalid)\n\n def test_dt64arr_add_dt64ndarray_raises(self, tz_naive_fixture, box_with_array):\n\n tz = tz_naive_fixture\n dti = pd.date_range(\"2016-01-01\", periods=3, tz=tz)\n dt64vals = dti.values\n\n dtarr = tm.box_expected(dti, box_with_array)\n msg = \"cannot add\"\n with pytest.raises(TypeError, match=msg):\n dtarr + dt64vals\n with pytest.raises(TypeError, match=msg):\n dt64vals + dtarr\n\n def test_dt64arr_add_timestamp_raises(self, box_with_array):\n # GH#22163 ensure DataFrame doesn't cast Timestamp to i8\n idx = DatetimeIndex([\"2011-01-01\", \"2011-01-02\"])\n idx = tm.box_expected(idx, box_with_array)\n msg = \"cannot add\"\n with pytest.raises(TypeError, match=msg):\n idx + Timestamp(\"2011-01-01\")\n with pytest.raises(TypeError, match=msg):\n Timestamp(\"2011-01-01\") + idx\n\n # -------------------------------------------------------------\n # Other Invalid Addition/Subtraction\n\n @pytest.mark.parametrize(\n \"other\",\n [\n 3.14,\n np.array([2.0, 3.0]),\n # GH#13078 datetime +/- Period is invalid\n pd.Period(\"2011-01-01\", freq=\"D\"),\n ],\n )\n @pytest.mark.parametrize(\"dti_freq\", [None, \"D\"])\n def test_dt64arr_add_sub_invalid(self, dti_freq, other, box_with_array):\n dti = DatetimeIndex([\"2011-01-01\", \"2011-01-02\"], freq=dti_freq)\n dtarr = tm.box_expected(dti, box_with_array)\n msg = \"|\".join(\n [\n \"unsupported operand type\",\n \"cannot (add|subtract)\",\n \"cannot use operands with types\",\n \"ufunc '?(add|subtract)'? 
cannot use operands with types\",\n ]\n )\n assert_invalid_addsub_type(dtarr, other, msg)\n\n @pytest.mark.parametrize(\"pi_freq\", [\"D\", \"W\", \"Q\", \"H\"])\n @pytest.mark.parametrize(\"dti_freq\", [None, \"D\"])\n def test_dt64arr_add_sub_parr(\n self, dti_freq, pi_freq, box_with_array, box_with_array2\n ):\n # GH#20049 subtracting PeriodIndex should raise TypeError\n dti = pd.DatetimeIndex([\"2011-01-01\", \"2011-01-02\"], freq=dti_freq)\n pi = dti.to_period(pi_freq)\n\n dtarr = tm.box_expected(dti, box_with_array)\n parr = tm.box_expected(pi, box_with_array2)\n msg = \"|\".join(\n [\n \"cannot (add|subtract)\",\n \"unsupported operand\",\n \"descriptor.*requires\",\n \"ufunc.*cannot use operands\",\n ]\n )\n assert_invalid_addsub_type(dtarr, parr, msg)\n\n\nclass TestDatetime64DateOffsetArithmetic:\n\n # -------------------------------------------------------------\n # Tick DateOffsets\n\n # TODO: parametrize over timezone?\n def test_dt64arr_series_add_tick_DateOffset(self, box_with_array):\n # GH#4532\n # operate with pd.offsets\n ser = Series([Timestamp(\"20130101 9:01\"), Timestamp(\"20130101 9:02\")])\n expected = Series(\n [Timestamp(\"20130101 9:01:05\"), Timestamp(\"20130101 9:02:05\")]\n )\n\n ser = tm.box_expected(ser, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n result = ser + pd.offsets.Second(5)\n tm.assert_equal(result, expected)\n\n result2 = pd.offsets.Second(5) + ser\n tm.assert_equal(result2, expected)\n\n def test_dt64arr_series_sub_tick_DateOffset(self, box_with_array):\n # GH#4532\n # operate with pd.offsets\n ser = Series([Timestamp(\"20130101 9:01\"), Timestamp(\"20130101 9:02\")])\n expected = Series(\n [Timestamp(\"20130101 9:00:55\"), Timestamp(\"20130101 9:01:55\")]\n )\n\n ser = tm.box_expected(ser, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n result = ser - pd.offsets.Second(5)\n tm.assert_equal(result, expected)\n\n result2 = -pd.offsets.Second(5) + ser\n tm.assert_equal(result2, expected)\n msg = \"(bad|unsupported) operand type for unary\"\n with pytest.raises(TypeError, match=msg):\n pd.offsets.Second(5) - ser\n\n @pytest.mark.parametrize(\n \"cls_name\", [\"Day\", \"Hour\", \"Minute\", \"Second\", \"Milli\", \"Micro\", \"Nano\"]\n )\n def test_dt64arr_add_sub_tick_DateOffset_smoke(self, cls_name, box_with_array):\n # GH#4532\n # smoke tests for valid DateOffsets\n ser = Series([Timestamp(\"20130101 9:01\"), Timestamp(\"20130101 9:02\")])\n ser = tm.box_expected(ser, box_with_array)\n\n offset_cls = getattr(pd.offsets, cls_name)\n ser + offset_cls(5)\n offset_cls(5) + ser\n ser - offset_cls(5)\n\n def test_dti_add_tick_tzaware(self, tz_aware_fixture, box_with_array):\n # GH#21610, GH#22163 ensure DataFrame doesn't return object-dtype\n tz = tz_aware_fixture\n if tz == \"US/Pacific\":\n dates = date_range(\"2012-11-01\", periods=3, tz=tz)\n offset = dates + pd.offsets.Hour(5)\n assert dates[0] + pd.offsets.Hour(5) == offset[0]\n\n dates = date_range(\"2010-11-01 00:00\", periods=3, tz=tz, freq=\"H\")\n expected = DatetimeIndex(\n [\"2010-11-01 05:00\", \"2010-11-01 06:00\", \"2010-11-01 07:00\"],\n freq=\"H\",\n tz=tz,\n )\n\n dates = tm.box_expected(dates, box_with_array)\n expected = tm.box_expected(expected, box_with_array)\n\n # TODO: parametrize over the scalar being added? radd? 
sub?\n offset = dates + pd.offsets.Hour(5)\n tm.assert_equal(offset, expected)\n offset = dates + np.timedelta64(5, \"h\")\n tm.assert_equal(offset, expected)\n offset = dates + timedelta(hours=5)\n tm.assert_equal(offset, expected)\n\n # -------------------------------------------------------------\n # RelativeDelta DateOffsets\n\n def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array):\n # GH#10699\n vec = DatetimeIndex(\n [\n Timestamp(\"2000-01-05 00:15:00\"),\n Timestamp(\"2000-01-31 00:23:00\"),\n Timestamp(\"2000-01-01\"),\n Timestamp(\"2000-03-31\"),\n Timestamp(\"2000-02-29\"),\n Timestamp(\"2000-12-31\"),\n Timestamp(\"2000-05-15\"),\n Timestamp(\"2001-06-15\"),\n ]\n )\n vec = tm.box_expected(vec, box_with_array)\n vec_items = vec.squeeze() if box_with_array is pd.DataFrame else vec\n\n # DateOffset relativedelta fastpath\n relative_kwargs = [\n (\"years\", 2),\n (\"months\", 5),\n (\"days\", 3),\n (\"hours\", 5),\n (\"minutes\", 10),\n (\"seconds\", 2),\n (\"microseconds\", 5),\n ]\n for i, kwd in enumerate(relative_kwargs):\n off = pd.DateOffset(**dict([kwd]))\n\n expected = DatetimeIndex([x + off for x in vec_items])\n expected = tm.box_expected(expected, box_with_array)\n tm.assert_equal(expected, vec + off)\n\n expected = DatetimeIndex([x - off for x in vec_items])\n expected = tm.box_expected(expected, box_with_array)\n tm.assert_equal(expected, vec - off)\n\n off = pd.DateOffset(**dict(relative_kwargs[: i + 1]))\n\n expected = DatetimeIndex([x + off for x in vec_items])\n expected = tm.box_expected(expected, box_with_array)\n tm.assert_equal(expected, vec + off)\n\n expected = DatetimeIndex([x - off for x in vec_items])\n expected = tm.box_expected(expected, box_with_array)\n tm.assert_equal(expected, vec - off)\n msg = \"(bad|unsupported) operand type for unary\"\n with pytest.raises(TypeError, match=msg):\n off - vec\n\n # -------------------------------------------------------------\n # Non-Tick, Non-RelativeDelta DateOffsets\n\n # TODO: redundant with test_dt64arr_add_sub_DateOffset? 
that includes\n # tz-aware cases which this does not\n @pytest.mark.parametrize(\n \"cls_and_kwargs\",\n [\n \"YearBegin\",\n (\"YearBegin\", {\"month\": 5}),\n \"YearEnd\",\n (\"YearEnd\", {\"month\": 5}),\n \"MonthBegin\",\n \"MonthEnd\",\n \"SemiMonthEnd\",\n \"SemiMonthBegin\",\n \"Week\",\n (\"Week\", {\"weekday\": 3}),\n \"Week\",\n (\"Week\", {\"weekday\": 6}),\n \"BusinessDay\",\n \"BDay\",\n \"QuarterEnd\",\n \"QuarterBegin\",\n \"CustomBusinessDay\",\n \"CDay\",\n \"CBMonthEnd\",\n \"CBMonthBegin\",\n \"BMonthBegin\",\n \"BMonthEnd\",\n \"BusinessHour\",\n \"BYearBegin\",\n \"BYearEnd\",\n \"BQuarterBegin\",\n (\"LastWeekOfMonth\", {\"weekday\": 2}),\n (\n \"FY5253Quarter\",\n {\n \"qtr_with_extra_week\": 1,\n \"startingMonth\": 1,\n \"weekday\": 2,\n \"variation\": \"nearest\",\n },\n ),\n (\"FY5253\", {\"weekday\": 0, \"startingMonth\": 2, \"variation\": \"nearest\"}),\n (\"WeekOfMonth\", {\"weekday\": 2, \"week\": 2}),\n \"Easter\",\n (\"DateOffset\", {\"day\": 4}),\n (\"DateOffset\", {\"month\": 5}),\n ],\n )\n @pytest.mark.parametrize(\"normalize\", [True, False])\n @pytest.mark.parametrize(\"n\", [0, 5])\n def test_dt64arr_add_sub_DateOffsets(\n self, box_with_array, n, normalize, cls_and_kwargs\n ):\n # GH#10699\n # assert vectorized operation matches pointwise operations\n\n if isinstance(cls_and_kwargs, tuple):\n # If cls_name param is a tuple, then 2nd entry is kwargs for\n # the offset constructor\n cls_name, kwargs = cls_and_kwargs\n else:\n cls_name = cls_and_kwargs\n kwargs = {}\n\n if n == 0 and cls_name in [\n \"WeekOfMonth\",\n \"LastWeekOfMonth\",\n \"FY5253Quarter\",\n \"FY5253\",\n ]:\n # passing n = 0 is invalid for these offset classes\n return\n\n vec = DatetimeIndex(\n [\n Timestamp(\"2000-01-05 00:15:00\"),\n Timestamp(\"2000-01-31 00:23:00\"),\n Timestamp(\"2000-01-01\"),\n Timestamp(\"2000-03-31\"),\n Timestamp(\"2000-02-29\"),\n Timestamp(\"2000-12-31\"),\n Timestamp(\"2000-05-15\"),\n Timestamp(\"2001-06-15\"),\n ]\n )\n vec = tm.box_expected(vec, box_with_array)\n vec_items = vec.squeeze() if box_with_array is pd.DataFrame else vec\n\n offset_cls = getattr(pd.offsets, cls_name)\n\n with warnings.catch_warnings(record=True):\n # pandas.errors.PerformanceWarning: Non-vectorized DateOffset being\n # applied to Series or DatetimeIndex\n # we aren't testing that here, so ignore.\n warnings.simplefilter(\"ignore\", PerformanceWarning)\n\n offset = offset_cls(n, normalize=normalize, **kwargs)\n\n expected = DatetimeIndex([x + offset for x in vec_items])\n expected = tm.box_expected(expected, box_with_array)\n tm.assert_equal(expected, vec + offset)\n\n expected = DatetimeIndex([x - offset for x in vec_items])\n expected = tm.box_expected(expected, box_with_array)\n tm.assert_equal(expected, vec - offset)\n\n expected = DatetimeIndex([offset + x for x in vec_items])\n expected = tm.box_expected(expected, box_with_array)\n tm.assert_equal(expected, offset + vec)\n msg = \"(bad|unsupported) operand type for unary\"\n with pytest.raises(TypeError, match=msg):\n offset - vec\n\n def test_dt64arr_add_sub_DateOffset(self, box_with_array):\n # GH#10699\n s = date_range(\"2000-01-01\", \"2000-01-31\", name=\"a\")\n s = tm.box_expected(s, box_with_array)\n result = s + pd.DateOffset(years=1)\n result2 = pd.DateOffset(years=1) + s\n exp = date_range(\"2001-01-01\", \"2001-01-31\", name=\"a\")\n exp = tm.box_expected(exp, box_with_array)\n tm.assert_equal(result, exp)\n tm.assert_equal(result2, exp)\n\n result = s - pd.DateOffset(years=1)\n exp = 
date_range(\"1999-01-01\", \"1999-01-31\", name=\"a\")\n exp = tm.box_expected(exp, box_with_array)\n tm.assert_equal(result, exp)\n\n s = DatetimeIndex(\n [\n Timestamp(\"2000-01-15 00:15:00\", tz=\"US/Central\"),\n Timestamp(\"2000-02-15\", tz=\"US/Central\"),\n ],\n name=\"a\",\n )\n s = tm.box_expected(s, box_with_array)\n result = s + pd.offsets.Day()\n result2 = pd.offsets.Day() + s\n exp = DatetimeIndex(\n [\n Timestamp(\"2000-01-16 00:15:00\", tz=\"US/Central\"),\n Timestamp(\"2000-02-16\", tz=\"US/Central\"),\n ],\n name=\"a\",\n )\n exp = tm.box_expected(exp, box_with_array)\n tm.assert_equal(result, exp)\n tm.assert_equal(result2, exp)\n\n s = DatetimeIndex(\n [\n Timestamp(\"2000-01-15 00:15:00\", tz=\"US/Central\"),\n Timestamp(\"2000-02-15\", tz=\"US/Central\"),\n ],\n name=\"a\",\n )\n s = tm.box_expected(s, box_with_array)\n result = s + pd.offsets.MonthEnd()\n result2 = pd.offsets.MonthEnd() + s\n exp = DatetimeIndex(\n [\n Timestamp(\"2000-01-31 00:15:00\", tz=\"US/Central\"),\n Timestamp(\"2000-02-29\", tz=\"US/Central\"),\n ],\n name=\"a\",\n )\n exp = tm.box_expected(exp, box_with_array)\n tm.assert_equal(result, exp)\n tm.assert_equal(result2, exp)\n\n # TODO: __sub__, __rsub__\n def test_dt64arr_add_mixed_offset_array(self, box_with_array):\n # GH#10699\n # array of offsets\n s = DatetimeIndex([Timestamp(\"2000-1-1\"), Timestamp(\"2000-2-1\")])\n s = tm.box_expected(s, box_with_array)\n\n warn = None if box_with_array is pd.DataFrame else PerformanceWarning\n with tm.assert_produces_warning(warn):\n other = pd.Index([pd.offsets.DateOffset(years=1), pd.offsets.MonthEnd()])\n other = tm.box_expected(other, box_with_array)\n result = s + other\n exp = DatetimeIndex([Timestamp(\"2001-1-1\"), Timestamp(\"2000-2-29\")])\n exp = tm.box_expected(exp, box_with_array)\n tm.assert_equal(result, exp)\n\n # same offset\n other = pd.Index(\n [pd.offsets.DateOffset(years=1), pd.offsets.DateOffset(years=1)]\n )\n other = tm.box_expected(other, box_with_array)\n result = s + other\n exp = DatetimeIndex([Timestamp(\"2001-1-1\"), Timestamp(\"2001-2-1\")])\n exp = tm.box_expected(exp, box_with_array)\n tm.assert_equal(result, exp)\n\n # TODO: overlap with test_dt64arr_add_mixed_offset_array?\n def test_dt64arr_add_sub_offset_ndarray(self, tz_naive_fixture, box_with_array):\n # GH#18849\n\n tz = tz_naive_fixture\n dti = pd.date_range(\"2017-01-01\", periods=2, tz=tz)\n dtarr = tm.box_expected(dti, box_with_array)\n\n other = np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])\n\n warn = None if box_with_array is pd.DataFrame else PerformanceWarning\n with tm.assert_produces_warning(warn):\n res = dtarr + other\n expected = DatetimeIndex(\n [dti[n] + other[n] for n in range(len(dti))], name=dti.name, freq=\"infer\"\n )\n expected = tm.box_expected(expected, box_with_array)\n tm.assert_equal(res, expected)\n\n with tm.assert_produces_warning(warn):\n res2 = other + dtarr\n tm.assert_equal(res2, expected)\n\n with tm.assert_produces_warning(warn):\n res = dtarr - other\n expected = DatetimeIndex(\n [dti[n] - other[n] for n in range(len(dti))], name=dti.name, freq=\"infer\"\n )\n expected = tm.box_expected(expected, box_with_array)\n tm.assert_equal(res, expected)\n\n @pytest.mark.parametrize(\n \"op, offset, exp, exp_freq\",\n [\n (\n \"__add__\",\n pd.DateOffset(months=3, days=10),\n [\n Timestamp(\"2014-04-11\"),\n Timestamp(\"2015-04-11\"),\n Timestamp(\"2016-04-11\"),\n Timestamp(\"2017-04-11\"),\n ],\n None,\n ),\n (\n \"__add__\",\n pd.DateOffset(months=3),\n [\n 
Timestamp(\"2014-04-01\"),\n Timestamp(\"2015-04-01\"),\n Timestamp(\"2016-04-01\"),\n Timestamp(\"2017-04-01\"),\n ],\n \"AS-APR\",\n ),\n (\n \"__sub__\",\n pd.DateOffset(months=3, days=10),\n [\n Timestamp(\"2013-09-21\"),\n Timestamp(\"2014-09-21\"),\n Timestamp(\"2015-09-21\"),\n Timestamp(\"2016-09-21\"),\n ],\n None,\n ),\n (\n \"__sub__\",\n pd.DateOffset(months=3),\n [\n Timestamp(\"2013-10-01\"),\n Timestamp(\"2014-10-01\"),\n Timestamp(\"2015-10-01\"),\n Timestamp(\"2016-10-01\"),\n ],\n \"AS-OCT\",\n ),\n ],\n )\n def test_dti_add_sub_nonzero_mth_offset(\n self, op, offset, exp, exp_freq, tz_aware_fixture, box_with_array\n ):\n # GH 26258\n tz = tz_aware_fixture\n date = date_range(start=\"01 Jan 2014\", end=\"01 Jan 2017\", freq=\"AS\", tz=tz)\n date = tm.box_expected(date, box_with_array, False)\n mth = getattr(date, op)\n result = mth(offset)\n\n expected = pd.DatetimeIndex(exp, tz=tz, freq=exp_freq)\n expected = tm.box_expected(expected, box_with_array, False)\n tm.assert_equal(result, expected)\n\n\nclass TestDatetime64OverflowHandling:\n # TODO: box + de-duplicate\n\n def test_dt64_overflow_masking(self, box_with_array):\n # GH#25317\n left = Series([Timestamp(\"1969-12-31\")])\n right = Series([NaT])\n\n left = tm.box_expected(left, box_with_array)\n right = tm.box_expected(right, box_with_array)\n\n expected = TimedeltaIndex([NaT])\n expected = tm.box_expected(expected, box_with_array)\n\n result = left - right\n tm.assert_equal(result, expected)\n\n def test_dt64_series_arith_overflow(self):\n # GH#12534, fixed by GH#19024\n dt = pd.Timestamp(\"1700-01-31\")\n td = pd.Timedelta(\"20000 Days\")\n dti = pd.date_range(\"1949-09-30\", freq=\"100Y\", periods=4)\n ser = pd.Series(dti)\n msg = \"Overflow in int64 addition\"\n with pytest.raises(OverflowError, match=msg):\n ser - dt\n with pytest.raises(OverflowError, match=msg):\n dt - ser\n with pytest.raises(OverflowError, match=msg):\n ser + td\n with pytest.raises(OverflowError, match=msg):\n td + ser\n\n ser.iloc[-1] = pd.NaT\n expected = pd.Series(\n [\"2004-10-03\", \"2104-10-04\", \"2204-10-04\", \"NaT\"], dtype=\"datetime64[ns]\"\n )\n res = ser + td\n tm.assert_series_equal(res, expected)\n res = td + ser\n tm.assert_series_equal(res, expected)\n\n ser.iloc[1:] = pd.NaT\n expected = pd.Series(\n [\"91279 Days\", \"NaT\", \"NaT\", \"NaT\"], dtype=\"timedelta64[ns]\"\n )\n res = ser - dt\n tm.assert_series_equal(res, expected)\n res = dt - ser\n tm.assert_series_equal(res, -expected)\n\n def test_datetimeindex_sub_timestamp_overflow(self):\n dtimax = pd.to_datetime([\"now\", pd.Timestamp.max])\n dtimin = pd.to_datetime([\"now\", pd.Timestamp.min])\n\n tsneg = Timestamp(\"1950-01-01\")\n ts_neg_variants = [\n tsneg,\n tsneg.to_pydatetime(),\n tsneg.to_datetime64().astype(\"datetime64[ns]\"),\n tsneg.to_datetime64().astype(\"datetime64[D]\"),\n ]\n\n tspos = Timestamp(\"1980-01-01\")\n ts_pos_variants = [\n tspos,\n tspos.to_pydatetime(),\n tspos.to_datetime64().astype(\"datetime64[ns]\"),\n tspos.to_datetime64().astype(\"datetime64[D]\"),\n ]\n msg = \"Overflow in int64 addition\"\n for variant in ts_neg_variants:\n with pytest.raises(OverflowError, match=msg):\n dtimax - variant\n\n expected = pd.Timestamp.max.value - tspos.value\n for variant in ts_pos_variants:\n res = dtimax - variant\n assert res[1].value == expected\n\n expected = pd.Timestamp.min.value - tsneg.value\n for variant in ts_neg_variants:\n res = dtimin - variant\n assert res[1].value == expected\n\n for variant in ts_pos_variants:\n with 
pytest.raises(OverflowError, match=msg):\n dtimin - variant\n\n def test_datetimeindex_sub_datetimeindex_overflow(self):\n # GH#22492, GH#22508\n dtimax = pd.to_datetime([\"now\", pd.Timestamp.max])\n dtimin = pd.to_datetime([\"now\", pd.Timestamp.min])\n\n ts_neg = pd.to_datetime([\"1950-01-01\", \"1950-01-01\"])\n ts_pos = pd.to_datetime([\"1980-01-01\", \"1980-01-01\"])\n\n # General tests\n expected = pd.Timestamp.max.value - ts_pos[1].value\n result = dtimax - ts_pos\n assert result[1].value == expected\n\n expected = pd.Timestamp.min.value - ts_neg[1].value\n result = dtimin - ts_neg\n assert result[1].value == expected\n msg = \"Overflow in int64 addition\"\n with pytest.raises(OverflowError, match=msg):\n dtimax - ts_neg\n\n with pytest.raises(OverflowError, match=msg):\n dtimin - ts_pos\n\n # Edge cases\n tmin = pd.to_datetime([pd.Timestamp.min])\n t1 = tmin + pd.Timedelta.max + pd.Timedelta(\"1us\")\n with pytest.raises(OverflowError, match=msg):\n t1 - tmin\n\n tmax = pd.to_datetime([pd.Timestamp.max])\n t2 = tmax + pd.Timedelta.min - pd.Timedelta(\"1us\")\n with pytest.raises(OverflowError, match=msg):\n tmax - t2\n\n\nclass TestTimestampSeriesArithmetic:\n def test_empty_series_add_sub(self):\n # GH#13844\n a = Series(dtype=\"M8[ns]\")\n b = Series(dtype=\"m8[ns]\")\n tm.assert_series_equal(a, a + b)\n tm.assert_series_equal(a, a - b)\n tm.assert_series_equal(a, b + a)\n msg = \"cannot subtract\"\n with pytest.raises(TypeError, match=msg):\n b - a\n\n def test_operators_datetimelike(self):\n\n # ## timedelta64 ###\n td1 = Series([timedelta(minutes=5, seconds=3)] * 3)\n td1.iloc[2] = np.nan\n\n # ## datetime64 ###\n dt1 = Series(\n [\n pd.Timestamp(\"20111230\"),\n pd.Timestamp(\"20120101\"),\n pd.Timestamp(\"20120103\"),\n ]\n )\n dt1.iloc[2] = np.nan\n dt2 = Series(\n [\n pd.Timestamp(\"20111231\"),\n pd.Timestamp(\"20120102\"),\n pd.Timestamp(\"20120104\"),\n ]\n )\n dt1 - dt2\n dt2 - dt1\n\n # datetime64 with timetimedelta\n dt1 + td1\n td1 + dt1\n dt1 - td1\n\n # timetimedelta with datetime64\n td1 + dt1\n dt1 + td1\n\n def test_dt64ser_sub_datetime_dtype(self):\n ts = Timestamp(datetime(1993, 1, 7, 13, 30, 00))\n dt = datetime(1993, 6, 22, 13, 30)\n ser = Series([ts])\n result = pd.to_timedelta(np.abs(ser - dt))\n assert result.dtype == \"timedelta64[ns]\"\n\n # -------------------------------------------------------------\n # TODO: This next block of tests came from tests.series.test_operators,\n # needs to be de-duplicated and parametrized over `box` classes\n\n def test_operators_datetimelike_invalid(self, all_arithmetic_operators):\n # these are all TypeEror ops\n op_str = all_arithmetic_operators\n\n def check(get_ser, test_ser):\n\n # check that we are getting a TypeError\n # with 'operate' (from core/ops.py) for the ops that are not\n # defined\n op = getattr(get_ser, op_str, None)\n # Previously, _validate_for_numeric_binop in core/indexes/base.py\n # did this for us.\n with pytest.raises(\n TypeError, match=\"operate|[cC]annot|unsupported operand\"\n ):\n op(test_ser)\n\n # ## timedelta64 ###\n td1 = Series([timedelta(minutes=5, seconds=3)] * 3)\n td1.iloc[2] = np.nan\n\n # ## datetime64 ###\n dt1 = Series(\n [Timestamp(\"20111230\"), Timestamp(\"20120101\"), Timestamp(\"20120103\")]\n )\n dt1.iloc[2] = np.nan\n dt2 = Series(\n [Timestamp(\"20111231\"), Timestamp(\"20120102\"), Timestamp(\"20120104\")]\n )\n if op_str not in [\"__sub__\", \"__rsub__\"]:\n check(dt1, dt2)\n\n # ## datetime64 with timetimedelta ###\n # TODO(jreback) __rsub__ should raise?\n if 
op_str not in [\"__add__\", \"__radd__\", \"__sub__\"]:\n check(dt1, td1)\n\n # 8260, 10763\n # datetime64 with tz\n tz = \"US/Eastern\"\n dt1 = Series(date_range(\"2000-01-01 09:00:00\", periods=5, tz=tz), name=\"foo\")\n dt2 = dt1.copy()\n dt2.iloc[2] = np.nan\n td1 = Series(pd.timedelta_range(\"1 days 1 min\", periods=5, freq=\"H\"))\n td2 = td1.copy()\n td2.iloc[1] = np.nan\n\n if op_str not in [\"__add__\", \"__radd__\", \"__sub__\", \"__rsub__\"]:\n check(dt2, td2)\n\n def test_sub_single_tz(self):\n # GH#12290\n s1 = Series([pd.Timestamp(\"2016-02-10\", tz=\"America/Sao_Paulo\")])\n s2 = Series([pd.Timestamp(\"2016-02-08\", tz=\"America/Sao_Paulo\")])\n result = s1 - s2\n expected = Series([Timedelta(\"2days\")])\n tm.assert_series_equal(result, expected)\n result = s2 - s1\n expected = Series([Timedelta(\"-2days\")])\n tm.assert_series_equal(result, expected)\n\n def test_dt64tz_series_sub_dtitz(self):\n # GH#19071 subtracting tzaware DatetimeIndex from tzaware Series\n # (with same tz) raises, fixed by #19024\n dti = pd.date_range(\"1999-09-30\", periods=10, tz=\"US/Pacific\")\n ser = pd.Series(dti)\n expected = pd.Series(pd.TimedeltaIndex([\"0days\"] * 10))\n\n res = dti - ser\n tm.assert_series_equal(res, expected)\n res = ser - dti\n tm.assert_series_equal(res, expected)\n\n def test_sub_datetime_compat(self):\n # see GH#14088\n s = Series([datetime(2016, 8, 23, 12, tzinfo=pytz.utc), pd.NaT])\n dt = datetime(2016, 8, 22, 12, tzinfo=pytz.utc)\n exp = Series([Timedelta(\"1 days\"), pd.NaT])\n tm.assert_series_equal(s - dt, exp)\n tm.assert_series_equal(s - Timestamp(dt), exp)\n\n def test_dt64_series_add_mixed_tick_DateOffset(self):\n # GH#4532\n # operate with pd.offsets\n s = Series([Timestamp(\"20130101 9:01\"), Timestamp(\"20130101 9:02\")])\n\n result = s + pd.offsets.Milli(5)\n result2 = pd.offsets.Milli(5) + s\n expected = Series(\n [Timestamp(\"20130101 9:01:00.005\"), Timestamp(\"20130101 9:02:00.005\")]\n )\n tm.assert_series_equal(result, expected)\n tm.assert_series_equal(result2, expected)\n\n result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)\n expected = Series(\n [Timestamp(\"20130101 9:06:00.005\"), Timestamp(\"20130101 9:07:00.005\")]\n )\n tm.assert_series_equal(result, expected)\n\n def test_datetime64_ops_nat(self):\n # GH#11349\n datetime_series = Series([NaT, Timestamp(\"19900315\")])\n nat_series_dtype_timestamp = Series([NaT, NaT], dtype=\"datetime64[ns]\")\n single_nat_dtype_datetime = Series([NaT], dtype=\"datetime64[ns]\")\n\n # subtraction\n tm.assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp)\n msg = \"Unary negative expects\"\n with pytest.raises(TypeError, match=msg):\n -single_nat_dtype_datetime + datetime_series\n\n tm.assert_series_equal(\n -NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp\n )\n with pytest.raises(TypeError, match=msg):\n -single_nat_dtype_datetime + nat_series_dtype_timestamp\n\n # addition\n tm.assert_series_equal(\n nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp\n )\n tm.assert_series_equal(\n NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp\n )\n\n tm.assert_series_equal(\n nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp\n )\n tm.assert_series_equal(\n NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp\n )\n\n # -------------------------------------------------------------\n # Invalid Operations\n # TODO: this block also needs to be de-duplicated and parametrized\n\n @pytest.mark.parametrize(\n \"dt64_series\",\n [\n 
Series([Timestamp(\"19900315\"), Timestamp(\"19900315\")]),\n Series([pd.NaT, Timestamp(\"19900315\")]),\n Series([pd.NaT, pd.NaT], dtype=\"datetime64[ns]\"),\n ],\n )\n @pytest.mark.parametrize(\"one\", [1, 1.0, np.array(1)])\n def test_dt64_mul_div_numeric_invalid(self, one, dt64_series):\n # multiplication\n msg = \"cannot perform .* with this index type\"\n with pytest.raises(TypeError, match=msg):\n dt64_series * one\n with pytest.raises(TypeError, match=msg):\n one * dt64_series\n\n # division\n with pytest.raises(TypeError, match=msg):\n dt64_series / one\n with pytest.raises(TypeError, match=msg):\n one / dt64_series\n\n # TODO: parametrize over box\n @pytest.mark.parametrize(\"op\", [\"__add__\", \"__radd__\", \"__sub__\", \"__rsub__\"])\n @pytest.mark.parametrize(\"tz\", [None, \"Asia/Tokyo\"])\n def test_dt64_series_add_intlike(self, tz, op):\n # GH#19123\n dti = pd.DatetimeIndex([\"2016-01-02\", \"2016-02-03\", \"NaT\"], tz=tz)\n ser = Series(dti)\n\n other = Series([20, 30, 40], dtype=\"uint8\")\n\n method = getattr(ser, op)\n msg = \"|\".join(\n [\n \"Addition/subtraction of integers and integer-arrays\",\n \"cannot subtract .* from ndarray\",\n ]\n )\n with pytest.raises(TypeError, match=msg):\n method(1)\n with pytest.raises(TypeError, match=msg):\n method(other)\n with pytest.raises(TypeError, match=msg):\n method(np.array(other))\n with pytest.raises(TypeError, match=msg):\n method(pd.Index(other))\n\n # -------------------------------------------------------------\n # Timezone-Centric Tests\n\n def test_operators_datetimelike_with_timezones(self):\n tz = \"US/Eastern\"\n dt1 = Series(date_range(\"2000-01-01 09:00:00\", periods=5, tz=tz), name=\"foo\")\n dt2 = dt1.copy()\n dt2.iloc[2] = np.nan\n\n td1 = Series(pd.timedelta_range(\"1 days 1 min\", periods=5, freq=\"H\"))\n td2 = td1.copy()\n td2.iloc[1] = np.nan\n\n result = dt1 + td1[0]\n exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)\n tm.assert_series_equal(result, exp)\n\n result = dt2 + td2[0]\n exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)\n tm.assert_series_equal(result, exp)\n\n # odd numpy behavior with scalar timedeltas\n result = td1[0] + dt1\n exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)\n tm.assert_series_equal(result, exp)\n\n result = td2[0] + dt2\n exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)\n tm.assert_series_equal(result, exp)\n\n result = dt1 - td1[0]\n exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)\n tm.assert_series_equal(result, exp)\n msg = \"(bad|unsupported) operand type for unary\"\n with pytest.raises(TypeError, match=msg):\n td1[0] - dt1\n\n result = dt2 - td2[0]\n exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)\n tm.assert_series_equal(result, exp)\n with pytest.raises(TypeError, match=msg):\n td2[0] - dt2\n\n result = dt1 + td1\n exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz)\n tm.assert_series_equal(result, exp)\n\n result = dt2 + td2\n exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz)\n tm.assert_series_equal(result, exp)\n\n result = dt1 - td1\n exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz)\n tm.assert_series_equal(result, exp)\n\n result = dt2 - td2\n exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)\n tm.assert_series_equal(result, exp)\n msg = \"cannot (add|subtract)\"\n with pytest.raises(TypeError, match=msg):\n td1 - dt1\n with pytest.raises(TypeError, match=msg):\n td2 - dt2\n\n\nclass TestDatetimeIndexArithmetic:\n\n # 
-------------------------------------------------------------\n # Binary operations DatetimeIndex and int\n\n def test_dti_addsub_int(self, tz_naive_fixture, one):\n # Variants of `one` for #19012\n tz = tz_naive_fixture\n rng = pd.date_range(\"2000-01-01 09:00\", freq=\"H\", periods=10, tz=tz)\n msg = \"Addition/subtraction of integers\"\n\n with pytest.raises(TypeError, match=msg):\n rng + one\n with pytest.raises(TypeError, match=msg):\n rng += one\n with pytest.raises(TypeError, match=msg):\n rng - one\n with pytest.raises(TypeError, match=msg):\n rng -= one\n\n # -------------------------------------------------------------\n # __add__/__sub__ with integer arrays\n\n @pytest.mark.parametrize(\"freq\", [\"H\", \"D\"])\n @pytest.mark.parametrize(\"int_holder\", [np.array, pd.Index])\n def test_dti_add_intarray_tick(self, int_holder, freq):\n # GH#19959\n dti = pd.date_range(\"2016-01-01\", periods=2, freq=freq)\n other = int_holder([4, -1])\n\n msg = \"Addition/subtraction of integers|cannot subtract DatetimeArray from\"\n assert_invalid_addsub_type(dti, other, msg)\n\n @pytest.mark.parametrize(\"freq\", [\"W\", \"M\", \"MS\", \"Q\"])\n @pytest.mark.parametrize(\"int_holder\", [np.array, pd.Index])\n def test_dti_add_intarray_non_tick(self, int_holder, freq):\n # GH#19959\n dti = pd.date_range(\"2016-01-01\", periods=2, freq=freq)\n other = int_holder([4, -1])\n\n msg = \"Addition/subtraction of integers|cannot subtract DatetimeArray from\"\n assert_invalid_addsub_type(dti, other, msg)\n\n @pytest.mark.parametrize(\"int_holder\", [np.array, pd.Index])\n def test_dti_add_intarray_no_freq(self, int_holder):\n # GH#19959\n dti = pd.DatetimeIndex([\"2016-01-01\", \"NaT\", \"2017-04-05 06:07:08\"])\n other = int_holder([9, 4, -1])\n msg = \"|\".join(\n [\"cannot subtract DatetimeArray from\", \"Addition/subtraction of integers\"]\n )\n assert_invalid_addsub_type(dti, other, msg)\n\n # -------------------------------------------------------------\n # Binary operations DatetimeIndex and TimedeltaIndex/array\n\n def test_dti_add_tdi(self, tz_naive_fixture):\n # GH#17558\n tz = tz_naive_fixture\n dti = DatetimeIndex([Timestamp(\"2017-01-01\", tz=tz)] * 10)\n tdi = pd.timedelta_range(\"0 days\", periods=10)\n expected = pd.date_range(\"2017-01-01\", periods=10, tz=tz)\n\n # add with TimdeltaIndex\n result = dti + tdi\n tm.assert_index_equal(result, expected)\n\n result = tdi + dti\n tm.assert_index_equal(result, expected)\n\n # add with timedelta64 array\n result = dti + tdi.values\n tm.assert_index_equal(result, expected)\n\n result = tdi.values + dti\n tm.assert_index_equal(result, expected)\n\n def test_dti_iadd_tdi(self, tz_naive_fixture):\n # GH#17558\n tz = tz_naive_fixture\n dti = DatetimeIndex([Timestamp(\"2017-01-01\", tz=tz)] * 10)\n tdi = pd.timedelta_range(\"0 days\", periods=10)\n expected = pd.date_range(\"2017-01-01\", periods=10, tz=tz)\n\n # iadd with TimdeltaIndex\n result = DatetimeIndex([Timestamp(\"2017-01-01\", tz=tz)] * 10)\n result += tdi\n tm.assert_index_equal(result, expected)\n\n result = pd.timedelta_range(\"0 days\", periods=10)\n result += dti\n tm.assert_index_equal(result, expected)\n\n # iadd with timedelta64 array\n result = DatetimeIndex([Timestamp(\"2017-01-01\", tz=tz)] * 10)\n result += tdi.values\n tm.assert_index_equal(result, expected)\n\n result = pd.timedelta_range(\"0 days\", periods=10)\n result += dti\n tm.assert_index_equal(result, expected)\n\n def test_dti_sub_tdi(self, tz_naive_fixture):\n # GH#17558\n tz = tz_naive_fixture\n dti = 
DatetimeIndex([Timestamp(\"2017-01-01\", tz=tz)] * 10)\n tdi = pd.timedelta_range(\"0 days\", periods=10)\n expected = pd.date_range(\"2017-01-01\", periods=10, tz=tz, freq=\"-1D\")\n\n # sub with TimedeltaIndex\n result = dti - tdi\n tm.assert_index_equal(result, expected)\n\n msg = \"cannot subtract .*TimedeltaArray\"\n with pytest.raises(TypeError, match=msg):\n tdi - dti\n\n # sub with timedelta64 array\n result = dti - tdi.values\n tm.assert_index_equal(result, expected)\n\n msg = \"cannot subtract DatetimeArray from\"\n with pytest.raises(TypeError, match=msg):\n tdi.values - dti\n\n def test_dti_isub_tdi(self, tz_naive_fixture):\n # GH#17558\n tz = tz_naive_fixture\n dti = DatetimeIndex([Timestamp(\"2017-01-01\", tz=tz)] * 10)\n tdi = pd.timedelta_range(\"0 days\", periods=10)\n expected = pd.date_range(\"2017-01-01\", periods=10, tz=tz, freq=\"-1D\")\n\n # isub with TimedeltaIndex\n result = DatetimeIndex([Timestamp(\"2017-01-01\", tz=tz)] * 10)\n result -= tdi\n tm.assert_index_equal(result, expected)\n\n msg = \"cannot subtract .* from a TimedeltaArray\"\n with pytest.raises(TypeError, match=msg):\n tdi -= dti\n\n # isub with timedelta64 array\n result = DatetimeIndex([Timestamp(\"2017-01-01\", tz=tz)] * 10)\n result -= tdi.values\n tm.assert_index_equal(result, expected)\n\n msg = \"|\".join(\n [\n \"cannot perform __neg__ with this index type:\",\n \"ufunc subtract cannot use operands with types\",\n \"cannot subtract DatetimeArray from\",\n ]\n )\n with pytest.raises(TypeError, match=msg):\n tdi.values -= dti\n\n # -------------------------------------------------------------\n # Binary Operations DatetimeIndex and datetime-like\n # TODO: A couple other tests belong in this section. Move them in\n # A PR where there isn't already a giant diff.\n\n @pytest.mark.parametrize(\n \"addend\",\n [\n datetime(2011, 1, 1),\n DatetimeIndex([\"2011-01-01\", \"2011-01-02\"]),\n DatetimeIndex([\"2011-01-01\", \"2011-01-02\"]).tz_localize(\"US/Eastern\"),\n np.datetime64(\"2011-01-01\"),\n Timestamp(\"2011-01-01\"),\n ],\n ids=lambda x: type(x).__name__,\n )\n @pytest.mark.parametrize(\"tz\", [None, \"US/Eastern\"])\n def test_add_datetimelike_and_dtarr(self, box_with_array, addend, tz):\n # GH#9631\n dti = DatetimeIndex([\"2011-01-01\", \"2011-01-02\"]).tz_localize(tz)\n dtarr = tm.box_expected(dti, box_with_array)\n msg = \"cannot add DatetimeArray and\"\n\n with pytest.raises(TypeError, match=msg):\n dtarr + addend\n with pytest.raises(TypeError, match=msg):\n addend + dtarr\n\n # -------------------------------------------------------------\n\n def test_dta_add_sub_index(self, tz_naive_fixture):\n # Check that DatetimeArray defers to Index classes\n dti = date_range(\"20130101\", periods=3, tz=tz_naive_fixture)\n dta = dti.array\n result = dta - dti\n expected = dti - dti\n tm.assert_index_equal(result, expected)\n\n tdi = result\n result = dta + tdi\n expected = dti + tdi\n tm.assert_index_equal(result, expected)\n\n result = dta - tdi\n expected = dti - tdi\n tm.assert_index_equal(result, expected)\n\n def test_sub_dti_dti(self):\n # previously performed setop (deprecated in 0.16.0), now changed to\n # return subtraction -> TimeDeltaIndex (GH ...)\n\n dti = date_range(\"20130101\", periods=3)\n dti_tz = date_range(\"20130101\", periods=3).tz_localize(\"US/Eastern\")\n dti_tz2 = date_range(\"20130101\", periods=3).tz_localize(\"UTC\")\n expected = TimedeltaIndex([0, 0, 0])\n\n result = dti - dti\n tm.assert_index_equal(result, expected)\n\n result = dti_tz - dti_tz\n 
tm.assert_index_equal(result, expected)\n msg = \"DatetimeArray subtraction must have the same timezones or\"\n with pytest.raises(TypeError, match=msg):\n dti_tz - dti\n\n with pytest.raises(TypeError, match=msg):\n dti - dti_tz\n\n with pytest.raises(TypeError, match=msg):\n dti_tz - dti_tz2\n\n # isub\n dti -= dti\n tm.assert_index_equal(dti, expected)\n\n # different length raises ValueError\n dti1 = date_range(\"20130101\", periods=3)\n dti2 = date_range(\"20130101\", periods=4)\n msg = \"cannot add indices of unequal length\"\n with pytest.raises(ValueError, match=msg):\n dti1 - dti2\n\n # NaN propagation\n dti1 = DatetimeIndex([\"2012-01-01\", np.nan, \"2012-01-03\"])\n dti2 = DatetimeIndex([\"2012-01-02\", \"2012-01-03\", np.nan])\n expected = TimedeltaIndex([\"1 days\", np.nan, np.nan])\n result = dti2 - dti1\n tm.assert_index_equal(result, expected)\n\n # -------------------------------------------------------------------\n # TODO: Most of this block is moved from series or frame tests, needs\n # cleanup, box-parametrization, and de-duplication\n\n @pytest.mark.parametrize(\"op\", [operator.add, operator.sub])\n def test_timedelta64_equal_timedelta_supported_ops(self, op):\n ser = Series(\n [\n Timestamp(\"20130301\"),\n Timestamp(\"20130228 23:00:00\"),\n Timestamp(\"20130228 22:00:00\"),\n Timestamp(\"20130228 21:00:00\"),\n ]\n )\n\n intervals = [\"D\", \"h\", \"m\", \"s\", \"us\"]\n\n def timedelta64(*args):\n # see casting notes in NumPy gh-12927\n return np.sum(list(starmap(np.timedelta64, zip(args, intervals))))\n\n for d, h, m, s, us in product(*([range(2)] * 5)):\n nptd = timedelta64(d, h, m, s, us)\n pytd = timedelta(days=d, hours=h, minutes=m, seconds=s, microseconds=us)\n lhs = op(ser, nptd)\n rhs = op(ser, pytd)\n\n tm.assert_series_equal(lhs, rhs)\n\n def test_ops_nat_mixed_datetime64_timedelta64(self):\n # GH#11349\n timedelta_series = Series([NaT, Timedelta(\"1s\")])\n datetime_series = Series([NaT, Timestamp(\"19900315\")])\n nat_series_dtype_timedelta = Series([NaT, NaT], dtype=\"timedelta64[ns]\")\n nat_series_dtype_timestamp = Series([NaT, NaT], dtype=\"datetime64[ns]\")\n single_nat_dtype_datetime = Series([NaT], dtype=\"datetime64[ns]\")\n single_nat_dtype_timedelta = Series([NaT], dtype=\"timedelta64[ns]\")\n\n # subtraction\n tm.assert_series_equal(\n datetime_series - single_nat_dtype_datetime, nat_series_dtype_timedelta\n )\n\n tm.assert_series_equal(\n datetime_series - single_nat_dtype_timedelta, nat_series_dtype_timestamp\n )\n tm.assert_series_equal(\n -single_nat_dtype_timedelta + datetime_series, nat_series_dtype_timestamp\n )\n\n # without a Series wrapping the NaT, it is ambiguous\n # whether it is a datetime64 or timedelta64\n # defaults to interpreting it as timedelta64\n tm.assert_series_equal(\n nat_series_dtype_timestamp - single_nat_dtype_datetime,\n nat_series_dtype_timedelta,\n )\n\n tm.assert_series_equal(\n nat_series_dtype_timestamp - single_nat_dtype_timedelta,\n nat_series_dtype_timestamp,\n )\n tm.assert_series_equal(\n -single_nat_dtype_timedelta + nat_series_dtype_timestamp,\n nat_series_dtype_timestamp,\n )\n msg = \"cannot subtract a datelike\"\n with pytest.raises(TypeError, match=msg):\n timedelta_series - single_nat_dtype_datetime\n\n # addition\n tm.assert_series_equal(\n nat_series_dtype_timestamp + single_nat_dtype_timedelta,\n nat_series_dtype_timestamp,\n )\n tm.assert_series_equal(\n single_nat_dtype_timedelta + nat_series_dtype_timestamp,\n nat_series_dtype_timestamp,\n )\n\n tm.assert_series_equal(\n 
nat_series_dtype_timestamp + single_nat_dtype_timedelta,\n nat_series_dtype_timestamp,\n )\n tm.assert_series_equal(\n single_nat_dtype_timedelta + nat_series_dtype_timestamp,\n nat_series_dtype_timestamp,\n )\n\n tm.assert_series_equal(\n nat_series_dtype_timedelta + single_nat_dtype_datetime,\n nat_series_dtype_timestamp,\n )\n tm.assert_series_equal(\n single_nat_dtype_datetime + nat_series_dtype_timedelta,\n nat_series_dtype_timestamp,\n )\n\n def test_ufunc_coercions(self):\n idx = date_range(\"2011-01-01\", periods=3, freq=\"2D\", name=\"x\")\n\n delta = np.timedelta64(1, \"D\")\n exp = date_range(\"2011-01-02\", periods=3, freq=\"2D\", name=\"x\")\n for result in [idx + delta, np.add(idx, delta)]:\n assert isinstance(result, DatetimeIndex)\n tm.assert_index_equal(result, exp)\n assert result.freq == \"2D\"\n\n exp = date_range(\"2010-12-31\", periods=3, freq=\"2D\", name=\"x\")\n for result in [idx - delta, np.subtract(idx, delta)]:\n assert isinstance(result, DatetimeIndex)\n tm.assert_index_equal(result, exp)\n assert result.freq == \"2D\"\n\n delta = np.array(\n [np.timedelta64(1, \"D\"), np.timedelta64(2, \"D\"), np.timedelta64(3, \"D\")]\n )\n exp = DatetimeIndex(\n [\"2011-01-02\", \"2011-01-05\", \"2011-01-08\"], freq=\"3D\", name=\"x\"\n )\n for result in [idx + delta, np.add(idx, delta)]:\n assert isinstance(result, DatetimeIndex)\n tm.assert_index_equal(result, exp)\n assert result.freq == \"3D\"\n\n exp = DatetimeIndex(\n [\"2010-12-31\", \"2011-01-01\", \"2011-01-02\"], freq=\"D\", name=\"x\"\n )\n for result in [idx - delta, np.subtract(idx, delta)]:\n assert isinstance(result, DatetimeIndex)\n tm.assert_index_equal(result, exp)\n assert result.freq == \"D\"\n\n @pytest.mark.parametrize(\n \"names\", [(\"foo\", None, None), (\"baz\", \"bar\", None), (\"bar\", \"bar\", \"bar\")]\n )\n @pytest.mark.parametrize(\"tz\", [None, \"America/Chicago\"])\n def test_dti_add_series(self, tz, names):\n # GH#13905\n index = DatetimeIndex(\n [\"2016-06-28 05:30\", \"2016-06-28 05:31\"], tz=tz, name=names[0]\n )\n ser = Series([Timedelta(seconds=5)] * 2, index=index, name=names[1])\n expected = Series(index + Timedelta(seconds=5), index=index, name=names[2])\n\n # passing name arg isn't enough when names[2] is None\n expected.name = names[2]\n assert expected.dtype == index.dtype\n result = ser + index\n tm.assert_series_equal(result, expected)\n result2 = index + ser\n tm.assert_series_equal(result2, expected)\n\n expected = index + Timedelta(seconds=5)\n result3 = ser.values + index\n tm.assert_index_equal(result3, expected)\n result4 = index + ser.values\n tm.assert_index_equal(result4, expected)\n\n @pytest.mark.parametrize(\"op\", [operator.add, roperator.radd, operator.sub])\n @pytest.mark.parametrize(\n \"names\", [(None, None, None), (\"foo\", \"bar\", None), (\"foo\", \"foo\", \"foo\")]\n )\n def test_dti_addsub_offset_arraylike(\n self, tz_naive_fixture, names, op, index_or_series\n ):\n # GH#18849, GH#19744\n box = pd.Index\n other_box = index_or_series\n\n tz = tz_naive_fixture\n dti = pd.date_range(\"2017-01-01\", periods=2, tz=tz, name=names[0])\n other = other_box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)], name=names[1])\n\n xbox = get_upcast_box(box, other)\n\n with tm.assert_produces_warning(PerformanceWarning):\n res = op(dti, other)\n\n expected = DatetimeIndex(\n [op(dti[n], other[n]) for n in range(len(dti))], name=names[2], freq=\"infer\"\n )\n expected = tm.box_expected(expected, xbox)\n tm.assert_equal(res, expected)\n\n 
@pytest.mark.parametrize(\"other_box\", [pd.Index, np.array])\n def test_dti_addsub_object_arraylike(\n self, tz_naive_fixture, box_with_array, other_box\n ):\n tz = tz_naive_fixture\n\n dti = pd.date_range(\"2017-01-01\", periods=2, tz=tz)\n dtarr = tm.box_expected(dti, box_with_array)\n other = other_box([pd.offsets.MonthEnd(), pd.Timedelta(days=4)])\n xbox = get_upcast_box(box_with_array, other)\n\n expected = pd.DatetimeIndex([\"2017-01-31\", \"2017-01-06\"], tz=tz_naive_fixture)\n expected = tm.box_expected(expected, xbox)\n\n warn = None if box_with_array is pd.DataFrame else PerformanceWarning\n with tm.assert_produces_warning(warn):\n result = dtarr + other\n tm.assert_equal(result, expected)\n\n expected = pd.DatetimeIndex([\"2016-12-31\", \"2016-12-29\"], tz=tz_naive_fixture)\n expected = tm.box_expected(expected, xbox)\n\n with tm.assert_produces_warning(warn):\n result = dtarr - other\n tm.assert_equal(result, expected)\n\n\[email protected](\"years\", [-1, 0, 1])\[email protected](\"months\", [-2, 0, 2])\ndef test_shift_months(years, months):\n dti = DatetimeIndex(\n [\n Timestamp(\"2000-01-05 00:15:00\"),\n Timestamp(\"2000-01-31 00:23:00\"),\n Timestamp(\"2000-01-01\"),\n Timestamp(\"2000-02-29\"),\n Timestamp(\"2000-12-31\"),\n ]\n )\n actual = DatetimeIndex(shift_months(dti.asi8, years * 12 + months))\n\n raw = [x + pd.offsets.DateOffset(years=years, months=months) for x in dti]\n expected = DatetimeIndex(raw)\n tm.assert_index_equal(actual, expected)\n\n\ndef test_dt64arr_addsub_object_dtype_2d():\n # block-wise DataFrame operations will require operating on 2D\n # DatetimeArray/TimedeltaArray, so check that specifically.\n dti = pd.date_range(\"1994-02-13\", freq=\"2W\", periods=4)\n dta = dti._data.reshape((4, 1))\n\n other = np.array([[pd.offsets.Day(n)] for n in range(4)])\n assert other.shape == dta.shape\n\n with tm.assert_produces_warning(PerformanceWarning):\n result = dta + other\n with tm.assert_produces_warning(PerformanceWarning):\n expected = (dta[:, 0] + other[:, 0]).reshape(-1, 1)\n\n assert isinstance(result, DatetimeArray)\n assert result.freq is None\n tm.assert_numpy_array_equal(result._data, expected._data)\n\n with tm.assert_produces_warning(PerformanceWarning):\n # Case where we expect to get a TimedeltaArray back\n result2 = dta - dta.astype(object)\n\n assert isinstance(result2, TimedeltaArray)\n assert result2.shape == (4, 1)\n assert result2.freq is None\n assert (result2.asi8 == 0).all()\n", "from itertools import product\n\nimport numpy as np\nimport pytest\n\nfrom pandas import DataFrame, NaT, date_range\nimport pandas._testing as tm\n\n\[email protected](params=product([True, False], [True, False]))\ndef close_open_fixture(request):\n return request.param\n\n\[email protected]\ndef float_frame_with_na():\n \"\"\"\n Fixture for DataFrame of floats with index of unique strings\n\n Columns are ['A', 'B', 'C', 'D']; some entries are missing\n\n A B C D\n ABwBzA0ljw -1.128865 -0.897161 0.046603 0.274997\n DJiRzmbyQF 0.728869 0.233502 0.722431 -0.890872\n neMgPD5UBF 0.486072 -1.027393 -0.031553 1.449522\n 0yWA4n8VeX -1.937191 -1.142531 0.805215 -0.462018\n 3slYUbbqU1 0.153260 1.164691 1.489795 -0.545826\n soujjZ0A08 NaN NaN NaN NaN\n 7W6NLGsjB9 NaN NaN NaN NaN\n ... ... ... ... 
...\n uhfeaNkCR1 -0.231210 -0.340472 0.244717 -0.901590\n n6p7GYuBIV -0.419052 1.922721 -0.125361 -0.727717\n ZhzAeY6p1y 1.234374 -1.425359 -0.827038 -0.633189\n uWdPsORyUh 0.046738 -0.980445 -1.102965 0.605503\n 3DJA6aN590 -0.091018 -1.684734 -1.100900 0.215947\n 2GBPAzdbMk -2.883405 -1.021071 1.209877 1.633083\n sHadBoyVHw -2.223032 -0.326384 0.258931 0.245517\n\n [30 rows x 4 columns]\n \"\"\"\n df = DataFrame(tm.getSeriesData())\n # set some NAs\n df.loc[5:10] = np.nan\n df.loc[15:20, -2:] = np.nan\n return df\n\n\[email protected]\ndef bool_frame_with_na():\n \"\"\"\n Fixture for DataFrame of booleans with index of unique strings\n\n Columns are ['A', 'B', 'C', 'D']; some entries are missing\n\n A B C D\n zBZxY2IDGd False False False False\n IhBWBMWllt False True True True\n ctjdvZSR6R True False True True\n AVTujptmxb False True False True\n G9lrImrSWq False False False True\n sFFwdIUfz2 NaN NaN NaN NaN\n s15ptEJnRb NaN NaN NaN NaN\n ... ... ... ... ...\n UW41KkDyZ4 True True False False\n l9l6XkOdqV True False False False\n X2MeZfzDYA False True False False\n xWkIKU7vfX False True False True\n QOhL6VmpGU False False False True\n 22PwkRJdat False True False False\n kfboQ3VeIK True False True False\n\n [30 rows x 4 columns]\n \"\"\"\n df = DataFrame(tm.getSeriesData()) > 0\n df = df.astype(object)\n # set some NAs\n df.loc[5:10] = np.nan\n df.loc[15:20, -2:] = np.nan\n return df\n\n\[email protected]\ndef int_frame():\n \"\"\"\n Fixture for DataFrame of ints with index of unique strings\n\n Columns are ['A', 'B', 'C', 'D']\n\n A B C D\n vpBeWjM651 1 0 1 0\n 5JyxmrP1En -1 0 0 0\n qEDaoD49U2 -1 1 0 0\n m66TkTfsFe 0 0 0 0\n EHPaNzEUFm -1 0 -1 0\n fpRJCevQhi 2 0 0 0\n OlQvnmfi3Q 0 0 -2 0\n ... .. .. .. ..\n uB1FPlz4uP 0 0 0 1\n EcSe6yNzCU 0 0 -1 0\n L50VudaiI8 -1 1 -2 0\n y3bpw4nwIp 0 -1 0 0\n H0RdLLwrCT 1 1 0 0\n rY82K0vMwm 0 0 0 0\n 1OPIUjnkjk 2 0 0 0\n\n [30 rows x 4 columns]\n \"\"\"\n df = DataFrame({k: v.astype(int) for k, v in tm.getSeriesData().items()})\n # force these all to int64 to avoid platform testing issues\n return DataFrame({c: s for c, s in df.items()}, dtype=np.int64)\n\n\[email protected]\ndef datetime_frame():\n \"\"\"\n Fixture for DataFrame of floats with DatetimeIndex\n\n Columns are ['A', 'B', 'C', 'D']\n\n A B C D\n 2000-01-03 -1.122153 0.468535 0.122226 1.693711\n 2000-01-04 0.189378 0.486100 0.007864 -1.216052\n 2000-01-05 0.041401 -0.835752 -0.035279 -0.414357\n 2000-01-06 0.430050 0.894352 0.090719 0.036939\n 2000-01-07 -0.620982 -0.668211 -0.706153 1.466335\n 2000-01-10 -0.752633 0.328434 -0.815325 0.699674\n 2000-01-11 -2.236969 0.615737 -0.829076 -1.196106\n ... ... ... ... 
...\n 2000-02-03 1.642618 -0.579288 0.046005 1.385249\n 2000-02-04 -0.544873 -1.160962 -0.284071 -1.418351\n 2000-02-07 -2.656149 -0.601387 1.410148 0.444150\n 2000-02-08 -1.201881 -1.289040 0.772992 -1.445300\n 2000-02-09 1.377373 0.398619 1.008453 -0.928207\n 2000-02-10 0.473194 -0.636677 0.984058 0.511519\n 2000-02-11 -0.965556 0.408313 -1.312844 -0.381948\n\n [30 rows x 4 columns]\n \"\"\"\n return DataFrame(tm.getTimeSeriesData())\n\n\[email protected]\ndef float_string_frame():\n \"\"\"\n Fixture for DataFrame of floats and strings with index of unique strings\n\n Columns are ['A', 'B', 'C', 'D', 'foo'].\n\n A B C D foo\n w3orJvq07g -1.594062 -1.084273 -1.252457 0.356460 bar\n PeukuVdmz2 0.109855 -0.955086 -0.809485 0.409747 bar\n ahp2KvwiM8 -1.533729 -0.142519 -0.154666 1.302623 bar\n 3WSJ7BUCGd 2.484964 0.213829 0.034778 -2.327831 bar\n khdAmufk0U -0.193480 -0.743518 -0.077987 0.153646 bar\n LE2DZiFlrE -0.193566 -1.343194 -0.107321 0.959978 bar\n HJXSJhVn7b 0.142590 1.257603 -0.659409 -0.223844 bar\n ... ... ... ... ... ...\n 9a1Vypttgw -1.316394 1.601354 0.173596 1.213196 bar\n h5d1gVFbEy 0.609475 1.106738 -0.155271 0.294630 bar\n mK9LsTQG92 1.303613 0.857040 -1.019153 0.369468 bar\n oOLksd9gKH 0.558219 -0.134491 -0.289869 -0.951033 bar\n 9jgoOjKyHg 0.058270 -0.496110 -0.413212 -0.852659 bar\n jZLDHclHAO 0.096298 1.267510 0.549206 -0.005235 bar\n lR0nxDp1C2 -2.119350 -0.794384 0.544118 0.145849 bar\n\n [30 rows x 5 columns]\n \"\"\"\n df = DataFrame(tm.getSeriesData())\n df[\"foo\"] = \"bar\"\n return df\n\n\[email protected]\ndef mixed_float_frame():\n \"\"\"\n Fixture for DataFrame of different float types with index of unique strings\n\n Columns are ['A', 'B', 'C', 'D'].\n\n A B C D\n GI7bbDaEZe -0.237908 -0.246225 -0.468506 0.752993\n KGp9mFepzA -1.140809 -0.644046 -1.225586 0.801588\n VeVYLAb1l2 -1.154013 -1.677615 0.690430 -0.003731\n kmPME4WKhO 0.979578 0.998274 -0.776367 0.897607\n CPyopdXTiz 0.048119 -0.257174 0.836426 0.111266\n 0kJZQndAj0 0.274357 -0.281135 -0.344238 0.834541\n tqdwQsaHG8 -0.979716 -0.519897 0.582031 0.144710\n ... ... ... ... ...\n 7FhZTWILQj -2.906357 1.261039 -0.780273 -0.537237\n 4pUDPM4eGq -2.042512 -0.464382 -0.382080 1.132612\n B8dUgUzwTi -1.506637 -0.364435 1.087891 0.297653\n hErlVYjVv9 1.477453 -0.495515 -0.713867 1.438427\n 1BKN3o7YLs 0.127535 -0.349812 -0.881836 0.489827\n 9S4Ekn7zga 1.445518 -2.095149 0.031982 0.373204\n xN1dNn6OV6 1.425017 -0.983995 -0.363281 -0.224502\n\n [30 rows x 4 columns]\n \"\"\"\n df = DataFrame(tm.getSeriesData())\n df.A = df.A.astype(\"float32\")\n df.B = df.B.astype(\"float32\")\n df.C = df.C.astype(\"float16\")\n df.D = df.D.astype(\"float64\")\n return df\n\n\[email protected]\ndef mixed_int_frame():\n \"\"\"\n Fixture for DataFrame of different int types with index of unique strings\n\n Columns are ['A', 'B', 'C', 'D'].\n\n A B C D\n mUrCZ67juP 0 1 2 2\n rw99ACYaKS 0 1 0 0\n 7QsEcpaaVU 0 1 1 1\n xkrimI2pcE 0 1 0 0\n dz01SuzoS8 0 1 255 255\n ccQkqOHX75 -1 1 0 0\n DN0iXaoDLd 0 1 0 0\n ... .. .. ... 
...\n Dfb141wAaQ 1 1 254 254\n IPD8eQOVu5 0 1 0 0\n CcaKulsCmv 0 1 0 0\n rIBa8gu7E5 0 1 0 0\n RP6peZmh5o 0 1 1 1\n NMb9pipQWQ 0 1 0 0\n PqgbJEzjib 0 1 3 3\n\n [30 rows x 4 columns]\n \"\"\"\n df = DataFrame({k: v.astype(int) for k, v in tm.getSeriesData().items()})\n df.A = df.A.astype(\"int32\")\n df.B = np.ones(len(df.B), dtype=\"uint64\")\n df.C = df.C.astype(\"uint8\")\n df.D = df.C.astype(\"int64\")\n return df\n\n\[email protected]\ndef mixed_type_frame():\n \"\"\"\n Fixture for DataFrame of float/int/string columns with RangeIndex\n Columns are ['a', 'b', 'c', 'float32', 'int32'].\n \"\"\"\n return DataFrame(\n {\n \"a\": 1.0,\n \"b\": 2,\n \"c\": \"foo\",\n \"float32\": np.array([1.0] * 10, dtype=\"float32\"),\n \"int32\": np.array([1] * 10, dtype=\"int32\"),\n },\n index=np.arange(10),\n )\n\n\[email protected]\ndef timezone_frame():\n \"\"\"\n Fixture for DataFrame of date_range Series with different time zones\n\n Columns are ['A', 'B', 'C']; some entries are missing\n\n A B C\n 0 2013-01-01 2013-01-01 00:00:00-05:00 2013-01-01 00:00:00+01:00\n 1 2013-01-02 NaT NaT\n 2 2013-01-03 2013-01-03 00:00:00-05:00 2013-01-03 00:00:00+01:00\n \"\"\"\n df = DataFrame(\n {\n \"A\": date_range(\"20130101\", periods=3),\n \"B\": date_range(\"20130101\", periods=3, tz=\"US/Eastern\"),\n \"C\": date_range(\"20130101\", periods=3, tz=\"CET\"),\n }\n )\n df.iloc[1, 1] = NaT\n df.iloc[1, 2] = NaT\n return df\n\n\[email protected]\ndef uint64_frame():\n \"\"\"\n Fixture for DataFrame with uint64 values\n\n Columns are ['A', 'B']\n \"\"\"\n return DataFrame(\n {\"A\": np.arange(3), \"B\": [2 ** 63, 2 ** 63 + 5, 2 ** 63 + 10]}, dtype=np.uint64\n )\n\n\[email protected]\ndef simple_frame():\n \"\"\"\n Fixture for simple 3x3 DataFrame\n\n Columns are ['one', 'two', 'three'], index is ['a', 'b', 'c'].\n\n one two three\n a 1.0 2.0 3.0\n b 4.0 5.0 6.0\n c 7.0 8.0 9.0\n \"\"\"\n arr = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]])\n\n return DataFrame(arr, columns=[\"one\", \"two\", \"three\"], index=[\"a\", \"b\", \"c\"])\n\n\[email protected]\ndef frame_of_index_cols():\n \"\"\"\n Fixture for DataFrame of columns that can be used for indexing\n\n Columns are ['A', 'B', 'C', 'D', 'E', ('tuple', 'as', 'label')];\n 'A' & 'B' contain duplicates (but are jointly unique), the rest are unique.\n\n A B C D E (tuple, as, label)\n 0 foo one a 0.608477 -0.012500 -1.664297\n 1 foo two b -0.633460 0.249614 -0.364411\n 2 foo three c 0.615256 2.154968 -0.834666\n 3 bar one d 0.234246 1.085675 0.718445\n 4 bar two e 0.533841 -0.005702 -3.533912\n \"\"\"\n df = DataFrame(\n {\n \"A\": [\"foo\", \"foo\", \"foo\", \"bar\", \"bar\"],\n \"B\": [\"one\", \"two\", \"three\", \"one\", \"two\"],\n \"C\": [\"a\", \"b\", \"c\", \"d\", \"e\"],\n \"D\": np.random.randn(5),\n \"E\": np.random.randn(5),\n (\"tuple\", \"as\", \"label\"): np.random.randn(5),\n }\n )\n return df\n" ]
[ [ "pandas.to_datetime", "pandas.Series", "pandas.offsets.Day", "pandas.tests.arithmetic.common.assert_invalid_addsub_type", "pandas.offsets.DateOffset", "numpy.all", "pandas.tests.arithmetic.common.get_upcast_box", "pandas._testing.box_expected", "pandas._testing.makeDateIndex", "pandas._libs.tslibs.conversion.localize_pydatetime", "pandas.offsets.Hour", "pandas._testing.assert_numpy_array_equal", "numpy.arange", "numpy.subtract", "pandas.Index", "pandas.DatetimeIndex", "pandas.offsets.MonthEnd", "pandas._testing.assert_series_equal", "pandas._testing.assert_index_equal", "pandas._testing.assert_produces_warning", "pandas._libs.tslibs.offsets.shift_months", "pandas.compat.numpy.np_datetime64_compat", "pandas.offsets.Milli", "pandas.Timedelta", "numpy.timedelta64", "pandas.date_range", "pandas.offsets.Second", "numpy.array", "pandas.timedelta_range", "pandas.DateOffset", "pandas._testing.assert_equal", "pandas.TimedeltaIndex", "numpy.abs", "pandas.period_range", "pandas.tests.arithmetic.common.assert_invalid_comparison", "numpy.datetime64", "pandas.offsets.Minute", "pandas.Timestamp.now", "pandas.Period", "numpy.add", "pandas.Timestamp" ], [ "pandas._testing.getTimeSeriesData", "numpy.arange", "pandas.DataFrame", "numpy.random.randn", "pandas.date_range", "numpy.array", "pandas._testing.getSeriesData" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
argriffing/matplotlib
[ "5555f5463fb5f995a59f7651c0034a5d6a4c7e84", "5555f5463fb5f995a59f7651c0034a5d6a4c7e84", "330aefbd031ee227213afe655c5158320015d45b", "330aefbd031ee227213afe655c5158320015d45b", "5555f5463fb5f995a59f7651c0034a5d6a4c7e84", "330aefbd031ee227213afe655c5158320015d45b", "330aefbd031ee227213afe655c5158320015d45b", "5555f5463fb5f995a59f7651c0034a5d6a4c7e84" ]
[ "examples/pylab_examples/contour_corner_mask.py", "lib/matplotlib/spines.py", "examples/pylab_examples/psd_demo_complex.py", "examples/pylab_examples/image_demo.py", "examples/pylab_examples/logo.py", "examples/pylab_examples/tripcolor_demo.py", "examples/mplot3d/mixed_subplots_demo.py", "examples/pylab_examples/legend_demo3.py" ]
[ "#!/usr/bin/env python\n\"\"\"\nIllustrate the difference between corner_mask=False and corner_mask=True\nfor masked contour plots.\n\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# Data to plot.\nx, y = np.meshgrid(np.arange(7), np.arange(10))\nz = np.sin(0.5*x)*np.cos(0.52*y)\n\n# Mask various z values.\nmask = np.zeros_like(z, dtype=np.bool)\nmask[2, 3:5] = True\nmask[3:5, 4] = True\nmask[7, 2] = True\nmask[5, 0] = True\nmask[0, 6] = True\nz = np.ma.array(z, mask=mask)\n\ncorner_masks = [False, True]\nfor i, corner_mask in enumerate(corner_masks):\n plt.subplot(1, 2, i+1)\n cs = plt.contourf(x, y, z, corner_mask=corner_mask)\n plt.contour(cs, colors='k')\n plt.title('corner_mask = {0}'.format(corner_mask))\n\n # Plot grid.\n plt.grid(c='k', ls='-', alpha=0.3)\n\n # Indicate masked points with red circles.\n plt.plot(np.ma.array(x, mask=~mask), y, 'ro')\n\nplt.show()\n", "from __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport six\n\nimport matplotlib\n\nimport matplotlib.artist as martist\nfrom matplotlib.artist import allow_rasterization\nfrom matplotlib import docstring\nimport matplotlib.transforms as mtransforms\nimport matplotlib.lines as mlines\nimport matplotlib.patches as mpatches\nimport matplotlib.path as mpath\nimport matplotlib.cbook as cbook\nimport numpy as np\nimport warnings\n\nrcParams = matplotlib.rcParams\n\n\nclass Spine(mpatches.Patch):\n \"\"\"an axis spine -- the line noting the data area boundaries\n\n Spines are the lines connecting the axis tick marks and noting the\n boundaries of the data area. They can be placed at arbitrary\n positions. See function:`~matplotlib.spines.Spine.set_position`\n for more information.\n\n The default position is ``('outward',0)``.\n\n Spines are subclasses of class:`~matplotlib.patches.Patch`, and\n inherit much of their behavior.\n\n Spines draw a line or a circle, depending if\n function:`~matplotlib.spines.Spine.set_patch_line` or\n function:`~matplotlib.spines.Spine.set_patch_circle` has been\n called. Line-like is the default.\n\n \"\"\"\n def __str__(self):\n return \"Spine\"\n\n @docstring.dedent_interpd\n def __init__(self, axes, spine_type, path, **kwargs):\n \"\"\"\n - *axes* : the Axes instance containing the spine\n - *spine_type* : a string specifying the spine type\n - *path* : the path instance used to draw the spine\n\n Valid kwargs are:\n %(Patch)s\n \"\"\"\n super(Spine, self).__init__(**kwargs)\n self.axes = axes\n self.set_figure(self.axes.figure)\n self.spine_type = spine_type\n self.set_facecolor('none')\n self.set_edgecolor(rcParams['axes.edgecolor'])\n self.set_linewidth(rcParams['axes.linewidth'])\n self.set_capstyle('projecting')\n self.axis = None\n\n self.set_zorder(2.5)\n self.set_transform(self.axes.transData) # default transform\n\n self._bounds = None # default bounds\n self._smart_bounds = False\n\n # Defer initial position determination. (Not much support for\n # non-rectangular axes is currently implemented, and this lets\n # them pass through the spines machinery without errors.)\n self._position = None\n if not isinstance(path, matplotlib.path.Path):\n msg = \"'path' must be an instance of 'matplotlib.path.Path'\"\n raise ValueError(msg)\n self._path = path\n\n # To support drawing both linear and circular spines, this\n # class implements Patch behavior two ways. If\n # self._patch_type == 'line', behave like a mpatches.PathPatch\n # instance. 
If self._patch_type == 'circle', behave like a\n # mpatches.Ellipse instance.\n self._patch_type = 'line'\n\n # Behavior copied from mpatches.Ellipse:\n # Note: This cannot be calculated until this is added to an Axes\n self._patch_transform = mtransforms.IdentityTransform()\n\n def set_smart_bounds(self, value):\n \"\"\"set the spine and associated axis to have smart bounds\"\"\"\n self._smart_bounds = value\n\n # also set the axis if possible\n if self.spine_type in ('left', 'right'):\n self.axes.yaxis.set_smart_bounds(value)\n elif self.spine_type in ('top', 'bottom'):\n self.axes.xaxis.set_smart_bounds(value)\n\n def get_smart_bounds(self):\n \"\"\"get whether the spine has smart bounds\"\"\"\n return self._smart_bounds\n\n def set_patch_circle(self, center, radius):\n \"\"\"set the spine to be circular\"\"\"\n self._patch_type = 'circle'\n self._center = center\n self._width = radius * 2\n self._height = radius * 2\n self._angle = 0\n # circle drawn on axes transform\n self.set_transform(self.axes.transAxes)\n\n def set_patch_line(self):\n \"\"\"set the spine to be linear\"\"\"\n self._patch_type = 'line'\n\n # Behavior copied from mpatches.Ellipse:\n def _recompute_transform(self):\n \"\"\"NOTE: This cannot be called until after this has been added\n to an Axes, otherwise unit conversion will fail. This\n maxes it very important to call the accessor method and\n not directly access the transformation member variable.\n \"\"\"\n assert self._patch_type == 'circle'\n center = (self.convert_xunits(self._center[0]),\n self.convert_yunits(self._center[1]))\n width = self.convert_xunits(self._width)\n height = self.convert_yunits(self._height)\n self._patch_transform = mtransforms.Affine2D() \\\n .scale(width * 0.5, height * 0.5) \\\n .rotate_deg(self._angle) \\\n .translate(*center)\n\n def get_patch_transform(self):\n if self._patch_type == 'circle':\n self._recompute_transform()\n return self._patch_transform\n else:\n return super(Spine, self).get_patch_transform()\n\n def get_path(self):\n return self._path\n\n def _ensure_position_is_set(self):\n if self._position is None:\n # default position\n self._position = ('outward', 0.0) # in points\n self.set_position(self._position)\n\n def register_axis(self, axis):\n \"\"\"register an axis\n\n An axis should be registered with its corresponding spine from\n the Axes instance. This allows the spine to clear any axis\n properties when needed.\n \"\"\"\n self.axis = axis\n if self.axis is not None:\n self.axis.cla()\n\n def cla(self):\n \"\"\"Clear the current spine\"\"\"\n self._position = None # clear position\n if self.axis is not None:\n self.axis.cla()\n\n def is_frame_like(self):\n \"\"\"return True if directly on axes frame\n\n This is useful for determining if a spine is the edge of an\n old style MPL plot. 
If so, this function will return True.\n \"\"\"\n self._ensure_position_is_set()\n position = self._position\n if cbook.is_string_like(position):\n if position == 'center':\n position = ('axes', 0.5)\n elif position == 'zero':\n position = ('data', 0)\n if len(position) != 2:\n raise ValueError(\"position should be 2-tuple\")\n position_type, amount = position\n if position_type == 'outward' and amount == 0:\n return True\n else:\n return False\n\n def _adjust_location(self):\n \"\"\"automatically set spine bounds to the view interval\"\"\"\n\n if self.spine_type == 'circle':\n return\n\n if self._bounds is None:\n if self.spine_type in ('left', 'right'):\n low, high = self.axes.viewLim.intervaly\n elif self.spine_type in ('top', 'bottom'):\n low, high = self.axes.viewLim.intervalx\n else:\n raise ValueError('unknown spine spine_type: %s' %\n self.spine_type)\n\n if self._smart_bounds:\n # attempt to set bounds in sophisticated way\n if low > high:\n # handle inverted limits\n low, high = high, low\n\n viewlim_low = low\n viewlim_high = high\n\n del low, high\n\n if self.spine_type in ('left', 'right'):\n datalim_low, datalim_high = self.axes.dataLim.intervaly\n ticks = self.axes.get_yticks()\n elif self.spine_type in ('top', 'bottom'):\n datalim_low, datalim_high = self.axes.dataLim.intervalx\n ticks = self.axes.get_xticks()\n # handle inverted limits\n ticks = list(ticks)\n ticks.sort()\n ticks = np.array(ticks)\n if datalim_low > datalim_high:\n datalim_low, datalim_high = datalim_high, datalim_low\n\n if datalim_low < viewlim_low:\n # Data extends past view. Clip line to view.\n low = viewlim_low\n else:\n # Data ends before view ends.\n cond = (ticks <= datalim_low) & (ticks >= viewlim_low)\n tickvals = ticks[cond]\n if len(tickvals):\n # A tick is less than or equal to lowest data point.\n low = tickvals[-1]\n else:\n # No tick is available\n low = datalim_low\n low = max(low, viewlim_low)\n\n if datalim_high > viewlim_high:\n # Data extends past view. 
Clip line to view.\n high = viewlim_high\n else:\n # Data ends before view ends.\n cond = (ticks >= datalim_high) & (ticks <= viewlim_high)\n tickvals = ticks[cond]\n if len(tickvals):\n # A tick is greater than or equal to highest data\n # point.\n high = tickvals[0]\n else:\n # No tick is available\n high = datalim_high\n high = min(high, viewlim_high)\n\n else:\n low, high = self._bounds\n\n v1 = self._path.vertices\n assert v1.shape == (2, 2), 'unexpected vertices shape'\n if self.spine_type in ['left', 'right']:\n v1[0, 1] = low\n v1[1, 1] = high\n elif self.spine_type in ['bottom', 'top']:\n v1[0, 0] = low\n v1[1, 0] = high\n else:\n raise ValueError('unable to set bounds for spine \"%s\"' %\n self.spine_type)\n\n @allow_rasterization\n def draw(self, renderer):\n self._adjust_location()\n return super(Spine, self).draw(renderer)\n\n def _calc_offset_transform(self):\n \"\"\"calculate the offset transform performed by the spine\"\"\"\n self._ensure_position_is_set()\n position = self._position\n if cbook.is_string_like(position):\n if position == 'center':\n position = ('axes', 0.5)\n elif position == 'zero':\n position = ('data', 0)\n assert len(position) == 2, \"position should be 2-tuple\"\n position_type, amount = position\n assert position_type in ('axes', 'outward', 'data')\n if position_type == 'outward':\n if amount == 0:\n # short circuit commonest case\n self._spine_transform = ('identity',\n mtransforms.IdentityTransform())\n elif self.spine_type in ['left', 'right', 'top', 'bottom']:\n offset_vec = {'left': (-1, 0),\n 'right': (1, 0),\n 'bottom': (0, -1),\n 'top': (0, 1),\n }[self.spine_type]\n # calculate x and y offset in dots\n offset_x = amount * offset_vec[0] / 72.0\n offset_y = amount * offset_vec[1] / 72.0\n self._spine_transform = ('post',\n mtransforms.ScaledTranslation(\n offset_x,\n offset_y,\n self.figure.dpi_scale_trans))\n else:\n warnings.warn('unknown spine type \"%s\": no spine '\n 'offset performed' % self.spine_type)\n self._spine_transform = ('identity',\n mtransforms.IdentityTransform())\n elif position_type == 'axes':\n if self.spine_type in ('left', 'right'):\n self._spine_transform = ('pre',\n mtransforms.Affine2D.from_values(\n # keep y unchanged, fix x at\n # amount\n 0, 0, 0, 1, amount, 0))\n elif self.spine_type in ('bottom', 'top'):\n self._spine_transform = ('pre',\n mtransforms.Affine2D.from_values(\n # keep x unchanged, fix y at\n # amount\n 1, 0, 0, 0, 0, amount))\n else:\n warnings.warn('unknown spine type \"%s\": no spine '\n 'offset performed' % self.spine_type)\n self._spine_transform = ('identity',\n mtransforms.IdentityTransform())\n elif position_type == 'data':\n if self.spine_type in ('right', 'top'):\n # The right and top spines have a default position of 1 in\n # axes coordinates. When specifying the position in data\n # coordinates, we need to calculate the position relative to 0.\n amount -= 1\n if self.spine_type in ('left', 'right'):\n self._spine_transform = ('data',\n mtransforms.Affine2D().translate(\n amount, 0))\n elif self.spine_type in ('bottom', 'top'):\n self._spine_transform = ('data',\n mtransforms.Affine2D().translate(\n 0, amount))\n else:\n warnings.warn('unknown spine type \"%s\": no spine '\n 'offset performed' % self.spine_type)\n self._spine_transform = ('identity',\n mtransforms.IdentityTransform())\n\n def set_position(self, position):\n \"\"\"set the position of the spine\n\n Spine position is specified by a 2 tuple of (position type,\n amount). 
The position types are:\n\n * 'outward' : place the spine out from the data area by the\n specified number of points. (Negative values specify placing the\n spine inward.)\n\n * 'axes' : place the spine at the specified Axes coordinate (from\n 0.0-1.0).\n\n * 'data' : place the spine at the specified data coordinate.\n\n Additionally, shorthand notations define a special positions:\n\n * 'center' -> ('axes',0.5)\n * 'zero' -> ('data', 0.0)\n\n \"\"\"\n if position in ('center', 'zero'):\n # special positions\n pass\n else:\n if len(position) != 2:\n raise ValueError(\"position should be 'center' or 2-tuple\")\n if position[0] not in ['outward', 'axes', 'data']:\n msg = (\"position[0] should be in [ 'outward' | 'axes' |\"\n \" 'data' ]\")\n raise ValueError(msg)\n self._position = position\n self._calc_offset_transform()\n\n self.set_transform(self.get_spine_transform())\n\n if self.axis is not None:\n self.axis.reset_ticks()\n\n def get_position(self):\n \"\"\"get the spine position\"\"\"\n self._ensure_position_is_set()\n return self._position\n\n def get_spine_transform(self):\n \"\"\"get the spine transform\"\"\"\n self._ensure_position_is_set()\n what, how = self._spine_transform\n\n if what == 'data':\n # special case data based spine locations\n data_xform = self.axes.transScale + \\\n (how + self.axes.transLimits + self.axes.transAxes)\n if self.spine_type in ['left', 'right']:\n result = mtransforms.blended_transform_factory(\n data_xform, self.axes.transData)\n elif self.spine_type in ['top', 'bottom']:\n result = mtransforms.blended_transform_factory(\n self.axes.transData, data_xform)\n else:\n raise ValueError('unknown spine spine_type: %s' %\n self.spine_type)\n return result\n\n if self.spine_type in ['left', 'right']:\n base_transform = self.axes.get_yaxis_transform(which='grid')\n elif self.spine_type in ['top', 'bottom']:\n base_transform = self.axes.get_xaxis_transform(which='grid')\n else:\n raise ValueError('unknown spine spine_type: %s' %\n self.spine_type)\n\n if what == 'identity':\n return base_transform\n elif what == 'post':\n return base_transform + how\n elif what == 'pre':\n return how + base_transform\n else:\n raise ValueError(\"unknown spine_transform type: %s\" % what)\n\n def set_bounds(self, low, high):\n \"\"\"Set the bounds of the spine.\"\"\"\n if self.spine_type == 'circle':\n raise ValueError(\n 'set_bounds() method incompatible with circular spines')\n self._bounds = (low, high)\n\n def get_bounds(self):\n \"\"\"Get the bounds of the spine.\"\"\"\n return self._bounds\n\n @classmethod\n def linear_spine(cls, axes, spine_type, **kwargs):\n \"\"\"\n (staticmethod) Returns a linear :class:`Spine`.\n \"\"\"\n # all values of 13 get replaced upon call to set_bounds()\n if spine_type == 'left':\n path = mpath.Path([(0.0, 13), (0.0, 13)])\n elif spine_type == 'right':\n path = mpath.Path([(1.0, 13), (1.0, 13)])\n elif spine_type == 'bottom':\n path = mpath.Path([(13, 0.0), (13, 0.0)])\n elif spine_type == 'top':\n path = mpath.Path([(13, 1.0), (13, 1.0)])\n else:\n raise ValueError('unable to make path for spine \"%s\"' % spine_type)\n result = cls(axes, spine_type, path, **kwargs)\n return result\n\n @classmethod\n def circular_spine(cls, axes, center, radius, **kwargs):\n \"\"\"\n (staticmethod) Returns a circular :class:`Spine`.\n \"\"\"\n path = mpath.Path.unit_circle()\n spine_type = 'circle'\n result = cls(axes, spine_type, path, **kwargs)\n result.set_patch_circle(center, radius)\n return result\n\n def set_color(self, c):\n \"\"\"\n Set the 
edgecolor.\n\n ACCEPTS: matplotlib color arg or sequence of rgba tuples\n\n .. seealso::\n\n :meth:`set_facecolor`, :meth:`set_edgecolor`\n For setting the edge or face color individually.\n \"\"\"\n # The facecolor of a spine is always 'none' by default -- let\n # the user change it manually if desired.\n self.set_edgecolor(c)\n", "# This is a ported version of a MATLAB example from the signal processing\n# toolbox that showed some difference at one time between Matplotlib's and\n# MATLAB's scaling of the PSD. This differs from psd_demo3.py in that\n# this uses a complex signal, so we can see that complex PSD's work properly\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.mlab as mlab\n\nfs = 1000\nt = np.linspace(0, 0.3, 301)\nA = np.array([2, 8]).reshape(-1, 1)\nf = np.array([150, 140]).reshape(-1, 1)\nxn = (A * np.exp(2j * np.pi * f * t)).sum(axis=0) + 5 * np.random.randn(*t.shape)\n\nyticks = np.arange(-50, 30, 10)\nxticks = np.arange(-500, 550, 100)\nplt.subplots_adjust(hspace=0.45, wspace=0.3)\nax = plt.subplot(1, 2, 1)\n\nplt.psd(xn, NFFT=301, Fs=fs, window=mlab.window_none, pad_to=1024,\n scale_by_freq=True)\nplt.title('Periodogram')\nplt.yticks(yticks)\nplt.xticks(xticks)\nplt.grid(True)\nplt.xlim(-500, 500)\n\nplt.subplot(1, 2, 2, sharex=ax, sharey=ax)\nplt.psd(xn, NFFT=150, Fs=fs, window=mlab.window_none, noverlap=75, pad_to=512,\n scale_by_freq=True)\nplt.title('Welch')\nplt.xticks(xticks)\nplt.yticks(yticks)\nplt.ylabel('')\nplt.grid(True)\nplt.xlim(-500, 500)\n\nplt.show()\n", "#!/usr/bin/env python\nimport numpy as np\nimport matplotlib.cm as cm\nimport matplotlib.mlab as mlab\nimport matplotlib.pyplot as plt\n\ndelta = 0.025\nx = y = np.arange(-3.0, 3.0, delta)\nX, Y = np.meshgrid(x, y)\nZ1 = mlab.bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)\nZ2 = mlab.bivariate_normal(X, Y, 1.5, 0.5, 1, 1)\nZ = Z2 - Z1 # difference of Gaussians\n\nim = plt.imshow(Z, interpolation='bilinear', cmap=cm.RdYlGn,\n origin='lower', extent=[-3, 3, -3, 3],\n vmax=abs(Z).max(), vmin=-abs(Z).max())\n\nplt.show()\n", "#!/usr/bin/env python\n# This file generates the matplotlib web page logo\n\nfrom __future__ import print_function\nfrom pylab import *\nimport matplotlib.cbook as cbook\n\n# convert data to mV\ndatafile = cbook.get_sample_data('membrane.dat', asfileobj=False)\nprint('loading', datafile)\n\nx = 1000*0.1*fromstring(open(datafile, 'rb').read(), float32)\n# 0.0005 is the sample interval\nt = 0.0005*arange(len(x))\nfigure(1, figsize=(7, 1), dpi=100)\nax = subplot(111, axisbg='y')\nplot(t, x)\ntext(0.5, 0.5, 'matplotlib', color='r',\n fontsize=40, fontname=['Courier', 'Bitstream Vera Sans Mono'],\n horizontalalignment='center',\n verticalalignment='center',\n transform=ax.transAxes,\n )\naxis([1, 1.72, -60, 10])\nsetp(gca(), 'xticklabels', [])\nsetp(gca(), 'yticklabels', [])\n\nshow()\n", "\"\"\"\nPseudocolor plots of unstructured triangular grids.\n\"\"\"\nimport matplotlib.pyplot as plt\nimport matplotlib.tri as tri\nimport numpy as np\nimport math\n\n# Creating a Triangulation without specifying the triangles results in the\n# Delaunay triangulation of the points.\n\n# First create the x and y coordinates of the points.\nn_angles = 36\nn_radii = 8\nmin_radius = 0.25\nradii = np.linspace(min_radius, 0.95, n_radii)\n\nangles = np.linspace(0, 2*math.pi, n_angles, endpoint=False)\nangles = np.repeat(angles[..., np.newaxis], n_radii, axis=1)\nangles[:, 1::2] += math.pi/n_angles\n\nx = (radii*np.cos(angles)).flatten()\ny = (radii*np.sin(angles)).flatten()\nz = 
(np.cos(radii)*np.cos(angles*3.0)).flatten()\n\n# Create the Triangulation; no triangles so Delaunay triangulation created.\ntriang = tri.Triangulation(x, y)\n\n# Mask off unwanted triangles.\nxmid = x[triang.triangles].mean(axis=1)\nymid = y[triang.triangles].mean(axis=1)\nmask = np.where(xmid*xmid + ymid*ymid < min_radius*min_radius, 1, 0)\ntriang.set_mask(mask)\n\n# tripcolor plot.\nplt.figure()\nplt.gca().set_aspect('equal')\nplt.tripcolor(triang, z, shading='flat', cmap=plt.cm.rainbow)\nplt.colorbar()\nplt.title('tripcolor of Delaunay triangulation, flat shading')\n\n# Illustrate Gouraud shading.\nplt.figure()\nplt.gca().set_aspect('equal')\nplt.tripcolor(triang, z, shading='gouraud', cmap=plt.cm.rainbow)\nplt.colorbar()\nplt.title('tripcolor of Delaunay triangulation, gouraud shading')\n\n\n# You can specify your own triangulation rather than perform a Delaunay\n# triangulation of the points, where each triangle is given by the indices of\n# the three points that make up the triangle, ordered in either a clockwise or\n# anticlockwise manner.\n\nxy = np.asarray([\n [-0.101, 0.872], [-0.080, 0.883], [-0.069, 0.888], [-0.054, 0.890],\n [-0.045, 0.897], [-0.057, 0.895], [-0.073, 0.900], [-0.087, 0.898],\n [-0.090, 0.904], [-0.069, 0.907], [-0.069, 0.921], [-0.080, 0.919],\n [-0.073, 0.928], [-0.052, 0.930], [-0.048, 0.942], [-0.062, 0.949],\n [-0.054, 0.958], [-0.069, 0.954], [-0.087, 0.952], [-0.087, 0.959],\n [-0.080, 0.966], [-0.085, 0.973], [-0.087, 0.965], [-0.097, 0.965],\n [-0.097, 0.975], [-0.092, 0.984], [-0.101, 0.980], [-0.108, 0.980],\n [-0.104, 0.987], [-0.102, 0.993], [-0.115, 1.001], [-0.099, 0.996],\n [-0.101, 1.007], [-0.090, 1.010], [-0.087, 1.021], [-0.069, 1.021],\n [-0.052, 1.022], [-0.052, 1.017], [-0.069, 1.010], [-0.064, 1.005],\n [-0.048, 1.005], [-0.031, 1.005], [-0.031, 0.996], [-0.040, 0.987],\n [-0.045, 0.980], [-0.052, 0.975], [-0.040, 0.973], [-0.026, 0.968],\n [-0.020, 0.954], [-0.006, 0.947], [ 0.003, 0.935], [ 0.006, 0.926],\n [ 0.005, 0.921], [ 0.022, 0.923], [ 0.033, 0.912], [ 0.029, 0.905],\n [ 0.017, 0.900], [ 0.012, 0.895], [ 0.027, 0.893], [ 0.019, 0.886],\n [ 0.001, 0.883], [-0.012, 0.884], [-0.029, 0.883], [-0.038, 0.879],\n [-0.057, 0.881], [-0.062, 0.876], [-0.078, 0.876], [-0.087, 0.872],\n [-0.030, 0.907], [-0.007, 0.905], [-0.057, 0.916], [-0.025, 0.933],\n [-0.077, 0.990], [-0.059, 0.993]])\nx = xy[:, 0]*180/3.14159\ny = xy[:, 1]*180/3.14159\n\ntriangles = np.asarray([\n [67, 66, 1], [65, 2, 66], [ 1, 66, 2], [64, 2, 65], [63, 3, 64],\n [60, 59, 57], [ 2, 64, 3], [ 3, 63, 4], [ 0, 67, 1], [62, 4, 63],\n [57, 59, 56], [59, 58, 56], [61, 60, 69], [57, 69, 60], [ 4, 62, 68],\n [ 6, 5, 9], [61, 68, 62], [69, 68, 61], [ 9, 5, 70], [ 6, 8, 7],\n [ 4, 70, 5], [ 8, 6, 9], [56, 69, 57], [69, 56, 52], [70, 10, 9],\n [54, 53, 55], [56, 55, 53], [68, 70, 4], [52, 56, 53], [11, 10, 12],\n [69, 71, 68], [68, 13, 70], [10, 70, 13], [51, 50, 52], [13, 68, 71],\n [52, 71, 69], [12, 10, 13], [71, 52, 50], [71, 14, 13], [50, 49, 71],\n [49, 48, 71], [14, 16, 15], [14, 71, 48], [17, 19, 18], [17, 20, 19],\n [48, 16, 14], [48, 47, 16], [47, 46, 16], [16, 46, 45], [23, 22, 24],\n [21, 24, 22], [17, 16, 45], [20, 17, 45], [21, 25, 24], [27, 26, 28],\n [20, 72, 21], [25, 21, 72], [45, 72, 20], [25, 28, 26], [44, 73, 45],\n [72, 45, 73], [28, 25, 29], [29, 25, 31], [43, 73, 44], [73, 43, 40],\n [72, 73, 39], [72, 31, 25], [42, 40, 43], [31, 30, 29], [39, 73, 40],\n [42, 41, 40], [72, 33, 31], [32, 31, 33], [39, 38, 72], [33, 72, 38],\n [33, 38, 34], [37, 35, 38], 
[34, 38, 35], [35, 37, 36]])\n\nxmid = x[triangles].mean(axis=1)\nymid = y[triangles].mean(axis=1)\nx0 = -5\ny0 = 52\nzfaces = np.exp(-0.01*((xmid - x0)*(xmid - x0) + (ymid - y0)*(ymid - y0)))\n\n# Rather than create a Triangulation object, can simply pass x, y and triangles\n# arrays to tripcolor directly. It would be better to use a Triangulation\n# object if the same triangulation was to be used more than once to save\n# duplicated calculations.\n# Can specify one color value per face rather than one per point by using the\n# facecolors kwarg.\nplt.figure()\nplt.gca().set_aspect('equal')\nplt.tripcolor(x, y, triangles, facecolors=zfaces, edgecolors='k')\nplt.colorbar()\nplt.title('tripcolor of user-specified triangulation')\nplt.xlabel('Longitude (degrees)')\nplt.ylabel('Latitude (degrees)')\n\nplt.show()\n", "\"\"\"\nDemonstrate the mixing of 2d and 3d subplots\n\"\"\"\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef f(t):\n s1 = np.cos(2*np.pi*t)\n e1 = np.exp(-t)\n return np.multiply(s1, e1)\n\n\n################\n# First subplot\n################\nt1 = np.arange(0.0, 5.0, 0.1)\nt2 = np.arange(0.0, 5.0, 0.02)\nt3 = np.arange(0.0, 2.0, 0.01)\n\n# Twice as tall as it is wide.\nfig = plt.figure(figsize=plt.figaspect(2.))\nfig.suptitle('A tale of 2 subplots')\nax = fig.add_subplot(2, 1, 1)\nl = ax.plot(t1, f(t1), 'bo',\n t2, f(t2), 'k--', markerfacecolor='green')\nax.grid(True)\nax.set_ylabel('Damped oscillation')\n\n\n#################\n# Second subplot\n#################\nax = fig.add_subplot(2, 1, 2, projection='3d')\nX = np.arange(-5, 5, 0.25)\nxlen = len(X)\nY = np.arange(-5, 5, 0.25)\nylen = len(Y)\nX, Y = np.meshgrid(X, Y)\nR = np.sqrt(X**2 + Y**2)\nZ = np.sin(R)\n\nsurf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1,\n linewidth=0, antialiased=False)\n\nax.set_zlim3d(-1, 1)\n\nplt.show()\n", "import matplotlib.pyplot as plt\nimport numpy as np\n\nx = np.linspace(0, 1)\n\n# Plot the lines y=x**n for n=1..4.\nax = plt.subplot(2, 1, 1)\nfor n in range(1, 5):\n plt.plot(x, x**n, label=\"n={0}\".format(n))\nplt.legend(loc=\"upper left\", bbox_to_anchor=[0, 1],\n ncol=2, shadow=True, title=\"Legend\", fancybox=True)\nax.get_legend().get_title().set_color(\"red\")\n\n# Demonstrate some more complex labels.\nax = plt.subplot(2, 1, 2)\nplt.plot(x, x**2, label=\"multi\\nline\")\nhalf_pi = np.linspace(0, np.pi / 2)\nplt.plot(np.sin(half_pi), np.cos(half_pi), label=r\"$\\frac{1}{2}\\pi$\")\nplt.plot(x, 2**(x**2), label=\"$2^{x^2}$\")\nplt.legend(shadow=True, fancybox=True)\n\nplt.show()\n" ]
[ [ "matplotlib.pyplot.contourf", "numpy.arange", "numpy.cos", "numpy.sin", "matplotlib.pyplot.subplot", "numpy.zeros_like", "matplotlib.pyplot.contour", "matplotlib.pyplot.grid", "numpy.ma.array", "matplotlib.pyplot.show" ], [ "matplotlib.transforms.Affine2D.from_values", "matplotlib.transforms.Affine2D", "matplotlib.transforms.IdentityTransform", "matplotlib.path.Path", "matplotlib.transforms.ScaledTranslation", "matplotlib.path.Path.unit_circle", "numpy.array", "matplotlib.transforms.blended_transform_factory", "matplotlib.cbook.is_string_like" ], [ "numpy.array", "matplotlib.pyplot.psd", "numpy.linspace", "matplotlib.pyplot.title", "numpy.arange", "matplotlib.pyplot.xlim", "matplotlib.pyplot.subplot", "numpy.random.randn", "matplotlib.pyplot.grid", "matplotlib.pyplot.subplots_adjust", "numpy.exp", "matplotlib.pyplot.yticks", "matplotlib.pyplot.show", "matplotlib.pyplot.xticks", "matplotlib.pyplot.ylabel" ], [ "numpy.arange", "matplotlib.mlab.bivariate_normal", "numpy.meshgrid", "matplotlib.pyplot.show" ], [ "matplotlib.cbook.get_sample_data" ], [ "matplotlib.pyplot.tripcolor", "matplotlib.pyplot.gca", "numpy.linspace", "matplotlib.pyplot.title", "numpy.asarray", "numpy.cos", "numpy.sin", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlabel", "numpy.repeat", "numpy.exp", "matplotlib.tri.Triangulation", "numpy.where", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ], [ "matplotlib.pyplot.figaspect", "numpy.sqrt", "numpy.multiply", "numpy.arange", "numpy.cos", "numpy.sin", "numpy.exp", "numpy.meshgrid", "matplotlib.pyplot.show" ], [ "matplotlib.pyplot.legend", "numpy.linspace", "numpy.cos", "numpy.sin", "matplotlib.pyplot.plot", "matplotlib.pyplot.subplot", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [ "1.10", "1.12", "1.11", "1.19", "1.13", "1.16", "1.9", "1.18", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
JulienStanguennec-Leddartech/leddar_ros2
[ "15f2674d8e7c472bc56c4be9cfd41f0d8d39c0bf" ]
[ "leddar_ros2/leddar_sensor.py" ]
[ "\nimport sys\nimport os\nimport time\n\n#Import ros2 py\nimport rclpy \nfrom rclpy.node import Node\n\n#Import messages \nimport sensor_msgs.msg as sensor_msgs\nimport std_msgs.msg as std_msgs\n\n#Import parameters (to read parameters)\nfrom rclpy.parameter import Parameter\n\nimport numpy as np\nimport leddar\n\n\ndef point_cloud(points, parent_frame):\n \"\"\" Creates a point cloud message.\n Args:\n points: Nx3 array of xyz positions.\n parent_frame: frame in which the point cloud is defined\n Returns:\n sensor_msgs/PointCloud2 message\n Code source:\n https://gist.github.com/pgorczak/5c717baa44479fa064eb8d33ea4587e0\n References:\n http://docs.ros.org/melodic/api/sensor_msgs/html/msg/PointCloud2.html\n http://docs.ros.org/melodic/api/sensor_msgs/html/msg/PointField.html\n http://docs.ros.org/melodic/api/std_msgs/html/msg/Header.html\n \"\"\"\n # In a PointCloud2 message, the point cloud is stored as an byte \n # array. In order to unpack it, we also include some parameters \n # which desribes the size of each individual point.\n\n ros_dtype = sensor_msgs.PointField.FLOAT32\n dtype = np.float32\n itemsize = np.dtype(dtype).itemsize # A 32-bit float takes 4 bytes.\n\n data = points.astype(dtype).tobytes() \n\n # The fields specify what the bytes represents. The first 4 bytes \n # represents the x-coordinate, the next 4 the y-coordinate, etc.\n fields = [sensor_msgs.PointField(\n name=n, offset=i*itemsize, datatype=ros_dtype, count=1)\n for i, n in enumerate('xyz')]\n\n # The PointCloud2 message also has a header which specifies which \n # coordinate frame it is represented in. \n header = std_msgs.Header(frame_id=parent_frame)\n\n return sensor_msgs.PointCloud2(\n header=header,\n height=1, \n width=points.shape[0],\n is_dense=False,\n is_bigendian=False,\n fields=fields,\n point_step=(itemsize * 3), # Every point consists of three float32s.\n row_step=(itemsize * 3 * points.shape[0]),\n data=data\n )\n\n\nclass LeddarSensor(Node):\n\n def __init__(self):\n super().__init__('leddar_sensor')\n\n #Declare point cloud publisher topic\n self.publisher = self.create_publisher(sensor_msgs.PointCloud2, 'scan_cloud', 10)\n \n #Declaire parameter for connection to leddar_sensor | Default values for pixell sensor (Ethernet)\n self.declare_parameters(\n namespace='',\n parameters=[\n ('param1', '192.168.0.2'),\n ('device_type', 'Ethernet'),\n ('param3', 48630),\n ('param4', 0)\n ]\n )\n\n #Read parameters for connection to leddar_sensor\n param1 = str(self.get_parameter('param1').value)\n device_type = str(self.get_parameter('device_type').value)\n param3 = int(self.get_parameter('param3').value)\n param4 = int(self.get_parameter('param4').value)\n\n #Create the sensor\n self.dev = leddar.Device()\n\n dev_type = 0\n if(device_type != \"not specified\"):\n dev_type = leddar.device_types[device_type]\n\n if not self.dev.connect(param1, dev_type, param3, param4):\n err_msg = 'Error connecting to device type {0} with connection info {1}/{2}/{3}.'.format(device_type, param1, str(param3), str(param4))\n #rclpy.logerr(err_msg)\n raise RuntimeError(err_msg)\n\n self.get_logger().info('Connected to device type {0} with connection info {1}/{2}/{3}.'.format(device_type, param1, str(param3), str(param4)))\n \n #dev_type_read = self.dev.get_property_value(leddar.property_ids[\"ID_DEVICE_TYPE\"])\n #dev_protocol = self.dev.get_property_value(leddar.property_ids[\"ID_DATA_SERVER_PROTOCOL\"])\n\n #Get info from sensor\n #self.get_logger().info(f'ID_DEVICE_TYPE: {dev_protocol}')\n 
#self.get_logger().info(f'ID_DATA_SERVER_PROTOCOL: {dev_protocol}')\n\n #Set callback method\n self.dev.set_callback_echo(self.echoes_callback)\n\n #Set datamask to detections\n self.dev.set_data_mask(leddar.data_masks[\"DM_ECHOES\"])\n\n #Optionnal : set the delay between two request to the sensor\n self.dev.set_data_thread_delay(10000)\n self.dev.start_data_thread()\n\n #Callback functions for the data thread\n def echoes_callback(self, echoes):\n \n #keep valid echoes only\n echoes['data'] = echoes['data'][np.bitwise_and(echoes['data']['flags'], 0x01).astype(np.bool)] \n\n #extract data field\n indices, flags, distances, amplitudes, x, y, z = [echoes['data'][x] for x in ['indices', 'flags', 'distances', 'amplitudes', 'x', 'y', 'z']]\n \n #merge xyz into np array\n xyz = np.array([x,y,z])\n \n #convert xyz np array to sensors_msg.PointCloud2\n message = point_cloud(xyz.T, 'map')\n\n #publish PointCloud2\n self.publisher.publish(message)\n\n\ndef main(args=None):\n rclpy.init(args=args)\n\n leddar_sensor = LeddarSensor()\n\n rclpy.spin(leddar_sensor)\n\n # Destroy the node explicitly\n # (optional - otherwise it will be done automatically\n # when the garbage collector destroys the node object)\n leddar_sensor.destroy_node()\n rclpy.shutdown()\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.bitwise_and", "numpy.array", "numpy.dtype" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
powerfulbean/StellarWave
[ "877d5113054f391f605c8e39f1a0f60f7bfeeee1", "877d5113054f391f605c8e39f1a0f60f7bfeeee1" ]
[ "StimRespFlow/DataStruct/WaveData.py", "StimRespFlow/DataProcessing/DeepLearning/Factory.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Sep 9 23:21:06 2021\n\n@author: ShiningStone\n\"\"\"\n\nimport datetime\nimport numpy as np\nfrom .Abstract import CWaveData,CTimeStampsGen\n\nclass CDateTimeStampsGen(CTimeStampsGen):\n \n def __init__(self,start:datetime.datetime,delta:datetime.timedelta,nLen):\n super().__init__(start,delta,nLen)\n \n \nclass CBitalinoWaveData(CWaveData): # EEG unit: uV; EOG unit: mv\n\n def __init__(self):\n super().__init__(-1,-1,CTimeStampsGen(0, 0, 1)) #still can't decide this param at this time for bitalino file\n \n def readFile(self,filename,mode = 'EEG'):\n print(\"start reading bitalinofile\")\n from pylab import loadtxt\n #file_name = 'opensignals_001403173836_2019-03-04_12-02-59.txt'\n fullCont = list()\n dataDescription = ''\n import json\n \n #read data description part\n with open(filename,'r') as f:\n for rowCont in f.readlines():\n if(rowCont[0] == '#' and rowCont[2] != '{'):\n pass\n elif(rowCont[2] == '{'):\n rowCont = rowCont[2:]\n dataDescription = json.loads(rowCont)\n break\n else:\n rowArray = rowCont.split(\"\\t\")\n rowArray = rowArray[0:-1]\n fullCont.append(rowArray)\n \n data = loadtxt(filename)\n # rowArrayNum = np.array(fullCont)\n rowArrayNum = data\n \n for key in dataDescription.keys(): #now the key is just the mac address of the device\n dataDescription = dataDescription[key]\n \n self.timestamps = rowArrayNum[:,0]\n self.description = dataDescription\n# print(dateTime.datetime.now())\n if mode=='EEG':\n self.nChan = 1\n self.data = np.expand_dims(np.array(self.getRealSignal(rowArrayNum[:,-1],10,3.3,40000,'uV')), 0)\n# self.rawdata = np.expand_dims(rowArrayNum[:,-1],0)\n self.description[\"channelInfo\"] = [[1],['EarEEG']]\n elif mode == 'EOG':\n self.nChan= 1\n self.data = np.expand_dims(np.array(self.getRealSignal(rowArrayNum[:,-2],10,3.3,2040, 'mV')), 0)\n self.description[\"channelInfo\"] = [[1],['Eog']]\n elif mode == 'EEGandEOG':\n data1 = np.expand_dims(np.array(self.getRealSignal(rowArrayNum[:,-1],10,3.3,40000,'uV')), 0)\n data2 = np.expand_dims(np.array(self.getRealSignal(rowArrayNum[:,-2],10,3.3,2040, 'uV')), 0)\n self.nChan = 2\n self.data = np.concatenate([data1,data2],0)\n self.description['channelInfo'] = [[1,2],['EarEEG','Eog']]\n else:\n print(\"bitalino error: doesn't support this mode!\")\n# print(dateTime.datetime.now())\n \n startTime = datetime.datetime.strptime( dataDescription['date'] + ' ' + dataDescription['time'], '%Y-%m-%d %H:%M:%S.%f')\n self.srate = dataDescription[\"sampling rate\"]\n \n print(\"reading bitalinofile Finished\")\n \n delta = datetime.timedelta(seconds = 1/self.srate) \n self.timeStampsGen = CDateTimeStampsGen(startTime,delta,len(self.timestamps))#initiate the timestamp sequence generator\n self.calTimeStamp(self.timeStampsGen)\n \n return data, dataDescription\n \n def getRealSignal(self,sampleDataArray, bitNumber ,VCC = 3.3 , Geeg = 40000, unit = 'uV'):\n output = [self._eegTransferFuntion(i,bitNumber ,VCC , Geeg) for i in sampleDataArray] \n output = np.array(output)\n if(unit == 'uV'):\n output = output * (10**6)\n elif(unit == 'mV'):\n output = output * (10**3)\n return output\n \n def _eegTransferFuntion(self,sampleValue, bitNumber ,VCC, Geeg):\n output = (( (sampleValue/2**bitNumber) - 1/2) * VCC ) / Geeg\n return output\n \n def __len__(self):\n return len(self.data)\n ", "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 14 13:27:33 2020\n\n@author: Jin Dou\n\"\"\"\nimport torch\n\ndef buildDataLoader(*tensors,TorchDataSetType,oSamplerType=None,**Args):\n 
if(Args.get('DatasetArgs') != None):\n DataSetArgs = Args['DatasetArgs']\n dataset = TorchDataSetType(*tensors,**DataSetArgs)\n else:\n dataset = TorchDataSetType(*tensors)\n \n if(Args.get('DataLoaderArgs') != None):\n DataLoaderArgs = Args['DataLoaderArgs']\n if(oSamplerType == None or Args.get('SamplerArgs') == None):\n dataLoader = torch.utils.data.DataLoader(dataset,**DataLoaderArgs)\n else:\n SamplerArgs = Args.get('SamplerArgs')\n oSampler = oSamplerType(dataset,**SamplerArgs)\n dataLoader = torch.utils.data.DataLoader(dataset,sampler=oSampler,**DataLoaderArgs)\n else:\n dataLoader = torch.utils.data.DataLoader(dataset)\n return dataLoader\n \nclass CPytorch:\n \n def __init__(self):\n self.Lib = self._ImportTorch()\n \n def _ImportTorch(self):\n import torch as root\n return root\n \n def _getNNAttr(self,name:str):\n import torch.nn as NN\n ans = getattr(NN,name)\n return ans\n \nclass CTorchNNYaml(CPytorch):\n \n def __init__(self):\n super().__init__()\n \n def _readYaml(self,filePath):\n import yaml\n ans = None\n with open(filePath,'r') as stream:\n try:\n ans = yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n print(exc)\n return ans\n \n def _ParseType(self,conf:dict):\n if(conf['Type'] == 'Sequential'):\n return self.buildSequential(conf)\n \n def _subListToTuple(self,oInput):\n if type(oInput) == dict:\n for key in oInput:\n if(type(oInput[key]) == list):\n oInput[key] = tuple(oInput[key])\n \n elif type(oInput) == list:\n for idx,attr in enumerate(oInput):\n if type(attr) == list:\n oInput[idx] = tuple(attr)\n \n else:\n raise ValueError(\"_subListToTuple: input should be dict or list\")\n \n def buildSequential(self,conf:dict):\n oSeq = self.Lib.nn.Sequential()\n ModelConfList = conf['Model']\n for idx,ModelConf in enumerate(ModelConfList):\n CModule = self._getNNAttr(ModelConf[0])\n attr = ModelConf[1]\n oModule = None\n name = str(idx)\n \n if(len(ModelConf) > 2 and type(ModelConf[2]) == dict):\n '''if contain aux attribute'''\n auxAttr = ModelConf[2]\n if (auxAttr.get('name')!=None):\n ''' if aux attribute contain name attribute'''\n name = auxAttr['name']\n if(type(attr) == list):\n if len(attr) == 0:\n oModule = CModule()\n elif(type(attr[0]) == list and type(attr[1]) == dict):\n self._subListToTuple(attr[0])\n self._subListToTuple(attr[1])\n oModule = CModule(*attr[0],**attr[1])\n elif(any(type(x) not in [int,float,str,bool,list] for x in attr)):\n raise ValueError('attribute of Module %s (index %d) is invalid' % (ModelConf[0],idx))\n else:\n self._subListToTuple(attr)\n oModule = CModule(*attr)\n elif(type(attr) == dict):\n self._subListToTuple(attr)\n oModule = CModule(**attr)\n else:\n raise ValueError('attribute of Module %s (index %d) is invalid' % (ModelConf[0],idx))\n oSeq.add_module(name,oModule)\n return oSeq\n \n def __call__(self,confFile:str):\n yamlDict = self._readYaml(confFile)\n return self._ParseType(yamlDict)\n \n" ]
[ [ "numpy.concatenate", "numpy.array" ], [ "torch.utils.data.DataLoader" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
SimonAltrogge/brian2
[ "c212a57cb992b766786b5769ebb830ff12d8a8ad", "c212a57cb992b766786b5769ebb830ff12d8a8ad", "c212a57cb992b766786b5769ebb830ff12d8a8ad" ]
[ "brian2/codegen/generators/numpy_generator.py", "brian2/utils/arrays.py", "brian2/monitors/statemonitor.py" ]
[ "\nimport itertools\n\nimport numpy as np\n\nfrom brian2.parsing.bast import brian_dtype_from_dtype\nfrom brian2.parsing.rendering import NumpyNodeRenderer\nfrom brian2.core.functions import DEFAULT_FUNCTIONS, timestep\nfrom brian2.core.variables import ArrayVariable\nfrom brian2.utils.stringtools import get_identifiers, word_substitute, indent\nfrom brian2.utils.logger import get_logger\n\nfrom .base import CodeGenerator\n\n__all__ = ['NumpyCodeGenerator']\n\n\nlogger = get_logger(__name__)\n\nclass VectorisationError(Exception):\n pass\n\n\nclass NumpyCodeGenerator(CodeGenerator):\n \"\"\"\n Numpy language\n \n Essentially Python but vectorised.\n \"\"\"\n\n class_name = 'numpy'\n\n _use_ufunc_at_vectorisation = True # allow this to be off for testing only\n\n def translate_expression(self, expr):\n expr = word_substitute(expr, self.func_name_replacements)\n return NumpyNodeRenderer(auto_vectorise=self.auto_vectorise).render_expr(expr, self.variables).strip()\n\n def translate_statement(self, statement):\n # TODO: optimisation, translate arithmetic to a sequence of inplace\n # operations like a=b+c -> add(b, c, a)\n var, op, expr, comment = (statement.var, statement.op,\n statement.expr, statement.comment)\n if op == ':=':\n op = '='\n # For numpy we replace complex expressions involving a single boolean variable into a\n # where(boolvar, expr_if_true, expr_if_false)\n if (statement.used_boolean_variables is not None and len(statement.used_boolean_variables)==1\n and brian_dtype_from_dtype(statement.dtype)=='float'\n and statement.complexity_std>sum(statement.complexities.values())):\n used_boolvars = statement.used_boolean_variables\n bool_simp = statement.boolean_simplified_expressions\n boolvar = used_boolvars[0]\n for bool_assigns, simp_expr in bool_simp.items():\n _, boolval = bool_assigns[0]\n if boolval:\n expr_true = simp_expr\n else:\n expr_false = simp_expr\n code = f'{var} {op} _numpy.where({boolvar}, {expr_true}, {expr_false})'\n else:\n code = f\"{var} {op} {self.translate_expression(expr)}\"\n if len(comment):\n code += f\" # {comment}\"\n return code\n\n def ufunc_at_vectorisation(self, statement, variables, indices,\n conditional_write_vars, created_vars, used_variables):\n if not self._use_ufunc_at_vectorisation:\n raise VectorisationError()\n # Avoids circular import\n from brian2.devices.device import device\n\n # See https://github.com/brian-team/brian2/pull/531 for explanation\n used = set(get_identifiers(statement.expr))\n used = used.intersection(k for k in list(variables.keys()) if k in indices and indices[k]!='_idx')\n used_variables.update(used)\n if statement.var in used_variables:\n raise VectorisationError()\n expr = NumpyNodeRenderer(auto_vectorise=self.auto_vectorise).render_expr(statement.expr)\n\n if statement.op == ':=' or indices[statement.var] == '_idx' or not statement.inplace:\n if statement.op == ':=':\n op = '='\n else:\n op = statement.op\n line = f'{statement.var} {op} {expr}'\n elif statement.inplace:\n if statement.op == '+=':\n ufunc_name = '_numpy.add'\n elif statement.op == '*=':\n ufunc_name = '_numpy.multiply'\n elif statement.op == '/=':\n ufunc_name = '_numpy.divide'\n elif statement.op == '-=':\n ufunc_name = '_numpy.subtract'\n else:\n raise VectorisationError()\n array_name = device.get_array_name(variables[statement.var])\n idx = indices[statement.var]\n line = f'{ufunc_name}.at({array_name}, {idx}, {expr})'\n line = self.conditional_write(line, statement, variables,\n conditional_write_vars=conditional_write_vars,\n 
created_vars=created_vars)\n else:\n raise VectorisationError()\n\n if len(statement.comment):\n line += f\" # {statement.comment}\"\n\n return line\n\n def vectorise_code(self, statements, variables, variable_indices, index='_idx'):\n created_vars = {stmt.var for stmt in statements if stmt.op == ':='}\n try:\n lines = []\n used_variables = set()\n for statement in statements:\n lines.append(f'# Abstract code: {statement.var} {statement.op} {statement.expr}')\n # We treat every statement individually with its own read and write code\n # to be on the safe side\n read, write, indices, conditional_write_vars = self.arrays_helper([statement])\n # We make sure that we only add code to `lines` after it went\n # through completely\n ufunc_lines = []\n # No need to load a variable if it is only in read because of\n # the in-place operation\n if (statement.inplace and\n variable_indices[statement.var] != '_idx' and\n statement.var not in get_identifiers(statement.expr)):\n read = read - {statement.var}\n ufunc_lines.extend(self.read_arrays(read, write, indices,\n variables, variable_indices))\n ufunc_lines.append(self.ufunc_at_vectorisation(statement,\n variables,\n variable_indices,\n conditional_write_vars,\n created_vars,\n used_variables,\n ))\n # Do not write back such values, the ufuncs have modified the\n # underlying array already\n if statement.inplace and variable_indices[statement.var] != '_idx':\n write = write - {statement.var}\n ufunc_lines.extend(self.write_arrays([statement], read, write,\n variables,\n variable_indices))\n lines.extend(ufunc_lines)\n except VectorisationError:\n if self._use_ufunc_at_vectorisation:\n logger.info(\"Failed to vectorise code, falling back on Python loop: note that \"\n \"this will be very slow! Switch to another code generation target for \"\n \"best performance (e.g. cython). 
First line is: \"+str(statements[0]),\n once=True)\n lines = []\n lines.extend(['_full_idx = _idx',\n 'for _idx in _full_idx:',\n ' _vectorisation_idx = _idx'\n ])\n read, write, indices, conditional_write_vars = self.arrays_helper(statements)\n lines.extend(indent(code) for code in\n self.read_arrays(read, write, indices,\n variables, variable_indices))\n for statement in statements:\n line = self.translate_statement(statement)\n if statement.var in conditional_write_vars:\n lines.append(indent(f'if {conditional_write_vars[statement.var]}:'))\n lines.append(indent(line, 2))\n else:\n lines.append(indent(line))\n lines.extend(indent(code) for code in\n self.write_arrays(statements, read, write,\n variables, variable_indices))\n return lines\n\n def read_arrays(self, read, write, indices, variables, variable_indices):\n # index and read arrays (index arrays first)\n lines = []\n for varname in itertools.chain(indices, read):\n var = variables[varname]\n index = variable_indices[varname]\n # if index in iterate_all:\n # line = '{varname} = {array_name}'\n # else:\n # line = '{varname} = {array_name}.take({index})'\n # line = line.format(varname=varname, array_name=self.get_array_name(var), index=index)\n line = f\"{varname} = {self.get_array_name(var)}\"\n if not index in self.iterate_all:\n line += f\"[{index}]\"\n elif varname in write:\n # avoid potential issues with aliased variables, see github #259\n line += '.copy()'\n lines.append(line)\n return lines\n\n def write_arrays(self, statements, read, write, variables, variable_indices):\n # write arrays\n lines = []\n for varname in write:\n var = variables[varname]\n index_var = variable_indices[varname]\n # check if all operations were inplace and we're operating on the\n # whole vector, if so we don't need to write the array back\n if index_var not in self.iterate_all or varname in read:\n all_inplace = False\n else:\n all_inplace = True\n for stmt in statements:\n if stmt.var == varname and not stmt.inplace:\n all_inplace = False\n break\n if not all_inplace:\n line = self.get_array_name(var)\n if index_var in self.iterate_all:\n line = f\"{line}[:]\"\n else:\n line = f\"{line}[{index_var}]\"\n line = f\"{line} = {varname}\"\n lines.append(line)\n return lines\n\n def conditional_write(self, line, stmt, variables, conditional_write_vars,\n created_vars):\n if stmt.var in conditional_write_vars:\n subs = {}\n index = conditional_write_vars[stmt.var]\n # we replace all var with var[index], but actually we use this repl_string first because\n # we don't want to end up with lines like x[not_refractory[not_refractory]] when\n # multiple substitution passes are invoked\n repl_string = '#$(@#&$@$*U#@)$@(#' # this string shouldn't occur anywhere I hope! :)\n for varname, var in list(variables.items()):\n if isinstance(var, ArrayVariable) and not var.scalar:\n subs[varname] = f\"{varname}[{repl_string}]\"\n # all newly created vars are arrays and will need indexing\n for varname in created_vars:\n subs[varname] = f\"{varname}[{repl_string}]\"\n # Also index _vectorisation_idx so that e.g. 
rand() works correctly\n subs['_vectorisation_idx'] = f\"_vectorisation_idx[{repl_string}]\"\n\n line = word_substitute(line, subs)\n line = line.replace(repl_string, index)\n return line\n\n def translate_one_statement_sequence(self, statements, scalar=False):\n variables = self.variables\n variable_indices = self.variable_indices\n read, write, indices, conditional_write_vars = self.arrays_helper(statements)\n lines = []\n\n all_unique = not self.has_repeated_indices(statements)\n\n if scalar or all_unique:\n # Simple translation\n lines.extend(self.read_arrays(read, write, indices, variables,\n variable_indices))\n created_vars = {stmt.var for stmt in statements if stmt.op == ':='}\n for stmt in statements:\n\n line = self.translate_statement(stmt)\n line = self.conditional_write(line, stmt, variables,\n conditional_write_vars,\n created_vars)\n lines.append(line)\n lines.extend(self.write_arrays(statements, read, write, variables,\n variable_indices))\n else:\n # More complex translation to deal with repeated indices\n lines.extend(self.vectorise_code(statements, variables,\n variable_indices))\n\n return lines\n\n def determine_keywords(self):\n try:\n import scipy\n scipy_available = True\n except ImportError:\n scipy_available = False\n\n return {'_scipy_available': scipy_available}\n\n################################################################################\n# Implement functions\n################################################################################\n# Functions that exist under the same name in numpy\nfor func_name, func in [('sin', np.sin), ('cos', np.cos), ('tan', np.tan),\n ('sinh', np.sinh), ('cosh', np.cosh), ('tanh', np.tanh),\n ('exp', np.exp), ('log', np.log), ('log10', np.log10),\n ('sqrt', np.sqrt), ('arcsin', np.arcsin),\n ('arccos', np.arccos), ('arctan', np.arctan),\n ('abs', np.abs), ('sign', np.sign)]:\n DEFAULT_FUNCTIONS[func_name].implementations.add_implementation(NumpyCodeGenerator,\n code=func)\n\n# Functions that are implemented in a somewhat special way\ndef randn_func(vectorisation_idx):\n try:\n N = len(vectorisation_idx)\n return np.random.randn(N)\n except TypeError:\n # scalar value\n return np.random.randn()\n\n\ndef rand_func(vectorisation_idx):\n try:\n N = len(vectorisation_idx)\n return np.random.rand(N)\n except TypeError:\n # scalar value\n return np.random.rand()\n\ndef poisson_func(lam, vectorisation_idx):\n try:\n N = len(vectorisation_idx)\n return np.random.poisson(lam, size=N)\n except TypeError:\n # scalar value\n return np.random.poisson(lam)\n\nDEFAULT_FUNCTIONS['randn'].implementations.add_implementation(NumpyCodeGenerator,\n code=randn_func)\nDEFAULT_FUNCTIONS['rand'].implementations.add_implementation(NumpyCodeGenerator,\n code=rand_func)\nDEFAULT_FUNCTIONS['poisson'].implementations.add_implementation(NumpyCodeGenerator,\n code=poisson_func)\nclip_func = lambda array, a_min, a_max: np.clip(array, a_min, a_max)\nDEFAULT_FUNCTIONS['clip'].implementations.add_implementation(NumpyCodeGenerator,\n code=clip_func)\nint_func = lambda value: np.int32(value)\nDEFAULT_FUNCTIONS['int'].implementations.add_implementation(NumpyCodeGenerator,\n code=int_func)\nceil_func = lambda value: np.int32(np.ceil(value))\nDEFAULT_FUNCTIONS['ceil'].implementations.add_implementation(NumpyCodeGenerator,\n code=ceil_func)\nfloor_func = lambda value: np.int32(np.floor(value))\nDEFAULT_FUNCTIONS['floor'].implementations.add_implementation(NumpyCodeGenerator,\n code=floor_func)\n\n# We need to explicitly add an implementation for the timestep 
function,\n# otherwise Brian would *add* units during simulation, thinking that the\n# timestep function would not work correctly otherwise. This would slow the\n# function down significantly.\nDEFAULT_FUNCTIONS['timestep'].implementations.add_implementation(NumpyCodeGenerator,\n code=timestep)\n", "\"\"\"\nHelper module containing functions that operate on numpy arrays.\n\"\"\"\n\nimport numpy as np\n\n\ndef calc_repeats(delay):\n \"\"\"\n Calculates offsets corresponding to an array, where repeated values are\n subsequently numbered, i.e. if there n identical values, the returned array\n will have values from 0 to n-1 at their positions.\n The code is complex because tricks are needed for vectorisation.\n\n This function is used in the Python `SpikeQueue` to calculate the offset\n array for the insertion of spikes with their respective delays into the\n queue and in the numpy code for synapse creation to calculate how many\n synapses for each source-target pair exist.\n\n Examples\n --------\n >>> import numpy as np\n >>> print(calc_repeats(np.array([7, 5, 7, 3, 7, 5])))\n [0 0 1 0 2 1]\n \"\"\"\n # We use merge sort because it preserves the input order of equal\n # elements in the sorted output\n I = np.argsort(delay, kind='mergesort')\n xs = delay[I]\n J = (xs[1:] != xs[:-1])\n A = np.hstack((0, np.cumsum(J)))\n B = np.hstack((0, np.cumsum(np.logical_not(J))))\n BJ = np.hstack((0, B[:-1][J]))\n ei = B-BJ[A]\n ofs = np.zeros_like(delay, dtype=np.int32)\n ofs[I] = np.array(ei, dtype=ofs.dtype)\n return ofs\n", "from collections.abc import Sequence\nimport numbers\n\nimport numpy as np\n\nfrom brian2.core.variables import Variables, get_dtype\nfrom brian2.groups.group import Group, CodeRunner\nfrom brian2.utils.logger import get_logger\nfrom brian2.units.fundamentalunits import Quantity\nfrom brian2.units.allunits import second\n\n__all__ = ['StateMonitor']\n\nlogger = get_logger(__name__)\n\n\nclass StateMonitorView(object):\n def __init__(self, monitor, item):\n self.monitor = monitor\n self.item = item\n self.indices = self._calc_indices(item)\n self._group_attribute_access_active = True\n\n def __getattr__(self, item):\n # We do this because __setattr__ and __getattr__ are not active until\n # _group_attribute_access_active attribute is set, and if it is set,\n # then __getattr__ will not be called. Therefore, if getattr is called\n # with this name, it is because it hasn't been set yet and so this\n # method should raise an AttributeError to agree that it hasn't been\n # called yet.\n if item == '_group_attribute_access_active':\n raise AttributeError\n if not hasattr(self, '_group_attribute_access_active'):\n raise AttributeError\n\n mon = self.monitor\n if item == 't':\n return Quantity(mon.variables['t'].get_value(), dim=second.dim)\n elif item == 't_':\n return mon.variables['t'].get_value()\n elif item in mon.record_variables:\n dims = mon.variables[item].dim\n return Quantity(mon.variables[item].get_value().T[self.indices],\n dim=dims, copy=True)\n elif item.endswith('_') and item[:-1] in mon.record_variables:\n return mon.variables[item[:-1]].get_value().T[self.indices].copy()\n else:\n raise AttributeError(f'Unknown attribute {item}')\n\n def _calc_indices(self, item):\n \"\"\"\n Convert the neuron indices to indices into the stored values. 
For example, if neurons [0, 5, 10] have been\n recorded, [5, 10] is converted to [1, 2].\n \"\"\"\n dtype = get_dtype(item)\n # scalar value\n if np.issubdtype(dtype, np.signedinteger) and not isinstance(item, np.ndarray):\n indices = np.nonzero(self.monitor.record == item)[0]\n if len(indices) == 0:\n raise IndexError(f'Index number {int(item)} has not been recorded')\n return indices[0]\n\n if self.monitor.record_all:\n return item\n indices = []\n for index in item:\n if index in self.monitor.record:\n indices.append(np.nonzero(self.monitor.record == index)[0][0])\n else:\n raise IndexError(f'Index number {int(index)} has not been recorded')\n return np.array(indices)\n\n def __repr__(self):\n classname = self.__class__.__name__\n return (f\"<{classname}, giving access to elements {self.item!r} recorded by \" \n f\"{self.monitor.name}>\")\n\n\nclass StateMonitor(Group, CodeRunner):\n \"\"\"\n Record values of state variables during a run\n \n To extract recorded values after a run, use the ``t`` attribute for the\n array of times at which values were recorded, and variable name attribute\n for the values. The values will have shape ``(len(indices), len(t))``,\n where ``indices`` are the array indices which were recorded. When indexing\n the `StateMonitor` directly, the returned object can be used to get the\n recorded values for the specified indices, i.e. the indexing semantic\n refers to the indices in ``source``, not to the relative indices of the\n recorded values. For example, when recording only neurons with even numbers,\n `mon[[0, 2]].v` will return the values for neurons 0 and 2, whereas\n `mon.v[[0, 2]]` will return the values for the first and third *recorded*\n neurons, i.e. for neurons 0 and 4.\n\n Parameters\n ----------\n source : `Group`\n Which object to record values from.\n variables : str, sequence of str, True\n Which variables to record, or ``True`` to record all variables\n (note that this may use a great deal of memory).\n record : bool, sequence of ints\n Which indices to record, nothing is recorded for ``False``,\n everything is recorded for ``True`` (warning: may use a great deal of\n memory), or a specified subset of indices.\n dt : `Quantity`, optional\n The time step to be used for the monitor. Cannot be combined with\n the `clock` argument.\n clock : `Clock`, optional\n The update clock to be used. If neither a clock, nor the ``dt`` argument\n is specified, the clock of the `source` will be used.\n when : str, optional\n At which point during a time step the values should be recorded.\n Defaults to ``'start'``.\n order : int, optional\n The priority of of this group for operations occurring at the same time\n step and in the same scheduling slot. 
Defaults to 0.\n name : str, optional\n A unique name for the object, otherwise will use\n ``source.name+'statemonitor_0'``, etc.\n codeobj_class : `CodeObject`, optional\n The `CodeObject` class to create.\n\n Examples\n --------\n \n Record all variables, first 5 indices::\n \n eqs = '''\n dV/dt = (2-V)/(10*ms) : 1\n '''\n threshold = 'V>1'\n reset = 'V = 0'\n G = NeuronGroup(100, eqs, threshold=threshold, reset=reset)\n G.V = rand(len(G))\n M = StateMonitor(G, True, record=range(5))\n run(100*ms)\n plot(M.t, M.V.T)\n show()\n\n Notes\n -----\n\n Since this monitor by default records in the ``'start'`` time slot,\n recordings of the membrane potential in integrate-and-fire models may look\n unexpected: the recorded membrane potential trace will never be above\n threshold in an integrate-and-fire model, because the reset statement will\n have been applied already. Set the ``when`` keyword to a different value if\n this is not what you want.\n\n Note that ``record=True`` only works in runtime mode for synaptic variables.\n This is because the actual array of indices has to be calculated and this is\n not possible in standalone mode, where the synapses have not been created\n yet at this stage. Consider using an explicit array of indices instead,\n i.e. something like ``record=np.arange(n_synapses)``.\n \"\"\"\n invalidates_magic_network = False\n add_to_magic_network = True\n def __init__(self, source, variables, record, dt=None, clock=None,\n when='start', order=0, name='statemonitor*', codeobj_class=None):\n self.source = source\n # Make the monitor use the explicitly defined namespace of its source\n # group (if it exists)\n self.namespace = getattr(source, 'namespace', None)\n self.codeobj_class = codeobj_class\n\n # run by default on source clock at the end\n if dt is None and clock is None:\n clock = source.clock\n\n # variables should always be a list of strings\n if variables is True:\n variables = source.equations.names\n elif isinstance(variables, str):\n variables = [variables]\n #: The variables to record\n self.record_variables = variables\n\n # record should always be an array of ints\n self.record_all = False\n if hasattr(record, '_indices'):\n # The ._indices method always returns absolute indices\n # If the source is already a subgroup of another group, we therefore\n # have to shift the indices to become relative to the subgroup\n record = record._indices() - getattr(source, '_offset', 0)\n if record is True:\n self.record_all = True\n try:\n record = np.arange(len(source), dtype=np.int32)\n except NotImplementedError:\n # In standalone mode, this is not possible for synaptic\n # variables because the number of synapses is not defined yet\n raise NotImplementedError(\"Cannot determine the actual \"\n \"indices to record for record=True. \"\n \"This can occur for example in \"\n \"standalone mode when trying to \"\n \"record a synaptic variable. 
\"\n \"Consider providing an explicit \"\n \"array of indices for the record \"\n \"argument.\")\n elif record is False:\n record = np.array([], dtype=np.int32)\n elif isinstance(record, numbers.Number):\n record = np.array([record], dtype=np.int32)\n else:\n record = np.asarray(record, dtype=np.int32)\n\n #: The array of recorded indices\n self.record = record\n self.n_indices = len(record)\n\n # Some dummy code so that code generation takes care of the indexing\n # and subexpressions\n code = [f'_to_record_{v} = _source_{v}'\n for v in variables]\n code = '\\n'.join(code)\n\n CodeRunner.__init__(self, group=self, template='statemonitor',\n code=code, name=name,\n clock=clock,\n dt=dt,\n when=when,\n order=order,\n check_units=False)\n\n self.add_dependency(source)\n\n # Setup variables\n self.variables = Variables(self)\n\n self.variables.add_dynamic_array('t', size=0, dimensions=second.dim,\n constant=False,\n dtype=self._clock.variables['t'].dtype)\n self.variables.add_array('N', dtype=np.int32, size=1, scalar=True,\n read_only=True)\n self.variables.add_array('_indices', size=len(self.record),\n dtype=self.record.dtype, constant=True,\n read_only=True, values=self.record)\n self.variables.create_clock_variables(self._clock,\n prefix='_clock_')\n for varname in variables:\n var = source.variables[varname]\n if var.scalar and len(self.record) > 1:\n logger.warn(('Variable %s is a shared variable but it will be '\n 'recorded once for every target.' % varname),\n once=True)\n index = source.variables.indices[varname]\n self.variables.add_reference(f'_source_{varname}',\n source, varname, index=index)\n if not index in ('_idx', '0') and index not in variables:\n self.variables.add_reference(index, source)\n self.variables.add_dynamic_array(varname,\n size=(0, len(self.record)),\n resize_along_first=True,\n dimensions=var.dim,\n dtype=var.dtype,\n constant=False,\n read_only=True)\n\n for varname in variables:\n var = self.source.variables[varname]\n self.variables.add_auxiliary_variable(f\"_to_record_{varname}\",\n dimensions=var.dim,\n dtype=var.dtype,\n scalar=var.scalar)\n\n self.recorded_variables = dict([(varname, self.variables[varname])\n for varname in variables])\n recorded_names = [varname for varname in variables]\n\n self.needed_variables = recorded_names\n self.template_kwds = {'_recorded_variables': self.recorded_variables}\n self.written_readonly_vars = {self.variables[varname]\n for varname in self.record_variables}\n self._enable_group_attributes()\n\n def resize(self, new_size):\n self.variables['N'].set_value(new_size)\n self.variables['t'].resize(new_size)\n\n for var in self.recorded_variables.values():\n var.resize((new_size, self.n_indices))\n\n def reinit(self):\n raise NotImplementedError()\n\n def __getitem__(self, item):\n dtype = get_dtype(item)\n if np.issubdtype(dtype, np.signedinteger):\n return StateMonitorView(self, item)\n elif isinstance(item, Sequence):\n index_array = np.array(item)\n if not np.issubdtype(index_array.dtype, np.signedinteger):\n raise TypeError(\"Index has to be an integer or a sequence \"\n \"of integers\")\n return StateMonitorView(self, item)\n elif hasattr(item, '_indices'):\n # objects that support the indexing interface will return absolute\n # indices but here we need relative ones\n # TODO: How to we prevent the use of completely unrelated objects here?\n source_offset = getattr(self.source, '_offset', 0)\n return StateMonitorView(self, item._indices() - source_offset)\n else:\n raise TypeError(f'Cannot use object of type 
{type(item)} as an index')\n\n def __getattr__(self, item):\n # We do this because __setattr__ and __getattr__ are not active until\n # _group_attribute_access_active attribute is set, and if it is set,\n # then __getattr__ will not be called. Therefore, if getattr is called\n # with this name, it is because it hasn't been set yet and so this\n # method should raise an AttributeError to agree that it hasn't been\n # called yet.\n if item == '_group_attribute_access_active':\n raise AttributeError\n if not hasattr(self, '_group_attribute_access_active'):\n raise AttributeError\n if item in self.record_variables:\n var_dim = self.variables[item].dim\n return Quantity(self.variables[item].get_value().T,\n dim=var_dim, copy=True)\n elif item.endswith('_') and item[:-1] in self.record_variables:\n return self.variables[item[:-1]].get_value().T\n else:\n return Group.__getattr__(self, item)\n\n def __repr__(self):\n classname = self.__class__.__name__\n variables = repr(self.record_variables)\n return f\"<{classname}, recording {variables} from '{self.source.name}'>\"\n\n def record_single_timestep(self):\n \"\"\"\n Records a single time step. Useful for recording the values at the end\n of the simulation -- otherwise a `StateMonitor` will not record the\n last simulated values since its ``when`` attribute defaults to\n ``'start'``, i.e. the last recording is at the *beginning* of the last\n time step.\n\n Notes\n -----\n This function will only work if the `StateMonitor` has been already run,\n but a run with a length of ``0*ms`` does suffice.\n\n Examples\n --------\n >>> from brian2 import *\n >>> G = NeuronGroup(1, 'dv/dt = -v/(5*ms) : 1')\n >>> G.v = 1\n >>> mon = StateMonitor(G, 'v', record=True)\n >>> run(0.5*ms)\n >>> print(np.array_str(mon.v[:], precision=3))\n [[ 1. 0.98 0.961 0.942 0.923]]\n >>> print(mon.t[:])\n [ 0. 100. 200. 300. 400.] us\n >>> print(np.array_str(G.v[:], precision=3)) # last value had not been recorded\n [ 0.905]\n >>> mon.record_single_timestep()\n >>> print(mon.t[:])\n [ 0. 100. 200. 300. 400. 500.] us\n >>> print(np.array_str(mon.v[:], precision=3))\n [[ 1. 0.98 0.961 0.942 0.923 0.905]]\n \"\"\"\n if self.codeobj is None:\n raise TypeError(\"Can only record a single time step after the \"\n \"network has been run once.\")\n self.codeobj()\n" ]
[ [ "numpy.clip", "numpy.int32", "numpy.random.poisson", "numpy.ceil", "numpy.random.randn", "numpy.random.rand", "numpy.floor" ], [ "numpy.hstack", "numpy.logical_not", "numpy.cumsum", "numpy.zeros_like", "numpy.argsort", "numpy.array" ], [ "numpy.asarray", "numpy.issubdtype", "numpy.array", "numpy.nonzero" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
skywolf829/CSE5559_Final_Project
[ "c7b29e6fc0cbfd81252edbadaa0d733a0c24bee7" ]
[ "CNN/extract.py" ]
[ "## Basic Python libraries\nimport os\nfrom PIL import Image\n\n## Deep learning and array processing libraries\nimport numpy as np \nimport torch\nimport torch.nn.functional as F \nimport torchvision\nimport torchvision.transforms as transforms \n\n## Inner-project imports\nfrom model import EncoderCNN, DecoderRNN\n\n##### Code begins #####\n\n# Path to config file\nimage_directory = './CNN/images/'\nnetwork_directory = './CNN/models/'\n\n# Setting up other necessary paths\nencoder_path = f'{network_directory}encoder-5-3000.pkl'\n\n# Define the compute device (either GPU or CPU)\nif torch.cuda.is_available():\n compute_device = torch.device('cuda:0')\nelse:\n compute_device = torch.device('cpu')\nprint(f'Using device: {compute_device}')\n\n# Create the data transforms for evaluating\ntransform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])\n\n# Configure network\nnetwork = EncoderCNN(embed_size=256)\nnetwork = network.eval()\nnetwork.load_state_dict(torch.load(encoder_path, map_location='cpu'))\nnetwork = network.to(compute_device)\n\ndef get_visual_features(img):\n \"\"\"\n Extracts the visual features from an input image. Converts input\n into PIL Image, normalizes the image, then feeds it through a CNN.\n The features returned from the CNN are then pooled into a 1x512x1x1\n and finally squeezed to produce our [512] array output.\n\n Input\n img :: 3D NumPy array\n Takes a [x, y, 3] NumPy array to be converted into a PIL Image\n\n Output\n features :: 1D NumPy array\n Returns a [512] NumPy array of the visual features from the CNN\n \"\"\"\n\n # Convert to PIL Image and perform transformation\n img = Image.fromarray(img).convert('RGB')\n img = img.resize([224, 224], Image.LANCZOS)\n img = transform(img)\n\n # Add a 4th dimension and send to compute device (GPU or CPU)\n img = img.unsqueeze(0)\n img = img.to(compute_device)\n\n # Feed input through CNN\n features = network(img)\n\n # Squeeze into a [512] vector\n features = features.squeeze()\n\n # Convert to NumPy\n features = features.cpu().detach().numpy()\n return features\n\n# Below is only there for testing, commented out for now\n\"\"\"\nif __name__ == '__main__':\n # Inference\n img = Image.open(f'{image_directory}input/1.png')\n img = np.asarray(img)\n features = get_visual_features(img)\n print('End')\n\"\"\"" ]
[ [ "torch.device", "torch.cuda.is_available", "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Saibo-creator/Text-Summrize-Project
[ "d5ce54193110452a18cc0b223360c2bd004b4b28", "d5ce54193110452a18cc0b223360c2bd004b4b28" ]
[ "checkpoints/sum/train/hotel_mask/batch_size_16-notes_new_subword/code_snapshot/generate_from_lm.py", "data_loaders/mask_asp_1_with_summ_dataset.py" ]
[ "# generate_from_lm.py\n\n\"\"\"\nLoad a trained language model and generate text\n\nExample usage:\nPYTHONPATH=. python generate_from_lm.py \\\n--init=\"Although the food\" --tau=0.5 \\\n--sample_method=gumbel --g_eps=1e-5 \\\n--load_model='checkpoints/lm/mlstm/hotel/batch_size_64/lm_e9_2.93.pt' \\\n--dataset='hotel' --cpu=1 --sample_method=greedy\n\"\"\"\nimport pdb\n\nimport torch\nimport torch.nn as nn\n\nfrom models.custom_parallel import DataParallelModel\nfrom models.mlstm import StackedLSTMEncoderDecoder\nfrom models.nn_utils import move_to_cuda, setup_gpus, logits_to_prob, prob_to_vocab_id\nfrom project_settings import HParams, PAD_ID, DatasetConfig\nfrom utils import load_file, create_argparse_and_update_hp\n\n#######################################\n#\n# Setup\n#\n#######################################\n\nhp = HParams()\nhp, run_name, parser = create_argparse_and_update_hp(hp)\n\nparser.add_argument('--dataset', default='yelp',\n help='yelp,amazon; will determine which subwordenc to use')\nparser.add_argument('--init', default='The meaning of life is ',\n help=\"Initial text \")\nparser.add_argument('--load_model', default=None,\n help=\"Path to model to load\")\nparser.add_argument('--seq_len', type=int, default=50,\n help=\"Maximum sequence length\")\n\nparser.add_argument('--softmax_method', type=str, default='softmax',\n help=\"softmax or gumbel\")\nparser.add_argument('--sample_method', type=str, default='sample',\n help=\"sample or greedy\")\nparser.add_argument('--gumbel_hard', type=bool, default=False,\n help=\"whether to produce one-hot from Gumbel softmax\")\n\nparser.add_argument('--beam_size', type=int, default=1,\n help=\"Width for beam search\")\nparser.add_argument('--len_norm_factor', type=float, default=0.0,\n help=\"Normalization factor\")\nparser.add_argument('--len_norm_const', type=float, default=5.0,\n help=\"Normalization constant\")\n\nparser.add_argument('--gpus', default='0',\n help=\"CUDA visible devices, e.g. 2,3\")\n\nparser.add_argument('--cpu', default=False,\n help=\"if want to run on cpu, set --cpu=True\")\n\nopt = parser.parse_args()\n\nsetup_gpus(opt.gpus, hp.seed)\n\nds_conf = DatasetConfig(opt.dataset)\nif opt.load_model is None:\n opt.load_model = ds_conf.lm_path\n\n#######################################\n#\n# Run\n#\n#######################################\n\n\ndef batchify(data, batch_size):\n \"\"\"\n Args:\n data: 1D Tensor\n batch_size: int\n Returns:\n data: reshaped Tensor of size (batch_size, -1)\n Example where data is non-negative integers and batch_size = 4\n [[0 1 2 3 4 5 6 ]\n [7 8 9 10 11 12 13]\n [14 15 16 17 18 19 20]\n [21 22 23 24 25 26 27]]\n Note: not currently using this anymore. 
Was used when reading in data from text fileW\n \"\"\"\n nbatch = data.size(0) // batch_size\n data = data.narrow(0, 0, nbatch * batch_size) # same as slice\n data = data.view(batch_size, -1).contiguous()\n return data\n\n\n#\n# Prepare initial input text\n#\nsubwordenc = load_file(ds_conf.subwordenc_path)\ninit_texts = [init for init in opt.init.split('|')]\ninit_tokens = [subwordenc.encode(init) for init in init_texts]\ninit_lens = [len(init) for init in init_tokens]\nmax_len = max(init_lens)\ninit_tokens_padded = [tokens + [PAD_ID for _ in range(max_len - len(tokens))] for tokens in init_tokens]\ninit_tensor = [batchify(torch.LongTensor(init), 1) for init in init_tokens_padded]\ninit_tensor = torch.cat(init_tensor, dim=0) # [batch, lens\ninit_tensor = move_to_cuda(init_tensor)\nbatch_size = init_tensor.size(0)\n\n#\n# Load and set up model\n#\n\n\nif opt.cpu:\n checkpoint = torch.load(opt.load_model, map_location='cpu')\n\nelif torch.cuda.is_available():\n checkpoint = torch.load(opt.load_model) # StackedLSTMEncoder\n\nmodel = checkpoint['model']\nif isinstance(model, nn.DataParallel):\n model = model.module\n\nngpus = 1 if len(opt.gpus) == 1 else len(opt.gpus.split(','))\n\n#\n# Generate\n# #\nif 'mlstm' in opt.load_model:\n # Set up encoder decoder\n embed, rnn = model.embed, model.rnn\n enc_dec = StackedLSTMEncoderDecoder(embed, rnn)\n if torch.cuda.is_available():\n enc_dec.cuda()\n enc_dec = DataParallelModel(enc_dec) if ngpus > 1 else enc_dec\n enc_dec.eval()\n\n # Generate\n result = enc_dec(init_tensor,\n dec_kwargs={'seq_len': opt.seq_len,\n 'softmax_method': opt.softmax_method,\n 'sample_method': opt.sample_method,\n 'tau': hp.tau,\n 'gumbel_hard': opt.gumbel_hard,\n 'k': opt.beam_size,\n 'subwordenc': subwordenc})\n probs, ids, texts, extra = zip(*result) if ngpus > 1 else result\n if ngpus > 1: # flatten: each gpu returns lists of texts\n texts = [batch_text for gpu_texts in texts for batch_text in gpu_texts]\n\n for i in range(batch_size):\n print(init_texts[i] + texts[i])\n print('-' * 100)\n", "# hotel_dataset.py\n\n\"\"\"\nData preparation and loaders for Hotel dataset. Here, an item is one \"store\" or \"business\"\n\"\"\"\nimport random\nimport torch\nfrom collections import Counter, defaultdict\nimport json\nimport math\nimport nltk\nimport numpy as np\nimport os\nimport pdb\n\nfrom torch.utils.data import Dataset, DataLoader\nfrom torch.utils.data.sampler import Sampler\n\nfrom data_loaders.summ_dataset import SummReviewDataset, SummDataset\nfrom project_settings import HParams, DatasetConfig\nfrom utils import load_file, save_file\n\n\nclass Hotel_Mask_PytorchDataset(Dataset):\n \"\"\"\n Implements Pytorch Dataset\n\n One data point for model is n_reviews reviews for one item. When training, we want to have batch_size items and\n sample n_reviews reviews for each item. If a item has less than n_reviews reviews, we sample with replacement\n (sampling with replacement as then you'll be summarizing repeated reviews, but this shouldn't happen right now\n as only items with a minimum number of reviews is used (50). 
These items and their reviews are selected\n in YelpDataset.save_processed_splits().\n\n There is now also the option for variable n_docs -- see the documentations for n_reviews_min\n and n_reviews_max.\n \"\"\"\n\n def __init__(self,\n split=None,\n n_reviews=None,\n n_reviews_min=None,\n n_reviews_max=None,\n subset=None,\n seed=0,\n sample_reviews=True,\n item_max_reviews=None):\n \"\"\"\n Args:\n split: str ('train', val', 'test')\n n_reviews: int\n\n n_reviews_min: int\n n_reviews_max: int\n - When these two are provided, then there will be variable n_reviews (i.e. two different\n training examples may be composed of different number of reviews to summarize)\n - Some of this\n\n subset: float (Value in [0.0, 1.0]. If given, then dataset is truncated to subset of the businesses\n seed: int (set seed because we will be using np.random.choice to sample reviews if sample_reviews=True)\n sample_reviews: boolean\n - When True, __getitem_ will sample n_reviews reviews for each item. The number of times a item appears\n in the dataset is dependent on uniform_items.\n - When False, each item will appear math.floor(number of reviews item has / n_reviews) times\n so that almost every review is seen (with up to n_reviews - 1 reviews not seen).\n - Setting False is useful for (a) validation / test, and (b) simply iterating over all the reviews\n (e.g. to build the vocabulary).\n item_max_reviews: int (maximum number of reviews a item can have)\n - This is used to remove outliers from the data. This is especially important if uniform_items=False,\n as there may be a large number of reviews in a training epoch coming from a single item. This also\n still matters when uniform_items=True, as items an outlier number of reviews will have reviews\n that are never sampled.\n \n \n - For the Hotel dataset, there are:\n 257,186 reviews in the test set\n 1,749,158 reviews in the train set\n 216,031 reviews in the dev set\n \n - For the Yelp dataset\n 6,685,900 reviews in all\n 192,609 business\n \n \"\"\"\n self.split = split\n\n self.n_reviews = n_reviews\n self.n_reviews_min = n_reviews_min\n self.n_reviews_max = n_reviews_max\n\n self.subset = subset\n self.sample_reviews = sample_reviews\n item_max_reviews = float('inf') if item_max_reviews is None else item_max_reviews\n self.item_max_reviews = item_max_reviews\n\n self.ds_conf = DatasetConfig('mask_asp_1_with_summ') # used for paths\n\n # Set random seed so that choice is always the same across experiments\n # Especially necessary for test set (along with shuffle=False in the DataLoader)\n np.random.seed(seed)\n\n self.items = self.load_all_items()\n\n # Create map from idx-th data point to item\n item_to_nreviews = load_file(\n os.path.join(self.ds_conf.processed_path, '{}/store-to-nreviews.json'.format(split)))\n self.idx_to_item = {}\n\n if sample_reviews:\n if n_reviews_min and n_reviews_max:\n self.idx_to_nreviews = {}\n self.idx_to_item_idxs = {} # indices of reviews\n\n ns = [8] #[4, 8, 16]\n # ns = range(n_reviews_min, n_reviews_max+1, 4) # e.g. 
[4,8,12,16]\n idx = 0\n for item, n_reviews in item_to_nreviews.items():\n item_n = 0\n selected_idxs = set()\n while item_n < n_reviews:\n # Keep selecting batches of reviews from this store (without replacement)\n cur_n = random.choice(ns)\n if item_n + cur_n > n_reviews:\n break\n available_idxs = set(range(n_reviews)).difference(selected_idxs)\n cur_idxs = np.random.choice(list(available_idxs), cur_n, replace=False)\n selected_idxs.update(cur_idxs)\n\n # update\n self.idx_to_item[idx] = item\n self.idx_to_nreviews[idx] = cur_n\n self.idx_to_item_idxs[idx] = cur_idxs\n item_n += cur_n\n idx += 1\n\n else:\n # Get the number of times each item will appear in a pass through this dataset\n item_min_reviews = min(item_to_nreviews.values())\n if item_max_reviews == float('inf'):\n n_per_item = math.ceil(item_min_reviews / n_reviews)\n else:\n n_per_item = np.mean([n for n in item_to_nreviews.values() if n <= item_max_reviews])\n n_per_item = math.ceil(n_per_item / n_reviews)\n # print('Each item will appear {} times'.format(n_per_item))\n\n idx = 0\n for item, n_reviews in item_to_nreviews.items():\n if n_reviews <= item_max_reviews:\n for _ in range(n_per_item):\n self.idx_to_item[idx] = item\n idx += 1\n else:\n # __getitem__ will not sample\n idx = 0\n self.idx_to_item_startidx = {}\n # idx items idx of one dataset item. item_startidx is the idx within that item's reviews.\n tot = 0\n for item, item_n_reviews in item_to_nreviews.items():\n if item_n_reviews <= item_max_reviews:\n tot += item_n_reviews\n item_startidx = 0\n for _ in range(math.floor(item_n_reviews / n_reviews)):\n self.idx_to_item[idx] = item\n self.idx_to_item_startidx[idx] = item_startidx\n idx += 1\n item_startidx += n_reviews\n\n if self.subset:\n end = int(self.subset * len(self.idx_to_item))\n for idx in range(end, len(self.idx_to_item)):\n del self.idx_to_item[idx]\n\n self.n = len(self.idx_to_item)\n\n def load_all_items(self):\n \"\"\"\n Return dictionary from item id to dict\n \"\"\"\n print('Loading all items')\n items = {}\n with open(self.ds_conf.businesses_path, 'r', encoding='utf-8') as f:\n business=json.load(f) #business is a list of length 2,222,373\n for el in business: #el ={'hotel_url':...;'text':.....; 'rating': }\n items[el['hotel_url']] = el\n return items\n\n def __getitem__(self, idx):\n # Map idx to item and load reviews\n item = self.idx_to_item[idx] # id\n fp = os.path.join(self.ds_conf.processed_path, '{}/{}.json'.format(self.split, item))\n reviews = load_file(fp)\n\n # Get reviews from item\n if self.sample_reviews:\n if self.n_reviews_min and self.n_reviews_max:\n review_idxs = self.idx_to_item_idxs[idx]\n reviews = [reviews[r_idx] for r_idx in review_idxs]\n else:\n if len(reviews) < self.n_reviews:\n reviews = np.random.choice(reviews, size=self.n_reviews, replace=True)\n else:\n reviews = np.random.choice(reviews, size=self.n_reviews, replace=False)\n else:\n start_idx = self.idx_to_item_startidx[idx]\n reviews = reviews[start_idx:start_idx + self.n_reviews]\n\n # Collect data for this item\n hotel_ids,texts, ratings = zip(*[(s['hotel_url'],s['text'], s['rating']) for s in reviews])\n texts = SummDataset.concat_docs(texts, edok_token=True)\n avg_rating = int(np.round(np.mean(ratings)))\n hotel_id=hotel_ids[0]\n\n try:\n categories = '---'.join(self.items[item]['categories'])\n except Exception as e:\n categories = '---'\n # print('-'*10)\n # print('xxxx',self.items)\n # print('-' * 10)\n # print(\"xxxx\",item)\n # print('-' * 10)\n metadata = {'item': self.items[item]['hotel_url'],\n 
'short_summary': self.items[item]['short_summary'],\n 'long_summary': self.items[item]['long_summary']}\n\n\n\n return hotel_id, texts, avg_rating, metadata\n\n def __len__(self):\n return self.n\n\nclass VariableNDocsSampler(Sampler):\n \"\"\"\n Produce indices for variable n_docs at a time. Used in conjunction with\n n_docs_min and n_docs_max, which creates the dictionaries needed in\n YelpPytorchDataset.\n\n Arguments:\n data_source (Dataset): dataset to sample from\n \"\"\"\n\n def __init__(self, dataset):\n super(VariableNDocsSampler, self).__init__(dataset)\n self.dataset = dataset\n\n # Group data points together by how the number of reviews\n # This way the SummarizationModel will be fed a batch of points, each summarizing\n # the same number of reviews. This is important as the model reshapes tensors\n # by n_docs, which allows it to be done in parallel.\n nreviews_to_idxs = defaultdict(list)\n for idx, nreviews in dataset.idx_to_nreviews.items():\n nreviews_to_idxs[nreviews].append(idx)\n\n # This is hard-coded: the summarization model I've been training takes about\n # 10 GB on one GPU for batch_size = 4 and n_docs = 8. We'll scale the batch_size\n # relative to the n_docs for the given minibatch such that\n # batch_size * n_docs / ngpus = 32, so as to use all the GPU memory as much\n # as possible. For n_docs_min=4 and n_docs_max=16, n_docs is in [4,8,12,16]\n # (this is hard-coded in currently in YelpPytorchDataset).\n ngpus = torch.cuda.device_count()\n\n dataloader_idxs = [] # list of lists of indices, each sublist is a minibatch\n for nreviews, idxs in nreviews_to_idxs.items():\n batch_size = int(32 / nreviews * ngpus)\n print(nreviews, batch_size)\n selected_idxs = set()\n while len(set(idxs).difference(selected_idxs)) > batch_size:\n # There is enough unselected points to form a batch\n available_idxs = set(idxs).difference(selected_idxs)\n cur_idxs = np.random.choice(list(available_idxs), batch_size, replace=False)\n dataloader_idxs.append(cur_idxs)\n selected_idxs.update(cur_idxs)\n\n random.shuffle(dataloader_idxs)\n\n self.dataloader_idxs = dataloader_idxs\n\n def __iter__(self):\n return iter(self.dataloader_idxs)\n\n def __len__(self):\n return len(self.dataloader_idxs)\n\n\nclass Mask_Asp_1_With_Summ_Dataset(SummReviewDataset):\n \"\"\"\n Main class for using Hotel dataset\n \"\"\"\n def __init__(self):\n super(Mask_Asp_1_With_Summ_Dataset, self).__init__()\n self.name = 'mask_asp_1_with_summ'\n self.conf = DatasetConfig('mask_asp_1_with_summ')\n self.n_ratings_labels = 5\n self.reviews = None\n self.subwordenc = load_file(self.conf.subwordenc_path)\n\n ####################################\n #\n # Utils\n #\n ####################################\n\n\n####### below done #####\n def load_all_reviews(self):\n \"\"\"\n Return list of dictionaries\n \"\"\"\n print('Loading all reviews')\n reviews = []\n with open(self.conf.reviews_path, 'r', encoding='utf-8') as f:\n for line in f.readlines():\n reviews.append(json.loads(line))\n return reviews\n####### above done #####\n\n\n def get_data_loader(self, split='train',\n n_docs=8, n_docs_min=None, n_docs_max=None,\n subset=None, seed=0, sample_reviews=True,\n category=None, # for compatability with AmazonDataset, which filters in AmazonPytorchDataset\n batch_size=64, shuffle=True, num_workers=4):\n \"\"\"\n Return iterator over specific split in dataset(providing mini_mbtch)\n \"\"\"\n ds = Hotel_Mask_PytorchDataset(split=split,\n n_reviews=n_docs, n_reviews_min=n_docs_min, n_reviews_max=n_docs_max,\n subset=subset, 
seed=seed, sample_reviews=sample_reviews,\n item_max_reviews=self.conf.item_max_reviews)\n\n if n_docs_min and n_docs_max:\n loader = DataLoader(ds, batch_sampler=VariableNDocsSampler(ds), num_workers=num_workers)\n else:\n loader = DataLoader(ds, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)\n return loader\n\n ####################################\n #\n # One off functions\n #\n ####################################\n def save_processed_splits(self):\n \"\"\"\n Save train, val, and test splits. Splits are across items (e.g. a item is either in train, val, or test).\n Iterates over all reviews in the original dataset. Tries to get close to a 80-10-10 split.\n\n Args:\n review_max_len: int (maximum length in subtokens a review can be)\n item_min_reviews: int (min number of reviews a item must have)\n \"\"\"\n review_max_len = self.conf.review_max_len\n item_min_reviews = self.conf.item_min_reviews\n\n print('Saving processed splits')\n if self.reviews is None:\n self.reviews = self.load_all_reviews()\n\n print('Filtering reviews longer than: {}'.format(review_max_len))\n item_to_reviews = defaultdict(list)\n \n for r in self.reviews[0]:\n if len(self.subwordenc.encode(r['text'])) < review_max_len:\n item_to_reviews[r['hotel_url']].append(r)\n\n # Calculate target amount of reviews per item\n n = sum([len(revs) for revs in item_to_reviews.values()])\n print('Total number of reviews before filtering: {}'.format(len(self.reviews[0])))\n print('Total number of reviews after filtering: {}'.format(n))\n\n print('Filtering items with less than {} reviews'.format(item_min_reviews))\n item_to_n = {}\n for item in list(item_to_reviews.keys()): # have to do list and keys for python3 to delete in-place\n # for item, reviews in item_to_reviews.items():\n n = len(item_to_reviews[item])\n if n < item_min_reviews:\n del item_to_reviews[item]\n else:\n item_to_n[item] = n\n n = sum(item_to_n.values())\n print('Total number of reviews after filtering: {}'.format(n))\n print('Total number of items after filtering: {}'.format(len(item_to_n)))\n\n # Construct splits\n n_tr, n_val, n_te = int(0.8 * n), int(0.1 * n), int(0.1 * n)\n cur_n_tr, cur_n_val, cur_n_te = 0, 0, 0\n split_to_item_to_nreviews = {'train': {}, 'val': {}, 'test': {}}\n # In descending order of number of reviews per item\n for i, (item, n) in enumerate(sorted(item_to_n.items(), key=lambda x: -x[1])):\n # once every ten items, save to val / test if we haven't yet hit the target number\n if (i % 10 == 8) and (cur_n_val < n_val):\n split = 'val'\n cur_n_val += n\n elif (i % 10 == 9) and (cur_n_te < n_te):\n split = 'test'\n cur_n_te += n\n else:\n split = 'train'\n cur_n_tr += n\n\n out_fp = os.path.join(self.conf.processed_path, '{}/{}.json'.format(split, item))\n save_file(item_to_reviews[item], out_fp, verbose=False)\n\n split_to_item_to_nreviews[split][item] = n\n\n print('Number of train reviews: {} / {}'.format(cur_n_tr, n_tr))\n print('Number of val reviews: {} / {}'.format(cur_n_val, n_val))\n print('Number of test reviews: {} / {}'.format(cur_n_te, n_te))\n\n # This file is used by YelpPytorchDataset\n for split, item_to_nreviews in split_to_item_to_nreviews.items():\n out_fp = os.path.join(self.conf.processed_path, '{}/store-to-nreviews.json'.format(split))\n save_file(item_to_nreviews, out_fp)\n\n\n\nif __name__ == '__main__':\n from data_loaders.summ_dataset_factory import SummDatasetFactory\n\n hp = HParams()\n ds = SummDatasetFactory.get('mask_asp_1_with_summ')\n ds.save_processed_splits()\n \n" ]
[ [ "torch.LongTensor", "torch.load", "torch.cuda.is_available", "torch.cat" ], [ "numpy.random.seed", "numpy.random.choice", "torch.utils.data.DataLoader", "numpy.mean", "torch.cuda.device_count" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jxhe/fairseq
[ "214e3fed5619733efa4f1f82c61db58e5ce08ad8", "3ba384cc6c58a139f0ccfbc4e7f183e7c4dfd839" ]
[ "fairseq/progress_bar.py", "tests/speech_recognition/asr_test_base.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\"\"\"\nWrapper around various loggers and progress bars (e.g., tqdm).\n\"\"\"\n\nfrom collections import OrderedDict\nfrom contextlib import contextmanager\nimport json\nimport logging\nfrom numbers import Number\nimport os\nimport sys\n\nimport torch\n\nfrom fairseq import distributed_utils\nfrom fairseq.meters import AverageMeter, StopwatchMeter, TimeMeter\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef build_progress_bar(args, iterator, epoch=None, prefix=None, default='tqdm', no_progress_bar='none'):\n if args.log_format is None:\n args.log_format = no_progress_bar if args.no_progress_bar else default\n\n if args.log_format == 'tqdm' and not sys.stderr.isatty():\n args.log_format = 'simple'\n\n if args.log_format == 'json':\n bar = json_progress_bar(iterator, epoch, prefix, args.log_interval)\n elif args.log_format == 'none':\n bar = noop_progress_bar(iterator, epoch, prefix)\n elif args.log_format == 'simple':\n bar = simple_progress_bar(iterator, epoch, prefix, args.log_interval)\n elif args.log_format == 'tqdm':\n bar = tqdm_progress_bar(iterator, epoch, prefix)\n else:\n raise ValueError('Unknown log format: {}'.format(args.log_format))\n\n if args.tensorboard_logdir and distributed_utils.is_master(args):\n try:\n # [FB only] custom wrapper for TensorBoard\n import palaas # noqa\n from fairseq.fb_tbmf_wrapper import fb_tbmf_wrapper\n bar = fb_tbmf_wrapper(bar, args, args.log_interval)\n except ImportError:\n bar = tensorboard_log_wrapper(bar, args.tensorboard_logdir, args)\n\n return bar\n\n\ndef format_stat(stat):\n if isinstance(stat, Number):\n stat = '{:g}'.format(stat)\n elif isinstance(stat, AverageMeter):\n stat = '{:.3f}'.format(stat.avg)\n elif isinstance(stat, TimeMeter):\n stat = '{:g}'.format(round(stat.avg))\n elif isinstance(stat, StopwatchMeter):\n stat = '{:g}'.format(round(stat.sum))\n elif torch.is_tensor(stat):\n stat = stat.tolist()\n return stat\n\n\nclass progress_bar(object):\n \"\"\"Abstract class for progress bars.\"\"\"\n def __init__(self, iterable, epoch=None, prefix=None):\n self.iterable = iterable\n self.offset = getattr(iterable, 'offset', 0)\n self.epoch = epoch\n self.prefix = ''\n if epoch is not None:\n self.prefix += 'epoch {:03d}'.format(epoch)\n if prefix is not None:\n self.prefix += ' | {}'.format(prefix)\n\n def __len__(self):\n return len(self.iterable)\n\n def __enter__(self):\n return self\n\n def __exit__(self, *exc):\n return False\n\n def __iter__(self):\n raise NotImplementedError\n\n def log(self, stats, tag=None, step=None):\n \"\"\"Log intermediate stats according to log_interval.\"\"\"\n raise NotImplementedError\n\n def print(self, stats, tag=None, step=None):\n \"\"\"Print end-of-epoch stats.\"\"\"\n raise NotImplementedError\n\n def _str_commas(self, stats):\n return ', '.join(key + '=' + stats[key].strip()\n for key in stats.keys())\n\n def _str_pipes(self, stats):\n return ' | '.join(key + ' ' + stats[key].strip()\n for key in stats.keys())\n\n def _format_stats(self, stats):\n postfix = OrderedDict(stats)\n # Preprocess stats according to datatype\n for key in postfix.keys():\n postfix[key] = str(format_stat(postfix[key]))\n return postfix\n\n\n@contextmanager\ndef rename_logger(logger, new_name):\n old_name = logger.name\n if new_name is not None:\n logger.name = new_name\n yield logger\n logger.name = old_name\n\n\nclass 
json_progress_bar(progress_bar):\n \"\"\"Log output in JSON format.\"\"\"\n\n def __init__(self, iterable, epoch=None, prefix=None, log_interval=1000):\n super().__init__(iterable, epoch, prefix)\n self.log_interval = log_interval\n self.stats = None\n self.tag = None\n\n def __iter__(self):\n size = float(len(self.iterable))\n for i, obj in enumerate(self.iterable, start=self.offset):\n yield obj\n if (\n self.stats is not None\n and i > 0\n and self.log_interval is not None\n and (i + 1) % self.log_interval == 0\n ):\n update = (\n self.epoch - 1 + float(i / size)\n if self.epoch is not None\n else None\n )\n stats = self._format_stats(self.stats, epoch=self.epoch, update=update)\n with rename_logger(logger, self.tag):\n logger.info(json.dumps(stats))\n\n def log(self, stats, tag=None, step=None):\n \"\"\"Log intermediate stats according to log_interval.\"\"\"\n self.stats = stats\n self.tag = tag\n\n def print(self, stats, tag=None, step=None):\n \"\"\"Print end-of-epoch stats.\"\"\"\n self.stats = stats\n if tag is not None:\n self.stats = OrderedDict([(tag + '_' + k, v) for k, v in self.stats.items()])\n stats = self._format_stats(self.stats, epoch=self.epoch)\n with rename_logger(logger, tag):\n logger.info(json.dumps(stats))\n\n def _format_stats(self, stats, epoch=None, update=None):\n postfix = OrderedDict()\n if epoch is not None:\n postfix['epoch'] = epoch\n if update is not None:\n postfix['update'] = round(update, 3)\n # Preprocess stats according to datatype\n for key in stats.keys():\n postfix[key] = format_stat(stats[key])\n return postfix\n\n\nclass noop_progress_bar(progress_bar):\n \"\"\"No logging.\"\"\"\n\n def __init__(self, iterable, epoch=None, prefix=None):\n super().__init__(iterable, epoch, prefix)\n\n def __iter__(self):\n for obj in self.iterable:\n yield obj\n\n def log(self, stats, tag=None, step=None):\n \"\"\"Log intermediate stats according to log_interval.\"\"\"\n pass\n\n def print(self, stats, tag=None, step=None):\n \"\"\"Print end-of-epoch stats.\"\"\"\n pass\n\n\nclass simple_progress_bar(progress_bar):\n \"\"\"A minimal logger for non-TTY environments.\"\"\"\n\n def __init__(self, iterable, epoch=None, prefix=None, log_interval=1000):\n super().__init__(iterable, epoch, prefix)\n self.log_interval = log_interval\n self.stats = None\n self.tag = None\n\n def __iter__(self):\n size = len(self.iterable)\n for i, obj in enumerate(self.iterable, start=self.offset):\n yield obj\n if (\n self.stats is not None\n and i > 0\n and self.log_interval is not None\n and (i + 1) % self.log_interval == 0\n ):\n postfix = self._str_commas(self.stats)\n with rename_logger(logger, self.tag):\n logger.info('{}: {:5d} / {:d} {}'.format(self.prefix, i, size, postfix))\n\n def log(self, stats, tag=None, step=None):\n \"\"\"Log intermediate stats according to log_interval.\"\"\"\n self.stats = self._format_stats(stats)\n self.tag = tag\n\n def print(self, stats, tag=None, step=None):\n \"\"\"Print end-of-epoch stats.\"\"\"\n postfix = self._str_pipes(self._format_stats(stats))\n with rename_logger(logger, tag):\n logger.info('{} | {}'.format(self.prefix, postfix))\n\n\nclass tqdm_progress_bar(progress_bar):\n \"\"\"Log to tqdm.\"\"\"\n\n def __init__(self, iterable, epoch=None, prefix=None):\n super().__init__(iterable, epoch, prefix)\n from tqdm import tqdm\n self.tqdm = tqdm(iterable, self.prefix, leave=False)\n\n def __iter__(self):\n return iter(self.tqdm)\n\n def log(self, stats, tag=None, step=None):\n \"\"\"Log intermediate stats according to log_interval.\"\"\"\n 
self.tqdm.set_postfix(self._format_stats(stats), refresh=False)\n\n def print(self, stats, tag=None, step=None):\n \"\"\"Print end-of-epoch stats.\"\"\"\n postfix = self._str_pipes(self._format_stats(stats))\n self.tqdm.write('{} | {}'.format(self.tqdm.desc, postfix))\n\n\ntry:\n from tensorboardX import SummaryWriter\n _tensorboard_writers = {}\nexcept ImportError:\n SummaryWriter = None\n\n\nclass tensorboard_log_wrapper(progress_bar):\n \"\"\"Log to tensorboard.\"\"\"\n\n def __init__(self, wrapped_bar, tensorboard_logdir, args):\n self.wrapped_bar = wrapped_bar\n self.tensorboard_logdir = tensorboard_logdir\n self.args = args\n\n if SummaryWriter is None:\n logger.warning(\n \"tensorboard or required dependencies not found, please see README \"\n \"for using tensorboard. (e.g. pip install tensorboardX)\"\n )\n\n def _writer(self, key):\n if SummaryWriter is None:\n return None\n _writers = _tensorboard_writers\n if key not in _writers:\n _writers[key] = SummaryWriter(os.path.join(self.tensorboard_logdir, key))\n _writers[key].add_text('args', str(vars(self.args)))\n _writers[key].add_text('sys.argv', \" \".join(sys.argv))\n return _writers[key]\n\n def __iter__(self):\n return iter(self.wrapped_bar)\n\n def log(self, stats, tag=None, step=None):\n \"\"\"Log intermediate stats to tensorboard.\"\"\"\n self._log_to_tensorboard(stats, tag, step)\n self.wrapped_bar.log(stats, tag=tag, step=step)\n\n def print(self, stats, tag=None, step=None):\n \"\"\"Print end-of-epoch stats.\"\"\"\n self._log_to_tensorboard(stats, tag, step)\n self.wrapped_bar.print(stats, tag=tag, step=step)\n\n def _log_to_tensorboard(self, stats, tag=None, step=None):\n writer = self._writer(tag or '')\n if writer is None:\n return\n if step is None:\n step = stats['num_updates']\n for key in stats.keys() - {'num_updates'}:\n if isinstance(stats[key], AverageMeter):\n writer.add_scalar(key, stats[key].val, step)\n elif isinstance(stats[key], Number):\n writer.add_scalar(key, stats[key], step)\n", "#!/usr/bin/env python3\n\nimport argparse\nimport os\nimport unittest\nfrom inspect import currentframe, getframeinfo\n\nimport numpy as np\nimport torch\nfrom fairseq.data import data_utils as fairseq_data_utils\nfrom fairseq.data.dictionary import Dictionary\nfrom fairseq.models import (\n BaseFairseqModel,\n FairseqDecoder,\n FairseqEncoder,\n FairseqEncoderDecoderModel,\n FairseqEncoderModel,\n FairseqModel,\n)\nfrom fairseq.tasks.fairseq_task import FairseqTask\nfrom examples.speech_recognition.data.data_utils import lengths_to_encoder_padding_mask\n\n\nDEFAULT_TEST_VOCAB_SIZE = 100\n\n\n# ///////////////////////////////////////////////////////////////////////////\n# utility function to setup dummy dict/task/input\n# ///////////////////////////////////////////////////////////////////////////\n\n\ndef get_dummy_dictionary(vocab_size=DEFAULT_TEST_VOCAB_SIZE):\n dummy_dict = Dictionary()\n # add dummy symbol to satisfy vocab size\n for id, _ in enumerate(range(vocab_size)):\n dummy_dict.add_symbol(\"{}\".format(id), 1000)\n return dummy_dict\n\n\nclass DummyTask(FairseqTask):\n def __init__(self, args):\n super().__init__(args)\n self.dictionary = get_dummy_dictionary()\n if getattr(self.args, \"ctc\", False):\n self.dictionary.add_symbol(\"<ctc_blank>\")\n self.tgt_dict = self.dictionary\n\n @property\n def target_dictionary(self):\n return self.dictionary\n\n\ndef get_dummy_task_and_parser():\n \"\"\"\n to build a fariseq model, we need some dummy parse and task. 
This function\n is used to create dummy task and parser to faciliate model/criterion test\n\n Note: we use FbSpeechRecognitionTask as the dummy task. You may want\n to use other task by providing another function\n \"\"\"\n parser = argparse.ArgumentParser(\n description=\"test_dummy_s2s_task\", argument_default=argparse.SUPPRESS\n )\n DummyTask.add_args(parser)\n args = parser.parse_args([])\n task = DummyTask.setup_task(args)\n return task, parser\n\n\ndef get_dummy_input(T=100, D=80, B=5, K=100):\n forward_input = {}\n # T max sequence length\n # D feature vector dimension\n # B batch size\n # K target dimension size\n feature = torch.randn(B, T, D)\n # this (B, T, D) layout is just a convention, you can override it by\n # write your own _prepare_forward_input function\n src_lengths = torch.from_numpy(\n np.random.randint(low=1, high=T, size=B, dtype=np.int64)\n )\n src_lengths[0] = T # make sure the maximum length matches\n prev_output_tokens = []\n for b in range(B):\n token_length = np.random.randint(low=1, high=src_lengths[b].item() + 1)\n tokens = np.random.randint(low=0, high=K, size=token_length, dtype=np.int64)\n prev_output_tokens.append(torch.from_numpy(tokens))\n\n prev_output_tokens = fairseq_data_utils.collate_tokens(\n prev_output_tokens,\n pad_idx=1,\n eos_idx=2,\n left_pad=False,\n move_eos_to_beginning=False,\n )\n src_lengths, sorted_order = src_lengths.sort(descending=True)\n forward_input[\"src_tokens\"] = feature.index_select(0, sorted_order)\n forward_input[\"src_lengths\"] = src_lengths\n forward_input[\"prev_output_tokens\"] = prev_output_tokens\n\n return forward_input\n\n\ndef get_dummy_encoder_output(encoder_out_shape=(100, 80, 5)):\n \"\"\"\n This only provides an example to generate dummy encoder output\n \"\"\"\n (T, B, D) = encoder_out_shape\n encoder_out = {}\n\n encoder_out[\"encoder_out\"] = torch.from_numpy(\n np.random.randn(*encoder_out_shape).astype(np.float32)\n )\n seq_lengths = torch.from_numpy(np.random.randint(low=1, high=T, size=B))\n # some dummy mask\n encoder_out[\"encoder_padding_mask\"] = torch.arange(T).view(1, T).expand(\n B, -1\n ) >= seq_lengths.view(B, 1).expand(-1, T)\n encoder_out[\"encoder_padding_mask\"].t_()\n\n # encoer_padding_mask is (T, B) tensor, with (t, b)-th element indicate\n # whether encoder_out[t, b] is valid (=0) or not (=1)\n return encoder_out\n\n\ndef _current_postion_info():\n cf = currentframe()\n frameinfo = \" (at {}:{})\".format(\n os.path.basename(getframeinfo(cf).filename), cf.f_back.f_lineno\n )\n return frameinfo\n\n\ndef check_encoder_output(encoder_output, batch_size=None):\n \"\"\"we expect encoder_output to be a dict with the following\n key/value pairs:\n - encoder_out: a Torch.Tensor\n - encoder_padding_mask: a binary Torch.Tensor\n \"\"\"\n if not isinstance(encoder_output, dict):\n msg = (\n \"FairseqEncoderModel.forward(...) must be a dict\" + _current_postion_info()\n )\n return False, msg\n\n if \"encoder_out\" not in encoder_output:\n msg = (\n \"FairseqEncoderModel.forward(...) must contain encoder_out\"\n + _current_postion_info()\n )\n return False, msg\n\n if \"encoder_padding_mask\" not in encoder_output:\n msg = (\n \"FairseqEncoderModel.forward(...) 
must contain encoder_padding_mask\"\n + _current_postion_info()\n )\n return False, msg\n\n if not isinstance(encoder_output[\"encoder_out\"], torch.Tensor):\n msg = \"encoder_out must be a torch.Tensor\" + _current_postion_info()\n return False, msg\n\n if encoder_output[\"encoder_out\"].dtype != torch.float32:\n msg = \"encoder_out must have float32 dtype\" + _current_postion_info()\n return False, msg\n\n mask = encoder_output[\"encoder_padding_mask\"]\n if mask is not None:\n if not isinstance(mask, torch.Tensor):\n msg = (\n \"encoder_padding_mask must be a torch.Tensor\" + _current_postion_info()\n )\n return False, msg\n if (\n mask.dtype != torch.uint8\n and (not hasattr(torch, 'bool') or mask.dtype != torch.bool)\n ):\n msg = (\n \"encoder_padding_mask must have dtype of uint8\"\n + _current_postion_info()\n )\n return False, msg\n\n if mask.dim() != 2:\n msg = (\n \"we expect encoder_padding_mask to be a 2-d tensor, in shape (T, B)\"\n + _current_postion_info()\n )\n return False, msg\n\n if batch_size is not None and mask.size(1) != batch_size:\n msg = (\n \"we expect encoder_padding_mask to be a 2-d tensor, with size(1)\"\n + \" being the batch size\"\n + _current_postion_info()\n )\n return False, msg\n return True, None\n\n\ndef check_decoder_output(decoder_output):\n \"\"\"we expect output from a decoder is a tuple with the following constraint:\n - the first element is a torch.Tensor\n - the second element can be anything (reserved for future use)\n \"\"\"\n if not isinstance(decoder_output, tuple):\n msg = \"FariseqDecoder output must be a tuple\" + _current_postion_info()\n return False, msg\n\n if len(decoder_output) != 2:\n msg = \"FairseqDecoder output must be 2-elem tuple\" + _current_postion_info()\n return False, msg\n\n if not isinstance(decoder_output[0], torch.Tensor):\n msg = (\n \"FariseqDecoder output[0] must be a torch.Tensor\" + _current_postion_info()\n )\n return False, msg\n\n return True, None\n\n\n# ///////////////////////////////////////////////////////////////////////////\n# Base Test class\n# ///////////////////////////////////////////////////////////////////////////\n\n\nclass TestBaseFairseqModelBase(unittest.TestCase):\n \"\"\"\n This class is used to facilitate writing unittest for any class derived from\n `BaseFairseqModel`.\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n if cls is TestBaseFairseqModelBase:\n raise unittest.SkipTest(\"Skipping test case in base\")\n super().setUpClass()\n\n def setUpModel(self, model):\n self.assertTrue(isinstance(model, BaseFairseqModel))\n self.model = model\n\n def setupInput(self):\n pass\n\n def setUp(self):\n self.model = None\n self.forward_input = None\n pass\n\n\nclass TestFairseqEncoderDecoderModelBase(TestBaseFairseqModelBase):\n \"\"\"\n base code to test FairseqEncoderDecoderModel (formally known as\n `FairseqModel`) must be derived from this base class\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n if cls is TestFairseqEncoderDecoderModelBase:\n raise unittest.SkipTest(\"Skipping test case in base\")\n super().setUpClass()\n\n def setUpModel(self, model_cls, extra_args_setters=None):\n self.assertTrue(\n issubclass(model_cls, (FairseqEncoderDecoderModel, FairseqModel)),\n msg=\"This class only tests for FairseqModel subclasses\",\n )\n\n task, parser = get_dummy_task_and_parser()\n model_cls.add_args(parser)\n\n args = parser.parse_args([])\n if extra_args_setters is not None:\n for args_setter in extra_args_setters:\n args_setter(args)\n model = model_cls.build_model(args, task)\n 
self.model = model\n\n def setUpInput(self, input=None):\n self.forward_input = get_dummy_input() if input is None else input\n\n def setUp(self):\n super().setUp()\n\n def test_forward(self):\n if self.model and self.forward_input:\n forward_output = self.model.forward(**self.forward_input)\n # for FairseqEncoderDecoderModel, forward returns a tuple of two\n # elements, the first one is a Torch.Tensor\n succ, msg = check_decoder_output(forward_output)\n if not succ:\n self.assertTrue(succ, msg=msg)\n self.forward_output = forward_output\n\n def test_get_normalized_probs(self):\n if self.model and self.forward_input:\n forward_output = self.model.forward(**self.forward_input)\n logprob = self.model.get_normalized_probs(forward_output, log_probs=True)\n prob = self.model.get_normalized_probs(forward_output, log_probs=False)\n\n # in order for different models/criterion to play with each other\n # we need to know whether the logprob or prob output is batch_first\n # or not. We assume an additional attribute will be attached to logprob\n # or prob. If you find your code failed here, simply override\n # FairseqModel.get_normalized_probs, see example at\n # https://fburl.com/batch_first_example\n self.assertTrue(hasattr(logprob, \"batch_first\"))\n self.assertTrue(hasattr(prob, \"batch_first\"))\n\n self.assertTrue(torch.is_tensor(logprob))\n self.assertTrue(torch.is_tensor(prob))\n\n\nclass TestFairseqEncoderModelBase(TestBaseFairseqModelBase):\n \"\"\"\n base class to test FairseqEncoderModel\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n if cls is TestFairseqEncoderModelBase:\n raise unittest.SkipTest(\"Skipping test case in base\")\n super().setUpClass()\n\n def setUpModel(self, model_cls, extra_args_setters=None):\n self.assertTrue(\n issubclass(model_cls, FairseqEncoderModel),\n msg=\"This class is only used for testing FairseqEncoderModel\",\n )\n task, parser = get_dummy_task_and_parser()\n model_cls.add_args(parser)\n args = parser.parse_args([])\n if extra_args_setters is not None:\n for args_setter in extra_args_setters:\n args_setter(args)\n\n model = model_cls.build_model(args, task)\n self.model = model\n\n def setUpInput(self, input=None):\n self.forward_input = get_dummy_input() if input is None else input\n # get_dummy_input() is originally for s2s, here we delete extra dict\n # items, so it can be used for EncoderModel / Encoder as well\n self.forward_input.pop(\"prev_output_tokens\", None)\n\n def setUp(self):\n super().setUp()\n\n def test_forward(self):\n if self.forward_input and self.model:\n bsz = self.forward_input[\"src_tokens\"].size(0)\n forward_output = self.model.forward(**self.forward_input)\n\n # we expect forward_output to be a dict with the following\n # key/value pairs:\n # - encoder_out: a Torch.Tensor\n # - encoder_padding_mask: a binary Torch.Tensor\n succ, msg = check_encoder_output(forward_output, batch_size=bsz)\n if not succ:\n self.assertTrue(succ, msg=msg)\n self.forward_output = forward_output\n\n def test_get_normalized_probs(self):\n if self.model and self.forward_input:\n forward_output = self.model.forward(**self.forward_input)\n logprob = self.model.get_normalized_probs(forward_output, log_probs=True)\n prob = self.model.get_normalized_probs(forward_output, log_probs=False)\n\n # in order for different models/criterion to play with each other\n # we need to know whether the logprob or prob output is batch_first\n # or not. We assume an additional attribute will be attached to logprob\n # or prob. 
If you find your code failed here, simply override\n # FairseqModel.get_normalized_probs, see example at\n # https://fburl.com/batch_first_example\n self.assertTrue(hasattr(logprob, \"batch_first\"))\n self.assertTrue(hasattr(prob, \"batch_first\"))\n\n self.assertTrue(torch.is_tensor(logprob))\n self.assertTrue(torch.is_tensor(prob))\n\n\nclass TestFairseqEncoderBase(unittest.TestCase):\n \"\"\"\n base class to test FairseqEncoder\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n if cls is TestFairseqEncoderBase:\n raise unittest.SkipTest(\"Skipping test case in base\")\n super().setUpClass()\n\n def setUpEncoder(self, encoder):\n self.assertTrue(\n isinstance(encoder, FairseqEncoder),\n msg=\"This class is only used for test FairseqEncoder\",\n )\n self.encoder = encoder\n\n def setUpInput(self, input=None):\n self.forward_input = get_dummy_input() if input is None else input\n # get_dummy_input() is originally for s2s, here we delete extra dict\n # items, so it can be used for EncoderModel / Encoder as well\n self.forward_input.pop(\"prev_output_tokens\", None)\n\n def setUp(self):\n self.encoder = None\n self.forward_input = None\n\n def test_forward(self):\n if self.encoder and self.forward_input:\n bsz = self.forward_input[\"src_tokens\"].size(0)\n\n forward_output = self.encoder.forward(**self.forward_input)\n succ, msg = check_encoder_output(forward_output, batch_size=bsz)\n if not succ:\n self.assertTrue(succ, msg=msg)\n self.forward_output = forward_output\n\n\nclass TestFairseqDecoderBase(unittest.TestCase):\n \"\"\"\n base class to test FairseqDecoder\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n if cls is TestFairseqDecoderBase:\n raise unittest.SkipTest(\"Skipping test case in base\")\n super().setUpClass()\n\n def setUpDecoder(self, decoder):\n self.assertTrue(\n isinstance(decoder, FairseqDecoder),\n msg=\"This class is only used for test FairseqDecoder\",\n )\n self.decoder = decoder\n\n def setUpInput(self, input=None):\n self.forward_input = get_dummy_encoder_output() if input is None else input\n\n def setUpPrevOutputTokens(self, tokens=None):\n if tokens is None:\n self.encoder_input = get_dummy_input()\n self.prev_output_tokens = self.encoder_input[\"prev_output_tokens\"]\n else:\n self.prev_output_tokens = tokens\n\n def setUp(self):\n self.decoder = None\n self.forward_input = None\n self.prev_output_tokens = None\n\n def test_forward(self):\n if (\n self.decoder is not None\n and self.forward_input is not None\n and self.prev_output_tokens is not None\n ):\n forward_output = self.decoder.forward(\n prev_output_tokens=self.prev_output_tokens,\n encoder_out=self.forward_input,\n )\n succ, msg = check_decoder_output(forward_output)\n if not succ:\n self.assertTrue(succ, msg=msg)\n self.forward_input = forward_output\n\n\nclass DummyEncoderModel(FairseqEncoderModel):\n def __init__(self, encoder):\n super().__init__(encoder)\n\n @classmethod\n def build_model(cls, args, task):\n return cls(DummyEncoder())\n\n def get_logits(self, net_output):\n # Inverse of sigmoid to use with BinaryCrossEntropyWithLogitsCriterion as\n # F.binary_cross_entropy_with_logits combines sigmoid and CE\n return torch.log(\n torch.div(net_output[\"encoder_out\"], 1 - net_output[\"encoder_out\"])\n )\n\n def get_normalized_probs(self, net_output, log_probs, sample=None):\n lprobs = super().get_normalized_probs(net_output, log_probs, sample=sample)\n lprobs.batch_first = True\n return lprobs\n\n\nclass DummyEncoder(FairseqEncoder):\n def __init__(self):\n super().__init__(None)\n\n def 
forward(self, src_tokens, src_lengths):\n mask, max_len = lengths_to_encoder_padding_mask(src_lengths)\n return {\"encoder_out\": src_tokens, \"encoder_padding_mask\": mask}\n\n\nclass CrossEntropyCriterionTestBase(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n if cls is CrossEntropyCriterionTestBase:\n raise unittest.SkipTest(\"Skipping base class test case\")\n super().setUpClass()\n\n def setUpArgs(self):\n args = argparse.Namespace()\n args.sentence_avg = False\n args.threshold = 0.1 # to use with BinaryCrossEntropyWithLogitsCriterion\n return args\n\n def setUp(self):\n args = self.setUpArgs()\n self.model = DummyEncoderModel(encoder=DummyEncoder())\n self.criterion = self.criterion_cls(args=args, task=DummyTask(args))\n\n def get_src_tokens(self, correct_prediction, aggregate):\n \"\"\"\n correct_prediction: True if the net_output (src_tokens) should\n predict the correct target\n aggregate: True if the criterion expects net_output (src_tokens)\n aggregated across time axis\n \"\"\"\n predicted_idx = 0 if correct_prediction else 1\n if aggregate:\n src_tokens = torch.zeros((2, 2), dtype=torch.float)\n for b in range(2):\n src_tokens[b][predicted_idx] = 1.0\n else:\n src_tokens = torch.zeros((2, 10, 2), dtype=torch.float)\n for b in range(2):\n for t in range(10):\n src_tokens[b][t][predicted_idx] = 1.0\n return src_tokens\n\n def get_target(self, soft_target):\n if soft_target:\n target = torch.zeros((2, 2), dtype=torch.float)\n for b in range(2):\n target[b][0] = 1.0\n else:\n target = torch.zeros((2, 10), dtype=torch.long)\n return target\n\n def get_test_sample(self, correct, soft_target, aggregate):\n src_tokens = self.get_src_tokens(correct, aggregate)\n target = self.get_target(soft_target)\n L = src_tokens.size(1)\n return {\n \"net_input\": {\"src_tokens\": src_tokens, \"src_lengths\": torch.tensor([L])},\n \"target\": target,\n \"ntokens\": src_tokens.size(0) * src_tokens.size(1),\n }\n" ]
[ [ "torch.is_tensor" ], [ "torch.div", "torch.zeros", "torch.randn", "torch.from_numpy", "torch.is_tensor", "torch.tensor", "numpy.random.randn", "torch.arange", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
wsyCUHK/cogdl
[ "7a0e36326fc653d85378e3845ec14ebd9425a9b6" ]
[ "cogdl/models/emb/netsmf.py" ]
[ "import numpy as np\nimport networkx as nx\nimport scipy.sparse as sp\nfrom sklearn import preprocessing\nfrom sklearn.utils.extmath import randomized_svd\nfrom multiprocessing import Pool\nfrom tqdm import tqdm\nimport time\n\nfrom cogdl.utils import alias_draw, alias_setup\nfrom .. import BaseModel\n\n\nclass NetSMF(BaseModel):\n r\"\"\"The NetSMF model from the `\"NetSMF: Large-Scale Network Embedding as Sparse Matrix Factorization\"\n <http://arxiv.org/abs/1710.02971>`_ paper.\n\n Args:\n hidden_size (int) : The dimension of node representation.\n window_size (int) : The actual context size which is considered in language model.\n negative (int) : The number of nagative samples in negative sampling.\n num_round (int) : The number of round in NetSMF.\n worker (int) : The number of workers for NetSMF.\n \"\"\"\n\n @staticmethod\n def add_args(parser):\n \"\"\"Add model-specific arguments to the parser.\"\"\"\n # fmt: off\n parser.add_argument(\"--window-size\", type=int, default=10,\n help=\"Window size of approximate matrix. Default is 10.\")\n parser.add_argument(\"--negative\", type=int, default=1,\n help=\"Number of negative node in sampling. Default is 1.\")\n parser.add_argument(\"--num-round\", type=int, default=100,\n help=\"Number of round in NetSMF. Default is 100.\")\n parser.add_argument(\"--worker\", type=int, default=10,\n help=\"Number of parallel workers. Default is 10.\")\n parser.add_argument(\"--hidden-size\", type=int, default=128)\n # fmt: on\n\n @classmethod\n def build_model_from_args(cls, args):\n return cls(\n args.hidden_size,\n args.window_size,\n args.negative,\n args.num_round,\n args.worker,\n )\n\n def __init__(self, dimension, window_size, negative, num_round, worker):\n super(NetSMF, self).__init__()\n self.dimension = dimension\n self.window_size = window_size\n self.negative = negative\n self.worker = worker\n self.num_round = num_round\n\n def train(self, graph, return_dict=False):\n return self.forward(graph, return_dict)\n\n def forward(self, graph, return_dict=False):\n self.G = graph.to_networkx()\n node2id = dict([(node, vid) for vid, node in enumerate(self.G.nodes())])\n self.is_directed = nx.is_directed(self.G)\n self.num_node = self.G.number_of_nodes()\n self.num_edge = self.G.number_of_edges()\n self.edges = [[node2id[e[0]], node2id[e[1]]] for e in self.G.edges()]\n\n id2node = dict(zip(node2id.values(), node2id.keys()))\n\n self.num_neigh = np.asarray([len(list(self.G.neighbors(id2node[i]))) for i in range(self.num_node)])\n self.neighbors = [[node2id[v] for v in self.G.neighbors(id2node[i])] for i in range(self.num_node)]\n s = time.time()\n self.alias_nodes = {}\n self.node_weight = {}\n for i in range(self.num_node):\n unnormalized_probs = [self.G[id2node[i]][nbr].get(\"weight\", 1.0) for nbr in self.G.neighbors(id2node[i])]\n norm_const = sum(unnormalized_probs)\n normalized_probs = [float(u_prob) / norm_const for u_prob in unnormalized_probs]\n self.alias_nodes[i] = alias_setup(normalized_probs)\n self.node_weight[i] = dict(\n zip(\n [node2id[nbr] for nbr in self.G.neighbors(id2node[i])],\n unnormalized_probs,\n )\n )\n\n t = time.time()\n print(\"alias_nodes\", t - s)\n\n # run netsmf algorithm with multiprocessing and apply randomized svd\n print(\"number of sample edges \", self.num_round * self.num_edge * self.window_size)\n print(\"random walk start...\")\n t0 = time.time()\n results = []\n pool = Pool(processes=self.worker)\n for i in range(self.worker):\n results.append(pool.apply_async(func=self._random_walk_matrix, 
args=(i,)))\n pool.close()\n pool.join()\n print(\"random walk time\", time.time() - t0)\n\n matrix = sp.csr_matrix((self.num_node, self.num_node))\n A = sp.csr_matrix(nx.adjacency_matrix(self.G))\n degree = sp.diags(np.array(A.sum(axis=0))[0], format=\"csr\")\n degree_inv = degree.power(-1)\n\n t1 = time.time()\n for res in results:\n matrix += res.get()\n t2 = time.time()\n print(\"construct random walk matrix time\", time.time() - t1)\n\n L = sp.csgraph.laplacian(matrix, normed=False, return_diag=False)\n M = degree_inv.dot(degree - L).dot(degree_inv)\n M = M * A.sum() / self.negative\n M.data[M.data <= 1] = 1\n M.data = np.log(M.data)\n M.eliminate_zeros()\n print(\"number of nzz\", M.nnz)\n print(\"construct matrix sparsifier time\", time.time() - t2)\n\n embeddings = self._get_embedding_rand(M)\n\n if return_dict:\n features_matrix = dict()\n for vid, node in enumerate(self.G.nodes()):\n features_matrix[node] = embeddings[vid]\n else:\n features_matrix = np.zeros((graph.num_nodes, embeddings.shape[1]))\n nx_nodes = self.G.nodes()\n features_matrix[nx_nodes] = embeddings[np.arange(graph.num_nodes)]\n return features_matrix\n\n def _get_embedding_rand(self, matrix):\n # Sparse randomized tSVD for fast embedding\n t1 = time.time()\n l = matrix.shape[0] # noqa E741\n smat = sp.csc_matrix(matrix)\n print(\"svd sparse\", smat.data.shape[0] * 1.0 / l ** 2)\n U, Sigma, VT = randomized_svd(smat, n_components=self.dimension, n_iter=5, random_state=None)\n U = U * np.sqrt(Sigma)\n U = preprocessing.normalize(U, \"l2\")\n print(\"sparsesvd time\", time.time() - t1)\n return U\n\n def _path_sampling(self, u, v, r):\n # sample a r-length path from edge(u, v) and return path end node\n k = np.random.randint(r) + 1\n zp, rand_u, rand_v = 2.0 / self.node_weight[u][v], k - 1, r - k\n for i in range(rand_u):\n new_u = self.neighbors[u][alias_draw(self.alias_nodes[u][0], self.alias_nodes[u][1])]\n zp += 2.0 / self.node_weight[u][new_u]\n u = new_u\n for j in range(rand_v):\n new_v = self.neighbors[v][alias_draw(self.alias_nodes[v][0], self.alias_nodes[v][1])]\n zp += 2.0 / self.node_weight[v][new_v]\n v = new_v\n return u, v, zp\n\n def _random_walk_matrix(self, pid):\n # construct matrix based on random walk\n np.random.seed(pid)\n matrix = sp.lil_matrix((self.num_node, self.num_node))\n for i in tqdm(range(self.num_edge * self.num_round // self.worker)):\n u, v = self.edges[i % self.num_edge]\n if not self.is_directed and np.random.rand() > 0.5:\n v, u = u, v\n for r in range(1, self.window_size + 1):\n u_, v_, zp = self._path_sampling(u, v, r)\n matrix[u_, v_] += 2 * r / self.window_size / self.num_round / zp\n return matrix.tocsr()\n" ]
[ [ "scipy.sparse.csc_matrix", "numpy.log", "sklearn.utils.extmath.randomized_svd", "numpy.sqrt", "numpy.random.seed", "numpy.arange", "scipy.sparse.csgraph.laplacian", "scipy.sparse.csr_matrix", "sklearn.preprocessing.normalize", "numpy.random.randint", "numpy.random.rand", "numpy.zeros", "scipy.sparse.lil_matrix" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] } ]
quidditymaster/thimbles
[ "b122654a012f0eb4f043d1ee757f884707c97615" ]
[ "thimbles/charts/radar_chart.py" ]
[ "\"\"\"\nhttp://matplotlib.org/examples/api/radar_chart.html\n\nExample of creating a radar chart (a.k.a. a spider or star chart) [1]_.\n\nAlthough this example allows a frame of either 'circle' or 'polygon', polygon\nframes don't have proper gridlines (the lines are circles instead of polygons).\nIt's possible to get a polygon grid by setting GRIDLINE_INTERPOLATION_STEPS in\nmatplotlib.axis to the desired number of vertices, but the orientation of the\npolygon is not aligned with the radial axes.\n\n.. [1] http://en.wikipedia.org/wiki/Radar_chart\n\"\"\"\nimport numpy as np\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.path import Path\nfrom matplotlib.spines import Spine\nfrom matplotlib.projections.polar import PolarAxes\nfrom matplotlib.projections import register_projection\n\n\ndef radar_factory(num_vars, frame='circle'):\n \"\"\"Create a radar chart with `num_vars` axes.\n\n This function creates a RadarAxes projection and registers it.\n\n Parameters\n ----------\n num_vars : int\n Number of variables for radar chart.\n frame : {'circle' | 'polygon'}\n Shape of frame surrounding axes.\n\n \"\"\"\n # calculate evenly-spaced axis angles\n theta = 2*np.pi * np.linspace(0, 1-1./num_vars, num_vars)\n # rotate theta such that the first axis is at the top\n theta += np.pi/2\n\n def draw_poly_patch(self):\n verts = unit_poly_verts(theta)\n return plt.Polygon(verts, closed=True, edgecolor='k')\n\n def draw_circle_patch(self):\n # unit circle centered on (0.5, 0.5)\n return plt.Circle((0.5, 0.5), 0.5)\n\n patch_dict = {'polygon': draw_poly_patch, 'circle': draw_circle_patch}\n if frame not in patch_dict:\n raise ValueError('unknown value for `frame`: %s' % frame)\n\n class RadarAxes(PolarAxes):\n \n name = 'radar'\n # use 1 line segment to connect specified points\n RESOLUTION = 1\n # define draw_frame method\n draw_patch = patch_dict[frame]\n \n def fill(self, *args, **kwargs):\n \"\"\"Override fill so that line is closed by default\"\"\"\n closed = kwargs.pop('closed', True)\n return super(RadarAxes, self).fill(closed=closed, *args, **kwargs)\n\n def plot(self, *args, **kwargs):\n \"\"\"Override plot so that line is closed by default\"\"\"\n lines = super(RadarAxes, self).plot(*args, **kwargs)\n for line in lines:\n self._close_line(line)\n\n def _close_line(self, line):\n x, y = line.get_data()\n # FIXME: markers at x[0], y[0] get doubled-up\n if x[0] != x[-1]:\n x = np.concatenate((x, [x[0]]))\n y = np.concatenate((y, [y[0]]))\n line.set_data(x, y)\n\n def set_varlabels(self, labels):\n self.set_thetagrids(theta * 180/np.pi, labels)\n\n def _gen_axes_patch(self):\n return self.draw_patch()\n\n def _gen_axes_spines(self):\n if frame == 'circle':\n return PolarAxes._gen_axes_spines(self)\n # The following is a hack to get the spines (i.e. 
the axes frame)\n # to draw correctly for a polygon frame.\n\n # spine_type must be 'left', 'right', 'top', 'bottom', or `circle`.\n spine_type = 'circle'\n verts = unit_poly_verts(theta)\n # close off polygon by repeating first vertex\n verts.append(verts[0])\n path = Path(verts)\n\n spine = Spine(self, spine_type, path)\n spine.set_transform(self.transAxes)\n return {'polar': spine}\n\n register_projection(RadarAxes)\n return theta\n\ndef unit_poly_verts(theta):\n \"\"\"Return vertices of polygon for subplot axes.\n\n This polygon is circumscribed by a unit circle centered at (0.5, 0.5)\n \"\"\"\n x0, y0, r = [0.5] * 3\n verts = [(r*np.cos(t) + x0, r*np.sin(t) + y0) for t in theta]\n return verts\n\nif __name__ == \"__main__\":\n n_spokes = 5\n theta = radar_factory(n_spokes, frame=\"polygon\")\n fig = plt.figure()\n \n ax = fig.add_subplot(1, 1, 1, projection=\"radar\")\n datapoints = np.random.random(n_spokes)\n ax.plot(theta, datapoints)\n ax.fill(theta, datapoints)\n plt.show()\n \n" ]
[ [ "matplotlib.projections.polar.PolarAxes._gen_axes_spines", "numpy.random.random", "matplotlib.projections.register_projection", "numpy.linspace", "matplotlib.path.Path", "numpy.cos", "numpy.sin", "matplotlib.pyplot.Circle", "matplotlib.pyplot.Polygon", "matplotlib.spines.Spine", "numpy.concatenate", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
NetKet/netket
[ "96758e814fc3128e6821564d6cc2852bac40ecf2", "96758e814fc3128e6821564d6cc2852bac40ecf2", "96758e814fc3128e6821564d6cc2852bac40ecf2" ]
[ "netket/sampler/metropolis_numpy.py", "Examples/Legacy/CustomSampler/exchange_kernel.py", "netket/hilbert/abstract_hilbert.py" ]
[ "# Copyright 2021 The NetKet Authors - All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport math\nfrom dataclasses import dataclass\nfrom functools import partial\n\nfrom typing import Any, Tuple, Callable\n\nimport numpy as np\nfrom numba import jit\nfrom jax import numpy as jnp\nimport jax\n\nfrom netket.hilbert import AbstractHilbert\nfrom netket.utils.mpi import mpi_sum, n_nodes\nfrom netket.utils.types import PyTree\nfrom netket.utils.deprecation import deprecated\n\nimport netket.jax as nkjax\n\nfrom .metropolis import MetropolisSampler\n\n\n@dataclass\nclass MetropolisNumpySamplerState:\n σ: np.ndarray\n \"\"\"Holds the current configuration.\"\"\"\n σ1: np.ndarray\n \"\"\"Holds a proposed configuration (preallocation).\"\"\"\n\n log_values: np.ndarray\n \"\"\"Holds model(pars, σ) for the current σ (preallocation).\"\"\"\n log_values_1: np.ndarray\n \"\"\"Holds model(pars, σ1) for the last σ1 (preallocation).\"\"\"\n log_prob_corr: np.ndarray\n \"\"\"Holds optional acceptance correction (preallocation).\"\"\"\n\n rule_state: Any\n \"\"\"The optional state of the rule.\"\"\"\n rng: Any\n \"\"\"A numpy random generator.\"\"\"\n\n n_steps_proc: int = 0\n \"\"\"Number of moves performed along the chains in this process since the last reset.\"\"\"\n n_accepted_proc: int = 0\n \"\"\"Number of accepted transitions among the chains in this process since the last reset.\"\"\"\n\n @property\n def acceptance(self) -> float:\n \"\"\"The fraction of accepted moves across all chains and MPI processes.\n\n The rate is computed since the last reset of the sampler.\n Will return None if no sampling has been performed since then.\n \"\"\"\n if self.n_steps == 0:\n return None\n\n return self.n_accepted / self.n_steps\n\n @property\n @deprecated(\n \"\"\"Please use the attribute `.acceptance` instead of\n `.acceptance_ratio`. The new attribute `.acceptance` returns the\n acceptance ratio ∈ [0,1], instead of the current `acceptance_ratio`\n returning a percentage, which is a bug.\"\"\"\n )\n def acceptance_ratio(self) -> float:\n \"\"\"DEPRECATED: Please use the attribute `.acceptance` instead of\n `.acceptance_ratio`. 
The new attribute `.acceptance` returns the\n acceptance ratio ∈ [0,1], instead of the current `acceptance_ratio`\n returning a percentage, which is a bug.\n\n The percentage of accepted moves across all chains and MPI processes.\n\n The rate is computed since the last reset of the sampler.\n Will return None if no sampling has been performed since then.\n \"\"\"\n return self.acceptance * 100\n\n @property\n def n_steps(self) -> int:\n \"\"\"Total number of moves performed across all processes since the last reset.\"\"\"\n return self.n_steps_proc * n_nodes\n\n @property\n def n_accepted(self) -> int:\n \"\"\"Total number of moves accepted across all processes since the last reset.\"\"\"\n return mpi_sum(self.n_accepted_proc)\n\n def __repr__(self):\n if self.n_steps > 0:\n acc_string = \"# accepted = {}/{} ({}%), \".format(\n self.n_accepted, self.n_steps, self.acceptance * 100\n )\n else:\n acc_string = \"\"\n\n return f\"MetropolisNumpySamplerState({acc_string}rng state={self.rng})\"\n\n\n@partial(jax.jit, static_argnums=0)\ndef apply_model(machine, pars, weights):\n return machine.apply(pars, weights)\n\n\nclass MetropolisSamplerNumpy(MetropolisSampler):\n \"\"\"\n Metropolis-Hastings sampler for an Hilbert space according to a specific transition\n rule executed on CPU through Numpy.\n\n This sampler is equivalent to :ref:`netket.sampler.MetropolisSampler` but instead of\n executing the whole sampling inside a jax-jitted function, only evaluates the forward\n pass inside a jax-jitted function, while proposing new steps and accepting/rejecting\n them is performed in numpy.\n\n Because of Jax dispatch cost, and especially for small system, this sampler performs\n poorly, while asymptotically it should have the same performance of standard Jax samplers.\n\n However, some transition rules don't work on GPU, and some samplers (Hamiltonian) work\n very poorly on jax so this is a good workaround.\n\n See :ref:`netket.sampler.MetropolisSampler` for more informations.\n \"\"\"\n\n def _init_state(sampler, machine, parameters, key):\n rgen = np.random.default_rng(np.asarray(key))\n\n σ = np.zeros((sampler.n_batches, sampler.hilbert.size), dtype=sampler.dtype)\n\n ma_out = jax.eval_shape(machine.apply, parameters, σ)\n\n state = MetropolisNumpySamplerState(\n σ=σ,\n σ1=np.copy(σ),\n log_values=np.zeros(sampler.n_batches, dtype=ma_out.dtype),\n log_values_1=np.zeros(sampler.n_batches, dtype=ma_out.dtype),\n log_prob_corr=np.zeros(\n sampler.n_batches, dtype=nkjax.dtype_real(ma_out.dtype)\n ),\n rng=rgen,\n rule_state=sampler.rule.init_state(sampler, machine, parameters, rgen),\n )\n\n if not sampler.reset_chains:\n key = jnp.asarray(\n state.rng.integers(0, 1 << 32, size=2, dtype=np.uint32), dtype=np.uint32\n )\n\n state.σ = np.copy(\n sampler.rule.random_state(sampler, machine, parameters, state, key)\n )\n\n return state\n\n def _reset(sampler, machine, parameters, state):\n if sampler.reset_chains:\n # directly generate a PRNGKey which is a [2xuint32] array\n key = jnp.asarray(\n state.rng.integers(0, 1 << 32, size=2, dtype=np.uint32), dtype=np.uint32\n )\n state.σ = np.copy(\n sampler.rule.random_state(sampler, machine, parameters, state, key)\n )\n\n state.rule_state = sampler.rule.reset(sampler, machine, parameters, state)\n state.log_values = np.copy(apply_model(machine, parameters, state.σ))\n\n state._accepted_samples = 0\n state._total_samples = 0\n\n return state\n\n def _sample_next(sampler, machine, parameters, state):\n σ = state.σ\n σ1 = state.σ1\n log_values = state.log_values\n 
log_values_1 = state.log_values_1\n log_prob_corr = state.log_prob_corr\n mpow = sampler.machine_pow\n\n rgen = state.rng\n\n accepted = 0\n\n for sweep in range(sampler.n_sweeps):\n # Propose a new state using the transition kernel\n # σp, log_prob_correction =\n sampler.rule.transition(sampler, machine, parameters, state, state.rng, σ)\n\n log_values_1 = np.asarray(apply_model(machine, parameters, σ1))\n\n random_uniform = rgen.uniform(0, 1, size=σ.shape[0])\n\n # Acceptance Kernel\n accepted += acceptance_kernel(\n σ,\n σ1,\n log_values,\n log_values_1,\n log_prob_corr,\n mpow,\n random_uniform,\n )\n\n state.n_steps_proc += sampler.n_sweeps * sampler.n_chains\n state.n_accepted_proc += accepted\n\n return state, state.σ\n\n def _sample_chain(\n sampler,\n machine: Callable,\n parameters: PyTree,\n state: MetropolisNumpySamplerState,\n chain_length: int,\n ) -> Tuple[jnp.ndarray, MetropolisNumpySamplerState]:\n\n samples = np.empty(\n (chain_length, sampler.n_chains, sampler.hilbert.size), dtype=sampler.dtype\n )\n\n for i in range(chain_length):\n state, σ = sampler.sample_next(machine, parameters, state)\n samples[i] = σ\n\n return samples, state\n\n def __repr__(sampler):\n return (\n \"MetropolisSamplerNumpy(\"\n + \"\\n hilbert = {},\".format(sampler.hilbert)\n + \"\\n rule = {},\".format(sampler.rule)\n + \"\\n n_chains = {},\".format(sampler.n_chains)\n + \"\\n machine_power = {},\".format(sampler.machine_pow)\n + \"\\n reset_chains = {},\".format(sampler.reset_chains)\n + \"\\n n_sweeps = {},\".format(sampler.n_sweeps)\n + \"\\n dtype = {},\".format(sampler.dtype)\n + \")\"\n )\n\n def __str__(sampler):\n return (\n \"MetropolisSamplerNumpy(\"\n + \"rule = {}, \".format(sampler.rule)\n + \"n_chains = {}, \".format(sampler.n_chains)\n + \"machine_power = {}, \".format(sampler.machine_pow)\n + \"n_sweeps = {}, \".format(sampler.n_sweeps)\n + \"dtype = {})\".format(sampler.dtype)\n )\n\n\n@jit(nopython=True)\ndef acceptance_kernel(\n σ, σ1, log_values, log_values_1, log_prob_corr, machine_pow, random_uniform\n):\n accepted = 0\n\n for i in range(σ.shape[0]):\n prob = np.exp(\n machine_pow * (log_values_1[i] - log_values[i]).real + log_prob_corr[i]\n )\n assert not math.isnan(prob)\n\n if prob > random_uniform[i]:\n log_values[i] = log_values_1[i]\n σ[i] = σ1[i]\n accepted += 1\n\n return accepted\n\n\ndef MetropolisLocalNumpy(hilbert: AbstractHilbert, *args, **kwargs):\n from .rules import LocalRuleNumpy\n\n rule = LocalRuleNumpy()\n return MetropolisSamplerNumpy(hilbert, rule, *args, **kwargs)\n\n\ndef MetropolisHamiltonianNumpy(hilbert: AbstractHilbert, hamiltonian, *args, **kwargs):\n from .rules import HamiltonianRuleNumpy\n\n rule = HamiltonianRuleNumpy(hamiltonian)\n return MetropolisSamplerNumpy(hilbert, rule, *args, **kwargs)\n\n\ndef MetropolisCustomNumpy(\n hilbert: AbstractHilbert, move_operators, move_weights=None, *args, **kwargs\n):\n from .rules import CustomRuleNumpy\n\n rule = CustomRuleNumpy(move_operators, move_weights)\n return MetropolisSamplerNumpy(hilbert, rule, *args, **kwargs)\n", "from netket import legacy as nk\nimport numpy as np\n\n# 1D Lattice\ng = nk.graph.Hypercube(length=20, n_dim=1, pbc=True)\n\n# Hilbert space of spins on the graph\n# with total Sz equal to 0\nhi = nk.hilbert.Spin(s=1 / 2, N=g.n_nodes, total_sz=0)\n\n# Heisenberg hamiltonian\nha = nk.operator.Heisenberg(hilbert=hi)\n\n# Symmetric RBM Spin Machine\nma = nk.machine.RbmSpin(alpha=1, hilbert=hi)\nma.init_random_parameters(seed=1234, sigma=0.01)\n\n# Defining a custom kernel for 
MetropolisHastings\n# Notice that this sampler exchanges two random sites\n# thus preserving the total magnetization\n# Also notice that it is not recommended to define custom kernels in python\n# For speed reasons it is better to define exchange kernels using CustomSampler\ndef exchange_kernel(v, vnew, loprobcorr):\n\n vnew[:, :] = v[:, :]\n loprobcorr[:] = 0.0\n\n rands = np.random.randint(v.shape[1], size=(v.shape[0], 2))\n\n for i in range(v.shape[0]):\n iss = rands[i, 0]\n jss = rands[i, 1]\n\n vnew[i, iss], vnew[i, jss] = vnew[i, jss], vnew[i, iss]\n\n\nsa = nk.sampler.MetropolisHastings(ma, exchange_kernel, n_chains=16)\n\n# Optimizer\nop = nk.optimizer.Sgd(learning_rate=0.05)\n\n# Stochastic reconfiguration\ngs = nk.variational.VMC(\n hamiltonian=ha,\n sampler=sa,\n optimizer=op,\n n_samples=1000,\n diag_shift=0.1,\n method=\"Sr\",\n)\n\ngs.run(n_iter=300, out=\"test\")\n", "# Copyright 2021 The NetKet Authors - All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport abc\n\nfrom typing import Tuple, Optional, Union, Iterable\n\nimport jax.numpy as jnp\nimport numpy as np\n\nmax_states = np.iinfo(np.int32).max\n\"\"\"int: Maximum number of states that can be indexed\"\"\"\n\n\nclass AbstractHilbert(abc.ABC):\n \"\"\"Abstract class for NetKet hilbert objects.\n\n This class defines the common interface used to interact with Hilbert spaces.\n\n An AbstractHilbert object identifies an Hilbert space and a computational basis on\n such hilbert space, such as the z-basis for spins on a lattice, or the\n position-basis for particles in a box.\n\n Hilbert Spaces are generally immutable python objects that must be hashable in order\n to be used as static arguments to `jax.jit` functions.\n \"\"\"\n\n def __init__(self):\n self._hash = None\n\n @property\n @abc.abstractmethod\n def size(self) -> int:\n r\"\"\"The number number of degrees of freedom in the basis of this\n Hilbert space.\"\"\"\n raise NotImplementedError() # pragma: no cover\n\n def random_state(\n self,\n key=None,\n size: Optional[int] = None,\n dtype=np.float32,\n ) -> jnp.ndarray:\n r\"\"\"Generates either a single or a batch of uniformly distributed random states.\n Runs as :code:`random_state(self, key, size=None, dtype=np.float32)` by default.\n\n Args:\n key: rng state from a jax-style functional generator.\n size: If provided, returns a batch of configurations of the form\n :code:`(size, N)` if size is an integer or :code:`(*size, N)` if it is\n a tuple and where :math:`N` is the Hilbert space size.\n By default, a single random configuration with shape\n :code:`(#,)` is returned.\n dtype: DType of the resulting vector.\n\n Returns:\n A state or batch of states sampled from the uniform distribution on the\n hilbert space.\n\n Example:\n\n >>> import netket, jax\n >>> hi = netket.hilbert.Qubit(N=2)\n >>> k1, k2 = jax.random.split(jax.random.PRNGKey(1))\n >>> print(hi.random_state(key=k1))\n [1. 0.]\n >>> print(hi.random_state(key=k2, size=2))\n [[0. 0.]\n [0. 
1.]]\n \"\"\"\n from netket.hilbert import random\n\n return random.random_state(self, key, size, dtype=dtype)\n\n def ptrace(self, sites: Union[int, Iterable]) -> \"AbstractHilbert\":\n \"\"\"Returns the hilbert space without the selected sites.\n\n Not all hilbert spaces support this operation.\n\n Args:\n sites: a site or list of sites to trace away\n\n Returns:\n The partially-traced hilbert space. The type of the resulting hilbert space\n might be different from the starting one.\n \"\"\"\n raise NotImplementedError(\"Ptrace not implemented for this hilbert space type.\")\n\n @property\n def is_indexable(self) -> bool:\n \"\"\"Whether the space can be indexed with an integer\"\"\"\n return False\n\n @property\n @abc.abstractmethod\n def _attrs(self) -> Tuple:\n \"\"\"\n Tuple of hashable attributes, used to compute the immutable\n hash of this Hilbert space\n \"\"\"\n\n def __eq__(self, other) -> bool:\n if isinstance(other, type(self)):\n return self._attrs == other._attrs\n\n return False\n\n def __hash__(self):\n if self._hash is None:\n self._hash = hash(self._attrs)\n\n return self._hash\n" ]
[ [ "numpy.asarray", "numpy.copy", "numpy.exp", "numpy.zeros", "numpy.empty" ], [ "numpy.random.randint" ], [ "numpy.iinfo" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Anurag14/Inflow-prediction-Bhakra
[ "d440ec552032084991878877ba5154ea2c452264" ]
[ "LSTM/graphs/graph1.py" ]
[ "import os\nimport seaborn as sns\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ndf=pd.read_csv('../data1.csv')\ndf=df.values\n#time series vs reservoir levels(ft) graph\nsns.set_style('darkgrid')\nplt.plot(df[:,0],df[:,1],label=\"\")\nplt.plot(df[:,0],df[:,2])\nplt.xlabel('Time Series')\nplt.ylabel('Reservoir Levels(ft)')\nplt.title('Dialy Bhakhra Reservoir Levels for past 20 years')\nplt.show()\n" ]
[ [ "pandas.read_csv", "matplotlib.pyplot.title", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
junjihashimoto/wav2vec-2-nix
[ "f104280586cf78d0fc5f280ea013f6bc676cd05e" ]
[ "src/main-ja.py" ]
[ "\n# https://huggingface.co/vumichien/wav2vec2-large-xlsr-japanese\n\nimport torch\nimport torchaudio\nimport librosa\nfrom datasets import load_dataset\nimport MeCab\nfrom transformers import Wav2Vec2ForCTC, Wav2Vec2Processor\nimport re\n\n# config\nwakati = MeCab.Tagger(\"-Owakati\")\nchars_to_ignore_regex = '[\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\,\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\、\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\。\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\.\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\「\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\」\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\…\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\?\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\・]'\n\n# load data, processor and model\ntest_dataset = load_dataset(\"common_voice\", \"ja\", split=\"test[:2%]\")\nprocessor = Wav2Vec2Processor.from_pretrained(\"vumichien/wav2vec2-large-xlsr-japanese\")\nmodel = Wav2Vec2ForCTC.from_pretrained(\"vumichien/wav2vec2-large-xlsr-japanese\")\nresampler = lambda sr, y: librosa.resample(y.numpy().squeeze(), sr, 16_000)\n\n# Preprocessing the datasets.\ndef speech_file_to_array_fn(batch):\n batch[\"sentence\"] = wakati.parse(batch[\"sentence\"]).strip()\n batch[\"sentence\"] = re.sub(chars_to_ignore_regex,'', batch[\"sentence\"]).strip()\n speech_array, sampling_rate = torchaudio.load(batch[\"path\"])\n batch[\"speech\"] = resampler(sampling_rate, speech_array).squeeze()\n print(batch[\"sentence\"])\n return batch\ntest_dataset = test_dataset.map(speech_file_to_array_fn)\ninputs = processor(test_dataset[\"speech\"][:2], sampling_rate=16_000, return_tensors=\"pt\", padding=True)\nwith torch.no_grad():\n logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits\npredicted_ids = torch.argmax(logits, dim=-1)\nprint(\"Prediction:\", processor.batch_decode(predicted_ids))\nprint(\"Reference:\", test_dataset[\"sentence\"][:2])\n" ]
[ [ "torch.no_grad", "torch.argmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
CheukNgai/estimator
[ "673a50bd5ffa70d0672ce47e40f5075f1cbe0a62" ]
[ "tensorflow_estimator/contrib/estimator/python/estimator/rnn.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Recurrent Neural Network estimators.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport six\n\nfrom tensorflow_estimator.contrib.estimator.python.estimator import extenders\nfrom tensorflow.contrib.feature_column.python.feature_column import sequence_feature_column as seq_fc\nfrom tensorflow_estimator.python.estimator import estimator\nfrom tensorflow_estimator.python.estimator.canned import head as head_lib\nfrom tensorflow_estimator.python.estimator.canned import optimizers\nfrom tensorflow.python.feature_column import feature_column as feature_column_lib\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.layers import core as core_layers\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import check_ops\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import partitioned_variables\nfrom tensorflow.python.ops import rnn\nfrom tensorflow.python.ops import rnn_cell\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops.losses import losses\nfrom tensorflow.python.summary import summary\nfrom tensorflow.python.training import optimizer as optimizer_lib\nfrom tensorflow.python.training import training_util\n\n\n# The defaults are historical artifacts of the initial implementation, but seem\n# reasonable choices.\n_DEFAULT_LEARNING_RATE = 0.05\n_DEFAULT_CLIP_NORM = 5.0\n\n_CELL_TYPES = {'basic_rnn': rnn_cell.BasicRNNCell,\n 'lstm': rnn_cell.BasicLSTMCell,\n 'gru': rnn_cell.GRUCell}\n\n# Indicates no value was provided by the user to a kwarg.\nUSE_DEFAULT = object()\n\n\ndef _single_rnn_cell(num_units, cell_type):\n cell_type = _CELL_TYPES.get(cell_type, cell_type)\n if not cell_type or not issubclass(cell_type, rnn_cell.RNNCell):\n raise ValueError('Supported cell types are {}; got {}'.format(\n list(_CELL_TYPES.keys()), cell_type))\n return cell_type(num_units=num_units)\n\n\ndef _make_rnn_cell_fn(num_units, cell_type='basic_rnn'):\n \"\"\"Convenience function to create `rnn_cell_fn` for canned RNN Estimators.\n\n Args:\n num_units: Iterable of integer number of hidden units per RNN layer.\n cell_type: A subclass of `tf.nn.rnn_cell.RNNCell` or a string specifying\n the cell type. Supported strings are: `'basic_rnn'`, `'lstm'`, and\n `'gru'`.\n\n Returns:\n A function that takes a single argument, an instance of\n `tf.estimator.ModeKeys`, and returns an instance derived from\n `tf.nn.rnn_cell.RNNCell`.\n\n Raises:\n ValueError: If cell_type is not supported.\n \"\"\"\n def rnn_cell_fn(mode):\n # Unused. Part of the rnn_cell_fn interface since user specified functions\n # may need different behavior across modes (e.g. 
dropout).\n del mode\n cells = [_single_rnn_cell(n, cell_type) for n in num_units]\n if len(cells) == 1:\n return cells[0]\n return rnn_cell.MultiRNNCell(cells)\n return rnn_cell_fn\n\n\ndef _concatenate_context_input(sequence_input, context_input):\n \"\"\"Replicates `context_input` across all timesteps of `sequence_input`.\n\n Expands dimension 1 of `context_input` then tiles it `sequence_length` times.\n This value is appended to `sequence_input` on dimension 2 and the result is\n returned.\n\n Args:\n sequence_input: A `Tensor` of dtype `float32` and shape `[batch_size,\n padded_length, d0]`.\n context_input: A `Tensor` of dtype `float32` and shape `[batch_size, d1]`.\n\n Returns:\n A `Tensor` of dtype `float32` and shape `[batch_size, padded_length,\n d0 + d1]`.\n\n Raises:\n ValueError: If `sequence_input` does not have rank 3 or `context_input` does\n not have rank 2.\n \"\"\"\n seq_rank_check = check_ops.assert_rank(\n sequence_input,\n 3,\n message='sequence_input must have rank 3',\n data=[array_ops.shape(sequence_input)])\n seq_type_check = check_ops.assert_type(\n sequence_input,\n dtypes.float32,\n message='sequence_input must have dtype float32; got {}.'.format(\n sequence_input.dtype))\n ctx_rank_check = check_ops.assert_rank(\n context_input,\n 2,\n message='context_input must have rank 2',\n data=[array_ops.shape(context_input)])\n ctx_type_check = check_ops.assert_type(\n context_input,\n dtypes.float32,\n message='context_input must have dtype float32; got {}.'.format(\n context_input.dtype))\n with ops.control_dependencies(\n [seq_rank_check, seq_type_check, ctx_rank_check, ctx_type_check]):\n padded_length = array_ops.shape(sequence_input)[1]\n tiled_context_input = array_ops.tile(\n array_ops.expand_dims(context_input, 1),\n array_ops.concat([[1], [padded_length], [1]], 0))\n return array_ops.concat([sequence_input, tiled_context_input], 2)\n\n\ndef _select_last_activations(activations, sequence_lengths):\n \"\"\"Selects the nth set of activations for each n in `sequence_length`.\n\n Returns a `Tensor` of shape `[batch_size, k]`. If `sequence_length` is not\n `None`, then `output[i, :] = activations[i, sequence_length[i] - 1, :]`. 
If\n `sequence_length` is `None`, then `output[i, :] = activations[i, -1, :]`.\n\n Args:\n activations: A `Tensor` with shape `[batch_size, padded_length, k]`.\n sequence_lengths: A `Tensor` with shape `[batch_size]` or `None`.\n Returns:\n A `Tensor` of shape `[batch_size, k]`.\n \"\"\"\n with ops.name_scope(\n 'select_last_activations', values=[activations, sequence_lengths]):\n activations_shape = array_ops.shape(activations)\n batch_size = activations_shape[0]\n padded_length = activations_shape[1]\n output_units = activations_shape[2]\n if sequence_lengths is None:\n sequence_lengths = padded_length\n start_indices = math_ops.to_int64(\n math_ops.range(batch_size) * padded_length)\n last_indices = start_indices + sequence_lengths - 1\n reshaped_activations = array_ops.reshape(\n activations, [batch_size * padded_length, output_units])\n\n last_activations = array_ops.gather(reshaped_activations, last_indices)\n last_activations.set_shape([activations.shape[0], activations.shape[2]])\n return last_activations\n\n\ndef _rnn_logit_fn_builder(output_units, rnn_cell_fn, sequence_feature_columns,\n context_feature_columns, input_layer_partitioner,\n return_sequences=False):\n \"\"\"Function builder for a rnn logit_fn.\n\n Args:\n output_units: An int indicating the dimension of the logit layer.\n rnn_cell_fn: A function with one argument, a `tf.estimator.ModeKeys`, and\n returns an object of type `tf.nn.rnn_cell.RNNCell`.\n sequence_feature_columns: An iterable containing the `FeatureColumn`s\n that represent sequential input.\n context_feature_columns: An iterable containing the `FeatureColumn`s\n that represent contextual input.\n input_layer_partitioner: Partitioner for input layer.\n return_sequences: A boolean indicating whether to return the last output\n in the output sequence, or the full sequence.\n\n Returns:\n A logit_fn (see below).\n\n Raises:\n ValueError: If output_units is not an int.\n \"\"\"\n if not isinstance(output_units, int):\n raise ValueError('output_units must be an int. Given type: {}'.format(\n type(output_units)))\n\n def rnn_logit_fn(features, mode):\n \"\"\"Recurrent Neural Network logit_fn.\n\n Args:\n features: This is the first item returned from the `input_fn`\n passed to `train`, `evaluate`, and `predict`. This should be a\n single `Tensor` or `dict` of same.\n mode: Optional. Specifies if this training, evaluation or prediction. 
See\n `ModeKeys`.\n\n Returns:\n A `Tensor` representing the logits.\n \"\"\"\n with variable_scope.variable_scope(\n 'sequence_input_layer',\n values=tuple(six.itervalues(features)),\n partitioner=input_layer_partitioner):\n sequence_input, sequence_length = seq_fc.sequence_input_layer(\n features=features, feature_columns=sequence_feature_columns)\n summary.histogram('sequence_length', sequence_length)\n\n if context_feature_columns:\n context_input = feature_column_lib.input_layer(\n features=features,\n feature_columns=context_feature_columns)\n sequence_input = _concatenate_context_input(sequence_input,\n context_input)\n\n cell = rnn_cell_fn(mode)\n # Ignore output state.\n rnn_outputs, _ = rnn.dynamic_rnn(\n cell=cell,\n inputs=sequence_input,\n sequence_length=sequence_length,\n dtype=dtypes.float32,\n time_major=False)\n\n if not return_sequences:\n rnn_outputs = _select_last_activations(rnn_outputs, sequence_length)\n\n with variable_scope.variable_scope('logits', values=(rnn_outputs,)):\n logits = core_layers.dense(\n rnn_outputs,\n units=output_units,\n activation=None,\n kernel_initializer=init_ops.glorot_uniform_initializer())\n return logits\n\n return rnn_logit_fn\n\n\ndef _rnn_model_fn(features,\n labels,\n mode,\n head,\n rnn_cell_fn,\n sequence_feature_columns,\n context_feature_columns,\n return_sequences=False,\n optimizer='Adagrad',\n input_layer_partitioner=None,\n config=None):\n \"\"\"Recurrent Neural Net model_fn.\n\n Args:\n features: dict of `Tensor` and `SparseTensor` objects returned from\n `input_fn`.\n labels: `Tensor` of shape [batch_size, 1] or [batch_size] with labels.\n mode: Defines whether this is training, evaluation or prediction.\n See `ModeKeys`.\n head: A `head_lib._Head` instance.\n rnn_cell_fn: A function with one argument, a `tf.estimator.ModeKeys`, and\n returns an object of type `tf.nn.rnn_cell.RNNCell`.\n sequence_feature_columns: Iterable containing `FeatureColumn`s that\n represent sequential model inputs.\n context_feature_columns: Iterable containing `FeatureColumn`s that\n represent model inputs not associated with a specific timestep.\n return_sequences: A boolean indicating whether to return the last output\n in the output sequence, or the full sequence.\n optimizer: String, `tf.Optimizer` object, or callable that creates the\n optimizer to use for training. If not specified, will use the Adagrad\n optimizer with a default learning rate of 0.05 and gradient clip norm of\n 5.0.\n input_layer_partitioner: Partitioner for input layer. Defaults\n to `min_max_variable_partitioner` with `min_slice_size` 64 << 20.\n config: `RunConfig` object to configure the runtime settings.\n\n Returns:\n An `EstimatorSpec` instance.\n\n Raises:\n ValueError: If mode or optimizer is invalid, or features has the wrong type.\n \"\"\"\n if not isinstance(features, dict):\n raise ValueError('features should be a dictionary of `Tensor`s. 
'\n 'Given type: {}'.format(type(features)))\n\n # If user does not provide an optimizer instance, use the optimizer specified\n # by the string with default learning rate and gradient clipping.\n if not isinstance(optimizer, optimizer_lib.Optimizer):\n optimizer = optimizers.get_optimizer_instance(\n optimizer, learning_rate=_DEFAULT_LEARNING_RATE)\n optimizer = extenders.clip_gradients_by_norm(optimizer, _DEFAULT_CLIP_NORM)\n\n num_ps_replicas = config.num_ps_replicas if config else 0\n partitioner = partitioned_variables.min_max_variable_partitioner(\n max_partitions=num_ps_replicas)\n with variable_scope.variable_scope(\n 'rnn',\n values=tuple(six.itervalues(features)),\n partitioner=partitioner):\n input_layer_partitioner = input_layer_partitioner or (\n partitioned_variables.min_max_variable_partitioner(\n max_partitions=num_ps_replicas,\n min_slice_size=64 << 20))\n\n logit_fn = _rnn_logit_fn_builder(\n output_units=head.logits_dimension,\n rnn_cell_fn=rnn_cell_fn,\n sequence_feature_columns=sequence_feature_columns,\n context_feature_columns=context_feature_columns,\n input_layer_partitioner=input_layer_partitioner,\n return_sequences=return_sequences)\n logits = logit_fn(features=features, mode=mode)\n\n def _train_op_fn(loss):\n \"\"\"Returns the op to optimize the loss.\"\"\"\n return optimizer.minimize(\n loss,\n global_step=training_util.get_global_step())\n\n return head.create_estimator_spec(\n features=features,\n mode=mode,\n labels=labels,\n train_op_fn=_train_op_fn,\n logits=logits)\n\n\ndef _assert_rnn_cell_fn(rnn_cell_fn, num_units, cell_type):\n \"\"\"Assert arguments are valid and return rnn_cell_fn.\"\"\"\n if rnn_cell_fn and (num_units or cell_type != USE_DEFAULT):\n raise ValueError(\n 'num_units and cell_type must not be specified when using rnn_cell_fn'\n )\n if not rnn_cell_fn:\n if cell_type == USE_DEFAULT:\n cell_type = 'basic_rnn'\n rnn_cell_fn = _make_rnn_cell_fn(num_units, cell_type)\n return rnn_cell_fn\n\n\nclass RNNClassifier(estimator.Estimator):\n \"\"\"A classifier for TensorFlow RNN models.\n\n Trains a recurrent neural network model to classify instances into one of\n multiple classes.\n\n Example:\n\n ```python\n token_sequence = sequence_categorical_column_with_hash_bucket(...)\n token_emb = embedding_column(categorical_column=token_sequence, ...)\n\n estimator = RNNClassifier(\n sequence_feature_columns=[token_emb],\n num_units=[32, 16], cell_type='lstm')\n\n # Input builders\n def input_fn_train: # returns x, y\n pass\n estimator.train(input_fn=input_fn_train, steps=100)\n\n def input_fn_eval: # returns x, y\n pass\n metrics = estimator.evaluate(input_fn=input_fn_eval, steps=10)\n def input_fn_predict: # returns x, None\n pass\n predictions = estimator.predict(input_fn=input_fn_predict)\n ```\n\n Input of `train` and `evaluate` should have following features,\n otherwise there will be a `KeyError`:\n\n * if `weight_column` is not `None`, a feature with\n `key=weight_column` whose value is a `Tensor`.\n * for each `column` in `sequence_feature_columns`:\n - a feature with `key=column.name` whose `value` is a `SparseTensor`.\n * for each `column` in `context_feature_columns`:\n - if `column` is a `_CategoricalColumn`, a feature with `key=column.name`\n whose `value` is a `SparseTensor`.\n - if `column` is a `_WeightedCategoricalColumn`, two features: the first\n with `key` the id column name, the second with `key` the weight column\n name. 
Both features' `value` must be a `SparseTensor`.\n - if `column` is a `_DenseColumn`, a feature with `key=column.name`\n whose `value` is a `Tensor`.\n\n Loss is calculated by using softmax cross entropy.\n\n @compatibility(eager)\n Estimators are not compatible with eager execution.\n @end_compatibility\n \"\"\"\n\n def __init__(self,\n sequence_feature_columns,\n context_feature_columns=None,\n num_units=None,\n cell_type=USE_DEFAULT,\n rnn_cell_fn=None,\n model_dir=None,\n n_classes=2,\n weight_column=None,\n label_vocabulary=None,\n optimizer='Adagrad',\n loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE,\n input_layer_partitioner=None,\n config=None):\n \"\"\"Initializes a `RNNClassifier` instance.\n\n Args:\n sequence_feature_columns: An iterable containing the `FeatureColumn`s\n that represent sequential input. All items in the set should either be\n sequence columns (e.g. `sequence_numeric_column`) or constructed from\n one (e.g. `embedding_column` with `sequence_categorical_column_*` as\n input).\n context_feature_columns: An iterable containing the `FeatureColumn`s\n for contextual input. The data represented by these columns will be\n replicated and given to the RNN at each timestep. These columns must be\n instances of classes derived from `_DenseColumn` such as\n `numeric_column`, not the sequential variants.\n num_units: Iterable of integer number of hidden units per RNN layer. If\n set, `cell_type` must also be specified and `rnn_cell_fn` must be\n `None`.\n cell_type: A subclass of `tf.nn.rnn_cell.RNNCell` or a string specifying\n the cell type. Supported strings are: `'basic_rnn'`, `'lstm'`, and\n `'gru'`. If set, `num_units` must also be specified and `rnn_cell_fn`\n must be `None`.\n rnn_cell_fn: A function with one argument, a `tf.estimator.ModeKeys`, and\n returns an object of type `tf.nn.rnn_cell.RNNCell` that will be used to\n construct the RNN. If set, `num_units` and `cell_type` cannot be set.\n This is for advanced users who need additional customization beyond\n `num_units` and `cell_type`. Note that `tf.nn.rnn_cell.MultiRNNCell` is\n needed for stacked RNNs.\n model_dir: Directory to save model parameters, graph and etc. This can\n also be used to load checkpoints from the directory into a estimator to\n continue training a previously saved model.\n n_classes: Number of label classes. Defaults to 2, namely binary\n classification. Must be > 1.\n weight_column: A string or a `_NumericColumn` created by\n `tf.feature_column.numeric_column` defining feature column representing\n weights. It is used to down weight or boost examples during training. It\n will be multiplied by the loss of the example. If it is a string, it is\n used as a key to fetch weight tensor from the `features`. If it is a\n `_NumericColumn`, raw tensor is fetched by key `weight_column.key`,\n then weight_column.normalizer_fn is applied on it to get weight tensor.\n label_vocabulary: A list of strings represents possible label values. If\n given, labels must be string type and have any value in\n `label_vocabulary`. If it is not given, that means labels are\n already encoded as integer or float within [0, 1] for `n_classes=2` and\n encoded as integer values in {0, 1,..., n_classes-1} for `n_classes`>2 .\n Also there will be errors if vocabulary is not provided and labels are\n string.\n optimizer: An instance of `tf.Optimizer` or string specifying optimizer\n type. Defaults to Adagrad optimizer.\n loss_reduction: One of `tf.losses.Reduction` except `NONE`. 
Describes how\n to reduce training loss over batch. Defaults to `SUM_OVER_BATCH_SIZE`.\n input_layer_partitioner: Optional. Partitioner for input layer. Defaults\n to `min_max_variable_partitioner` with `min_slice_size` 64 << 20.\n config: `RunConfig` object to configure the runtime settings.\n\n Raises:\n ValueError: If `num_units`, `cell_type`, and `rnn_cell_fn` are not\n compatible.\n \"\"\"\n rnn_cell_fn = _assert_rnn_cell_fn(rnn_cell_fn, num_units, cell_type)\n\n if n_classes == 2:\n head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss( # pylint: disable=protected-access\n weight_column=weight_column,\n label_vocabulary=label_vocabulary,\n loss_reduction=loss_reduction)\n else:\n head = head_lib._multi_class_head_with_softmax_cross_entropy_loss( # pylint: disable=protected-access\n n_classes,\n weight_column=weight_column,\n label_vocabulary=label_vocabulary,\n loss_reduction=loss_reduction)\n\n def _model_fn(features, labels, mode, config):\n return _rnn_model_fn(\n features=features,\n labels=labels,\n mode=mode,\n head=head,\n rnn_cell_fn=rnn_cell_fn,\n sequence_feature_columns=tuple(sequence_feature_columns or []),\n context_feature_columns=tuple(context_feature_columns or []),\n return_sequences=False,\n optimizer=optimizer,\n input_layer_partitioner=input_layer_partitioner,\n config=config)\n super(RNNClassifier, self).__init__(\n model_fn=_model_fn, model_dir=model_dir, config=config)\n\n\nclass RNNEstimator(estimator.Estimator):\n \"\"\"An Estimator for TensorFlow RNN models with user-specified head.\n\n Example:\n\n ```python\n token_sequence = sequence_categorical_column_with_hash_bucket(...)\n token_emb = embedding_column(categorical_column=token_sequence, ...)\n\n estimator = RNNEstimator(\n head=tf.contrib.estimator.regression_head(),\n sequence_feature_columns=[token_emb],\n num_units=[32, 16], cell_type='lstm')\n\n # Or with custom RNN cell:\n def rnn_cell_fn(mode):\n cells = [ tf.contrib.rnn.LSTMCell(size) for size in [32, 16] ]\n if mode == tf.estimator.ModeKeys.TRAIN:\n cells = [ tf.contrib.rnn.DropoutWrapper(cell, input_keep_prob=0.5)\n for cell in cells ]\n return tf.contrib.rnn.MultiRNNCell(cells)\n\n estimator = RNNEstimator(\n head=tf.contrib.estimator.regression_head(),\n sequence_feature_columns=[token_emb],\n rnn_cell_fn=rnn_cell_fn)\n\n # Input builders\n def input_fn_train: # returns x, y\n pass\n estimator.train(input_fn=input_fn_train, steps=100)\n\n def input_fn_eval: # returns x, y\n pass\n metrics = estimator.evaluate(input_fn=input_fn_eval, steps=10)\n def input_fn_predict: # returns x, None\n pass\n predictions = estimator.predict(input_fn=input_fn_predict)\n ```\n\n Input of `train` and `evaluate` should have following features,\n otherwise there will be a `KeyError`:\n\n * if the head's `weight_column` is not `None`, a feature with\n `key=weight_column` whose value is a `Tensor`.\n * for each `column` in `sequence_feature_columns`:\n - a feature with `key=column.name` whose `value` is a `SparseTensor`.\n * for each `column` in `context_feature_columns`:\n - if `column` is a `_CategoricalColumn`, a feature with `key=column.name`\n whose `value` is a `SparseTensor`.\n - if `column` is a `_WeightedCategoricalColumn`, two features: the first\n with `key` the id column name, the second with `key` the weight column\n name. 
Both features' `value` must be a `SparseTensor`.\n - if `column` is a `_DenseColumn`, a feature with `key=column.name`\n whose `value` is a `Tensor`.\n\n Loss and predicted output are determined by the specified head.\n\n @compatibility(eager)\n Estimators are not compatible with eager execution.\n @end_compatibility\n \"\"\"\n\n def __init__(self,\n head,\n sequence_feature_columns,\n context_feature_columns=None,\n num_units=None,\n cell_type=USE_DEFAULT,\n rnn_cell_fn=None,\n return_sequences=False,\n model_dir=None,\n optimizer='Adagrad',\n input_layer_partitioner=None,\n config=None):\n \"\"\"Initializes a `RNNEstimator` instance.\n\n Args:\n head: A `_Head` instance constructed with a method such as\n `tf.contrib.estimator.multi_label_head`. This specifies the model's\n output and loss function to be optimized.\n sequence_feature_columns: An iterable containing the `FeatureColumn`s\n that represent sequential input. All items in the set should either be\n sequence columns (e.g. `sequence_numeric_column`) or constructed from\n one (e.g. `embedding_column` with `sequence_categorical_column_*` as\n input).\n context_feature_columns: An iterable containing the `FeatureColumn`s\n for contextual input. The data represented by these columns will be\n replicated and given to the RNN at each timestep. These columns must be\n instances of classes derived from `_DenseColumn` such as\n `numeric_column`, not the sequential variants.\n num_units: Iterable of integer number of hidden units per RNN layer. If\n set, `cell_type` must also be specified and `rnn_cell_fn` must be\n `None`.\n cell_type: A subclass of `tf.nn.rnn_cell.RNNCell` or a string specifying\n the cell type. Supported strings are: `'basic_rnn'`, `'lstm'`, and\n `'gru'`. If set, `num_units` must also be specified and `rnn_cell_fn`\n must be `None`.\n rnn_cell_fn: A function with one argument, a `tf.estimator.ModeKeys`, and\n returns an object of type `tf.nn.rnn_cell.RNNCell` that will be used to\n construct the RNN. If set, `num_units` and `cell_type` cannot be set.\n This is for advanced users who need additional customization beyond\n `num_units` and `cell_type`. Note that `tf.nn.rnn_cell.MultiRNNCell` is\n needed for stacked RNNs.\n return_sequences: A boolean indicating whether to return the last output\n in the output sequence, or the full sequence.\n model_dir: Directory to save model parameters, graph and etc. This can\n also be used to load checkpoints from the directory into a estimator to\n continue training a previously saved model.\n optimizer: An instance of `tf.Optimizer` or string specifying optimizer\n type. Defaults to Adagrad optimizer.\n input_layer_partitioner: Optional. Partitioner for input layer. 
Defaults\n to `min_max_variable_partitioner` with `min_slice_size` 64 << 20.\n config: `RunConfig` object to configure the runtime settings.\n\n Raises:\n ValueError: If `num_units`, `cell_type`, and `rnn_cell_fn` are not\n compatible.\n \"\"\"\n rnn_cell_fn = _assert_rnn_cell_fn(rnn_cell_fn, num_units, cell_type)\n\n def _model_fn(features, labels, mode, config):\n return _rnn_model_fn(\n features=features,\n labels=labels,\n mode=mode,\n head=head,\n rnn_cell_fn=rnn_cell_fn,\n sequence_feature_columns=tuple(sequence_feature_columns or []),\n context_feature_columns=tuple(context_feature_columns or []),\n return_sequences=return_sequences,\n optimizer=optimizer,\n input_layer_partitioner=input_layer_partitioner,\n config=config)\n super(RNNEstimator, self).__init__(\n model_fn=_model_fn, model_dir=model_dir, config=config)\n" ]
[ [ "tensorflow.python.ops.math_ops.range", "tensorflow.python.ops.array_ops.concat", "tensorflow.python.ops.array_ops.shape", "tensorflow.contrib.feature_column.python.feature_column.sequence_feature_column.sequence_input_layer", "tensorflow.python.training.training_util.get_global_step", "tensorflow.python.ops.init_ops.glorot_uniform_initializer", "tensorflow.python.ops.rnn_cell.MultiRNNCell", "tensorflow.python.summary.summary.histogram", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.ops.array_ops.reshape", "tensorflow.python.ops.partitioned_variables.min_max_variable_partitioner", "tensorflow.python.ops.array_ops.gather", "tensorflow.python.ops.rnn.dynamic_rnn", "tensorflow.python.ops.variable_scope.variable_scope", "tensorflow.python.feature_column.feature_column.input_layer", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.ops.array_ops.expand_dims" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.13", "1.7", "1.10", "1.12" ] } ]
Mirofil/nasbench-1shot1
[ "c34bf9c0222f07a30ba1518b3e52e120a3560aa4", "c34bf9c0222f07a30ba1518b3e52e120a3560aa4" ]
[ "optimizers/bohb_one_shot/plots/util.py", "experiments/analysis/experiment_database.py" ]
[ "import os\nimport pickle\nimport collections\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom IPython import embed\n\n\ncolors={\n 'BOHB-PC-DARTS': 'darkorange',\n 'BOHB-DARTS': 'dodgerblue',\n 'BOHB-GDAS' : 'forestgreen',\n 'RE': 'crimson',\n\t\t'RS': 'darkorchid',\n\t\t'RL': 'sienna',\n\t\t'TPE': 'deepskyblue',\n 'SMAC': 'violet',\n 'HB': 'darkgray',\n 'BOHB': 'gold'\n}\n\nmarkers={\n 'BOHB-DARTS': '^',\n 'BOHB-PC-DARTS': 'v',\n 'BOHB-GDAS' : 'x',\n 'RS': 'D',\n\t\t'RE': 'o',\n\t\t'RL': 's',\n\t\t'SMAC': 'h',\n 'HB': '>',\n 'BOHB': '*',\n 'TPE': '<'\n}\n\n\ndef get_incumbent(losses, time_stamps):\n return_dict = {'time_stamps': [],\n 'losses': [],\n }\n\n current_incumbent = float('inf')\n incumbent_budget = -float('inf')\n\n for l, t in zip(losses, time_stamps):\n if l < current_incumbent:\n current_incumbent = l\n return_dict['losses'].append(l)\n return_dict['time_stamps'].append(t)\n else:\n return_dict['losses'].append(return_dict['losses'][-1])\n return_dict['time_stamps'].append(t)\n return return_dict.values()\n\n\ndef get_trajectories(args, global_min, path='regularized_evolution',\n methods=['RE', 'RS']):\n all_trajectories = {}\n for m in methods:\n dfs = []\n for seed in range(500):\n filename = os.path.join(path, m,\n 'algo_{}_0_ssp_{}_seed_{}.obj'.format(m, args.space,\n seed))\n try:\n with open(filename, 'rb') as f:\n data = pickle.load(f)\n losses = [1 - x.test_accuracy - global_min for x in data]\n times = np.array([x.training_time for x in data])\n times = [np.sum(times[:i+1]) for i in range(len(times))]\n if m in ['HB', 'BOHB']:\n costs = np.array([x.budget for x in data])\n costs = np.array(\n [np.sum(costs[:i+1]) for i in range(len(costs))]\n )\n n = len(np.where(costs <= 280*108)[0])\n times, losses = get_incumbent(losses[:n], times[:n])\n else:\n times, losses = get_incumbent(losses, times)\n print(seed, ' MIN: ', min(losses))\n df = pd.DataFrame({str(seed): losses}, index=times)\n #embed()\n dfs.append(df)\n except FileNotFoundError:\n break\n df = merge_and_fill_trajectories(dfs, default_value=None)\n if df.empty:\n continue\n print(m, df.shape)\n\n all_trajectories[m] = {\n 'time_stamps': np.array(df.index),\n 'losses': np.array(df.T)\n }\n\n return all_trajectories\n\n\ndef merge_and_fill_trajectories(pandas_data_frames, default_value=None):\n\t# merge all tracjectories keeping all time steps\n\tdf = pd.DataFrame().join(pandas_data_frames, how='outer')\n\n\t# forward fill to make it a propper step function\n\tdf=df.fillna(method='ffill')\n\n\tif default_value is None:\n\t# backward fill to replace the NaNs for the early times by\n\t# the performance of a random configuration\n\t\tdf=df.fillna(method='bfill')\n\telse:\n\t\tdf=df.fillna(default_value)\n\n\treturn(df)\n\n\ndef plot_losses(fig, ax, axins, incumbent_trajectories, regret=True,\n incumbent=None, show=True, linewidth=3, marker_size=10,\n xscale='log', xlabel='wall clock time [s]', yscale='log',\n ylabel=None, legend_loc = 'best', xlim=None, ylim=None,\n plot_mean=True, labels={}, markers=markers, colors=colors,\n figsize=(16,9)):\n\n if regret:\n if ylabel is None: ylabel = 'regret'\n\t\t# find lowest performance in the data to update incumbent\n\n if incumbent is None:\n incumbent = np.inf\n for tr in incumbent_trajectories.values():\n incumbent = min(tr['losses'][:,-1].min(), incumbent)\n print('incumbent value: ', incumbent)\n\n for m,tr in incumbent_trajectories.items():\n trajectory = np.copy(tr['losses'])\n if (trajectory.shape[0] == 0): continue\n if regret: 
trajectory -= incumbent\n\n sem = np.sqrt(trajectory.var(axis=0, ddof=1)/tr['losses'].shape[0])\n if plot_mean:\n mean = trajectory.mean(axis=0)\n else:\n mean = np.median(trajectory,axis=0)\n sem *= 1.253\n\n if 'DARTS' in m or 'GDAS' in m:\n ax.fill_between(tr['time_stamps'], mean-2*sem, mean+2*sem,\n color=colors[m], alpha=0.2)\n\n ax.plot(tr['time_stamps'],mean,\n label=labels.get(m, m), color=colors.get(m, None),linewidth=linewidth,\n marker=markers.get(m,None), markersize=marker_size, markevery=(0.1,0.1))\n\n if axins is not None:\n axins.plot(tr['time_stamps'],mean,\n label=labels.get(m, m), color=colors.get(m, None),linewidth=linewidth,\n marker=markers.get(m,None), markersize=marker_size, markevery=(0.1,0.1))\n\n return (fig, ax)\n", "import json\nimport os\nimport pickle\n\nimport numpy as np\n\nfrom experiments.analysis.utils import parse_log, read_in_correlation\nfrom nasbench_analysis.search_spaces.search_space_1 import SearchSpace1\nfrom nasbench_analysis.search_spaces.search_space_2 import SearchSpace2\nfrom nasbench_analysis.search_spaces.search_space_3 import SearchSpace3\n\n\ndef search_space_id_to_obj(id):\n if int(id) == 1:\n return SearchSpace1()\n elif int(id) == 2:\n return SearchSpace2()\n elif int(id) == 3:\n return SearchSpace3()\n else:\n raise ValueError('Search space unknown.')\n\n\ndef get_directory_list(path):\n \"\"\"Find directory containing config.json files\"\"\"\n directory_list = []\n # return nothing if path is a file\n if os.path.isfile(path):\n return []\n # add dir to directorylist if it contains .json files\n if len([f for f in os.listdir(path) if f == 'config.json']) > 0:\n directory_list.append(path)\n for d in os.listdir(path):\n new_path = os.path.join(path, d)\n if os.path.isdir(new_path):\n directory_list += get_directory_list(new_path)\n return directory_list\n\n\ndef get_key_from_scalar_configs(configs, key):\n metrics_to_stack = [list(config['scalars'][key]) for config in configs]\n shortest_metric = min([len(m) for m in metrics_to_stack])\n\n if 'validation_errors' == key or 'test_errors' == key:\n search_space = search_space_id_to_obj(configs[0]['search_space'])\n if 'test' in key:\n minimum = search_space.test_min_error\n elif 'valid' in key:\n minimum = search_space.valid_min_error\n else:\n raise ValueError('incorrect name in key')\n else:\n minimum = 0\n\n return np.mean(np.stack([metric[:shortest_metric] for metric in metrics_to_stack], axis=0), axis=-1) - minimum\n\n\nclass ExperimentDatabase:\n def __init__(self, root_dir):\n \"\"\"Load all directories with trainings.\"\"\"\n self._load(root_dir=root_dir)\n\n def query(self, conditions):\n searched_config = []\n for config in self._database:\n # Only select config if all conditions are satisfied\n conds_satisfied = [config.get(cond_key, None) == cond_val for cond_key, cond_val in conditions.items()]\n if all(conds_satisfied):\n searched_config.append(config)\n\n return searched_config\n\n def query_correlation(self, conditions):\n searched_config = []\n for config in self._database:\n # Only select config if all conditions are satisfied\n conds_satisfied = [config.get(cond_key, None) == cond_val for cond_key, cond_val in conditions.items()]\n if all(conds_satisfied):\n if config['scalars']['correlation_total'] is not None:\n searched_config.append(config)\n\n return searched_config\n\n def _load(self, root_dir):\n self._database = []\n for directory in get_directory_list(root_dir):\n try:\n self._database.append(self._get_run_dictionary(directory))\n except Exception as e:\n 
print('Error occurred in loading', directory, e)\n\n def _get_run_dictionary(self, path):\n with open(os.path.join(path, 'config.json')) as fp:\n config = json.load(fp)\n\n with open(os.path.join(path, 'one_shot_validation_errors.obj'), 'rb') as fp:\n validation_errors = pickle.load(fp)\n\n with open(os.path.join(path, 'one_shot_test_errors.obj'), 'rb') as fp:\n test_errors = pickle.load(fp)\n\n one_shot_validation_errors, one_shot_training_errors = parse_log(path)\n correlation_total, correlation_top = read_in_correlation(path, config)\n\n config['scalars'] = {}\n config['scalars']['validation_errors'] = validation_errors\n config['scalars']['test_errors'] = test_errors\n config['scalars']['one_shot_validation_errors'] = one_shot_validation_errors\n config['scalars']['one_shot_training_errors'] = one_shot_training_errors\n config['scalars']['correlation_total'] = correlation_total\n config['scalars']['correlation_top'] = correlation_top\n\n return config\n\n\ndef main():\n experiment_database = ExperimentDatabase(root_dir='experiments/darts')\n pass\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.median", "pandas.DataFrame", "numpy.copy", "numpy.array", "numpy.where", "numpy.sum" ], [ "numpy.stack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]