repo_name (string, 6–130 chars) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence) | possible_versions (list) |
---|---|---|---|---|---|
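Each row below pairs one repository with parallel per-file lists: `hexsha`, `file_path`, `code`, and `apis` line up index by index, and `possible_versions` holds one library-to-version mapping per file. A minimal sketch of one way to walk such a row, assuming it has been loaded as a plain Python dict; the `row` literal abbreviates the first record below and is illustrative, not a verbatim dataset entry:

```python
# Illustrative sketch only: `row` abbreviates the bibofeng/DeepRL-1 record.
row = {
    "repo_name": "bibofeng/DeepRL-1",
    "hexsha": ["7b14d9720a8ea1e08b05a2889d699a70174caf8f"],
    "file_path": ["utils/normalizer.py"],
    "code": ["import torch\nimport numpy as np\n..."],
    "apis": [["numpy.asscalar", "torch.FloatTensor", "numpy.isscalar", "torch.zeros"]],
    "possible_versions": [{"matplotlib": [], "numpy": [], "pandas": [],
                           "scipy": [], "tensorflow": []}],
}

# hexsha, file_path, code, apis and possible_versions are parallel per-file
# lists, so zipping them recovers one record per source file.
for sha, path, source, apis, versions in zip(
        row["hexsha"], row["file_path"], row["code"],
        row["apis"], row["possible_versions"]):
    print(f'{row["repo_name"]}@{sha[:7]} {path}: '
          f'{len(apis)} API calls, libraries considered: {sorted(versions)}')
```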
bibofeng/DeepRL-1 | [
"7b14d9720a8ea1e08b05a2889d699a70174caf8f",
"7b14d9720a8ea1e08b05a2889d699a70174caf8f",
"7b14d9720a8ea1e08b05a2889d699a70174caf8f"
] | [
"utils/normalizer.py",
"component/replay.py",
"network/network.py"
] | [
"#######################################################################\n# Copyright (C) 2017 Shangtong Zhang([email protected]) #\n# Permission given to modify the code as long as you keep this #\n# declaration at the top #\n#######################################################################\nimport torch\nimport numpy as np\n\nclass Normalizer:\n def __init__(self, o_size):\n self.stats = SharedStats(o_size)\n\n def __call__(self, o_):\n o = torch.FloatTensor(o_)\n self.stats.feed(o)\n std = (self.stats.v + 1e-6) ** .5\n o = (o - self.stats.m) / std\n return o.numpy().reshape(o_.shape)\n\nclass StaticNormalizer:\n def __init__(self, o_size):\n self.offline_stats = SharedStats(o_size)\n self.online_stats = SharedStats(o_size)\n\n def __call__(self, o_):\n if np.isscalar(o_):\n o = torch.FloatTensor([o_])\n else:\n o = torch.FloatTensor(o_)\n self.online_stats.feed(o)\n if self.offline_stats.n[0] == 0:\n return o_\n std = (self.offline_stats.v + 1e-6) ** .5\n o = (o - self.offline_stats.m) / std\n o = o.numpy()\n if np.isscalar(o_):\n o = np.asscalar(o)\n else:\n o = o.reshape(o_.shape)\n return o\n\nclass SharedStats:\n def __init__(self, o_size):\n self.m = torch.zeros(o_size)\n self.v = torch.zeros(o_size)\n self.n = torch.zeros(1)\n self.m.share_memory_()\n self.v.share_memory_()\n self.n.share_memory_()\n\n def feed(self, o):\n n = self.n[0]\n new_m = self.m * (n / (n + 1)) + o / (n + 1)\n self.v.copy_(self.v * (n / (n + 1)) + (o - self.m) * (o - new_m) / (n + 1))\n self.m.copy_(new_m)\n self.n.add_(1)\n\n def zero(self):\n self.m.zero_()\n self.v.zero_()\n self.n.zero_()\n\n def load(self, stats):\n self.m.copy_(stats.m)\n self.v.copy_(stats.v)\n self.n.copy_(stats.n)\n\n def merge(self, B):\n A = self\n n_A = self.n[0]\n n_B = B.n[0]\n n = n_A + n_B\n delta = B.m - A.m\n m = A.m + delta * n_B / n\n v = A.v * n_A + B.v * n_B + delta * delta * n_A * n_B / n\n v /= n\n self.m.copy_(m)\n self.v.copy_(v)\n self.n.add_(B.n)\n\n def state_dict(self):\n return {'m': self.m.numpy(),\n 'v': self.v.numpy(),\n 'n': self.n.numpy()}\n\n def load_state_dict(self, saved):\n self.m = torch.FloatTensor(saved['m'])\n self.v = torch.FloatTensor(saved['v'])\n self.n = torch.FloatTensor(saved['n'])",
"#######################################################################\n# Copyright (C) 2017 Shangtong Zhang([email protected]) #\n# Permission given to modify the code as long as you keep this #\n# declaration at the top #\n#######################################################################\n\nimport numpy as np\nimport torch\nimport random\n\nclass Replay:\n def __init__(self, memory_size, batch_size, dtype=np.float32):\n self.memory_size = memory_size\n self.batch_size = batch_size\n self.dtype = dtype\n\n self.states = None\n self.actions = np.empty(self.memory_size, dtype=np.int8)\n self.rewards = np.empty(self.memory_size)\n self.next_states = None\n self.terminals = np.empty(self.memory_size, dtype=np.int8)\n\n self.pos = 0\n self.full = False\n\n\n def feed(self, experience):\n state, action, reward, next_state, done = experience\n\n if self.states is None:\n self.states = np.empty((self.memory_size, ) + state.shape, dtype=self.dtype)\n self.next_states = np.empty((self.memory_size, ) + state.shape, dtype=self.dtype)\n\n self.states[self.pos][:] = state\n self.actions[self.pos] = action\n self.rewards[self.pos] = reward\n self.next_states[self.pos][:] = next_state\n self.terminals[self.pos] = done\n\n self.pos += 1\n if self.pos == self.memory_size:\n self.full = True\n self.pos = 0\n\n def sample(self):\n upper_bound = self.memory_size if self.full else self.pos\n sampled_indices = np.random.randint(0, upper_bound, size=self.batch_size)\n return [self.states[sampled_indices],\n self.actions[sampled_indices],\n self.rewards[sampled_indices],\n self.next_states[sampled_indices],\n self.terminals[sampled_indices]]\n\nclass HybridRewardReplay:\n def __init__(self, memory_size, batch_size, dtype=np.float32):\n self.memory_size = memory_size\n self.batch_size = batch_size\n self.dtype = dtype\n\n self.states = None\n self.actions = np.empty(self.memory_size, dtype=np.int8)\n self.rewards = None\n self.next_states = None\n self.terminals = np.empty(self.memory_size, dtype=np.int8)\n\n self.pos = 0\n self.full = False\n\n\n def feed(self, experience):\n state, action, reward, next_state, done = experience\n\n if self.states is None:\n self.rewards = np.empty((self.memory_size, ) + reward.shape, dtype=self.dtype)\n self.states = np.empty((self.memory_size, ) + state.shape, dtype=self.dtype)\n self.next_states = np.empty((self.memory_size, ) + state.shape, dtype=self.dtype)\n\n self.states[self.pos][:] = state\n self.actions[self.pos] = action\n self.rewards[self.pos][:] = reward\n self.next_states[self.pos][:] = next_state\n self.terminals[self.pos] = done\n\n self.pos += 1\n if self.pos == self.memory_size:\n self.full = True\n self.pos = 0\n\n def sample(self):\n upper_bound = self.memory_size if self.full else self.pos\n sampled_indices = np.random.randint(0, upper_bound, size=self.batch_size)\n return [self.states[sampled_indices],\n self.actions[sampled_indices],\n self.rewards[sampled_indices],\n self.next_states[sampled_indices],\n self.terminals[sampled_indices]]\n\nclass HighDimActionReplay:\n def __init__(self, memory_size, batch_size, dtype=np.float32):\n self.memory_size = memory_size\n self.batch_size = batch_size\n self.dtype = dtype\n\n self.states = None\n self.actions = None\n self.rewards = np.empty(self.memory_size)\n self.next_states = None\n self.terminals = np.empty(self.memory_size, dtype=np.int8)\n\n self.pos = 0\n self.full = False\n\n\n def feed(self, experience):\n state, action, reward, next_state, done = experience\n\n if self.states is None:\n 
self.states = np.empty((self.memory_size, ) + state.shape, dtype=self.dtype)\n self.actions = np.empty((self.memory_size, ) + action.shape)\n self.next_states = np.empty((self.memory_size, ) + state.shape, dtype=self.dtype)\n\n self.states[self.pos][:] = state\n self.actions[self.pos][:] = action\n self.rewards[self.pos] = reward\n self.next_states[self.pos][:] = next_state\n self.terminals[self.pos] = done\n\n self.pos += 1\n if self.pos == self.memory_size:\n self.full = True\n self.pos = 0\n\n def sample(self):\n upper_bound = self.memory_size if self.full else self.pos\n sampled_indices = np.random.randint(0, upper_bound, size=self.batch_size)\n return [self.states[sampled_indices],\n self.actions[sampled_indices],\n self.rewards[sampled_indices],\n self.next_states[sampled_indices],\n self.terminals[sampled_indices]]\n\nclass GeneralReplay:\n def __init__(self, memory_size, batch_size):\n self.buffer = []\n self.memory_size = memory_size\n self.batch_size = batch_size\n\n def feed(self, experiences):\n for experience in zip(*experiences):\n self.buffer.append(experience)\n if len(self.buffer) > self.memory_size:\n del self.buffer[0]\n\n def sample(self):\n sampled = zip(*random.sample(self.buffer, self.batch_size))\n return sampled\n\n def clear(self):\n self.buffer = []\n\n def full(self):\n return len(self.buffer) == self.memory_size\n",
"#######################################################################\n# Copyright (C) 2017 Shangtong Zhang([email protected]) #\n# Permission given to modify the code as long as you keep this #\n# declaration at the top #\n#######################################################################\n\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\n\n# Base class for all kinds of network\nclass BasicNet:\n def __init__(self, optimizer_fn, gpu, LSTM=False):\n if optimizer_fn is not None:\n self.optimizer = optimizer_fn(self.parameters())\n self.gpu = gpu and torch.cuda.is_available()\n self.LSTM = LSTM\n if self.gpu:\n self.cuda()\n self.FloatTensor = torch.cuda.FloatTensor\n else:\n self.FloatTensor = torch.FloatTensor\n\n def to_torch_variable(self, x, dtype='float32'):\n if isinstance(x, Variable):\n return x\n if not isinstance(x, torch.FloatTensor):\n x = torch.from_numpy(np.asarray(x, dtype=dtype))\n if self.gpu:\n x = x.cuda()\n return Variable(x)\n\n def reset(self, terminal):\n if not self.LSTM:\n return\n if terminal:\n self.h.data.zero_()\n self.c.data.zero_()\n self.h = Variable(self.h.data)\n self.c = Variable(self.c.data)\n\n# Base class for value based methods\nclass VanillaNet(BasicNet):\n def predict(self, x, to_numpy=False):\n y = self.forward(x)\n if to_numpy:\n if type(y) is list:\n y = [y_.cpu().data.numpy() for y_ in y]\n else:\n y = y.cpu().data.numpy()\n return y\n\n# Base class for actor critic method\nclass ActorCriticNet(BasicNet):\n def predict(self, x):\n phi = self.forward(x, True)\n pre_prob = self.fc_actor(phi)\n prob = F.softmax(pre_prob)\n log_prob = F.log_softmax(pre_prob)\n value = self.fc_critic(phi)\n return prob, log_prob, value\n\n def critic(self, x):\n phi = self.forward(x, False)\n return self.fc_critic(phi)\n\n# Base class for dueling architecture\nclass DuelingNet(BasicNet):\n def predict(self, x, to_numpy=False):\n phi = self.forward(x)\n value = self.fc_value(phi)\n advantange = self.fc_advantage(phi)\n q = value.expand_as(advantange) + (advantange - advantange.mean(1).expand_as(advantange))\n if to_numpy:\n return q.cpu().data.numpy()\n return q"
] | [
[
"numpy.asscalar",
"torch.FloatTensor",
"numpy.isscalar",
"torch.zeros"
],
[
"numpy.empty",
"numpy.random.randint"
],
[
"torch.nn.functional.softmax",
"torch.nn.functional.log_softmax",
"numpy.asarray",
"torch.cuda.is_available",
"torch.autograd.Variable"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
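The `SharedStats.feed` update in the `utils/normalizer.py` file above is an incremental, per-observation estimate of the population mean and variance, which is what lets worker processes share normalizer state through `share_memory_()`. A minimal NumPy check of that same update rule, written here as an illustrative sketch rather than code from the repository:

```python
# Illustrative sketch: the same incremental update as SharedStats.feed,
# in NumPy instead of torch, verified against batch statistics.
import numpy as np

rng = np.random.default_rng(0)
data = rng.normal(loc=3.0, scale=2.0, size=(1000, 4))

m = np.zeros(4)   # running mean, like SharedStats.m
v = np.zeros(4)   # running population variance, like SharedStats.v
n = 0.0           # observation count, like SharedStats.n

for o in data:
    new_m = m * (n / (n + 1)) + o / (n + 1)
    v = v * (n / (n + 1)) + (o - m) * (o - new_m) / (n + 1)
    m = new_m
    n += 1

assert np.allclose(m, data.mean(axis=0))
assert np.allclose(v, data.var(axis=0))   # population variance (ddof=0)
print("running stats match batch stats")
```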
DagonDD/google-research | [
"ccd5d36e7a8ee1d672c93a801634bfd8f2e0c3eb"
] | [
"t5_closed_book_qa/t5_cbqa/preprocessors.py"
] | [
"# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"T5 CBQA preprocessors.\"\"\"\nimport tensorflow.compat.v1 as tf\n\n\ndef natural_questions_nocontext(\n dataset,\n prefix='nq question: ',\n drop_yes_no=False,\n max_tokens=None,\n max_answers=None,\n ):\n \"\"\"Convert Natural Questions TFDS to open domain with multiple answers.\n\n Examples with no short or yes/no answers are filtered. All short and yes/no\n answers (even across annotations) are emitted, so the targets produced by this\n preprocessor are invalid in the case of multiple annotations. However, these\n should not occur in the train set.\n\n The function takes the natural_questions TFDS dataset an emits examples of the\n form:\n {\n 'inputs': 'nq question: what are the names of the olsen twins'\n 'targets': 'answer: Mary-Kate answer: Ashley'\n }\n\n Args:\n dataset: a tf.data.Dataset to process.\n prefix: str, prefix to prepend to the inputs.\n drop_yes_no: bool, whether to drop yes/no answers, keeping only short\n answers.\n max_tokens: (Optional) int, the maximum number of tokens (as specified by\n NQ) beyond which a short answer is dropped. None are dropped if set to\n `None`.\n max_answers: (Optional) int, the maximum number of answers to include in the\n targets. Will be selected deterministically from the beginning of the\n list. 
All answers are included if set to `None`.\n\n Returns:\n a tf.data.Dataset\n \"\"\"\n def nq_map(ex):\n \"\"\"Map Natural Questions example to text-to-text example.\"\"\"\n inputs = prefix + ex['question']['text']\n\n annotations = ex['annotations']\n\n yes_no_labels = annotations['yes_no_answer']\n if drop_yes_no:\n yes_no_labels = -1 * tf.ones_like(yes_no_labels)\n yes_no_answers = tf.boolean_mask(yes_no_labels, yes_no_labels > -1)\n yes_no_answers = tf.where_v2(tf.equal(yes_no_answers, 1), 'yes', 'no')\n\n short_answers = annotations['short_answers']['text'].flat_values\n short_answer_starts = annotations['short_answers']['text'].row_starts()\n if max_tokens:\n start_tokens = annotations['short_answers']['start_token']\n end_tokens = annotations['short_answers']['end_token']\n dropped_answers = end_tokens - start_tokens > max_tokens\n short_answers = tf.boolean_mask(\n short_answers, tf.math.logical_not(dropped_answers.values))\n # Subtract dropped answers from row starts.\n row_drop_count = tf.math.reduce_sum(\n tf.cast(dropped_answers, tf.int64), axis=1)\n short_answer_starts -= tf.concat(\n [[0], tf.math.cumsum(row_drop_count[:-1])], axis=0)\n\n answers = tf.concat([yes_no_answers, short_answers], axis=0)\n if max_answers:\n answers = answers[:max_answers]\n targets = tf.strings.reduce_join('answer: ' + answers, separator=' ')\n\n return {\n 'inputs': inputs,\n 'targets': targets,\n 'short_answers/values': short_answers,\n 'short_answers/row_starts': short_answer_starts,\n 'yes_no_answers': yes_no_labels\n }\n\n dataset = dataset.map(\n nq_map, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n return dataset.filter(lambda ex: tf.strings.length(ex['targets']) > 0)\n\n\ndef natural_questions_open(\n dataset,\n prefix='nq question: '\n ):\n \"\"\"Convert Natural Questions Open TFDS to examples.\n\n If there are multiple answers in the input, selects the first one as the\n target.\n\n The function takes the natural_question_open TFDS dataset and emits examples\n of the form:\n {\n 'inputs': 'nq question: What are the names of the Olsen Twins?'\n 'targets': 'Mary-Kate and Ashley',\n 'answers': ['Mary-Kate and Ashley', 'Ashley and Mary-Kate']\n }\n\n Args:\n dataset: a tf.data.Dataset to process.\n prefix: str, prefix to prepend to the inputs.\n\n Returns:\n a tf.data.Dataset\n \"\"\"\n\n def nq_map(ex):\n \"\"\"Map Natural Questions example to text-to-text example.\"\"\"\n return {\n 'inputs': prefix + ex['question'],\n 'targets': ex['answer'][0],\n 'answers': ex['answer'],\n }\n return dataset.map(nq_map, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\n\ndef trivia_qa_open(\n dataset,\n prefix='trivia_qa question: '\n ):\n \"\"\"Convert TriviaQA dataset to open domain qa examples.\n\n The function takes the trivia_qa TFDS dataset and emits examples of the\n form:\n {\n 'inputs': 'trivia_qa question: What are the names of the Olsen Twins?'\n 'targets': 'Mary-Kate and Ashley',\n 'answers': ['Mary-Kate and Ashley', 'Ashley and Mary-Kate']\n }\n\n Args:\n dataset: a tf.data.Dataset to process.\n prefix: str, prefix to prepend to the inputs.\n\n Returns:\n a tf.data.Dataset\n \"\"\"\n def tqa_map(ex):\n \"\"\"Map TriviaQA example to text-to-text example.\"\"\"\n return {\n 'inputs': prefix + ex['question'],\n 'targets': ex['answer']['value'],\n 'answers': ex['answer']['aliases'],\n }\n\n return dataset.map(tqa_map, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\n\ndef web_questions_open(\n dataset,\n prefix='wq question: '\n ):\n \"\"\"Convert WebQuestions TFDS to open domain 
examples.\n\n If there are multiple answers in the input, selects the first one as the\n target.\n\n The function takes the web_questions TFDS dataset and emits examples of the\n form:\n {\n 'inputs': 'wq question: What are the names of the Olsen Twins?'\n 'targets': 'Mary-Kate and Ashley',\n 'answers': ['Mary-Kate and Ashley', 'Ashley and Mary-Kate']\n }\n\n Args:\n dataset: a tf.data.Dataset to process.\n prefix: str, prefix to prepend to the inputs.\n\n Returns:\n a tf.data.Dataset\n \"\"\"\n\n def wq_map(ex):\n \"\"\"Map WebQuestions example to text-to-text example.\"\"\"\n return {\n 'inputs': prefix + ex['question'],\n 'targets': ex['answers'][0],\n 'answers': ex['answers'],\n }\n return dataset.map(wq_map, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\n\ndef sample_answer(\n dataset,\n ):\n \"\"\"Replaces target with sampled answer.\"\"\"\n\n def samp_map(ex):\n answers = tf.random.shuffle(ex['answers'])\n return {\n 'inputs': ex['inputs'],\n 'targets': answers[0],\n 'answers': answers,\n }\n return dataset.map(samp_map, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n"
] | [
[
"tensorflow.compat.v1.random.shuffle",
"tensorflow.compat.v1.ones_like",
"tensorflow.compat.v1.concat",
"tensorflow.compat.v1.strings.reduce_join",
"tensorflow.compat.v1.equal",
"tensorflow.compat.v1.strings.length",
"tensorflow.compat.v1.boolean_mask",
"tensorflow.compat.v1.cast",
"tensorflow.compat.v1.math.logical_not",
"tensorflow.compat.v1.math.cumsum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
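The `*_open` preprocessors in `t5_cbqa/preprocessors.py` above share one pattern: prepend a task prefix to the question and emit the first listed answer as the target. A minimal sketch of that mapping, assuming TensorFlow is available; the toy in-memory dataset stands in for the `natural_questions_open` TFDS split and is not part of the original file:

```python
import tensorflow.compat.v1 as tf

# Toy stand-in for the natural_questions_open TFDS split (illustrative only).
toy = tf.data.Dataset.from_tensor_slices({
    'question': ['what are the names of the olsen twins'],
    'answer': [['Mary-Kate and Ashley', 'Ashley and Mary-Kate']],
})

def nq_map(ex):
    # Same shape as natural_questions_open above: prefix the question
    # and take the first listed answer as the target.
    return {
        'inputs': 'nq question: ' + ex['question'],
        'targets': ex['answer'][0],
        'answers': ex['answer'],
    }

for ex in toy.map(nq_map).as_numpy_iterator():
    print(ex['inputs'].decode(), '->', ex['targets'].decode())
```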
vrooje/Data-digging | [
"ae4ee1de0df0d2686115510ac35f5960d5cfaf08",
"ae4ee1de0df0d2686115510ac35f5960d5cfaf08",
"ae4ee1de0df0d2686115510ac35f5960d5cfaf08"
] | [
"example_scripts/basic_project_stats.py",
"example_scripts/seabirdwatch/sites_over_time_bins.py",
"example_scripts/planetary_response_network/caribbean_irma_2017/extract_markings_to1file.py"
] | [
"#Python 2.7.9 (default, Apr 5 2015, 22:21:35)\n# the full environment I used to test this is in basic_project_stats.yml\nimport sys\n\n# file with raw classifications (csv)\n# put this way up here so if there are no inputs we exit quickly before even trying to load everything else\ntry:\n classfile_in = sys.argv[1]\nexcept:\n print(\"\\nUsage: %s classifications_infile\" % sys.argv[0])\n print(\" classifications_infile is a Zooniverse (Panoptes) classifications data export CSV.\\n\")\n print(\" Optional inputs:\")\n print(\" workflow_id=N\")\n print(\" specify the program should only consider classifications from workflow id N\")\n print(\" workflow_version=M\")\n print(\" specify the program should only consider classifications from workflow version M\")\n print(\" (note the program will only consider the major version, i.e. the integer part)\")\n print(\" outfile_csv=filename.csv\")\n print(\" if you want the program to save a sub-file with only classification info from the workflow specified, give the filename here\")\n print(\" --time_elapsed\")\n print(\" specify the program should compute classification durations and total classification work effort\")\n print(\" --remove_duplicates\")\n print(\" remove duplicate classifications (subject-user pairs) before analysis.\")\n print(\" memory-intensive for big files; probably best to pair with outfile_csv so you save the output.\")\n print(\" --keep_nonlive\")\n print(\" by default the program ignores classifications made while the project wasn't 'Live'; setting this will keep them in.\")\n print(\" --keep_allcols\")\n print(\" by default the program only keeps columns required for stats; use this with a specified outfile_csv to save all columns, including annotations. (If you're not using outfile_csv this will just waste memory.)\")\n print(\"\\nAll output will be to stdout (about 1-2 paragraphs' worth).\\n\")\n sys.exit(0)\n\n\n\nimport numpy as np # works in 1.10.1\nimport pandas as pd # works in 0.13.1\nimport datetime\nimport dateutil.parser\nimport json, ujson\nimport gc\n\n# default value is not to care about workflow ID or version\nworkflow_id = -1\nworkflow_version = -1\n# by default we won't worry about computing how much time effort the volunteers cumulatively spent\ntime_elapsed = False\n# by default we won't write the subset of classifications we used to a new csv file\noutput_csv = False\n# by default we'll ignore the possibility of duplicate classifications\n# note duplicates are relatively rare, usually <2% of all classifications\n# the Zooniverse has squashed several bugs related to this, but some still\n# happen client-side and there's nothing we can do about that.\nremove_duplicates = False\n# by default, restrict the analysis to \"Live\" classifications\nkeep_nonlive = False\n# by default, don't keep every column of the classifications when writing to an outfile\nkeep_allcols = False\n\n# check for other command-line arguments\nif len(sys.argv) > 2:\n # if there are additional arguments, loop through them\n for i_arg, argstr in enumerate(sys.argv[2:]):\n arg = argstr.split('=')\n\n if arg[0] == \"workflow_id\":\n workflow_id = int(arg[1])\n elif arg[0] == \"workflow_version\":\n workflow_version = float(arg[1])\n elif (arg[0] == \"outfile_csv\") | (arg[0] == \"outfile\"):\n outfile_csv = arg[1]\n output_csv = True\n elif arg[0] == \"--keep_allcols\":\n keep_allcols = True\n elif arg[0] == \"--time_elapsed\":\n time_elapsed = True\n elif arg[0] == \"--remove_duplicates\":\n remove_duplicates = True\n elif arg[0] == 
\"--keep_nonlive\":\n keep_nonlive = True\n\n\n\n# columns currently in an exported Panoptes classification file:\n# classification_id,user_name,user_id,user_ip,workflow_id,workflow_name,workflow_version,created_at,gold_standard,expert,metadata,annotations,subject_data,subject_ids\n\n# classification_id identifies the specific classification - should be unique for each row in this file\n# user_name is either their registered name or \"not-logged-in\"+their hashed IP\n# user_id is their numeric Zooniverse ID or blank if they're unregistered\n# user_ip is a hashed version of their IP\n# workflow_id is the numeric ID of this workflow, which you can find in the project builder URL for managing the workflow:\n# https://www.zooniverse.org/lab/[project_id]/workflow/[workflow_id]/\n# workflow_name is the name you gave your workflow (for sanity checks)\n# workflow_version is [bigchangecount].[smallchangecount] and is probably pretty big\n# created_at is the date the entry for the classification was recorded\n# gold_standard is 1 if this classification was done in gold standard mode\n# expert is 1 if this classification was done in expert mode... I think\n# metadata (json) is the data the browser sent along with the classification.\n# Includes browser information, language, started_at and finished_at\n# note started_at and finished_at are perhaps the easiest way to calculate the length of a classification\n# (the duration elapsed between consecutive created_at by the same user is another way)\n# the difference here is back-end vs front-end\n# annotations (json) contains the actual classification information\n# which for this analysis we will ignore completely, for now\n# subject_data is cross-matched from the subjects table and is for convenience in data reduction\n# subject_ids has just the subject ids in the given classification\n# here we will ignore this too, except to count subjects once.\n# we'll also ignore classification_id, user_ip, workflow information, gold_standard, and expert.\n#\n\n\n# Print out the input parameters just as a sanity check\nprint(\"Computing project stats using:\")\nprint(\" infile: %s\" % classfile_in)\n\n\n\n\n#################################################################################\n#################################################################################\n#################################################################################\n\n\n# Get the Gini coefficient - https://en.wikipedia.org/wiki/Gini_coefficient\n#\n# The Gini coefficient measures inequality in distributions of things.\n# It was originally conceived for economics (e.g. where is the wealth in a country?\n# in the hands of many citizens or a few?), but it's just as applicable to many\n# other fields. In this case we'll use it to see how classifications are\n# distributed among classifiers.\n# G = 0 is a completely even distribution (everyone does the same number of\n# classifications), and ~1 is uneven (~all the classifications are done\n# by one classifier).\n# Typical values of the Gini for healthy Zooniverse projects (Cox et al. 
2015) are\n# in the range of 0.7-0.9.\n# That range is generally indicative of a project with a loyal core group of\n# volunteers who contribute the bulk of the classification effort, but balanced\n# out by a regular influx of new classifiers trying out the project, from which\n# you continue to draw to maintain a core group of prolific classifiers.\n# Once your project is fairly well established, you can compare it to past Zooniverse\n# projects to see how you're doing.\n# If your G is << 0.7, you may be having trouble recruiting classifiers into a loyal\n# group of volunteers. People are trying it, but not many are staying.\n# If your G is > 0.9, it's a little more complicated. If your total classification\n# count is lower than you'd like it to be, you may be having trouble recruiting\n# classifiers to the project, such that your classification counts are\n# dominated by a few people.\n# But if you have G > 0.9 and plenty of classifications, this may be a sign that your\n# loyal users are -really- committed, so a very high G is not necessarily a bad thing.\n#\n# Of course the Gini coefficient is a simplified measure that doesn't always capture\n# subtle nuances and so forth, but it's still a useful broad metric.\n\ndef gini(list_of_values):\n sorted_list = sorted(list_of_values)\n height, area = 0, 0\n for value in sorted_list:\n height += value\n area += height - value / 2.\n fair_area = height * len(list_of_values) / 2\n return (fair_area - area) / fair_area\n\n\n\n\n#################################################################################\n#################################################################################\n#################################################################################\n\n\ndef get_duplicate_ids(grp):\n # groupbys and dfs have slightly different indexing and just NOPE\n #thegrp = pd.DataFrame(grp)\n thegrp = grp\n\n if len(thegrp) == 1:\n return\n else:\n # we have a duplicate set, so return the details\n return thegrp\n\n\n\n\ndef get_live_project(meta_json):\n try:\n return meta_json['live_project']\n except:\n # apparently some subject metadata doesn't have this? dunno?\n return False\n\ndef get_live_project_incl_missing(meta_json):\n try:\n return meta_json['live_project']\n except:\n return -1\n\n# Begin the main stuff\n\n\nprint(\"Reading classifications from %s\" % classfile_in)\n\n#classifications = pd.read_csv(classfile_in)\n# the above will work but uses a LOT of memory for projects with > 1 million\n# classifications. Nothing here uses the actual classification data so don't read it\n'''\nIf you are using this code on an older project, where the data export is from\nbefore subject_ids were exported as their own column, change \"subject_id\" below\nto \"subject_data\", and then when you define the groupby \"by_subject\" and count\nsubjects, you'll need to use subject_data instead of subject_ids.\n\nApologies for doing this, but subject_data contains the whole manifest so for\nbig projects with big catalogs it can take up a lot of memory, so we don't want to\nuse it if we don't have to.\n'''\ncols_keep = [\"classification_id\", \"user_name\", \"user_id\", \"user_ip\", \"workflow_id\", \"workflow_version\", \"created_at\", \"metadata\", \"subject_ids\"]\nif not keep_allcols:\n try:\n classifications = pd.read_csv(classfile_in, usecols=cols_keep)\n except:\n print(\"Some columns missing from classifications infile, reading without specifying columns (uses more memory)... 
\")\n classifications = pd.read_csv(classfile_in)\nelse:\n try:\n classifications = pd.read_csv(classfile_in, low_memory=False)\n except:\n classifications = pd.read_csv(classfile_in)\n\n cols_used = classifications.columns.tolist()\n cols_out = classifications.columns.tolist()\n if not 'created_day' in cols_used:\n cols_used.append('created_day')\n if not 'meta_json' in cols_used:\n cols_used.append('meta_json')\n\nn_class_raw = len(classifications)\n\n# now restrict classifications to a particular workflow id/version if requested\nif (workflow_id > 0) | (workflow_version > 0):\n\n # only keep the stuff that matches these workflow properties\n if (workflow_id > 0):\n\n print(\"Considering only workflow id %d\" % workflow_id)\n\n in_workflow = classifications.workflow_id == workflow_id\n else:\n # the workflow id wasn't specified, so just make an array of true\n in_workflow = np.array([True for q in classifications.workflow_id])\n\n if (workflow_version > 0):\n\n classifications['version_int'] = [int(q) for q in classifications.workflow_version]\n\n print(\"Considering only major workflow version %d\" % int(workflow_version))\n\n # we only care about the major workflow version, not the minor version\n in_version = classifications.version_int == int(workflow_version)\n else:\n in_version = np.array([True for q in classifications.workflow_version])\n\n\n if (sum(in_workflow & in_version) == 0):\n print(\"ERROR: your combination of workflow_id and workflow_version does not exist!\\nIgnoring workflow id/version request and computing stats for ALL classifications instead.\")\n #classifications = classifications_all\n else:\n # select the subset of classifications\n classifications = classifications[in_workflow & in_version]\n\n del in_workflow\n del in_version\n\nelse:\n # just use everything\n #classifications = classifications_all\n\n workflow_ids = classifications.workflow_id.unique()\n # this takes too much CPU time just for a print statement. 
Just use float versions\n #classifications['version_int'] = [int(q) for q in classifications.workflow_version]\n version_ints = classifications.workflow_version.unique()\n\n print(\"Considering all classifications in workflow ids:\")\n print(workflow_ids)\n print(\" and workflow_versions:\")\n print(version_ints)\n\n\n# Remove classifications collected before the project went Live\n# note: it makes logical sense to do this *before* we extract the classifications\n# from the workflow we care about, *but* the meta_json setting step (which we\n# need in order to extract Live project status) can take a while (up to ~minutes)\n# and adds to memory usage, so I'd rather do it after we've already culled\n# the table of potentially a lot of unused rows.\n# OTOH culling duplicates takes more time and memory than culling unused workflow\n# versions, so wait to do that until after we've removed non-Live classifications\n\n# first, extract the metadata column into a json we can read entries for\n#\n# ujson is quite a bit faster than json but seems to use a bit more memory as it works\nclassifications['meta_json'] = [ujson.loads(q) for q in classifications.metadata]\n\nif keep_nonlive:\n print(\"Retaining all non-live classifications in analysis.\")\nelse:\n # would that we could just do q['live_project'] but if that tag is missing for\n # any classifications (which it is in some cases) it crashes\n classifications['live_project'] = [get_live_project(q) for q in classifications.meta_json]\n\n # if this line gives you an error you've read in this boolean as a string\n # so need to convert \"True\" --> True and \"False\" --> False\n class_live = classifications[classifications.live_project].copy()\n n_class_thiswf = len(classifications)\n n_live = sum(classifications.live_project)\n n_notlive = n_class_thiswf - n_live\n print(\" Removing %d non-live classifications...\" % n_notlive)\n\n # don't make a slice but also save memory\n classifications = pd.DataFrame(class_live)\n del class_live\n gc.collect()\n\n\n\n# if we've been asked to remove duplicates, do that now\nif remove_duplicates:\n '''\n a duplicate can be that the classification id is submitted twice by the client\n but it can also be that the classifier classified the same subject twice in different classification_ids.\n\n So identify duplicates based on username + subject id + workflow info, not based on classification_id.\n '''\n subj_classifications = classifications.groupby('user_name subject_ids workflow_id workflow_version'.split())\n\n n_class = len(classifications)\n # just take the first of each of the groups\n classifications_nodups = subj_classifications.head(1)\n n_class_nodup = len(classifications_nodups)\n\n n_dups = n_class - n_class_nodup\n\n if n_dups == 0:\n print(\"Searched for duplicate classifications; none found.\")\n else:\n duplicate_outfile = classfile_in.replace(\".csv\", \"_duplicated_only.csv\")\n if duplicate_outfile == classfile_in:\n duplicate_outfile += \"_duplicated_only.csv\"\n\n print(\"Found %d duplicate classifications (%.2f percent of total).\" % (n_dups, float(n_dups)/float(n_class)*100.0))\n\n # get the duplicate classifications and save them before we remove them\n #class_dups = pd.DataFrame(subj_classifications.apply(get_duplicate_ids))\n\n # if you want to keep a record of everything with just the dups flagged,\n # this is your thing\n #dups_flagged = pd.merge(classifications, classifications_nodups['classification_id subject_id'.split()], how='outer', on='classification_id', suffixes=('', '_2'), 
indicator=True)\n # if you just need something that has only the dups in it, here you go\n dups_only = classifications[~classifications.isin(classifications_nodups)].dropna(how='all')\n\n # dups_only has the duplicates only - not the original classification in each set\n # i.e. if classifications 123, 456, and 789 are all from the same user\n # classifying the same subject, dups_only will only contain classifications\n # 456 and 789. When we save the duplicate classifications we want to save\n # the initial classification (that was later duplicated) as well, so we\n # need to retrieve those.\n # I don't see a really easy way to do it based on the groupby we already did\n # (subj_classifications)\n # so let's just define what identifies the duplicate (user_name + subject_ids)\n # and pick them out.\n # even for a reasonably big dataset this is relatively fast (seconds, not minutes)\n try:\n dups_only['user_subj_pair'] = dups_only['user_name']+'_'+dups_only['subject_ids'].astype(int).astype(str)+'_'+dups_only['workflow_id'].astype(str)+'v'+dups_only['workflow_version'].astype(str)\n except:\n dups_only['user_subj_pair'] = dups_only['user_name']+'_'+dups_only['subject_ids'].astype(str)+'_'+dups_only['workflow_id'].astype(str)+'v'+dups_only['workflow_version'].astype(str)\n\n # n_dup_pairs tracks unique user-subject pairs that were duplicated\n dup_pairs = dups_only['user_subj_pair'].unique()\n n_dup_pairs = len(dup_pairs)\n\n try:\n classifications['user_subj_pair'] = classifications['user_name']+'_'+classifications['subject_ids'].astype(int).astype(str)+'_'+classifications['workflow_id'].astype(str)+'v'+classifications['workflow_version'].astype(str)\n except:\n classifications['user_subj_pair'] = classifications['user_name']+'_'+classifications['subject_ids'].astype(str)+'_'+classifications['workflow_id'].astype(str)+'v'+classifications['workflow_version'].astype(str)\n\n # this keeps things that are any part of a duplicate set, including first\n is_a_dup = classifications['user_subj_pair'].isin(dup_pairs)\n\n class_dups = classifications[is_a_dup].copy()\n # counts any classification that is any part of a duplicate set\n n_partofdup = len(class_dups)\n\n class_dups.to_csv(duplicate_outfile)\n #print(class_dups.head(3))\n\n # now throw away the duplicates (but keep the first of each set) from\n # the main classifications table\n classifications = pd.DataFrame(classifications_nodups)\n\n del class_dups\n del is_a_dup\n print(\"Duplicates removed from analysis (%d unique user-subject-workflow groups).\" % n_dup_pairs)\n\n del subj_classifications\n del classifications_nodups\n gc.collect()\n\n\nclassifications['created_day'] = [q[:10] for q in classifications.created_at]\n\nfirst_class_day = min(classifications.created_day).replace(' ', '')\nlast_class_day = max(classifications.created_day).replace(' ', '')\n\n\n# save processing time and memory in the groupby.apply(); only keep the columns we're going to use or want to save\nif output_csv:\n if not keep_allcols:\n # if we'll be writing to a file at the end of this we need to save a few extra columns\n cols_used = [\"classification_id\", \"user_name\", \"user_id\", \"user_ip\", \"created_at\", \"created_day\", \"metadata\", \"meta_json\", \"subject_ids\", \"workflow_id\", \"workflow_version\"]\nelse:\n if not keep_allcols:\n cols_used = [\"classification_id\", \"user_name\", \"user_id\", \"user_ip\", \"created_at\", \"created_day\", \"meta_json\", \"subject_ids\"]\nclassifications = classifications[cols_used]\n# collect() calls 
PyInt_ClearFreeList(), so explicitly helps free some active memory\ngc.collect()\n\n# grab the subject counts\nn_subj_tot = len(classifications.subject_ids.unique())\nby_subject = classifications.groupby('subject_ids')\nsubj_class = by_subject.created_at.aggregate('count')\n\n# basic stats on how classified the subjects are\nsubj_class_mean = np.mean(subj_class)\nsubj_class_med = np.median(subj_class)\nsubj_class_min = np.min(subj_class)\nsubj_class_max = np.max(subj_class)\n\n# free up some memory - note calling this does take CPU time but\n# can free up GBs of active memory for big classification files\ndel by_subject\ngc.collect()\n\n\n# index by created_at as a timeseries\n# note: this means things might not be uniquely indexed\n# but it makes a lot of things easier and faster.\n# update: it's not really needed in the main bit, but will do it on each group later.\n#classifications.set_index('created_at_ts', inplace=True)\n\n\n# get some user information\nall_users = classifications.user_name.unique()\nby_user = classifications.groupby('user_name')\n\n# also count IP addresses\nn_ip = len(classifications.user_ip.unique())\n\n# get total classification and user counts\nn_class_tot = len(classifications)\nn_users_tot = len(all_users)\n\nunregistered = [q.startswith(\"not-logged-in\") for q in all_users]\nn_unreg = sum(unregistered)\nn_reg = n_users_tot - n_unreg\n\nis_unreg_class = [q.startswith(\"not-logged-in\") for q in classifications.user_name]\nn_unreg_class = sum(is_unreg_class)\nn_reg_class = n_class_tot - n_unreg_class\n\n# for the leaderboard, which I recommend project builders never make public because\n# Just Say No to gamification\n# But it's still interesting to see who your most prolific classifiers are, and\n# e.g. whether they're also your most prolific Talk users\nnclass_byuser = by_user.created_at.aggregate('count')\nnclass_byuser_ranked = nclass_byuser.copy()\nnclass_byuser_ranked.sort_values(inplace=True, ascending=False)\n# rename the columns properly so they'll print as useful csv headers\nnclass_byuser_ranked.name = 'user_name'\nnc = pd.DataFrame(nclass_byuser_ranked)\nnc.columns = ['n_class']\n\n# write this to a file, so you don't have to re-calculate it later\nnclass_byuser_outfile = classfile_in.replace(\".csv\", \"_nclass_byuser_ranked.csv\")\n# don't accidentally overwrite the classifications file just because someone\n# renamed it to not end in .csv\nif nclass_byuser_outfile == classfile_in:\n nclass_byuser_outfile = \"project_nclass_byuser_ranked.csv\"\nnc.to_csv(nclass_byuser_outfile)\n\n# very basic stats\nnclass_med = np.median(nclass_byuser)\nnclass_mean = np.mean(nclass_byuser)\n\n# Gini coefficient - see the comments above the gini() function for more notes\nnclass_gini = gini(nclass_byuser)\n\nprint(\"\\nOverall:\\n\\n%d classifications of %d subjects by %d classifiers,\" % (n_class_tot,n_subj_tot,n_users_tot))\nprint(\"%d logged in and %d not logged in, from %d unique IP addresses.\" % (n_reg,n_unreg,n_ip))\nprint(\"%d classifications were from logged-in users, %d from not-logged-in users.\\n\" % (n_reg_class, n_unreg_class))\nprint(\"That's %.2f classifications per subject on average (median = %.1f).\" % (subj_class_mean, subj_class_med))\nprint(\"The most classified subject has %d classifications; the least-classified subject has %d.\\n\" % (subj_class_max,subj_class_min))\nprint(\"Median number of classifications per user: %.2f\" %nclass_med)\nprint(\"Mean number of classifications per user: %.2f\" % nclass_mean)\nprint(\"\\nTop 10 most prolific 
classifiers:\")\nprint(nclass_byuser_ranked.head(10))\nprint(\"\\n\\nGini coefficient for classifications by user: %.2f\" % nclass_gini)\nprint(\"\\nClassifications were collected between %s and %s.\" % (first_class_day, last_class_day))\nprint(\"The highest classification id considered here is %d.\\n\" % max(classifications.classification_id))\n\n\n# if the input specified we should compute total time spent by classifiers, compute it\nif time_elapsed:\n # free up some memory\n # do this inside the if because if we're not computing times then the program\n # is about to end so this memory will be freed up anyway\n del unregistered\n del by_user\n gc.collect()\n\n\n classifications['started_at_str'] = [q['started_at'].replace('T',' ').replace('Z', '') for q in classifications.meta_json]\n classifications['finished_at_str'] = [q['finished_at'].replace('T',' ').replace('Z', '') for q in classifications.meta_json]\n\n sa_temp = classifications['started_at_str']\n fa_temp = classifications['finished_at_str']\n\n #print(\"Creating timeseries...\")#,datetime.datetime.now().strftime('%H:%M:%S.%f')\n\n\n try:\n classifications['started_at'] = pd.to_datetime(sa_temp, format='%Y-%m-%d %H:%M:%S.%f')\n except Exception as the_error:\n print(\"Oops:\\n%s\" % the_error)\n try:\n classifications['started_at'] = pd.to_datetime(sa_temp, format='%Y-%m-%d %H:%M:%S %Z')\n except Exception as the_error:\n print(\"Oops:\\n%s\" % the_error)\n classifications['started_at'] = pd.to_datetime(sa_temp)\n\n\n try:\n classifications['finished_at'] = pd.to_datetime(fa_temp, format='%Y-%m-%d %H:%M:%S.%f')\n except Exception as the_error:\n print(\"Oops:\\n%s\" % the_error)\n try:\n classifications['finished_at'] = pd.to_datetime(fa_temp, format='%Y-%m-%d %H:%M:%S %Z')\n except Exception as the_error:\n print(\"Oops:\\n%s\" % the_error)\n classifications['finished_at'] = pd.to_datetime(fa_temp)\n\n # we did all that above so that this would only take one line and be quite fast\n classifications['class_t_length'] = (classifications.finished_at - classifications.started_at)\n\n # throw away absurd time counts: accept lengths between 0 < dt < 30 minutes\n # anything outside that is either a wrongly reported time or the user walked away from their computer\n ok_times = (classifications.class_t_length > np.timedelta64(0, 's')) & (classifications.class_t_length < np.timedelta64(30, 'm'))\n\n # how many turned out to be okay?\n n_t_ok = sum(ok_times)\n\n # compute total times\n time_spent_classifying = np.sum(classifications['class_t_length'][ok_times])\n days_spent_classifying = time_spent_classifying / np.timedelta64(1, 'D')\n frac_good_durations = float(n_t_ok)/float(n_class_tot)\n\n print(\"Based on %d classifications (%.1f percent) where we can probably\\ntrust the classification durations, the classifiers spent a total of %.2f days\\n(or %.2f years) classifying in the project.\\n\" % (n_t_ok, frac_good_durations*100., days_spent_classifying, days_spent_classifying / 365.))\n\n mean_t_class = np.mean(classifications['class_t_length'][ok_times])\n median_t_class = np.median(classifications['class_t_length'][ok_times])\n\n human_effort_extrap = float(n_class_tot)*float(mean_t_class / np.timedelta64(1, 'D')) / 365. 
# in years\n\n print(\"Mean classification length: %8.1f seconds\" % float(mean_t_class / np.timedelta64(1, 's')))\n print(\"Median classification length: %6.1f seconds\" % float(median_t_class / np.timedelta64(1, 's')))\n\n\n\n print(\"\\nIf we use the mean to extrapolate and include the %.1f percent of\\nclassifications where the reported duration had an error, that means\\nthe total time spent is equivalent to %.2f years of human effort, or\\n%.2f years of FTE (1 person working 40 hours/week, no holiday.)\\n\" % ((1-frac_good_durations)*100., human_effort_extrap, human_effort_extrap * (24.*7.)/40.))\n\nif output_csv:\n # free up what memory we can before doing this (matters for big files)\n if time_elapsed:\n del ok_times\n del sa_temp\n del fa_temp\n del nclass_byuser\n del all_users\n del subj_class\n gc.collect()\n\n if keep_allcols:\n classifications[cols_out].to_csv(outfile_csv)\n else:\n classifications.to_csv(outfile_csv)\n print(\"File with used subset of classification info written to %s .\" % outfile_csv)\n\nprint(\"File with ranked list of user classification counts written to %s .\" % nclass_byuser_outfile)\n\nif remove_duplicates:\n if (n_dups > 0):\n print(\"Saved info for all classifications that have duplicates to %s .\" % duplicate_outfile)\n\n\n#end\n",
"import pandas\nimport matplotlib.pyplot as plt\n\n# inputs\nbrids_over_time_path = 'all_birds_over_time.csv'\n\nbirds = pandas.read_csv(brids_over_time_path)\nbirds.date = pandas.to_datetime(birds.date, format='%Y-%m-%d %H:%M:%S')\nbirds.sort_values('date', inplace=True)\n\nfor ct, site in enumerate(birds.site.unique()):\n sdx = birds.site == site\n birds_site = birds[sdx]\n site_count = birds_site.groupby(birds_site.date.map(lambda x: x.strftime('%Y-%m-%d')))['kittiwakes', 'guillemots', 'chicks', 'others'].sum()\n\n fig = plt.figure(ct, figsize=(12, 8))\n ax1 = plt.subplot(211)\n ax2 = plt.subplot(212)\n date = pandas.to_datetime(site_count.index)\n ax1.plot(date, site_count.kittiwakes, 'C8', label='Kittiwakes')\n ax1.plot(date, site_count.guillemots, 'C0', label='Guillemots')\n ax2.plot(date, site_count.others, 'C3', label='Others')\n ax2.plot(date, site_count.chicks, 'C6', label='Chicks')\n ax1.set_xlabel('Date')\n ax1.set_ylabel('Number')\n ax1.legend()\n ax2.set_xlabel('Date')\n ax2.set_ylabel('Number')\n ax2.legend()\n plt.tight_layout()\n plt.savefig('{0}_by_day.png'.format(site))\n plt.close(fig)\n",
"import sys, os\nimport numpy as np\nimport pandas as pd\nimport ujson\nfrom scipy.interpolate import interp1d\nimport scipy.ndimage\nfrom ast import literal_eval\n\n\n\nfrom get_workflow_info import get_workflow_info\n\nproject_name = \"planetary-response-network-and-rescue-global-caribbean-storms-2017\"\n\n# st thomas DG\n#ssid = 14759\n\n# St John DG\nssid = 14806\n\n# St John Planet\n#ssid = 14813\n\n# Puerto Rico before only\n#ssid = 14929\n\n# Turks and Caicos Cockburn Town DG/Planet\nssid = 14827\n\n# DG - Barbuda\nssid = 14896\n\n# DG - Antigua\nssid = 14930\n\n# Planet - Dominica\nssid = 14988\n\n\nactive_subject_sets = [ssid]\n\n\n#infile = \"%s-classifications.csv\" % project_name\n\n#infile = 'damage-floods-blockages-shelters-landsat-classifications.csv'\ninfile = 'damage-floods-blockages-shelters-classifications.csv'\n#infile = 'damages-floods-blockages-shelters-planet-labs-classifications.csv'\n#infile = 'planetary-response-network-and-rescue-global-caribbean-storms-2017-classifications_wfid4958_nodups_inclnonlive.csv'\n\ntry:\n infile = sys.argv[1]\nexcept:\n pass\n\nworkflow_version = -1\nworkflow_id = 4958\nfreetext = ''\noutdir = \"outfiles\"\nsubject_file_set_by_user = False\n\n# check for other command-line arguments\nif len(sys.argv) > 1:\n # if there are additional arguments, loop through them\n for i_arg, argstr in enumerate(sys.argv[1:]):\n arg = argstr.split('=')\n\n if (arg[0] == \"workflow_id\") | (arg[0] == \"wfid\"):\n workflow_id = int(arg[1])\n elif (arg[0] == \"workflow_version\") | (arg[0] == \"wfv\"):\n workflow_version = arg[1]\n elif (arg[0] == \"subject_set_id\") | (arg[0] == \"ssid\"):\n # might be passed as an int, might be passed as a list\n try:\n ssid = int(arg[1])\n ssid_str = arg[1]\n active_subject_sets = [ssid]\n except:\n active_subject_sets = literal_eval(arg[1])\n ssid = active_subject_sets[0]\n ssid_str = '%d' % ssid\n for i in range(len(active_subject_sets)):\n if i > 0:\n ssid_str = '%s_%d' % (ssid_str, active_subject_sets[i])\n\n elif (arg[0] == \"name\") | (arg[0] == \"stub\") | (arg[0] == \"freetext\"):\n freetext = arg[1]\n elif (arg[0] == \"outdir\"):\n outdir = arg[1]\n elif (arg[0] == \"subj\") | (arg[0] == \"subjects\") | (arg[0] == \"subjectfile\") | (arg[0] == \"subject_file\"):\n subjectfile = arg[1]\n subject_file_set_by_user = True\n\n\nworkflow_file = \"%s-workflows.csv\" % project_name\nworkflow_contents_file = \"%s-workflow_contents.csv\" % project_name\n# if this subject file doesn't already exist, run get_subject_sizes.py\n# note it has to download images to determine imsize (in pixels) so generate it some\n# other way if you already have that info\nif not subject_file_set_by_user:\n subjectfile = \"%s-subjects_enhancedinfo_ssids_%s.csv\" % (project_name, ssid_str)\n\n\n# these files will/may be written to\noutfile_nodir = \"%s-marks-points_wfid_%d.csv\" % (project_name, workflow_id)\nblankfile_nodir = \"%s-marks-blank_wfid_%d.csv\" % (project_name, workflow_id)\nshortcutfile_nodir = \"%s-marks-unclassifiable_wfid_%d.csv\" % (project_name, workflow_id)\nquestionfile_nodir = \"%s-questions_wfid_%d.csv\" % (project_name, workflow_id)\n\noutfile = \"%s/%s\" % (outdir, outfile_nodir)\nblankfile = \"%s/%s\" % (outdir, blankfile_nodir)\nshortcutfile = \"%s/%s\" % (outdir, shortcutfile_nodir)\nquestionfile = \"%s/%s\" % (outdir, questionfile_nodir)\n\n\n# the order of tools is from the workflow information - as is the fact the\n# marks are in task T2\ntools = ['Road Blockage', 'Flood', 'Temporary Settlement', 'Structural 
Damage']\nmark_count = [0, 0, 0, 0]\n\n\nshortcuts = ['Unclassifiable Image', 'Ocean Only (no land)']\nshortcut_mark_count = [0, 0]\n\n\n# for the structural damage subtask, if it exists\ndetails = ['Minor', 'Moderate', 'Catastrophic']\n\n\n\n\n\n\n\ndef get_wf_basics(workflow_id):\n # I should be able to do this marking_tasks, shortcuts, questions etc\n # automatically from workflow_info BUT NOT RIGHT NOW\n # Guadeloupe\n if workflow_id == 4928:\n workflow_version = '18.53'\n marking_tasks = ['T0']\n question_tasks = ['']\n shortcut_tasks = ['T1']\n struc_subtask = False\n\n # Turks and Caicos - Landsat 8\n elif workflow_id == 4970:\n workflow_version = '5.8'\n marking_tasks = ['T0']\n question_tasks = ['T2']\n shortcut_tasks = ['T1', 'T3']\n struc_subtask = False\n\n\n # St Thomas - Digital Globe\n # also anything that uses DG\n elif workflow_id == 4958:\n workflow_version = '17.60'\n marking_tasks = ['T0']\n question_tasks = ['T2']\n shortcut_tasks = ['T1', 'T3']\n struc_subtask = True\n\n # Clone of the DG workflow but for Planet data\n elif workflow_id == 4975:\n workflow_version = '1.1' # could also be 2.2 if Dominica or later\n marking_tasks = ['T0']\n question_tasks = ['T2']\n shortcut_tasks = ['T1', 'T3']\n #struc_subtask = True # even though I doubt these are trustworthy\n struc_subtask = False\n\n # Puerto Rico before only\n elif workflow_id == 5030:\n workflow_version = '3.8'\n marking_tasks = []\n question_tasks = ['T2']\n shortcut_tasks = ['T1', 'T3']\n struc_subtask = False\n\n\n # Clone of the Planet-only workflow but only the damage marking question\n elif workflow_id == 5071:\n workflow_version = '2.3' # could also be 2.2 if Dominica or later\n marking_tasks = ['T0']\n question_tasks = []\n shortcut_tasks = ['T1', 'T3']\n struc_subtask = False\n\n\n\n return workflow_version, marking_tasks, question_tasks, shortcut_tasks, struc_subtask\n\n\n\n\n\ndef get_coords_mark(markinfo):\n\n row = markinfo[1]\n # print(markinfo)\n # print(\"-----\")\n # print(row)\n # print(\"\\n\\n\")\n\n mark_x = row['x']\n mark_y = row['y']\n\n the_x = np.array([row['x_min'], row['imsize_x_pix']])\n the_y = np.array([row['y_min'], row['imsize_y_pix']])\n the_lon = np.array([row['lon_min'], row['lon_max']])\n the_lat = np.array([row['lat_min'], row['lat_max']])\n\n # don't throw an error if the coords are out of bounds, but also don't extrapolate\n f_x_lon = interp1d(the_x, the_lon, bounds_error=False, fill_value=(None, None))\n f_y_lat = interp1d(the_y, the_lat, bounds_error=False, fill_value=(None, None))\n\n return f_x_lon(mark_x), f_y_lat(mark_y)\n\n\n\n\ndef get_projection(projection_in):\n# # for now let's just return the same projection for everything\n# # this is for Sentinel 2\n# return Proj(init='epsg:32620')\n # if you're supplying anything with a colon like 'epsg:32619', you need init=.\n # if you are supplying something more like '+proj=utm +zone=19 +datum=WGS84 +units=m +no_defs ', which comes from e.g. 
gdal, using init= will crash things\n # even though those two strings represent the same projection\n # what fun this is\n try:\n inProj = Proj(projection_in)\n except:\n try:\n inProj = Proj(init=projection_in)\n except:\n # just assume a default\n inProj = Proj(init='epsg:32620')\n\n return inProj\n\n# takes a single metadata row\ndef get_corner_latlong(meta_json, projection_in):\n # in some cases we've included the corner lat and long in the metadata, in other cases not quite, but we can get that info\n # recall that longitude is the x direction, latitude is the y direction\n # BDS-created subjects have min and max lat and long so we can read it directly\n try:\n lon_min = meta_json['lon_min']\n lon_max = meta_json['lon_max']\n lat_min = meta_json['lat_min']\n lat_max = meta_json['lat_max']\n except:\n # some of the subjects have the corners given in unprojected units\n # which are in meters, but with actual value set by a global grid\n x_m_min = meta_json['#tile_UL_x']\n y_m_max = meta_json['#tile_UL_y']\n x_m_max = meta_json['#tile_LR_x']\n y_m_min = meta_json['#tile_LR_y']\n\n #print(meta_json)\n #print((x_m_min, y_m_min, x_m_max, y_m_max))\n\n #f_x_lon, f_y_lat = get_interp_grid(subjects, ssid)\n try:\n inProj = get_projection(meta_json['projection_orig'])\n except:\n inProj = get_projection(ssid)\n\n outProj = Proj(init='epsg:4326')\n\n lon_min, lat_min = transform(inProj,outProj,x_m_min,y_m_min)\n lon_max, lat_max = transform(inProj,outProj,x_m_max,y_m_max)\n\n #print((lon_min, lat_min, lon_max, lat_max))\n #print(\"\\n\")\n\n return lon_min, lon_max, lat_min, lat_max\n\n\n\nwfv, marking_tasks, question_tasks, shortcut_tasks, struc_subtask = get_wf_basics(workflow_id)\n# don't overwrite the workflow version if it's specified at the prompt\nif workflow_version < 1:\n workflow_version = wfv\n\n# okay turns out we didn't really need this but I'm hoping it will make it easier to generalize later\nworkflow_df = pd.read_csv(workflow_file)\nworkflow_cdf = pd.read_csv(workflow_contents_file)\nworkflow_info = get_workflow_info(workflow_df, workflow_cdf, workflow_id, workflow_version)\n\n\n\n\n\nclassifications_all = pd.read_csv(infile)\n\nclassifications_all['anno_json'] = [ujson.loads(q) for q in classifications_all['annotations']]\n\n# it's either True or it's blank, so change the blanks to explicitly False\nclassifications_all['gs'] = np.array(classifications_all.gold_standard, dtype=bool)\n\n# only use classifications from the workflow & version we care about\nclassifications_all['workflow_major'] = [int(q) for q in classifications_all.workflow_version]\nworkflow_version_major = int((workflow_version.split('.'))[0])\nin_workflow = classifications_all.workflow_major == workflow_version_major\n\nclassifications = classifications_all[in_workflow]\n\n\n'''\nI noticed during a previous project that pandas (I think it was pandas\nand not some more fundamental property of python itself?) seemed *very*\nslow when trying to build a large dataframe or series of marks and then write\nthe whole thing to a file in one go. 
For a project with lots of classifications\nit will be much faster to write line-by-line to a csv file and then, if needed,\nread in the csv file at the end of the loop through the classifications.\n\n'''\n\n# these are unnecessary if you're running this from a prompt but if you're copy-pasting in iPython they're needed so things below don't break\ntry:\n del fmarks\nexcept:\n pass\n\ntry:\n del fempty\nexcept:\n pass\n\ntry:\n del fquest\nexcept:\n pass\n\n# all markers for this project are a point so we're putting them all in the same file\n# likewise for the question task - there's just one so put everything in 1 file\n# we'll assume there is at least 1 mark and 1 question answer in the project so that this file will not end up empty\n# if we're wrong it won't crash, it'll just be a file with only a header line\n# (we don't make that assumption with the blanks file, so we only open/write to it if it's needed)\nfmarks = open(outfile, \"w\")\nfquest = open(questionfile, \"w\")\n\n# write the header line for the file\n# file has the basic classification information + the mark information\n# including sanity check stuff + stuff we may never need, like the tool number\n# and the frame the user drew the mark on, respectively\n\n# all markers are a point: {(x, y)}\nif struc_subtask:\n fmarks.write(\"mark_id,classification_id,subject_id,created_at,user_name,user_id,user_ip,tool,label,how_damaged,frame,x,y\\n\")\nelse:\n fmarks.write(\"mark_id,classification_id,subject_id,created_at,user_name,user_id,user_ip,tool,label,frame,x,y\\n\")\n\nfquest.write(\"classification_id,subject_id,created_at,user_name,user_id,user_ip,question,label,gold_standard\\n\")\n\n\n\n# now extract the marks from each classification\n# people who say Python should never need for loops are either way better at it\n# than I am or have never dealt with Zooniverse classification exports\n# (or both)\ni_empty = 0\ni_mark = 0\ni_shortcut = 0\ni_question = 0\ni_exception = 0\nexception_rows = []\nfor i, row in enumerate(classifications.iterrows()):\n # row[0] is the index, [1] is the classification info\n cl = row[1]\n\n class_id = cl['classification_id']\n subject_id = cl['subject_ids']\n created_at = cl['created_at']\n username = cl['user_name']\n userid = cl['user_id']\n userip = cl['user_ip']\n is_gs = cl['gs']\n\n # for anonymous users the userid field is blank so reads as NaN\n # which will throw an error later\n if np.isnan(userid):\n userid = -1\n\n # loop through annotations in this classification\n # (of which there can be arbitrarily many)\n for j, anno in enumerate(cl['anno_json']):\n\n thetask = anno['task']\n #thelabel = anno['task_label']\n\n if thetask in marking_tasks:\n\n #tool_label = anno['tool_label']\n\n # first, if this classification is blank, just write the basic information\n # this will keep track of classifications where the user said there was nothing there\n # these may be important for some user weighting schemes etc.\n if len(anno['value']) < 1:\n i_empty+=1\n try:\n # this will be fine for every empty mark except the first one\n fempty.write(\"%d,%d,\\\"%s\\\",\\\"%s\\\",%d,%s\\n\" % (class_id, subject_id, created_at, username, userid, userip))\n except:\n # if the file isn't already opened, open it and write a header\n fempty = open(blankfile, \"w\")\n # the blank table just needs the classification information\n fempty.write(\"classification_id,subject_id,created_at,user_name,user_id,user_ip\\n\")\n fempty.write(\"%d,%d,\\\"%s\\\",\\\"%s\\\",%d,%s\\n\" % (class_id, subject_id, created_at, 
username, userid, userip))\n\n else:\n # it's not empty, so let's collect other info\n # the marks themselves are in anno['value'], as a list\n for i_v, thevalue in enumerate(anno['value']):\n\n # how we write to the file (and which file) depends on which tool\n # is being used\n #\n # the annotation json returns an integer that's the index of the\n # tools array we defined earlier\n # obviously I could just use the integer but this is easier to read\n # so worry about string vs int compare speeds when you have many\n # millions of classifications\n\n try:\n thetool = tools[thevalue['tool']]\n is_exception = False\n except:\n is_exception = True\n i_exception += 1\n exception_rows.append(row[0])\n\n # I'm not just putting everything below inside the try statement because\n # if something else here crashes, I want it to shout at me\n # failing silently is BAD in aggregation\n if not is_exception:\n i_mark+=1\n thedeets = ''\n\n #'Road Blockage', 'Flood', 'Temporary Settlement', 'Structural Damage'\n if thetool == \"Road Blockage\":\n mark_count[0] += 1\n how_damaged = ''\n\n if thetool == \"Flood\":\n mark_count[1] += 1\n how_damaged = ''\n\n if thetool == \"Temporary Settlement\":\n mark_count[2] += 1\n how_damaged = ''\n\n if thetool == \"Structural Damage\":\n mark_count[3] += 1\n how_damaged = ''\n if struc_subtask:\n # filling this in is optional\n if thevalue['details'][0]['value'] is None:\n thedeets = 'Not Given'\n else:\n thedeets = details[thevalue['details'][0]['value']]\n\n\n\n if thetool in tools:\n if struc_subtask:\n fmarks.write(\"%d,%d,%d,\\\"%s\\\",\\\"%s\\\",%d,%s,%d,\\\"%s\\\",\\\"%s\\\",%d,%.2f,%.2f\\n\" % (i_mark, class_id, subject_id, created_at, username, userid, userip, thevalue['tool'], thetool, thedeets, thevalue['frame'], thevalue['x'], thevalue['y']))\n else:\n fmarks.write(\"%d,%d,%d,\\\"%s\\\",\\\"%s\\\",%d,%s,%d,\\\"%s\\\",%d,%.2f,%.2f\\n\" % (i_mark, class_id, subject_id, created_at, username, userid, userip, thevalue['tool'], thetool, thevalue['frame'], thevalue['x'], thevalue['y']))\n\n\n\n if thetask in question_tasks:\n i_question+=1\n # we currently only have single-answer-permitted question tasks so we don't need to loop through values\n thevalue = anno['value']\n theslug = workflow_info[thetask]['question_slug']\n #print(\"%d,%d,\\\"%s\\\",\\\"%s\\\",%d,%s,\\\"%s\\\",\\\"%s\\\"\" % (class_id, subject_id, created_at, username, userid, userip, theslug, thevalue))\n try:\n # this will be fine for every shortcut mark except the first one\n fquest.write(\"%d,%d,\\\"%s\\\",\\\"%s\\\",%d,%s,\\\"%s\\\",\\\"%s\\\"\\n\" % (class_id, subject_id, created_at, username, userid, userip, theslug, thevalue))\n except:\n # if the file isn't already opened, open it and write a header\n fquest = open(questionfile, \"w\")\n # the blank table just needs the classification information\n fquest.write(\"classification_id,subject_id,created_at,user_name,user_id,user_ip,question,label,gold_standard\\n\")\n fquest.write(\"%d,%d,\\\"%s\\\",\\\"%s\\\",%d,%s,\\\"%s\\\",\\\"%s\\\"\\n\" % (class_id, subject_id, created_at, username, userid, userip, theslug, thevalue))\n\n\n\n if thetask in shortcut_tasks:\n i_shortcut+=1\n for i_v, thevalue in enumerate(anno['value']):\n try:\n # this will be fine for every shortcut mark except the first one\n fshortcut.write(\"%d,%d,\\\"%s\\\",\\\"%s\\\",%d,%s,\\\"%s\\\",%r\\n\" % (class_id, subject_id, created_at, username, userid, userip, thevalue, is_gs))\n except:\n # if the file isn't already opened, open it and write a header\n fshortcut = 
open(shortcutfile, \"w\")\n # the blank table just needs the classification information\n fshortcut.write(\"classification_id,subject_id,created_at,user_name,user_id,user_ip,label,gold_standard\\n\")\n fshortcut.write(\"%d,%d,\\\"%s\\\",\\\"%s\\\",%d,%s,\\\"%s\\\",%r\\n\" % (class_id, subject_id, created_at, username, userid, userip, thevalue, is_gs))\n\n\n\n\nfmarks.close()\ntry:\n fempty.close()\nexcept:\n pass\n\ntry:\n fshortcut.close()\nexcept:\n pass\n\ntry:\n fquest.close()\nexcept:\n pass\n\nprint(\"Saved %d marks from %d classifications (of which %d were empty and %s were shortcuts) to %s.\" % (i_mark, len(classifications), i_empty, i_shortcut, outfile))\nprint(\"Saved %d questions from %d classifications to %s.\" % (i_question, len(classifications), questionfile))\nprint(\"Mark breakdown: Road Blockage %d, Flood %d, Temp Settlement %d, Structural damage %d\\n\" % tuple(mark_count))\n\n\n# now read in those mark files and match them to subjects\nprint(\"Matching to subjects in %s ...\" % subjectfile)\n\nsubjects_all = pd.read_csv(subjectfile)\n#active_subject_sets = [14709, 14710, 14746, 14750, 14759, 14764, 14770, 14773, 14806, 14813, 14929]\n#active_subject_sets = [ssid]\nis_active = np.array([q in active_subject_sets for q in subjects_all.subject_set_id])\n#in_workflow = subjects_all.workflow_id == workflow_id\n#subjects = (subjects_all[is_active & in_workflow]).copy()\nsubjects = (subjects_all[is_active]).copy()\n\nif len(subjects) > 0:\n\n subjects['meta_json'] = [ujson.loads(q) for q in subjects.metadata]\n # this should all already be there\n # subjects['loc_json'] = [ujson.loads(q) for q in subjects.locations]\n # subjects['loc_im0'] = [q['0'] for q in subjects.loc_json]\n #\n # coords = [get_corner_latlong(q) for q in subjects['meta_json']]\n # #lon_min, lon_max, lat_min, lat_max\n # subjects['lon_min'] = [q[0] for q in coords]\n # subjects['lon_max'] = [q[1] for q in coords]\n # subjects['lat_min'] = [q[2] for q in coords]\n # subjects['lat_max'] = [q[3] for q in coords]\n\n ################################## matching marks\n # read in the mark file we've just written\n file_mark_compact = ''\n if i_mark > 0:\n themarks = pd.read_csv(outfile)\n\n # match the marks to the subjects by subject ID\n marks_subj = pd.merge(themarks, subjects, how='left', on='subject_id', suffixes=('', '_2'))\n\n # now we have marks in pixel coordinates and we have the corner coordinates in both x,y and lon, lat\n\n marks_subj['x_min'] = np.ones_like(marks_subj.subject_id)\n marks_subj['y_min'] = np.ones_like(marks_subj.subject_id)\n\n marks_coords = [get_coords_mark(q) for q in marks_subj.iterrows()]\n marks_subj['lon_mark'] = np.array([q[0] for q in marks_coords], dtype=float)\n marks_subj['lat_mark'] = np.array([q[1] for q in marks_coords], dtype=float)\n\n in_bounds = np.invert(np.isnan(marks_subj['lon_mark'])) & np.invert(np.isnan(marks_subj['lat_mark']))\n\n marks_subj_clean = marks_subj[in_bounds]\n\n # columns we'd like to save in the subjects, in save order\n subj_cols_out = [u'lon_min', u'lon_max', u'lat_min', u'lat_max', u'filesize_bytes', u'imsize_x_pix', u'imsize_y_pix', 'subject_set_id', 'locations', 'classifications_count', 'retired_at', 'retirement_reason', 'metadata']\n\n #themarks.set_index('mark_id', inplace=True)\n\n # save all columns from the mark file\n mark_cols_out = (themarks.columns.values).tolist()\n\n # columns based on the intersection of these\n markcoords_cols = ['lon_mark', 'lat_mark']\n\n all_cols_out = mark_cols_out + markcoords_cols + subj_cols_out\n\n 
outfile_wsubj = \"%s/%s%s\" % (outdir, freetext, outfile_nodir.replace(\".csv\", \"-wsubjinfo.csv\"))\n\n marks_subj_clean[all_cols_out].to_csv(outfile_wsubj)\n\n mark_cols_clean_out = 'mark_id,classification_id,subject_id,created_at,user_name,user_id,user_ip,tool,label,how_damaged,frame,x,y,lon_mark,lat_mark,lon_min,lon_max,lat_min,lat_max,imsize_x_pix,imsize_y_pix'.split(',')\n if not struc_subtask:\n mark_cols_clean_out.remove('how_damaged')\n\n file_mark_compact = outfile_wsubj.replace(\".csv\", \"-compact.csv\")\n marks_subj_clean[mark_cols_clean_out].to_csv(file_mark_compact)\n\n print(\"%d marks out of %d matched to %d subjects; result output in %s.\" % (len(marks_subj_clean), i_mark, len(subjects), outfile_wsubj))\n\n\n subj_cols_compact = 'lon_min lon_max lat_min lat_max imsize_x_pix imsize_y_pix'.split()\n\n blankfile_wsubj = ''\n ################################## matching blanks to subject info\n if i_empty > 0:\n theblanks = pd.read_csv(blankfile)\n blanks_subj = pd.merge(theblanks, subjects, how='left', on='subject_id', suffixes=('', '_2'))\n blank_cols_out = (theblanks.columns.values).tolist()\n all_cols_out = blank_cols_out + subj_cols_compact\n blankfile_wsubj = \"%s/%s%s\" % (outdir, freetext, blankfile_nodir.replace(\".csv\", \"-wsubjinfo.csv\"))\n blanks_subj[all_cols_out][np.invert(np.isnan(blanks_subj.imsize_y_pix))].to_csv(blankfile_wsubj)\n print(\" ... saved %s\" % blankfile_wsubj)\n\n\n questionfile_wsubj = ''\n ################################## matching questions to subject info\n if i_question > 0:\n thequestions = pd.read_csv(questionfile)\n questions_subj = pd.merge(thequestions, subjects, how='left', on='subject_id', suffixes=('', '_2'))\n question_cols_out = (thequestions.columns.values).tolist()\n all_cols_out = question_cols_out + subj_cols_compact\n questionfile_wsubj = \"%s/%s%s\" % (outdir, freetext, questionfile_nodir.replace(\".csv\", \"-wsubjinfo.csv\"))\n questions_subj[all_cols_out][np.invert(np.isnan(questions_subj.imsize_y_pix))].to_csv(questionfile_wsubj)\n print(\" ... saved %s\" % questionfile_wsubj)\n\n\n shortcutfile_wsubj = ''\n ################################## matching shortcuts to subject info\n if i_shortcut > 0:\n theshortcuts = pd.read_csv(shortcutfile)\n shortcuts_subj = pd.merge(theshortcuts, subjects, how='left', on='subject_id', suffixes=('', '_2'))\n shortcut_cols_out = (theshortcuts.columns.values).tolist()\n all_cols_out = shortcut_cols_out + subj_cols_compact\n shortcutfile_wsubj = \"%s/%s%s\" % (outdir, freetext, shortcutfile_nodir.replace(\".csv\", \"-wsubjinfo.csv\"))\n shortcuts_subj[all_cols_out][np.invert(np.isnan(shortcuts_subj.imsize_y_pix))].to_csv(shortcutfile_wsubj)\n print(\" ... saved %s\" % shortcutfile_wsubj)\n\n print(\"Your next move might be something like:\")\n print(\"tar -czvf %sclassifications_marks_matched.tar.gz %s %s %s %s %s\" % (freetext, subjectfile, file_mark_compact, blankfile_wsubj, questionfile_wsubj, shortcutfile_wsubj))\n\n\nelse:\n print(\"OOPS: after filtering by subject set and workflow, you don't have any subjects to match to!\")\n\n\n\n\nif i_exception > 0:\n print(\"WARNING: There were %d exceptions (mark classification not formatted as expected). They were in rows:\\n\" % i_exception)\n print(exception_rows)\n\n\n#end\n"
] | [
[
"pandas.read_csv",
"pandas.to_datetime",
"numpy.min",
"numpy.median",
"pandas.DataFrame",
"numpy.timedelta64",
"numpy.max",
"numpy.mean",
"numpy.array",
"numpy.sum"
],
[
"matplotlib.pyplot.tight_layout",
"pandas.read_csv",
"pandas.to_datetime",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure"
],
[
"pandas.merge",
"pandas.read_csv",
"numpy.ones_like",
"numpy.isnan",
"scipy.interpolate.interp1d",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
welly87/zipline | [
"dbdfa8ed86417f954e95bd7468e144589f2cd482",
"dbdfa8ed86417f954e95bd7468e144589f2cd482"
] | [
"zipline/pipeline/term.py",
"zipline/utils/tradingcalendar.py"
] | [
"\"\"\"\nBase class for Filters, Factors and Classifiers\n\"\"\"\nfrom abc import ABCMeta, abstractproperty\nfrom bisect import insort\nfrom collections import Mapping\nfrom weakref import WeakValueDictionary\n\nfrom numpy import (\n array,\n dtype as dtype_class,\n ndarray,\n searchsorted,\n)\nfrom six import with_metaclass\n\nfrom zipline.assets import Asset\nfrom zipline.errors import (\n DTypeNotSpecified,\n InvalidOutputName,\n NonExistentAssetInTimeFrame,\n NonSliceableTerm,\n NonWindowSafeInput,\n NotDType,\n NonPipelineInputs,\n TermInputsNotSpecified,\n TermOutputsEmpty,\n UnsupportedDType,\n WindowLengthNotSpecified,\n)\nfrom zipline.lib.adjusted_array import can_represent_dtype\nfrom zipline.lib.labelarray import LabelArray\nfrom zipline.utils.input_validation import expect_types\nfrom zipline.utils.memoize import lazyval\nfrom zipline.utils.numpy_utils import (\n bool_dtype,\n categorical_dtype,\n datetime64ns_dtype,\n default_missing_value_for_dtype,\n)\nfrom zipline.utils.sharedoc import (\n templated_docstring,\n PIPELINE_ALIAS_NAME_DOC,\n PIPELINE_DOWNSAMPLING_FREQUENCY_DOC,\n)\n\nfrom .domain import Domain, GENERIC, infer_domain\nfrom .downsample_helpers import expect_downsample_frequency\nfrom .sentinels import NotSpecified\n\n\nclass Term(with_metaclass(ABCMeta, object)):\n \"\"\"\n Base class for objects that can appear in the compute graph of a\n :class:`zipline.pipeline.Pipeline`.\n\n Notes\n -----\n Most Pipeline API users only interact with :class:`Term` via subclasses:\n\n - :class:`~zipline.pipeline.data.BoundColumn`\n - :class:`~zipline.pipeline.Factor`\n - :class:`~zipline.pipeline.Filter`\n - :class:`~zipline.pipeline.Classifier`\n\n Instances of :class:`Term` are **memoized**. If you call a Term's\n constructor with the same arguments twice, the same object will be returned\n from both calls:\n\n **Example:**\n\n >>> from zipline.pipeline.data import EquityPricing\n >>> from zipline.pipeline.factors import SimpleMovingAverage\n >>> x = SimpleMovingAverage(inputs=[EquityPricing.close], window_length=5)\n >>> y = SimpleMovingAverage(inputs=[EquityPricing.close], window_length=5)\n >>> x is y\n True\n\n .. warning::\n\n Memoization of terms means that it's generally unsafe to modify\n attributes of a term after construction.\n \"\"\"\n # These are NotSpecified because a subclass is required to provide them.\n dtype = NotSpecified\n missing_value = NotSpecified\n\n # Subclasses aren't required to provide `params`. 
The default behavior is\n # no params.\n params = ()\n\n # All terms are generic by default.\n domain = GENERIC\n\n # Determines if a term is safe to be used as a windowed input.\n window_safe = False\n\n # The dimensions of the term's output (1D or 2D).\n ndim = 2\n\n _term_cache = WeakValueDictionary()\n\n def __new__(cls,\n domain=NotSpecified,\n dtype=NotSpecified,\n missing_value=NotSpecified,\n window_safe=NotSpecified,\n ndim=NotSpecified,\n # params is explicitly not allowed to be passed to an instance.\n *args,\n **kwargs):\n \"\"\"\n Memoized constructor for Terms.\n\n Caching previously-constructed Terms is useful because it allows us to\n only compute equivalent sub-expressions once when traversing a Pipeline\n dependency graph.\n\n Caching previously-constructed Terms is **sane** because terms and\n their inputs are both conceptually immutable.\n \"\"\"\n # Subclasses can override these class-level attributes to provide\n # different default values for instances.\n if domain is NotSpecified:\n domain = cls.domain\n if dtype is NotSpecified:\n dtype = cls.dtype\n if missing_value is NotSpecified:\n missing_value = cls.missing_value\n if ndim is NotSpecified:\n ndim = cls.ndim\n if window_safe is NotSpecified:\n window_safe = cls.window_safe\n\n dtype, missing_value = validate_dtype(\n cls.__name__,\n dtype,\n missing_value,\n )\n params = cls._pop_params(kwargs)\n\n identity = cls._static_identity(\n domain=domain,\n dtype=dtype,\n missing_value=missing_value,\n window_safe=window_safe,\n ndim=ndim,\n params=params,\n *args, **kwargs\n )\n\n try:\n return cls._term_cache[identity]\n except KeyError:\n new_instance = cls._term_cache[identity] = \\\n super(Term, cls).__new__(cls)._init(\n domain=domain,\n dtype=dtype,\n missing_value=missing_value,\n window_safe=window_safe,\n ndim=ndim,\n params=params,\n *args, **kwargs\n )\n return new_instance\n\n @classmethod\n def _pop_params(cls, kwargs):\n \"\"\"\n Pop entries from the `kwargs` passed to cls.__new__ based on the values\n in `cls.params`.\n\n Parameters\n ----------\n kwargs : dict\n The kwargs passed to cls.__new__.\n\n Returns\n -------\n params : list[(str, object)]\n A list of string, value pairs containing the entries in cls.params.\n\n Raises\n ------\n TypeError\n Raised if any parameter values are not passed or not hashable.\n \"\"\"\n params = cls.params\n if not isinstance(params, Mapping):\n params = {k: NotSpecified for k in params}\n param_values = []\n for key, default_value in params.items():\n try:\n value = kwargs.pop(key, default_value)\n if value is NotSpecified:\n raise KeyError(key)\n\n # Check here that the value is hashable so that we fail here\n # instead of trying to hash the param values tuple later.\n hash(value)\n except KeyError:\n raise TypeError(\n \"{typename} expected a keyword parameter {name!r}.\".format(\n typename=cls.__name__,\n name=key\n )\n )\n except TypeError:\n # Value wasn't hashable.\n raise TypeError(\n \"{typename} expected a hashable value for parameter \"\n \"{name!r}, but got {value!r} instead.\".format(\n typename=cls.__name__,\n name=key,\n value=value,\n )\n )\n\n param_values.append((key, value))\n return tuple(param_values)\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Noop constructor to play nicely with our caching __new__. Subclasses\n should implement _init instead of this method.\n\n When a class' __new__ returns an instance of that class, Python will\n automatically call __init__ on the object, even if a new object wasn't\n actually constructed. 
Because we memoize instances, we often return an\n object that was already initialized from __new__, in which case we\n don't want to call __init__ again.\n\n Subclasses that need to initialize new instances should override _init,\n which is guaranteed to be called only once.\n \"\"\"\n pass\n\n @expect_types(key=Asset)\n def __getitem__(self, key):\n if isinstance(self, LoadableTerm):\n raise NonSliceableTerm(term=self)\n return Slice(self, key)\n\n @classmethod\n def _static_identity(cls,\n domain,\n dtype,\n missing_value,\n window_safe,\n ndim,\n params):\n \"\"\"\n Return the identity of the Term that would be constructed from the\n given arguments.\n\n Identities that compare equal will cause us to return a cached instance\n rather than constructing a new one. We do this primarily because it\n makes dependency resolution easier.\n\n This is a classmethod so that it can be called from Term.__new__ to\n determine whether to produce a new instance.\n \"\"\"\n return (cls, domain, dtype, missing_value, window_safe, ndim, params)\n\n def _init(self, domain, dtype, missing_value, window_safe, ndim, params):\n \"\"\"\n Parameters\n ----------\n domain : zipline.pipeline.domain.Domain\n The domain of this term.\n dtype : np.dtype\n Dtype of this term's output.\n missing_value : object\n Missing value for this term.\n ndim : 1 or 2\n The dimensionality of this term.\n params : tuple[(str, hashable)]\n Tuple of key/value pairs of additional parameters.\n \"\"\"\n self.domain = domain\n self.dtype = dtype\n self.missing_value = missing_value\n self.window_safe = window_safe\n self.ndim = ndim\n\n for name, value in params:\n if hasattr(self, name):\n raise TypeError(\n \"Parameter {name!r} conflicts with already-present\"\n \" attribute with value {value!r}.\".format(\n name=name,\n value=getattr(self, name),\n )\n )\n # TODO: Consider setting these values as attributes and replacing\n # the boilerplate in NumericalExpression, Rank, and\n # PercentileFilter.\n\n self.params = dict(params)\n\n # Make sure that subclasses call super() in their _validate() methods\n # by setting this flag. The base class implementation of _validate\n # should set this flag to True.\n self._subclass_called_super_validate = False\n self._validate()\n assert self._subclass_called_super_validate, (\n \"Term._validate() was not called.\\n\"\n \"This probably means that you overrode _validate\"\n \" without calling super().\"\n )\n del self._subclass_called_super_validate\n\n return self\n\n def _validate(self):\n \"\"\"\n Assert that this term is well-formed. This should be called exactly\n once, at the end of Term._init().\n \"\"\"\n # mark that we got here to enforce that subclasses overriding _validate\n # call super().\n self._subclass_called_super_validate = True\n\n def compute_extra_rows(self,\n all_dates,\n start_date,\n end_date,\n min_extra_rows):\n \"\"\"\n Calculate the number of extra rows needed to compute ``self``.\n\n Must return at least ``min_extra_rows``, and the default implementation\n is to just return ``min_extra_rows``. 
This is overridden by\n downsampled terms to ensure that the first date computed is a\n recomputation date.\n\n Parameters\n ----------\n all_dates : pd.DatetimeIndex\n The trading sessions against which ``self`` will be computed.\n start_date : pd.Timestamp\n The first date for which final output is requested.\n end_date : pd.Timestamp\n The last date for which final output is requested.\n min_extra_rows : int\n The minimum number of extra rows required of ``self``, as\n determined by other terms that depend on ``self``.\n\n Returns\n -------\n extra_rows : int\n The number of extra rows to compute. Must be at least\n ``min_extra_rows``.\n \"\"\"\n return min_extra_rows\n\n @abstractproperty\n def inputs(self):\n \"\"\"\n A tuple of other Terms needed as inputs for ``self``.\n \"\"\"\n raise NotImplementedError('inputs')\n\n @abstractproperty\n def windowed(self):\n \"\"\"\n Boolean indicating whether this term is a trailing-window computation.\n \"\"\"\n raise NotImplementedError('windowed')\n\n @abstractproperty\n def mask(self):\n \"\"\"\n A :class:`~zipline.pipeline.Filter` representing asset/date pairs to\n while computing this Term. True means include; False means exclude.\n \"\"\"\n raise NotImplementedError('mask')\n\n @abstractproperty\n def dependencies(self):\n \"\"\"\n A dictionary mapping terms that must be computed before `self` to the\n number of extra rows needed for those terms.\n \"\"\"\n raise NotImplementedError('dependencies')\n\n def graph_repr(self):\n \"\"\"A short repr to use when rendering GraphViz graphs.\n \"\"\"\n # Default graph_repr is just the name of the type.\n return type(self).__name__\n\n def recursive_repr(self):\n \"\"\"A short repr to use when recursively rendering terms with inputs.\n \"\"\"\n # Default recursive_repr is just the name of the type.\n return type(self).__name__\n\n\nclass AssetExists(Term):\n \"\"\"\n Pseudo-filter describing whether or not an asset existed on a given day.\n This is the default mask for all terms that haven't been passed a mask\n explicitly.\n\n This is morally a Filter, in the sense that it produces a boolean value for\n every asset on every date. 
We don't subclass Filter, however, because\n `AssetExists` is computed directly by the PipelineEngine.\n\n This term is guaranteed to be available as an input for any term computed\n by SimplePipelineEngine.run_pipeline().\n\n See Also\n --------\n zipline.assets.AssetFinder.lifetimes\n \"\"\"\n dtype = bool_dtype\n dataset = None\n inputs = ()\n dependencies = {}\n mask = None\n windowed = False\n\n def __repr__(self):\n return \"AssetExists()\"\n\n graph_repr = __repr__\n\n def _compute(self, today, assets, out):\n raise NotImplementedError(\n \"AssetExists cannot be computed directly.\"\n \" Check your PipelineEngine configuration.\"\n )\n\n\nclass InputDates(Term):\n \"\"\"\n 1-Dimensional term providing date labels for other term inputs.\n\n This term is guaranteed to be available as an input for any term computed\n by SimplePipelineEngine.run_pipeline().\n \"\"\"\n ndim = 1\n dataset = None\n dtype = datetime64ns_dtype\n inputs = ()\n dependencies = {}\n mask = None\n windowed = False\n window_safe = True\n\n def __repr__(self):\n return \"InputDates()\"\n\n graph_repr = __repr__\n\n def _compute(self, today, assets, out):\n raise NotImplementedError(\n \"InputDates cannot be computed directly.\"\n \" Check your PipelineEngine configuration.\"\n )\n\n\nclass LoadableTerm(Term):\n \"\"\"\n A Term that should be loaded from an external resource by a PipelineLoader.\n\n This is the base class for :class:`zipline.pipeline.data.BoundColumn`.\n \"\"\"\n windowed = False\n inputs = ()\n\n @lazyval\n def dependencies(self):\n return {self.mask: 0}\n\n\nclass ComputableTerm(Term):\n \"\"\"\n A Term that should be computed from a tuple of inputs.\n\n This is the base class for :class:`zipline.pipeline.Factor`,\n :class:`zipline.pipeline.Filter`, and :class:`zipline.pipeline.Classifier`.\n \"\"\"\n inputs = NotSpecified\n outputs = NotSpecified\n window_length = NotSpecified\n mask = NotSpecified\n domain = NotSpecified\n\n def __new__(cls,\n inputs=inputs,\n outputs=outputs,\n window_length=window_length,\n mask=mask,\n domain=domain,\n *args, **kwargs):\n\n if inputs is NotSpecified:\n inputs = cls.inputs\n\n # Having inputs = NotSpecified is an error, but we handle it later\n # in self._validate rather than here.\n if inputs is not NotSpecified:\n # Allow users to specify lists as class-level defaults, but\n # normalize to a tuple so that inputs is hashable.\n inputs = tuple(inputs)\n\n # Make sure all our inputs are valid pipeline objects before trying\n # to infer a domain.\n non_terms = [t for t in inputs if not isinstance(t, Term)]\n if non_terms:\n raise NonPipelineInputs(cls.__name__, non_terms)\n\n if domain is NotSpecified:\n domain = infer_domain(inputs)\n\n if outputs is NotSpecified:\n outputs = cls.outputs\n if outputs is not NotSpecified:\n outputs = tuple(outputs)\n\n if mask is NotSpecified:\n mask = cls.mask\n if mask is NotSpecified:\n mask = AssetExists()\n\n if window_length is NotSpecified:\n window_length = cls.window_length\n\n return super(ComputableTerm, cls).__new__(\n cls,\n inputs=inputs,\n outputs=outputs,\n mask=mask,\n window_length=window_length,\n domain=domain,\n *args, **kwargs\n )\n\n def _init(self, inputs, outputs, window_length, mask, *args, **kwargs):\n self.inputs = inputs\n self.outputs = outputs\n self.window_length = window_length\n self.mask = mask\n return super(ComputableTerm, self)._init(*args, **kwargs)\n\n @classmethod\n def _static_identity(cls,\n inputs,\n outputs,\n window_length,\n mask,\n *args,\n **kwargs):\n return (\n 
super(ComputableTerm, cls)._static_identity(*args, **kwargs),\n inputs,\n outputs,\n window_length,\n mask,\n )\n\n def _validate(self):\n super(ComputableTerm, self)._validate()\n\n # Check inputs.\n if self.inputs is NotSpecified:\n raise TermInputsNotSpecified(termname=type(self).__name__)\n\n if not isinstance(self.domain, Domain):\n raise TypeError(\n \"Expected {}.domain to be an instance of Domain, \"\n \"but got {}.\".format(type(self).__name__, type(self.domain))\n )\n\n # Check outputs.\n if self.outputs is NotSpecified:\n pass\n elif not self.outputs:\n raise TermOutputsEmpty(termname=type(self).__name__)\n else:\n # Raise an exception if there are any naming conflicts between the\n # term's output names and certain attributes.\n disallowed_names = [\n attr for attr in dir(ComputableTerm)\n if not attr.startswith('_')\n ]\n\n # The name 'compute' is an added special case that is disallowed.\n # Use insort to add it to the list in alphabetical order.\n insort(disallowed_names, 'compute')\n\n for output in self.outputs:\n if output.startswith('_') or output in disallowed_names:\n raise InvalidOutputName(\n output_name=output,\n termname=type(self).__name__,\n disallowed_names=disallowed_names,\n )\n\n if self.window_length is NotSpecified:\n raise WindowLengthNotSpecified(termname=type(self).__name__)\n\n if self.mask is NotSpecified:\n # This isn't user error, this is a bug in our code.\n raise AssertionError(\"{term} has no mask\".format(term=self))\n\n if self.window_length > 1:\n for child in self.inputs:\n if not child.window_safe:\n raise NonWindowSafeInput(parent=self, child=child)\n\n def _compute(self, inputs, dates, assets, mask):\n \"\"\"\n Subclasses should implement this to perform actual computation.\n\n This is named ``_compute`` rather than just ``compute`` because\n ``compute`` is reserved for user-supplied functions in\n CustomFilter/CustomFactor/CustomClassifier.\n \"\"\"\n raise NotImplementedError()\n\n @lazyval\n def windowed(self):\n \"\"\"\n Whether or not this term represents a trailing window computation.\n\n If term.windowed is truthy, its compute_from_windows method will be\n called with instances of AdjustedArray as inputs.\n\n If term.windowed is falsey, its compute_from_baseline will be called\n with instances of np.ndarray as inputs.\n \"\"\"\n return (\n self.window_length is not NotSpecified\n and self.window_length > 0\n )\n\n @lazyval\n def dependencies(self):\n \"\"\"\n The number of extra rows needed for each of our inputs to compute this\n term.\n \"\"\"\n extra_input_rows = max(0, self.window_length - 1)\n out = {}\n for term in self.inputs:\n out[term] = extra_input_rows\n out[self.mask] = 0\n return out\n\n @expect_types(data=ndarray)\n def postprocess(self, data):\n \"\"\"\n Called with an result of ``self``, unravelled (i.e. 1-dimensional)\n after any user-defined screens have been applied.\n\n This is mostly useful for transforming the dtype of an output, e.g., to\n convert a LabelArray into a pandas Categorical.\n\n The default implementation is to just return data unchanged.\n \"\"\"\n return data\n\n def to_workspace_value(self, result, assets):\n \"\"\"\n Called with a column of the result of a pipeline. 
This needs to put\n the data into a format that can be used in a workspace to continue\n doing computations.\n\n Parameters\n ----------\n result : pd.Series\n A multiindexed series with (dates, assets) whose values are the\n results of running this pipeline term over the dates.\n assets : pd.Index\n All of the assets being requested. This allows us to correctly\n shape the workspace value.\n\n Returns\n -------\n workspace_value : array-like\n An array like value that the engine can consume.\n \"\"\"\n return result.unstack().fillna(self.missing_value).reindex(\n columns=assets,\n fill_value=self.missing_value,\n ).values\n\n def _downsampled_type(self, *args, **kwargs):\n \"\"\"\n The expression type to return from self.downsample().\n \"\"\"\n raise NotImplementedError(\n \"downsampling is not yet implemented \"\n \"for instances of %s.\" % type(self).__name__\n )\n\n @expect_downsample_frequency\n @templated_docstring(frequency=PIPELINE_DOWNSAMPLING_FREQUENCY_DOC)\n def downsample(self, frequency):\n \"\"\"\n Make a term that computes from ``self`` at lower-than-daily frequency.\n\n Parameters\n ----------\n {frequency}\n \"\"\"\n return self._downsampled_type(term=self, frequency=frequency)\n\n def _aliased_type(self, *args, **kwargs):\n \"\"\"\n The expression type to return from self.alias().\n \"\"\"\n raise NotImplementedError(\n \"alias is not yet implemented \"\n \"for instances of %s.\" % type(self).__name__\n )\n\n @templated_docstring(name=PIPELINE_ALIAS_NAME_DOC)\n def alias(self, name):\n \"\"\"\n Make a term from ``self`` that names the expression.\n\n Parameters\n ----------\n {name}\n\n Returns\n -------\n aliased : Aliased\n ``self`` with a name.\n\n Notes\n -----\n This is useful for giving a name to a numerical or boolean expression.\n \"\"\"\n return self._aliased_type(term=self, name=name)\n\n def __repr__(self):\n return (\n \"{type}([{inputs}], {window_length})\"\n ).format(\n type=type(self).__name__,\n inputs=', '.join(i.recursive_repr() for i in self.inputs),\n window_length=self.window_length,\n )\n\n def recursive_repr(self):\n return type(self).__name__ + '(...)'\n\n\nclass Slice(ComputableTerm):\n \"\"\"\n Term for extracting a single column of a another term's output.\n\n Parameters\n ----------\n term : zipline.pipeline.Term\n The term from which to extract a column of data.\n asset : zipline.assets.Asset\n The asset corresponding to the column of `term` to be extracted.\n\n Notes\n -----\n Users should rarely construct instances of `Slice` directly. Instead, they\n should construct instances via indexing, e.g. 
`MyFactor()[Asset(24)]`.\n \"\"\"\n def __new__(cls, term, asset):\n return super(Slice, cls).__new__(\n cls,\n asset=asset,\n inputs=[term],\n window_length=0,\n mask=term.mask,\n dtype=term.dtype,\n missing_value=term.missing_value,\n window_safe=term.window_safe,\n ndim=1,\n )\n\n def __repr__(self):\n return \"{parent_term}[{asset}]\".format(\n type=type(self).__name__,\n parent_term=self.inputs[0].recursive_repr(),\n asset=self._asset,\n )\n\n def _init(self, asset, *args, **kwargs):\n self._asset = asset\n return super(Slice, self)._init(*args, **kwargs)\n\n @classmethod\n def _static_identity(cls, asset, *args, **kwargs):\n return (super(Slice, cls)._static_identity(*args, **kwargs), asset)\n\n def _compute(self, windows, dates, assets, mask):\n asset = self._asset\n asset_column = searchsorted(assets.values, asset.sid)\n if assets[asset_column] != asset.sid:\n raise NonExistentAssetInTimeFrame(\n asset=asset, start_date=dates[0], end_date=dates[-1],\n )\n\n # Return a 2D array with one column rather than a 1D array of the\n # column.\n return windows[0][:, [asset_column]]\n\n @property\n def asset(self):\n \"\"\"Get the asset whose data is selected by this slice.\n \"\"\"\n return self._asset\n\n @property\n def _downsampled_type(self):\n raise NotImplementedError(\n 'downsampling of slices is not yet supported'\n )\n\n\ndef validate_dtype(termname, dtype, missing_value):\n \"\"\"\n Validate a `dtype` and `missing_value` passed to Term.__new__.\n\n Ensures that we know how to represent ``dtype``, and that missing_value\n is specified for types without default missing values.\n\n Returns\n -------\n validated_dtype, validated_missing_value : np.dtype, any\n The dtype and missing_value to use for the new term.\n\n Raises\n ------\n DTypeNotSpecified\n When no dtype was passed to the instance, and the class doesn't\n provide a default.\n NotDType\n When either the class or the instance provides a value not\n coercible to a numpy dtype.\n NoDefaultMissingValue\n When dtype requires an explicit missing_value, but\n ``missing_value`` is NotSpecified.\n \"\"\"\n if dtype is NotSpecified:\n raise DTypeNotSpecified(termname=termname)\n\n try:\n dtype = dtype_class(dtype)\n except TypeError:\n raise NotDType(dtype=dtype, termname=termname)\n\n if not can_represent_dtype(dtype):\n raise UnsupportedDType(dtype=dtype, termname=termname)\n\n if missing_value is NotSpecified:\n missing_value = default_missing_value_for_dtype(dtype)\n\n try:\n if (dtype == categorical_dtype):\n # This check is necessary because we use object dtype for\n # categoricals, and numpy will allow us to promote numerical\n # values to object even though we don't support them.\n _assert_valid_categorical_missing_value(missing_value)\n\n # For any other type, we can check if the missing_value is safe by\n # making an array of that value and trying to safely convert it to\n # the desired type.\n # 'same_kind' allows casting between things like float32 and\n # float64, but not str and int.\n array([missing_value]).astype(dtype=dtype, casting='same_kind')\n except TypeError as e:\n raise TypeError(\n \"Missing value {value!r} is not a valid choice \"\n \"for term {termname} with dtype {dtype}.\\n\\n\"\n \"Coercion attempt failed with: {error}\".format(\n termname=termname,\n value=missing_value,\n dtype=dtype,\n error=e,\n )\n )\n\n return dtype, missing_value\n\n\ndef _assert_valid_categorical_missing_value(value):\n \"\"\"\n Check that value is a valid categorical missing_value.\n\n Raises a TypeError if the value is cannot 
be used as the missing_value for\n a categorical_dtype Term.\n \"\"\"\n label_types = LabelArray.SUPPORTED_SCALAR_TYPES\n if not isinstance(value, label_types):\n raise TypeError(\n \"Categorical terms must have missing values of type \"\n \"{types}.\".format(\n types=' or '.join([t.__name__ for t in label_types]),\n )\n )\n",
"#\n# Copyright 2013 Quantopian, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport pandas as pd\nimport pytz\n# import warnings\n\nfrom datetime import datetime\nfrom dateutil import rrule\nfrom functools import partial\n\n# from zipline.zipline_warnings import ZiplineDeprecationWarning\n\n# IMPORTANT: This module is deprecated and is only here for temporary backwards\n# compatibility. Look at the `trading-calendars`\n# module, as well as the calendar definitions in `trading_calendars`.\n\n# TODO: The new calendar API is currently in flux, so the deprecation\n# warning for this module is currently disabled. Re-enable once\n# the new API is stabilized.\n#\n# warnings.warn(\n# \"The `tradingcalendar` module is deprecated. See the \"\n# \"`trading-calendars` module, as well as the \"\n# \"calendar definitions in `trading-calendars`.\",\n# category=ZiplineDeprecationWarning,\n# stacklevel=1,\n# )\n\nstart = pd.Timestamp('1990-01-01', tz='UTC')\nend_base = pd.Timestamp('today', tz='UTC')\n# Give an aggressive buffer for logic that needs to use the next trading\n# day or minute.\nend = end_base + pd.Timedelta(days=365)\n\n\ndef canonicalize_datetime(dt):\n # Strip out any HHMMSS or timezone info in the user's datetime, so that\n # all the datetimes we return will be 00:00:00 UTC.\n return datetime(dt.year, dt.month, dt.day, tzinfo=pytz.utc)\n\n\ndef get_non_trading_days(start, end):\n non_trading_rules = []\n\n start = canonicalize_datetime(start)\n end = canonicalize_datetime(end)\n\n weekends = rrule.rrule(\n rrule.YEARLY,\n byweekday=(rrule.SA, rrule.SU),\n cache=True,\n dtstart=start,\n until=end\n )\n non_trading_rules.append(weekends)\n\n new_years = rrule.rrule(\n rrule.MONTHLY,\n byyearday=1,\n cache=True,\n dtstart=start,\n until=end\n )\n non_trading_rules.append(new_years)\n\n new_years_sunday = rrule.rrule(\n rrule.MONTHLY,\n byyearday=2,\n byweekday=rrule.MO,\n cache=True,\n dtstart=start,\n until=end\n )\n non_trading_rules.append(new_years_sunday)\n\n mlk_day = rrule.rrule(\n rrule.MONTHLY,\n bymonth=1,\n byweekday=(rrule.MO(+3)),\n cache=True,\n dtstart=datetime(1998, 1, 1, tzinfo=pytz.utc),\n until=end\n )\n non_trading_rules.append(mlk_day)\n\n presidents_day = rrule.rrule(\n rrule.MONTHLY,\n bymonth=2,\n byweekday=(rrule.MO(3)),\n cache=True,\n dtstart=start,\n until=end\n )\n non_trading_rules.append(presidents_day)\n\n good_friday = rrule.rrule(\n rrule.DAILY,\n byeaster=-2,\n cache=True,\n dtstart=start,\n until=end\n )\n non_trading_rules.append(good_friday)\n\n memorial_day = rrule.rrule(\n rrule.MONTHLY,\n bymonth=5,\n byweekday=(rrule.MO(-1)),\n cache=True,\n dtstart=start,\n until=end\n )\n non_trading_rules.append(memorial_day)\n\n july_4th = rrule.rrule(\n rrule.MONTHLY,\n bymonth=7,\n bymonthday=4,\n cache=True,\n dtstart=start,\n until=end\n )\n non_trading_rules.append(july_4th)\n\n july_4th_sunday = rrule.rrule(\n rrule.MONTHLY,\n bymonth=7,\n bymonthday=5,\n byweekday=rrule.MO,\n cache=True,\n dtstart=start,\n until=end\n )\n 
non_trading_rules.append(july_4th_sunday)\n\n july_4th_saturday = rrule.rrule(\n rrule.MONTHLY,\n bymonth=7,\n bymonthday=3,\n byweekday=rrule.FR,\n cache=True,\n dtstart=start,\n until=end\n )\n non_trading_rules.append(july_4th_saturday)\n\n labor_day = rrule.rrule(\n rrule.MONTHLY,\n bymonth=9,\n byweekday=(rrule.MO(1)),\n cache=True,\n dtstart=start,\n until=end\n )\n non_trading_rules.append(labor_day)\n\n thanksgiving = rrule.rrule(\n rrule.MONTHLY,\n bymonth=11,\n byweekday=(rrule.TH(4)),\n cache=True,\n dtstart=start,\n until=end\n )\n non_trading_rules.append(thanksgiving)\n\n christmas = rrule.rrule(\n rrule.MONTHLY,\n bymonth=12,\n bymonthday=25,\n cache=True,\n dtstart=start,\n until=end\n )\n non_trading_rules.append(christmas)\n\n christmas_sunday = rrule.rrule(\n rrule.MONTHLY,\n bymonth=12,\n bymonthday=26,\n byweekday=rrule.MO,\n cache=True,\n dtstart=start,\n until=end\n )\n non_trading_rules.append(christmas_sunday)\n\n # If Christmas is a Saturday then 24th, a Friday is observed.\n christmas_saturday = rrule.rrule(\n rrule.MONTHLY,\n bymonth=12,\n bymonthday=24,\n byweekday=rrule.FR,\n cache=True,\n dtstart=start,\n until=end\n )\n non_trading_rules.append(christmas_saturday)\n\n non_trading_ruleset = rrule.rruleset()\n\n for rule in non_trading_rules:\n non_trading_ruleset.rrule(rule)\n\n non_trading_days = non_trading_ruleset.between(start, end, inc=True)\n\n # Add September 11th closings\n # http://en.wikipedia.org/wiki/Aftermath_of_the_September_11_attacks\n # Due to the terrorist attacks, the stock market did not open on 9/11/2001\n # It did not open again until 9/17/2001.\n #\n # September 2001\n # Su Mo Tu We Th Fr Sa\n # 1\n # 2 3 4 5 6 7 8\n # 9 10 11 12 13 14 15\n # 16 17 18 19 20 21 22\n # 23 24 25 26 27 28 29\n # 30\n\n for day_num in range(11, 17):\n non_trading_days.append(\n datetime(2001, 9, day_num, tzinfo=pytz.utc))\n\n # Add closings due to Hurricane Sandy in 2012\n # http://en.wikipedia.org/wiki/Hurricane_sandy\n #\n # The stock exchange was closed due to Hurricane Sandy's\n # impact on New York.\n # It closed on 10/29 and 10/30, reopening on 10/31\n # October 2012\n # Su Mo Tu We Th Fr Sa\n # 1 2 3 4 5 6\n # 7 8 9 10 11 12 13\n # 14 15 16 17 18 19 20\n # 21 22 23 24 25 26 27\n # 28 29 30 31\n\n for day_num in range(29, 31):\n non_trading_days.append(\n datetime(2012, 10, day_num, tzinfo=pytz.utc))\n\n # Misc closings from NYSE listing.\n # http://www.nyse.com/pdfs/closings.pdf\n #\n # National Days of Mourning\n # - President Richard Nixon\n non_trading_days.append(datetime(1994, 4, 27, tzinfo=pytz.utc))\n # - President Ronald W. Reagan - June 11, 2004\n non_trading_days.append(datetime(2004, 6, 11, tzinfo=pytz.utc))\n # - President Gerald R. 
Ford - Jan 2, 2007\n non_trading_days.append(datetime(2007, 1, 2, tzinfo=pytz.utc))\n\n non_trading_days.sort()\n return pd.DatetimeIndex(non_trading_days)\n\n\nnon_trading_days = get_non_trading_days(start, end)\ntrading_day = pd.tseries.offsets.CDay(holidays=non_trading_days)\n\n\ndef get_trading_days(start, end, trading_day=trading_day):\n return pd.date_range(start=start.date(),\n end=end.date(),\n freq=trading_day).tz_localize('UTC')\n\n\ntrading_days = get_trading_days(start, end)\n\n\ndef get_early_closes(start, end):\n # 1:00 PM close rules based on\n # http://quant.stackexchange.com/questions/4083/nyse-early-close-rules-july-4th-and-dec-25th # noqa\n # and verified against http://www.nyse.com/pdfs/closings.pdf\n\n # These rules are valid starting in 1993\n\n start = canonicalize_datetime(start)\n end = canonicalize_datetime(end)\n\n start = max(start, datetime(1993, 1, 1, tzinfo=pytz.utc))\n end = max(end, datetime(1993, 1, 1, tzinfo=pytz.utc))\n\n # Not included here are early closes prior to 1993\n # or unplanned early closes\n\n early_close_rules = []\n\n day_after_thanksgiving = rrule.rrule(\n rrule.MONTHLY,\n bymonth=11,\n # 4th Friday isn't correct if month starts on Friday, so restrict to\n # day range:\n byweekday=(rrule.FR),\n bymonthday=range(23, 30),\n cache=True,\n dtstart=start,\n until=end\n )\n early_close_rules.append(day_after_thanksgiving)\n\n christmas_eve = rrule.rrule(\n rrule.MONTHLY,\n bymonth=12,\n bymonthday=24,\n byweekday=(rrule.MO, rrule.TU, rrule.WE, rrule.TH),\n cache=True,\n dtstart=start,\n until=end\n )\n early_close_rules.append(christmas_eve)\n\n friday_after_christmas = rrule.rrule(\n rrule.MONTHLY,\n bymonth=12,\n bymonthday=26,\n byweekday=rrule.FR,\n cache=True,\n dtstart=start,\n # valid 1993-2007\n until=min(end, datetime(2007, 12, 31, tzinfo=pytz.utc))\n )\n early_close_rules.append(friday_after_christmas)\n\n day_before_independence_day = rrule.rrule(\n rrule.MONTHLY,\n bymonth=7,\n bymonthday=3,\n byweekday=(rrule.MO, rrule.TU, rrule.TH),\n cache=True,\n dtstart=start,\n until=end\n )\n early_close_rules.append(day_before_independence_day)\n\n day_after_independence_day = rrule.rrule(\n rrule.MONTHLY,\n bymonth=7,\n bymonthday=5,\n byweekday=rrule.FR,\n cache=True,\n dtstart=start,\n # starting in 2013: wednesday before independence day\n until=min(end, datetime(2012, 12, 31, tzinfo=pytz.utc))\n )\n early_close_rules.append(day_after_independence_day)\n\n wednesday_before_independence_day = rrule.rrule(\n rrule.MONTHLY,\n bymonth=7,\n bymonthday=3,\n byweekday=rrule.WE,\n cache=True,\n # starting in 2013\n dtstart=max(start, datetime(2013, 1, 1, tzinfo=pytz.utc)),\n until=max(end, datetime(2013, 1, 1, tzinfo=pytz.utc))\n )\n early_close_rules.append(wednesday_before_independence_day)\n\n early_close_ruleset = rrule.rruleset()\n\n for rule in early_close_rules:\n early_close_ruleset.rrule(rule)\n early_closes = early_close_ruleset.between(start, end, inc=True)\n\n # Misc early closings from NYSE listing.\n # http://www.nyse.com/pdfs/closings.pdf\n #\n # New Year's Eve\n nye_1999 = datetime(1999, 12, 31, tzinfo=pytz.utc)\n if start <= nye_1999 and nye_1999 <= end:\n early_closes.append(nye_1999)\n\n early_closes.sort()\n return pd.DatetimeIndex(early_closes)\n\n\nearly_closes = get_early_closes(start, end)\n\n\ndef get_open_and_close(day, early_closes):\n market_open = pd.Timestamp(\n datetime(\n year=day.year,\n month=day.month,\n day=day.day,\n hour=9,\n minute=31),\n tz='US/Eastern').tz_convert('UTC')\n # 1 PM if early close, 4 PM 
otherwise\n close_hour = 13 if day in early_closes else 16\n market_close = pd.Timestamp(\n datetime(\n year=day.year,\n month=day.month,\n day=day.day,\n hour=close_hour),\n tz='US/Eastern').tz_convert('UTC')\n\n return market_open, market_close\n\n\ndef get_open_and_closes(trading_days, early_closes, get_open_and_close):\n open_and_closes = pd.DataFrame(index=trading_days,\n columns=('market_open', 'market_close'))\n\n get_o_and_c = partial(get_open_and_close, early_closes=early_closes)\n\n open_and_closes['market_open'], open_and_closes['market_close'] = \\\n zip(*open_and_closes.index.map(get_o_and_c))\n\n return open_and_closes\n\n\nopen_and_closes = get_open_and_closes(trading_days, early_closes,\n get_open_and_close)\n"
] | [
[
"numpy.array",
"numpy.dtype",
"numpy.searchsorted"
],
[
"pandas.DatetimeIndex",
"pandas.Timedelta",
"pandas.tseries.offsets.CDay",
"pandas.DataFrame",
"pandas.Timestamp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
PhylomatX/PhiFlow | [
"2b7a73c1f595e288d26945cd53cc482952bb1db9",
"2b7a73c1f595e288d26945cd53cc482952bb1db9"
] | [
"phi/tf/tf_backend.py",
"phi/tf/session.py"
] | [
"import numbers\nimport uuid\nimport warnings\nfrom packaging import version\nimport six\n\nimport numpy as np\nimport six\nimport tensorflow as tf\nfrom packaging import version\n\nfrom phi.backend.backend_helper import split_multi_mode_pad, PadSettings, general_grid_sample_nd, equalize_shapes, circular_pad, replicate_pad\nfrom phi.backend.scipy_backend import SciPyBackend\nfrom phi.tf.tf_cuda_resample import *\nfrom . import tf\n\nfrom phi.backend.backend import Backend\nfrom phi.backend.tensorop import expand, collapsed_gather_nd\n\n\nclass TFBackend(Backend):\n\n def __init__(self):\n Backend.__init__(self, \"TensorFlow\")\n\n @property\n def precision_dtype(self):\n return {16: np.float16, 32: np.float32, 64: np.float64, None: np.float32}[self.precision]\n\n def is_tensor(self, x, only_native=False):\n if not only_native and SciPyBackend().is_tensor(x, only_native=False):\n return True\n return isinstance(x, (tf.Tensor, tf.Variable, tf.SparseTensor, tf.Operation))\n\n def as_tensor(self, x, convert_external=True):\n if self.is_tensor(x, only_native=convert_external):\n tensor = x\n elif isinstance(x, np.ndarray):\n tensor = tf.convert_to_tensor(SciPyBackend(precision=self.precision).as_tensor(x))\n else:\n tensor = tf.convert_to_tensor(x)\n # --- Enforce Precision ---\n if not isinstance(tensor, numbers.Number):\n if isinstance(tensor, np.ndarray):\n tensor = SciPyBackend(precision=self.precision).as_tensor(tensor)\n elif tensor.dtype.is_floating and self.has_fixed_precision:\n tensor = self.to_float(tensor)\n return tensor\n\n def copy(self, tensor, only_mutable=False):\n if not only_mutable or tf.executing_eagerly():\n return tf.identity(tensor)\n else:\n return tensor\n\n def equal(self, x, y):\n return tf.equal(x, y)\n\n def divide_no_nan(self, x, y):\n if version.parse(tf.__version__) >= version.parse('1.11.0'):\n return tf.div_no_nan(x, y)\n else:\n result = x / y\n return tf.where(tf.is_finite(result), result, tf.zeros_like(result))\n\n def random_uniform(self, shape, low=0, high=1):\n return tf.random.uniform(shape, minval=low, maxval=high, dtype=self.precision_dtype)\n\n def random_normal(self, shape):\n return tf.random.normal(shape, dtype=self.precision_dtype)\n\n def rank(self, value):\n return len(value.shape)\n\n def range(self, start, limit=None, delta=1, dtype=None):\n return tf.range(start, limit, delta, dtype)\n\n def tile(self, value, multiples):\n if isinstance(multiples, (tuple, list)) and self.ndims(value) < len(multiples):\n value = self.expand_dims(value, axis=0, number=len(multiples) - self.ndims(value))\n return tf.tile(value, multiples)\n\n def stack(self, values, axis=0):\n return tf.stack(values, axis=axis)\n\n def concat(self, values, axis):\n return tf.concat(values, axis)\n\n def pad(self, value, pad_width, mode='constant', constant_values=0):\n passes = split_multi_mode_pad(self.ndims(value), PadSettings(pad_width, mode, constant_values), split_by_constant_value=True)\n for pad_pass in passes:\n value = self._single_mode_single_constant_pad(value, *pad_pass)\n return value\n\n def _single_mode_single_constant_pad(self, value, pad_width, single_mode, constant_value=0):\n assert single_mode in ('constant', 'symmetric', 'circular', 'reflect', 'replicate'), single_mode\n if single_mode == 'circular':\n return circular_pad(value, pad_width, self)\n if single_mode == 'replicate':\n if np.any(np.array(pad_width) > 1):\n return replicate_pad(value, pad_width, self)\n else:\n single_mode = 'symmetric'\n return tf.pad(value, pad_width, single_mode.upper(), 
constant_values=constant_value) # constant, symmetric, reflect\n\n def reshape(self, value, shape):\n return tf.reshape(value, shape)\n\n def sum(self, value, axis=None, keepdims=False):\n if axis is not None:\n if not isinstance(axis, int):\n axis = list(axis)\n return tf.reduce_sum(value, axis=axis, keepdims=keepdims)\n\n def prod(self, value, axis=None):\n if axis is not None:\n if not isinstance(axis, int):\n axis = list(axis)\n if value.dtype == bool:\n return tf.reduce_all(value, axis=axis)\n return tf.reduce_prod(value, axis=axis)\n\n def where(self, condition, x=None, y=None):\n c = self.cast(condition, self.dtype(x))\n return c * x + (1 - c) * y\n # return tf.where(condition, x, y) # TF1 has an inconsistent broadcasting rule for where\n\n def mean(self, value, axis=None, keepdims=False):\n if axis is not None:\n if not isinstance(axis, int):\n axis = list(axis)\n return tf.reduce_mean(value, axis, keepdims=keepdims)\n\n def py_func(self, func, inputs, Tout, shape_out, stateful=True, name=None, grad=None):\n if grad is None:\n result = tf.py_func(func, inputs, Tout, stateful=stateful, name=name)\n else:\n # Need to generate a unique name to avoid duplicates:\n rnd_name = 'PyFuncGrad' + str(uuid.uuid4())\n\n tf.RegisterGradient(rnd_name)(grad) # see _MySquareGrad for grad example\n g = tf.get_default_graph()\n with g.gradient_override_map({\"PyFunc\": rnd_name}):\n result = tf.py_func(func, inputs, Tout, stateful=stateful, name=name)\n if shape_out is not None:\n result.set_shape(shape_out)\n return result\n\n def resample(self, inputs, sample_coords, interpolation='linear', boundary='constant', constant_values=0):\n assert interpolation == 'linear'\n if use_cuda(inputs):\n return resample_cuda(inputs, sample_coords, boundary)\n else:\n return general_grid_sample_nd(inputs, sample_coords, boundary, constant_values, self) # while this is a bit slower than niftynet, it give consisten results at the boundaries\n\n def zeros_like(self, tensor):\n return tf.zeros_like(tensor)\n\n def ones_like(self, tensor):\n return tf.ones_like(tensor)\n\n def dot(self, a, b, axes):\n return tf.tensordot(a, b, axes)\n\n def matmul(self, A, b):\n if isinstance(A, tf.SparseTensor):\n result = tf.sparse_tensor_dense_matmul(A, tf.transpose(b))\n result = tf.transpose(result)\n result.set_shape(tf.TensorShape([b.shape[0], A.shape[0]]))\n return result\n else:\n return tf.matmul(A, b)\n\n def einsum(self, equation, *tensors):\n return tf.einsum(equation, *tensors)\n\n def while_loop(self, cond, body, loop_vars, shape_invariants=None, parallel_iterations=10, back_prop=True,\n swap_memory=False, name=None, maximum_iterations=None):\n return tf.while_loop(cond, body, loop_vars,\n shape_invariants=shape_invariants,\n parallel_iterations=parallel_iterations,\n back_prop=back_prop,\n swap_memory=swap_memory,\n name=name,\n maximum_iterations=maximum_iterations)\n\n def abs(self, x):\n return tf.abs(x)\n\n def sign(self, x):\n return tf.sign(x)\n\n def round(self, x):\n return tf.round(x)\n\n def ceil(self, x):\n return tf.ceil(x)\n\n def floor(self, x):\n return tf.floor(x)\n\n def max(self, x, axis=None, keepdims=False):\n return tf.reduce_max(x, axis=axis, keepdims=keepdims)\n\n def min(self, x, axis=None, keepdims=False):\n return tf.reduce_min(x, axis=axis, keepdims=keepdims)\n\n def with_custom_gradient(self, function, inputs, gradient, input_index=0, output_index=None, name_base=\"custom_gradient_func\"):\n # Setup custom gradient\n gradient_name = name_base + \"_\" + str(uuid.uuid4())\n 
tf.RegisterGradient(gradient_name)(gradient)\n\n g = tf.get_default_graph()\n with g.gradient_override_map({\"Identity\": gradient_name}):\n fake_function = tf.identity(inputs[input_index])\n\n outputs = function(*inputs)\n output = outputs if output_index is None else outputs[output_index]\n output_with_gradient = fake_function + tf.stop_gradient(output - fake_function)\n if output_index is None:\n return output_with_gradient\n else:\n outputs = list(outputs)\n outputs[output_index] = output_with_gradient\n return outputs\n\n def maximum(self, a, b):\n return tf.maximum(a, b)\n\n def minimum(self, a, b):\n return tf.minimum(a, b)\n\n def clip(self, x, minimum, maximum):\n return tf.clip_by_value(x, minimum, maximum)\n\n def sqrt(self, x):\n return tf.sqrt(x)\n\n def exp(self, x):\n return tf.exp(x)\n\n def conv(self, tensor, kernel, padding=\"SAME\"):\n rank = tensor_spatial_rank(tensor)\n padding = padding.upper()\n if rank == 1:\n result = tf.nn.conv1d(tensor, kernel, 1, padding)\n elif rank == 2:\n result = tf.nn.conv2d(tensor, kernel, [1, 1, 1, 1], padding)\n elif rank == 3:\n result = tf.nn.conv3d(tensor, kernel, [1, 1, 1, 1, 1], padding)\n else:\n raise ValueError(\"Tensor must be of rank 1, 2 or 3 but is %d\" % rank)\n return result\n\n def expand_dims(self, a, axis=0, number=1):\n if number == 0:\n return a\n for _i in range(number):\n a = tf.expand_dims(a, axis)\n return a\n\n def shape(self, tensor):\n return tf.shape(tensor)\n\n def to_float(self, x, float64=False):\n if float64:\n warnings.warn('float64 argument is deprecated, set Backend.precision = 64 to use 64 bit operations.', DeprecationWarning)\n return tf.cast(x, tf.float64)\n else:\n return tf.cast(x, self.precision_dtype)\n\n def staticshape(self, tensor):\n if self.is_tensor(tensor, only_native=True):\n return tuple(tensor.shape.as_list())\n else:\n return np.shape(tensor)\n\n def to_int(self, x, int64=False):\n return tf.cast(x, tf.int64) if int64 else tf.cast(x, tf.int32)\n\n def to_complex(self, x):\n if self.dtype(x) in (np.complex64, np.complex128):\n return x\n if self.dtype(x) == np.float64:\n return tf.to_complex128(x)\n else:\n return tf.to_complex64(x)\n\n def gather(self, values, indices):\n if isinstance(indices, slice):\n return values[indices]\n return tf.gather(values, indices)\n\n def gather_nd(self, values, indices, batch_dims=0):\n if batch_dims == 0:\n return tf.gather_nd(values, indices)\n elif version.parse(tf.__version__) >= version.parse('1.14.0'):\n return tf.gather_nd(values, indices, batch_dims=batch_dims)\n else:\n if batch_dims > 1:\n raise NotImplementedError('batch_dims > 1 only supported on TensorFlow >= 1.14')\n batch_size = self.shape(values)[0]\n batch_ids = tf.reshape(tf.range(batch_size), [batch_size] + [1] * (self.ndims(indices) - 1))\n batch_ids = tf.tile(batch_ids, [1] + self.shape(indices)[1:-1] + [1])\n indices = tf.concat([batch_ids, indices], -1)\n return tf.gather_nd(values, indices)\n\n def unstack(self, tensor, axis=0, keepdims=False):\n unstacked = tf.unstack(tensor, axis=axis)\n if keepdims:\n unstacked = [self.expand_dims(c, axis=axis) for c in unstacked]\n return unstacked\n\n def std(self, x, axis=None, keepdims=False):\n _mean, var = tf.nn.moments(x, axis, keepdims=keepdims)\n return tf.sqrt(var)\n\n def boolean_mask(self, x, mask):\n return tf.boolean_mask(x, mask)\n\n def isfinite(self, x):\n return tf.is_finite(x)\n\n def any(self, boolean_tensor, axis=None, keepdims=False):\n return tf.reduce_any(boolean_tensor, axis=axis, keepdims=keepdims)\n\n def all(self, 
boolean_tensor, axis=None, keepdims=False):\n return tf.reduce_all(boolean_tensor, axis=axis, keepdims=keepdims)\n\n def scatter(self, points, indices, values, shape, duplicates_handling='undefined'):\n # Change indexing so batch number is included as first element of the index, for example: [0,31,24] indexes the first batch (batch 0) and 2D coordinates (31,24).\n buffer = tf.zeros(shape, dtype=values.dtype)\n\n repetitions = []\n for dim in range(len(indices.shape) - 1):\n if values.shape[dim] == 1:\n repetitions.append(indices.shape[dim])\n else:\n assert indices.shape[dim] == values.shape[dim]\n repetitions.append(1)\n repetitions.append(1)\n values = self.tile(values, repetitions)\n\n if duplicates_handling == 'add':\n # Only for Tensorflow with custom gradient\n @tf.custom_gradient\n def scatter_density(points, indices, values):\n result = tf.tensor_scatter_add(buffer, indices, values)\n\n def grad(dr):\n return self.resample(gradient(dr, difference='central'), points), None, None\n\n return result, grad\n\n return scatter_density(points, indices, values)\n elif duplicates_handling == 'mean':\n # Won't entirely work with out of bounds particles (still counted in mean)\n count = tf.tensor_scatter_add(buffer, indices, tf.ones_like(values))\n total = tf.tensor_scatter_add(buffer, indices, values)\n return total / tf.maximum(1.0, count)\n else: # last, any, undefined\n # indices = self.to_int(indices, int64=True)\n # st = tf.SparseTensor(indices, values, shape) # ToDo this only supports 2D shapes\n # st = tf.sparse.reorder(st) # only needed if not ordered\n # return tf.sparse.to_dense(st)\n count = tf.tensor_scatter_add(buffer, indices, tf.ones_like(values))\n total = tf.tensor_scatter_add(buffer, indices, values)\n return total / tf.maximum(1.0, count)\n\n def fft(self, x):\n rank = len(x.shape) - 2\n assert rank >= 1\n x = self.to_complex(x)\n if rank == 1:\n return tf.stack([tf.fft(c) for c in tf.unstack(x, axis=-1)], axis=-1)\n elif rank == 2:\n return tf.stack([tf.fft2d(c) for c in tf.unstack(x, axis=-1)], axis=-1)\n elif rank == 3:\n return tf.stack([tf.fft3d(c) for c in tf.unstack(x, axis=-1)], axis=-1)\n else:\n raise NotImplementedError('n-dimensional FFT not implemented.')\n\n def ifft(self, k):\n rank = len(k.shape) - 2\n assert rank >= 1\n if rank == 1:\n return tf.stack([tf.ifft(c) for c in tf.unstack(k, axis=-1)], axis=-1)\n elif rank == 2:\n return tf.stack([tf.ifft2d(c) for c in tf.unstack(k, axis=-1)], axis=-1)\n elif rank == 3:\n return tf.stack([tf.ifft3d(c) for c in tf.unstack(k, axis=-1)], axis=-1)\n else:\n raise NotImplementedError('n-dimensional inverse FFT not implemented.')\n\n def imag(self, complex):\n return tf.imag(complex)\n\n def real(self, complex):\n return tf.real(complex)\n\n def cast(self, x, dtype):\n return tf.cast(x, dtype)\n\n def sin(self, x):\n return tf.sin(x)\n\n def cos(self, x):\n return tf.cos(x)\n\n def dtype(self, array):\n if self.is_tensor(array, only_native=True):\n return array.dtype.as_numpy_dtype\n else:\n return SciPyBackend().dtype(array)\n\n def sparse_tensor(self, indices, values, shape):\n return tf.SparseTensor(indices=indices, values=values, dense_shape=shape)\n\n\n# from niftynet.layer.resampler.py\n# https://cmiclab.cs.ucl.ac.uk/CMIC/NiftyNet/blob/69c98e5a95cc6788ad9fb8c5e27dc24d1acec634/niftynet/layer/resampler.py\n\n\nCOORDINATES_TYPE = tf.int32\nEPS = 1e-6\n\n\ndef tensor_spatial_rank(tensor):\n return len(tensor.shape) - 2\n\n\ndef unit_direction(dim, spatial_rank): # ordered like z,y,x\n direction = [1 if i == dim else 
0 for i in range(spatial_rank)]\n for _i in range(spatial_rank):\n direction = tf.expand_dims(direction, axis=0)\n return direction\n\n\ndef _resample_no_pack(grid, coords, boundary_func):\n resolution = np.array([int(d) for d in grid.shape[1:-1]])\n sp_rank = tensor_spatial_rank(grid)\n\n floor = boundary_func(tf.floor(coords), resolution)\n up_weights = coords - floor\n lo_weights = TFBackend().unstack(1 - up_weights, axis=-1, keepdims=True)\n up_weights = TFBackend().unstack(up_weights, axis=-1, keepdims=True)\n base_coords = tf.cast(floor, tf.int32)\n\n def interpolate_nd(coords, axis):\n direction = np.array([1 if ax == axis else 0 for ax in range(sp_rank)])\n print(direction.shape)\n with tf.variable_scope('coord_plus_one'):\n up_coords = coords + direction # This is extremely slow for some reason - ToDo tile direction array to have same dimensions before calling interpolate_nd?\n if axis == sp_rank - 1:\n # up_coords = boundary_func(up_coords, resolution)\n lo_values = tf.gather_nd(grid, coords, batch_dims=1)\n up_values = tf.gather_nd(grid, up_coords, batch_dims=1)\n else:\n lo_values = interpolate_nd(coords, axis + 1)\n up_values = interpolate_nd(up_coords, axis + 1)\n with tf.variable_scope('weighted_sum_axis_%d' % axis):\n return lo_values * lo_weights[axis] + up_values * up_weights[axis]\n\n with tf.variable_scope('interpolate_nd'):\n result = interpolate_nd(base_coords, 0)\n return result\n\n\ndef _resample_linear_niftynet(inputs, sample_coords, boundary, boundary_func, float_type):\n inputs = tf.convert_to_tensor(inputs)\n sample_coords = tf.convert_to_tensor(sample_coords)\n\n in_spatial_size = [int(d) for d in inputs.shape[1:-1]]\n in_spatial_rank = tensor_spatial_rank(inputs)\n batch_size = tf.shape(inputs)[0]\n\n out_spatial_rank = tensor_spatial_rank(sample_coords)\n out_spatial_size = sample_coords.get_shape().as_list()[1:-1]\n\n if sample_coords.shape[0] != inputs.shape[0]:\n sample_coords = tf.tile(sample_coords, [batch_size] + [1] * (len(sample_coords.shape) - 1))\n\n xy = tf.unstack(sample_coords, axis=-1)\n base_coords = [tf.floor(coords) for coords in xy]\n floor_coords = [tf.cast(boundary_func(x, in_spatial_size[idx]), COORDINATES_TYPE) for (idx, x) in enumerate(base_coords)]\n ceil_coords = [tf.cast(boundary_func(x + 1.0, in_spatial_size[idx]), COORDINATES_TYPE) for (idx, x) in enumerate(base_coords)]\n\n if boundary.upper() == 'ZERO':\n weight_0 = [tf.expand_dims(x - tf.cast(i, float_type), -1) for (x, i) in zip(xy, floor_coords)]\n weight_1 = [tf.expand_dims(tf.cast(i, float_type) - x, -1) for (x, i) in zip(xy, ceil_coords)]\n else:\n weight_0 = [tf.expand_dims(x - i, -1) for (x, i) in zip(xy, base_coords)]\n weight_1 = [1.0 - w for w in weight_0]\n\n batch_ids = tf.reshape(tf.range(batch_size), [batch_size] + [1] * out_spatial_rank)\n batch_ids = tf.tile(batch_ids, [1] + out_spatial_size)\n sc = (floor_coords, ceil_coords)\n binary_neighbour_ids = [[int(c) for c in format(i, '0%ib' % in_spatial_rank)] for i in range(2 ** in_spatial_rank)]\n\n def get_knot(bc):\n coord = [sc[c][i] for i, c in enumerate(bc)]\n if version.parse(tf.__version__) >= version.parse('1.14.0'):\n coord = tf.stack(coord, -1)\n return tf.gather_nd(inputs, coord, batch_dims=1) # NaN can cause negative integers here\n else:\n coord = tf.stack([batch_ids] + coord, -1)\n return tf.gather_nd(inputs, coord) # NaN can cause negative integers here\n\n samples = [get_knot(bc) for bc in binary_neighbour_ids]\n\n def _pyramid_combination(samples, w_0, w_1):\n if len(w_0) == 1:\n return samples[0] * 
w_1[0] + samples[1] * w_0[0]\n f_0 = _pyramid_combination(samples[::2], w_0[:-1], w_1[:-1])\n f_1 = _pyramid_combination(samples[1::2], w_0[:-1], w_1[:-1])\n return f_0 * w_1[-1] + f_1 * w_0[-1]\n\n return _pyramid_combination(samples, weight_0, weight_1)\n\n\ndef _boundary_snap(sample_coords, spatial_shape):\n max_indices = [l - 1 for l in spatial_shape]\n for _i in range(len(spatial_shape)):\n max_indices = tf.expand_dims(max_indices, 0)\n sample_coords = tf.minimum(sample_coords, max_indices)\n sample_coords = tf.maximum(sample_coords, 0)\n return sample_coords\n\n\ndef _boundary_replicate(sample_coords, input_size):\n return tf.maximum(tf.minimum(sample_coords, input_size - 1), 0)\n\n\ndef _boundary_circular(sample_coords, input_size):\n return tf.mod(tf.mod(sample_coords, input_size) + input_size, input_size)\n\n\ndef _boundary_symmetric(sample_coords, input_size):\n sample_coords = _boundary_circular(sample_coords, 2 * input_size)\n return ((2 * input_size - 1) - tf.abs((2 * input_size - 1) - 2 * sample_coords)) // 2\n\n\ndef _boundary_reflect(sample_coords, input_size):\n sample_coords = _boundary_circular(sample_coords, 2 * input_size - 2)\n return (input_size - 1) - tf.abs((input_size - 1) - sample_coords)\n\n\nSUPPORTED_BOUNDARY = {\n 'zero': _boundary_replicate,\n 'replicate': _boundary_replicate,\n 'circular': _boundary_circular,\n 'symmetric': _boundary_symmetric,\n 'reflect': _boundary_reflect,\n}\n",
"import contextlib\nimport logging\nimport os\nimport threading\n\nimport numpy as np\nfrom . import tf\nfrom phi import struct\nfrom .profiling import Timeliner\nfrom .util import isplaceholder, istensor\n\n\nclass Session(object):\n\n def __init__(self, scene, session=None):\n self._scene = scene\n self._session = session if session is not None else tf.Session()\n assert self._session.graph == tf.get_default_graph(), 'Session %s does not reference the current TensorFlow graph.'\n self.graph = tf.get_default_graph()\n self.summary_writers = {}\n self.summary_directory = os.path.abspath(scene.subpath('summary')) if scene is not None else None\n self.profiling_directory = scene.subpath(\"profile\") if scene is not None else None\n self.trace_count = 0\n self.saver = None\n\n def initialize_variables(self):\n with self.graph.as_default():\n self._session.run(tf.global_variables_initializer())\n self.saver = tf.train.Saver(max_to_keep=100, allow_empty=True)\n\n def run(self, fetches, feed_dict=None, summary_key=None, time=None, merged_summary=None, item_condition=struct.ALL_ITEMS):\n if isinstance(fetches, np.ndarray):\n return fetches\n if fetches is None:\n return None\n\n tensor_feed_dict = None\n if feed_dict is not None:\n tensor_feed_dict = {}\n for (key, value) in feed_dict.items():\n pairs = struct.zip([key, value], item_condition=item_condition, zip_parents_if_incompatible=True)\n\n def add_to_dict(key_tensor, value_tensor):\n if isplaceholder(key_tensor):\n tensor_feed_dict[key_tensor] = value_tensor\n return None\n struct.map(add_to_dict, pairs, item_condition=item_condition, content_type=struct.INVALID)\n\n tensor_fetches = struct.flatten(fetches, item_condition=item_condition)\n if isinstance(fetches, (tuple, list)):\n def is_fetch(x): return istensor(x) or _identity_in(x, fetches)\n else:\n def is_fetch(x): return istensor(x) or x is fetches\n tensor_fetches = tuple(filter(is_fetch, tensor_fetches))\n\n # Handle tracing\n trace = _trace_stack.get_default(raise_error=False)\n if trace:\n options = trace.timeliner.options\n run_metadata = trace.timeliner.run_metadata\n else:\n options = None\n run_metadata = None\n\n # Summary\n if summary_key is not None and merged_summary is not None:\n tensor_fetches = (merged_summary,) + tensor_fetches\n\n result_fetches = self._session.run(tensor_fetches, tensor_feed_dict, options, run_metadata)\n result_dict = {fetch: result for fetch, result in zip(tensor_fetches, result_fetches)}\n\n if summary_key:\n summary_buffer = result_fetches[0]\n result_fetches = result_fetches[1:]\n if summary_key in self.summary_writers:\n summary_writer = self.summary_writers[summary_key]\n else:\n summary_writer = tf.summary.FileWriter(os.path.join(self.summary_directory, str(summary_key)), self.graph)\n self.summary_writers[summary_key] = summary_writer\n summary_writer.add_summary(summary_buffer, time)\n summary_writer.flush()\n\n if trace:\n trace.timeliner.add_run()\n\n def replace_tensor_with_value(fetch):\n try:\n if fetch in result_dict:\n return result_dict[fetch]\n else:\n return fetch\n except TypeError: # not hashable\n return fetch\n result = struct.map(replace_tensor_with_value, fetches, item_condition=item_condition)\n return result\n\n def profiler(self):\n os.path.isdir(self.profiling_directory) or os.makedirs(self.profiling_directory)\n self.trace_count += 1\n return Trace(self.trace_count, self.profiling_directory)\n\n def save(self, dir):\n assert self.saver is not None, \"save() called before initialize_variables()\"\n os.path.isdir(dir) or 
os.makedirs(dir)\n self.saver.save(self._session, os.path.join(dir, \"model.ckpt\"))\n\n def restore(self, dir, scope=None):\n path = os.path.join(dir, \"model.ckpt\")\n vars = self.graph.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope)\n if len(vars) == 0:\n raise ValueError('The current graph does not contain any variables in scope \"%s.\\nAll: %s\"' % (scope, self.graph.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)))\n saver = tf.train.Saver(var_list=vars)\n saver.restore(self._session, path)\n\n def restore_new_scope(self, dir, saved_scope, tf_scope):\n var_remap = dict()\n vars = [v for v in self.graph.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=tf_scope) if \"Adam\" not in v.name]\n for var in vars:\n var_remap[saved_scope + var.name[len(tf_scope):-2]] = var\n path = os.path.join(dir, \"model.ckpt\")\n saver = tf.train.Saver(var_list=var_remap)\n try:\n saver.restore(self._session, path)\n except tf.errors.NotFoundError as e:\n from tensorflow.contrib.framework.python.framework import checkpoint_utils\n logging.info(checkpoint_utils.list_variables(dir))\n raise e\n\n def as_default(self):\n return self._session.as_default()\n\n\nclass Trace(object):\n\n def __init__(self, index, directory):\n self.index = index\n self.directory = directory\n self.timeliner = None\n self.timeline_file = None\n self._default_simulation_context_manager = None\n\n def __enter__(self):\n self.timeline_file = os.path.join(self.directory, 'trace %d.json' % self.index)\n self.timeliner = Timeliner()\n\n if self._default_simulation_context_manager is None:\n self._default_simulation_context_manager = _trace_stack.get_controller(self)\n return self._default_simulation_context_manager.__enter__()\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.timeliner.save(self.timeline_file)\n\n self._default_simulation_context_manager.__exit__(exc_type, exc_val, exc_tb)\n self._default_simulation_context_manager = None\n\n\nclass _TraceStack(threading.local):\n\n def __init__(self):\n self.stack = []\n\n def get_default(self, raise_error=True):\n if raise_error:\n assert len(self.stack) > 0, \"Default simulation required. Use 'with simulation:' or 'with simulation.as_default():\"\n return self.stack[-1] if len(self.stack) >= 1 else None\n\n def reset(self):\n self.stack = []\n\n def is_cleared(self):\n return not self.stack\n\n @contextlib.contextmanager\n def get_controller(self, default):\n \"\"\"Returns a context manager for manipulating a default stack.\"\"\"\n try:\n self.stack.append(default)\n yield default\n finally:\n # stack may be empty if reset() was called\n if self.stack:\n self.stack.remove(default)\n\n\n_trace_stack = _TraceStack()\n\n\ndef _identity_in(obj, list):\n for item in list:\n if item is obj:\n return True\n return False\n"
] | [
[
"tensorflow.convert_to_tensor",
"tensorflow.sign",
"tensorflow.concat",
"tensorflow.is_finite",
"tensorflow.zeros",
"tensorflow.stack",
"tensorflow.reduce_sum",
"tensorflow.cast",
"tensorflow.minimum",
"tensorflow.equal",
"tensorflow.fft3d",
"tensorflow.get_default_graph",
"tensorflow.nn.conv1d",
"tensorflow.py_func",
"tensorflow.nn.conv2d",
"tensorflow.boolean_mask",
"tensorflow.while_loop",
"tensorflow.real",
"tensorflow.nn.moments",
"tensorflow.floor",
"tensorflow.to_complex128",
"tensorflow.tensor_scatter_add",
"tensorflow.fft",
"tensorflow.stop_gradient",
"tensorflow.ifft2d",
"tensorflow.gather",
"tensorflow.ifft3d",
"tensorflow.ceil",
"tensorflow.tensordot",
"tensorflow.div_no_nan",
"tensorflow.imag",
"tensorflow.tile",
"tensorflow.matmul",
"tensorflow.TensorShape",
"tensorflow.executing_eagerly",
"tensorflow.gather_nd",
"tensorflow.unstack",
"tensorflow.shape",
"tensorflow.reduce_any",
"tensorflow.random.uniform",
"tensorflow.identity",
"tensorflow.exp",
"tensorflow.zeros_like",
"tensorflow.nn.conv3d",
"tensorflow.reduce_prod",
"tensorflow.fft2d",
"tensorflow.RegisterGradient",
"tensorflow.round",
"numpy.array",
"tensorflow.clip_by_value",
"tensorflow.reduce_max",
"tensorflow.sin",
"tensorflow.cos",
"tensorflow.transpose",
"tensorflow.range",
"tensorflow.reduce_mean",
"tensorflow.ifft",
"tensorflow.maximum",
"tensorflow.reshape",
"tensorflow.ones_like",
"tensorflow.reduce_all",
"tensorflow.expand_dims",
"tensorflow.einsum",
"tensorflow.SparseTensor",
"tensorflow.mod",
"tensorflow.reduce_min",
"numpy.shape",
"tensorflow.variable_scope",
"tensorflow.to_complex64",
"tensorflow.sqrt",
"tensorflow.random.normal",
"tensorflow.abs"
],
[
"tensorflow.contrib.framework.python.framework.checkpoint_utils.list_variables"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
Nitinram23/text-to-image | [
"f819bed3dffbccd8e20b03741e3f67178729812b"
] | [
"Python 3 Codes/bert_embed.py"
] | [
"from bert_embedding import BertEmbedding\nimport numpy as np\nimport pickle\nimport argparse\nimport json\nimport os\nfrom os.path import join, isfile\nimport re\nimport h5py\n\ndef save_caption_vectors_flowers(data_dir):\n\timport time\n\t\n\timg_dir = join(data_dir, 'flowers/jpg')\n\timage_files = [f for f in os.listdir(img_dir) if 'jpg' in f]\n\t# print(image_files[300:400])\n\t# print(len(image_files))\n\timage_captions = { img_file : [] for img_file in image_files }\n\n\tcaption_dir = join(data_dir, 'flowers/text_c10')\n\tclass_dirs = []\n\tfor i in range(1, 103):\n\t\tclass_dir_name = 'class_%.5d'%(i)\n\t\tclass_dirs.append( join(caption_dir, class_dir_name))\n\n\tfor class_dir in class_dirs:\n\t\tcaption_files = [f for f in os.listdir(class_dir) if 'txt' in f]\n\t\tfor cap_file in caption_files:\n\t\t\twith open(join(class_dir,cap_file)) as f:\n\t\t\t\tcaptions = f.read().split('\\n')\n\t\t\timg_file = cap_file[0:11] + \".jpg\"\n\t\t\t# 5 captions per image\n\t\t\timage_captions[img_file] += [cap for cap in captions if len(cap) > 0][0:5]\n\n\tencoded_captions = {}\n\tbert_embedding = BertEmbedding()\n\t\n\tfor i, img in enumerate(image_captions):\n\t\tst = time.time()\n\t\tembed_list = []\n\t\tembed_sum = np.zeros(768)\n\t\tembedding = bert_embedding(image_captions[img],'avg')\n\t\tfor sent in range(len(image_captions[img])):\n\t\t\tword_embed_list = embedding[sent][1]\n\t\t\tfor word_embed in word_embed_list:\n\t\t\t\tembed_sum += word_embed\n\t\t\tembed_list.append(embed_sum/len(word_embed_list))\n\t\tembed_list_np = np.asarray(embed_list)\n\t\tencoded_captions[img] = embed_list_np\n\t\tprint(i, len(image_captions), img)\n\t\tprint(\"Seconds\", time.time() - st)\n\t\t\n\th = h5py.File(join(data_dir, 'flower_bert.hdf5'))\n\tfor key in encoded_captions:\n\t\th.create_dataset(key, data=encoded_captions[key])\n\th.close()\n\n\ndef main():\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('--split', type=str, default='train',\n help='train/val')\n\tparser.add_argument('--data_dir', type=str, default='Data',\n help='Data directory')\n\tparser.add_argument('--batch_size', type=int, default=64,\n help='Batch Size')\n\tparser.add_argument('--data_set', type=str, default='flowers',\n help='Data Set : Flowers, MS-COCO')\n\targs = parser.parse_args()\n\t\n\tif args.data_set == 'flowers':\n\t\tsave_caption_vectors_flowers(args.data_dir)\n\telse:\n\t\tprint('incorrect data')\n\nif __name__ == '__main__':\n\tmain()\n\n"
] | [
[
"numpy.asarray",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
developing-coder/pandas | [
"9feb3ad92cc0397a04b665803a49299ee7aa1037",
"9feb3ad92cc0397a04b665803a49299ee7aa1037",
"9feb3ad92cc0397a04b665803a49299ee7aa1037",
"9feb3ad92cc0397a04b665803a49299ee7aa1037",
"9feb3ad92cc0397a04b665803a49299ee7aa1037",
"9feb3ad92cc0397a04b665803a49299ee7aa1037",
"9feb3ad92cc0397a04b665803a49299ee7aa1037",
"9feb3ad92cc0397a04b665803a49299ee7aa1037",
"9feb3ad92cc0397a04b665803a49299ee7aa1037",
"9feb3ad92cc0397a04b665803a49299ee7aa1037",
"9feb3ad92cc0397a04b665803a49299ee7aa1037",
"9feb3ad92cc0397a04b665803a49299ee7aa1037",
"9feb3ad92cc0397a04b665803a49299ee7aa1037"
] | [
"pandas/tests/reshape/test_cut.py",
"pandas/tests/internals/test_internals.py",
"pandas/core/strings.py",
"asv_bench/benchmarks/categoricals.py",
"pandas/tests/arrays/categorical/test_warnings.py",
"pandas/_config/display.py",
"pandas/tests/util/test_assert_produces_warning.py",
"asv_bench/benchmarks/io/stata.py",
"pandas/tests/indexes/datetimes/test_partial_slicing.py",
"pandas/tests/indexes/timedeltas/test_arithmetic.py",
"pandas/tests/tseries/holiday/test_observance.py",
"pandas/tests/indexes/period/test_construction.py",
"asv_bench/benchmarks/ctors.py"
] | [
"import numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import (\n Categorical, DataFrame, DatetimeIndex, Index, Interval, IntervalIndex,\n Series, TimedeltaIndex, Timestamp, cut, date_range, isna, qcut,\n timedelta_range, to_datetime)\nfrom pandas.api.types import CategoricalDtype as CDT\nimport pandas.core.reshape.tile as tmod\nimport pandas.util.testing as tm\n\n\ndef test_simple():\n data = np.ones(5, dtype=\"int64\")\n result = cut(data, 4, labels=False)\n\n expected = np.array([1, 1, 1, 1, 1])\n tm.assert_numpy_array_equal(result, expected, check_dtype=False)\n\n\ndef test_bins():\n data = np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1])\n result, bins = cut(data, 3, retbins=True)\n\n intervals = IntervalIndex.from_breaks(bins.round(3))\n intervals = intervals.take([0, 0, 0, 1, 2, 0])\n expected = Categorical(intervals, ordered=True)\n\n tm.assert_categorical_equal(result, expected)\n tm.assert_almost_equal(bins, np.array([0.1905, 3.36666667,\n 6.53333333, 9.7]))\n\n\ndef test_right():\n data = np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1, 2.575])\n result, bins = cut(data, 4, right=True, retbins=True)\n\n intervals = IntervalIndex.from_breaks(bins.round(3))\n expected = Categorical(intervals, ordered=True)\n expected = expected.take([0, 0, 0, 2, 3, 0, 0])\n\n tm.assert_categorical_equal(result, expected)\n tm.assert_almost_equal(bins, np.array([0.1905, 2.575, 4.95, 7.325, 9.7]))\n\n\ndef test_no_right():\n data = np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1, 2.575])\n result, bins = cut(data, 4, right=False, retbins=True)\n\n intervals = IntervalIndex.from_breaks(bins.round(3), closed=\"left\")\n intervals = intervals.take([0, 0, 0, 2, 3, 0, 1])\n expected = Categorical(intervals, ordered=True)\n\n tm.assert_categorical_equal(result, expected)\n tm.assert_almost_equal(bins, np.array([0.2, 2.575, 4.95, 7.325, 9.7095]))\n\n\ndef test_array_like():\n data = [.2, 1.4, 2.5, 6.2, 9.7, 2.1]\n result, bins = cut(data, 3, retbins=True)\n\n intervals = IntervalIndex.from_breaks(bins.round(3))\n intervals = intervals.take([0, 0, 0, 1, 2, 0])\n expected = Categorical(intervals, ordered=True)\n\n tm.assert_categorical_equal(result, expected)\n tm.assert_almost_equal(bins, np.array([0.1905, 3.36666667,\n 6.53333333, 9.7]))\n\n\ndef test_bins_from_interval_index():\n c = cut(range(5), 3)\n expected = c\n result = cut(range(5), bins=expected.categories)\n tm.assert_categorical_equal(result, expected)\n\n expected = Categorical.from_codes(np.append(c.codes, -1),\n categories=c.categories,\n ordered=True)\n result = cut(range(6), bins=expected.categories)\n tm.assert_categorical_equal(result, expected)\n\n\ndef test_bins_from_interval_index_doc_example():\n # Make sure we preserve the bins.\n ages = np.array([10, 15, 13, 12, 23, 25, 28, 59, 60])\n c = cut(ages, bins=[0, 18, 35, 70])\n expected = IntervalIndex.from_tuples([(0, 18), (18, 35), (35, 70)])\n tm.assert_index_equal(c.categories, expected)\n\n result = cut([25, 20, 50], bins=c.categories)\n tm.assert_index_equal(result.categories, expected)\n tm.assert_numpy_array_equal(result.codes,\n np.array([1, 1, 2], dtype=\"int8\"))\n\n\ndef test_bins_not_overlapping_from_interval_index():\n # see gh-23980\n msg = \"Overlapping IntervalIndex is not accepted\"\n ii = IntervalIndex.from_tuples([(0, 10), (2, 12), (4, 14)])\n\n with pytest.raises(ValueError, match=msg):\n cut([5, 6], bins=ii)\n\n\ndef test_bins_not_monotonic():\n msg = \"bins must increase monotonically\"\n data = [.2, 1.4, 2.5, 6.2, 9.7, 2.1]\n\n with pytest.raises(ValueError, match=msg):\n 
cut(data, [0.1, 1.5, 1, 10])\n\n\[email protected](\"x, bins, expected\", [\n (date_range(\"2017-12-31\", periods=3),\n [Timestamp.min, Timestamp('2018-01-01'), Timestamp.max],\n IntervalIndex.from_tuples([\n (Timestamp.min, Timestamp('2018-01-01')),\n (Timestamp('2018-01-01'), Timestamp.max)])),\n\n ([-1, 0, 1],\n np.array([np.iinfo(np.int64).min, 0, np.iinfo(np.int64).max],\n dtype=\"int64\"),\n IntervalIndex.from_tuples([\n (np.iinfo(np.int64).min, 0),\n (0, np.iinfo(np.int64).max)])),\n\n ([np.timedelta64(-1), np.timedelta64(0), np.timedelta64(1)],\n np.array([\n np.timedelta64(-np.iinfo(np.int64).max),\n np.timedelta64(0),\n np.timedelta64(np.iinfo(np.int64).max)]),\n IntervalIndex.from_tuples([\n (np.timedelta64(-np.iinfo(np.int64).max), np.timedelta64(0)),\n (np.timedelta64(0), np.timedelta64(np.iinfo(np.int64).max))])),\n])\ndef test_bins_monotonic_not_overflowing(x, bins, expected):\n # GH 26045\n result = cut(x, bins)\n tm.assert_index_equal(result.categories, expected)\n\n\ndef test_wrong_num_labels():\n msg = \"Bin labels must be one fewer than the number of bin edges\"\n data = [.2, 1.4, 2.5, 6.2, 9.7, 2.1]\n\n with pytest.raises(ValueError, match=msg):\n cut(data, [0, 1, 10], labels=[\"foo\", \"bar\", \"baz\"])\n\n\[email protected](\"x,bins,msg\", [\n ([], 2, \"Cannot cut empty array\"),\n ([1, 2, 3], 0.5, \"`bins` should be a positive integer\")\n])\ndef test_cut_corner(x, bins, msg):\n with pytest.raises(ValueError, match=msg):\n cut(x, bins)\n\n\[email protected](\"arg\", [2, np.eye(2), DataFrame(np.eye(2))])\[email protected](\"cut_func\", [cut, qcut])\ndef test_cut_not_1d_arg(arg, cut_func):\n msg = \"Input array must be 1 dimensional\"\n with pytest.raises(ValueError, match=msg):\n cut_func(arg, 2)\n\n\[email protected]('data', [\n [0, 1, 2, 3, 4, np.inf],\n [-np.inf, 0, 1, 2, 3, 4],\n [-np.inf, 0, 1, 2, 3, 4, np.inf]])\ndef test_int_bins_with_inf(data):\n # GH 24314\n msg = 'cannot specify integer `bins` when input data contains infinity'\n with pytest.raises(ValueError, match=msg):\n cut(data, bins=3)\n\n\ndef test_cut_out_of_range_more():\n # see gh-1511\n name = \"x\"\n\n ser = Series([0, -1, 0, 1, -3], name=name)\n ind = cut(ser, [0, 1], labels=False)\n\n exp = Series([np.nan, np.nan, np.nan, 0, np.nan], name=name)\n tm.assert_series_equal(ind, exp)\n\n\[email protected](\"right,breaks,closed\", [\n (True, [-1e-3, 0.25, 0.5, 0.75, 1], \"right\"),\n (False, [0, 0.25, 0.5, 0.75, 1 + 1e-3], \"left\")\n])\ndef test_labels(right, breaks, closed):\n arr = np.tile(np.arange(0, 1.01, 0.1), 4)\n\n result, bins = cut(arr, 4, retbins=True, right=right)\n ex_levels = IntervalIndex.from_breaks(breaks, closed=closed)\n tm.assert_index_equal(result.categories, ex_levels)\n\n\ndef test_cut_pass_series_name_to_factor():\n name = \"foo\"\n ser = Series(np.random.randn(100), name=name)\n\n factor = cut(ser, 4)\n assert factor.name == name\n\n\ndef test_label_precision():\n arr = np.arange(0, 0.73, 0.01)\n result = cut(arr, 4, precision=2)\n\n ex_levels = IntervalIndex.from_breaks([-0.00072, 0.18, 0.36, 0.54, 0.72])\n tm.assert_index_equal(result.categories, ex_levels)\n\n\[email protected](\"labels\", [None, False])\ndef test_na_handling(labels):\n arr = np.arange(0, 0.75, 0.01)\n arr[::3] = np.nan\n\n result = cut(arr, 4, labels=labels)\n result = np.asarray(result)\n\n expected = np.where(isna(arr), np.nan, result)\n tm.assert_almost_equal(result, expected)\n\n\ndef test_inf_handling():\n data = np.arange(6)\n data_ser = Series(data, dtype=\"int64\")\n\n bins = [-np.inf, 2, 4, 
np.inf]\n result = cut(data, bins)\n result_ser = cut(data_ser, bins)\n\n ex_uniques = IntervalIndex.from_breaks(bins)\n tm.assert_index_equal(result.categories, ex_uniques)\n\n assert result[5] == Interval(4, np.inf)\n assert result[0] == Interval(-np.inf, 2)\n assert result_ser[5] == Interval(4, np.inf)\n assert result_ser[0] == Interval(-np.inf, 2)\n\n\ndef test_cut_out_of_bounds():\n arr = np.random.randn(100)\n result = cut(arr, [-1, 0, 1])\n\n mask = isna(result)\n ex_mask = (arr < -1) | (arr > 1)\n tm.assert_numpy_array_equal(mask, ex_mask)\n\n\[email protected](\"get_labels,get_expected\", [\n (lambda labels: labels,\n lambda labels: Categorical([\"Medium\"] + 4 * [\"Small\"] +\n [\"Medium\", \"Large\"],\n categories=labels, ordered=True)),\n (lambda labels: Categorical.from_codes([0, 1, 2], labels),\n lambda labels: Categorical.from_codes([1] + 4 * [0] + [1, 2], labels))\n])\ndef test_cut_pass_labels(get_labels, get_expected):\n bins = [0, 25, 50, 100]\n arr = [50, 5, 10, 15, 20, 30, 70]\n labels = [\"Small\", \"Medium\", \"Large\"]\n\n result = cut(arr, bins, labels=get_labels(labels))\n tm.assert_categorical_equal(result, get_expected(labels))\n\n\ndef test_cut_pass_labels_compat():\n # see gh-16459\n arr = [50, 5, 10, 15, 20, 30, 70]\n labels = [\"Good\", \"Medium\", \"Bad\"]\n\n result = cut(arr, 3, labels=labels)\n exp = cut(arr, 3, labels=Categorical(labels, categories=labels,\n ordered=True))\n tm.assert_categorical_equal(result, exp)\n\n\[email protected](\"x\", [np.arange(11.), np.arange(11.) / 1e10])\ndef test_round_frac_just_works(x):\n # It works.\n cut(x, 2)\n\n\[email protected](\"val,precision,expected\", [\n (-117.9998, 3, -118),\n (117.9998, 3, 118),\n (117.9998, 2, 118),\n (0.000123456, 2, 0.00012)\n])\ndef test_round_frac(val, precision, expected):\n # see gh-1979\n result = tmod._round_frac(val, precision=precision)\n assert result == expected\n\n\ndef test_cut_return_intervals():\n ser = Series([0, 1, 2, 3, 4, 5, 6, 7, 8])\n result = cut(ser, 3)\n\n exp_bins = np.linspace(0, 8, num=4).round(3)\n exp_bins[0] -= 0.008\n\n expected = Series(IntervalIndex.from_breaks(exp_bins, closed=\"right\").take(\n [0, 0, 0, 1, 1, 1, 2, 2, 2])).astype(CDT(ordered=True))\n tm.assert_series_equal(result, expected)\n\n\ndef test_series_ret_bins():\n # see gh-8589\n ser = Series(np.arange(4))\n result, bins = cut(ser, 2, retbins=True)\n\n expected = Series(IntervalIndex.from_breaks(\n [-0.003, 1.5, 3], closed=\"right\").repeat(2)).astype(CDT(ordered=True))\n tm.assert_series_equal(result, expected)\n\n\[email protected](\"kwargs,msg\", [\n (dict(duplicates=\"drop\"), None),\n (dict(), \"Bin edges must be unique\"),\n (dict(duplicates=\"raise\"), \"Bin edges must be unique\"),\n (dict(duplicates=\"foo\"), \"invalid value for 'duplicates' parameter\")\n])\ndef test_cut_duplicates_bin(kwargs, msg):\n # see gh-20947\n bins = [0, 2, 4, 6, 10, 10]\n values = Series(np.array([1, 3, 5, 7, 9]), index=[\"a\", \"b\", \"c\", \"d\", \"e\"])\n\n if msg is not None:\n with pytest.raises(ValueError, match=msg):\n cut(values, bins, **kwargs)\n else:\n result = cut(values, bins, **kwargs)\n expected = cut(values, pd.unique(bins))\n tm.assert_series_equal(result, expected)\n\n\[email protected](\"data\", [9.0, -9.0, 0.0])\[email protected](\"length\", [1, 2])\ndef test_single_bin(data, length):\n # see gh-14652, gh-15428\n ser = Series([data] * length)\n result = cut(ser, 1, labels=False)\n\n expected = Series([0] * length)\n tm.assert_series_equal(result, expected)\n\n\[email protected](\n 
\"array_1_writeable,array_2_writeable\",\n [(True, True), (True, False), (False, False)])\ndef test_cut_read_only(array_1_writeable, array_2_writeable):\n # issue 18773\n array_1 = np.arange(0, 100, 10)\n array_1.flags.writeable = array_1_writeable\n\n array_2 = np.arange(0, 100, 10)\n array_2.flags.writeable = array_2_writeable\n\n hundred_elements = np.arange(100)\n tm.assert_categorical_equal(cut(hundred_elements, array_1),\n cut(hundred_elements, array_2))\n\n\[email protected](\"conv\", [\n lambda v: Timestamp(v),\n lambda v: to_datetime(v),\n lambda v: np.datetime64(v),\n lambda v: Timestamp(v).to_pydatetime(),\n])\ndef test_datetime_bin(conv):\n data = [np.datetime64(\"2012-12-13\"), np.datetime64(\"2012-12-15\")]\n bin_data = [\"2012-12-12\", \"2012-12-14\", \"2012-12-16\"]\n\n expected = Series(IntervalIndex([\n Interval(Timestamp(bin_data[0]), Timestamp(bin_data[1])),\n Interval(Timestamp(bin_data[1]), Timestamp(bin_data[2]))])).astype(\n CDT(ordered=True))\n\n bins = [conv(v) for v in bin_data]\n result = Series(cut(data, bins=bins))\n tm.assert_series_equal(result, expected)\n\n\[email protected](\"data\", [\n to_datetime(Series([\"2013-01-01\", \"2013-01-02\", \"2013-01-03\"])),\n [np.datetime64(\"2013-01-01\"), np.datetime64(\"2013-01-02\"),\n np.datetime64(\"2013-01-03\")],\n np.array([np.datetime64(\"2013-01-01\"), np.datetime64(\"2013-01-02\"),\n np.datetime64(\"2013-01-03\")]),\n DatetimeIndex([\"2013-01-01\", \"2013-01-02\", \"2013-01-03\"])\n])\ndef test_datetime_cut(data):\n # see gh-14714\n #\n # Testing time data when it comes in various collection types.\n result, _ = cut(data, 3, retbins=True)\n expected = Series(IntervalIndex([\n Interval(Timestamp(\"2012-12-31 23:57:07.200000\"),\n Timestamp(\"2013-01-01 16:00:00\")),\n Interval(Timestamp(\"2013-01-01 16:00:00\"),\n Timestamp(\"2013-01-02 08:00:00\")),\n Interval(Timestamp(\"2013-01-02 08:00:00\"),\n Timestamp(\"2013-01-03 00:00:00\"))])).astype(CDT(ordered=True))\n tm.assert_series_equal(Series(result), expected)\n\n\[email protected](\"bins\", [\n 3, [Timestamp(\"2013-01-01 04:57:07.200000\"),\n Timestamp(\"2013-01-01 21:00:00\"),\n Timestamp(\"2013-01-02 13:00:00\"),\n Timestamp(\"2013-01-03 05:00:00\")]])\[email protected](\"box\", [list, np.array, Index, Series])\ndef test_datetime_tz_cut(bins, box):\n # see gh-19872\n tz = \"US/Eastern\"\n s = Series(date_range(\"20130101\", periods=3, tz=tz))\n\n if not isinstance(bins, int):\n bins = box(bins)\n\n result = cut(s, bins)\n expected = Series(IntervalIndex([\n Interval(Timestamp(\"2012-12-31 23:57:07.200000\", tz=tz),\n Timestamp(\"2013-01-01 16:00:00\", tz=tz)),\n Interval(Timestamp(\"2013-01-01 16:00:00\", tz=tz),\n Timestamp(\"2013-01-02 08:00:00\", tz=tz)),\n Interval(Timestamp(\"2013-01-02 08:00:00\", tz=tz),\n Timestamp(\"2013-01-03 00:00:00\", tz=tz))])).astype(\n CDT(ordered=True))\n tm.assert_series_equal(result, expected)\n\n\ndef test_datetime_nan_error():\n msg = \"bins must be of datetime64 dtype\"\n\n with pytest.raises(ValueError, match=msg):\n cut(date_range(\"20130101\", periods=3), bins=[0, 2, 4])\n\n\ndef test_datetime_nan_mask():\n result = cut(date_range(\"20130102\", periods=5),\n bins=date_range(\"20130101\", periods=2))\n\n mask = result.categories.isna()\n tm.assert_numpy_array_equal(mask, np.array([False]))\n\n mask = result.isna()\n tm.assert_numpy_array_equal(mask, np.array([False, True, True,\n True, True]))\n\n\[email protected](\"tz\", [None, \"UTC\", \"US/Pacific\"])\ndef test_datetime_cut_roundtrip(tz):\n # see gh-19891\n ser 
= Series(date_range(\"20180101\", periods=3, tz=tz))\n result, result_bins = cut(ser, 2, retbins=True)\n\n expected = cut(ser, result_bins)\n tm.assert_series_equal(result, expected)\n\n expected_bins = DatetimeIndex([\"2017-12-31 23:57:07.200000\",\n \"2018-01-02 00:00:00\",\n \"2018-01-03 00:00:00\"])\n expected_bins = expected_bins.tz_localize(tz)\n tm.assert_index_equal(result_bins, expected_bins)\n\n\ndef test_timedelta_cut_roundtrip():\n # see gh-19891\n ser = Series(timedelta_range(\"1day\", periods=3))\n result, result_bins = cut(ser, 2, retbins=True)\n\n expected = cut(ser, result_bins)\n tm.assert_series_equal(result, expected)\n\n expected_bins = TimedeltaIndex([\"0 days 23:57:07.200000\",\n \"2 days 00:00:00\",\n \"3 days 00:00:00\"])\n tm.assert_index_equal(result_bins, expected_bins)\n",
"from collections import OrderedDict\nfrom datetime import date, datetime\nfrom distutils.version import LooseVersion\nimport itertools\nimport operator\nimport re\nimport sys\n\nimport numpy as np\nimport pytest\n\nfrom pandas._libs.internals import BlockPlacement\nfrom pandas.compat import lrange\n\nimport pandas as pd\nfrom pandas import (\n Categorical, DataFrame, DatetimeIndex, Index, MultiIndex, Series,\n SparseArray)\nimport pandas.core.algorithms as algos\nfrom pandas.core.arrays import DatetimeArray, TimedeltaArray\nfrom pandas.core.internals import BlockManager, SingleBlockManager, make_block\nimport pandas.util.testing as tm\nfrom pandas.util.testing import (\n assert_almost_equal, assert_frame_equal, assert_series_equal, randn)\n\n# in 3.6.1 a c-api slicing function changed, see src/compat_helper.h\nPY361 = LooseVersion(sys.version) >= LooseVersion('3.6.1')\n\n\[email protected]\ndef mgr():\n return create_mgr(\n 'a: f8; b: object; c: f8; d: object; e: f8;'\n 'f: bool; g: i8; h: complex; i: datetime-1; j: datetime-2;'\n 'k: M8[ns, US/Eastern]; l: M8[ns, CET];')\n\n\ndef assert_block_equal(left, right):\n tm.assert_numpy_array_equal(left.values, right.values)\n assert left.dtype == right.dtype\n assert isinstance(left.mgr_locs, BlockPlacement)\n assert isinstance(right.mgr_locs, BlockPlacement)\n tm.assert_numpy_array_equal(left.mgr_locs.as_array,\n right.mgr_locs.as_array)\n\n\ndef get_numeric_mat(shape):\n arr = np.arange(shape[0])\n return np.lib.stride_tricks.as_strided(x=arr, shape=shape, strides=(\n arr.itemsize, ) + (0, ) * (len(shape) - 1)).copy()\n\n\nN = 10\n\n\ndef create_block(typestr, placement, item_shape=None, num_offset=0):\n \"\"\"\n Supported typestr:\n\n * float, f8, f4, f2\n * int, i8, i4, i2, i1\n * uint, u8, u4, u2, u1\n * complex, c16, c8\n * bool\n * object, string, O\n * datetime, dt, M8[ns], M8[ns, tz]\n * timedelta, td, m8[ns]\n * sparse (SparseArray with fill_value=0.0)\n * sparse_na (SparseArray with fill_value=np.nan)\n * category, category2\n\n \"\"\"\n placement = BlockPlacement(placement)\n num_items = len(placement)\n\n if item_shape is None:\n item_shape = (N, )\n\n shape = (num_items, ) + item_shape\n\n mat = get_numeric_mat(shape)\n\n if typestr in ('float', 'f8', 'f4', 'f2', 'int', 'i8', 'i4', 'i2', 'i1',\n 'uint', 'u8', 'u4', 'u2', 'u1'):\n values = mat.astype(typestr) + num_offset\n elif typestr in ('complex', 'c16', 'c8'):\n values = 1.j * (mat.astype(typestr) + num_offset)\n elif typestr in ('object', 'string', 'O'):\n values = np.reshape(['A%d' % i for i in mat.ravel() + num_offset],\n shape)\n elif typestr in ('b', 'bool', ):\n values = np.ones(shape, dtype=np.bool_)\n elif typestr in ('datetime', 'dt', 'M8[ns]'):\n values = (mat * 1e9).astype('M8[ns]')\n elif typestr.startswith('M8[ns'):\n # datetime with tz\n m = re.search(r'M8\\[ns,\\s*(\\w+\\/?\\w*)\\]', typestr)\n assert m is not None, \"incompatible typestr -> {0}\".format(typestr)\n tz = m.groups()[0]\n assert num_items == 1, \"must have only 1 num items for a tz-aware\"\n values = DatetimeIndex(np.arange(N) * 1e9, tz=tz)\n elif typestr in ('timedelta', 'td', 'm8[ns]'):\n values = (mat * 1).astype('m8[ns]')\n elif typestr in ('category', ):\n values = Categorical([1, 1, 2, 2, 3, 3, 3, 3, 4, 4])\n elif typestr in ('category2', ):\n values = Categorical(['a', 'a', 'a', 'a', 'b', 'b', 'c', 'c', 'c', 'd'\n ])\n elif typestr in ('sparse', 'sparse_na'):\n # FIXME: doesn't support num_rows != 10\n assert shape[-1] == 10\n assert all(s == 1 for s in shape[:-1])\n if 
typestr.endswith('_na'):\n fill_value = np.nan\n else:\n fill_value = 0.0\n values = SparseArray([fill_value, fill_value, 1, 2, 3, fill_value,\n 4, 5, fill_value, 6], fill_value=fill_value)\n arr = values.sp_values.view()\n arr += (num_offset - 1)\n else:\n raise ValueError('Unsupported typestr: \"%s\"' % typestr)\n\n return make_block(values, placement=placement, ndim=len(shape))\n\n\ndef create_single_mgr(typestr, num_rows=None):\n if num_rows is None:\n num_rows = N\n\n return SingleBlockManager(\n create_block(typestr, placement=slice(0, num_rows), item_shape=()),\n np.arange(num_rows))\n\n\ndef create_mgr(descr, item_shape=None):\n \"\"\"\n Construct BlockManager from string description.\n\n String description syntax looks similar to np.matrix initializer. It looks\n like this::\n\n a,b,c: f8; d,e,f: i8\n\n Rules are rather simple:\n\n * see list of supported datatypes in `create_block` method\n * components are semicolon-separated\n * each component is `NAME,NAME,NAME: DTYPE_ID`\n * whitespace around colons & semicolons are removed\n * components with same DTYPE_ID are combined into single block\n * to force multiple blocks with same dtype, use '-SUFFIX'::\n\n 'a:f8-1; b:f8-2; c:f8-foobar'\n\n \"\"\"\n if item_shape is None:\n item_shape = (N, )\n\n offset = 0\n mgr_items = []\n block_placements = OrderedDict()\n for d in descr.split(';'):\n d = d.strip()\n if not len(d):\n continue\n names, blockstr = d.partition(':')[::2]\n blockstr = blockstr.strip()\n names = names.strip().split(',')\n\n mgr_items.extend(names)\n placement = list(np.arange(len(names)) + offset)\n try:\n block_placements[blockstr].extend(placement)\n except KeyError:\n block_placements[blockstr] = placement\n offset += len(names)\n\n mgr_items = Index(mgr_items)\n\n blocks = []\n num_offset = 0\n for blockstr, placement in block_placements.items():\n typestr = blockstr.split('-')[0]\n blocks.append(create_block(typestr,\n placement,\n item_shape=item_shape,\n num_offset=num_offset, ))\n num_offset += len(placement)\n\n return BlockManager(sorted(blocks, key=lambda b: b.mgr_locs[0]),\n [mgr_items] + [np.arange(n) for n in item_shape])\n\n\nclass TestBlock:\n\n def setup_method(self, method):\n # self.fblock = get_float_ex() # a,c,e\n # self.cblock = get_complex_ex() #\n # self.oblock = get_obj_ex()\n # self.bool_block = get_bool_ex()\n # self.int_block = get_int_ex()\n\n self.fblock = create_block('float', [0, 2, 4])\n self.cblock = create_block('complex', [7])\n self.oblock = create_block('object', [1, 3])\n self.bool_block = create_block('bool', [5])\n self.int_block = create_block('int', [6])\n\n def test_constructor(self):\n int32block = create_block('i4', [0])\n assert int32block.dtype == np.int32\n\n def test_pickle(self):\n def _check(blk):\n assert_block_equal(tm.round_trip_pickle(blk), blk)\n\n _check(self.fblock)\n _check(self.cblock)\n _check(self.oblock)\n _check(self.bool_block)\n\n def test_mgr_locs(self):\n assert isinstance(self.fblock.mgr_locs, BlockPlacement)\n tm.assert_numpy_array_equal(self.fblock.mgr_locs.as_array,\n np.array([0, 2, 4], dtype=np.int64))\n\n def test_attrs(self):\n assert self.fblock.shape == self.fblock.values.shape\n assert self.fblock.dtype == self.fblock.values.dtype\n assert len(self.fblock) == len(self.fblock.values)\n\n def test_merge(self):\n avals = randn(2, 10)\n bvals = randn(2, 10)\n\n ref_cols = Index(['e', 'a', 'b', 'd', 'f'])\n\n ablock = make_block(avals, ref_cols.get_indexer(['e', 'b']))\n bblock = make_block(bvals, ref_cols.get_indexer(['a', 'd']))\n merged = 
ablock.merge(bblock)\n tm.assert_numpy_array_equal(merged.mgr_locs.as_array,\n np.array([0, 1, 2, 3], dtype=np.int64))\n tm.assert_numpy_array_equal(merged.values[[0, 2]], np.array(avals))\n tm.assert_numpy_array_equal(merged.values[[1, 3]], np.array(bvals))\n\n # TODO: merge with mixed type?\n\n def test_copy(self):\n cop = self.fblock.copy()\n assert cop is not self.fblock\n assert_block_equal(self.fblock, cop)\n\n def test_reindex_index(self):\n pass\n\n def test_reindex_cast(self):\n pass\n\n def test_insert(self):\n pass\n\n def test_delete(self):\n newb = self.fblock.copy()\n newb.delete(0)\n assert isinstance(newb.mgr_locs, BlockPlacement)\n tm.assert_numpy_array_equal(newb.mgr_locs.as_array,\n np.array([2, 4], dtype=np.int64))\n assert (newb.values[0] == 1).all()\n\n newb = self.fblock.copy()\n newb.delete(1)\n assert isinstance(newb.mgr_locs, BlockPlacement)\n tm.assert_numpy_array_equal(newb.mgr_locs.as_array,\n np.array([0, 4], dtype=np.int64))\n assert (newb.values[1] == 2).all()\n\n newb = self.fblock.copy()\n newb.delete(2)\n tm.assert_numpy_array_equal(newb.mgr_locs.as_array,\n np.array([0, 2], dtype=np.int64))\n assert (newb.values[1] == 1).all()\n\n newb = self.fblock.copy()\n with pytest.raises(Exception):\n newb.delete(3)\n\n def test_make_block_same_class(self):\n # issue 19431\n block = create_block('M8[ns, US/Eastern]', [3])\n with tm.assert_produces_warning(DeprecationWarning,\n check_stacklevel=False):\n block.make_block_same_class(block.values,\n dtype=block.values.dtype)\n\n\nclass TestDatetimeBlock:\n\n def test_try_coerce_arg(self):\n block = create_block('datetime', [0])\n\n # coerce None\n none_coerced = block._try_coerce_args(block.values, None)[1]\n assert pd.Timestamp(none_coerced) is pd.NaT\n\n # coerce different types of date bojects\n vals = (np.datetime64('2010-10-10'), datetime(2010, 10, 10),\n date(2010, 10, 10))\n for val in vals:\n coerced = block._try_coerce_args(block.values, val)[1]\n assert np.int64 == type(coerced)\n assert pd.Timestamp('2010-10-10') == pd.Timestamp(coerced)\n\n\nclass TestBlockManager:\n\n def test_constructor_corner(self):\n pass\n\n def test_attrs(self):\n mgr = create_mgr('a,b,c: f8-1; d,e,f: f8-2')\n assert mgr.nblocks == 2\n assert len(mgr) == 6\n\n def test_is_mixed_dtype(self):\n assert not create_mgr('a,b:f8').is_mixed_type\n assert not create_mgr('a:f8-1; b:f8-2').is_mixed_type\n\n assert create_mgr('a,b:f8; c,d: f4').is_mixed_type\n assert create_mgr('a,b:f8; c,d: object').is_mixed_type\n\n def test_duplicate_ref_loc_failure(self):\n tmp_mgr = create_mgr('a:bool; a: f8')\n\n axes, blocks = tmp_mgr.axes, tmp_mgr.blocks\n\n blocks[0].mgr_locs = np.array([0])\n blocks[1].mgr_locs = np.array([0])\n\n # test trying to create block manager with overlapping ref locs\n with pytest.raises(AssertionError):\n BlockManager(blocks, axes)\n\n blocks[0].mgr_locs = np.array([0])\n blocks[1].mgr_locs = np.array([1])\n mgr = BlockManager(blocks, axes)\n mgr.iget(1)\n\n def test_contains(self, mgr):\n assert 'a' in mgr\n assert 'baz' not in mgr\n\n def test_pickle(self, mgr):\n\n mgr2 = tm.round_trip_pickle(mgr)\n assert_frame_equal(DataFrame(mgr), DataFrame(mgr2))\n\n # share ref_items\n # assert mgr2.blocks[0].ref_items is mgr2.blocks[1].ref_items\n\n # GH2431\n assert hasattr(mgr2, \"_is_consolidated\")\n assert hasattr(mgr2, \"_known_consolidated\")\n\n # reset to False on load\n assert not mgr2._is_consolidated\n assert not mgr2._known_consolidated\n\n def test_non_unique_pickle(self):\n\n mgr = create_mgr('a,a,a:f8')\n mgr2 = 
tm.round_trip_pickle(mgr)\n assert_frame_equal(DataFrame(mgr), DataFrame(mgr2))\n\n mgr = create_mgr('a: f8; a: i8')\n mgr2 = tm.round_trip_pickle(mgr)\n assert_frame_equal(DataFrame(mgr), DataFrame(mgr2))\n\n def test_categorical_block_pickle(self):\n mgr = create_mgr('a: category')\n mgr2 = tm.round_trip_pickle(mgr)\n assert_frame_equal(DataFrame(mgr), DataFrame(mgr2))\n\n smgr = create_single_mgr('category')\n smgr2 = tm.round_trip_pickle(smgr)\n assert_series_equal(Series(smgr), Series(smgr2))\n\n def test_get(self):\n cols = Index(list('abc'))\n values = np.random.rand(3, 3)\n block = make_block(values=values.copy(), placement=np.arange(3))\n mgr = BlockManager(blocks=[block], axes=[cols, np.arange(3)])\n\n assert_almost_equal(mgr.get('a', fastpath=False), values[0])\n assert_almost_equal(mgr.get('b', fastpath=False), values[1])\n assert_almost_equal(mgr.get('c', fastpath=False), values[2])\n assert_almost_equal(mgr.get('a').internal_values(), values[0])\n assert_almost_equal(mgr.get('b').internal_values(), values[1])\n assert_almost_equal(mgr.get('c').internal_values(), values[2])\n\n def test_set(self):\n mgr = create_mgr('a,b,c: int', item_shape=(3, ))\n\n mgr.set('d', np.array(['foo'] * 3))\n mgr.set('b', np.array(['bar'] * 3))\n tm.assert_numpy_array_equal(mgr.get('a').internal_values(),\n np.array([0] * 3))\n tm.assert_numpy_array_equal(mgr.get('b').internal_values(),\n np.array(['bar'] * 3, dtype=np.object_))\n tm.assert_numpy_array_equal(mgr.get('c').internal_values(),\n np.array([2] * 3))\n tm.assert_numpy_array_equal(mgr.get('d').internal_values(),\n np.array(['foo'] * 3, dtype=np.object_))\n\n def test_set_change_dtype(self, mgr):\n mgr.set('baz', np.zeros(N, dtype=bool))\n\n mgr.set('baz', np.repeat('foo', N))\n assert mgr.get('baz').dtype == np.object_\n\n mgr2 = mgr.consolidate()\n mgr2.set('baz', np.repeat('foo', N))\n assert mgr2.get('baz').dtype == np.object_\n\n mgr2.set('quux', randn(N).astype(int))\n assert mgr2.get('quux').dtype == np.int_\n\n mgr2.set('quux', randn(N))\n assert mgr2.get('quux').dtype == np.float_\n\n def test_set_change_dtype_slice(self): # GH8850\n cols = MultiIndex.from_tuples([('1st', 'a'), ('2nd', 'b'), ('3rd', 'c')\n ])\n df = DataFrame([[1.0, 2, 3], [4.0, 5, 6]], columns=cols)\n df['2nd'] = df['2nd'] * 2.0\n\n blocks = df._to_dict_of_blocks()\n assert sorted(blocks.keys()) == ['float64', 'int64']\n assert_frame_equal(blocks['float64'], DataFrame(\n [[1.0, 4.0], [4.0, 10.0]], columns=cols[:2]))\n assert_frame_equal(blocks['int64'], DataFrame(\n [[3], [6]], columns=cols[2:]))\n\n def test_copy(self, mgr):\n cp = mgr.copy(deep=False)\n for blk, cp_blk in zip(mgr.blocks, cp.blocks):\n\n # view assertion\n assert cp_blk.equals(blk)\n if isinstance(blk.values, np.ndarray):\n assert cp_blk.values.base is blk.values.base\n else:\n # DatetimeTZBlock has DatetimeIndex values\n assert cp_blk.values._data.base is blk.values._data.base\n\n cp = mgr.copy(deep=True)\n for blk, cp_blk in zip(mgr.blocks, cp.blocks):\n\n # copy assertion we either have a None for a base or in case of\n # some blocks it is an array (e.g. 
datetimetz), but was copied\n assert cp_blk.equals(blk)\n if not isinstance(cp_blk.values, np.ndarray):\n assert cp_blk.values._data.base is not blk.values._data.base\n else:\n assert cp_blk.values.base is None and blk.values.base is None\n\n def test_sparse(self):\n mgr = create_mgr('a: sparse-1; b: sparse-2')\n # what to test here?\n assert mgr.as_array().dtype == np.float64\n\n def test_sparse_mixed(self):\n mgr = create_mgr('a: sparse-1; b: sparse-2; c: f8')\n assert len(mgr.blocks) == 3\n assert isinstance(mgr, BlockManager)\n\n # what to test here?\n\n def test_as_array_float(self):\n mgr = create_mgr('c: f4; d: f2; e: f8')\n assert mgr.as_array().dtype == np.float64\n\n mgr = create_mgr('c: f4; d: f2')\n assert mgr.as_array().dtype == np.float32\n\n def test_as_array_int_bool(self):\n mgr = create_mgr('a: bool-1; b: bool-2')\n assert mgr.as_array().dtype == np.bool_\n\n mgr = create_mgr('a: i8-1; b: i8-2; c: i4; d: i2; e: u1')\n assert mgr.as_array().dtype == np.int64\n\n mgr = create_mgr('c: i4; d: i2; e: u1')\n assert mgr.as_array().dtype == np.int32\n\n def test_as_array_datetime(self):\n mgr = create_mgr('h: datetime-1; g: datetime-2')\n assert mgr.as_array().dtype == 'M8[ns]'\n\n def test_as_array_datetime_tz(self):\n mgr = create_mgr('h: M8[ns, US/Eastern]; g: M8[ns, CET]')\n assert mgr.get('h').dtype == 'datetime64[ns, US/Eastern]'\n assert mgr.get('g').dtype == 'datetime64[ns, CET]'\n assert mgr.as_array().dtype == 'object'\n\n def test_astype(self):\n # coerce all\n mgr = create_mgr('c: f4; d: f2; e: f8')\n for t in ['float16', 'float32', 'float64', 'int32', 'int64']:\n t = np.dtype(t)\n tmgr = mgr.astype(t)\n assert tmgr.get('c').dtype.type == t\n assert tmgr.get('d').dtype.type == t\n assert tmgr.get('e').dtype.type == t\n\n # mixed\n mgr = create_mgr('a,b: object; c: bool; d: datetime;'\n 'e: f4; f: f2; g: f8')\n for t in ['float16', 'float32', 'float64', 'int32', 'int64']:\n t = np.dtype(t)\n tmgr = mgr.astype(t, errors='ignore')\n assert tmgr.get('c').dtype.type == t\n assert tmgr.get('e').dtype.type == t\n assert tmgr.get('f').dtype.type == t\n assert tmgr.get('g').dtype.type == t\n\n assert tmgr.get('a').dtype.type == np.object_\n assert tmgr.get('b').dtype.type == np.object_\n if t != np.int64:\n assert tmgr.get('d').dtype.type == np.datetime64\n else:\n assert tmgr.get('d').dtype.type == t\n\n def test_convert(self):\n def _compare(old_mgr, new_mgr):\n \"\"\" compare the blocks, numeric compare ==, object don't \"\"\"\n old_blocks = set(old_mgr.blocks)\n new_blocks = set(new_mgr.blocks)\n assert len(old_blocks) == len(new_blocks)\n\n # compare non-numeric\n for b in old_blocks:\n found = False\n for nb in new_blocks:\n if (b.values == nb.values).all():\n found = True\n break\n assert found\n\n for b in new_blocks:\n found = False\n for ob in old_blocks:\n if (b.values == ob.values).all():\n found = True\n break\n assert found\n\n # noops\n mgr = create_mgr('f: i8; g: f8')\n new_mgr = mgr.convert()\n _compare(mgr, new_mgr)\n\n mgr = create_mgr('a, b: object; f: i8; g: f8')\n new_mgr = mgr.convert()\n _compare(mgr, new_mgr)\n\n # convert\n mgr = create_mgr('a,b,foo: object; f: i8; g: f8')\n mgr.set('a', np.array(['1'] * N, dtype=np.object_))\n mgr.set('b', np.array(['2.'] * N, dtype=np.object_))\n mgr.set('foo', np.array(['foo.'] * N, dtype=np.object_))\n new_mgr = mgr.convert(numeric=True)\n assert new_mgr.get('a').dtype == np.int64\n assert new_mgr.get('b').dtype == np.float64\n assert new_mgr.get('foo').dtype == np.object_\n assert new_mgr.get('f').dtype == 
np.int64\n assert new_mgr.get('g').dtype == np.float64\n\n mgr = create_mgr('a,b,foo: object; f: i4; bool: bool; dt: datetime;'\n 'i: i8; g: f8; h: f2')\n mgr.set('a', np.array(['1'] * N, dtype=np.object_))\n mgr.set('b', np.array(['2.'] * N, dtype=np.object_))\n mgr.set('foo', np.array(['foo.'] * N, dtype=np.object_))\n new_mgr = mgr.convert(numeric=True)\n assert new_mgr.get('a').dtype == np.int64\n assert new_mgr.get('b').dtype == np.float64\n assert new_mgr.get('foo').dtype == np.object_\n assert new_mgr.get('f').dtype == np.int32\n assert new_mgr.get('bool').dtype == np.bool_\n assert new_mgr.get('dt').dtype.type, np.datetime64\n assert new_mgr.get('i').dtype == np.int64\n assert new_mgr.get('g').dtype == np.float64\n assert new_mgr.get('h').dtype == np.float16\n\n def test_interleave(self):\n\n # self\n for dtype in ['f8', 'i8', 'object', 'bool', 'complex', 'M8[ns]',\n 'm8[ns]']:\n mgr = create_mgr('a: {0}'.format(dtype))\n assert mgr.as_array().dtype == dtype\n mgr = create_mgr('a: {0}; b: {0}'.format(dtype))\n assert mgr.as_array().dtype == dtype\n\n # will be converted according the actual dtype of the underlying\n mgr = create_mgr('a: category')\n assert mgr.as_array().dtype == 'i8'\n mgr = create_mgr('a: category; b: category')\n assert mgr.as_array().dtype == 'i8'\n mgr = create_mgr('a: category; b: category2')\n assert mgr.as_array().dtype == 'object'\n mgr = create_mgr('a: category2')\n assert mgr.as_array().dtype == 'object'\n mgr = create_mgr('a: category2; b: category2')\n assert mgr.as_array().dtype == 'object'\n\n # combinations\n mgr = create_mgr('a: f8')\n assert mgr.as_array().dtype == 'f8'\n mgr = create_mgr('a: f8; b: i8')\n assert mgr.as_array().dtype == 'f8'\n mgr = create_mgr('a: f4; b: i8')\n assert mgr.as_array().dtype == 'f8'\n mgr = create_mgr('a: f4; b: i8; d: object')\n assert mgr.as_array().dtype == 'object'\n mgr = create_mgr('a: bool; b: i8')\n assert mgr.as_array().dtype == 'object'\n mgr = create_mgr('a: complex')\n assert mgr.as_array().dtype == 'complex'\n mgr = create_mgr('a: f8; b: category')\n assert mgr.as_array().dtype == 'object'\n mgr = create_mgr('a: M8[ns]; b: category')\n assert mgr.as_array().dtype == 'object'\n mgr = create_mgr('a: M8[ns]; b: bool')\n assert mgr.as_array().dtype == 'object'\n mgr = create_mgr('a: M8[ns]; b: i8')\n assert mgr.as_array().dtype == 'object'\n mgr = create_mgr('a: m8[ns]; b: bool')\n assert mgr.as_array().dtype == 'object'\n mgr = create_mgr('a: m8[ns]; b: i8')\n assert mgr.as_array().dtype == 'object'\n mgr = create_mgr('a: M8[ns]; b: m8[ns]')\n assert mgr.as_array().dtype == 'object'\n\n def test_interleave_non_unique_cols(self):\n df = DataFrame([\n [pd.Timestamp('20130101'), 3.5],\n [pd.Timestamp('20130102'), 4.5]],\n columns=['x', 'x'],\n index=[1, 2])\n\n df_unique = df.copy()\n df_unique.columns = ['x', 'y']\n assert df_unique.values.shape == df.values.shape\n tm.assert_numpy_array_equal(df_unique.values[0], df.values[0])\n tm.assert_numpy_array_equal(df_unique.values[1], df.values[1])\n\n def test_consolidate(self):\n pass\n\n def test_consolidate_ordering_issues(self, mgr):\n mgr.set('f', randn(N))\n mgr.set('d', randn(N))\n mgr.set('b', randn(N))\n mgr.set('g', randn(N))\n mgr.set('h', randn(N))\n\n # we have datetime/tz blocks in mgr\n cons = mgr.consolidate()\n assert cons.nblocks == 4\n cons = mgr.consolidate().get_numeric_data()\n assert cons.nblocks == 1\n assert isinstance(cons.blocks[0].mgr_locs, BlockPlacement)\n tm.assert_numpy_array_equal(cons.blocks[0].mgr_locs.as_array,\n 
np.arange(len(cons.items), dtype=np.int64))\n\n def test_reindex_index(self):\n pass\n\n def test_reindex_items(self):\n # mgr is not consolidated, f8 & f8-2 blocks\n mgr = create_mgr('a: f8; b: i8; c: f8; d: i8; e: f8;'\n 'f: bool; g: f8-2')\n\n reindexed = mgr.reindex_axis(['g', 'c', 'a', 'd'], axis=0)\n assert reindexed.nblocks == 2\n tm.assert_index_equal(reindexed.items, pd.Index(['g', 'c', 'a', 'd']))\n assert_almost_equal(\n mgr.get('g', fastpath=False), reindexed.get('g', fastpath=False))\n assert_almost_equal(\n mgr.get('c', fastpath=False), reindexed.get('c', fastpath=False))\n assert_almost_equal(\n mgr.get('a', fastpath=False), reindexed.get('a', fastpath=False))\n assert_almost_equal(\n mgr.get('d', fastpath=False), reindexed.get('d', fastpath=False))\n assert_almost_equal(\n mgr.get('g').internal_values(),\n reindexed.get('g').internal_values())\n assert_almost_equal(\n mgr.get('c').internal_values(),\n reindexed.get('c').internal_values())\n assert_almost_equal(\n mgr.get('a').internal_values(),\n reindexed.get('a').internal_values())\n assert_almost_equal(\n mgr.get('d').internal_values(),\n reindexed.get('d').internal_values())\n\n def test_multiindex_xs(self):\n mgr = create_mgr('a,b,c: f8; d,e,f: i8')\n\n index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two',\n 'three']],\n codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],\n [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],\n names=['first', 'second'])\n\n mgr.set_axis(1, index)\n result = mgr.xs('bar', axis=1)\n assert result.shape == (6, 2)\n assert result.axes[1][0] == ('bar', 'one')\n assert result.axes[1][1] == ('bar', 'two')\n\n def test_get_numeric_data(self):\n mgr = create_mgr('int: int; float: float; complex: complex;'\n 'str: object; bool: bool; obj: object; dt: datetime',\n item_shape=(3, ))\n mgr.set('obj', np.array([1, 2, 3], dtype=np.object_))\n\n numeric = mgr.get_numeric_data()\n tm.assert_index_equal(numeric.items,\n pd.Index(['int', 'float', 'complex', 'bool']))\n assert_almost_equal(\n mgr.get('float', fastpath=False), numeric.get('float',\n fastpath=False))\n assert_almost_equal(\n mgr.get('float').internal_values(),\n numeric.get('float').internal_values())\n\n # Check sharing\n numeric.set('float', np.array([100., 200., 300.]))\n assert_almost_equal(\n mgr.get('float', fastpath=False), np.array([100., 200., 300.]))\n assert_almost_equal(\n mgr.get('float').internal_values(), np.array([100., 200., 300.]))\n\n numeric2 = mgr.get_numeric_data(copy=True)\n tm.assert_index_equal(numeric.items,\n pd.Index(['int', 'float', 'complex', 'bool']))\n numeric2.set('float', np.array([1000., 2000., 3000.]))\n assert_almost_equal(\n mgr.get('float', fastpath=False), np.array([100., 200., 300.]))\n assert_almost_equal(\n mgr.get('float').internal_values(), np.array([100., 200., 300.]))\n\n def test_get_bool_data(self):\n mgr = create_mgr('int: int; float: float; complex: complex;'\n 'str: object; bool: bool; obj: object; dt: datetime',\n item_shape=(3, ))\n mgr.set('obj', np.array([True, False, True], dtype=np.object_))\n\n bools = mgr.get_bool_data()\n tm.assert_index_equal(bools.items, pd.Index(['bool']))\n assert_almost_equal(mgr.get('bool', fastpath=False),\n bools.get('bool', fastpath=False))\n assert_almost_equal(\n mgr.get('bool').internal_values(),\n bools.get('bool').internal_values())\n\n bools.set('bool', np.array([True, False, True]))\n tm.assert_numpy_array_equal(mgr.get('bool', fastpath=False),\n np.array([True, False, True]))\n tm.assert_numpy_array_equal(mgr.get('bool').internal_values(),\n np.array([True, 
False, True]))\n\n # Check sharing\n bools2 = mgr.get_bool_data(copy=True)\n bools2.set('bool', np.array([False, True, False]))\n tm.assert_numpy_array_equal(mgr.get('bool', fastpath=False),\n np.array([True, False, True]))\n tm.assert_numpy_array_equal(mgr.get('bool').internal_values(),\n np.array([True, False, True]))\n\n def test_unicode_repr_doesnt_raise(self):\n repr(create_mgr('b,\\u05d0: object'))\n\n def test_missing_unicode_key(self):\n df = DataFrame({\"a\": [1]})\n try:\n df.loc[:, \"\\u05d0\"] # should not raise UnicodeEncodeError\n except KeyError:\n pass # this is the expected exception\n\n def test_equals(self):\n # unique items\n bm1 = create_mgr('a,b,c: i8-1; d,e,f: i8-2')\n bm2 = BlockManager(bm1.blocks[::-1], bm1.axes)\n assert bm1.equals(bm2)\n\n bm1 = create_mgr('a,a,a: i8-1; b,b,b: i8-2')\n bm2 = BlockManager(bm1.blocks[::-1], bm1.axes)\n assert bm1.equals(bm2)\n\n def test_equals_block_order_different_dtypes(self):\n # GH 9330\n\n mgr_strings = [\n \"a:i8;b:f8\", # basic case\n \"a:i8;b:f8;c:c8;d:b\", # many types\n \"a:i8;e:dt;f:td;g:string\", # more types\n \"a:i8;b:category;c:category2;d:category2\", # categories\n \"c:sparse;d:sparse_na;b:f8\", # sparse\n ]\n\n for mgr_string in mgr_strings:\n bm = create_mgr(mgr_string)\n block_perms = itertools.permutations(bm.blocks)\n for bm_perm in block_perms:\n bm_this = BlockManager(bm_perm, bm.axes)\n assert bm.equals(bm_this)\n assert bm_this.equals(bm)\n\n def test_single_mgr_ctor(self):\n mgr = create_single_mgr('f8', num_rows=5)\n assert mgr.as_array().tolist() == [0., 1., 2., 3., 4.]\n\n def test_validate_bool_args(self):\n invalid_values = [1, \"True\", [1, 2, 3], 5.0]\n bm1 = create_mgr('a,b,c: i8-1; d,e,f: i8-2')\n\n for value in invalid_values:\n with pytest.raises(ValueError):\n bm1.replace_list([1], [2], inplace=value)\n\n\nclass TestIndexing:\n # Nosetests-style data-driven tests.\n #\n # This test applies different indexing routines to block managers and\n # compares the outcome to the result of same operations on np.ndarray.\n #\n # NOTE: sparse (SparseBlock with fill_value != np.nan) fail a lot of tests\n # and are disabled.\n\n MANAGERS = [\n create_single_mgr('f8', N),\n create_single_mgr('i8', N),\n\n # 2-dim\n create_mgr('a,b,c,d,e,f: f8', item_shape=(N,)),\n create_mgr('a,b,c,d,e,f: i8', item_shape=(N,)),\n create_mgr('a,b: f8; c,d: i8; e,f: string', item_shape=(N,)),\n create_mgr('a,b: f8; c,d: i8; e,f: f8', item_shape=(N,)),\n\n # 3-dim\n create_mgr('a,b,c,d,e,f: f8', item_shape=(N, N)),\n create_mgr('a,b,c,d,e,f: i8', item_shape=(N, N)),\n create_mgr('a,b: f8; c,d: i8; e,f: string', item_shape=(N, N)),\n create_mgr('a,b: f8; c,d: i8; e,f: f8', item_shape=(N, N)),\n ]\n\n # MANAGERS = [MANAGERS[6]]\n\n def test_get_slice(self):\n def assert_slice_ok(mgr, axis, slobj):\n mat = mgr.as_array()\n\n # we maybe using an ndarray to test slicing and\n # might not be the full length of the axis\n if isinstance(slobj, np.ndarray):\n ax = mgr.axes[axis]\n if len(ax) and len(slobj) and len(slobj) != len(ax):\n slobj = np.concatenate([slobj, np.zeros(\n len(ax) - len(slobj), dtype=bool)])\n sliced = mgr.get_slice(slobj, axis=axis)\n mat_slobj = (slice(None), ) * axis + (slobj, )\n tm.assert_numpy_array_equal(mat[mat_slobj], sliced.as_array(),\n check_dtype=False)\n tm.assert_index_equal(mgr.axes[axis][slobj], sliced.axes[axis])\n\n for mgr in self.MANAGERS:\n for ax in range(mgr.ndim):\n # slice\n assert_slice_ok(mgr, ax, slice(None))\n assert_slice_ok(mgr, ax, slice(3))\n assert_slice_ok(mgr, ax, slice(100))\n 
assert_slice_ok(mgr, ax, slice(1, 4))\n assert_slice_ok(mgr, ax, slice(3, 0, -2))\n\n # boolean mask\n assert_slice_ok(\n mgr, ax, np.array([], dtype=np.bool_))\n assert_slice_ok(\n mgr, ax,\n np.ones(mgr.shape[ax], dtype=np.bool_))\n assert_slice_ok(\n mgr, ax,\n np.zeros(mgr.shape[ax], dtype=np.bool_))\n\n if mgr.shape[ax] >= 3:\n assert_slice_ok(\n mgr, ax,\n np.arange(mgr.shape[ax]) % 3 == 0)\n assert_slice_ok(\n mgr, ax, np.array(\n [True, True, False], dtype=np.bool_))\n\n # fancy indexer\n assert_slice_ok(mgr, ax, [])\n assert_slice_ok(mgr, ax, lrange(mgr.shape[ax]))\n\n if mgr.shape[ax] >= 3:\n assert_slice_ok(mgr, ax, [0, 1, 2])\n assert_slice_ok(mgr, ax, [-1, -2, -3])\n\n def test_take(self):\n def assert_take_ok(mgr, axis, indexer):\n mat = mgr.as_array()\n taken = mgr.take(indexer, axis)\n tm.assert_numpy_array_equal(np.take(mat, indexer, axis),\n taken.as_array(), check_dtype=False)\n tm.assert_index_equal(mgr.axes[axis].take(indexer),\n taken.axes[axis])\n\n for mgr in self.MANAGERS:\n for ax in range(mgr.ndim):\n # take/fancy indexer\n assert_take_ok(mgr, ax, [])\n assert_take_ok(mgr, ax, [0, 0, 0])\n assert_take_ok(mgr, ax, lrange(mgr.shape[ax]))\n\n if mgr.shape[ax] >= 3:\n assert_take_ok(mgr, ax, [0, 1, 2])\n assert_take_ok(mgr, ax, [-1, -2, -3])\n\n def test_reindex_axis(self):\n def assert_reindex_axis_is_ok(mgr, axis, new_labels, fill_value):\n mat = mgr.as_array()\n indexer = mgr.axes[axis].get_indexer_for(new_labels)\n\n reindexed = mgr.reindex_axis(new_labels, axis,\n fill_value=fill_value)\n tm.assert_numpy_array_equal(algos.take_nd(mat, indexer, axis,\n fill_value=fill_value),\n reindexed.as_array(),\n check_dtype=False)\n tm.assert_index_equal(reindexed.axes[axis], new_labels)\n\n for mgr in self.MANAGERS:\n for ax in range(mgr.ndim):\n for fill_value in (None, np.nan, 100.):\n assert_reindex_axis_is_ok(\n mgr, ax,\n pd.Index([]), fill_value)\n assert_reindex_axis_is_ok(\n mgr, ax, mgr.axes[ax],\n fill_value)\n assert_reindex_axis_is_ok(\n mgr, ax,\n mgr.axes[ax][[0, 0, 0]], fill_value)\n assert_reindex_axis_is_ok(\n mgr, ax,\n pd.Index(['foo', 'bar', 'baz']), fill_value)\n assert_reindex_axis_is_ok(\n mgr, ax,\n pd.Index(['foo', mgr.axes[ax][0], 'baz']),\n fill_value)\n\n if mgr.shape[ax] >= 3:\n assert_reindex_axis_is_ok(\n mgr, ax,\n mgr.axes[ax][:-3], fill_value)\n assert_reindex_axis_is_ok(\n mgr, ax,\n mgr.axes[ax][-3::-1], fill_value)\n assert_reindex_axis_is_ok(\n mgr, ax,\n mgr.axes[ax][[0, 1, 2, 0, 1, 2]], fill_value)\n\n def test_reindex_indexer(self):\n\n def assert_reindex_indexer_is_ok(mgr, axis, new_labels, indexer,\n fill_value):\n mat = mgr.as_array()\n reindexed_mat = algos.take_nd(mat, indexer, axis,\n fill_value=fill_value)\n reindexed = mgr.reindex_indexer(new_labels, indexer, axis,\n fill_value=fill_value)\n tm.assert_numpy_array_equal(reindexed_mat,\n reindexed.as_array(),\n check_dtype=False)\n tm.assert_index_equal(reindexed.axes[axis], new_labels)\n\n for mgr in self.MANAGERS:\n for ax in range(mgr.ndim):\n for fill_value in (None, np.nan, 100.):\n assert_reindex_indexer_is_ok(\n mgr, ax,\n pd.Index([]), [], fill_value)\n assert_reindex_indexer_is_ok(\n mgr, ax,\n mgr.axes[ax], np.arange(mgr.shape[ax]), fill_value)\n assert_reindex_indexer_is_ok(\n mgr, ax,\n pd.Index(['foo'] * mgr.shape[ax]),\n np.arange(mgr.shape[ax]), fill_value)\n assert_reindex_indexer_is_ok(\n mgr, ax,\n mgr.axes[ax][::-1], np.arange(mgr.shape[ax]),\n fill_value)\n assert_reindex_indexer_is_ok(\n mgr, ax, mgr.axes[ax],\n np.arange(mgr.shape[ax])[::-1], 
fill_value)\n assert_reindex_indexer_is_ok(\n mgr, ax,\n pd.Index(['foo', 'bar', 'baz']),\n [0, 0, 0], fill_value)\n assert_reindex_indexer_is_ok(\n mgr, ax,\n pd.Index(['foo', 'bar', 'baz']),\n [-1, 0, -1], fill_value)\n assert_reindex_indexer_is_ok(\n mgr, ax,\n pd.Index(['foo', mgr.axes[ax][0], 'baz']),\n [-1, -1, -1], fill_value)\n\n if mgr.shape[ax] >= 3:\n assert_reindex_indexer_is_ok(\n mgr, ax,\n pd.Index(['foo', 'bar', 'baz']),\n [0, 1, 2], fill_value)\n\n # test_get_slice(slice_like, axis)\n # take(indexer, axis)\n # reindex_axis(new_labels, axis)\n # reindex_indexer(new_labels, indexer, axis)\n\n\nclass TestBlockPlacement:\n\n def test_slice_len(self):\n assert len(BlockPlacement(slice(0, 4))) == 4\n assert len(BlockPlacement(slice(0, 4, 2))) == 2\n assert len(BlockPlacement(slice(0, 3, 2))) == 2\n\n assert len(BlockPlacement(slice(0, 1, 2))) == 1\n assert len(BlockPlacement(slice(1, 0, -1))) == 1\n\n def test_zero_step_raises(self):\n with pytest.raises(ValueError):\n BlockPlacement(slice(1, 1, 0))\n with pytest.raises(ValueError):\n BlockPlacement(slice(1, 2, 0))\n\n def test_unbounded_slice_raises(self):\n def assert_unbounded_slice_error(slc):\n with pytest.raises(ValueError, match=\"unbounded slice\"):\n BlockPlacement(slc)\n\n assert_unbounded_slice_error(slice(None, None))\n assert_unbounded_slice_error(slice(10, None))\n assert_unbounded_slice_error(slice(None, None, -1))\n assert_unbounded_slice_error(slice(None, 10, -1))\n\n # These are \"unbounded\" because negative index will change depending on\n # container shape.\n assert_unbounded_slice_error(slice(-1, None))\n assert_unbounded_slice_error(slice(None, -1))\n assert_unbounded_slice_error(slice(-1, -1))\n assert_unbounded_slice_error(slice(-1, None, -1))\n assert_unbounded_slice_error(slice(None, -1, -1))\n assert_unbounded_slice_error(slice(-1, -1, -1))\n\n def test_not_slice_like_slices(self):\n def assert_not_slice_like(slc):\n assert not BlockPlacement(slc).is_slice_like\n\n assert_not_slice_like(slice(0, 0))\n assert_not_slice_like(slice(100, 0))\n\n assert_not_slice_like(slice(100, 100, -1))\n assert_not_slice_like(slice(0, 100, -1))\n\n assert not BlockPlacement(slice(0, 0)).is_slice_like\n assert not BlockPlacement(slice(100, 100)).is_slice_like\n\n def test_array_to_slice_conversion(self):\n def assert_as_slice_equals(arr, slc):\n assert BlockPlacement(arr).as_slice == slc\n\n assert_as_slice_equals([0], slice(0, 1, 1))\n assert_as_slice_equals([100], slice(100, 101, 1))\n\n assert_as_slice_equals([0, 1, 2], slice(0, 3, 1))\n assert_as_slice_equals([0, 5, 10], slice(0, 15, 5))\n assert_as_slice_equals([0, 100], slice(0, 200, 100))\n\n assert_as_slice_equals([2, 1], slice(2, 0, -1))\n\n if not PY361:\n assert_as_slice_equals([2, 1, 0], slice(2, None, -1))\n assert_as_slice_equals([100, 0], slice(100, None, -100))\n\n def test_not_slice_like_arrays(self):\n def assert_not_slice_like(arr):\n assert not BlockPlacement(arr).is_slice_like\n\n assert_not_slice_like([])\n assert_not_slice_like([-1])\n assert_not_slice_like([-1, -2, -3])\n assert_not_slice_like([-10])\n assert_not_slice_like([-1])\n assert_not_slice_like([-1, 0, 1, 2])\n assert_not_slice_like([-2, 0, 2, 4])\n assert_not_slice_like([1, 0, -1])\n assert_not_slice_like([1, 1, 1])\n\n def test_slice_iter(self):\n assert list(BlockPlacement(slice(0, 3))) == [0, 1, 2]\n assert list(BlockPlacement(slice(0, 0))) == []\n assert list(BlockPlacement(slice(3, 0))) == []\n\n if not PY361:\n assert list(BlockPlacement(slice(3, 0, -1))) == [3, 2, 1]\n assert 
list(BlockPlacement(slice(3, None, -1))) == [3, 2, 1, 0]\n\n def test_slice_to_array_conversion(self):\n def assert_as_array_equals(slc, asarray):\n tm.assert_numpy_array_equal(\n BlockPlacement(slc).as_array,\n np.asarray(asarray, dtype=np.int64))\n\n assert_as_array_equals(slice(0, 3), [0, 1, 2])\n assert_as_array_equals(slice(0, 0), [])\n assert_as_array_equals(slice(3, 0), [])\n\n assert_as_array_equals(slice(3, 0, -1), [3, 2, 1])\n\n if not PY361:\n assert_as_array_equals(slice(3, None, -1), [3, 2, 1, 0])\n assert_as_array_equals(slice(31, None, -10), [31, 21, 11, 1])\n\n def test_blockplacement_add(self):\n bpl = BlockPlacement(slice(0, 5))\n assert bpl.add(1).as_slice == slice(1, 6, 1)\n assert bpl.add(np.arange(5)).as_slice == slice(0, 10, 2)\n assert list(bpl.add(np.arange(5, 0, -1))) == [5, 5, 5, 5, 5]\n\n def test_blockplacement_add_int(self):\n def assert_add_equals(val, inc, result):\n assert list(BlockPlacement(val).add(inc)) == result\n\n assert_add_equals(slice(0, 0), 0, [])\n assert_add_equals(slice(1, 4), 0, [1, 2, 3])\n assert_add_equals(slice(3, 0, -1), 0, [3, 2, 1])\n assert_add_equals([1, 2, 4], 0, [1, 2, 4])\n\n assert_add_equals(slice(0, 0), 10, [])\n assert_add_equals(slice(1, 4), 10, [11, 12, 13])\n assert_add_equals(slice(3, 0, -1), 10, [13, 12, 11])\n assert_add_equals([1, 2, 4], 10, [11, 12, 14])\n\n assert_add_equals(slice(0, 0), -1, [])\n assert_add_equals(slice(1, 4), -1, [0, 1, 2])\n assert_add_equals([1, 2, 4], -1, [0, 1, 3])\n\n with pytest.raises(ValueError):\n BlockPlacement(slice(1, 4)).add(-10)\n with pytest.raises(ValueError):\n BlockPlacement([1, 2, 4]).add(-10)\n\n if not PY361:\n assert_add_equals(slice(3, 0, -1), -1, [2, 1, 0])\n assert_add_equals(slice(2, None, -1), 0, [2, 1, 0])\n assert_add_equals(slice(2, None, -1), 10, [12, 11, 10])\n\n with pytest.raises(ValueError):\n BlockPlacement(slice(2, None, -1)).add(-1)\n\n\nclass DummyElement:\n def __init__(self, value, dtype):\n self.value = value\n self.dtype = np.dtype(dtype)\n\n def __array__(self):\n return np.array(self.value, dtype=self.dtype)\n\n def __str__(self):\n return \"DummyElement({}, {})\".format(self.value, self.dtype)\n\n def __repr__(self):\n return str(self)\n\n def astype(self, dtype, copy=False):\n self.dtype = dtype\n return self\n\n def view(self, dtype):\n return type(self)(self.value.view(dtype), dtype)\n\n def any(self, axis=None):\n return bool(self.value)\n\n\nclass TestCanHoldElement:\n @pytest.mark.parametrize('value, dtype', [\n (1, 'i8'),\n (1.0, 'f8'),\n (2**63, 'f8'),\n (1j, 'complex128'),\n (2**63, 'complex128'),\n (True, 'bool'),\n (np.timedelta64(20, 'ns'), '<m8[ns]'),\n (np.datetime64(20, 'ns'), '<M8[ns]'),\n ])\n @pytest.mark.parametrize('op', [\n operator.add,\n operator.sub,\n operator.mul,\n operator.truediv,\n operator.mod,\n operator.pow,\n ], ids=lambda x: x.__name__)\n def test_binop_other(self, op, value, dtype):\n skip = {(operator.add, 'bool'),\n (operator.sub, 'bool'),\n (operator.mul, 'bool'),\n (operator.truediv, 'bool'),\n (operator.mod, 'i8'),\n (operator.mod, 'complex128'),\n (operator.pow, 'bool')}\n if (op, dtype) in skip:\n pytest.skip(\"Invalid combination {},{}\".format(op, dtype))\n\n e = DummyElement(value, dtype)\n s = pd.DataFrame({\"A\": [e.value, e.value]}, dtype=e.dtype)\n\n invalid = {(operator.pow, '<M8[ns]'),\n (operator.mod, '<M8[ns]'),\n (operator.truediv, '<M8[ns]'),\n (operator.mul, '<M8[ns]'),\n (operator.add, '<M8[ns]'),\n (operator.pow, '<m8[ns]'),\n (operator.mul, '<m8[ns]')}\n\n if (op, dtype) in invalid:\n with 
pytest.raises(TypeError):\n op(s, e.value)\n else:\n # FIXME: Since dispatching to Series, this test no longer\n # asserts anything meaningful\n result = op(s, e.value).dtypes\n expected = op(s, value).dtypes\n assert_series_equal(result, expected)\n\n\[email protected]('typestr, holder', [\n ('category', Categorical),\n ('M8[ns]', DatetimeArray),\n ('M8[ns, US/Central]', DatetimeArray),\n ('m8[ns]', TimedeltaArray),\n ('sparse', SparseArray),\n])\ndef test_holder(typestr, holder):\n blk = create_block(typestr, [1])\n assert blk._holder is holder\n\n\ndef test_deprecated_fastpath():\n # GH#19265\n values = np.random.rand(3, 3)\n with tm.assert_produces_warning(DeprecationWarning,\n check_stacklevel=False):\n make_block(values, placement=np.arange(3), fastpath=True)\n\n\ndef test_validate_ndim():\n values = np.array([1.0, 2.0])\n placement = slice(2)\n msg = r\"Wrong number of dimensions. values.ndim != ndim \\[1 != 2\\]\"\n\n with pytest.raises(ValueError, match=msg):\n make_block(values, placement, ndim=2)\n\n\ndef test_block_shape():\n idx = pd.Index([0, 1, 2, 3, 4])\n a = pd.Series([1, 2, 3]).reindex(idx)\n b = pd.Series(pd.Categorical([1, 2, 3])).reindex(idx)\n\n assert (a._data.blocks[0].mgr_locs.indexer ==\n b._data.blocks[0].mgr_locs.indexer)\n\n\ndef test_make_block_no_pandas_array():\n # https://github.com/pandas-dev/pandas/pull/24866\n arr = pd.array([1, 2])\n\n # PandasArray, no dtype\n result = make_block(arr, slice(len(arr)))\n assert result.is_integer is True\n assert result.is_extension is False\n\n # PandasArray, PandasDtype\n result = make_block(arr, slice(len(arr)), dtype=arr.dtype)\n assert result.is_integer is True\n assert result.is_extension is False\n\n # ndarray, PandasDtype\n result = make_block(arr.to_numpy(), slice(len(arr)), dtype=arr.dtype)\n assert result.is_integer is True\n assert result.is_extension is False\n",
"import codecs\nimport re\nimport textwrap\nfrom typing import Dict\nimport warnings\n\nimport numpy as np\n\nimport pandas._libs.lib as lib\nimport pandas._libs.ops as libops\nfrom pandas.util._decorators import Appender, deprecate_kwarg\n\nfrom pandas.core.dtypes.common import (\n ensure_object, is_bool_dtype, is_categorical_dtype, is_integer,\n is_list_like, is_object_dtype, is_re, is_scalar, is_string_like)\nfrom pandas.core.dtypes.generic import ABCIndexClass, ABCSeries\nfrom pandas.core.dtypes.missing import isna\n\nfrom pandas.core.algorithms import take_1d\nfrom pandas.core.base import NoNewAttributesMixin\nimport pandas.core.common as com\n\n_cpython_optimized_encoders = (\n \"utf-8\", \"utf8\", \"latin-1\", \"latin1\", \"iso-8859-1\", \"mbcs\", \"ascii\"\n)\n_cpython_optimized_decoders = _cpython_optimized_encoders + (\n \"utf-16\", \"utf-32\"\n)\n\n_shared_docs = dict() # type: Dict[str, str]\n\n\ndef cat_core(list_of_columns, sep):\n \"\"\"\n Auxiliary function for :meth:`str.cat`\n\n Parameters\n ----------\n list_of_columns : list of numpy arrays\n List of arrays to be concatenated with sep;\n these arrays may not contain NaNs!\n sep : string\n The separator string for concatenating the columns\n\n Returns\n -------\n nd.array\n The concatenation of list_of_columns with sep\n \"\"\"\n list_with_sep = [sep] * (2 * len(list_of_columns) - 1)\n list_with_sep[::2] = list_of_columns\n return np.sum(list_with_sep, axis=0)\n\n\ndef _na_map(f, arr, na_result=np.nan, dtype=object):\n # should really _check_ for NA\n return _map(f, arr, na_mask=True, na_value=na_result, dtype=dtype)\n\n\ndef _map(f, arr, na_mask=False, na_value=np.nan, dtype=object):\n if not len(arr):\n return np.ndarray(0, dtype=dtype)\n\n if isinstance(arr, ABCSeries):\n arr = arr.values\n if not isinstance(arr, np.ndarray):\n arr = np.asarray(arr, dtype=object)\n if na_mask:\n mask = isna(arr)\n try:\n convert = not all(mask)\n result = lib.map_infer_mask(arr, f, mask.view(np.uint8), convert)\n except (TypeError, AttributeError) as e:\n # Reraise the exception if callable `f` got wrong number of args.\n # The user may want to be warned by this, instead of getting NaN\n p_err = (r'((takes)|(missing)) (?(2)from \\d+ to )?\\d+ '\n r'(?(3)required )positional arguments?')\n\n if len(e.args) >= 1 and re.search(p_err, e.args[0]):\n raise e\n\n def g(x):\n try:\n return f(x)\n except (TypeError, AttributeError):\n return na_value\n\n return _map(g, arr, dtype=dtype)\n if na_value is not np.nan:\n np.putmask(result, mask, na_value)\n if result.dtype == object:\n result = lib.maybe_convert_objects(result)\n return result\n else:\n return lib.map_infer(arr, f)\n\n\ndef str_count(arr, pat, flags=0):\n \"\"\"\n Count occurrences of pattern in each string of the Series/Index.\n\n This function is used to count the number of times a particular regex\n pattern is repeated in each of the string elements of the\n :class:`~pandas.Series`.\n\n Parameters\n ----------\n pat : str\n Valid regular expression.\n flags : int, default 0, meaning no flags\n Flags for the `re` module. For a complete list, `see here\n <https://docs.python.org/3/howto/regex.html#compilation-flags>`_.\n **kwargs\n For compatibility with other string methods. 
Not used.\n\n Returns\n -------\n Series or Index\n Same type as the calling object containing the integer counts.\n\n See Also\n --------\n re : Standard library module for regular expressions.\n str.count : Standard library version, without regular expression support.\n\n Notes\n -----\n Some characters need to be escaped when passing in `pat`.\n eg. ``'$'`` has a special meaning in regex and must be escaped when\n finding this literal character.\n\n Examples\n --------\n >>> s = pd.Series(['A', 'B', 'Aaba', 'Baca', np.nan, 'CABA', 'cat'])\n >>> s.str.count('a')\n 0 0.0\n 1 0.0\n 2 2.0\n 3 2.0\n 4 NaN\n 5 0.0\n 6 1.0\n dtype: float64\n\n Escape ``'$'`` to find the literal dollar sign.\n\n >>> s = pd.Series(['$', 'B', 'Aab$', '$$ca', 'C$B$', 'cat'])\n >>> s.str.count('\\\\$')\n 0 1\n 1 0\n 2 1\n 3 2\n 4 2\n 5 0\n dtype: int64\n\n This is also available on Index\n\n >>> pd.Index(['A', 'A', 'Aaba', 'cat']).str.count('a')\n Int64Index([0, 0, 2, 1], dtype='int64')\n \"\"\"\n regex = re.compile(pat, flags=flags)\n f = lambda x: len(regex.findall(x))\n return _na_map(f, arr, dtype=int)\n\n\ndef str_contains(arr, pat, case=True, flags=0, na=np.nan, regex=True):\n \"\"\"\n Test if pattern or regex is contained within a string of a Series or Index.\n\n Return boolean Series or Index based on whether a given pattern or regex is\n contained within a string of a Series or Index.\n\n Parameters\n ----------\n pat : str\n Character sequence or regular expression.\n case : bool, default True\n If True, case sensitive.\n flags : int, default 0 (no flags)\n Flags to pass through to the re module, e.g. re.IGNORECASE.\n na : default NaN\n Fill value for missing values.\n regex : bool, default True\n If True, assumes the pat is a regular expression.\n\n If False, treats the pat as a literal string.\n\n Returns\n -------\n Series or Index of boolean values\n A Series or Index of boolean values indicating whether the\n given pattern is contained within the string of each element\n of the Series or Index.\n\n See Also\n --------\n match : Analogous, but stricter, relying on re.match instead of re.search.\n Series.str.startswith : Test if the start of each string element matches a\n pattern.\n Series.str.endswith : Same as startswith, but tests the end of string.\n\n Examples\n --------\n\n Returning a Series of booleans using only a literal pattern.\n\n >>> s1 = pd.Series(['Mouse', 'dog', 'house and parrot', '23', np.NaN])\n >>> s1.str.contains('og', regex=False)\n 0 False\n 1 True\n 2 False\n 3 False\n 4 NaN\n dtype: object\n\n Returning an Index of booleans using only a literal pattern.\n\n >>> ind = pd.Index(['Mouse', 'dog', 'house and parrot', '23.0', np.NaN])\n >>> ind.str.contains('23', regex=False)\n Index([False, False, False, True, nan], dtype='object')\n\n Specifying case sensitivity using `case`.\n\n >>> s1.str.contains('oG', case=True, regex=True)\n 0 False\n 1 False\n 2 False\n 3 False\n 4 NaN\n dtype: object\n\n Specifying `na` to be `False` instead of `NaN` replaces NaN values\n with `False`. 
If Series or Index does not contain NaN values\n the resultant dtype will be `bool`, otherwise, an `object` dtype.\n\n >>> s1.str.contains('og', na=False, regex=True)\n 0 False\n 1 True\n 2 False\n 3 False\n 4 False\n dtype: bool\n\n Returning 'house' or 'dog' when either expression occurs in a string.\n\n >>> s1.str.contains('house|dog', regex=True)\n 0 False\n 1 True\n 2 True\n 3 False\n 4 NaN\n dtype: object\n\n Ignoring case sensitivity using `flags` with regex.\n\n >>> import re\n >>> s1.str.contains('PARROT', flags=re.IGNORECASE, regex=True)\n 0 False\n 1 False\n 2 True\n 3 False\n 4 NaN\n dtype: object\n\n Returning any digit using regular expression.\n\n >>> s1.str.contains('\\\\d', regex=True)\n 0 False\n 1 False\n 2 False\n 3 True\n 4 NaN\n dtype: object\n\n Ensure `pat` is a not a literal pattern when `regex` is set to True.\n Note in the following example one might expect only `s2[1]` and `s2[3]` to\n return `True`. However, '.0' as a regex matches any character\n followed by a 0.\n\n >>> s2 = pd.Series(['40', '40.0', '41', '41.0', '35'])\n >>> s2.str.contains('.0', regex=True)\n 0 True\n 1 True\n 2 False\n 3 True\n 4 False\n dtype: bool\n \"\"\"\n if regex:\n if not case:\n flags |= re.IGNORECASE\n\n regex = re.compile(pat, flags=flags)\n\n if regex.groups > 0:\n warnings.warn(\"This pattern has match groups. To actually get the\"\n \" groups, use str.extract.\", UserWarning,\n stacklevel=3)\n\n f = lambda x: bool(regex.search(x))\n else:\n if case:\n f = lambda x: pat in x\n else:\n upper_pat = pat.upper()\n f = lambda x: upper_pat in x\n uppered = _na_map(lambda x: x.upper(), arr)\n return _na_map(f, uppered, na, dtype=bool)\n return _na_map(f, arr, na, dtype=bool)\n\n\ndef str_startswith(arr, pat, na=np.nan):\n \"\"\"\n Test if the start of each string element matches a pattern.\n\n Equivalent to :meth:`str.startswith`.\n\n Parameters\n ----------\n pat : str\n Character sequence. Regular expressions are not accepted.\n na : object, default NaN\n Object shown if element tested is not a string.\n\n Returns\n -------\n Series or Index of bool\n A Series of booleans indicating whether the given pattern matches\n the start of each string element.\n\n See Also\n --------\n str.startswith : Python standard library string method.\n Series.str.endswith : Same as startswith, but tests the end of string.\n Series.str.contains : Tests if string element contains a pattern.\n\n Examples\n --------\n >>> s = pd.Series(['bat', 'Bear', 'cat', np.nan])\n >>> s\n 0 bat\n 1 Bear\n 2 cat\n 3 NaN\n dtype: object\n\n >>> s.str.startswith('b')\n 0 True\n 1 False\n 2 False\n 3 NaN\n dtype: object\n\n Specifying `na` to be `False` instead of `NaN`.\n\n >>> s.str.startswith('b', na=False)\n 0 True\n 1 False\n 2 False\n 3 False\n dtype: bool\n \"\"\"\n f = lambda x: x.startswith(pat)\n return _na_map(f, arr, na, dtype=bool)\n\n\ndef str_endswith(arr, pat, na=np.nan):\n \"\"\"\n Test if the end of each string element matches a pattern.\n\n Equivalent to :meth:`str.endswith`.\n\n Parameters\n ----------\n pat : str\n Character sequence. 
Regular expressions are not accepted.\n na : object, default NaN\n Object shown if element tested is not a string.\n\n Returns\n -------\n Series or Index of bool\n A Series of booleans indicating whether the given pattern matches\n the end of each string element.\n\n See Also\n --------\n str.endswith : Python standard library string method.\n Series.str.startswith : Same as endswith, but tests the start of string.\n Series.str.contains : Tests if string element contains a pattern.\n\n Examples\n --------\n >>> s = pd.Series(['bat', 'bear', 'caT', np.nan])\n >>> s\n 0 bat\n 1 bear\n 2 caT\n 3 NaN\n dtype: object\n\n >>> s.str.endswith('t')\n 0 True\n 1 False\n 2 False\n 3 NaN\n dtype: object\n\n Specifying `na` to be `False` instead of `NaN`.\n\n >>> s.str.endswith('t', na=False)\n 0 True\n 1 False\n 2 False\n 3 False\n dtype: bool\n \"\"\"\n f = lambda x: x.endswith(pat)\n return _na_map(f, arr, na, dtype=bool)\n\n\ndef str_replace(arr, pat, repl, n=-1, case=None, flags=0, regex=True):\n r\"\"\"\n Replace occurrences of pattern/regex in the Series/Index with\n some other string. Equivalent to :meth:`str.replace` or\n :func:`re.sub`.\n\n Parameters\n ----------\n pat : str or compiled regex\n String can be a character sequence or regular expression.\n\n .. versionadded:: 0.20.0\n `pat` also accepts a compiled regex.\n\n repl : str or callable\n Replacement string or a callable. The callable is passed the regex\n match object and must return a replacement string to be used.\n See :func:`re.sub`.\n\n .. versionadded:: 0.20.0\n `repl` also accepts a callable.\n\n n : int, default -1 (all)\n Number of replacements to make from start.\n case : bool, default None\n - If True, case sensitive (the default if `pat` is a string)\n - Set to False for case insensitive\n - Cannot be set if `pat` is a compiled regex\n flags : int, default 0 (no flags)\n - re module flags, e.g. re.IGNORECASE\n - Cannot be set if `pat` is a compiled regex\n regex : bool, default True\n - If True, assumes the passed-in pattern is a regular expression.\n - If False, treats the pattern as a literal string\n - Cannot be set to False if `pat` is a compiled regex or `repl` is\n a callable.\n\n .. versionadded:: 0.23.0\n\n Returns\n -------\n Series or Index of object\n A copy of the object with all matching occurrences of `pat` replaced by\n `repl`.\n\n Raises\n ------\n ValueError\n * if `regex` is False and `repl` is a callable or `pat` is a compiled\n regex\n * if `pat` is a compiled regex and `case` or `flags` is set\n\n Notes\n -----\n When `pat` is a compiled regex, all flags should be included in the\n compiled regex. Use of `case`, `flags`, or `regex=False` with a compiled\n regex will raise an error.\n\n Examples\n --------\n When `pat` is a string and `regex` is True (the default), the given `pat`\n is compiled as a regex. When `repl` is a string, it replaces matching\n regex patterns as with :meth:`re.sub`. NaN value(s) in the Series are\n left as is:\n\n >>> pd.Series(['foo', 'fuz', np.nan]).str.replace('f.', 'ba', regex=True)\n 0 bao\n 1 baz\n 2 NaN\n dtype: object\n\n When `pat` is a string and `regex` is False, every `pat` is replaced with\n `repl` as with :meth:`str.replace`:\n\n >>> pd.Series(['f.o', 'fuz', np.nan]).str.replace('f.', 'ba', regex=False)\n 0 bao\n 1 fuz\n 2 NaN\n dtype: object\n\n When `repl` is a callable, it is called on every `pat` using\n :func:`re.sub`. 
The callable should expect one positional argument\n (a regex object) and return a string.\n\n To get the idea:\n\n >>> pd.Series(['foo', 'fuz', np.nan]).str.replace('f', repr)\n 0 <_sre.SRE_Match object; span=(0, 1), match='f'>oo\n 1 <_sre.SRE_Match object; span=(0, 1), match='f'>uz\n 2 NaN\n dtype: object\n\n Reverse every lowercase alphabetic word:\n\n >>> repl = lambda m: m.group(0)[::-1]\n >>> pd.Series(['foo 123', 'bar baz', np.nan]).str.replace(r'[a-z]+', repl)\n 0 oof 123\n 1 rab zab\n 2 NaN\n dtype: object\n\n Using regex groups (extract second group and swap case):\n\n >>> pat = r\"(?P<one>\\w+) (?P<two>\\w+) (?P<three>\\w+)\"\n >>> repl = lambda m: m.group('two').swapcase()\n >>> pd.Series(['One Two Three', 'Foo Bar Baz']).str.replace(pat, repl)\n 0 tWO\n 1 bAR\n dtype: object\n\n Using a compiled regex with flags\n\n >>> import re\n >>> regex_pat = re.compile(r'FUZ', flags=re.IGNORECASE)\n >>> pd.Series(['foo', 'fuz', np.nan]).str.replace(regex_pat, 'bar')\n 0 foo\n 1 bar\n 2 NaN\n dtype: object\n \"\"\"\n\n # Check whether repl is valid (GH 13438, GH 15055)\n if not (is_string_like(repl) or callable(repl)):\n raise TypeError(\"repl must be a string or callable\")\n\n is_compiled_re = is_re(pat)\n if regex:\n if is_compiled_re:\n if (case is not None) or (flags != 0):\n raise ValueError(\"case and flags cannot be set\"\n \" when pat is a compiled regex\")\n else:\n # not a compiled regex\n # set default case\n if case is None:\n case = True\n\n # add case flag, if provided\n if case is False:\n flags |= re.IGNORECASE\n if is_compiled_re or len(pat) > 1 or flags or callable(repl):\n n = n if n >= 0 else 0\n compiled = re.compile(pat, flags=flags)\n f = lambda x: compiled.sub(repl=repl, string=x, count=n)\n else:\n f = lambda x: x.replace(pat, repl, n)\n else:\n if is_compiled_re:\n raise ValueError(\"Cannot use a compiled regex as replacement \"\n \"pattern with regex=False\")\n if callable(repl):\n raise ValueError(\"Cannot use a callable replacement when \"\n \"regex=False\")\n f = lambda x: x.replace(pat, repl, n)\n\n return _na_map(f, arr)\n\n\ndef str_repeat(arr, repeats):\n \"\"\"\n Duplicate each string in the Series or Index.\n\n Parameters\n ----------\n repeats : int or sequence of int\n Same value for all (int) or different value per (sequence).\n\n Returns\n -------\n Series or Index of object\n Series or Index of repeated string objects specified by\n input parameter repeats.\n\n Examples\n --------\n >>> s = pd.Series(['a', 'b', 'c'])\n >>> s\n 0 a\n 1 b\n 2 c\n dtype: object\n\n Single int repeats string in Series\n\n >>> s.str.repeat(repeats=2)\n 0 aa\n 1 bb\n 2 cc\n dtype: object\n\n Sequence of int repeats corresponding string in Series\n\n >>> s.str.repeat(repeats=[1, 2, 3])\n 0 a\n 1 bb\n 2 ccc\n dtype: object\n \"\"\"\n if is_scalar(repeats):\n def scalar_rep(x):\n try:\n return bytes.__mul__(x, repeats)\n except TypeError:\n return str.__mul__(x, repeats)\n\n return _na_map(scalar_rep, arr)\n else:\n\n def rep(x, r):\n try:\n return bytes.__mul__(x, r)\n except TypeError:\n return str.__mul__(x, r)\n\n repeats = np.asarray(repeats, dtype=object)\n result = libops.vec_binop(com.values_from_object(arr), repeats, rep)\n return result\n\n\ndef str_match(arr, pat, case=True, flags=0, na=np.nan):\n \"\"\"\n Determine if each string matches a regular expression.\n\n Parameters\n ----------\n pat : str\n Character sequence or regular expression.\n case : bool, default True\n If True, case sensitive.\n flags : int, default 0 (no flags)\n re module flags, e.g. 
re.IGNORECASE.\n na : default NaN\n Fill value for missing values.\n\n Returns\n -------\n Series/array of boolean values\n\n See Also\n --------\n contains : Analogous, but less strict, relying on re.search instead of\n re.match.\n extract : Extract matched groups.\n \"\"\"\n if not case:\n flags |= re.IGNORECASE\n\n regex = re.compile(pat, flags=flags)\n\n dtype = bool\n f = lambda x: bool(regex.match(x))\n\n return _na_map(f, arr, na, dtype=dtype)\n\n\ndef _get_single_group_name(rx):\n try:\n return list(rx.groupindex.keys()).pop()\n except IndexError:\n return None\n\n\ndef _groups_or_na_fun(regex):\n \"\"\"Used in both extract_noexpand and extract_frame\"\"\"\n if regex.groups == 0:\n raise ValueError(\"pattern contains no capture groups\")\n empty_row = [np.nan] * regex.groups\n\n def f(x):\n if not isinstance(x, str):\n return empty_row\n m = regex.search(x)\n if m:\n return [np.nan if item is None else item for item in m.groups()]\n else:\n return empty_row\n return f\n\n\ndef _str_extract_noexpand(arr, pat, flags=0):\n \"\"\"\n Find groups in each string in the Series using passed regular\n expression. This function is called from\n str_extract(expand=False), and can return Series, DataFrame, or\n Index.\n\n \"\"\"\n from pandas import DataFrame, Index\n\n regex = re.compile(pat, flags=flags)\n groups_or_na = _groups_or_na_fun(regex)\n\n if regex.groups == 1:\n result = np.array([groups_or_na(val)[0] for val in arr], dtype=object)\n name = _get_single_group_name(regex)\n else:\n if isinstance(arr, Index):\n raise ValueError(\"only one regex group is supported with Index\")\n name = None\n names = dict(zip(regex.groupindex.values(), regex.groupindex.keys()))\n columns = [names.get(1 + i, i) for i in range(regex.groups)]\n if arr.empty:\n result = DataFrame(columns=columns, dtype=object)\n else:\n result = DataFrame(\n [groups_or_na(val) for val in arr],\n columns=columns,\n index=arr.index,\n dtype=object)\n return result, name\n\n\ndef _str_extract_frame(arr, pat, flags=0):\n \"\"\"\n For each subject string in the Series, extract groups from the\n first match of regular expression pat. This function is called from\n str_extract(expand=True), and always returns a DataFrame.\n\n \"\"\"\n from pandas import DataFrame\n\n regex = re.compile(pat, flags=flags)\n groups_or_na = _groups_or_na_fun(regex)\n names = dict(zip(regex.groupindex.values(), regex.groupindex.keys()))\n columns = [names.get(1 + i, i) for i in range(regex.groups)]\n\n if len(arr) == 0:\n return DataFrame(columns=columns, dtype=object)\n try:\n result_index = arr.index\n except AttributeError:\n result_index = None\n return DataFrame(\n [groups_or_na(val) for val in arr],\n columns=columns,\n index=result_index,\n dtype=object)\n\n\ndef str_extract(arr, pat, flags=0, expand=True):\n r\"\"\"\n Extract capture groups in the regex `pat` as columns in a DataFrame.\n\n For each subject string in the Series, extract groups from the\n first match of regular expression `pat`.\n\n Parameters\n ----------\n pat : str\n Regular expression pattern with capturing groups.\n flags : int, default 0 (no flags)\n Flags from the ``re`` module, e.g. ``re.IGNORECASE``, that\n modify regular expression matching for things like case,\n spaces, etc. For more details, see :mod:`re`.\n expand : bool, default True\n If True, return DataFrame with one column per capture group.\n If False, return a Series/Index if there is one capture group\n or DataFrame if there are multiple capture groups.\n\n .. 
versionadded:: 0.18.0\n\n Returns\n -------\n DataFrame or Series or Index\n A DataFrame with one row for each subject string, and one\n column for each group. Any capture group names in regular\n expression pat will be used for column names; otherwise\n capture group numbers will be used. The dtype of each result\n column is always object, even when no match is found. If\n ``expand=False`` and pat has only one capture group, then\n return a Series (if subject is a Series) or Index (if subject\n is an Index).\n\n See Also\n --------\n extractall : Returns all matches (not just the first match).\n\n Examples\n --------\n A pattern with two groups will return a DataFrame with two columns.\n Non-matches will be NaN.\n\n >>> s = pd.Series(['a1', 'b2', 'c3'])\n >>> s.str.extract(r'([ab])(\\d)')\n 0 1\n 0 a 1\n 1 b 2\n 2 NaN NaN\n\n A pattern may contain optional groups.\n\n >>> s.str.extract(r'([ab])?(\\d)')\n 0 1\n 0 a 1\n 1 b 2\n 2 NaN 3\n\n Named groups will become column names in the result.\n\n >>> s.str.extract(r'(?P<letter>[ab])(?P<digit>\\d)')\n letter digit\n 0 a 1\n 1 b 2\n 2 NaN NaN\n\n A pattern with one group will return a DataFrame with one column\n if expand=True.\n\n >>> s.str.extract(r'[ab](\\d)', expand=True)\n 0\n 0 1\n 1 2\n 2 NaN\n\n A pattern with one group will return a Series if expand=False.\n\n >>> s.str.extract(r'[ab](\\d)', expand=False)\n 0 1\n 1 2\n 2 NaN\n dtype: object\n \"\"\"\n if not isinstance(expand, bool):\n raise ValueError(\"expand must be True or False\")\n if expand:\n return _str_extract_frame(arr._orig, pat, flags=flags)\n else:\n result, name = _str_extract_noexpand(arr._parent, pat, flags=flags)\n return arr._wrap_result(result, name=name, expand=expand)\n\n\ndef str_extractall(arr, pat, flags=0):\n r\"\"\"\n For each subject string in the Series, extract groups from all\n matches of regular expression pat. When each subject string in the\n Series has exactly one match, extractall(pat).xs(0, level='match')\n is the same as extract(pat).\n\n .. versionadded:: 0.18.0\n\n Parameters\n ----------\n pat : str\n Regular expression pattern with capturing groups.\n flags : int, default 0 (no flags)\n A ``re`` module flag, for example ``re.IGNORECASE``. These allow\n to modify regular expression matching for things like case, spaces,\n etc. Multiple flags can be combined with the bitwise OR operator,\n for example ``re.IGNORECASE | re.MULTILINE``.\n\n Returns\n -------\n DataFrame\n A ``DataFrame`` with one row for each match, and one column for each\n group. Its rows have a ``MultiIndex`` with first levels that come from\n the subject ``Series``. The last level is named 'match' and indexes the\n matches in each item of the ``Series``. 
Any capture group names in\n regular expression pat will be used for column names; otherwise capture\n group numbers will be used.\n\n See Also\n --------\n extract : Returns first match only (not all matches).\n\n Examples\n --------\n A pattern with one group will return a DataFrame with one column.\n Indices with no matches will not appear in the result.\n\n >>> s = pd.Series([\"a1a2\", \"b1\", \"c1\"], index=[\"A\", \"B\", \"C\"])\n >>> s.str.extractall(r\"[ab](\\d)\")\n 0\n match\n A 0 1\n 1 2\n B 0 1\n\n Capture group names are used for column names of the result.\n\n >>> s.str.extractall(r\"[ab](?P<digit>\\d)\")\n digit\n match\n A 0 1\n 1 2\n B 0 1\n\n A pattern with two groups will return a DataFrame with two columns.\n\n >>> s.str.extractall(r\"(?P<letter>[ab])(?P<digit>\\d)\")\n letter digit\n match\n A 0 a 1\n 1 a 2\n B 0 b 1\n\n Optional groups that do not match are NaN in the result.\n\n >>> s.str.extractall(r\"(?P<letter>[ab])?(?P<digit>\\d)\")\n letter digit\n match\n A 0 a 1\n 1 a 2\n B 0 b 1\n C 0 NaN 1\n \"\"\"\n\n regex = re.compile(pat, flags=flags)\n # the regex must contain capture groups.\n if regex.groups == 0:\n raise ValueError(\"pattern contains no capture groups\")\n\n if isinstance(arr, ABCIndexClass):\n arr = arr.to_series().reset_index(drop=True)\n\n names = dict(zip(regex.groupindex.values(), regex.groupindex.keys()))\n columns = [names.get(1 + i, i) for i in range(regex.groups)]\n match_list = []\n index_list = []\n is_mi = arr.index.nlevels > 1\n\n for subject_key, subject in arr.iteritems():\n if isinstance(subject, str):\n\n if not is_mi:\n subject_key = (subject_key, )\n\n for match_i, match_tuple in enumerate(regex.findall(subject)):\n if isinstance(match_tuple, str):\n match_tuple = (match_tuple,)\n na_tuple = [np.NaN if group == \"\" else group\n for group in match_tuple]\n match_list.append(na_tuple)\n result_key = tuple(subject_key + (match_i, ))\n index_list.append(result_key)\n\n from pandas import MultiIndex\n index = MultiIndex.from_tuples(\n index_list, names=arr.index.names + [\"match\"])\n\n result = arr._constructor_expanddim(match_list, index=index,\n columns=columns)\n return result\n\n\ndef str_get_dummies(arr, sep='|'):\n \"\"\"\n Split each string in the Series by sep and return a DataFrame\n of dummy/indicator variables.\n\n Parameters\n ----------\n sep : str, default \"|\"\n String to split on.\n\n Returns\n -------\n DataFrame\n Dummy variables corresponding to values of the Series.\n\n See Also\n --------\n get_dummies : Convert categorical variable into dummy/indicator\n variables.\n\n Examples\n --------\n >>> pd.Series(['a|b', 'a', 'a|c']).str.get_dummies()\n a b c\n 0 1 1 0\n 1 1 0 0\n 2 1 0 1\n\n >>> pd.Series(['a|b', np.nan, 'a|c']).str.get_dummies()\n a b c\n 0 1 1 0\n 1 0 0 0\n 2 1 0 1\n \"\"\"\n arr = arr.fillna('')\n try:\n arr = sep + arr + sep\n except TypeError:\n arr = sep + arr.astype(str) + sep\n\n tags = set()\n for ts in arr.str.split(sep):\n tags.update(ts)\n tags = sorted(tags - {\"\"})\n\n dummies = np.empty((len(arr), len(tags)), dtype=np.int64)\n\n for i, t in enumerate(tags):\n pat = sep + t + sep\n dummies[:, i] = lib.map_infer(arr.values, lambda x: pat in x)\n return dummies, tags\n\n\ndef str_join(arr, sep):\n \"\"\"\n Join lists contained as elements in the Series/Index with passed delimiter.\n\n If the elements of a Series are lists themselves, join the content of these\n lists using the delimiter passed to the function.\n This function is an equivalent to :meth:`str.join`.\n\n Parameters\n 
----------\n sep : str\n Delimiter to use between list entries.\n\n Returns\n -------\n Series/Index: object\n The list entries concatenated by intervening occurrences of the\n delimiter.\n\n Raises\n -------\n AttributeError\n If the supplied Series contains neither strings nor lists.\n\n See Also\n --------\n str.join : Standard library version of this method.\n Series.str.split : Split strings around given separator/delimiter.\n\n Notes\n -----\n If any of the list items is not a string object, the result of the join\n will be `NaN`.\n\n Examples\n --------\n Example with a list that contains non-string elements.\n\n >>> s = pd.Series([['lion', 'elephant', 'zebra'],\n ... [1.1, 2.2, 3.3],\n ... ['cat', np.nan, 'dog'],\n ... ['cow', 4.5, 'goat'],\n ... ['duck', ['swan', 'fish'], 'guppy']])\n >>> s\n 0 [lion, elephant, zebra]\n 1 [1.1, 2.2, 3.3]\n 2 [cat, nan, dog]\n 3 [cow, 4.5, goat]\n 4 [duck, [swan, fish], guppy]\n dtype: object\n\n Join all lists using a '-'. The lists containing object(s) of types other\n than str will produce a NaN.\n\n >>> s.str.join('-')\n 0 lion-elephant-zebra\n 1 NaN\n 2 NaN\n 3 NaN\n 4 NaN\n dtype: object\n \"\"\"\n return _na_map(sep.join, arr)\n\n\ndef str_findall(arr, pat, flags=0):\n \"\"\"\n Find all occurrences of pattern or regular expression in the Series/Index.\n\n Equivalent to applying :func:`re.findall` to all the elements in the\n Series/Index.\n\n Parameters\n ----------\n pat : str\n Pattern or regular expression.\n flags : int, default 0\n Flags from ``re`` module, e.g. `re.IGNORECASE` (default is 0, which\n means no flags).\n\n Returns\n -------\n Series/Index of lists of strings\n All non-overlapping matches of pattern or regular expression in each\n string of this Series/Index.\n\n See Also\n --------\n count : Count occurrences of pattern or regular expression in each string\n of the Series/Index.\n extractall : For each string in the Series, extract groups from all matches\n of regular expression and return a DataFrame with one row for each\n match and one column for each group.\n re.findall : The equivalent ``re`` function to all non-overlapping matches\n of pattern or regular expression in string, as a list of strings.\n\n Examples\n --------\n\n >>> s = pd.Series(['Lion', 'Monkey', 'Rabbit'])\n\n The search for the pattern 'Monkey' returns one match:\n\n >>> s.str.findall('Monkey')\n 0 []\n 1 [Monkey]\n 2 []\n dtype: object\n\n On the other hand, the search for the pattern 'MONKEY' doesn't return any\n match:\n\n >>> s.str.findall('MONKEY')\n 0 []\n 1 []\n 2 []\n dtype: object\n\n Flags can be added to the pattern or regular expression. For instance,\n to find the pattern 'MONKEY' ignoring the case:\n\n >>> import re\n >>> s.str.findall('MONKEY', flags=re.IGNORECASE)\n 0 []\n 1 [Monkey]\n 2 []\n dtype: object\n\n When the pattern matches more than one string in the Series, all matches\n are returned:\n\n >>> s.str.findall('on')\n 0 [on]\n 1 [on]\n 2 []\n dtype: object\n\n Regular expressions are supported too. 
For instance, the search for all the\n strings ending with the word 'on' is shown next:\n\n >>> s.str.findall('on$')\n 0 [on]\n 1 []\n 2 []\n dtype: object\n\n If the pattern is found more than once in the same string, then a list of\n multiple strings is returned:\n\n >>> s.str.findall('b')\n 0 []\n 1 []\n 2 [b, b]\n dtype: object\n \"\"\"\n regex = re.compile(pat, flags=flags)\n return _na_map(regex.findall, arr)\n\n\ndef str_find(arr, sub, start=0, end=None, side='left'):\n \"\"\"\n Return indexes in each strings in the Series/Index where the\n substring is fully contained between [start:end]. Return -1 on failure.\n\n Parameters\n ----------\n sub : str\n Substring being searched.\n start : int\n Left edge index.\n end : int\n Right edge index.\n side : {'left', 'right'}, default 'left'\n Specifies a starting side, equivalent to ``find`` or ``rfind``.\n\n Returns\n -------\n Series or Index\n Indexes where substring is found.\n \"\"\"\n\n if not isinstance(sub, str):\n msg = 'expected a string object, not {0}'\n raise TypeError(msg.format(type(sub).__name__))\n\n if side == 'left':\n method = 'find'\n elif side == 'right':\n method = 'rfind'\n else: # pragma: no cover\n raise ValueError('Invalid side')\n\n if end is None:\n f = lambda x: getattr(x, method)(sub, start)\n else:\n f = lambda x: getattr(x, method)(sub, start, end)\n\n return _na_map(f, arr, dtype=int)\n\n\ndef str_index(arr, sub, start=0, end=None, side='left'):\n if not isinstance(sub, str):\n msg = 'expected a string object, not {0}'\n raise TypeError(msg.format(type(sub).__name__))\n\n if side == 'left':\n method = 'index'\n elif side == 'right':\n method = 'rindex'\n else: # pragma: no cover\n raise ValueError('Invalid side')\n\n if end is None:\n f = lambda x: getattr(x, method)(sub, start)\n else:\n f = lambda x: getattr(x, method)(sub, start, end)\n\n return _na_map(f, arr, dtype=int)\n\n\ndef str_pad(arr, width, side='left', fillchar=' '):\n \"\"\"\n Pad strings in the Series/Index up to width.\n\n Parameters\n ----------\n width : int\n Minimum width of resulting string; additional characters will be filled\n with character defined in `fillchar`.\n side : {'left', 'right', 'both'}, default 'left'\n Side from which to fill resulting string.\n fillchar : str, default ' '\n Additional character for filling, default is whitespace.\n\n Returns\n -------\n Series or Index of object\n Returns Series or Index with minimum number of char in object.\n\n See Also\n --------\n Series.str.rjust : Fills the left side of strings with an arbitrary\n character. Equivalent to ``Series.str.pad(side='left')``.\n Series.str.ljust : Fills the right side of strings with an arbitrary\n character. Equivalent to ``Series.str.pad(side='right')``.\n Series.str.center : Fills boths sides of strings with an arbitrary\n character. Equivalent to ``Series.str.pad(side='both')``.\n Series.str.zfill : Pad strings in the Series/Index by prepending '0'\n character. 
Equivalent to ``Series.str.pad(side='left', fillchar='0')``.\n\n Examples\n --------\n >>> s = pd.Series([\"caribou\", \"tiger\"])\n >>> s\n 0 caribou\n 1 tiger\n dtype: object\n\n >>> s.str.pad(width=10)\n 0 caribou\n 1 tiger\n dtype: object\n\n >>> s.str.pad(width=10, side='right', fillchar='-')\n 0 caribou---\n 1 tiger-----\n dtype: object\n\n >>> s.str.pad(width=10, side='both', fillchar='-')\n 0 -caribou--\n 1 --tiger---\n dtype: object\n \"\"\"\n if not isinstance(fillchar, str):\n msg = 'fillchar must be a character, not {0}'\n raise TypeError(msg.format(type(fillchar).__name__))\n\n if len(fillchar) != 1:\n raise TypeError('fillchar must be a character, not str')\n\n if not is_integer(width):\n msg = 'width must be of integer type, not {0}'\n raise TypeError(msg.format(type(width).__name__))\n\n if side == 'left':\n f = lambda x: x.rjust(width, fillchar)\n elif side == 'right':\n f = lambda x: x.ljust(width, fillchar)\n elif side == 'both':\n f = lambda x: x.center(width, fillchar)\n else: # pragma: no cover\n raise ValueError('Invalid side')\n\n return _na_map(f, arr)\n\n\ndef str_split(arr, pat=None, n=None):\n\n if pat is None:\n if n is None or n == 0:\n n = -1\n f = lambda x: x.split(pat, n)\n else:\n if len(pat) == 1:\n if n is None or n == 0:\n n = -1\n f = lambda x: x.split(pat, n)\n else:\n if n is None or n == -1:\n n = 0\n regex = re.compile(pat)\n f = lambda x: regex.split(x, maxsplit=n)\n res = _na_map(f, arr)\n return res\n\n\ndef str_rsplit(arr, pat=None, n=None):\n\n if n is None or n == 0:\n n = -1\n f = lambda x: x.rsplit(pat, n)\n res = _na_map(f, arr)\n return res\n\n\ndef str_slice(arr, start=None, stop=None, step=None):\n \"\"\"\n Slice substrings from each element in the Series or Index.\n\n Parameters\n ----------\n start : int, optional\n Start position for slice operation.\n stop : int, optional\n Stop position for slice operation.\n step : int, optional\n Step size for slice operation.\n\n Returns\n -------\n Series or Index of object\n Series or Index from sliced substring from original string object.\n\n See Also\n --------\n Series.str.slice_replace : Replace a slice with a string.\n Series.str.get : Return element at position.\n Equivalent to `Series.str.slice(start=i, stop=i+1)` with `i`\n being the position.\n\n Examples\n --------\n >>> s = pd.Series([\"koala\", \"fox\", \"chameleon\"])\n >>> s\n 0 koala\n 1 fox\n 2 chameleon\n dtype: object\n\n >>> s.str.slice(start=1)\n 0 oala\n 1 ox\n 2 hameleon\n dtype: object\n\n >>> s.str.slice(stop=2)\n 0 ko\n 1 fo\n 2 ch\n dtype: object\n\n >>> s.str.slice(step=2)\n 0 kaa\n 1 fx\n 2 caeen\n dtype: object\n\n >>> s.str.slice(start=0, stop=5, step=3)\n 0 kl\n 1 f\n 2 cm\n dtype: object\n\n Equivalent behaviour to:\n\n >>> s.str[0:5:3]\n 0 kl\n 1 f\n 2 cm\n dtype: object\n \"\"\"\n obj = slice(start, stop, step)\n f = lambda x: x[obj]\n return _na_map(f, arr)\n\n\ndef str_slice_replace(arr, start=None, stop=None, repl=None):\n \"\"\"\n Replace a positional slice of a string with another value.\n\n Parameters\n ----------\n start : int, optional\n Left index position to use for the slice. If not specified (None),\n the slice is unbounded on the left, i.e. slice from the start\n of the string.\n stop : int, optional\n Right index position to use for the slice. If not specified (None),\n the slice is unbounded on the right, i.e. slice until the\n end of the string.\n repl : str, optional\n String for replacement. 
If not specified (None), the sliced region\n is replaced with an empty string.\n\n Returns\n -------\n Series or Index\n Same type as the original object.\n\n See Also\n --------\n Series.str.slice : Just slicing without replacement.\n\n Examples\n --------\n >>> s = pd.Series(['a', 'ab', 'abc', 'abdc', 'abcde'])\n >>> s\n 0 a\n 1 ab\n 2 abc\n 3 abdc\n 4 abcde\n dtype: object\n\n Specify just `start`, meaning replace `start` until the end of the\n string with `repl`.\n\n >>> s.str.slice_replace(1, repl='X')\n 0 aX\n 1 aX\n 2 aX\n 3 aX\n 4 aX\n dtype: object\n\n Specify just `stop`, meaning the start of the string to `stop` is replaced\n with `repl`, and the rest of the string is included.\n\n >>> s.str.slice_replace(stop=2, repl='X')\n 0 X\n 1 X\n 2 Xc\n 3 Xdc\n 4 Xcde\n dtype: object\n\n Specify `start` and `stop`, meaning the slice from `start` to `stop` is\n replaced with `repl`. Everything before or after `start` and `stop` is\n included as is.\n\n >>> s.str.slice_replace(start=1, stop=3, repl='X')\n 0 aX\n 1 aX\n 2 aX\n 3 aXc\n 4 aXde\n dtype: object\n \"\"\"\n if repl is None:\n repl = ''\n\n def f(x):\n if x[start:stop] == '':\n local_stop = start\n else:\n local_stop = stop\n y = ''\n if start is not None:\n y += x[:start]\n y += repl\n if stop is not None:\n y += x[local_stop:]\n return y\n\n return _na_map(f, arr)\n\n\ndef str_strip(arr, to_strip=None, side='both'):\n \"\"\"\n Strip whitespace (including newlines) from each string in the\n Series/Index.\n\n Parameters\n ----------\n to_strip : str or unicode\n side : {'left', 'right', 'both'}, default 'both'\n\n Returns\n -------\n Series or Index\n \"\"\"\n if side == 'both':\n f = lambda x: x.strip(to_strip)\n elif side == 'left':\n f = lambda x: x.lstrip(to_strip)\n elif side == 'right':\n f = lambda x: x.rstrip(to_strip)\n else: # pragma: no cover\n raise ValueError('Invalid side')\n return _na_map(f, arr)\n\n\ndef str_wrap(arr, width, **kwargs):\n r\"\"\"\n Wrap long strings in the Series/Index to be formatted in\n paragraphs with length less than a given width.\n\n This method has the same keyword parameters and defaults as\n :class:`textwrap.TextWrapper`.\n\n Parameters\n ----------\n width : int\n Maximum line width.\n expand_tabs : bool, optional\n If True, tab characters will be expanded to spaces (default: True).\n replace_whitespace : bool, optional\n If True, each whitespace character (as defined by string.whitespace)\n remaining after tab expansion will be replaced by a single space\n (default: True).\n drop_whitespace : bool, optional\n If True, whitespace that, after wrapping, happens to end up at the\n beginning or end of a line is dropped (default: True).\n break_long_words : bool, optional\n If True, then words longer than width will be broken in order to ensure\n that no lines are longer than width. If it is false, long words will\n not be broken, and some lines may be longer than width (default: True).\n break_on_hyphens : bool, optional\n If True, wrapping will occur preferably on whitespace and right after\n hyphens in compound words, as it is customary in English. If false,\n only whitespaces will be considered as potentially good places for line\n breaks, but you need to set break_long_words to false if you want truly\n insecable words (default: True).\n\n Returns\n -------\n Series or Index\n\n Notes\n -----\n Internally, this method uses a :class:`textwrap.TextWrapper` instance with\n default settings. 
To achieve behavior matching R's stringr library str_wrap\n function, use the arguments:\n\n - expand_tabs = False\n - replace_whitespace = True\n - drop_whitespace = True\n - break_long_words = False\n - break_on_hyphens = False\n\n Examples\n --------\n\n >>> s = pd.Series(['line to be wrapped', 'another line to be wrapped'])\n >>> s.str.wrap(12)\n 0 line to be\\nwrapped\n 1 another line\\nto be\\nwrapped\n dtype: object\n \"\"\"\n kwargs['width'] = width\n\n tw = textwrap.TextWrapper(**kwargs)\n\n return _na_map(lambda s: '\\n'.join(tw.wrap(s)), arr)\n\n\ndef str_translate(arr, table):\n \"\"\"\n Map all characters in the string through the given mapping table.\n Equivalent to standard :meth:`str.translate`.\n\n Parameters\n ----------\n table : dict\n table is a mapping of Unicode ordinals to Unicode ordinals, strings, or\n None. Unmapped characters are left untouched.\n Characters mapped to None are deleted. :meth:`str.maketrans` is a\n helper function for making translation tables.\n\n Returns\n -------\n Series or Index\n \"\"\"\n return _na_map(lambda x: x.translate(table), arr)\n\n\ndef str_get(arr, i):\n \"\"\"\n Extract element from each component at specified position.\n\n Extract element from lists, tuples, or strings in each element in the\n Series/Index.\n\n Parameters\n ----------\n i : int\n Position of element to extract.\n\n Returns\n -------\n Series or Index\n\n Examples\n --------\n >>> s = pd.Series([\"String\",\n ... (1, 2, 3),\n ... [\"a\", \"b\", \"c\"],\n ... 123,\n ... -456,\n ... {1: \"Hello\", \"2\": \"World\"}])\n >>> s\n 0 String\n 1 (1, 2, 3)\n 2 [a, b, c]\n 3 123\n 4 -456\n 5 {1: 'Hello', '2': 'World'}\n dtype: object\n\n >>> s.str.get(1)\n 0 t\n 1 2\n 2 b\n 3 NaN\n 4 NaN\n 5 Hello\n dtype: object\n\n >>> s.str.get(-1)\n 0 g\n 1 3\n 2 c\n 3 NaN\n 4 NaN\n 5 None\n dtype: object\n \"\"\"\n def f(x):\n if isinstance(x, dict):\n return x.get(i)\n elif len(x) > i >= -len(x):\n return x[i]\n return np.nan\n return _na_map(f, arr)\n\n\ndef str_decode(arr, encoding, errors=\"strict\"):\n \"\"\"\n Decode character string in the Series/Index using indicated encoding.\n Equivalent to :meth:`str.decode` in python2 and :meth:`bytes.decode` in\n python3.\n\n Parameters\n ----------\n encoding : str\n errors : str, optional\n\n Returns\n -------\n Series or Index\n \"\"\"\n if encoding in _cpython_optimized_decoders:\n # CPython optimized implementation\n f = lambda x: x.decode(encoding, errors)\n else:\n decoder = codecs.getdecoder(encoding)\n f = lambda x: decoder(x, errors)[0]\n return _na_map(f, arr)\n\n\ndef str_encode(arr, encoding, errors=\"strict\"):\n \"\"\"\n Encode character string in the Series/Index using indicated encoding.\n Equivalent to :meth:`str.encode`.\n\n Parameters\n ----------\n encoding : str\n errors : str, optional\n\n Returns\n -------\n encoded : Series/Index of objects\n \"\"\"\n if encoding in _cpython_optimized_encoders:\n # CPython optimized implementation\n f = lambda x: x.encode(encoding, errors)\n else:\n encoder = codecs.getencoder(encoding)\n f = lambda x: encoder(x, errors)[0]\n return _na_map(f, arr)\n\n\ndef _noarg_wrapper(f, docstring=None, **kargs):\n def wrapper(self):\n result = _na_map(f, self._parent, **kargs)\n return self._wrap_result(result)\n\n wrapper.__name__ = f.__name__\n if docstring is not None:\n wrapper.__doc__ = docstring\n else:\n raise ValueError('Provide docstring')\n\n return wrapper\n\n\ndef _pat_wrapper(f, flags=False, na=False, **kwargs):\n def wrapper1(self, pat):\n result = f(self._parent, pat)\n return 
self._wrap_result(result)\n\n def wrapper2(self, pat, flags=0, **kwargs):\n result = f(self._parent, pat, flags=flags, **kwargs)\n return self._wrap_result(result)\n\n def wrapper3(self, pat, na=np.nan):\n result = f(self._parent, pat, na=na)\n return self._wrap_result(result)\n\n wrapper = wrapper3 if na else wrapper2 if flags else wrapper1\n\n wrapper.__name__ = f.__name__\n if f.__doc__:\n wrapper.__doc__ = f.__doc__\n\n return wrapper\n\n\ndef copy(source):\n \"Copy a docstring from another source function (if present)\"\n\n def do_copy(target):\n if source.__doc__:\n target.__doc__ = source.__doc__\n return target\n\n return do_copy\n\n\nclass StringMethods(NoNewAttributesMixin):\n \"\"\"\n Vectorized string functions for Series and Index. NAs stay NA unless\n handled otherwise by a particular method. Patterned after Python's string\n methods, with some inspiration from R's stringr package.\n\n Examples\n --------\n >>> s.str.split('_')\n >>> s.str.replace('_', '')\n \"\"\"\n\n def __init__(self, data):\n self._validate(data)\n self._is_categorical = is_categorical_dtype(data)\n\n # .values.categories works for both Series/Index\n self._parent = data.values.categories if self._is_categorical else data\n # save orig to blow up categoricals to the right type\n self._orig = data\n self._freeze()\n\n @staticmethod\n def _validate(data):\n from pandas.core.index import Index\n\n if (isinstance(data, ABCSeries) and\n not ((is_categorical_dtype(data.dtype) and\n is_object_dtype(data.values.categories)) or\n (is_object_dtype(data.dtype)))):\n # it's neither a string series not a categorical series with\n # strings inside the categories.\n # this really should exclude all series with any non-string values\n # (instead of test for object dtype), but that isn't practical for\n # performance reasons until we have a str dtype (GH 9343)\n raise AttributeError(\"Can only use .str accessor with string \"\n \"values, which use np.object_ dtype in \"\n \"pandas\")\n elif isinstance(data, Index):\n # can't use ABCIndex to exclude non-str\n\n # see src/inference.pyx which can contain string values\n allowed_types = ('string', 'unicode', 'mixed', 'mixed-integer')\n if is_categorical_dtype(data.dtype):\n inf_type = data.categories.inferred_type\n else:\n inf_type = data.inferred_type\n if inf_type not in allowed_types:\n message = (\"Can only use .str accessor with string values \"\n \"(i.e. 
inferred_type is 'string', 'unicode' or \"\n \"'mixed')\")\n raise AttributeError(message)\n if data.nlevels > 1:\n message = (\"Can only use .str accessor with Index, not \"\n \"MultiIndex\")\n raise AttributeError(message)\n\n def __getitem__(self, key):\n if isinstance(key, slice):\n return self.slice(start=key.start, stop=key.stop, step=key.step)\n else:\n return self.get(key)\n\n def __iter__(self):\n i = 0\n g = self.get(i)\n while g.notna().any():\n yield g\n i += 1\n g = self.get(i)\n\n def _wrap_result(self, result, use_codes=True,\n name=None, expand=None, fill_value=np.nan):\n\n from pandas import Index, Series, MultiIndex\n\n # for category, we do the stuff on the categories, so blow it up\n # to the full series again\n # But for some operations, we have to do the stuff on the full values,\n # so make it possible to skip this step as the method already did this\n # before the transformation...\n if use_codes and self._is_categorical:\n # if self._orig is a CategoricalIndex, there is no .cat-accessor\n result = take_1d(result, Series(self._orig, copy=False).cat.codes,\n fill_value=fill_value)\n\n if not hasattr(result, 'ndim') or not hasattr(result, 'dtype'):\n return result\n assert result.ndim < 3\n\n if expand is None:\n # infer from ndim if expand is not specified\n expand = result.ndim != 1\n\n elif expand is True and not isinstance(self._orig, Index):\n # required when expand=True is explicitly specified\n # not needed when inferred\n\n def cons_row(x):\n if is_list_like(x):\n return x\n else:\n return [x]\n\n result = [cons_row(x) for x in result]\n if result:\n # propagate nan values to match longest sequence (GH 18450)\n max_len = max(len(x) for x in result)\n result = [x * max_len if len(x) == 0 or x[0] is np.nan\n else x for x in result]\n\n if not isinstance(expand, bool):\n raise ValueError(\"expand must be True or False\")\n\n if expand is False:\n # if expand is False, result should have the same name\n # as the original otherwise specified\n if name is None:\n name = getattr(result, 'name', None)\n if name is None:\n # do not use logical or, _orig may be a DataFrame\n # which has \"name\" column\n name = self._orig.name\n\n # Wait until we are sure result is a Series or Index before\n # checking attributes (GH 12180)\n if isinstance(self._orig, Index):\n # if result is a boolean np.array, return the np.array\n # instead of wrapping it into a boolean Index (GH 8875)\n if is_bool_dtype(result):\n return result\n\n if expand:\n result = list(result)\n out = MultiIndex.from_tuples(result, names=name)\n if out.nlevels == 1:\n # We had all tuples of length-one, which are\n # better represented as a regular Index.\n out = out.get_level_values(0)\n return out\n else:\n return Index(result, name=name)\n else:\n index = self._orig.index\n if expand:\n cons = self._orig._constructor_expanddim\n return cons(result, columns=name, index=index)\n else:\n # Must be a Series\n cons = self._orig._constructor\n return cons(result, name=name, index=index)\n\n def _get_series_list(self, others, ignore_index=False):\n \"\"\"\n Auxiliary function for :meth:`str.cat`. 
Turn potentially mixed input\n into a list of Series (elements without an index must match the length\n of the calling Series/Index).\n\n Parameters\n ----------\n others : Series, Index, DataFrame, np.ndarray, list-like or list-like\n of objects that are Series, Index or np.ndarray (1-dim)\n ignore_index : boolean, default False\n Determines whether to forcefully align others with index of caller\n\n Returns\n -------\n tuple : (others transformed into list of Series,\n boolean whether FutureWarning should be raised)\n \"\"\"\n\n # Once str.cat defaults to alignment, this function can be simplified;\n # will not need `ignore_index` and the second boolean output anymore\n\n from pandas import Index, Series, DataFrame\n\n # self._orig is either Series or Index\n idx = self._orig if isinstance(self._orig, Index) else self._orig.index\n\n err_msg = ('others must be Series, Index, DataFrame, np.ndarrary or '\n 'list-like (either containing only strings or containing '\n 'only objects of type Series/Index/list-like/np.ndarray)')\n\n # Generally speaking, all objects without an index inherit the index\n # `idx` of the calling Series/Index - i.e. must have matching length.\n # Objects with an index (i.e. Series/Index/DataFrame) keep their own\n # index, *unless* ignore_index is set to True.\n if isinstance(others, Series):\n warn = not others.index.equals(idx)\n # only reconstruct Series when absolutely necessary\n los = [Series(others.values, index=idx)\n if ignore_index and warn else others]\n return (los, warn)\n elif isinstance(others, Index):\n warn = not others.equals(idx)\n los = [Series(others.values,\n index=(idx if ignore_index else others))]\n return (los, warn)\n elif isinstance(others, DataFrame):\n warn = not others.index.equals(idx)\n if ignore_index and warn:\n # without copy, this could change \"others\"\n # that was passed to str.cat\n others = others.copy()\n others.index = idx\n return ([others[x] for x in others], warn)\n elif isinstance(others, np.ndarray) and others.ndim == 2:\n others = DataFrame(others, index=idx)\n return ([others[x] for x in others], False)\n elif is_list_like(others, allow_sets=False):\n others = list(others) # ensure iterators do not get read twice etc\n\n # in case of list-like `others`, all elements must be\n # either one-dimensional list-likes or scalars\n if all(is_list_like(x, allow_sets=False) for x in others):\n los = []\n join_warn = False\n depr_warn = False\n # iterate through list and append list of series for each\n # element (which we check to be one-dimensional and non-nested)\n while others:\n nxt = others.pop(0) # nxt is guaranteed list-like by above\n\n # GH 21950 - DeprecationWarning\n # only allowing Series/Index/np.ndarray[1-dim] will greatly\n # simply this function post-deprecation.\n if not (isinstance(nxt, (Series, Index)) or\n (isinstance(nxt, np.ndarray) and nxt.ndim == 1)):\n depr_warn = True\n\n if not isinstance(nxt, (DataFrame, Series,\n Index, np.ndarray)):\n # safety for non-persistent list-likes (e.g. 
iterators)\n # do not map indexed/typed objects; info needed below\n nxt = list(nxt)\n\n # known types for which we can avoid deep inspection\n no_deep = ((isinstance(nxt, np.ndarray) and nxt.ndim == 1)\n or isinstance(nxt, (Series, Index)))\n # nested list-likes are forbidden:\n # -> elements of nxt must not be list-like\n is_legal = ((no_deep and nxt.dtype == object)\n or all(not is_list_like(x) for x in nxt))\n\n # DataFrame is false positive of is_legal\n # because \"x in df\" returns column names\n if not is_legal or isinstance(nxt, DataFrame):\n raise TypeError(err_msg)\n\n nxt, wnx = self._get_series_list(nxt,\n ignore_index=ignore_index)\n los = los + nxt\n join_warn = join_warn or wnx\n\n if depr_warn:\n warnings.warn('list-likes other than Series, Index, or '\n 'np.ndarray WITHIN another list-like are '\n 'deprecated and will be removed in a future '\n 'version.', FutureWarning, stacklevel=3)\n return (los, join_warn)\n elif all(not is_list_like(x) for x in others):\n return ([Series(others, index=idx)], False)\n raise TypeError(err_msg)\n\n def cat(self, others=None, sep=None, na_rep=None, join=None):\n \"\"\"\n Concatenate strings in the Series/Index with given separator.\n\n If `others` is specified, this function concatenates the Series/Index\n and elements of `others` element-wise.\n If `others` is not passed, then all values in the Series/Index are\n concatenated into a single string with a given `sep`.\n\n Parameters\n ----------\n others : Series, Index, DataFrame, np.ndarrary or list-like\n Series, Index, DataFrame, np.ndarray (one- or two-dimensional) and\n other list-likes of strings must have the same length as the\n calling Series/Index, with the exception of indexed objects (i.e.\n Series/Index/DataFrame) if `join` is not None.\n\n If others is a list-like that contains a combination of Series,\n Index or np.ndarray (1-dim), then all elements will be unpacked and\n must satisfy the above criteria individually.\n\n If others is None, the method returns the concatenation of all\n strings in the calling Series/Index.\n sep : str, default ''\n The separator between the different elements/columns. By default\n the empty string `''` is used.\n na_rep : str or None, default None\n Representation that is inserted for all missing values:\n\n - If `na_rep` is None, and `others` is None, missing values in the\n Series/Index are omitted from the result.\n - If `na_rep` is None, and `others` is not None, a row containing a\n missing value in any of the columns (before concatenation) will\n have a missing value in the result.\n join : {'left', 'right', 'outer', 'inner'}, default None\n Determines the join-style between the calling Series/Index and any\n Series/Index/DataFrame in `others` (objects without an index need\n to match the length of the calling Series/Index). If None,\n alignment is disabled, but this option will be removed in a future\n version of pandas and replaced with a default of `'left'`. To\n disable alignment, use `.values` on any Series/Index/DataFrame in\n `others`.\n\n .. 
versionadded:: 0.23.0\n\n Returns\n -------\n str, Series or Index\n If `others` is None, `str` is returned, otherwise a `Series/Index`\n (same type as caller) of objects is returned.\n\n See Also\n --------\n split : Split each string in the Series/Index.\n join : Join lists contained as elements in the Series/Index.\n\n Examples\n --------\n When not passing `others`, all values are concatenated into a single\n string:\n\n >>> s = pd.Series(['a', 'b', np.nan, 'd'])\n >>> s.str.cat(sep=' ')\n 'a b d'\n\n By default, NA values in the Series are ignored. Using `na_rep`, they\n can be given a representation:\n\n >>> s.str.cat(sep=' ', na_rep='?')\n 'a b ? d'\n\n If `others` is specified, corresponding values are concatenated with\n the separator. Result will be a Series of strings.\n\n >>> s.str.cat(['A', 'B', 'C', 'D'], sep=',')\n 0 a,A\n 1 b,B\n 2 NaN\n 3 d,D\n dtype: object\n\n Missing values will remain missing in the result, but can again be\n represented using `na_rep`\n\n >>> s.str.cat(['A', 'B', 'C', 'D'], sep=',', na_rep='-')\n 0 a,A\n 1 b,B\n 2 -,C\n 3 d,D\n dtype: object\n\n If `sep` is not specified, the values are concatenated without\n separation.\n\n >>> s.str.cat(['A', 'B', 'C', 'D'], na_rep='-')\n 0 aA\n 1 bB\n 2 -C\n 3 dD\n dtype: object\n\n Series with different indexes can be aligned before concatenation. The\n `join`-keyword works as in other methods.\n\n >>> t = pd.Series(['d', 'a', 'e', 'c'], index=[3, 0, 4, 2])\n >>> s.str.cat(t, join='left', na_rep='-')\n 0 aa\n 1 b-\n 2 -c\n 3 dd\n dtype: object\n >>>\n >>> s.str.cat(t, join='outer', na_rep='-')\n 0 aa\n 1 b-\n 2 -c\n 3 dd\n 4 -e\n dtype: object\n >>>\n >>> s.str.cat(t, join='inner', na_rep='-')\n 0 aa\n 2 -c\n 3 dd\n dtype: object\n >>>\n >>> s.str.cat(t, join='right', na_rep='-')\n 3 dd\n 0 aa\n 4 -e\n 2 -c\n dtype: object\n\n For more examples, see :ref:`here <text.concatenate>`.\n \"\"\"\n from pandas import Index, Series, concat\n\n if isinstance(others, str):\n raise ValueError(\"Did you mean to supply a `sep` keyword?\")\n if sep is None:\n sep = ''\n\n if isinstance(self._orig, Index):\n data = Series(self._orig, index=self._orig)\n else: # Series\n data = self._orig\n\n # concatenate Series/Index with itself if no \"others\"\n if others is None:\n data = ensure_object(data)\n na_mask = isna(data)\n if na_rep is None and na_mask.any():\n data = data[~na_mask]\n elif na_rep is not None and na_mask.any():\n data = np.where(na_mask, na_rep, data)\n return sep.join(data)\n\n try:\n # turn anything in \"others\" into lists of Series\n others, warn = self._get_series_list(others,\n ignore_index=(join is None))\n except ValueError: # do not catch TypeError raised by _get_series_list\n if join is None:\n raise ValueError('All arrays must be same length, except '\n 'those having an index if `join` is not None')\n else:\n raise ValueError('If `others` contains arrays or lists (or '\n 'other list-likes without an index), these '\n 'must all be of the same length as the '\n 'calling Series/Index.')\n\n if join is None and warn:\n warnings.warn(\"A future version of pandas will perform index \"\n \"alignment when `others` is a Series/Index/\"\n \"DataFrame (or a list-like containing one). To \"\n \"disable alignment (the behavior before v.0.23) and \"\n \"silence this warning, use `.values` on any Series/\"\n \"Index/DataFrame in `others`. To enable alignment \"\n \"and silence this warning, pass `join='left'|\"\n \"'outer'|'inner'|'right'`. 
The future default will \"\n \"be `join='left'`.\", FutureWarning, stacklevel=2)\n\n # if join is None, _get_series_list already force-aligned indexes\n join = 'left' if join is None else join\n\n # align if required\n if any(not data.index.equals(x.index) for x in others):\n # Need to add keys for uniqueness in case of duplicate columns\n others = concat(others, axis=1,\n join=(join if join == 'inner' else 'outer'),\n keys=range(len(others)), sort=False, copy=False)\n data, others = data.align(others, join=join)\n others = [others[x] for x in others] # again list of Series\n\n all_cols = [ensure_object(x) for x in [data] + others]\n na_masks = np.array([isna(x) for x in all_cols])\n union_mask = np.logical_or.reduce(na_masks, axis=0)\n\n if na_rep is None and union_mask.any():\n # no na_rep means NaNs for all rows where any column has a NaN\n # only necessary if there are actually any NaNs\n result = np.empty(len(data), dtype=object)\n np.putmask(result, union_mask, np.nan)\n\n not_masked = ~union_mask\n result[not_masked] = cat_core([x[not_masked] for x in all_cols],\n sep)\n elif na_rep is not None and union_mask.any():\n # fill NaNs with na_rep in case there are actually any NaNs\n all_cols = [np.where(nm, na_rep, col)\n for nm, col in zip(na_masks, all_cols)]\n result = cat_core(all_cols, sep)\n else:\n # no NaNs - can just concatenate\n result = cat_core(all_cols, sep)\n\n if isinstance(self._orig, Index):\n # add dtype for case that result is all-NA\n result = Index(result, dtype=object, name=self._orig.name)\n else: # Series\n result = Series(result, dtype=object, index=data.index,\n name=self._orig.name)\n return result\n\n _shared_docs['str_split'] = (\"\"\"\n Split strings around given separator/delimiter.\n\n Splits the string in the Series/Index from the %(side)s,\n at the specified delimiter string. 
Equivalent to :meth:`str.%(method)s`.\n\n Parameters\n ----------\n pat : str, optional\n String or regular expression to split on.\n If not specified, split on whitespace.\n n : int, default -1 (all)\n Limit number of splits in output.\n ``None``, 0 and -1 will be interpreted as return all splits.\n expand : bool, default False\n Expand the splitted strings into separate columns.\n\n * If ``True``, return DataFrame/MultiIndex expanding dimensionality.\n * If ``False``, return Series/Index, containing lists of strings.\n\n Returns\n -------\n Series, Index, DataFrame or MultiIndex\n Type matches caller unless ``expand=True`` (see Notes).\n\n See Also\n --------\n Series.str.split : Split strings around given separator/delimiter.\n Series.str.rsplit : Splits string around given separator/delimiter,\n starting from the right.\n Series.str.join : Join lists contained as elements in the Series/Index\n with passed delimiter.\n str.split : Standard library version for split.\n str.rsplit : Standard library version for rsplit.\n\n Notes\n -----\n The handling of the `n` keyword depends on the number of found splits:\n\n - If found splits > `n`, make first `n` splits only\n - If found splits <= `n`, make all splits\n - If for a certain row the number of found splits < `n`,\n append `None` for padding up to `n` if ``expand=True``\n\n If using ``expand=True``, Series and Index callers return DataFrame and\n MultiIndex objects, respectively.\n\n Examples\n --------\n >>> s = pd.Series([\"this is a regular sentence\",\n \"https://docs.python.org/3/tutorial/index.html\", np.nan])\n\n In the default setting, the string is split by whitespace.\n\n >>> s.str.split()\n 0 [this, is, a, regular, sentence]\n 1 [https://docs.python.org/3/tutorial/index.html]\n 2 NaN\n dtype: object\n\n Without the `n` parameter, the outputs of `rsplit` and `split`\n are identical.\n\n >>> s.str.rsplit()\n 0 [this, is, a, regular, sentence]\n 1 [https://docs.python.org/3/tutorial/index.html]\n 2 NaN\n dtype: object\n\n The `n` parameter can be used to limit the number of splits on the\n delimiter. The outputs of `split` and `rsplit` are different.\n\n >>> s.str.split(n=2)\n 0 [this, is, a regular sentence]\n 1 [https://docs.python.org/3/tutorial/index.html]\n 2 NaN\n dtype: object\n\n >>> s.str.rsplit(n=2)\n 0 [this is a, regular, sentence]\n 1 [https://docs.python.org/3/tutorial/index.html]\n 2 NaN\n dtype: object\n\n The `pat` parameter can be used to split by other characters.\n\n >>> s.str.split(pat = \"/\")\n 0 [this is a regular sentence]\n 1 [https:, , docs.python.org, 3, tutorial, index...\n 2 NaN\n dtype: object\n\n When using ``expand=True``, the split elements will expand out into\n separate columns. 
If NaN is present, it is propagated throughout\n the columns during the split.\n\n >>> s.str.split(expand=True)\n 0 1 2 3\n 0 this is a regular\n 1 https://docs.python.org/3/tutorial/index.html None None None\n 2 NaN NaN NaN NaN \\\n\n 4\n 0 sentence\n 1 None\n 2 NaN\n\n For slightly more complex use cases like splitting the html document name\n from a url, a combination of parameter settings can be used.\n\n >>> s.str.rsplit(\"/\", n=1, expand=True)\n 0 1\n 0 this is a regular sentence None\n 1 https://docs.python.org/3/tutorial index.html\n 2 NaN NaN\n \"\"\")\n\n @Appender(_shared_docs['str_split'] % {\n 'side': 'beginning',\n 'method': 'split'})\n def split(self, pat=None, n=-1, expand=False):\n result = str_split(self._parent, pat, n=n)\n return self._wrap_result(result, expand=expand)\n\n @Appender(_shared_docs['str_split'] % {\n 'side': 'end',\n 'method': 'rsplit'})\n def rsplit(self, pat=None, n=-1, expand=False):\n result = str_rsplit(self._parent, pat, n=n)\n return self._wrap_result(result, expand=expand)\n\n _shared_docs['str_partition'] = (\"\"\"\n Split the string at the %(side)s occurrence of `sep`.\n\n This method splits the string at the %(side)s occurrence of `sep`,\n and returns 3 elements containing the part before the separator,\n the separator itself, and the part after the separator.\n If the separator is not found, return %(return)s.\n\n Parameters\n ----------\n sep : str, default whitespace\n String to split on.\n pat : str, default whitespace\n .. deprecated:: 0.24.0\n Use ``sep`` instead\n expand : bool, default True\n If True, return DataFrame/MultiIndex expanding dimensionality.\n If False, return Series/Index.\n\n Returns\n -------\n DataFrame/MultiIndex or Series/Index of objects\n\n See Also\n --------\n %(also)s\n Series.str.split : Split strings around given separators.\n str.partition : Standard library version.\n\n Examples\n --------\n\n >>> s = pd.Series(['Linda van der Berg', 'George Pitt-Rivers'])\n >>> s\n 0 Linda van der Berg\n 1 George Pitt-Rivers\n dtype: object\n\n >>> s.str.partition()\n 0 1 2\n 0 Linda van der Berg\n 1 George Pitt-Rivers\n\n To partition by the last space instead of the first one:\n\n >>> s.str.rpartition()\n 0 1 2\n 0 Linda van der Berg\n 1 George Pitt-Rivers\n\n To partition by something different than a space:\n\n >>> s.str.partition('-')\n 0 1 2\n 0 Linda van der Berg\n 1 George Pitt - Rivers\n\n To return a Series containining tuples instead of a DataFrame:\n\n >>> s.str.partition('-', expand=False)\n 0 (Linda van der Berg, , )\n 1 (George Pitt, -, Rivers)\n dtype: object\n\n Also available on indices:\n\n >>> idx = pd.Index(['X 123', 'Y 999'])\n >>> idx\n Index(['X 123', 'Y 999'], dtype='object')\n\n Which will create a MultiIndex:\n\n >>> idx.str.partition()\n MultiIndex(levels=[['X', 'Y'], [' '], ['123', '999']],\n codes=[[0, 1], [0, 0], [0, 1]])\n\n Or an index with tuples with ``expand=False``:\n\n >>> idx.str.partition(expand=False)\n Index([('X', ' ', '123'), ('Y', ' ', '999')], dtype='object')\n \"\"\")\n\n @Appender(_shared_docs['str_partition'] % {\n 'side': 'first',\n 'return': '3 elements containing the string itself, followed by two '\n 'empty strings',\n 'also': 'rpartition : Split the string at the last occurrence of '\n '`sep`.'\n })\n @deprecate_kwarg(old_arg_name='pat', new_arg_name='sep')\n def partition(self, sep=' ', expand=True):\n f = lambda x: x.partition(sep)\n result = _na_map(f, self._parent)\n return self._wrap_result(result, expand=expand)\n\n @Appender(_shared_docs['str_partition'] % {\n 
'side': 'last',\n 'return': '3 elements containing two empty strings, followed by the '\n 'string itself',\n 'also': 'partition : Split the string at the first occurrence of '\n '`sep`.'\n })\n @deprecate_kwarg(old_arg_name='pat', new_arg_name='sep')\n def rpartition(self, sep=' ', expand=True):\n f = lambda x: x.rpartition(sep)\n result = _na_map(f, self._parent)\n return self._wrap_result(result, expand=expand)\n\n @copy(str_get)\n def get(self, i):\n result = str_get(self._parent, i)\n return self._wrap_result(result)\n\n @copy(str_join)\n def join(self, sep):\n result = str_join(self._parent, sep)\n return self._wrap_result(result)\n\n @copy(str_contains)\n def contains(self, pat, case=True, flags=0, na=np.nan, regex=True):\n result = str_contains(self._parent, pat, case=case, flags=flags, na=na,\n regex=regex)\n return self._wrap_result(result, fill_value=na)\n\n @copy(str_match)\n def match(self, pat, case=True, flags=0, na=np.nan):\n result = str_match(self._parent, pat, case=case, flags=flags, na=na)\n return self._wrap_result(result, fill_value=na)\n\n @copy(str_replace)\n def replace(self, pat, repl, n=-1, case=None, flags=0, regex=True):\n result = str_replace(self._parent, pat, repl, n=n, case=case,\n flags=flags, regex=regex)\n return self._wrap_result(result)\n\n @copy(str_repeat)\n def repeat(self, repeats):\n result = str_repeat(self._parent, repeats)\n return self._wrap_result(result)\n\n @copy(str_pad)\n def pad(self, width, side='left', fillchar=' '):\n result = str_pad(self._parent, width, side=side, fillchar=fillchar)\n return self._wrap_result(result)\n\n _shared_docs['str_pad'] = (\"\"\"\n Filling %(side)s side of strings in the Series/Index with an\n additional character. Equivalent to :meth:`str.%(method)s`.\n\n Parameters\n ----------\n width : int\n Minimum width of resulting string; additional characters will be filled\n with ``fillchar``\n fillchar : str\n Additional character for filling, default is whitespace\n\n Returns\n -------\n filled : Series/Index of objects\n \"\"\")\n\n @Appender(_shared_docs['str_pad'] % dict(side='left and right',\n method='center'))\n def center(self, width, fillchar=' '):\n return self.pad(width, side='both', fillchar=fillchar)\n\n @Appender(_shared_docs['str_pad'] % dict(side='right', method='ljust'))\n def ljust(self, width, fillchar=' '):\n return self.pad(width, side='right', fillchar=fillchar)\n\n @Appender(_shared_docs['str_pad'] % dict(side='left', method='rjust'))\n def rjust(self, width, fillchar=' '):\n return self.pad(width, side='left', fillchar=fillchar)\n\n def zfill(self, width):\n \"\"\"\n Pad strings in the Series/Index by prepending '0' characters.\n\n Strings in the Series/Index are padded with '0' characters on the\n left of the string to reach a total string length `width`. 
Strings\n in the Series/Index with length greater or equal to `width` are\n unchanged.\n\n Parameters\n ----------\n width : int\n Minimum length of resulting string; strings with length less\n than `width` be prepended with '0' characters.\n\n Returns\n -------\n Series/Index of objects\n\n See Also\n --------\n Series.str.rjust : Fills the left side of strings with an arbitrary\n character.\n Series.str.ljust : Fills the right side of strings with an arbitrary\n character.\n Series.str.pad : Fills the specified sides of strings with an arbitrary\n character.\n Series.str.center : Fills boths sides of strings with an arbitrary\n character.\n\n Notes\n -----\n Differs from :meth:`str.zfill` which has special handling\n for '+'/'-' in the string.\n\n Examples\n --------\n >>> s = pd.Series(['-1', '1', '1000', 10, np.nan])\n >>> s\n 0 -1\n 1 1\n 2 1000\n 3 10\n 4 NaN\n dtype: object\n\n Note that ``10`` and ``NaN`` are not strings, therefore they are\n converted to ``NaN``. The minus sign in ``'-1'`` is treated as a\n regular character and the zero is added to the left of it\n (:meth:`str.zfill` would have moved it to the left). ``1000``\n remains unchanged as it is longer than `width`.\n\n >>> s.str.zfill(3)\n 0 0-1\n 1 001\n 2 1000\n 3 NaN\n 4 NaN\n dtype: object\n \"\"\"\n result = str_pad(self._parent, width, side='left', fillchar='0')\n return self._wrap_result(result)\n\n @copy(str_slice)\n def slice(self, start=None, stop=None, step=None):\n result = str_slice(self._parent, start, stop, step)\n return self._wrap_result(result)\n\n @copy(str_slice_replace)\n def slice_replace(self, start=None, stop=None, repl=None):\n result = str_slice_replace(self._parent, start, stop, repl)\n return self._wrap_result(result)\n\n @copy(str_decode)\n def decode(self, encoding, errors=\"strict\"):\n result = str_decode(self._parent, encoding, errors)\n return self._wrap_result(result)\n\n @copy(str_encode)\n def encode(self, encoding, errors=\"strict\"):\n result = str_encode(self._parent, encoding, errors)\n return self._wrap_result(result)\n\n _shared_docs['str_strip'] = (r\"\"\"\n Remove leading and trailing characters.\n\n Strip whitespaces (including newlines) or a set of specified characters\n from each string in the Series/Index from %(side)s.\n Equivalent to :meth:`str.%(method)s`.\n\n Parameters\n ----------\n to_strip : str or None, default None\n Specifying the set of characters to be removed.\n All combinations of this set of characters will be stripped.\n If None then whitespaces are removed.\n\n Returns\n -------\n Series/Index of objects\n\n See Also\n --------\n Series.str.strip : Remove leading and trailing characters in Series/Index.\n Series.str.lstrip : Remove leading characters in Series/Index.\n Series.str.rstrip : Remove trailing characters in Series/Index.\n\n Examples\n --------\n >>> s = pd.Series(['1. Ant. ', '2. Bee!\\n', '3. Cat?\\t', np.nan])\n >>> s\n 0 1. Ant.\n 1 2. Bee!\\n\n 2 3. Cat?\\t\n 3 NaN\n dtype: object\n\n >>> s.str.strip()\n 0 1. Ant.\n 1 2. Bee!\n 2 3. Cat?\n 3 NaN\n dtype: object\n\n >>> s.str.lstrip('123.')\n 0 Ant.\n 1 Bee!\\n\n 2 Cat?\\t\n 3 NaN\n dtype: object\n\n >>> s.str.rstrip('.!? \\n\\t')\n 0 1. Ant\n 1 2. Bee\n 2 3. Cat\n 3 NaN\n dtype: object\n\n >>> s.str.strip('123.!? 
\\n\\t')\n 0 Ant\n 1 Bee\n 2 Cat\n 3 NaN\n dtype: object\n \"\"\")\n\n @Appender(_shared_docs['str_strip'] % dict(side='left and right sides',\n method='strip'))\n def strip(self, to_strip=None):\n result = str_strip(self._parent, to_strip, side='both')\n return self._wrap_result(result)\n\n @Appender(_shared_docs['str_strip'] % dict(side='left side',\n method='lstrip'))\n def lstrip(self, to_strip=None):\n result = str_strip(self._parent, to_strip, side='left')\n return self._wrap_result(result)\n\n @Appender(_shared_docs['str_strip'] % dict(side='right side',\n method='rstrip'))\n def rstrip(self, to_strip=None):\n result = str_strip(self._parent, to_strip, side='right')\n return self._wrap_result(result)\n\n @copy(str_wrap)\n def wrap(self, width, **kwargs):\n result = str_wrap(self._parent, width, **kwargs)\n return self._wrap_result(result)\n\n @copy(str_get_dummies)\n def get_dummies(self, sep='|'):\n # we need to cast to Series of strings as only that has all\n # methods available for making the dummies...\n data = self._orig.astype(str) if self._is_categorical else self._parent\n result, name = str_get_dummies(data, sep)\n return self._wrap_result(result, use_codes=(not self._is_categorical),\n name=name, expand=True)\n\n @copy(str_translate)\n def translate(self, table):\n result = str_translate(self._parent, table)\n return self._wrap_result(result)\n\n count = _pat_wrapper(str_count, flags=True)\n startswith = _pat_wrapper(str_startswith, na=True)\n endswith = _pat_wrapper(str_endswith, na=True)\n findall = _pat_wrapper(str_findall, flags=True)\n\n @copy(str_extract)\n def extract(self, pat, flags=0, expand=True):\n return str_extract(self, pat, flags=flags, expand=expand)\n\n @copy(str_extractall)\n def extractall(self, pat, flags=0):\n return str_extractall(self._orig, pat, flags=flags)\n\n _shared_docs['find'] = (\"\"\"\n Return %(side)s indexes in each strings in the Series/Index\n where the substring is fully contained between [start:end].\n Return -1 on failure. Equivalent to standard :meth:`str.%(method)s`.\n\n Parameters\n ----------\n sub : str\n Substring being searched\n start : int\n Left edge index\n end : int\n Right edge index\n\n Returns\n -------\n found : Series/Index of integer values\n\n See Also\n --------\n %(also)s\n \"\"\")\n\n @Appender(_shared_docs['find'] %\n dict(side='lowest', method='find',\n also='rfind : Return highest indexes in each strings.'))\n def find(self, sub, start=0, end=None):\n result = str_find(self._parent, sub, start=start, end=end, side='left')\n return self._wrap_result(result)\n\n @Appender(_shared_docs['find'] %\n dict(side='highest', method='rfind',\n also='find : Return lowest indexes in each strings.'))\n def rfind(self, sub, start=0, end=None):\n result = str_find(self._parent, sub,\n start=start, end=end, side='right')\n return self._wrap_result(result)\n\n def normalize(self, form):\n \"\"\"\n Return the Unicode normal form for the strings in the Series/Index.\n For more information on the forms, see the\n :func:`unicodedata.normalize`.\n\n Parameters\n ----------\n form : {'NFC', 'NFKC', 'NFD', 'NFKD'}\n Unicode form\n\n Returns\n -------\n normalized : Series/Index of objects\n \"\"\"\n import unicodedata\n f = lambda x: unicodedata.normalize(form, x)\n result = _na_map(f, self._parent)\n return self._wrap_result(result)\n\n _shared_docs['index'] = (\"\"\"\n Return %(side)s indexes in each strings where the substring is\n fully contained between [start:end]. 
This is the same as\n ``str.%(similar)s`` except instead of returning -1, it raises a ValueError\n when the substring is not found. Equivalent to standard ``str.%(method)s``.\n\n Parameters\n ----------\n sub : str\n Substring being searched\n start : int\n Left edge index\n end : int\n Right edge index\n\n Returns\n -------\n found : Series/Index of objects\n\n See Also\n --------\n %(also)s\n \"\"\")\n\n @Appender(_shared_docs['index'] %\n dict(side='lowest', similar='find', method='index',\n also='rindex : Return highest indexes in each strings.'))\n def index(self, sub, start=0, end=None):\n result = str_index(self._parent, sub,\n start=start, end=end, side='left')\n return self._wrap_result(result)\n\n @Appender(_shared_docs['index'] %\n dict(side='highest', similar='rfind', method='rindex',\n also='index : Return lowest indexes in each strings.'))\n def rindex(self, sub, start=0, end=None):\n result = str_index(self._parent, sub,\n start=start, end=end, side='right')\n return self._wrap_result(result)\n\n _shared_docs['len'] = (\"\"\"\n Compute the length of each element in the Series/Index. The element may be\n a sequence (such as a string, tuple or list) or a collection\n (such as a dictionary).\n\n Returns\n -------\n Series or Index of int\n A Series or Index of integer values indicating the length of each\n element in the Series or Index.\n\n See Also\n --------\n str.len : Python built-in function returning the length of an object.\n Series.size : Returns the length of the Series.\n\n Examples\n --------\n Returns the length (number of characters) in a string. Returns the\n number of entries for dictionaries, lists or tuples.\n\n >>> s = pd.Series(['dog',\n ... '',\n ... 5,\n ... {'foo' : 'bar'},\n ... [2, 3, 5, 7],\n ... ('one', 'two', 'three')])\n >>> s\n 0 dog\n 1\n 2 5\n 3 {'foo': 'bar'}\n 4 [2, 3, 5, 7]\n 5 (one, two, three)\n dtype: object\n >>> s.str.len()\n 0 3.0\n 1 0.0\n 2 NaN\n 3 1.0\n 4 4.0\n 5 3.0\n dtype: float64\n \"\"\")\n len = _noarg_wrapper(len, docstring=_shared_docs['len'], dtype=int)\n\n _shared_docs['casemethods'] = (\"\"\"\n Convert strings in the Series/Index to %(type)s.\n %(version)s\n Equivalent to :meth:`str.%(method)s`.\n\n Returns\n -------\n Series/Index of objects\n\n See Also\n --------\n Series.str.lower : Converts all characters to lowercase.\n Series.str.upper : Converts all characters to uppercase.\n Series.str.title : Converts first character of each word to uppercase and\n remaining to lowercase.\n Series.str.capitalize : Converts first character to uppercase and\n remaining to lowercase.\n Series.str.swapcase : Converts uppercase to lowercase and lowercase to\n uppercase.\n Series.str.casefold: Removes all case distinctions in the string.\n\n Examples\n --------\n >>> s = pd.Series(['lower', 'CAPITALS', 'this is a sentence', 'SwApCaSe'])\n >>> s\n 0 lower\n 1 CAPITALS\n 2 this is a sentence\n 3 SwApCaSe\n dtype: object\n\n >>> s.str.lower()\n 0 lower\n 1 capitals\n 2 this is a sentence\n 3 swapcase\n dtype: object\n\n >>> s.str.upper()\n 0 LOWER\n 1 CAPITALS\n 2 THIS IS A SENTENCE\n 3 SWAPCASE\n dtype: object\n\n >>> s.str.title()\n 0 Lower\n 1 Capitals\n 2 This Is A Sentence\n 3 Swapcase\n dtype: object\n\n >>> s.str.capitalize()\n 0 Lower\n 1 Capitals\n 2 This is a sentence\n 3 Swapcase\n dtype: object\n\n >>> s.str.swapcase()\n 0 LOWER\n 1 capitals\n 2 THIS IS A SENTENCE\n 3 sWaPcAsE\n dtype: object\n \"\"\")\n\n # _doc_args holds dict of strings to use in substituting casemethod docs\n _doc_args = {} # type: Dict[str, Dict[str, 
str]]\n _doc_args['lower'] = dict(type='lowercase', method='lower', version='')\n _doc_args['upper'] = dict(type='uppercase', method='upper', version='')\n _doc_args['title'] = dict(type='titlecase', method='title', version='')\n _doc_args['capitalize'] = dict(type='be capitalized', method='capitalize',\n version='')\n _doc_args['swapcase'] = dict(type='be swapcased', method='swapcase',\n version='')\n _doc_args['casefold'] = dict(type='be casefolded', method='casefold',\n version='\\n .. versionadded:: 0.25.0\\n')\n lower = _noarg_wrapper(lambda x: x.lower(),\n docstring=_shared_docs['casemethods'] %\n _doc_args['lower'])\n upper = _noarg_wrapper(lambda x: x.upper(),\n docstring=_shared_docs['casemethods'] %\n _doc_args['upper'])\n title = _noarg_wrapper(lambda x: x.title(),\n docstring=_shared_docs['casemethods'] %\n _doc_args['title'])\n capitalize = _noarg_wrapper(lambda x: x.capitalize(),\n docstring=_shared_docs['casemethods'] %\n _doc_args['capitalize'])\n swapcase = _noarg_wrapper(lambda x: x.swapcase(),\n docstring=_shared_docs['casemethods'] %\n _doc_args['swapcase'])\n casefold = _noarg_wrapper(lambda x: x.casefold(),\n docstring=_shared_docs['casemethods'] %\n _doc_args['casefold'])\n\n _shared_docs['ismethods'] = (\"\"\"\n Check whether all characters in each string are %(type)s.\n\n This is equivalent to running the Python string method\n :meth:`str.%(method)s` for each element of the Series/Index. If a string\n has zero characters, ``False`` is returned for that check.\n\n Returns\n -------\n Series or Index of bool\n Series or Index of boolean values with the same length as the original\n Series/Index.\n\n See Also\n --------\n Series.str.isalpha : Check whether all characters are alphabetic.\n Series.str.isnumeric : Check whether all characters are numeric.\n Series.str.isalnum : Check whether all characters are alphanumeric.\n Series.str.isdigit : Check whether all characters are digits.\n Series.str.isdecimal : Check whether all characters are decimal.\n Series.str.isspace : Check whether all characters are whitespace.\n Series.str.islower : Check whether all characters are lowercase.\n Series.str.isupper : Check whether all characters are uppercase.\n Series.str.istitle : Check whether all characters are titlecase.\n\n Examples\n --------\n **Checks for Alphabetic and Numeric Characters**\n\n >>> s1 = pd.Series(['one', 'one1', '1', ''])\n\n >>> s1.str.isalpha()\n 0 True\n 1 False\n 2 False\n 3 False\n dtype: bool\n\n >>> s1.str.isnumeric()\n 0 False\n 1 False\n 2 True\n 3 False\n dtype: bool\n\n >>> s1.str.isalnum()\n 0 True\n 1 True\n 2 True\n 3 False\n dtype: bool\n\n Note that checks against characters mixed with any additional punctuation\n or whitespace will evaluate to false for an alphanumeric check.\n\n >>> s2 = pd.Series(['A B', '1.5', '3,000'])\n >>> s2.str.isalnum()\n 0 False\n 1 False\n 2 False\n dtype: bool\n\n **More Detailed Checks for Numeric Characters**\n\n There are several different but overlapping sets of numeric characters that\n can be checked for.\n\n >>> s3 = pd.Series(['23', '³', '⅕', ''])\n\n The ``s3.str.isdecimal`` method checks for characters used to form numbers\n in base 10.\n\n >>> s3.str.isdecimal()\n 0 True\n 1 False\n 2 False\n 3 False\n dtype: bool\n\n The ``s.str.isdigit`` method is the same as ``s3.str.isdecimal`` but also\n includes special digits, like superscripted and subscripted digits in\n unicode.\n\n >>> s3.str.isdigit()\n 0 True\n 1 True\n 2 False\n 3 False\n dtype: bool\n\n The ``s.str.isnumeric`` method is the same as 
``s3.str.isdigit`` but also\n includes other characters that can represent quantities such as unicode\n fractions.\n\n >>> s3.str.isnumeric()\n 0 True\n 1 True\n 2 True\n 3 False\n dtype: bool\n\n **Checks for Whitespace**\n\n >>> s4 = pd.Series([' ', '\\\\t\\\\r\\\\n ', ''])\n >>> s4.str.isspace()\n 0 True\n 1 True\n 2 False\n dtype: bool\n\n **Checks for Character Case**\n\n >>> s5 = pd.Series(['leopard', 'Golden Eagle', 'SNAKE', ''])\n\n >>> s5.str.islower()\n 0 True\n 1 False\n 2 False\n 3 False\n dtype: bool\n\n >>> s5.str.isupper()\n 0 False\n 1 False\n 2 True\n 3 False\n dtype: bool\n\n The ``s5.str.istitle`` method checks for whether all words are in title\n case (whether only the first letter of each word is capitalized). Words are\n assumed to be as any sequence of non-numeric characters seperated by\n whitespace characters.\n\n >>> s5.str.istitle()\n 0 False\n 1 True\n 2 False\n 3 False\n dtype: bool\n \"\"\")\n _doc_args['isalnum'] = dict(type='alphanumeric', method='isalnum')\n _doc_args['isalpha'] = dict(type='alphabetic', method='isalpha')\n _doc_args['isdigit'] = dict(type='digits', method='isdigit')\n _doc_args['isspace'] = dict(type='whitespace', method='isspace')\n _doc_args['islower'] = dict(type='lowercase', method='islower')\n _doc_args['isupper'] = dict(type='uppercase', method='isupper')\n _doc_args['istitle'] = dict(type='titlecase', method='istitle')\n _doc_args['isnumeric'] = dict(type='numeric', method='isnumeric')\n _doc_args['isdecimal'] = dict(type='decimal', method='isdecimal')\n isalnum = _noarg_wrapper(lambda x: x.isalnum(),\n docstring=_shared_docs['ismethods'] %\n _doc_args['isalnum'])\n isalpha = _noarg_wrapper(lambda x: x.isalpha(),\n docstring=_shared_docs['ismethods'] %\n _doc_args['isalpha'])\n isdigit = _noarg_wrapper(lambda x: x.isdigit(),\n docstring=_shared_docs['ismethods'] %\n _doc_args['isdigit'])\n isspace = _noarg_wrapper(lambda x: x.isspace(),\n docstring=_shared_docs['ismethods'] %\n _doc_args['isspace'])\n islower = _noarg_wrapper(lambda x: x.islower(),\n docstring=_shared_docs['ismethods'] %\n _doc_args['islower'])\n isupper = _noarg_wrapper(lambda x: x.isupper(),\n docstring=_shared_docs['ismethods'] %\n _doc_args['isupper'])\n istitle = _noarg_wrapper(lambda x: x.istitle(),\n docstring=_shared_docs['ismethods'] %\n _doc_args['istitle'])\n isnumeric = _noarg_wrapper(lambda x: x.isnumeric(),\n docstring=_shared_docs['ismethods'] %\n _doc_args['isnumeric'])\n isdecimal = _noarg_wrapper(lambda x: x.isdecimal(),\n docstring=_shared_docs['ismethods'] %\n _doc_args['isdecimal'])\n\n @classmethod\n def _make_accessor(cls, data):\n cls._validate(data)\n return cls(data)\n",
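A short, hedged usage sketch of the `.str.cat` alignment behavior documented in the accessor code above; the sample Series values are taken from its own docstring examples, and everything else is purely illustrative:

import numpy as np
import pandas as pd

# Caller and `others` with different indexes; `join` controls alignment,
# as described in the str.cat docstring above.
s = pd.Series(['a', 'b', np.nan, 'd'])
t = pd.Series(['d', 'a', 'e', 'c'], index=[3, 0, 4, 2])

# Element-wise concatenation with explicit left alignment and an NA fill.
print(s.str.cat(t, join='left', na_rep='-'))    # 0 aa / 1 b- / 2 -c / 3 dd

# Without `others`, all values collapse into a single string.
print(s.str.cat(sep=' ', na_rep='?'))           # 'a b ? d'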
"import warnings\n\nimport numpy as np\nimport pandas as pd\nimport pandas.util.testing as tm\ntry:\n from pandas.api.types import union_categoricals\nexcept ImportError:\n try:\n from pandas.types.concat import union_categoricals\n except ImportError:\n pass\n\n\nclass Concat:\n\n def setup(self):\n N = 10**5\n self.s = pd.Series(list('aabbcd') * N).astype('category')\n\n self.a = pd.Categorical(list('aabbcd') * N)\n self.b = pd.Categorical(list('bbcdjk') * N)\n\n def time_concat(self):\n pd.concat([self.s, self.s])\n\n def time_union(self):\n union_categoricals([self.a, self.b])\n\n\nclass Constructor:\n\n def setup(self):\n N = 10**5\n self.categories = list('abcde')\n self.cat_idx = pd.Index(self.categories)\n self.values = np.tile(self.categories, N)\n self.codes = np.tile(range(len(self.categories)), N)\n\n self.datetimes = pd.Series(pd.date_range('1995-01-01 00:00:00',\n periods=N / 10,\n freq='s'))\n self.datetimes_with_nat = self.datetimes.copy()\n self.datetimes_with_nat.iloc[-1] = pd.NaT\n\n self.values_some_nan = list(np.tile(self.categories + [np.nan], N))\n self.values_all_nan = [np.nan] * len(self.values)\n self.values_all_int8 = np.ones(N, 'int8')\n self.categorical = pd.Categorical(self.values, self.categories)\n self.series = pd.Series(self.categorical)\n\n def time_regular(self):\n pd.Categorical(self.values, self.categories)\n\n def time_fastpath(self):\n pd.Categorical(self.codes, self.cat_idx, fastpath=True)\n\n def time_datetimes(self):\n pd.Categorical(self.datetimes)\n\n def time_datetimes_with_nat(self):\n pd.Categorical(self.datetimes_with_nat)\n\n def time_with_nan(self):\n pd.Categorical(self.values_some_nan)\n\n def time_all_nan(self):\n pd.Categorical(self.values_all_nan)\n\n def time_from_codes_all_int8(self):\n pd.Categorical.from_codes(self.values_all_int8, self.categories)\n\n def time_existing_categorical(self):\n pd.Categorical(self.categorical)\n\n def time_existing_series(self):\n pd.Categorical(self.series)\n\n\nclass ValueCounts:\n\n params = [True, False]\n param_names = ['dropna']\n\n def setup(self, dropna):\n n = 5 * 10**5\n arr = ['s{:04d}'.format(i) for i in np.random.randint(0, n // 10,\n size=n)]\n self.ts = pd.Series(arr).astype('category')\n\n def time_value_counts(self, dropna):\n self.ts.value_counts(dropna=dropna)\n\n\nclass Repr:\n\n def setup(self):\n self.sel = pd.Series(['s1234']).astype('category')\n\n def time_rendering(self):\n str(self.sel)\n\n\nclass SetCategories:\n\n def setup(self):\n n = 5 * 10**5\n arr = ['s{:04d}'.format(i) for i in np.random.randint(0, n // 10,\n size=n)]\n self.ts = pd.Series(arr).astype('category')\n\n def time_set_categories(self):\n self.ts.cat.set_categories(self.ts.cat.categories[::2])\n\n\nclass RemoveCategories:\n\n def setup(self):\n n = 5 * 10**5\n arr = ['s{:04d}'.format(i) for i in np.random.randint(0, n // 10,\n size=n)]\n self.ts = pd.Series(arr).astype('category')\n\n def time_remove_categories(self):\n self.ts.cat.remove_categories(self.ts.cat.categories[::2])\n\n\nclass Rank:\n\n def setup(self):\n N = 10**5\n ncats = 100\n\n self.s_str = pd.Series(tm.makeCategoricalIndex(N, ncats)).astype(str)\n self.s_str_cat = self.s_str.astype('category')\n with warnings.catch_warnings(record=True):\n self.s_str_cat_ordered = self.s_str.astype('category',\n ordered=True)\n\n self.s_int = pd.Series(np.random.randint(0, ncats, size=N))\n self.s_int_cat = self.s_int.astype('category')\n with warnings.catch_warnings(record=True):\n self.s_int_cat_ordered = self.s_int.astype('category',\n 
ordered=True)\n\n def time_rank_string(self):\n self.s_str.rank()\n\n def time_rank_string_cat(self):\n self.s_str_cat.rank()\n\n def time_rank_string_cat_ordered(self):\n self.s_str_cat_ordered.rank()\n\n def time_rank_int(self):\n self.s_int.rank()\n\n def time_rank_int_cat(self):\n self.s_int_cat.rank()\n\n def time_rank_int_cat_ordered(self):\n self.s_int_cat_ordered.rank()\n\n\nclass Isin:\n\n params = ['object', 'int64']\n param_names = ['dtype']\n\n def setup(self, dtype):\n np.random.seed(1234)\n n = 5 * 10**5\n sample_size = 100\n arr = [i for i in np.random.randint(0, n // 10, size=n)]\n if dtype == 'object':\n arr = ['s{:04d}'.format(i) for i in arr]\n self.sample = np.random.choice(arr, sample_size)\n self.series = pd.Series(arr).astype('category')\n\n def time_isin_categorical(self, dtype):\n self.series.isin(self.sample)\n\n\nclass IsMonotonic:\n\n def setup(self):\n N = 1000\n self.c = pd.CategoricalIndex(list('a' * N + 'b' * N + 'c' * N))\n self.s = pd.Series(self.c)\n\n def time_categorical_index_is_monotonic_increasing(self):\n self.c.is_monotonic_increasing\n\n def time_categorical_index_is_monotonic_decreasing(self):\n self.c.is_monotonic_decreasing\n\n def time_categorical_series_is_monotonic_increasing(self):\n self.s.is_monotonic_increasing\n\n def time_categorical_series_is_monotonic_decreasing(self):\n self.s.is_monotonic_decreasing\n\n\nclass Contains:\n\n def setup(self):\n N = 10**5\n self.ci = tm.makeCategoricalIndex(N)\n self.c = self.ci.values\n self.key = self.ci.categories[0]\n\n def time_categorical_index_contains(self):\n self.key in self.ci\n\n def time_categorical_contains(self):\n self.key in self.c\n\n\nclass CategoricalSlicing:\n\n params = ['monotonic_incr', 'monotonic_decr', 'non_monotonic']\n param_names = ['index']\n\n def setup(self, index):\n N = 10**6\n categories = ['a', 'b', 'c']\n values = [0] * N + [1] * N + [2] * N\n if index == 'monotonic_incr':\n self.data = pd.Categorical.from_codes(values,\n categories=categories)\n elif index == 'monotonic_decr':\n self.data = pd.Categorical.from_codes(list(reversed(values)),\n categories=categories)\n elif index == 'non_monotonic':\n self.data = pd.Categorical.from_codes([0, 1, 2] * N,\n categories=categories)\n else:\n raise ValueError('Invalid index param: {}'.format(index))\n\n self.scalar = 10000\n self.list = list(range(10000))\n self.cat_scalar = 'b'\n\n def time_getitem_scalar(self, index):\n self.data[self.scalar]\n\n def time_getitem_slice(self, index):\n self.data[:self.scalar]\n\n def time_getitem_list_like(self, index):\n self.data[[self.scalar]]\n\n def time_getitem_list(self, index):\n self.data[self.list]\n\n def time_getitem_bool_array(self, index):\n self.data[self.data == self.cat_scalar]\n\n\nclass Indexing:\n\n def setup(self):\n N = 10**5\n self.index = pd.CategoricalIndex(range(N), range(N))\n self.series = pd.Series(range(N), index=self.index).sort_index()\n self.category = self.index[500]\n\n def time_get_loc(self):\n self.index.get_loc(self.category)\n\n def time_shape(self):\n self.index.shape\n\n def time_shallow_copy(self):\n self.index._shallow_copy()\n\n def time_align(self):\n pd.DataFrame({'a': self.series, 'b': self.series[:500]})\n\n def time_intersection(self):\n self.index[:750].intersection(self.index[250:])\n\n def time_unique(self):\n self.index.unique()\n\n def time_reindex(self):\n self.index.reindex(self.index[:500])\n\n def time_reindex_missing(self):\n self.index.reindex(['a', 'b', 'c', 'd'])\n\n def time_sort_values(self):\n 
self.index.sort_values(ascending=False)\n\n\nfrom .pandas_vb_common import setup # noqa: F401\n",
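The asv-style classes above separate fixture construction (`setup`) from the timed operation. A minimal hand-run sketch of the same protocol for the ValueCounts case, using `timeit` instead of the asv runner (the repeat count of 10 is arbitrary), might look like this:

import timeit

import numpy as np
import pandas as pd

# Build the fixture exactly as ValueCounts.setup does, then time the bare call.
n = 5 * 10**5
arr = ['s{:04d}'.format(i) for i in np.random.randint(0, n // 10, size=n)]
ts = pd.Series(arr).astype('category')

elapsed = timeit.timeit(lambda: ts.value_counts(dropna=True), number=10)
print('categorical value_counts, dropna=True: {:.4f}s for 10 runs'.format(elapsed))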
"import pytest\n\nimport pandas as pd\nimport pandas.util.testing as tm\n\n\nclass TestCategoricalWarnings:\n def test_tab_complete_warning(self, ip):\n # https://github.com/pandas-dev/pandas/issues/16409\n pytest.importorskip('IPython', minversion=\"6.0.0\")\n from IPython.core.completer import provisionalcompleter\n\n code = \"import pandas as pd; c = Categorical([])\"\n ip.run_code(code)\n with tm.assert_produces_warning(None):\n with provisionalcompleter('ignore'):\n list(ip.Completer.completions('c.', 1))\n\n def test_CategoricalAccessor_categorical_deprecation(self):\n with tm.assert_produces_warning(FutureWarning):\n pd.Series(['a', 'b'], dtype='category').cat.categorical\n\n def test_CategoricalAccessor_name_deprecation(self):\n with tm.assert_produces_warning(FutureWarning):\n pd.Series(['a', 'b'], dtype='category').cat.name\n\n def test_CategoricalAccessor_index_deprecation(self):\n with tm.assert_produces_warning(FutureWarning):\n pd.Series(['a', 'b'], dtype='category').cat.index\n",
"\"\"\"\nUnopinionated display configuration.\n\"\"\"\nimport locale\nimport sys\n\nfrom pandas._config import config as cf\n\n# -----------------------------------------------------------------------------\n# Global formatting options\n_initial_defencoding = None\n\n\ndef detect_console_encoding():\n \"\"\"\n Try to find the most capable encoding supported by the console.\n slightly modified from the way IPython handles the same issue.\n \"\"\"\n global _initial_defencoding\n\n encoding = None\n try:\n encoding = sys.stdout.encoding or sys.stdin.encoding\n except (AttributeError, IOError):\n pass\n\n # try again for something better\n if not encoding or 'ascii' in encoding.lower():\n try:\n encoding = locale.getpreferredencoding()\n except Exception:\n pass\n\n # when all else fails. this will usually be \"ascii\"\n if not encoding or 'ascii' in encoding.lower():\n encoding = sys.getdefaultencoding()\n\n # GH#3360, save the reported defencoding at import time\n # MPL backends may change it. Make available for debugging.\n if not _initial_defencoding:\n _initial_defencoding = sys.getdefaultencoding()\n\n return encoding\n\n\npc_encoding_doc = \"\"\"\n: str/unicode\n Defaults to the detected encoding of the console.\n Specifies the encoding to be used for strings returned by to_string,\n these are generally strings meant to be displayed on the console.\n\"\"\"\n\nwith cf.config_prefix('display'):\n cf.register_option('encoding', detect_console_encoding(), pc_encoding_doc,\n validator=cf.is_text)\n",
"import warnings\n\nimport pytest\n\nimport pandas.util.testing as tm\n\n\ndef f():\n warnings.warn('f1', FutureWarning)\n warnings.warn('f2', RuntimeWarning)\n\n\[email protected]('ignore:f1:FutureWarning')\ndef test_assert_produces_warning_honors_filter():\n # Raise by default.\n msg = r\"Caused unexpected warning\\(s\\)\"\n with pytest.raises(AssertionError, match=msg):\n with tm.assert_produces_warning(RuntimeWarning):\n f()\n\n with tm.assert_produces_warning(RuntimeWarning,\n raise_on_extra_warnings=False):\n f()\n",
"import numpy as np\nfrom pandas import DataFrame, date_range, read_stata\nimport pandas.util.testing as tm\n\nfrom ..pandas_vb_common import BaseIO\n\n\nclass Stata(BaseIO):\n\n params = ['tc', 'td', 'tm', 'tw', 'th', 'tq', 'ty']\n param_names = ['convert_dates']\n\n def setup(self, convert_dates):\n self.fname = '__test__.dta'\n N = self.N = 100000\n C = self.C = 5\n self.df = DataFrame(np.random.randn(N, C),\n columns=['float{}'.format(i) for i in range(C)],\n index=date_range('20000101', periods=N, freq='H'))\n self.df['object'] = tm.makeStringIndex(self.N)\n self.df['int8_'] = np.random.randint(np.iinfo(np.int8).min,\n np.iinfo(np.int8).max - 27, N)\n self.df['int16_'] = np.random.randint(np.iinfo(np.int16).min,\n np.iinfo(np.int16).max - 27, N)\n self.df['int32_'] = np.random.randint(np.iinfo(np.int32).min,\n np.iinfo(np.int32).max - 27, N)\n self.df['float32_'] = np.array(np.random.randn(N),\n dtype=np.float32)\n self.convert_dates = {'index': convert_dates}\n self.df.to_stata(self.fname, self.convert_dates)\n\n def time_read_stata(self, convert_dates):\n read_stata(self.fname)\n\n def time_write_stata(self, convert_dates):\n self.df.to_stata(self.fname, self.convert_dates)\n\n\nclass StataMissing(Stata):\n def setup(self, convert_dates):\n super().setup(convert_dates)\n for i in range(10):\n missing_data = np.random.randn(self.N)\n missing_data[missing_data < 0] = np.nan\n self.df['missing_{0}'.format(i)] = missing_data\n self.df.to_stata(self.fname, self.convert_dates)\n\n\nfrom ..pandas_vb_common import setup # noqa: F401\n",
"\"\"\" test partial slicing on Series/Frame \"\"\"\n\nfrom datetime import datetime\nimport operator as op\n\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import (\n DataFrame, DatetimeIndex, Index, Series, Timedelta, Timestamp, date_range)\nfrom pandas.core.indexing import IndexingError\nfrom pandas.util import testing as tm\n\n\nclass TestSlicing:\n def test_dti_slicing(self):\n dti = date_range(start='1/1/2005', end='12/1/2005', freq='M')\n dti2 = dti[[1, 3, 5]]\n\n v1 = dti2[0]\n v2 = dti2[1]\n v3 = dti2[2]\n\n assert v1 == Timestamp('2/28/2005')\n assert v2 == Timestamp('4/30/2005')\n assert v3 == Timestamp('6/30/2005')\n\n # don't carry freq through irregular slicing\n assert dti2.freq is None\n\n def test_slice_keeps_name(self):\n # GH4226\n st = pd.Timestamp('2013-07-01 00:00:00', tz='America/Los_Angeles')\n et = pd.Timestamp('2013-07-02 00:00:00', tz='America/Los_Angeles')\n dr = pd.date_range(st, et, freq='H', name='timebucket')\n assert dr[1:].name == dr.name\n\n def test_slice_with_negative_step(self):\n ts = Series(np.arange(20),\n date_range('2014-01-01', periods=20, freq='MS'))\n SLC = pd.IndexSlice\n\n def assert_slices_equivalent(l_slc, i_slc):\n tm.assert_series_equal(ts[l_slc], ts.iloc[i_slc])\n tm.assert_series_equal(ts.loc[l_slc], ts.iloc[i_slc])\n tm.assert_series_equal(ts.loc[l_slc], ts.iloc[i_slc])\n\n assert_slices_equivalent(SLC[Timestamp('2014-10-01')::-1], SLC[9::-1])\n assert_slices_equivalent(SLC['2014-10-01'::-1], SLC[9::-1])\n\n assert_slices_equivalent(SLC[:Timestamp('2014-10-01'):-1], SLC[:8:-1])\n assert_slices_equivalent(SLC[:'2014-10-01':-1], SLC[:8:-1])\n\n assert_slices_equivalent(SLC['2015-02-01':'2014-10-01':-1],\n SLC[13:8:-1])\n assert_slices_equivalent(SLC[Timestamp('2015-02-01'):Timestamp(\n '2014-10-01'):-1], SLC[13:8:-1])\n assert_slices_equivalent(SLC['2015-02-01':Timestamp('2014-10-01'):-1],\n SLC[13:8:-1])\n assert_slices_equivalent(SLC[Timestamp('2015-02-01'):'2014-10-01':-1],\n SLC[13:8:-1])\n\n assert_slices_equivalent(SLC['2014-10-01':'2015-02-01':-1], SLC[:0])\n\n def test_slice_with_zero_step_raises(self):\n ts = Series(np.arange(20),\n date_range('2014-01-01', periods=20, freq='MS'))\n with pytest.raises(ValueError, match='slice step cannot be zero'):\n ts[::0]\n with pytest.raises(ValueError, match='slice step cannot be zero'):\n ts.loc[::0]\n with pytest.raises(ValueError, match='slice step cannot be zero'):\n ts.loc[::0]\n\n def test_slice_bounds_empty(self):\n # GH#14354\n empty_idx = date_range(freq='1H', periods=0, end='2015')\n\n right = empty_idx._maybe_cast_slice_bound('2015-01-02', 'right', 'loc')\n exp = Timestamp('2015-01-02 23:59:59.999999999')\n assert right == exp\n\n left = empty_idx._maybe_cast_slice_bound('2015-01-02', 'left', 'loc')\n exp = Timestamp('2015-01-02 00:00:00')\n assert left == exp\n\n def test_slice_duplicate_monotonic(self):\n # https://github.com/pandas-dev/pandas/issues/16515\n idx = pd.DatetimeIndex(['2017', '2017'])\n result = idx._maybe_cast_slice_bound('2017-01-01', 'left', 'loc')\n expected = Timestamp('2017-01-01')\n assert result == expected\n\n def test_monotone_DTI_indexing_bug(self):\n # GH 19362\n # Testing accessing the first element in a montononic descending\n # partial string indexing.\n\n df = pd.DataFrame(list(range(5)))\n date_list = ['2018-01-02', '2017-02-10', '2016-03-10',\n '2015-03-15', '2014-03-16']\n date_index = pd.to_datetime(date_list)\n df['date'] = date_index\n expected = pd.DataFrame({0: list(range(5)), 'date': date_index})\n 
tm.assert_frame_equal(df, expected)\n\n df = pd.DataFrame({'A': [1, 2, 3]},\n index=pd.date_range('20170101',\n periods=3)[::-1])\n expected = pd.DataFrame({'A': 1},\n index=pd.date_range('20170103',\n periods=1))\n tm.assert_frame_equal(df.loc['2017-01-03'], expected)\n\n def test_slice_year(self):\n dti = date_range(freq='B', start=datetime(2005, 1, 1), periods=500)\n\n s = Series(np.arange(len(dti)), index=dti)\n result = s['2005']\n expected = s[s.index.year == 2005]\n tm.assert_series_equal(result, expected)\n\n df = DataFrame(np.random.rand(len(dti), 5), index=dti)\n result = df.loc['2005']\n expected = df[df.index.year == 2005]\n tm.assert_frame_equal(result, expected)\n\n rng = date_range('1/1/2000', '1/1/2010')\n\n result = rng.get_loc('2009')\n expected = slice(3288, 3653)\n assert result == expected\n\n def test_slice_quarter(self):\n dti = date_range(freq='D', start=datetime(2000, 6, 1), periods=500)\n\n s = Series(np.arange(len(dti)), index=dti)\n assert len(s['2001Q1']) == 90\n\n df = DataFrame(np.random.rand(len(dti), 5), index=dti)\n assert len(df.loc['1Q01']) == 90\n\n def test_slice_month(self):\n dti = date_range(freq='D', start=datetime(2005, 1, 1), periods=500)\n s = Series(np.arange(len(dti)), index=dti)\n assert len(s['2005-11']) == 30\n\n df = DataFrame(np.random.rand(len(dti), 5), index=dti)\n assert len(df.loc['2005-11']) == 30\n\n tm.assert_series_equal(s['2005-11'], s['11-2005'])\n\n def test_partial_slice(self):\n rng = date_range(freq='D', start=datetime(2005, 1, 1), periods=500)\n s = Series(np.arange(len(rng)), index=rng)\n\n result = s['2005-05':'2006-02']\n expected = s['20050501':'20060228']\n tm.assert_series_equal(result, expected)\n\n result = s['2005-05':]\n expected = s['20050501':]\n tm.assert_series_equal(result, expected)\n\n result = s[:'2006-02']\n expected = s[:'20060228']\n tm.assert_series_equal(result, expected)\n\n result = s['2005-1-1']\n assert result == s.iloc[0]\n\n with pytest.raises(KeyError, match=r\"^'2004-12-31'$\"):\n s['2004-12-31']\n\n def test_partial_slice_daily(self):\n rng = date_range(freq='H', start=datetime(2005, 1, 31), periods=500)\n s = Series(np.arange(len(rng)), index=rng)\n\n result = s['2005-1-31']\n tm.assert_series_equal(result, s.iloc[:24])\n\n with pytest.raises(KeyError, match=r\"^'2004-12-31 00'$\"):\n s['2004-12-31 00']\n\n def test_partial_slice_hourly(self):\n rng = date_range(freq='T', start=datetime(2005, 1, 1, 20, 0, 0),\n periods=500)\n s = Series(np.arange(len(rng)), index=rng)\n\n result = s['2005-1-1']\n tm.assert_series_equal(result, s.iloc[:60 * 4])\n\n result = s['2005-1-1 20']\n tm.assert_series_equal(result, s.iloc[:60])\n\n assert s['2005-1-1 20:00'] == s.iloc[0]\n with pytest.raises(KeyError, match=r\"^'2004-12-31 00:15'$\"):\n s['2004-12-31 00:15']\n\n def test_partial_slice_minutely(self):\n rng = date_range(freq='S', start=datetime(2005, 1, 1, 23, 59, 0),\n periods=500)\n s = Series(np.arange(len(rng)), index=rng)\n\n result = s['2005-1-1 23:59']\n tm.assert_series_equal(result, s.iloc[:60])\n\n result = s['2005-1-1']\n tm.assert_series_equal(result, s.iloc[:60])\n\n assert s[Timestamp('2005-1-1 23:59:00')] == s.iloc[0]\n with pytest.raises(KeyError, match=r\"^'2004-12-31 00:00:00'$\"):\n s['2004-12-31 00:00:00']\n\n def test_partial_slice_second_precision(self):\n rng = date_range(start=datetime(2005, 1, 1, 0, 0, 59,\n microsecond=999990),\n periods=20, freq='US')\n s = Series(np.arange(20), rng)\n\n tm.assert_series_equal(s['2005-1-1 00:00'], s.iloc[:10])\n 
tm.assert_series_equal(s['2005-1-1 00:00:59'], s.iloc[:10])\n\n tm.assert_series_equal(s['2005-1-1 00:01'], s.iloc[10:])\n tm.assert_series_equal(s['2005-1-1 00:01:00'], s.iloc[10:])\n\n assert s[Timestamp('2005-1-1 00:00:59.999990')] == s.iloc[0]\n with pytest.raises(KeyError, match='2005-1-1 00:00:00'):\n s['2005-1-1 00:00:00']\n\n def test_partial_slicing_dataframe(self):\n # GH14856\n # Test various combinations of string slicing resolution vs.\n # index resolution\n # - If string resolution is less precise than index resolution,\n # string is considered a slice\n # - If string resolution is equal to or more precise than index\n # resolution, string is considered an exact match\n formats = ['%Y', '%Y-%m', '%Y-%m-%d', '%Y-%m-%d %H',\n '%Y-%m-%d %H:%M', '%Y-%m-%d %H:%M:%S']\n resolutions = ['year', 'month', 'day', 'hour', 'minute', 'second']\n for rnum, resolution in enumerate(resolutions[2:], 2):\n # we check only 'day', 'hour', 'minute' and 'second'\n unit = Timedelta(\"1 \" + resolution)\n middate = datetime(2012, 1, 1, 0, 0, 0)\n index = DatetimeIndex([middate - unit,\n middate, middate + unit])\n values = [1, 2, 3]\n df = DataFrame({'a': values}, index, dtype=np.int64)\n assert df.index.resolution == resolution\n\n # Timestamp with the same resolution as index\n # Should be exact match for Series (return scalar)\n # and raise KeyError for Frame\n for timestamp, expected in zip(index, values):\n ts_string = timestamp.strftime(formats[rnum])\n # make ts_string as precise as index\n result = df['a'][ts_string]\n assert isinstance(result, np.int64)\n assert result == expected\n msg = r\"^'{}'$\".format(ts_string)\n with pytest.raises(KeyError, match=msg):\n df[ts_string]\n\n # Timestamp with resolution less precise than index\n for fmt in formats[:rnum]:\n for element, theslice in [[0, slice(None, 1)],\n [1, slice(1, None)]]:\n ts_string = index[element].strftime(fmt)\n\n # Series should return slice\n result = df['a'][ts_string]\n expected = df['a'][theslice]\n tm.assert_series_equal(result, expected)\n\n # Frame should return slice as well\n result = df[ts_string]\n expected = df[theslice]\n tm.assert_frame_equal(result, expected)\n\n # Timestamp with resolution more precise than index\n # Compatible with existing key\n # Should return scalar for Series\n # and raise KeyError for Frame\n for fmt in formats[rnum + 1:]:\n ts_string = index[1].strftime(fmt)\n result = df['a'][ts_string]\n assert isinstance(result, np.int64)\n assert result == 2\n msg = r\"^'{}'$\".format(ts_string)\n with pytest.raises(KeyError, match=msg):\n df[ts_string]\n\n # Not compatible with existing key\n # Should raise KeyError\n for fmt, res in list(zip(formats, resolutions))[rnum + 1:]:\n ts = index[1] + Timedelta(\"1 \" + res)\n ts_string = ts.strftime(fmt)\n msg = r\"^'{}'$\".format(ts_string)\n with pytest.raises(KeyError, match=msg):\n df['a'][ts_string]\n with pytest.raises(KeyError, match=msg):\n df[ts_string]\n\n def test_partial_slicing_with_multiindex(self):\n\n # GH 4758\n # partial string indexing with a multi-index buggy\n df = DataFrame({'ACCOUNT': [\"ACCT1\", \"ACCT1\", \"ACCT1\", \"ACCT2\"],\n 'TICKER': [\"ABC\", \"MNP\", \"XYZ\", \"XYZ\"],\n 'val': [1, 2, 3, 4]},\n index=date_range(\"2013-06-19 09:30:00\",\n periods=4, freq='5T'))\n df_multi = df.set_index(['ACCOUNT', 'TICKER'], append=True)\n\n expected = DataFrame([\n [1]\n ], index=Index(['ABC'], name='TICKER'), columns=['val'])\n result = df_multi.loc[('2013-06-19 09:30:00', 'ACCT1')]\n tm.assert_frame_equal(result, expected)\n\n expected = 
df_multi.loc[\n (pd.Timestamp('2013-06-19 09:30:00', tz=None), 'ACCT1', 'ABC')]\n result = df_multi.loc[('2013-06-19 09:30:00', 'ACCT1', 'ABC')]\n tm.assert_series_equal(result, expected)\n\n # this is an IndexingError as we don't do partial string selection on\n # multi-levels.\n msg = \"Too many indexers\"\n with pytest.raises(IndexingError, match=msg):\n df_multi.loc[('2013-06-19', 'ACCT1', 'ABC')]\n\n # GH 4294\n # partial slice on a series mi\n s = pd.DataFrame(np.random.rand(1000, 1000), index=pd.date_range(\n '2000-1-1', periods=1000)).stack()\n\n s2 = s[:-1].copy()\n expected = s2['2000-1-4']\n result = s2[pd.Timestamp('2000-1-4')]\n tm.assert_series_equal(result, expected)\n\n result = s[pd.Timestamp('2000-1-4')]\n expected = s['2000-1-4']\n tm.assert_series_equal(result, expected)\n\n df2 = pd.DataFrame(s)\n expected = df2.xs('2000-1-4')\n result = df2.loc[pd.Timestamp('2000-1-4')]\n tm.assert_frame_equal(result, expected)\n\n def test_partial_slice_doesnt_require_monotonicity(self):\n # For historical reasons.\n s = pd.Series(np.arange(10), pd.date_range('2014-01-01', periods=10))\n\n nonmonotonic = s[[3, 5, 4]]\n expected = nonmonotonic.iloc[:0]\n timestamp = pd.Timestamp('2014-01-10')\n\n tm.assert_series_equal(nonmonotonic['2014-01-10':], expected)\n with pytest.raises(KeyError,\n match=r\"Timestamp\\('2014-01-10 00:00:00'\\)\"):\n nonmonotonic[timestamp:]\n\n tm.assert_series_equal(nonmonotonic.loc['2014-01-10':], expected)\n with pytest.raises(KeyError,\n match=r\"Timestamp\\('2014-01-10 00:00:00'\\)\"):\n nonmonotonic.loc[timestamp:]\n\n def test_loc_datetime_length_one(self):\n # GH16071\n df = pd.DataFrame(columns=['1'],\n index=pd.date_range('2016-10-01T00:00:00',\n '2016-10-01T23:59:59'))\n result = df.loc[datetime(2016, 10, 1):]\n tm.assert_frame_equal(result, df)\n\n result = df.loc['2016-10-01T00:00:00':]\n tm.assert_frame_equal(result, df)\n\n @pytest.mark.parametrize('datetimelike', [\n Timestamp('20130101'), datetime(2013, 1, 1),\n np.datetime64('2013-01-01T00:00', 'ns')])\n @pytest.mark.parametrize('op,expected', [\n (op.lt, [True, False, False, False]),\n (op.le, [True, True, False, False]),\n (op.eq, [False, True, False, False]),\n (op.gt, [False, False, False, True])])\n def test_selection_by_datetimelike(self, datetimelike, op, expected):\n # GH issue #17965, test for ability to compare datetime64[ns] columns\n # to datetimelike\n df = DataFrame({'A': [pd.Timestamp('20120101'),\n pd.Timestamp('20130101'),\n np.nan, pd.Timestamp('20130103')]})\n result = op(df.A, datetimelike)\n expected = Series(expected, name='A')\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize('start', [\n '2018-12-02 21:50:00+00:00', pd.Timestamp('2018-12-02 21:50:00+00:00'),\n pd.Timestamp('2018-12-02 21:50:00+00:00').to_pydatetime()\n ])\n @pytest.mark.parametrize('end', [\n '2018-12-02 21:52:00+00:00', pd.Timestamp('2018-12-02 21:52:00+00:00'),\n pd.Timestamp('2018-12-02 21:52:00+00:00').to_pydatetime()\n ])\n def test_getitem_with_datestring_with_UTC_offset(self, start, end):\n # GH 24076\n idx = pd.date_range(start='2018-12-02 14:50:00-07:00',\n end='2018-12-02 14:50:00-07:00', freq='1min')\n df = pd.DataFrame(1, index=idx, columns=['A'])\n result = df[start:end]\n expected = df.iloc[0:3, :]\n tm.assert_frame_equal(result, expected)\n\n # GH 16785\n start = str(start)\n end = str(end)\n with pytest.raises(ValueError, match=\"Both dates must\"):\n df[start:end[:-4] + '1:00']\n\n with pytest.raises(ValueError, match=\"The index must be timezone\"):\n df = 
df.tz_localize(None)\n df[start:end]\n",
"from datetime import timedelta\n\nimport numpy as np\nimport pytest\n\nfrom pandas.errors import NullFrequencyError\n\nimport pandas as pd\nfrom pandas import Timedelta, TimedeltaIndex, timedelta_range\nimport pandas.util.testing as tm\n\n\[email protected](params=[pd.offsets.Hour(2), timedelta(hours=2),\n np.timedelta64(2, 'h'), Timedelta(hours=2)],\n ids=str)\ndef delta(request):\n # Several ways of representing two hours\n return request.param\n\n\[email protected](params=['B', 'D'])\ndef freq(request):\n return request.param\n\n\nclass TestTimedeltaIndexArithmetic:\n # Addition and Subtraction Operations\n\n # -------------------------------------------------------------\n # TimedeltaIndex.shift is used by __add__/__sub__\n\n def test_tdi_shift_empty(self):\n # GH#9903\n idx = pd.TimedeltaIndex([], name='xxx')\n tm.assert_index_equal(idx.shift(0, freq='H'), idx)\n tm.assert_index_equal(idx.shift(3, freq='H'), idx)\n\n def test_tdi_shift_hours(self):\n # GH#9903\n idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')\n tm.assert_index_equal(idx.shift(0, freq='H'), idx)\n exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')\n tm.assert_index_equal(idx.shift(3, freq='H'), exp)\n exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')\n tm.assert_index_equal(idx.shift(-3, freq='H'), exp)\n\n def test_tdi_shift_minutes(self):\n # GH#9903\n idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')\n tm.assert_index_equal(idx.shift(0, freq='T'), idx)\n exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'],\n name='xxx')\n tm.assert_index_equal(idx.shift(3, freq='T'), exp)\n exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'],\n name='xxx')\n tm.assert_index_equal(idx.shift(-3, freq='T'), exp)\n\n def test_tdi_shift_int(self):\n # GH#8083\n trange = pd.to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)\n result = trange.shift(1)\n expected = TimedeltaIndex(['1 days 01:00:00', '2 days 01:00:00',\n '3 days 01:00:00',\n '4 days 01:00:00', '5 days 01:00:00'],\n freq='D')\n tm.assert_index_equal(result, expected)\n\n def test_tdi_shift_nonstandard_freq(self):\n # GH#8083\n trange = pd.to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)\n result = trange.shift(3, freq='2D 1s')\n expected = TimedeltaIndex(['6 days 01:00:03', '7 days 01:00:03',\n '8 days 01:00:03', '9 days 01:00:03',\n '10 days 01:00:03'], freq='D')\n tm.assert_index_equal(result, expected)\n\n def test_shift_no_freq(self):\n # GH#19147\n tdi = TimedeltaIndex(['1 days 01:00:00', '2 days 01:00:00'], freq=None)\n with pytest.raises(NullFrequencyError):\n tdi.shift(2)\n\n # -------------------------------------------------------------\n # Binary operations TimedeltaIndex and integer\n\n def test_tdi_add_int(self, one):\n # Variants of `one` for #19012\n rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)\n with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):\n # GH#22535\n result = rng + one\n expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)\n tm.assert_index_equal(result, expected)\n\n def test_tdi_iadd_int(self, one):\n rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)\n expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)\n with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):\n # GH#22535\n rng += one\n tm.assert_index_equal(rng, expected)\n\n def test_tdi_sub_int(self, one):\n rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)\n with 
tm.assert_produces_warning(FutureWarning, check_stacklevel=False):\n # GH#22535\n result = rng - one\n expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)\n tm.assert_index_equal(result, expected)\n\n def test_tdi_isub_int(self, one):\n rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)\n expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)\n with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):\n # GH#22535\n rng -= one\n tm.assert_index_equal(rng, expected)\n\n # -------------------------------------------------------------\n # __add__/__sub__ with integer arrays\n\n @pytest.mark.parametrize('box', [np.array, pd.Index])\n def test_tdi_add_integer_array(self, box):\n # GH#19959\n rng = timedelta_range('1 days 09:00:00', freq='H', periods=3)\n other = box([4, 3, 2])\n expected = TimedeltaIndex(['1 day 13:00:00'] * 3)\n with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):\n # GH#22535\n result = rng + other\n tm.assert_index_equal(result, expected)\n\n with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):\n # GH#22535\n result = other + rng\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize('box', [np.array, pd.Index])\n def test_tdi_sub_integer_array(self, box):\n # GH#19959\n rng = timedelta_range('9H', freq='H', periods=3)\n other = box([4, 3, 2])\n expected = TimedeltaIndex(['5H', '7H', '9H'])\n with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):\n # GH#22535\n result = rng - other\n tm.assert_index_equal(result, expected)\n\n with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):\n # GH#22535\n result = other - rng\n tm.assert_index_equal(result, -expected)\n\n @pytest.mark.parametrize('box', [np.array, pd.Index])\n def test_tdi_addsub_integer_array_no_freq(self, box):\n # GH#19959\n tdi = TimedeltaIndex(['1 Day', 'NaT', '3 Hours'])\n other = box([14, -1, 16])\n with pytest.raises(NullFrequencyError):\n tdi + other\n with pytest.raises(NullFrequencyError):\n other + tdi\n with pytest.raises(NullFrequencyError):\n tdi - other\n with pytest.raises(NullFrequencyError):\n other - tdi\n\n # -------------------------------------------------------------\n # Binary operations TimedeltaIndex and timedelta-like\n # Note: add and sub are tested in tests.test_arithmetic, in-place\n # tests are kept here because their behavior is Index-specific\n\n def test_tdi_iadd_timedeltalike(self, delta):\n # only test adding/sub offsets as + is now numeric\n rng = timedelta_range('1 days', '10 days')\n expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',\n freq='D')\n rng += delta\n tm.assert_index_equal(rng, expected)\n\n def test_tdi_isub_timedeltalike(self, delta):\n # only test adding/sub offsets as - is now numeric\n rng = timedelta_range('1 days', '10 days')\n expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')\n rng -= delta\n tm.assert_index_equal(rng, expected)\n\n # -------------------------------------------------------------\n\n # TODO: after #24365 this probably belongs in scalar tests\n def test_ops_ndarray(self):\n td = Timedelta('1 day')\n\n # timedelta, timedelta\n other = pd.to_timedelta(['1 day']).values\n expected = pd.to_timedelta(['2 days']).values\n tm.assert_numpy_array_equal(td + other, expected)\n tm.assert_numpy_array_equal(other + td, expected)\n msg = r\"unsupported operand type\\(s\\) for \\+: 'Timedelta' and 'int'\"\n with pytest.raises(TypeError, match=msg):\n td + np.array([1])\n msg = 
(r\"unsupported operand type\\(s\\) for \\+: 'numpy.ndarray' and\"\n \" 'Timedelta'\")\n with pytest.raises(TypeError, match=msg):\n np.array([1]) + td\n\n expected = pd.to_timedelta(['0 days']).values\n tm.assert_numpy_array_equal(td - other, expected)\n tm.assert_numpy_array_equal(-other + td, expected)\n msg = r\"unsupported operand type\\(s\\) for -: 'Timedelta' and 'int'\"\n with pytest.raises(TypeError, match=msg):\n td - np.array([1])\n msg = (r\"unsupported operand type\\(s\\) for -: 'numpy.ndarray' and\"\n \" 'Timedelta'\")\n with pytest.raises(TypeError, match=msg):\n np.array([1]) - td\n\n expected = pd.to_timedelta(['2 days']).values\n tm.assert_numpy_array_equal(td * np.array([2]), expected)\n tm.assert_numpy_array_equal(np.array([2]) * td, expected)\n msg = (\"ufunc multiply cannot use operands with types\"\n r\" dtype\\('<m8\\[ns\\]'\\) and dtype\\('<m8\\[ns\\]'\\)\")\n with pytest.raises(TypeError, match=msg):\n td * other\n with pytest.raises(TypeError, match=msg):\n other * td\n\n tm.assert_numpy_array_equal(td / other,\n np.array([1], dtype=np.float64))\n tm.assert_numpy_array_equal(other / td,\n np.array([1], dtype=np.float64))\n\n # timedelta, datetime\n other = pd.to_datetime(['2000-01-01']).values\n expected = pd.to_datetime(['2000-01-02']).values\n tm.assert_numpy_array_equal(td + other, expected)\n tm.assert_numpy_array_equal(other + td, expected)\n\n expected = pd.to_datetime(['1999-12-31']).values\n tm.assert_numpy_array_equal(-td + other, expected)\n tm.assert_numpy_array_equal(other - td, expected)\n\n def test_tdi_ops_attributes(self):\n rng = timedelta_range('2 days', periods=5, freq='2D', name='x')\n\n result = rng + 1 * rng.freq\n exp = timedelta_range('4 days', periods=5, freq='2D', name='x')\n tm.assert_index_equal(result, exp)\n assert result.freq == '2D'\n\n result = rng - 2 * rng.freq\n exp = timedelta_range('-2 days', periods=5, freq='2D', name='x')\n tm.assert_index_equal(result, exp)\n assert result.freq == '2D'\n\n result = rng * 2\n exp = timedelta_range('4 days', periods=5, freq='4D', name='x')\n tm.assert_index_equal(result, exp)\n assert result.freq == '4D'\n\n result = rng / 2\n exp = timedelta_range('1 days', periods=5, freq='D', name='x')\n tm.assert_index_equal(result, exp)\n assert result.freq == 'D'\n\n result = -rng\n exp = timedelta_range('-2 days', periods=5, freq='-2D', name='x')\n tm.assert_index_equal(result, exp)\n assert result.freq == '-2D'\n\n rng = pd.timedelta_range('-2 days', periods=5, freq='D', name='x')\n\n result = abs(rng)\n exp = TimedeltaIndex(['2 days', '1 days', '0 days', '1 days',\n '2 days'], name='x')\n tm.assert_index_equal(result, exp)\n assert result.freq is None\n",
"from datetime import datetime\n\nimport pytest\n\nfrom pandas.tseries.holiday import (\n after_nearest_workday, before_nearest_workday, nearest_workday,\n next_monday, next_monday_or_tuesday, next_workday, previous_friday,\n previous_workday, sunday_to_monday, weekend_to_monday)\n\n_WEDNESDAY = datetime(2014, 4, 9)\n_THURSDAY = datetime(2014, 4, 10)\n_FRIDAY = datetime(2014, 4, 11)\n_SATURDAY = datetime(2014, 4, 12)\n_SUNDAY = datetime(2014, 4, 13)\n_MONDAY = datetime(2014, 4, 14)\n_TUESDAY = datetime(2014, 4, 15)\n\n\[email protected](\"day\", [_SATURDAY, _SUNDAY])\ndef test_next_monday(day):\n assert next_monday(day) == _MONDAY\n\n\[email protected](\"day,expected\", [\n (_SATURDAY, _MONDAY),\n (_SUNDAY, _TUESDAY),\n (_MONDAY, _TUESDAY)\n])\ndef test_next_monday_or_tuesday(day, expected):\n assert next_monday_or_tuesday(day) == expected\n\n\[email protected](\"day\", [_SATURDAY, _SUNDAY])\ndef test_previous_friday(day):\n assert previous_friday(day) == _FRIDAY\n\n\ndef test_sunday_to_monday():\n assert sunday_to_monday(_SUNDAY) == _MONDAY\n\n\[email protected](\"day,expected\", [\n (_SATURDAY, _FRIDAY),\n (_SUNDAY, _MONDAY),\n (_MONDAY, _MONDAY)\n])\ndef test_nearest_workday(day, expected):\n assert nearest_workday(day) == expected\n\n\[email protected](\"day,expected\", [\n (_SATURDAY, _MONDAY),\n (_SUNDAY, _MONDAY),\n (_MONDAY, _MONDAY)\n])\ndef test_weekend_to_monday(day, expected):\n assert weekend_to_monday(day) == expected\n\n\[email protected](\"day,expected\", [\n (_SATURDAY, _MONDAY),\n (_SUNDAY, _MONDAY),\n (_MONDAY, _TUESDAY)\n])\ndef test_next_workday(day, expected):\n assert next_workday(day) == expected\n\n\[email protected](\"day,expected\", [\n (_SATURDAY, _FRIDAY),\n (_SUNDAY, _FRIDAY),\n (_TUESDAY, _MONDAY)\n])\ndef test_previous_workday(day, expected):\n assert previous_workday(day) == expected\n\n\[email protected](\"day,expected\", [\n (_SATURDAY, _THURSDAY),\n (_SUNDAY, _FRIDAY),\n (_TUESDAY, _MONDAY)\n])\ndef test_before_nearest_workday(day, expected):\n assert before_nearest_workday(day) == expected\n\n\[email protected](\"day,expected\", [\n (_SATURDAY, _MONDAY),\n (_SUNDAY, _TUESDAY),\n (_FRIDAY, _MONDAY)\n])\ndef test_after_nearest_workday(day, expected):\n assert after_nearest_workday(day) == expected\n",
"import numpy as np\nimport pytest\n\nfrom pandas._libs.tslibs.period import IncompatibleFrequency\nfrom pandas.compat import lmap, lrange\n\nfrom pandas.core.dtypes.dtypes import PeriodDtype\n\nimport pandas as pd\nfrom pandas import (\n Index, Period, PeriodIndex, Series, date_range, offsets, period_range)\nimport pandas.core.indexes.period as period\nimport pandas.util.testing as tm\n\n\nclass TestPeriodIndex:\n\n def setup_method(self, method):\n pass\n\n def test_construction_base_constructor(self):\n # GH 13664\n arr = [pd.Period('2011-01', freq='M'), pd.NaT,\n pd.Period('2011-03', freq='M')]\n tm.assert_index_equal(pd.Index(arr), pd.PeriodIndex(arr))\n tm.assert_index_equal(pd.Index(np.array(arr)),\n pd.PeriodIndex(np.array(arr)))\n\n arr = [np.nan, pd.NaT, pd.Period('2011-03', freq='M')]\n tm.assert_index_equal(pd.Index(arr), pd.PeriodIndex(arr))\n tm.assert_index_equal(pd.Index(np.array(arr)),\n pd.PeriodIndex(np.array(arr)))\n\n arr = [pd.Period('2011-01', freq='M'), pd.NaT,\n pd.Period('2011-03', freq='D')]\n tm.assert_index_equal(pd.Index(arr), pd.Index(arr, dtype=object))\n\n tm.assert_index_equal(pd.Index(np.array(arr)),\n pd.Index(np.array(arr), dtype=object))\n\n def test_constructor_use_start_freq(self):\n # GH #1118\n p = Period('4/2/2012', freq='B')\n with tm.assert_produces_warning(FutureWarning):\n index = PeriodIndex(start=p, periods=10)\n expected = period_range(start='4/2/2012', periods=10, freq='B')\n tm.assert_index_equal(index, expected)\n\n index = period_range(start=p, periods=10)\n tm.assert_index_equal(index, expected)\n\n def test_constructor_field_arrays(self):\n # GH #1264\n\n years = np.arange(1990, 2010).repeat(4)[2:-2]\n quarters = np.tile(np.arange(1, 5), 20)[2:-2]\n\n index = PeriodIndex(year=years, quarter=quarters, freq='Q-DEC')\n expected = period_range('1990Q3', '2009Q2', freq='Q-DEC')\n tm.assert_index_equal(index, expected)\n\n index2 = PeriodIndex(year=years, quarter=quarters, freq='2Q-DEC')\n tm.assert_numpy_array_equal(index.asi8, index2.asi8)\n\n index = PeriodIndex(year=years, quarter=quarters)\n tm.assert_index_equal(index, expected)\n\n years = [2007, 2007, 2007]\n months = [1, 2]\n\n msg = \"Mismatched Period array lengths\"\n with pytest.raises(ValueError, match=msg):\n PeriodIndex(year=years, month=months, freq='M')\n with pytest.raises(ValueError, match=msg):\n PeriodIndex(year=years, month=months, freq='2M')\n\n msg = \"Can either instantiate from fields or endpoints, but not both\"\n with pytest.raises(ValueError, match=msg):\n PeriodIndex(year=years, month=months, freq='M',\n start=Period('2007-01', freq='M'))\n\n years = [2007, 2007, 2007]\n months = [1, 2, 3]\n idx = PeriodIndex(year=years, month=months, freq='M')\n exp = period_range('2007-01', periods=3, freq='M')\n tm.assert_index_equal(idx, exp)\n\n def test_constructor_U(self):\n # U was used as undefined period\n with pytest.raises(ValueError, match=\"Invalid frequency: X\"):\n period_range('2007-1-1', periods=500, freq='X')\n\n def test_constructor_nano(self):\n idx = period_range(start=Period(ordinal=1, freq='N'),\n end=Period(ordinal=4, freq='N'), freq='N')\n exp = PeriodIndex([Period(ordinal=1, freq='N'),\n Period(ordinal=2, freq='N'),\n Period(ordinal=3, freq='N'),\n Period(ordinal=4, freq='N')], freq='N')\n tm.assert_index_equal(idx, exp)\n\n def test_constructor_arrays_negative_year(self):\n years = np.arange(1960, 2000, dtype=np.int64).repeat(4)\n quarters = np.tile(np.array([1, 2, 3, 4], dtype=np.int64), 40)\n\n pindex = PeriodIndex(year=years, 
quarter=quarters)\n\n tm.assert_index_equal(pindex.year, pd.Index(years))\n tm.assert_index_equal(pindex.quarter, pd.Index(quarters))\n\n def test_constructor_invalid_quarters(self):\n msg = \"Quarter must be 1 <= q <= 4\"\n with pytest.raises(ValueError, match=msg):\n PeriodIndex(year=lrange(2000, 2004), quarter=lrange(4),\n freq='Q-DEC')\n\n def test_constructor_corner(self):\n msg = \"Not enough parameters to construct Period range\"\n with pytest.raises(ValueError, match=msg):\n PeriodIndex(periods=10, freq='A')\n\n start = Period('2007', freq='A-JUN')\n end = Period('2010', freq='A-DEC')\n\n msg = \"start and end must have same freq\"\n with pytest.raises(ValueError, match=msg):\n PeriodIndex(start=start, end=end)\n\n msg = (\"Of the three parameters: start, end, and periods, exactly two\"\n \" must be specified\")\n with pytest.raises(ValueError, match=msg):\n PeriodIndex(start=start)\n with pytest.raises(ValueError, match=msg):\n PeriodIndex(end=end)\n\n result = period_range('2007-01', periods=10.5, freq='M')\n exp = period_range('2007-01', periods=10, freq='M')\n tm.assert_index_equal(result, exp)\n\n def test_constructor_fromarraylike(self):\n idx = period_range('2007-01', periods=20, freq='M')\n\n # values is an array of Period, thus can retrieve freq\n tm.assert_index_equal(PeriodIndex(idx.values), idx)\n tm.assert_index_equal(PeriodIndex(list(idx.values)), idx)\n\n msg = \"freq not specified and cannot be inferred\"\n with pytest.raises(ValueError, match=msg):\n PeriodIndex(idx._ndarray_values)\n with pytest.raises(ValueError, match=msg):\n PeriodIndex(list(idx._ndarray_values))\n\n msg = \"'Period' object is not iterable\"\n with pytest.raises(TypeError, match=msg):\n PeriodIndex(data=Period('2007', freq='A'))\n\n result = PeriodIndex(iter(idx))\n tm.assert_index_equal(result, idx)\n\n result = PeriodIndex(idx)\n tm.assert_index_equal(result, idx)\n\n result = PeriodIndex(idx, freq='M')\n tm.assert_index_equal(result, idx)\n\n result = PeriodIndex(idx, freq=offsets.MonthEnd())\n tm.assert_index_equal(result, idx)\n assert result.freq == 'M'\n\n result = PeriodIndex(idx, freq='2M')\n tm.assert_index_equal(result, idx.asfreq('2M'))\n assert result.freq == '2M'\n\n result = PeriodIndex(idx, freq=offsets.MonthEnd(2))\n tm.assert_index_equal(result, idx.asfreq('2M'))\n assert result.freq == '2M'\n\n result = PeriodIndex(idx, freq='D')\n exp = idx.asfreq('D', 'e')\n tm.assert_index_equal(result, exp)\n\n def test_constructor_datetime64arr(self):\n vals = np.arange(100000, 100000 + 10000, 100, dtype=np.int64)\n vals = vals.view(np.dtype('M8[us]'))\n\n msg = r\"Wrong dtype: datetime64\\[us\\]\"\n with pytest.raises(ValueError, match=msg):\n PeriodIndex(vals, freq='D')\n\n @pytest.mark.parametrize('box', [None, 'series', 'index'])\n def test_constructor_datetime64arr_ok(self, box):\n # https://github.com/pandas-dev/pandas/issues/23438\n data = pd.date_range('2017', periods=4, freq=\"M\")\n if box is None:\n data = data._values\n elif box == 'series':\n data = pd.Series(data)\n\n result = PeriodIndex(data, freq='D')\n expected = PeriodIndex([\n '2017-01-31', '2017-02-28', '2017-03-31', '2017-04-30'\n ], freq=\"D\")\n tm.assert_index_equal(result, expected)\n\n def test_constructor_dtype(self):\n # passing a dtype with a tz should localize\n idx = PeriodIndex(['2013-01', '2013-03'], dtype='period[M]')\n exp = PeriodIndex(['2013-01', '2013-03'], freq='M')\n tm.assert_index_equal(idx, exp)\n assert idx.dtype == 'period[M]'\n\n idx = PeriodIndex(['2013-01-05', '2013-03-05'], 
dtype='period[3D]')\n exp = PeriodIndex(['2013-01-05', '2013-03-05'], freq='3D')\n tm.assert_index_equal(idx, exp)\n assert idx.dtype == 'period[3D]'\n\n # if we already have a freq and its not the same, then asfreq\n # (not changed)\n idx = PeriodIndex(['2013-01-01', '2013-01-02'], freq='D')\n\n res = PeriodIndex(idx, dtype='period[M]')\n exp = PeriodIndex(['2013-01', '2013-01'], freq='M')\n tm.assert_index_equal(res, exp)\n assert res.dtype == 'period[M]'\n\n res = PeriodIndex(idx, freq='M')\n tm.assert_index_equal(res, exp)\n assert res.dtype == 'period[M]'\n\n msg = 'specified freq and dtype are different'\n with pytest.raises(period.IncompatibleFrequency, match=msg):\n PeriodIndex(['2011-01'], freq='M', dtype='period[D]')\n\n def test_constructor_empty(self):\n idx = pd.PeriodIndex([], freq='M')\n assert isinstance(idx, PeriodIndex)\n assert len(idx) == 0\n assert idx.freq == 'M'\n\n with pytest.raises(ValueError, match='freq not specified'):\n pd.PeriodIndex([])\n\n def test_constructor_pi_nat(self):\n idx = PeriodIndex([Period('2011-01', freq='M'), pd.NaT,\n Period('2011-01', freq='M')])\n exp = PeriodIndex(['2011-01', 'NaT', '2011-01'], freq='M')\n tm.assert_index_equal(idx, exp)\n\n idx = PeriodIndex(np.array([Period('2011-01', freq='M'), pd.NaT,\n Period('2011-01', freq='M')]))\n tm.assert_index_equal(idx, exp)\n\n idx = PeriodIndex([pd.NaT, pd.NaT, Period('2011-01', freq='M'),\n Period('2011-01', freq='M')])\n exp = PeriodIndex(['NaT', 'NaT', '2011-01', '2011-01'], freq='M')\n tm.assert_index_equal(idx, exp)\n\n idx = PeriodIndex(np.array([pd.NaT, pd.NaT,\n Period('2011-01', freq='M'),\n Period('2011-01', freq='M')]))\n tm.assert_index_equal(idx, exp)\n\n idx = PeriodIndex([pd.NaT, pd.NaT, '2011-01', '2011-01'], freq='M')\n tm.assert_index_equal(idx, exp)\n\n with pytest.raises(ValueError, match='freq not specified'):\n PeriodIndex([pd.NaT, pd.NaT])\n\n with pytest.raises(ValueError, match='freq not specified'):\n PeriodIndex(np.array([pd.NaT, pd.NaT]))\n\n with pytest.raises(ValueError, match='freq not specified'):\n PeriodIndex(['NaT', 'NaT'])\n\n with pytest.raises(ValueError, match='freq not specified'):\n PeriodIndex(np.array(['NaT', 'NaT']))\n\n def test_constructor_incompat_freq(self):\n msg = \"Input has different freq=D from PeriodIndex\\\\(freq=M\\\\)\"\n\n with pytest.raises(period.IncompatibleFrequency, match=msg):\n PeriodIndex([Period('2011-01', freq='M'), pd.NaT,\n Period('2011-01', freq='D')])\n\n with pytest.raises(period.IncompatibleFrequency, match=msg):\n PeriodIndex(np.array([Period('2011-01', freq='M'), pd.NaT,\n Period('2011-01', freq='D')]))\n\n # first element is pd.NaT\n with pytest.raises(period.IncompatibleFrequency, match=msg):\n PeriodIndex([pd.NaT, Period('2011-01', freq='M'),\n Period('2011-01', freq='D')])\n\n with pytest.raises(period.IncompatibleFrequency, match=msg):\n PeriodIndex(np.array([pd.NaT, Period('2011-01', freq='M'),\n Period('2011-01', freq='D')]))\n\n def test_constructor_mixed(self):\n idx = PeriodIndex(['2011-01', pd.NaT, Period('2011-01', freq='M')])\n exp = PeriodIndex(['2011-01', 'NaT', '2011-01'], freq='M')\n tm.assert_index_equal(idx, exp)\n\n idx = PeriodIndex(['NaT', pd.NaT, Period('2011-01', freq='M')])\n exp = PeriodIndex(['NaT', 'NaT', '2011-01'], freq='M')\n tm.assert_index_equal(idx, exp)\n\n idx = PeriodIndex([Period('2011-01-01', freq='D'), pd.NaT,\n '2012-01-01'])\n exp = PeriodIndex(['2011-01-01', 'NaT', '2012-01-01'], freq='D')\n tm.assert_index_equal(idx, exp)\n\n def test_constructor_simple_new(self):\n idx = 
period_range('2007-01', name='p', periods=2, freq='M')\n result = idx._simple_new(idx, name='p', freq=idx.freq)\n tm.assert_index_equal(result, idx)\n\n result = idx._simple_new(idx.astype('i8'), name='p', freq=idx.freq)\n tm.assert_index_equal(result, idx)\n\n def test_constructor_simple_new_empty(self):\n # GH13079\n idx = PeriodIndex([], freq='M', name='p')\n result = idx._simple_new(idx, name='p', freq='M')\n tm.assert_index_equal(result, idx)\n\n @pytest.mark.parametrize('floats', [[1.1, 2.1], np.array([1.1, 2.1])])\n def test_constructor_floats(self, floats):\n msg = r\"PeriodIndex\\._simple_new does not accept floats\"\n with pytest.raises(TypeError, match=msg):\n pd.PeriodIndex._simple_new(floats, freq='M')\n\n msg = \"PeriodIndex does not allow floating point in construction\"\n with pytest.raises(TypeError, match=msg):\n pd.PeriodIndex(floats, freq='M')\n\n def test_constructor_nat(self):\n msg = \"start and end must not be NaT\"\n with pytest.raises(ValueError, match=msg):\n period_range(start='NaT', end='2011-01-01', freq='M')\n with pytest.raises(ValueError, match=msg):\n period_range(start='2011-01-01', end='NaT', freq='M')\n\n def test_constructor_year_and_quarter(self):\n year = pd.Series([2001, 2002, 2003])\n quarter = year - 2000\n idx = PeriodIndex(year=year, quarter=quarter)\n strs = ['%dQ%d' % t for t in zip(quarter, year)]\n lops = list(map(Period, strs))\n p = PeriodIndex(lops)\n tm.assert_index_equal(p, idx)\n\n @pytest.mark.parametrize('func, warning', [\n (PeriodIndex, FutureWarning),\n (period_range, None)\n ])\n def test_constructor_freq_mult(self, func, warning):\n # GH #7811\n with tm.assert_produces_warning(warning):\n # must be the same, but for sure...\n pidx = func(start='2014-01', freq='2M', periods=4)\n expected = PeriodIndex(['2014-01', '2014-03',\n '2014-05', '2014-07'], freq='2M')\n tm.assert_index_equal(pidx, expected)\n\n with tm.assert_produces_warning(warning):\n pidx = func(start='2014-01-02', end='2014-01-15', freq='3D')\n expected = PeriodIndex(['2014-01-02', '2014-01-05',\n '2014-01-08', '2014-01-11',\n '2014-01-14'], freq='3D')\n tm.assert_index_equal(pidx, expected)\n\n with tm.assert_produces_warning(warning):\n pidx = func(end='2014-01-01 17:00', freq='4H', periods=3)\n expected = PeriodIndex(['2014-01-01 09:00', '2014-01-01 13:00',\n '2014-01-01 17:00'], freq='4H')\n tm.assert_index_equal(pidx, expected)\n\n msg = ('Frequency must be positive, because it'\n ' represents span: -1M')\n with pytest.raises(ValueError, match=msg):\n PeriodIndex(['2011-01'], freq='-1M')\n\n msg = ('Frequency must be positive, because it' ' represents span: 0M')\n with pytest.raises(ValueError, match=msg):\n PeriodIndex(['2011-01'], freq='0M')\n\n msg = ('Frequency must be positive, because it' ' represents span: 0M')\n with pytest.raises(ValueError, match=msg):\n period_range('2011-01', periods=3, freq='0M')\n\n @pytest.mark.parametrize('freq', ['A', 'M', 'D', 'T', 'S'])\n @pytest.mark.parametrize('mult', [1, 2, 3, 4, 5])\n def test_constructor_freq_mult_dti_compat(self, mult, freq):\n freqstr = str(mult) + freq\n pidx = period_range(start='2014-04-01', freq=freqstr, periods=10)\n expected = date_range(start='2014-04-01', freq=freqstr,\n periods=10).to_period(freqstr)\n tm.assert_index_equal(pidx, expected)\n\n def test_constructor_freq_combined(self):\n for freq in ['1D1H', '1H1D']:\n pidx = PeriodIndex(['2016-01-01', '2016-01-02'], freq=freq)\n expected = PeriodIndex(['2016-01-01 00:00', '2016-01-02 00:00'],\n freq='25H')\n for freq in ['1D1H', '1H1D']:\n 
pidx = period_range(start='2016-01-01', periods=2, freq=freq)\n expected = PeriodIndex(['2016-01-01 00:00', '2016-01-02 01:00'],\n freq='25H')\n tm.assert_index_equal(pidx, expected)\n\n def test_constructor_range_based_deprecated(self):\n with tm.assert_produces_warning(FutureWarning):\n pi = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')\n assert len(pi) == 9\n\n def test_constructor_range_based_deprecated_different_freq(self):\n with tm.assert_produces_warning(FutureWarning) as m:\n PeriodIndex(start='2000', periods=2)\n\n warning, = m\n assert 'freq=\"A-DEC\"' in str(warning.message)\n\n def test_constructor(self):\n pi = period_range(freq='A', start='1/1/2001', end='12/1/2009')\n assert len(pi) == 9\n\n pi = period_range(freq='Q', start='1/1/2001', end='12/1/2009')\n assert len(pi) == 4 * 9\n\n pi = period_range(freq='M', start='1/1/2001', end='12/1/2009')\n assert len(pi) == 12 * 9\n\n pi = period_range(freq='D', start='1/1/2001', end='12/31/2009')\n assert len(pi) == 365 * 9 + 2\n\n pi = period_range(freq='B', start='1/1/2001', end='12/31/2009')\n assert len(pi) == 261 * 9\n\n pi = period_range(freq='H', start='1/1/2001', end='12/31/2001 23:00')\n assert len(pi) == 365 * 24\n\n pi = period_range(freq='Min', start='1/1/2001', end='1/1/2001 23:59')\n assert len(pi) == 24 * 60\n\n pi = period_range(freq='S', start='1/1/2001', end='1/1/2001 23:59:59')\n assert len(pi) == 24 * 60 * 60\n\n start = Period('02-Apr-2005', 'B')\n i1 = period_range(start=start, periods=20)\n assert len(i1) == 20\n assert i1.freq == start.freq\n assert i1[0] == start\n\n end_intv = Period('2006-12-31', 'W')\n i1 = period_range(end=end_intv, periods=10)\n assert len(i1) == 10\n assert i1.freq == end_intv.freq\n assert i1[-1] == end_intv\n\n end_intv = Period('2006-12-31', '1w')\n i2 = period_range(end=end_intv, periods=10)\n assert len(i1) == len(i2)\n assert (i1 == i2).all()\n assert i1.freq == i2.freq\n\n end_intv = Period('2006-12-31', ('w', 1))\n i2 = period_range(end=end_intv, periods=10)\n assert len(i1) == len(i2)\n assert (i1 == i2).all()\n assert i1.freq == i2.freq\n\n end_intv = Period('2005-05-01', 'B')\n i1 = period_range(start=start, end=end_intv)\n\n # infer freq from first element\n i2 = PeriodIndex([end_intv, Period('2005-05-05', 'B')])\n assert len(i2) == 2\n assert i2[0] == end_intv\n\n i2 = PeriodIndex(np.array([end_intv, Period('2005-05-05', 'B')]))\n assert len(i2) == 2\n assert i2[0] == end_intv\n\n # Mixed freq should fail\n vals = [end_intv, Period('2006-12-31', 'w')]\n msg = r\"Input has different freq=W-SUN from PeriodIndex\\(freq=B\\)\"\n with pytest.raises(IncompatibleFrequency, match=msg):\n PeriodIndex(vals)\n vals = np.array(vals)\n with pytest.raises(IncompatibleFrequency, match=msg):\n PeriodIndex(vals)\n\n def test_constructor_error(self):\n start = Period('02-Apr-2005', 'B')\n end_intv = Period('2006-12-31', ('w', 1))\n\n msg = 'start and end must have same freq'\n with pytest.raises(ValueError, match=msg):\n PeriodIndex(start=start, end=end_intv)\n\n msg = ('Of the three parameters: start, end, and periods, '\n 'exactly two must be specified')\n with pytest.raises(ValueError, match=msg):\n PeriodIndex(start=start)\n\n @pytest.mark.parametrize('freq', ['M', 'Q', 'A', 'D', 'B',\n 'T', 'S', 'L', 'U', 'N', 'H'])\n def test_recreate_from_data(self, freq):\n org = period_range(start='2001/04/01', freq=freq, periods=1)\n idx = PeriodIndex(org.values, freq=freq)\n tm.assert_index_equal(idx, org)\n\n def test_map_with_string_constructor(self):\n raw = [2005, 2007, 2009]\n 
index = PeriodIndex(raw, freq='A')\n\n expected = Index(lmap(str, raw))\n res = index.map(str)\n\n # should return an Index\n assert isinstance(res, Index)\n\n # preserve element types\n assert all(isinstance(resi, str) for resi in res)\n\n # lastly, values should compare equal\n tm.assert_index_equal(res, expected)\n\n\nclass TestSeriesPeriod:\n\n def setup_method(self, method):\n self.series = Series(period_range('2000-01-01', periods=10, freq='D'))\n\n def test_constructor_cant_cast_period(self):\n msg = \"Cannot cast PeriodArray to dtype float64\"\n with pytest.raises(TypeError, match=msg):\n Series(period_range('2000-01-01', periods=10, freq='D'),\n dtype=float)\n\n def test_constructor_cast_object(self):\n s = Series(period_range('1/1/2000', periods=10),\n dtype=PeriodDtype(\"D\"))\n exp = Series(period_range('1/1/2000', periods=10))\n tm.assert_series_equal(s, exp)\n",
"import numpy as np\nimport pandas.util.testing as tm\nfrom pandas import Series, Index, DatetimeIndex, Timestamp, MultiIndex\n\n\ndef no_change(arr):\n return arr\n\n\ndef list_of_str(arr):\n return list(arr.astype(str))\n\n\ndef gen_of_str(arr):\n return (x for x in arr.astype(str))\n\n\ndef arr_dict(arr):\n return dict(zip(range(len(arr)), arr))\n\n\ndef list_of_tuples(arr):\n return [(i, -i) for i in arr]\n\n\ndef gen_of_tuples(arr):\n return ((i, -i) for i in arr)\n\n\ndef list_of_lists(arr):\n return [[i, -i] for i in arr]\n\n\ndef list_of_tuples_with_none(arr):\n return [(i, -i) for i in arr][:-1] + [None]\n\n\ndef list_of_lists_with_none(arr):\n return [[i, -i] for i in arr][:-1] + [None]\n\n\nclass SeriesConstructors:\n\n param_names = [\"data_fmt\", \"with_index\", \"dtype\"]\n params = [[no_change,\n list,\n list_of_str,\n gen_of_str,\n arr_dict,\n list_of_tuples,\n gen_of_tuples,\n list_of_lists,\n list_of_tuples_with_none,\n list_of_lists_with_none],\n [False, True],\n ['float', 'int']]\n\n def setup(self, data_fmt, with_index, dtype):\n N = 10**4\n if dtype == 'float':\n arr = np.random.randn(N)\n else:\n arr = np.arange(N)\n self.data = data_fmt(arr)\n self.index = np.arange(N) if with_index else None\n\n def time_series_constructor(self, data_fmt, with_index, dtype):\n Series(self.data, index=self.index)\n\n\nclass SeriesDtypesConstructors:\n\n def setup(self):\n N = 10**4\n self.arr = np.random.randn(N)\n self.arr_str = np.array(['foo', 'bar', 'baz'], dtype=object)\n self.s = Series([Timestamp('20110101'), Timestamp('20120101'),\n Timestamp('20130101')] * N * 10)\n\n def time_index_from_array_string(self):\n Index(self.arr_str)\n\n def time_index_from_array_floats(self):\n Index(self.arr)\n\n def time_dtindex_from_series(self):\n DatetimeIndex(self.s)\n\n def time_dtindex_from_index_with_series(self):\n Index(self.s)\n\n\nclass MultiIndexConstructor:\n\n def setup(self):\n N = 10**4\n self.iterables = [tm.makeStringIndex(N), range(20)]\n\n def time_multiindex_from_iterables(self):\n MultiIndex.from_product(self.iterables)\n\n\nfrom .pandas_vb_common import setup # noqa: F401\n"
] | [
[
"pandas.to_datetime",
"pandas.Series",
"numpy.linspace",
"numpy.asarray",
"pandas.util.testing.assert_index_equal",
"numpy.random.randn",
"numpy.iinfo",
"pandas.isna",
"pandas.util.testing.assert_numpy_array_equal",
"pandas.util.testing.assert_categorical_equal",
"numpy.arange",
"numpy.eye",
"pandas.util.testing.assert_series_equal",
"pandas.DatetimeIndex",
"pandas.cut",
"pandas.core.reshape.tile._round_frac",
"pandas.Categorical.from_codes",
"pandas.Categorical",
"pandas.util.testing.assert_almost_equal",
"numpy.timedelta64",
"numpy.append",
"pandas.unique",
"pandas.Interval",
"pandas.date_range",
"numpy.array",
"pandas.timedelta_range",
"pandas.TimedeltaIndex",
"pandas.api.types.CategoricalDtype",
"numpy.ones",
"pandas.IntervalIndex.from_tuples",
"pandas.IntervalIndex.from_breaks",
"numpy.datetime64",
"pandas.Timestamp"
],
[
"pandas.Series",
"numpy.take",
"numpy.asarray",
"pandas.util.testing.assert_produces_warning",
"pandas.MultiIndex.from_tuples",
"pandas.DataFrame",
"numpy.dtype",
"pandas.util.testing.assert_index_equal",
"pandas.util.testing.round_trip_pickle",
"pandas.util.testing.assert_numpy_array_equal",
"numpy.arange",
"pandas.Index",
"pandas.util.testing.assert_series_equal",
"pandas.core.internals.BlockManager",
"numpy.repeat",
"numpy.zeros",
"pandas.core.algorithms.take_nd",
"pandas.MultiIndex",
"pandas.core.internals.make_block",
"pandas.Categorical",
"pandas.array",
"numpy.timedelta64",
"numpy.random.rand",
"pandas.util.testing.randn",
"pandas._libs.internals.BlockPlacement",
"numpy.array",
"pandas.SparseArray",
"numpy.datetime64",
"numpy.ones",
"pandas.Timestamp",
"pandas.compat.lrange"
],
[
"pandas.util._decorators.deprecate_kwarg",
"pandas.Series",
"numpy.asarray",
"pandas.MultiIndex.from_tuples",
"numpy.ndarray",
"pandas.DataFrame",
"pandas.core.dtypes.common.ensure_object",
"pandas.core.common.values_from_object",
"numpy.where",
"pandas.core.dtypes.common.is_string_like",
"pandas.Index",
"numpy.logical_or.reduce",
"pandas._libs.lib.map_infer",
"pandas.core.dtypes.common.is_categorical_dtype",
"numpy.putmask",
"pandas.core.dtypes.common.is_list_like",
"pandas.util._decorators.Appender",
"numpy.sum",
"pandas.core.dtypes.common.is_bool_dtype",
"pandas.core.dtypes.common.is_scalar",
"pandas.core.dtypes.common.is_re",
"pandas.core.dtypes.common.is_integer",
"pandas.core.dtypes.common.is_object_dtype",
"pandas.core.dtypes.missing.isna",
"pandas._libs.lib.maybe_convert_objects"
],
[
"pandas.Categorical.from_codes",
"pandas.concat",
"pandas.Series",
"numpy.random.seed",
"numpy.random.choice",
"pandas.Categorical",
"pandas.util.testing.makeCategoricalIndex",
"numpy.tile",
"pandas.Index",
"numpy.ones",
"pandas.DataFrame",
"pandas.types.concat.union_categoricals",
"pandas.date_range",
"numpy.random.randint"
],
[
"pandas.util.testing.assert_produces_warning",
"pandas.Series"
],
[
"pandas._config.config.config_prefix"
],
[
"pandas.util.testing.assert_produces_warning"
],
[
"pandas.util.testing.makeStringIndex",
"numpy.random.randn",
"numpy.iinfo",
"pandas.date_range",
"pandas.read_stata"
],
[
"pandas.to_datetime",
"pandas.Series",
"numpy.arange",
"pandas.util.testing.assert_series_equal",
"pandas.DatetimeIndex",
"pandas.DataFrame",
"pandas.util.testing.assert_frame_equal",
"pandas.Timedelta",
"numpy.datetime64",
"pandas.Index",
"numpy.random.rand",
"pandas.date_range",
"pandas.Timestamp"
],
[
"pandas.timedelta_range",
"pandas.util.testing.assert_numpy_array_equal",
"pandas.to_datetime",
"pandas.TimedeltaIndex",
"pandas.util.testing.assert_produces_warning",
"pandas.Timedelta",
"numpy.timedelta64",
"pandas.util.testing.assert_index_equal",
"pandas.to_timedelta",
"numpy.array",
"pandas.offsets.Hour"
],
[
"pandas.tseries.holiday.previous_workday",
"pandas.tseries.holiday.before_nearest_workday",
"pandas.tseries.holiday.previous_friday",
"pandas.tseries.holiday.nearest_workday",
"pandas.tseries.holiday.next_workday",
"pandas.tseries.holiday.next_monday",
"pandas.tseries.holiday.after_nearest_workday",
"pandas.tseries.holiday.sunday_to_monday",
"pandas.tseries.holiday.weekend_to_monday",
"pandas.tseries.holiday.next_monday_or_tuesday"
],
[
"pandas.util.testing.assert_numpy_array_equal",
"pandas.PeriodIndex",
"pandas.Series",
"pandas.period_range",
"pandas.PeriodIndex._simple_new",
"numpy.arange",
"pandas.util.testing.assert_produces_warning",
"pandas.util.testing.assert_series_equal",
"pandas.Index",
"numpy.dtype",
"pandas.util.testing.assert_index_equal",
"pandas.compat.lmap",
"pandas.Period",
"pandas.date_range",
"pandas.offsets.MonthEnd",
"pandas.core.dtypes.dtypes.PeriodDtype",
"numpy.array",
"pandas.compat.lrange"
],
[
"pandas.Series",
"pandas.Timestamp",
"numpy.arange",
"pandas.Index",
"pandas.DatetimeIndex",
"pandas.util.testing.makeStringIndex",
"numpy.random.randn",
"pandas.MultiIndex.from_product",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"0.19",
"0.24",
"0.20"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"0.20",
"0.19"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.24",
"0.23",
"0.21",
"0.20"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
rlaplaza/Deep-Reinforcement-Learning-for-Automated-Stock-Trading-Ensemble-Strategy-ICAIF-2020 | [
"5fe6b8554587320bc6044164270635166c93616d"
] | [
"env/EnvMultipleStock_train.py"
] | [
"import numpy as np\nimport pandas as pd\nfrom gym.utils import seeding\nimport gym\nfrom gym import spaces\nimport matplotlib\n\nmatplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\n\n# shares normalization factor\n# 100 shares per trade\nHMAX_NORMALIZE = 100\n# initial amount of money we have in our account\nINITIAL_ACCOUNT_BALANCE = 1000000\n# total number of stocks in our portfolio\nSTOCK_DIM = 30\n# transaction fee: 1/1000 reasonable percentage\nTRANSACTION_FEE_PERCENT = 0.001\nREWARD_SCALING = 1e-4\n\n\nclass StockEnvTrain(gym.Env):\n \"\"\"A stock trading environment for OpenAI gym\"\"\"\n\n metadata = {\"render.modes\": [\"human\"]}\n\n def __init__(self, df, day=0):\n # super(StockEnv, self).__init__()\n # money = 10 , scope = 1\n self.day = day\n self.df = df\n\n # action_space normalization and shape is STOCK_DIM\n self.action_space = spaces.Box(low=-1, high=1, shape=(STOCK_DIM,))\n # Shape = 181: [Current Balance]+[prices 1-30]+[owned shares 1-30]\n # +[macd 1-30]+ [rsi 1-30] + [cci 1-30] + [adx 1-30]\n self.observation_space = spaces.Box(low=0, high=np.inf, shape=(181,))\n # load data from a pandas dataframe\n self.data = self.df.loc[self.day, :]\n self.terminal = False\n # initalize state\n self.state = (\n [INITIAL_ACCOUNT_BALANCE]\n + self.data.adjcp.values.tolist()\n + [0] * STOCK_DIM\n + self.data.macd.values.tolist()\n + self.data.rsi.values.tolist()\n + self.data.cci.values.tolist()\n + self.data.adx.values.tolist()\n )\n # initialize reward\n self.reward = 0\n self.cost = 0\n # memorize all the total balance change\n self.asset_memory = [INITIAL_ACCOUNT_BALANCE]\n self.rewards_memory = []\n self.trades = 0\n # self.reset()\n self._seed()\n\n def _sell_stock(self, index, action):\n # perform sell action based on the sign of the action\n if self.state[index + STOCK_DIM + 1] > 0:\n # update balance\n self.state[0] += (\n self.state[index + 1]\n * min(abs(action), self.state[index + STOCK_DIM + 1])\n * (1 - TRANSACTION_FEE_PERCENT)\n )\n\n self.state[index + STOCK_DIM + 1] -= min(\n abs(action), self.state[index + STOCK_DIM + 1]\n )\n self.cost += (\n self.state[index + 1]\n * min(abs(action), self.state[index + STOCK_DIM + 1])\n * TRANSACTION_FEE_PERCENT\n )\n self.trades += 1\n else:\n pass\n\n def _buy_stock(self, index, action):\n # perform buy action based on the sign of the action\n available_amount = self.state[0] // self.state[index + 1]\n # print('available_amount:{}'.format(available_amount))\n\n # update balance\n self.state[0] -= (\n self.state[index + 1]\n * min(available_amount, action)\n * (1 + TRANSACTION_FEE_PERCENT)\n )\n\n self.state[index + STOCK_DIM + 1] += min(available_amount, action)\n\n self.cost += (\n self.state[index + 1]\n * min(available_amount, action)\n * TRANSACTION_FEE_PERCENT\n )\n self.trades += 1\n\n def step(self, actions):\n # print(self.day)\n self.terminal = self.day >= len(self.df.index.unique()) - 1\n # print(actions)\n\n if self.terminal:\n plt.plot(self.asset_memory, \"r\")\n plt.savefig(\"results/account_value_train.png\")\n plt.close()\n end_total_asset = self.state[0] + sum(\n np.array(self.state[1 : (STOCK_DIM + 1)])\n * np.array(self.state[(STOCK_DIM + 1) : (STOCK_DIM * 2 + 1)])\n )\n\n # print(\"end_total_asset:{}\".format(end_total_asset))\n df_total_value = pd.DataFrame(self.asset_memory)\n df_total_value.to_csv(\"results/account_value_train.csv\")\n # print(\"total_reward:{}\".format(self.state[0]+sum(np.array(self.state[1:(STOCK_DIM+1)])*np.array(self.state[(STOCK_DIM+1):61]))- INITIAL_ACCOUNT_BALANCE ))\n # 
print(\"total_cost: \", self.cost)\n # print(\"total_trades: \", self.trades)\n df_total_value.columns = [\"account_value\"]\n df_total_value[\"daily_return\"] = df_total_value.pct_change(1)\n sharpe = (\n (252 ** 0.5)\n * df_total_value[\"daily_return\"].mean()\n / df_total_value[\"daily_return\"].std()\n )\n # print(\"Sharpe: \",sharpe)\n # print(\"=================================\")\n df_rewards = pd.DataFrame(self.rewards_memory)\n # df_rewards.to_csv('results/account_rewards_train.csv')\n\n # print('total asset: {}'.format(self.state[0]+ sum(np.array(self.state[1:29])*np.array(self.state[29:]))))\n # with open('obs.pkl', 'wb') as f:\n # pickle.dump(self.state, f)\n\n return self.state, self.reward, self.terminal, {}\n\n else:\n # print(np.array(self.state[1:29]))\n\n actions = actions * HMAX_NORMALIZE\n # actions = (actions.astype(int))\n\n begin_total_asset = self.state[0] + sum(\n np.array(self.state[1 : (STOCK_DIM + 1)])\n * np.array(self.state[(STOCK_DIM + 1) : (STOCK_DIM * 2 + 1)])\n )\n # print(\"begin_total_asset:{}\".format(begin_total_asset))\n\n argsort_actions = np.argsort(actions)\n\n sell_index = argsort_actions[: np.where(actions < 0)[0].shape[0]]\n buy_index = argsort_actions[::-1][: np.where(actions > 0)[0].shape[0]]\n\n for index in sell_index:\n # print('take sell action'.format(actions[index]))\n self._sell_stock(index, actions[index])\n\n for index in buy_index:\n # print('take buy action: {}'.format(actions[index]))\n self._buy_stock(index, actions[index])\n\n self.day += 1\n self.data = self.df.loc[self.day, :]\n # load next state\n # print(\"stock_shares:{}\".format(self.state[29:]))\n self.state = (\n [self.state[0]]\n + self.data.adjcp.values.tolist()\n + list(self.state[(STOCK_DIM + 1) : (STOCK_DIM * 2 + 1)])\n + self.data.macd.values.tolist()\n + self.data.rsi.values.tolist()\n + self.data.cci.values.tolist()\n + self.data.adx.values.tolist()\n )\n\n end_total_asset = self.state[0] + sum(\n np.array(self.state[1 : (STOCK_DIM + 1)])\n * np.array(self.state[(STOCK_DIM + 1) : (STOCK_DIM * 2 + 1)])\n )\n self.asset_memory.append(end_total_asset)\n # print(\"end_total_asset:{}\".format(end_total_asset))\n\n self.reward = end_total_asset - begin_total_asset\n # print(\"step_reward:{}\".format(self.reward))\n self.rewards_memory.append(self.reward)\n\n self.reward = self.reward * REWARD_SCALING\n\n return self.state, self.reward, self.terminal, {}\n\n def reset(self):\n self.asset_memory = [INITIAL_ACCOUNT_BALANCE]\n self.day = 0\n self.data = self.df.loc[self.day, :]\n self.cost = 0\n self.trades = 0\n self.terminal = False\n self.rewards_memory = []\n # initiate state\n self.state = (\n [INITIAL_ACCOUNT_BALANCE]\n + self.data.adjcp.values.tolist()\n + [0] * STOCK_DIM\n + self.data.macd.values.tolist()\n + self.data.rsi.values.tolist()\n + self.data.cci.values.tolist()\n + self.data.adx.values.tolist()\n )\n # iteration += 1\n return self.state\n\n def render(self, mode=\"human\"):\n return self.state\n\n def _seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n"
] | [
[
"matplotlib.use",
"matplotlib.pyplot.savefig",
"pandas.DataFrame",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"numpy.argsort",
"numpy.array",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
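A minimal sketch of the action-splitting logic in the StockEnvTrain.step() code serialized above (the numpy.argsort / numpy.where calls listed for this row): negative actions are sold starting from the most negative, positive actions are bought starting from the largest. The actions vector below is a made-up example, not data from this row.

    import numpy as np

    actions = np.array([-0.4, 0.9, 0.0, -0.1, 0.7])                         # hypothetical scaled actions
    argsort_actions = np.argsort(actions)                                   # ascending: most negative first
    sell_index = argsort_actions[: np.where(actions < 0)[0].shape[0]]       # -> array([0, 3])
    buy_index = argsort_actions[::-1][: np.where(actions > 0)[0].shape[0]]  # -> array([1, 4])
    print(sell_index, buy_index)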
Ravnit202/PYJAC | [
"65987f8afd2e54e1b308b09f45f291e374e79bd2"
] | [
"Game/finger.py"
] | [
"import cv2\nimport mediapipe\nimport numpy\nimport pydirectinput\nclass FingerDetector:\n\n\n wScr, hScr = pydirectinput.size() #Get the current screen resolution\n pX, pY = 0, 0 \n cX, cY = 0, 0 \n\n def __init__(self):\n \"\"\"\n Initialize all objects\n \"\"\"\n #Load the mediapipe libraries/solutions\n self.initHand = mediapipe.solutions.hands\n self.mainHand = self.initHand.Hands(min_detection_confidence=0.7, min_tracking_confidence=0.7)\n self.draw = mediapipe.solutions.drawing_utils\n\n self.fingerTips = []\n self.img = None\n\n def handLandmarks(self, colorImg):\n \"\"\"\n Detect the hand landmarks\n \"\"\"\n landmarkList = []\n\n landmarkPositions = self.mainHand.process(colorImg) # Process the given image\n landmarkCheck = landmarkPositions.multi_hand_landmarks \n\n if landmarkCheck: # Checks if landmarks exist\n for index, hand in enumerate(landmarkCheck): # differentiate by hand\n for index, landmark in enumerate(hand.landmark): \n self.draw.draw_landmarks(self.img, hand, self.initHand.HAND_CONNECTIONS) \n h, w, c = self.img.shape \n centerX, centerY = int(landmark.x * w), int(landmark.y * h) \n landmarkList.append([index, centerX, centerY]) \n \n return landmarkList\n\n def fingers(self, landmarks):\n \"\"\"\n Check the action of the fingers\n \"\"\"\n fingerTips = []\n tipIds = [4, 8, 12, 16, 20] #Values for each fingertip\n \n #Check if the thumb is up\n if landmarks[tipIds[0]][1] > self.lmList[tipIds[0] - 1][1]:\n fingerTips.append(1)\n else:\n fingerTips.append(0)\n \n #Check if fingers are up and the thumb is down\n for id in range(1, 5):\n if landmarks[tipIds[id]][2] < landmarks[tipIds[id] - 3][2]: # Checks to see if the tip of the finger is higher than the joint\n fingerTips.append(1)\n else:\n fingerTips.append(0)\n\n return fingerTips\n\n\n def fingerDetection(self, frame):\n \"\"\"\n Detect the fingers positions through the frame\n \"\"\"\n frame = cv2.flip(frame, 1)\n self.img = frame\n imgRGB = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) # Changes the format of the frames from BGR to RGB\n \n self.lmList = self.handLandmarks(imgRGB)\n\n if len(self.lmList) > 12:\n x1, y1 = self.lmList[8][1:] \n finger = self.fingers(self.lmList) \n if finger[1] == 1 and finger[2] == 0: \n x3 = numpy.interp(x1, (75, 720 - 75), (75, self.wScr)) # Converts the width of the window relative to the screen width\n y3 = numpy.interp(y1, (75, 560 - 75), (75, self.hScr)) # Converts the height of the window relative to the screen height\n \n cX = self.pX + (x3 - self.pX) /2 # Smooth out the mouse x movement\n cY = self.pY + (y3 - self.pY) /2 # Smooth out the mouse y movement\n\n pydirectinput.moveTo(int(cX), int(cY)) #Move the mouse using pydirectinput\n self.pX, self.pY = cX, cY # Save the current x and y values\n\n if finger[1] == 0 and finger[0] == 1: # Check if the pointer finger is down and the thumb finger is up\n pydirectinput.rightClick()\n \n return\n"
] | [
[
"numpy.interp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
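A minimal sketch of the window-to-screen coordinate mapping that FingerDetector.fingerDetection() in the row above performs with numpy.interp (the only API listed for this row). The screen resolution and fingertip position are hypothetical stand-ins for pydirectinput.size() and the detected landmark.

    import numpy as np

    wScr, hScr = 1920, 1080                           # assumed screen resolution
    x1, y1 = 300, 250                                 # assumed fingertip pixel inside the capture window
    x3 = np.interp(x1, (75, 720 - 75), (75, wScr))    # map window x range (75 px margin) to screen width
    y3 = np.interp(y1, (75, 560 - 75), (75, hScr))    # map window y range to screen height
    print(round(x3), round(y3))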
LiPengze97/oneflow | [
"1c1d2d3faa1c02d20e009046a290cf1095ee12e0",
"1c1d2d3faa1c02d20e009046a290cf1095ee12e0"
] | [
"python/oneflow/test/modules/test_ne.py",
"python/oneflow/test/modules/test_sub.py"
] | [
"\"\"\"\nCopyright 2020 The OneFlow Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport unittest\nfrom collections import OrderedDict\n\nimport numpy as np\nfrom test_util import GenArgList\n\nimport oneflow as flow\nimport oneflow.unittest\n\nfrom oneflow.test_utils.automated_test_util import *\n\n\ndef _test_ne(test_case, shape, device):\n arr1 = np.random.randn(*shape)\n arr2 = np.random.randn(*shape)\n input = flow.tensor(arr1, dtype=flow.float32, device=flow.device(device))\n other = flow.tensor(arr2, dtype=flow.float32, device=flow.device(device))\n of_out = flow.ne(input, other)\n of_out2 = flow.not_equal(input, other)\n np_out = np.not_equal(arr1, arr2)\n test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))\n test_case.assertTrue(np.array_equal(of_out2.numpy(), np_out))\n\n\ndef _test_tensor_ne_operator(test_case, shape, device):\n arr1 = np.random.randn(*shape)\n arr2 = np.random.randn(*shape)\n input = flow.tensor(arr1, dtype=flow.float32, device=flow.device(device))\n other = flow.tensor(arr2, dtype=flow.float32, device=flow.device(device))\n of_out = input.ne(other)\n np_out = np.not_equal(arr1, arr2)\n test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))\n\n\ndef _test_ne_int(test_case, shape, device):\n arr = np.random.randn(*shape)\n input = flow.tensor(arr, dtype=flow.float32, device=flow.device(device))\n num = 1\n of_out = flow.ne(input, num)\n np_out = np.not_equal(arr, num)\n test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))\n\n\ndef _test_tensor_ne_operator_int(test_case, shape, device):\n arr = np.random.randn(*shape)\n input = flow.tensor(arr, dtype=flow.float32, device=flow.device(device))\n num = 1\n of_out = input.ne(num)\n np_out = np.not_equal(arr, num)\n test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))\n\n\ndef _test_ne_float(test_case, shape, device):\n arr = np.random.randn(*shape)\n input = flow.tensor(arr, dtype=flow.float32, device=flow.device(device))\n num = 1.0\n of_out = flow.ne(input, num)\n np_out = np.not_equal(arr, num)\n test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))\n\n\ndef _test_tensor_ne_operator_float(test_case, shape, device):\n arr = np.random.randn(*shape)\n input = flow.tensor(arr, dtype=flow.float32, device=flow.device(device))\n num = 1.0\n of_out = input.ne(num)\n np_out = np.not_equal(arr, num)\n test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))\n\n\[email protected]_unless_1n1d()\nclass TestNe(flow.unittest.TestCase):\n def test_ne(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"test_func\"] = [\n _test_ne,\n _test_tensor_ne_operator,\n _test_ne_int,\n _test_tensor_ne_operator_int,\n _test_ne_float,\n _test_tensor_ne_operator_float,\n ]\n arg_dict[\"shape\"] = [(2, 3), (2, 3, 4), (2, 4, 5, 6)]\n arg_dict[\"device\"] = [\"cpu\", \"cuda\"]\n for arg in GenArgList(arg_dict):\n arg[0](test_case, *arg[1:])\n\n @autotest(auto_backward=False, check_graph=False)\n def test_ne_with_0shape_data(test_case):\n device = random_device()\n x1 = 
random_pytorch_tensor(4, 2, 3, 0, 5).to(device)\n x2 = random_pytorch_tensor(4, 2, 3, 0, 5).to(device)\n y1 = torch.ne(x1, x2)\n y2 = torch.ne(x1, 2)\n y3 = torch.ne(x1, 2.0)\n return (y1, y2, y3)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"\"\"\"\nCopyright 2020 The OneFlow Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport unittest\nfrom collections import OrderedDict\n\nimport numpy as np\n\nfrom oneflow.test_utils.automated_test_util import *\nfrom test_util import GenArgList\n\nimport oneflow as flow\nimport oneflow.unittest\n\nfrom oneflow.test_utils.automated_test_util import *\n\n\ndef _test_sub_impl(test_case, shape, device):\n x = flow.tensor(\n np.random.randn(*shape),\n dtype=flow.float32,\n device=flow.device(device),\n requires_grad=True,\n )\n y = flow.tensor(\n np.random.randn(*shape),\n dtype=flow.float32,\n device=flow.device(device),\n requires_grad=True,\n )\n of_out = flow.sub(x, y)\n np_out = np.subtract(x.numpy(), y.numpy())\n test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-05, 1e-05))\n of_out = of_out.sum()\n of_out.backward()\n np_grad_x = np.ones(shape)\n np_grad_y = -np.ones(shape)\n test_case.assertTrue(np.allclose(x.grad.numpy(), np_grad_x, 1e-05, 1e-05))\n test_case.assertTrue(np.allclose(y.grad.numpy(), np_grad_y, 1e-05, 1e-05))\n x = 5\n y = flow.tensor(\n np.random.randn(*shape), dtype=flow.float32, device=flow.device(device)\n )\n of_out = flow.sub(x, y)\n np_out = np.subtract(x, y.numpy())\n test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-05, 1e-05))\n x = flow.tensor(\n np.random.randn(*shape), dtype=flow.float32, device=flow.device(device)\n )\n y = 5\n of_out = flow.sub(x, y)\n np_out = np.subtract(x.numpy(), y)\n test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-05, 1e-05))\n x = flow.tensor(\n np.random.randn(*shape), dtype=flow.float32, device=flow.device(device)\n )\n y = flow.tensor(\n np.random.randn(1, 1), dtype=flow.float32, device=flow.device(device)\n )\n of_out = flow.sub(x, y)\n np_out = np.subtract(x.numpy(), y.numpy())\n test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-05, 1e-05))\n x = flow.tensor(np.array([5.0]), dtype=flow.float32)\n y = flow.tensor(np.random.randn(1, 1), dtype=flow.float32)\n of_out = flow.sub(x, y)\n np_out = np.subtract(x.numpy(), y.numpy())\n test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-05, 1e-05))\n x = flow.tensor(np.random.randn(1, 1), dtype=flow.float32, requires_grad=True)\n y = flow.tensor(np.array([5.0]), dtype=flow.float32, requires_grad=True)\n of_out = flow.sub(x, y)\n np_out = np.subtract(x.numpy(), y.numpy())\n test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-05, 1e-05))\n of_out = of_out.sum()\n of_out.backward()\n np_grad_x = np.ones((1, 1))\n np_grad_y = -np.ones(1)\n test_case.assertTrue(np.allclose(x.grad.numpy(), np_grad_x, 1e-05, 1e-05))\n test_case.assertTrue(np.allclose(y.grad.numpy(), np_grad_y, 1e-05, 1e-05))\n\n\[email protected]_unless_1n1d()\nclass TestSubModule(flow.unittest.TestCase):\n def test_sub(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"shape\"] = [(2, 3), (2, 3, 4), (2, 4, 5, 6)]\n arg_dict[\"device\"] = [\"cpu\", \"cuda\"]\n for arg in GenArgList(arg_dict):\n _test_sub_impl(test_case, *arg)\n\n def 
test_sub_against_pytorch(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"test_type\"] = [test_flow_against_pytorch, test_tensor_against_pytorch]\n arg_dict[\"device\"] = [\"cpu\", \"cuda\"]\n arg_dict[\"op\"] = [\"sub\"]\n for arg in GenArgList(arg_dict):\n arg[0](\n test_case,\n arg[2],\n extra_annotations={\"other\": flow.Tensor},\n extra_generators={\n \"input\": random_tensor(ndim=2, dim0=2, dim1=3),\n \"other\": random_tensor(ndim=2, dim0=2, dim1=3),\n },\n device=arg[1],\n )\n arg[0](\n test_case,\n arg[2],\n extra_annotations={\"other\": float},\n extra_generators={\n \"input\": random_tensor(ndim=2, dim0=2, dim1=3),\n \"other\": random(0, 5),\n },\n device=arg[1],\n )\n\n @autotest(auto_backward=False, check_graph=False)\n def test_sub_with_0shape_data(test_case):\n device = random_device()\n x = random_pytorch_tensor(2, 0, 3).to(device)\n y = random_pytorch_tensor(2, 1, 3).to(device)\n out1 = x - y\n out2 = x - 2\n out3 = 2 - x\n out4 = torch.sub(x, y)\n return out1, out2, out3, out4\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] | [
[
"numpy.not_equal",
"numpy.random.randn"
],
[
"numpy.array",
"numpy.random.randn",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
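A minimal sketch of the NumPy reference computations that the serialized test_ne.py and test_sub.py above compare oneflow results against (numpy.not_equal and broadcasting subtraction); the shapes here are arbitrary examples.

    import numpy as np

    arr1 = np.random.randn(2, 3)
    arr2 = np.random.randn(2, 3)
    np_ne = np.not_equal(arr1, arr2)              # elementwise reference for flow.ne
    np_sub = np.subtract(arr1, np.ones((1, 1)))   # broadcasting reference for flow.sub
    print(np_ne.dtype, np_sub.shape)              # bool (2, 3)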
Ray0089/PSGMN | [
"0363d558add24034e035d26121e2e1b61d97c198"
] | [
"utils/utils.py"
] | [
"# import PIL\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\nimport cv2\nimport torch\nfrom torch_geometric.data import Data\n\n\ndef load_ply(path):\n \"\"\"\n Loads a 3D mesh model from a PLY file.\n\n :param path: Path to a PLY file.\n :return: The loaded model given by a dictionary with items:\n 'pts' (nx3 ndarray), 'normals' (nx3 ndarray), 'colors' (nx3 ndarray),\n 'faces' (mx3 ndarray) - the latter three are optional.\n \"\"\"\n f = open(path, 'r')\n\n n_pts = 0\n n_faces = 0\n face_n_corners = 3 # Only triangular faces are supported\n pt_props = []\n face_props = []\n is_binary = False\n header_vertex_section = False\n header_face_section = False\n\n # Read header\n while True:\n line = f.readline().rstrip('\\n').rstrip('\\r') # Strip the newline character(s)\n if line.startswith('element vertex'):\n n_pts = int(line.split()[-1])\n header_vertex_section = True\n header_face_section = False\n elif line.startswith('element face'):\n n_faces = int(line.split()[-1])\n header_vertex_section = False\n header_face_section = True\n elif line.startswith('element'): # Some other element\n header_vertex_section = False\n header_face_section = False\n elif line.startswith('property') and header_vertex_section:\n # (name of the property, data type)\n pt_props.append((line.split()[-1], line.split()[-2]))\n elif line.startswith('property list') and header_face_section:\n elems = line.split()\n if elems[-1] == 'vertex_indices':\n # (name of the property, data type)\n face_props.append(('n_corners', elems[2]))\n for i in range(face_n_corners):\n face_props.append(('ind_' + str(i), elems[3]))\n else:\n print('Warning: Not supported face property: ' + elems[-1])\n elif line.startswith('format'):\n if 'binary' in line:\n is_binary = True\n elif line.startswith('end_header'):\n break\n\n # Prepare data structures\n model = {}\n model['pts'] = np.zeros((n_pts, 3), np.float)\n if n_faces > 0:\n model['faces'] = np.zeros((n_faces, face_n_corners), np.float)\n\n pt_props_names = [p[0] for p in pt_props]\n is_normal = False\n if {'nx', 'ny', 'nz'}.issubset(set(pt_props_names)):\n is_normal = True\n model['normals'] = np.zeros((n_pts, 3), np.float)\n\n is_color = False\n if {'red', 'green', 'blue'}.issubset(set(pt_props_names)):\n is_color = True\n model['colors'] = np.zeros((n_pts, 3), np.float)\n\n is_texture = False\n if {'texture_u', 'texture_v'}.issubset(set(pt_props_names)):\n is_texture = True\n model['texture_uv'] = np.zeros((n_pts, 2), np.float)\n\n formats = { # For binary format\n 'float': ('f', 4),\n 'double': ('d', 8),\n 'int': ('i', 4),\n 'uchar': ('B', 1)\n }\n\n # Load vertices\n for pt_id in range(n_pts):\n prop_vals = {}\n load_props = ['x', 'y', 'z', 'nx', 'ny', 'nz',\n 'red', 'green', 'blue', 'texture_u', 'texture_v']\n if is_binary:\n for prop in pt_props:\n format = formats[prop[1]]\n val = struct.unpack(format[0], f.read(format[1]))[0]\n if prop[0] in load_props:\n prop_vals[prop[0]] = val\n else:\n elems = f.readline().rstrip('\\n').rstrip('\\r').split()\n for prop_id, prop in enumerate(pt_props):\n if prop[0] in load_props:\n prop_vals[prop[0]] = elems[prop_id]\n\n model['pts'][pt_id, 0] = float(prop_vals['x'])\n model['pts'][pt_id, 1] = float(prop_vals['y'])\n model['pts'][pt_id, 2] = float(prop_vals['z'])\n\n if is_normal:\n model['normals'][pt_id, 0] = float(prop_vals['nx'])\n model['normals'][pt_id, 1] = float(prop_vals['ny'])\n model['normals'][pt_id, 2] = float(prop_vals['nz'])\n\n if is_color:\n model['colors'][pt_id, 0] = float(prop_vals['red'])\n 
model['colors'][pt_id, 1] = float(prop_vals['green'])\n model['colors'][pt_id, 2] = float(prop_vals['blue'])\n\n if is_texture:\n model['texture_uv'][pt_id, 0] = float(prop_vals['texture_u'])\n model['texture_uv'][pt_id, 1] = float(prop_vals['texture_v'])\n\n # Load faces\n for face_id in range(n_faces):\n prop_vals = {}\n if is_binary:\n for prop in face_props:\n format = formats[prop[1]]\n val = struct.unpack(format[0], f.read(format[1]))[0]\n if prop[0] == 'n_corners':\n if val != face_n_corners:\n print('Error: Only triangular faces are supported.')\n print('Number of face corners: ' + str(val))\n exit(-1)\n else:\n prop_vals[prop[0]] = val\n else:\n elems = f.readline().rstrip('\\n').rstrip('\\r').split()\n for prop_id, prop in enumerate(face_props):\n if prop[0] == 'n_corners':\n if int(elems[prop_id]) != face_n_corners:\n print('Error: Only triangular faces are supported.')\n print('Number of face corners: ' + str(int(elems[prop_id])))\n exit(-1)\n else:\n prop_vals[prop[0]] = elems[prop_id]\n\n model['faces'][face_id, 0] = int(prop_vals['ind_0'])\n model['faces'][face_id, 1] = int(prop_vals['ind_1'])\n model['faces'][face_id, 2] = int(prop_vals['ind_2'])\n\n f.close()\n\n return model\n\ndef read_ply_to_data(path):\n\n model = load_ply(path)\n mean=[0.485, 0.456, 0.406] \n std=[0.229, 0.224, 0.225]\n x = model['colors']\n\n x = x / 255.0\n x -= mean\n x /= std\n x = np.concatenate([x,model['pts'],model['normals']],axis=-1)\n x = torch.tensor(x,dtype=torch.float32)\n \n pos = torch.tensor(model['pts'],dtype=torch.float32)\n face = torch.tensor(model['faces'],dtype=torch.long).transpose(1,0)\n data = Data(x = x, pos=pos,face = face)\n return data\n\ndef read_mask(path, split, cls_idx=1):\n if split == \"train\" or split == \"test\":\n return (np.asarray(Image.open(path))[:, :, 0] != 0).astype(np.uint8)\n elif split == \"fuse\":\n return (np.asarray(Image.open(path)) == cls_idx).astype(np.uint8)\n elif split == \"render\":\n return (np.asarray(Image.open(path))).astype(np.uint8)\n\n\ndef mask_iou(self, output, batch):\n mask_pred = torch.argmax(output[\"seg\"], dim=1)[0].detach().cpu().numpy()\n mask_gt = batch[\"mask\"][0].detach().cpu().numpy()\n iou = (mask_pred & mask_gt).sum() / (mask_pred | mask_gt).sum()\n self.mask_ap.append(iou > 0.7)\n\n\ndef softmax(x):\n \"\"\"Compute softmax values for each sets of scores in x.\"\"\"\n return np.exp(x) / np.sum(np.exp(x), axis=0)\n\ndef cal_error(S, y, img_shape=(480, 640)):\n S = S[:, y[0, :, 0], :]\n S = S.detach().cpu().numpy()\n y = y.detach().cpu().numpy()\n S = np.argmax(S, axis=-1)\n S = S.reshape(-1)\n y = y[:, :, 1].reshape(-1)\n\n gt_pos = []\n for idx in y:\n v = math.floor(idx / img_shape[1])\n u = idx - img_shape[1] * v\n gt_pos.append([u, v])\n\n est_pos = []\n for idx in S:\n v = math.floor(idx / (img_shape[1] / 2)) * 2\n u = (idx - img_shape[1] / 2 * (v / 2)) * 2\n est_pos.append([u, v])\n \n gt_pos = np.array(gt_pos, dtype=np.float32)\n est_pos = np.array(est_pos, dtype=np.float32)\n error = np.abs(gt_pos - est_pos)\n dist = np.sqrt(error[0] ** 2 + error[1] ** 2)\n avg_error = np.mean(dist)\n sigma = np.std(dist)\n\n return avg_error, sigma\n\n\ndef project(xyz, K, RT):\n \"\"\"\n xyz: [N, 3]\n K: [3, 3]\n RT: [3, 4]\n \"\"\"\n xyz = np.dot(xyz, RT[:, :3].T) + RT[:, 3:].T\n xyz = np.dot(xyz, K.T)\n xy = xyz[:, :2] / xyz[:, 2:]\n return xy\n\n\ndef mesh_project(xyz, K, RT):\n \"\"\"\n xyz: [N, 3]\n K: [3, 3]\n RT: [3, 4]\n \"\"\"\n xyz = xyz.astype(np.float32)\n K = K.astype(np.float32)\n RT = RT.astype(np.float32)\n xyz = 
np.dot(xyz, RT[:, :3].T) + RT[:, 3:].T\n z = xyz[:, 2].copy()\n xyz = np.dot(xyz, K.astype(np.float32).T)\n xyz = xyz / xyz[:, 2:]\n\n xyz[:, 2] = z\n return xyz\n\ndef find_neighborhold_node(model):\n pts = model[\"pts\"]\n faces = model[\"faces\"]\n neighbors = [[] for i in range(pts.shape[0])]\n for i in range(pts.shape[0]):\n dim0, dim1 = np.where(faces == i)\n for idx in faces[dim0]:\n for id in idx:\n if id not in neighbors[i] and id != i:\n neighbors[i].append(id)\n\n return neighbors\n\n\ndef bbox_from_mask(mask_img, stride=0):\n\n mask_img = np.array(mask_img)\n mask = mask_img[:, :, 0]\n img_shape = mask.shape\n coor = np.nonzero(mask)\n coor[0].sort()\n xmin = coor[0][0]\n xmax = coor[0][-1]\n coor[1].sort()\n ymin = coor[1][0]\n ymax = coor[1][-1]\n\n if xmin >= stride:\n xmin -= stride\n else:\n xmin = 0\n if xmax + stride <= img_shape[0]:\n xmax += stride\n else:\n xmax = img_shape[0]\n\n if ymin >= stride:\n ymin -= stride\n else:\n ymin = 0\n\n if ymax + stride <= img_shape[1]:\n ymax += stride\n else:\n ymax = img_shape[1]\n\n return xmax, ymax, xmin, ymin\n\n\ndef concate_graph(x, edge, attribute):\n\n batch_size = x.shape[0]\n x_num = 0\n if x.ndim == 3:\n x_num = x.shape[1]\n elif x.ndim == 4:\n x_num = x.shape[1] * x.shape[2]\n x = x.reshape(-1, x.shape[-1])\n for i in range(batch_size):\n edge[i, :, :] += i * x_num\n\n edge = edge.permute(0, 2, 1)\n edge = edge.reshape(-1, 2)\n edge = edge.permute(1, 0)\n attribute = attribute.reshape(-1, attribute.shape[-1])\n\n return [x, edge, attribute]\n\n\ndef adjust_learning_rate(optimizer, epoch, init_lr):\n \"\"\"Sets the learning rate to the initial LR decayed by 10 every 30 epochs\"\"\"\n lr = init_lr * (0.5 ** (epoch // 20))\n print(\"LR:{}\".format(lr))\n for param_group in optimizer.param_groups:\n param_group[\"lr\"] = lr\n\n\ndef draw_error(S, y, image):\n\n S = S[:, y[0, :, 0], :]\n S = S.detach().cpu().numpy()\n batch_size = S.shape[0]\n y = y.detach().cpu().numpy()\n img = image.detach().cpu().numpy()[0]\n\n S = np.argmax(S, axis=-1)\n S = S.reshape(-1)\n y = y[:, :, 1].reshape(-1)\n gt_pos = []\n for idx in y:\n v = math.floor(idx / img.shape[1])\n u = idx - img.shape[1] * v\n gt_pos.append([u, v])\n est_pos = []\n for idx in S:\n v = math.floor(idx / (img.shape[1] / 2)) * 2\n u = (idx - img.shape[1] / 2 * (v / 2)) * 2\n est_pos.append([u, v])\n gt_pos = np.array(gt_pos, dtype=np.float32)\n est_pos = np.array(est_pos, dtype=np.float32)\n\n\nif __name__ == \"__main__\":\n\n img = plt.imread(\"/home/ray/data/LINEMOD/ape/mask/0000.png\")\n img = np.array(img)\n bbox_from_mask(img)\n"
] | [
[
"numpy.dot",
"numpy.abs",
"numpy.sqrt",
"numpy.nonzero",
"matplotlib.pyplot.imread",
"torch.tensor",
"numpy.concatenate",
"numpy.std",
"numpy.argmax",
"numpy.mean",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"numpy.where",
"torch.argmax"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
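The utils.py serialized above calls struct.unpack (binary-PLY branch of load_ply) and Image.open (read_mask), but its import block leaves PIL commented out and never imports struct, and it allocates arrays with the deprecated np.float alias. A minimal sketch, assuming the standard-library struct module and Pillow, of the imports and dtype those branches would need:

    import struct                           # needed by load_ply when the PLY header declares a binary format
    from PIL import Image                   # needed by read_mask to open mask images
    import numpy as np

    pts = np.zeros((10, 3), dtype=float)    # plain float replaces the deprecated (later removed) np.float alias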
vumichien/hummingbird | [
"8981e11ce2536167c329a5d9d20e81125a792fe4",
"8981e11ce2536167c329a5d9d20e81125a792fe4",
"8981e11ce2536167c329a5d9d20e81125a792fe4",
"8981e11ce2536167c329a5d9d20e81125a792fe4"
] | [
"tests/test_sklearn_pipeline.py",
"tests/test_onnxml_lightgbm_converter.py",
"hummingbird/ml/operator_converters/sklearn/gbdt.py",
"tests/test_onnxml_imputer_converter.py"
] | [
"import unittest\nimport numpy as np\nfrom sklearn import datasets\n\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.datasets import load_iris, load_diabetes\nfrom sklearn.svm import LinearSVC, LinearSVR\nfrom sklearn.datasets import make_regression\nfrom sklearn.decomposition import PCA\nfrom sklearn.linear_model import LogisticRegression, RidgeCV\nfrom sklearn.ensemble import RandomForestRegressor, RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline\nfrom sklearn.preprocessing import OneHotEncoder, StandardScaler, MinMaxScaler\n\nimport hummingbird.ml\nfrom hummingbird.ml._utils import pandas_installed, onnx_runtime_installed\nfrom hummingbird.ml import constants\n\nfrom onnxconverter_common.data_types import (\n FloatTensorType,\n Int64TensorType,\n StringTensorType,\n)\n\ntry:\n from sklearn.impute import SimpleImputer\nexcept ImportError:\n from sklearn.preprocessing import Imputer as SimpleImputer\n\ntry:\n from sklearn.ensemble import StackingClassifier, StackingRegressor\nexcept ImportError:\n StackingClassifier = None\n\nif pandas_installed():\n import pandas\n\n\nclass TestSklearnPipeline(unittest.TestCase):\n def test_pipeline(self):\n data = np.array([[0, 0], [0, 0], [1, 1], [1, 1]], dtype=np.float32)\n scaler = StandardScaler()\n scaler.fit(data)\n model = Pipeline([(\"scaler1\", scaler), (\"scaler2\", scaler)])\n\n torch_model = hummingbird.ml.convert(model, \"torch\")\n\n self.assertTrue(torch_model is not None)\n\n np.testing.assert_allclose(\n model.transform(data), torch_model.transform(data), rtol=1e-06, atol=1e-06,\n )\n\n def test_pipeline2(self):\n data = np.array([[0.0, 0.0], [0.0, 0.0], [1.0, 1.0], [1.0, 1.0]], dtype=np.float32)\n scaler = StandardScaler()\n scaler.fit(data)\n model = Pipeline([(\"scaler1\", scaler), (\"scaler2\", scaler)])\n\n torch_model = hummingbird.ml.convert(model, \"torch\")\n\n self.assertTrue(torch_model is not None)\n\n np.testing.assert_allclose(\n model.transform(data), torch_model.transform(data), rtol=1e-06, atol=1e-06,\n )\n\n def test_combine_inputs_union_in_pipeline(self):\n from sklearn.preprocessing import StandardScaler\n from sklearn.pipeline import Pipeline\n\n data = np.array([[0.0, 0.0], [0.0, 0.0], [1.0, 1.0], [1.0, 1.0]], dtype=np.float32)\n model = Pipeline(\n [\n (\"scaler1\", StandardScaler()),\n (\"union\", FeatureUnion([(\"scaler2\", StandardScaler()), (\"scaler3\", MinMaxScaler())])),\n ]\n )\n model.fit(data)\n\n torch_model = hummingbird.ml.convert(model, \"torch\")\n\n self.assertTrue(torch_model is not None)\n\n np.testing.assert_allclose(\n model.transform(data), torch_model.transform(data), rtol=1e-06, atol=1e-06,\n )\n\n def test_combine_inputs_floats_ints(self):\n data = [[0, 0.0], [0, 0.0], [1, 1.0], [1, 1.0]]\n scaler = StandardScaler()\n scaler.fit(data)\n model = Pipeline([(\"scaler1\", scaler), (\"scaler2\", scaler)])\n\n torch_model = hummingbird.ml.convert(model, \"torch\")\n\n self.assertTrue(torch_model is not None)\n\n np.testing.assert_allclose(\n model.transform(data), torch_model.transform(data), rtol=1e-06, atol=1e-06,\n )\n\n @unittest.skipIf(not pandas_installed(), reason=\"Test requires pandas installed\")\n def test_pipeline_column_transformer_1(self):\n iris = datasets.load_iris()\n X = iris.data[:, :3]\n y = iris.target\n X_train = pandas.DataFrame(X, columns=[\"vA\", \"vB\", \"vC\"])\n X_train[\"vcat\"] = X_train[\"vA\"].apply(lambda x: 1 if x > 0.5 else 2)\n X_train[\"vcat2\"] = 
X_train[\"vB\"].apply(lambda x: 3 if x > 0.5 else 4)\n y_train = y % 2\n numeric_features = [0, 1, 2] # [\"vA\", \"vB\", \"vC\"]\n\n classifier = LogisticRegression(\n C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver=\"liblinear\", tol=1e-3,\n )\n\n numeric_transformer = Pipeline(steps=[(\"scaler\", StandardScaler())])\n\n preprocessor = ColumnTransformer(transformers=[(\"num\", numeric_transformer, numeric_features)])\n\n model = Pipeline(steps=[(\"precprocessor\", preprocessor), (\"classifier\", classifier)])\n\n model.fit(X_train, y_train)\n\n X_test = X_train[:11]\n\n torch_model = hummingbird.ml.convert(model, \"torch\")\n\n self.assertTrue(torch_model is not None)\n\n np.testing.assert_allclose(\n model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,\n )\n\n @unittest.skipIf(not pandas_installed(), reason=\"Test requires pandas installed\")\n def test_pipeline_column_transformer_string(self):\n \"\"\"\n TODO: Hummingbird does not yet support strings in this context. Should raise error.\n When this feature is complete, change this test.\n \"\"\"\n # fit\n titanic_url = \"https://raw.githubusercontent.com/amueller/scipy-2017-sklearn/091d371/notebooks/datasets/titanic3.csv\"\n data = pandas.read_csv(titanic_url)\n X = data.drop(\"survived\", axis=1)\n y = data[\"survived\"]\n # SimpleImputer on string is not available for string\n # in ONNX-ML specifications.\n # So we do it beforehand.\n X[\"pclass\"].fillna(\"missing\", inplace=True)\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n\n numeric_features = [\"age\", \"fare\"]\n numeric_transformer = Pipeline(steps=[(\"imputer\", SimpleImputer(strategy=\"median\")), (\"scaler\", StandardScaler())])\n\n categorical_features = [\"pclass\"]\n categorical_transformer = Pipeline(steps=[(\"onehot\", OneHotEncoder(handle_unknown=\"ignore\"))])\n\n preprocessor = ColumnTransformer(\n transformers=[\n (\"num\", numeric_transformer, numeric_features),\n (\"cat\", categorical_transformer, categorical_features),\n ]\n )\n\n clf = Pipeline(steps=[(\"preprocessor\", preprocessor), (\"classifier\", LogisticRegression(solver=\"liblinear\"))])\n\n to_drop = {\"parch\", \"sibsp\", \"cabin\", \"ticket\", \"name\", \"body\", \"home.dest\", \"boat\", \"sex\", \"embarked\"}\n\n X_train = X_train.copy()\n X_test = X_test.copy()\n X_train[\"pclass\"] = X_train[\"pclass\"].astype(np.int64)\n X_test[\"pclass\"] = X_test[\"pclass\"].astype(np.int64)\n X_train = X_train.drop(to_drop, axis=1)\n X_test = X_test.drop(to_drop, axis=1)\n\n clf.fit(X_train, y_train)\n\n torch_model = hummingbird.ml.convert(clf, \"torch\", X_test)\n\n self.assertTrue(torch_model is not None)\n\n np.testing.assert_allclose(\n clf.predict(X_test), torch_model.predict(X_test), rtol=1e-06, atol=1e-06,\n )\n\n @unittest.skipIf(not pandas_installed(), reason=\"Test requires pandas installed\")\n def test_pipeline_column_transformer(self):\n iris = datasets.load_iris()\n X = iris.data[:, :3]\n y = iris.target\n X_train = pandas.DataFrame(X, columns=[\"vA\", \"vB\", \"vC\"])\n X_train[\"vcat\"] = X_train[\"vA\"].apply(lambda x: 1 if x > 0.5 else 2)\n X_train[\"vcat2\"] = X_train[\"vB\"].apply(lambda x: 3 if x > 0.5 else 4)\n y_train = y % 2\n numeric_features = [0, 1, 2] # [\"vA\", \"vB\", \"vC\"]\n categorical_features = [3, 4] # [\"vcat\", \"vcat2\"]\n\n classifier = LogisticRegression(\n C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, 
solver=\"liblinear\", tol=1e-3,\n )\n\n numeric_transformer = Pipeline(steps=[(\"scaler\", StandardScaler())])\n\n categorical_transformer = Pipeline(steps=[(\"onehot\", OneHotEncoder(sparse=True, handle_unknown=\"ignore\"))])\n\n preprocessor = ColumnTransformer(\n transformers=[\n (\"num\", numeric_transformer, numeric_features),\n (\"cat\", categorical_transformer, categorical_features),\n ]\n )\n\n model = Pipeline(steps=[(\"precprocessor\", preprocessor), (\"classifier\", classifier)])\n\n model.fit(X_train, y_train)\n\n X_test = X_train[:11]\n\n torch_model = hummingbird.ml.convert(model, \"torch\")\n\n self.assertTrue(torch_model is not None)\n\n np.testing.assert_allclose(\n model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,\n )\n\n @unittest.skipIf(not pandas_installed(), reason=\"Test requires pandas installed\")\n def test_pipeline_column_transformer_pandas(self):\n iris = datasets.load_iris()\n X = iris.data[:, :3]\n y = iris.target\n X_train = pandas.DataFrame(X, columns=[\"vA\", \"vB\", \"vC\"])\n X_train[\"vcat\"] = X_train[\"vA\"].apply(lambda x: 1 if x > 0.5 else 2)\n X_train[\"vcat2\"] = X_train[\"vB\"].apply(lambda x: 3 if x > 0.5 else 4)\n y_train = y % 2\n numeric_features = [0, 1, 2] # [\"vA\", \"vB\", \"vC\"]\n categorical_features = [3, 4] # [\"vcat\", \"vcat2\"]\n\n classifier = LogisticRegression(\n C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver=\"liblinear\", tol=1e-3,\n )\n\n numeric_transformer = Pipeline(steps=[(\"scaler\", StandardScaler())])\n\n categorical_transformer = Pipeline(steps=[(\"onehot\", OneHotEncoder(sparse=True, handle_unknown=\"ignore\"))])\n\n preprocessor = ColumnTransformer(\n transformers=[\n (\"num\", numeric_transformer, numeric_features),\n (\"cat\", categorical_transformer, categorical_features),\n ]\n )\n\n model = Pipeline(steps=[(\"precprocessor\", preprocessor), (\"classifier\", classifier)])\n\n model.fit(X_train, y_train)\n\n X_test = X_train[:11]\n\n torch_model = hummingbird.ml.convert(model, \"torch\", X_test)\n\n self.assertTrue(torch_model is not None)\n\n np.testing.assert_allclose(\n model.predict_proba(X_test), torch_model.predict_proba(X_test), rtol=1e-06, atol=1e-06,\n )\n\n @unittest.skipIf(not pandas_installed(), reason=\"Test requires pandas installed\")\n def test_pipeline_column_transformer_pandas_ts(self):\n iris = datasets.load_iris()\n X = np.array(iris.data[:, :3], np.float32) # If we don't use float32 here, with python 3.5 and torch 1.5.1 will fail.\n y = iris.target\n X_train = pandas.DataFrame(X, columns=[\"vA\", \"vB\", \"vC\"])\n X_train[\"vcat\"] = X_train[\"vA\"].apply(lambda x: 1 if x > 0.5 else 2)\n X_train[\"vcat2\"] = X_train[\"vB\"].apply(lambda x: 3 if x > 0.5 else 4)\n y_train = y % 2\n numeric_features = [0, 1, 2] # [\"vA\", \"vB\", \"vC\"]\n categorical_features = [3, 4] # [\"vcat\", \"vcat2\"]\n\n classifier = LogisticRegression(\n C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver=\"liblinear\", tol=1e-3,\n )\n\n numeric_transformer = Pipeline(steps=[(\"scaler\", StandardScaler())])\n\n categorical_transformer = Pipeline(steps=[(\"onehot\", OneHotEncoder(sparse=True, handle_unknown=\"ignore\"))])\n\n preprocessor = ColumnTransformer(\n transformers=[\n (\"num\", numeric_transformer, numeric_features),\n (\"cat\", categorical_transformer, categorical_features),\n ]\n )\n\n model = Pipeline(steps=[(\"preprocessor\", preprocessor), (\"classifier\", classifier)])\n\n 
model.fit(X_train, y_train)\n\n X_test = X_train[:11]\n\n torch_model = hummingbird.ml.convert(model, \"torch.jit\", X_test)\n\n self.assertTrue(torch_model is not None)\n\n np.testing.assert_allclose(\n model.predict_proba(X_test), torch_model.predict_proba(X_test), rtol=1e-06, atol=1e-06,\n )\n\n @unittest.skipIf(not pandas_installed(), reason=\"Test requires pandas installed\")\n def test_pipeline_column_transformer_weights(self):\n iris = datasets.load_iris()\n X = iris.data[:, :3]\n y = iris.target\n X_train = pandas.DataFrame(X, columns=[\"vA\", \"vB\", \"vC\"])\n X_train[\"vcat\"] = X_train[\"vA\"].apply(lambda x: 1 if x > 0.5 else 2)\n X_train[\"vcat2\"] = X_train[\"vB\"].apply(lambda x: 3 if x > 0.5 else 4)\n y_train = y % 2\n numeric_features = [0, 1, 2] # [\"vA\", \"vB\", \"vC\"]\n categorical_features = [3, 4] # [\"vcat\", \"vcat2\"]\n\n classifier = LogisticRegression(\n C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver=\"liblinear\", tol=1e-3,\n )\n\n numeric_transformer = Pipeline(steps=[(\"scaler\", StandardScaler())])\n\n categorical_transformer = Pipeline(steps=[(\"onehot\", OneHotEncoder(sparse=True, handle_unknown=\"ignore\"))])\n\n preprocessor = ColumnTransformer(\n transformers=[\n (\"num\", numeric_transformer, numeric_features),\n (\"cat\", categorical_transformer, categorical_features),\n ],\n transformer_weights={\"num\": 2, \"cat\": 3},\n )\n\n model = Pipeline(steps=[(\"preprocessor\", preprocessor), (\"classifier\", classifier)])\n\n model.fit(X_train, y_train)\n\n X_test = X_train[:11]\n\n torch_model = hummingbird.ml.convert(model, \"torch\")\n\n self.assertTrue(torch_model is not None)\n\n np.testing.assert_allclose(\n model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,\n )\n\n @unittest.skipIf(not pandas_installed(), reason=\"Test requires pandas installed\")\n def test_pipeline_column_transformer_weights_pandas(self):\n iris = datasets.load_iris()\n X = iris.data[:, :3]\n y = iris.target\n X_train = pandas.DataFrame(X, columns=[\"vA\", \"vB\", \"vC\"])\n X_train[\"vcat\"] = X_train[\"vA\"].apply(lambda x: 1 if x > 0.5 else 2)\n X_train[\"vcat2\"] = X_train[\"vB\"].apply(lambda x: 3 if x > 0.5 else 4)\n y_train = y % 2\n numeric_features = [0, 1, 2] # [\"vA\", \"vB\", \"vC\"]\n categorical_features = [3, 4] # [\"vcat\", \"vcat2\"]\n\n classifier = LogisticRegression(\n C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver=\"liblinear\", tol=1e-3,\n )\n\n numeric_transformer = Pipeline(steps=[(\"scaler\", StandardScaler())])\n\n categorical_transformer = Pipeline(steps=[(\"onehot\", OneHotEncoder(sparse=True, handle_unknown=\"ignore\"))])\n\n preprocessor = ColumnTransformer(\n transformers=[\n (\"num\", numeric_transformer, numeric_features),\n (\"cat\", categorical_transformer, categorical_features),\n ],\n transformer_weights={\"num\": 2, \"cat\": 3},\n )\n\n model = Pipeline(steps=[(\"precprocessor\", preprocessor), (\"classifier\", classifier)])\n\n model.fit(X_train, y_train)\n\n X_test = X_train[:11]\n\n torch_model = hummingbird.ml.convert(model, \"torch\", X_test)\n\n self.assertTrue(torch_model is not None)\n\n np.testing.assert_allclose(\n model.predict_proba(X_test), torch_model.predict_proba(X_test), rtol=1e-06, atol=1e-06,\n )\n\n @unittest.skipIf(not pandas_installed(), reason=\"Test requires pandas installed\")\n def test_pipeline_column_transformer_drop(self):\n iris = datasets.load_iris()\n X = iris.data[:, :3]\n y = 
iris.target\n X_train = pandas.DataFrame(X, columns=[\"vA\", \"vB\", \"vC\"])\n X_train[\"vcat\"] = X_train[\"vA\"].apply(lambda x: 1 if x > 0.5 else 2)\n X_train[\"vcat2\"] = X_train[\"vB\"].apply(lambda x: 3 if x > 0.5 else 4)\n y_train = y % 2\n numeric_features = [0, 1] # [\"vA\", \"vB\"]\n categorical_features = [3, 4] # [\"vcat\", \"vcat2\"]\n\n classifier = LogisticRegression(\n C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver=\"liblinear\", tol=1e-3,\n )\n\n numeric_transformer = Pipeline(steps=[(\"scaler\", StandardScaler())])\n\n categorical_transformer = Pipeline(steps=[(\"onehot\", OneHotEncoder(sparse=True, handle_unknown=\"ignore\"))])\n\n preprocessor = ColumnTransformer(\n transformers=[\n (\"num\", numeric_transformer, numeric_features),\n (\"cat\", categorical_transformer, categorical_features),\n ],\n transformer_weights={\"num\": 2, \"cat\": 3},\n remainder=\"drop\",\n )\n\n model = Pipeline(steps=[(\"precprocessor\", preprocessor), (\"classifier\", classifier)])\n\n model.fit(X_train, y_train)\n\n X_test = X_train[:11]\n\n torch_model = hummingbird.ml.convert(model, \"torch\")\n\n self.assertTrue(torch_model is not None)\n\n np.testing.assert_allclose(\n model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,\n )\n\n @unittest.skipIf(not pandas_installed(), reason=\"Test requires pandas installed\")\n def test_pipeline_column_transformer_drop_noweights(self):\n iris = datasets.load_iris()\n X = iris.data[:, :3]\n y = iris.target\n X_train = pandas.DataFrame(X, columns=[\"vA\", \"vB\", \"vC\"])\n X_train[\"vcat\"] = X_train[\"vA\"].apply(lambda x: 1 if x > 0.5 else 2)\n X_train[\"vcat2\"] = X_train[\"vB\"].apply(lambda x: 3 if x > 0.5 else 4)\n y_train = y % 2\n numeric_features = [0, 1] # [\"vA\", \"vB\"]\n categorical_features = [3, 4] # [\"vcat\", \"vcat2\"]\n\n classifier = LogisticRegression(\n C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver=\"liblinear\", tol=1e-3,\n )\n\n numeric_transformer = Pipeline(steps=[(\"scaler\", StandardScaler())])\n\n categorical_transformer = Pipeline(steps=[(\"onehot\", OneHotEncoder(sparse=True, handle_unknown=\"ignore\"))])\n\n preprocessor = ColumnTransformer(\n transformers=[\n (\"num\", numeric_transformer, numeric_features),\n (\"cat\", categorical_transformer, categorical_features),\n ],\n remainder=\"drop\",\n )\n\n model = Pipeline(steps=[(\"precprocessor\", preprocessor), (\"classifier\", classifier)])\n\n model.fit(X_train, y_train)\n\n X_test = X_train[:11]\n\n torch_model = hummingbird.ml.convert(model, \"torch\")\n\n self.assertTrue(torch_model is not None)\n\n np.testing.assert_allclose(\n model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,\n )\n\n @unittest.skipIf(not pandas_installed(), reason=\"Test requires pandas installed\")\n def test_pipeline_column_transformer_passthrough(self):\n iris = datasets.load_iris()\n X = iris.data[:, :3]\n y = iris.target\n X_train = pandas.DataFrame(X, columns=[\"vA\", \"vB\", \"vC\"])\n X_train[\"vcat\"] = X_train[\"vA\"].apply(lambda x: 1 if x > 0.5 else 2)\n X_train[\"vcat2\"] = X_train[\"vB\"].apply(lambda x: 3 if x > 0.5 else 4)\n y_train = y % 2\n numeric_features = [0, 1] # [\"vA\", \"vB\"]\n categorical_features = [3, 4] # [\"vcat\", \"vcat2\"]\n\n classifier = LogisticRegression(\n C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver=\"liblinear\", tol=1e-3,\n )\n\n 
numeric_transformer = Pipeline(steps=[(\"scaler\", StandardScaler())])\n\n categorical_transformer = Pipeline(steps=[(\"onehot\", OneHotEncoder(sparse=True, handle_unknown=\"ignore\"))])\n\n preprocessor = ColumnTransformer(\n transformers=[\n (\"num\", numeric_transformer, numeric_features),\n (\"cat\", categorical_transformer, categorical_features),\n ],\n transformer_weights={\"num\": 2, \"cat\": 3},\n remainder=\"passthrough\",\n )\n\n model = Pipeline(steps=[(\"precprocessor\", preprocessor), (\"classifier\", classifier)])\n\n model.fit(X_train, y_train)\n\n X_test = X_train[:11]\n\n torch_model = hummingbird.ml.convert(model, \"torch\")\n\n self.assertTrue(torch_model is not None)\n\n np.testing.assert_allclose(\n model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,\n )\n\n @unittest.skipIf(not pandas_installed(), reason=\"Test requires pandas installed\")\n def test_pipeline_column_transformer_passthrough_noweights(self):\n iris = datasets.load_iris()\n X = iris.data[:, :3]\n y = iris.target\n X_train = pandas.DataFrame(X, columns=[\"vA\", \"vB\", \"vC\"])\n X_train[\"vcat\"] = X_train[\"vA\"].apply(lambda x: 1 if x > 0.5 else 2)\n X_train[\"vcat2\"] = X_train[\"vB\"].apply(lambda x: 3 if x > 0.5 else 4)\n y_train = y % 2\n numeric_features = [0, 1] # [\"vA\", \"vB\"]\n categorical_features = [3, 4] # [\"vcat\", \"vcat2\"]\n\n classifier = LogisticRegression(\n C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver=\"liblinear\", tol=1e-3,\n )\n\n numeric_transformer = Pipeline(steps=[(\"scaler\", StandardScaler())])\n\n categorical_transformer = Pipeline(steps=[(\"onehot\", OneHotEncoder(sparse=True, handle_unknown=\"ignore\"))])\n\n preprocessor = ColumnTransformer(\n transformers=[\n (\"num\", numeric_transformer, numeric_features),\n (\"cat\", categorical_transformer, categorical_features),\n ],\n remainder=\"passthrough\",\n )\n\n model = Pipeline(steps=[(\"precprocessor\", preprocessor), (\"classifier\", classifier)])\n\n model.fit(X_train, y_train)\n\n X_test = X_train[:11]\n\n torch_model = hummingbird.ml.convert(model, \"torch\")\n\n self.assertTrue(torch_model is not None)\n\n np.testing.assert_allclose(\n model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,\n )\n\n @unittest.skipIf(not pandas_installed(), reason=\"Test requires pandas installed\")\n def test_pipeline_column_transformer_passthrough_slice(self):\n iris = datasets.load_iris()\n X = iris.data[:, :3]\n y = iris.target\n X_train = pandas.DataFrame(X, columns=[\"vA\", \"vB\", \"vC\"])\n X_train[\"vcat\"] = X_train[\"vA\"].apply(lambda x: 1 if x > 0.5 else 2)\n X_train[\"vcat2\"] = X_train[\"vB\"].apply(lambda x: 3 if x > 0.5 else 4)\n y_train = y % 2\n numeric_features = slice(0, 1) # [\"vA\", \"vB\"]\n categorical_features = slice(3, 4) # [\"vcat\", \"vcat2\"]\n\n classifier = LogisticRegression(\n C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver=\"liblinear\", tol=1e-3,\n )\n\n numeric_transformer = Pipeline(steps=[(\"scaler\", StandardScaler())])\n\n categorical_transformer = Pipeline(steps=[(\"onehot\", OneHotEncoder(sparse=True, handle_unknown=\"ignore\"))])\n\n preprocessor = ColumnTransformer(\n transformers=[\n (\"num\", numeric_transformer, numeric_features),\n (\"cat\", categorical_transformer, categorical_features),\n ],\n transformer_weights={\"num\": 2, \"cat\": 3},\n remainder=\"passthrough\",\n )\n\n model = 
Pipeline(steps=[(\"precprocessor\", preprocessor), (\"classifier\", classifier)])\n\n model.fit(X_train, y_train)\n\n X_test = X_train[:11]\n\n torch_model = hummingbird.ml.convert(model, \"torch\")\n\n self.assertTrue(torch_model is not None)\n\n np.testing.assert_allclose(\n model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,\n )\n\n # Taken from https://github.com/microsoft/hummingbird/issues/388https://github.com/microsoft/hummingbird/issues/388\n def test_pipeline_pca_rf(self):\n X, y = make_regression(n_samples=1000, n_features=8, n_informative=5, n_targets=1, random_state=0, shuffle=True)\n pca = PCA(n_components=8, svd_solver=\"randomized\", whiten=True)\n clf = make_pipeline(StandardScaler(), pca, RandomForestRegressor(n_estimators=10, max_depth=30, random_state=0))\n clf.fit(X, y)\n\n model = hummingbird.ml.convert(clf, \"pytorch\")\n\n prediction_sk = clf.predict([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])\n\n prediction_hb = model.predict([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])\n\n np.testing.assert_allclose(prediction_sk, prediction_hb, rtol=1e-06, atol=1e-06)\n\n @unittest.skipIf(not onnx_runtime_installed(), reason=\"Test requires ORT installed\")\n def test_pipeline_many_inputs(self):\n n_features = 18\n X = np.random.rand(100, n_features)\n y = np.random.randint(1000, size=100)\n\n scaler_transformer = Pipeline(steps=[(\"scaler\", StandardScaler())])\n preprocessor = ColumnTransformer(transformers=[(\"scaling\", scaler_transformer, list(range(n_features)))])\n model = RandomForestRegressor(n_estimators=10, max_depth=9)\n pipeline = Pipeline(steps=[(\"preprocessor\", preprocessor), (\"model\", model)])\n\n pipeline.fit(X, y)\n\n X_test = tuple(np.split(X, n_features, axis=1))\n\n hb_model = hummingbird.ml.convert(pipeline, \"onnx\", X_test)\n\n assert len(hb_model.model.graph.input) == n_features\n\n np.testing.assert_allclose(\n pipeline.predict(X), np.array(hb_model.predict(X_test)).flatten(), rtol=1e-06, atol=1e-06,\n )\n\n @unittest.skipIf(not onnx_runtime_installed(), reason=\"Test requires ORT installed\")\n def test_pipeline_many_inputs_with_schema(self):\n n_features = 5\n X = np.random.rand(100, n_features)\n y = np.random.randint(1000, size=100)\n input_column_names = [\"A\", \"B\", \"C\", \"D\", \"E\"]\n output_column_names = [\"score\"]\n\n scaler_transformer = Pipeline(steps=[(\"scaler\", StandardScaler())])\n preprocessor = ColumnTransformer(transformers=[(\"scaling\", scaler_transformer, list(range(n_features)))])\n model = RandomForestRegressor(n_estimators=10, max_depth=9)\n pipeline = Pipeline(steps=[(\"preprocessor\", preprocessor), (\"model\", model)])\n\n pipeline.fit(X, y)\n\n X_test = tuple(np.split(X, n_features, axis=1))\n extra_config = {constants.INPUT_NAMES: input_column_names, constants.OUTPUT_NAMES: output_column_names}\n\n hb_model = hummingbird.ml.convert(pipeline, \"onnx\", X_test, extra_config=extra_config)\n\n graph_inputs = [input.name for input in hb_model.model.graph.input]\n graph_outputs = [output.name for output in hb_model.model.graph.output]\n\n assert len(hb_model.model.graph.input) == n_features\n assert graph_inputs == input_column_names\n assert graph_outputs == output_column_names\n\n @unittest.skipIf(StackingClassifier is None, reason=\"StackingClassifier not available in scikit-learn < 0.22\")\n def test_stacking_classifier(self):\n X, y = load_iris(return_X_y=True)\n estimators = [\n (\"rf\", RandomForestClassifier(n_estimators=10, random_state=42)),\n (\"svr\", 
make_pipeline(StandardScaler(), LogisticRegression(random_state=42))),\n ]\n clf = StackingClassifier(estimators=estimators, final_estimator=LogisticRegression())\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=42)\n clf.fit(X_train, y_train)\n\n hb_model = hummingbird.ml.convert(clf, \"torch\")\n\n np.testing.assert_allclose(\n clf.predict(X_test), hb_model.predict(X_test), rtol=1e-06, atol=1e-06,\n )\n\n @unittest.skipIf(StackingClassifier is None, reason=\"StackingClassifier not available in scikit-learn < 0.22\")\n def test_stacking_classifier_passthrough(self):\n X, y = load_iris(return_X_y=True)\n estimators = [\n (\"rf\", RandomForestClassifier(n_estimators=10, random_state=42)),\n (\"svr\", make_pipeline(StandardScaler(), LogisticRegression(random_state=42))),\n ]\n clf = StackingClassifier(estimators=estimators, final_estimator=LogisticRegression(), passthrough=True)\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=42)\n clf.fit(X_train, y_train)\n\n hb_model = hummingbird.ml.convert(clf, \"torch\")\n\n np.testing.assert_allclose(\n clf.predict(X_test), hb_model.predict(X_test), rtol=1e-06, atol=1e-06,\n )\n\n @unittest.skipIf(StackingClassifier is None, reason=\"StackingClassifier not available in scikit-learn < 0.22\")\n def test_stacking_classifier_decision_function(self):\n X, y = load_iris(return_X_y=True)\n estimators = [\n (\"rf\", RandomForestClassifier(n_estimators=10, random_state=42)),\n (\"svr\", make_pipeline(StandardScaler(), LinearSVC(random_state=42))),\n ]\n clf = StackingClassifier(estimators=estimators, final_estimator=LogisticRegression())\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=42)\n clf.fit(X_train, y_train)\n\n self.assertRaises(ValueError, hummingbird.ml.convert, clf, \"torch\")\n\n @unittest.skipIf(StackingClassifier is None, reason=\"StackingRegressor not available in scikit-learn < 0.22\")\n def test_stacking_regressor(self):\n X, y = load_diabetes(return_X_y=True)\n estimators = [(\"lr\", RidgeCV()), (\"svr\", LinearSVR(random_state=42))]\n reg = StackingRegressor(estimators=estimators, final_estimator=RandomForestRegressor(n_estimators=10, random_state=42))\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)\n reg.fit(X_train, y_train)\n\n hb_model = hummingbird.ml.convert(reg, \"torch\")\n\n np.testing.assert_allclose(\n reg.predict(X_test), hb_model.predict(X_test), rtol=1e-06, atol=1e-06,\n )\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"\"\"\"\nTests lightgbm->onnxmltools->hb conversion for lightgbm models.\n\"\"\"\nimport unittest\nimport warnings\n\nimport sys\nimport os\nimport pickle\nimport numpy as np\nfrom onnxconverter_common.data_types import FloatTensorType\n\nfrom hummingbird.ml import convert\nfrom hummingbird.ml import constants\nfrom hummingbird.ml._utils import onnx_ml_tools_installed, onnx_runtime_installed, lightgbm_installed\n\nif lightgbm_installed():\n import lightgbm as lgb\nif onnx_runtime_installed():\n import onnxruntime as ort\nif onnx_ml_tools_installed():\n from onnxmltools.convert import convert_lightgbm\n\n\nclass TestONNXLightGBMConverter(unittest.TestCase):\n def __init__(self, *args, **kwargs):\n super(TestONNXLightGBMConverter, self).__init__(*args, **kwargs)\n\n # Base test implementation comparing ONNXML and ONNX models.\n def _test_lgbm(self, X, model, extra_config={}):\n # Create ONNX-ML model\n onnx_ml_model = convert_lightgbm(\n model, initial_types=[(\"input\", FloatTensorType([X.shape[0], X.shape[1]]))], target_opset=9\n )\n\n # Create ONNX model\n onnx_model = convert(onnx_ml_model, \"onnx\", extra_config=extra_config)\n\n # Get the predictions for the ONNX-ML model\n session = ort.InferenceSession(onnx_ml_model.SerializeToString())\n output_names = [session.get_outputs()[i].name for i in range(len(session.get_outputs()))]\n onnx_ml_pred = [[] for i in range(len(output_names))]\n inputs = {session.get_inputs()[0].name: X}\n pred = session.run(output_names, inputs)\n for i in range(len(output_names)):\n if \"label\" in output_names[i]:\n onnx_ml_pred[1] = pred[i]\n else:\n onnx_ml_pred[0] = pred[i]\n\n # Get the predictions for the ONNX model\n onnx_pred = [[] for i in range(len(output_names))]\n if len(output_names) == 1: # regression\n onnx_pred = onnx_model.predict(X)\n else: # classification\n onnx_pred[0] = onnx_model.predict_proba(X)\n onnx_pred[1] = onnx_model.predict(X)\n\n return onnx_ml_pred, onnx_pred, output_names\n\n # Utility function for testing regression models.\n def _test_regressor(self, X, model, rtol=1e-06, atol=1e-06, extra_config={}):\n onnx_ml_pred, onnx_pred, output_names = self._test_lgbm(X, model, extra_config)\n\n # Check that predicted values match\n np.testing.assert_allclose(onnx_ml_pred[0].ravel(), onnx_pred, rtol=rtol, atol=atol)\n\n # Utility function for testing classification models.\n def _test_classifier(self, X, model, rtol=1e-06, atol=1e-06, extra_config={}):\n onnx_ml_pred, onnx_pred, output_names = self._test_lgbm(X, model, extra_config)\n\n np.testing.assert_allclose(onnx_ml_pred[1], onnx_pred[1], rtol=rtol, atol=atol) # labels\n np.testing.assert_allclose(\n list(map(lambda x: list(x.values()), onnx_ml_pred[0])), onnx_pred[0], rtol=rtol, atol=atol\n ) # probs\n\n # Check that ONNXML models can also target other backends.\n @unittest.skipIf(\n not (onnx_ml_tools_installed() and onnx_runtime_installed()), reason=\"ONNXML test require ONNX, ORT and ONNXMLTOOLS\"\n )\n @unittest.skipIf(not lightgbm_installed(), reason=\"LightGBM test requires LightGBM installed\")\n def test_lightgbm_onnx_pytorch(self):\n warnings.filterwarnings(\"ignore\")\n X = [[0, 1], [1, 1], [2, 0]]\n X = np.array(X, dtype=np.float32)\n y = np.array([100, -10, 50], dtype=np.float32)\n model = lgb.LGBMRegressor(n_estimators=3, min_child_samples=1)\n model.fit(X, y)\n\n # Create ONNX-ML model\n onnx_ml_model = convert_lightgbm(\n model, initial_types=[(\"input\", FloatTensorType([X.shape[0], X.shape[1]]))], target_opset=9\n )\n\n pt_model = convert(onnx_ml_model, 
\"torch\", X)\n assert pt_model\n\n # Get the predictions for the ONNX-ML model\n session = ort.InferenceSession(onnx_ml_model.SerializeToString())\n output_names = [session.get_outputs()[i].name for i in range(len(session.get_outputs()))]\n onnx_ml_pred = [[] for i in range(len(output_names))]\n inputs = {session.get_inputs()[0].name: X}\n onnx_ml_pred = session.run(output_names, inputs)\n\n np.testing.assert_allclose(onnx_ml_pred[0].flatten(), pt_model.predict(X))\n\n # Basic regression test.\n @unittest.skipIf(\n not (onnx_ml_tools_installed() and onnx_runtime_installed()), reason=\"ONNXML test require ONNX, ORT and ONNXMLTOOLS\"\n )\n @unittest.skipIf(not lightgbm_installed(), reason=\"LightGBM test requires LightGBM installed\")\n def test_lgbm_onnxml_model_regressor(self):\n warnings.filterwarnings(\"ignore\")\n n_features = 28\n n_total = 100\n np.random.seed(0)\n X = np.random.rand(n_total, n_features)\n X = np.array(X, dtype=np.float32)\n y = np.random.randint(n_total, size=n_total)\n\n # Create LightGBM model\n model = lgb.LGBMRegressor()\n model.fit(X, y)\n import platform\n\n # TODO bug on newer macOS versions?\n if platform.system() == \"Darwin\":\n self._test_regressor(X, model, rtol=1e-05, atol=1e-04)\n else:\n self._test_regressor(X, model)\n\n # Regression test with 3 estimators (taken from ONNXMLTOOLS).\n @unittest.skipIf(\n not (onnx_ml_tools_installed() and onnx_runtime_installed()), reason=\"ONNXML test require ONNX, ORT and ONNXMLTOOLS\"\n )\n @unittest.skipIf(not lightgbm_installed(), reason=\"LightGBM test requires LightGBM installed\")\n def test_lightgbm_regressor(self):\n warnings.filterwarnings(\"ignore\")\n X = [[0, 1], [1, 1], [2, 0]]\n X = np.array(X, dtype=np.float32)\n y = np.array([100, -10, 50], dtype=np.float32)\n model = lgb.LGBMRegressor(n_estimators=3, min_child_samples=1)\n model.fit(X, y)\n self._test_regressor(X, model)\n\n # Regression test with 1 estimator (taken from ONNXMLTOOLS).\n @unittest.skipIf(\n not (onnx_ml_tools_installed() and onnx_runtime_installed()), reason=\"ONNXML test require ONNX, ORT and ONNXMLTOOLS\"\n )\n @unittest.skipIf(not lightgbm_installed(), reason=\"LightGBM test requires LightGBM installed\")\n def test_lightgbm_regressor1(self):\n warnings.filterwarnings(\"ignore\")\n model = lgb.LGBMRegressor(n_estimators=1, min_child_samples=1)\n X = [[0, 1], [1, 1], [2, 0]]\n X = np.array(X, dtype=np.float32)\n y = np.array([100, -10, 50], dtype=np.float32)\n model.fit(X, y)\n self._test_regressor(X, model)\n\n # Regression test with 2 estimators (taken from ONNXMLTOOLS).\n @unittest.skipIf(\n not (onnx_ml_tools_installed() and onnx_runtime_installed()), reason=\"ONNXML test require ONNX, ORT and ONNXMLTOOLS\"\n )\n @unittest.skipIf(not lightgbm_installed(), reason=\"LightGBM test requires LightGBM installed\")\n def test_lightgbm_regressor2(self):\n warnings.filterwarnings(\"ignore\")\n model = lgb.LGBMRegressor(n_estimators=2, max_depth=1, min_child_samples=1)\n X = [[0, 1], [1, 1], [2, 0]]\n X = np.array(X, dtype=np.float32)\n y = np.array([100, -10, 50], dtype=np.float32)\n model.fit(X, y)\n self._test_regressor(X, model)\n\n # Regression test with gbdt boosting type (taken from ONNXMLTOOLS).\n @unittest.skipIf(\n not (onnx_ml_tools_installed() and onnx_runtime_installed()), reason=\"ONNXML test require ONNX, ORT and ONNXMLTOOLS\"\n )\n @unittest.skipIf(not lightgbm_installed(), reason=\"LightGBM test requires LightGBM installed\")\n def test_lightgbm_booster_regressor(self):\n warnings.filterwarnings(\"ignore\")\n X = [[0, 
1], [1, 1], [2, 0]]\n X = np.array(X, dtype=np.float32)\n y = [0, 1, 1.1]\n data = lgb.Dataset(X, label=y)\n model = lgb.train(\n {\"boosting_type\": \"gbdt\", \"objective\": \"regression\", \"n_estimators\": 3, \"min_child_samples\": 1, \"max_depth\": 1},\n data,\n )\n self._test_regressor(X, model)\n\n # Binary classication test.\n @unittest.skipIf(\n not (onnx_ml_tools_installed() and onnx_runtime_installed()), reason=\"ONNXML test require ONNX, ORT and ONNXMLTOOLS\"\n )\n @unittest.skipIf(not lightgbm_installed(), reason=\"LightGBM test requires LightGBM installed\")\n def test_lgbm_onnxml_model_binary(self):\n warnings.filterwarnings(\"ignore\")\n n_features = 28\n n_total = 100\n np.random.seed(0)\n X = np.random.rand(n_total, n_features)\n X = np.array(X, dtype=np.float32)\n y = np.random.randint(2, size=n_total)\n\n # Create LightGBM model\n model = lgb.LGBMClassifier()\n model.fit(X, y)\n self._test_classifier(X, model)\n\n # Binary classication test with float64.\n @unittest.skipIf(\n not (onnx_ml_tools_installed() and onnx_runtime_installed()), reason=\"ONNXML test require ONNX, ORT and ONNXMLTOOLS\"\n )\n @unittest.skipIf(not lightgbm_installed(), reason=\"LightGBM test requires LightGBM installed\")\n def test_lgbm_onnxml_model_binary_float64(self):\n warnings.filterwarnings(\"ignore\")\n n_features = 28\n n_total = 100\n np.random.seed(0)\n X = np.random.rand(n_total, n_features)\n X = np.array(X, dtype=np.float32)\n y = np.random.randint(2, size=n_total)\n\n # Create LightGBM model\n model = lgb.LGBMClassifier()\n model.fit(X, y)\n\n onnx_model = convert(model, \"onnx\", X)\n\n np.testing.assert_allclose(model.predict(X), onnx_model.predict(X))\n\n # Binary classification test with 3 estimators (taken from ONNXMLTOOLS).\n @unittest.skipIf(\n not (onnx_ml_tools_installed() and onnx_runtime_installed()), reason=\"ONNXML test require ONNX, ORT and ONNXMLTOOLS\"\n )\n @unittest.skipIf(not lightgbm_installed(), reason=\"LightGBM test requires LightGBM installed\")\n def test_lightgbm_classifier(self):\n warnings.filterwarnings(\"ignore\")\n model = lgb.LGBMClassifier(n_estimators=3, min_child_samples=1)\n X = [[0, 1], [1, 1], [2, 0]]\n X = np.array(X, dtype=np.float32)\n y = [0, 1, 0]\n model.fit(X, y)\n self._test_classifier(X, model)\n\n # Binary classification test with 3 estimators zipmap (taken from ONNXMLTOOLS).\n @unittest.skipIf(\n not (onnx_ml_tools_installed() and onnx_runtime_installed()), reason=\"ONNXML test require ONNX, ORT and ONNXMLTOOLS\"\n )\n @unittest.skipIf(not lightgbm_installed(), reason=\"LightGBM test requires LightGBM installed\")\n def test_lightgbm_classifier_zipmap(self):\n warnings.filterwarnings(\"ignore\")\n X = [[0, 1], [1, 1], [2, 0], [1, 2]]\n X = np.array(X, dtype=np.float32)\n y = [0, 1, 0, 1]\n model = lgb.LGBMClassifier(n_estimators=3, min_child_samples=1)\n model.fit(X, y)\n self._test_classifier(X, model)\n\n # Binary classification test with 3 estimators and selecting boosting type (taken from ONNXMLTOOLS).\n @unittest.skipIf(\n not (onnx_ml_tools_installed() and onnx_runtime_installed()), reason=\"ONNXML test require ONNX, ORT and ONNXMLTOOLS\"\n )\n @unittest.skipIf(not lightgbm_installed(), reason=\"LightGBM test requires LightGBM installed\")\n def test_lightgbm_booster_classifier(self):\n warnings.filterwarnings(\"ignore\")\n X = [[0, 1], [1, 1], [2, 0], [1, 2]]\n X = np.array(X, dtype=np.float32)\n y = [0, 1, 0, 1]\n data = lgb.Dataset(X, label=y)\n model = lgb.train({\"boosting_type\": \"gbdt\", \"objective\": \"binary\", 
\"n_estimators\": 3, \"min_child_samples\": 1}, data)\n self._test_classifier(X, model)\n\n # Binary classification test with 3 estimators and selecting boosting type zipmap (taken from ONNXMLTOOLS).\n @unittest.skipIf(\n not (onnx_ml_tools_installed() and onnx_runtime_installed()), reason=\"ONNXML test require ONNX, ORT and ONNXMLTOOLS\"\n )\n @unittest.skipIf(not lightgbm_installed(), reason=\"LightGBM test requires LightGBM installed\")\n def test_lightgbm_booster_classifier_zipmap(self):\n warnings.filterwarnings(\"ignore\")\n X = [[0, 1], [1, 1], [2, 0], [1, 2]]\n X = np.array(X, dtype=np.float32)\n y = [0, 1, 0, 1]\n data = lgb.Dataset(X, label=y)\n model = lgb.train({\"boosting_type\": \"gbdt\", \"objective\": \"binary\", \"n_estimators\": 3, \"min_child_samples\": 1}, data)\n self._test_classifier(X, model)\n\n # Multiclass classification test.\n @unittest.skipIf(\n not (onnx_ml_tools_installed() and onnx_runtime_installed()), reason=\"ONNXML test require ONNX, ORT and ONNXMLTOOLS\"\n )\n @unittest.skipIf(not lightgbm_installed(), reason=\"LightGBM test requires LightGBM installed\")\n def test_lgbm_onnxml_model_multi(self):\n warnings.filterwarnings(\"ignore\")\n n_features = 28\n n_total = 100\n np.random.seed(0)\n X = np.random.rand(n_total, n_features)\n X = np.array(X, dtype=np.float32)\n y = np.random.randint(3, size=n_total)\n\n # Create LightGBM model\n model = lgb.LGBMClassifier()\n model.fit(X, y)\n self._test_classifier(X, model)\n\n # Multiclass classification test with 3 estimators (taken from ONNXMLTOOLS).\n @unittest.skipIf(\n not (onnx_ml_tools_installed() and onnx_runtime_installed()), reason=\"ONNXML test require ONNX, ORT and ONNXMLTOOLS\"\n )\n @unittest.skipIf(not lightgbm_installed(), reason=\"LightGBM test requires LightGBM installed\")\n def test_lightgbm_classifier_multi(self):\n warnings.filterwarnings(\"ignore\")\n model = lgb.LGBMClassifier(n_estimators=3, min_child_samples=1)\n X = [[0, 1], [1, 1], [2, 0], [0.5, 0.5], [1.1, 1.1], [2.1, 0.1]]\n X = np.array(X, dtype=np.float32)\n y = [0, 1, 2, 1, 1, 2]\n model.fit(X, y)\n self._test_classifier(X, model)\n\n # Multiclass classification test with 3 estimators and selecting boosting type (taken from ONNXMLTOOLS).\n @unittest.skipIf(\n not (onnx_ml_tools_installed() and onnx_runtime_installed()), reason=\"ONNXML test require ONNX, ORT and ONNXMLTOOLS\"\n )\n @unittest.skipIf(not lightgbm_installed(), reason=\"LightGBM test requires LightGBM installed\")\n def test_lightgbm_booster_multi_classifier(self):\n warnings.filterwarnings(\"ignore\")\n X = [[0, 1], [1, 1], [2, 0], [1, 2], [-1, 2], [1, -2]]\n X = np.array(X, dtype=np.float32)\n y = [0, 1, 0, 1, 2, 2]\n data = lgb.Dataset(X, label=y)\n model = lgb.train(\n {\"boosting_type\": \"gbdt\", \"objective\": \"multiclass\", \"n_estimators\": 3, \"min_child_samples\": 1, \"num_class\": 3},\n data,\n )\n self._test_classifier(X, model)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# -------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n# --------------------------------------------------------------------------\n\n\"\"\"\nConverters for Sklearn's GradientBoosting models.\n\"\"\"\n\nimport numpy as np\nfrom onnxconverter_common.registration import register_converter\n\nfrom .. import constants\nfrom .._gbdt_commons import convert_gbdt_common, convert_gbdt_classifier_common\nfrom .._tree_commons import get_parameters_for_sklearn_common, get_parameters_for_tree_trav_sklearn, TreeParameters\n\n\ndef _get_n_features(model):\n try:\n return model.n_features_\n except AttributeError:\n # HistGradientBoosting\n return model._n_features\n\n\ndef _get_parameters_hist_gbdt(trees, extra_config):\n \"\"\"\n Extract the tree parameters from SklearnHistGradientBoostingClassifier trees\n Args:\n trees: The information representing a tree (ensemble)\n Returns: The tree parameters wrapped into an instance of `operator_converters._tree_commons_TreeParameters`\n \"\"\"\n features = [n[\"feature_idx\"] for n in trees.nodes]\n try:\n thresholds = [n[\"threshold\"] if n[\"threshold\"] != 0 else -1 for n in trees.nodes]\n except ValueError:\n # newer version of scikit-learn\n thresholds = [n[\"num_threshold\"] if n[\"num_threshold\"] != 0 else -1 for n in trees.nodes]\n lefts = [n[\"left\"] if n[\"left\"] != 0 else -1 for n in trees.nodes]\n rights = [n[\"right\"] if n[\"right\"] != 0 else -1 for n in trees.nodes]\n values = [[n[\"value\"]] if n[\"value\"] != 0 else [-1] for n in trees.nodes]\n\n return TreeParameters(lefts, rights, features, thresholds, values)\n\n\ndef convert_sklearn_gbdt_classifier(operator, device, extra_config):\n \"\"\"\n Converter for `sklearn.ensemble.GradientBoostingClassifier`\n\n Args:\n operator: An operator wrapping a `sklearn.ensemble.GradientBoostingClassifier`\n or `sklearn.ensemble.HistGradientBoostingClassifier` model\n device: String defining the type of device the converted operator should be run on\n extra_config: Extra configuration used to select the best conversion strategy\n\n Returns:\n A PyTorch model\n \"\"\"\n assert operator is not None, \"Cannot convert None operator\"\n\n # Get tree information out of the operator.\n tree_infos = operator.raw_operator.estimators_\n # GBDT does not scale the value using the learning rate upfront, we have to do it.\n extra_config[constants.LEARNING_RATE] = operator.raw_operator.learning_rate\n # GBDT does not normalize values upfront, we have to do it.\n extra_config[constants.GET_PARAMETERS_FOR_TREE_TRAVERSAL] = get_parameters_for_tree_trav_sklearn\n\n n_features = _get_n_features(operator.raw_operator)\n classes = operator.raw_operator.classes_.tolist()\n n_classes = len(classes)\n\n # Analyze classes.\n if not all(isinstance(c, int) for c in classes):\n raise RuntimeError(\"GBDT Classifier translation only supports integer class labels.\")\n if n_classes == 2:\n n_classes -= 1\n\n # Reshape the tree_infos into hummingbird gbdt internal format.\n tree_infos = [tree_infos[i][j] for j in range(n_classes) for i in range(len(tree_infos))]\n\n # Get the value for Alpha.\n if hasattr(operator.raw_operator, \"init\"):\n if operator.raw_operator.init == \"zero\":\n base_prediction = [[0.0]]\n elif operator.raw_operator.init is None:\n if n_classes == 1:\n base_prediction = [\n [np.log(operator.raw_operator.init_.class_prior_[1] / (1 - 
operator.raw_operator.init_.class_prior_[1]))]\n ]\n else:\n base_prediction = [[np.log(operator.raw_operator.init_.class_prior_[i]) for i in range(n_classes)]]\n else:\n raise RuntimeError(\"Custom initializers for GBDT are not yet supported in Hummingbird.\")\n elif hasattr(operator.raw_operator, \"_baseline_prediction\"):\n if n_classes == 1:\n base_prediction = [[operator.raw_operator._baseline_prediction]]\n else:\n base_prediction = np.array([operator.raw_operator._baseline_prediction.flatten().tolist()])\n\n extra_config[constants.BASE_PREDICTION] = base_prediction\n extra_config[constants.REORDER_TREES] = False\n\n return convert_gbdt_classifier_common(\n operator, tree_infos, get_parameters_for_sklearn_common, n_features, n_classes, classes, extra_config\n )\n\n\ndef convert_sklearn_gbdt_regressor(operator, device, extra_config):\n \"\"\"\n Converter for `sklearn.ensemble.GradientBoostingRegressor`.\n\n Args:\n operator: An operator wrapping a `sklearn.ensemble.GradientBoostingRegressor` or\n `sklearn.ensemble.HistGradientBoostingRegressor` model\n device: String defining the type of device the converted operator should be run on\n extra_config: Extra configuration used to select the best conversion strategy\n\n Returns:\n A PyTorch model\n \"\"\"\n assert operator is not None, \"Cannot convert None operator\"\n\n # Get tree information out of the operator.\n tree_infos = operator.raw_operator.estimators_.ravel().tolist()\n n_features = _get_n_features(operator.raw_operator)\n extra_config[constants.LEARNING_RATE] = operator.raw_operator.learning_rate\n # For sklearn models we need to massage the parameters a bit before generating the parameters for tree_trav.\n extra_config[constants.GET_PARAMETERS_FOR_TREE_TRAVERSAL] = get_parameters_for_tree_trav_sklearn\n\n # Get the value for Alpha.\n if operator.raw_operator.init == \"zero\":\n base_prediction = [[0.0]]\n elif operator.raw_operator.init is None:\n base_prediction = operator.raw_operator.init_.constant_.tolist()\n else:\n raise RuntimeError(\"Custom initializers for GBDT are not yet supported in Hummingbird.\")\n\n extra_config[constants.BASE_PREDICTION] = base_prediction\n\n return convert_gbdt_common(operator, tree_infos, get_parameters_for_sklearn_common, n_features, None, extra_config)\n\n\ndef convert_sklearn_hist_gbdt_classifier(operator, device, extra_config):\n \"\"\"\n Converter for `sklearn.ensemble.HistGradientBoostingClassifier`\n\n Args:\n operator: An operator wrapping a `sklearn.ensemble.HistGradientBoostingClassifier` model\n device: String defining the type of device the converted operator should be run on\n extra_config: Extra configuration used to select the best conversion strategy\n\n Returns:\n A PyTorch model\n \"\"\"\n assert operator is not None, \"Cannot convert None operator\"\n\n # Get tree information out of the operator.\n tree_infos = operator.raw_operator._predictors\n n_features = _get_n_features(operator.raw_operator)\n classes = operator.raw_operator.classes_.tolist()\n n_classes = len(classes)\n\n # Analyze classes.\n if not all(isinstance(c, int) for c in classes):\n raise RuntimeError(\"GBDT Classifier translation only supports integer class labels.\")\n if n_classes == 2:\n n_classes -= 1\n\n # Reshape the tree_infos to a more generic format.\n tree_infos = [tree_infos[i][j] for j in range(n_classes) for i in range(len(tree_infos))]\n\n # Get the value for Alpha.\n if n_classes == 1:\n base_prediction = [[operator.raw_operator._baseline_prediction]]\n else:\n base_prediction = 
np.array([operator.raw_operator._baseline_prediction.flatten().tolist()])\n\n extra_config[constants.BASE_PREDICTION] = base_prediction\n extra_config[constants.REORDER_TREES] = False\n\n return convert_gbdt_classifier_common(\n operator, tree_infos, _get_parameters_hist_gbdt, n_features, n_classes, classes, extra_config\n )\n\n\ndef convert_sklearn_hist_gbdt_regressor(operator, device, extra_config):\n \"\"\"\n Converter for `sklearn.ensemble.HistGradientBoostingRegressor`\n\n Args:\n operator: An operator wrapping a `sklearn.ensemble.HistGradientBoostingRegressor` model\n device: String defining the type of device the converted operator should be run on\n extra_config: Extra configuration used to select the best conversion strategy\n\n Returns:\n A PyTorch model\n \"\"\"\n assert operator is not None, \"Cannot convert None operator\"\n\n # Get tree information out of the operator.\n tree_infos = operator.raw_operator._predictors\n tree_infos = [tree_infos[i][0] for i in range(len(tree_infos))]\n n_features = _get_n_features(operator.raw_operator)\n extra_config[constants.BASE_PREDICTION] = [[operator.raw_operator._baseline_prediction]]\n\n return convert_gbdt_common(operator, tree_infos, _get_parameters_hist_gbdt, n_features, None, extra_config)\n\n\n# Register the converters.\nregister_converter(\"SklearnGradientBoostingClassifier\", convert_sklearn_gbdt_classifier)\nregister_converter(\"SklearnGradientBoostingRegressor\", convert_sklearn_gbdt_regressor)\nregister_converter(\"SklearnHistGradientBoostingClassifier\", convert_sklearn_hist_gbdt_classifier)\nregister_converter(\"SklearnHistGradientBoostingRegressor\", convert_sklearn_hist_gbdt_regressor)\n",
"\"\"\"\nTests onnxml Imputer converter\n\"\"\"\nimport unittest\nimport warnings\n\nimport numpy as np\nimport torch\nfrom sklearn.impute import SimpleImputer\n\nfrom hummingbird.ml._utils import onnx_ml_tools_installed, onnx_runtime_installed, lightgbm_installed\nfrom hummingbird.ml import convert\n\nif onnx_runtime_installed():\n import onnxruntime as ort\nif onnx_ml_tools_installed():\n from onnxmltools import convert_sklearn\n from onnxmltools.convert.common.data_types import FloatTensorType as FloatTensorType_onnx\n\n\nclass TestONNXImputer(unittest.TestCase):\n def _test_imputer_converter(self, model, mode=\"onnx\"):\n warnings.filterwarnings(\"ignore\")\n X = np.array([[1, 2], [np.nan, 3], [7, 6]], dtype=np.float32)\n model.fit(X)\n\n # Create ONNX-ML model\n onnx_ml_model = convert_sklearn(model, initial_types=[(\"float_input\", FloatTensorType_onnx(X.shape))])\n\n # Get the predictions for the ONNX-ML model\n session = ort.InferenceSession(onnx_ml_model.SerializeToString())\n output_names = [session.get_outputs()[i].name for i in range(len(session.get_outputs()))]\n inputs = {session.get_inputs()[0].name: X}\n onnx_ml_pred = session.run(output_names, inputs)[0]\n\n # Create test model by calling converter\n model = convert(onnx_ml_model, mode, X)\n\n # Get the predictions for the test model\n pred = model.transform(X)\n\n return onnx_ml_pred, pred\n\n @unittest.skipIf(\n not (onnx_ml_tools_installed() and onnx_runtime_installed()), reason=\"ONNXML test requires ONNX, ORT and ONNXMLTOOLS\"\n )\n def test_onnx_imputer_const(self, rtol=1e-06, atol=1e-06):\n model = SimpleImputer(strategy=\"constant\")\n onnx_ml_pred, onnx_pred = self._test_imputer_converter(model)\n\n # Check that predicted values match\n np.testing.assert_allclose(onnx_ml_pred, onnx_pred, rtol=rtol, atol=atol)\n\n @unittest.skipIf(\n not (onnx_ml_tools_installed() and onnx_runtime_installed()), reason=\"ONNXML test requires ONNX, ORT and ONNXMLTOOLS\"\n )\n def test_onnx_imputer_const_nan0(self, rtol=1e-06, atol=1e-06):\n model = SimpleImputer(strategy=\"constant\", fill_value=0)\n onnx_ml_pred, onnx_pred = self._test_imputer_converter(model)\n\n # Check that predicted values match\n np.testing.assert_allclose(onnx_ml_pred, onnx_pred, rtol=rtol, atol=atol)\n\n @unittest.skipIf(\n not (onnx_ml_tools_installed() and onnx_runtime_installed()), reason=\"ONNXML test requires ONNX, ORT and ONNXMLTOOLS\"\n )\n def test_onnx_imputer_mean(self, rtol=1e-06, atol=1e-06):\n model = SimpleImputer(strategy=\"mean\", fill_value=\"nan\")\n onnx_ml_pred, onnx_pred = self._test_imputer_converter(model)\n\n # Check that predicted values match\n np.testing.assert_allclose(onnx_ml_pred, onnx_pred, rtol=rtol, atol=atol)\n\n @unittest.skipIf(\n not (onnx_ml_tools_installed() and onnx_runtime_installed()), reason=\"ONNXML test requires ONNX, ORT and ONNXMLTOOLS\"\n )\n def test_onnx_imputer_converter_raises_rt(self):\n warnings.filterwarnings(\"ignore\")\n model = SimpleImputer(strategy=\"mean\", fill_value=\"nan\")\n X = np.array([[1, 2], [np.nan, 3], [7, 6]], dtype=np.float32)\n model.fit(X)\n\n # Create ONNX-ML model\n onnx_ml_model = convert_sklearn(model, initial_types=[(\"float_input\", FloatTensorType_onnx(X.shape))])\n onnx_ml_model.graph.node[0].attribute[0].name = \"\".encode()\n\n self.assertRaises(RuntimeError, convert, onnx_ml_model, \"onnx\", X)\n\n @unittest.skipIf(\n not (onnx_ml_tools_installed() and onnx_runtime_installed()), reason=\"ONNXML test requires ONNX, ORT and ONNXMLTOOLS\"\n )\n def 
test_onnx_imputer_torch(self, rtol=1e-06, atol=1e-06):\n model = SimpleImputer(strategy=\"constant\")\n onnx_ml_pred, onnx_pred = self._test_imputer_converter(model, mode=\"torch\")\n\n # Check that predicted values match\n np.testing.assert_allclose(onnx_ml_pred, onnx_pred, rtol=rtol, atol=atol)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] | [
[
"sklearn.ensemble.RandomForestRegressor",
"numpy.split",
"sklearn.datasets.load_diabetes",
"pandas.DataFrame",
"sklearn.svm.LinearSVC",
"sklearn.svm.LinearSVR",
"numpy.random.randint",
"sklearn.compose.ColumnTransformer",
"sklearn.preprocessing.MinMaxScaler",
"pandas.read_csv",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.pipeline.Pipeline",
"sklearn.preprocessing.Imputer",
"sklearn.linear_model.RidgeCV",
"sklearn.datasets.load_iris",
"sklearn.model_selection.train_test_split",
"numpy.random.rand",
"numpy.testing.assert_allclose",
"numpy.array",
"sklearn.decomposition.PCA",
"sklearn.linear_model.LogisticRegression",
"sklearn.preprocessing.OneHotEncoder",
"sklearn.datasets.make_regression",
"sklearn.preprocessing.StandardScaler"
],
[
"numpy.random.seed",
"numpy.random.rand",
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.random.randint"
],
[
"numpy.log"
],
[
"numpy.array",
"sklearn.impute.SimpleImputer",
"numpy.testing.assert_allclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
BumagniyPacket/ocr | [
"f2651f3a23cf835a689b35a658ef3443086fd72a"
] | [
"ocr/paint.py"
] | [
"# -*- coding: utf-8 -*-\nimport matplotlib.pyplot as plt\n\n\ndef show_image(image):\n plt.imshow(-image, cmap='Greys')\n plt.show()\n\n\ndef show_two(image1, image2):\n plt.subplot(121)\n plt.imshow(-image1, cmap='Greys')\n\n plt.subplot(122)\n plt.imshow(-image2, cmap='Greys')\n\n plt.show()\n\n\ndef plot_hist(img):\n plt.hist(img.ravel(), 256, range=(0., 1.), color='red')\n plt.show()\n\n\ndef plot_2img_2hist(image1, image2):\n\n plt.subplot(221)\n plt.imshow(-image1, cmap='Greys')\n\n plt.subplot(223)\n plt.hist(image1.ravel(), 256, range=(0., 1.), color='red')\n\n plt.subplot(222)\n plt.imshow(-image2, cmap='Greys')\n\n plt.subplot(224)\n plt.hist(image2.ravel(), 256, range=(0., 1.), color='red')\n\n plt.show()\n"
] | [
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplot"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
RoyalTS/stochatreat | [
"6e638e748b8638b64a185229f78967cf864cd45e"
] | [
"tests/test_stochatreat_assignment.py"
] | [
"import pytest\n\nfrom math import gcd\n\nimport numpy as np\nimport pandas as pd\n\nfrom stochatreat import stochatreat\nfrom stochatreat import get_lcm_prob_denominators\n\n\n################################################################################\n# fixtures\n################################################################################\n\[email protected](params=[10_000, 100_000])\ndef df(request):\n N = request.param\n df = pd.DataFrame(\n data={\n \"id\": np.arange(N),\n \"dummy\": [1] * N,\n \"stratum1\": np.random.randint(1, 100, size=N),\n \"stratum2\": np.random.randint(0, 2, size=N),\n }\n )\n\n return df\n\n# a set of treatment assignment probabilities to throw at many tests\nstandard_probs = [[0.1, 0.9],\n [1/3, 2/3],\n [0.5, 0.5],\n [2/3, 1/3],\n [0.9, 0.1]]\n\n# a set of stratum column combinations from the above df fixture to throw at\n# many tests\nstandard_stratum_cols = [\n [\"dummy\"],\n [\"stratum1\"],\n [\"stratum1\", \"stratum2\"],\n]\n\n\n# a DataFrame and treatment assignment probabilities under which there will be\n# no misfits\[email protected]\ndef df_no_misfits():\n N = 1_000\n stratum_size = 10\n df = pd.DataFrame(\n data={\n \"id\": np.arange(N),\n \"stratum\": np.repeat(\n np.arange(N / stratum_size),\n repeats=stratum_size\n )\n }\n )\n\n return df\n\nprobs_no_misfits =[\n [0.1, 0.9],\n [0.5, 0.5],\n [0.9, 0.1],\n]\n\n\n################################################################################\n# overall treatment assignment proportions\n################################################################################\n\[email protected](\"n_treats\", [2, 3, 4, 5, 10])\[email protected](\"stratum_cols\", standard_stratum_cols)\ndef test_stochatreat_no_probs(n_treats, stratum_cols, df):\n \"\"\"\n Tests that overall treatment assignment proportions across all strata are as\n intended with equal treatment assignment probabilities -- relies on the Law\n of Large Numbers, not deterministic\n \"\"\"\n treats = stochatreat(\n data=df,\n stratum_cols=stratum_cols,\n treats=n_treats,\n idx_col=\"id\",\n random_state=42\n )\n\n treatment_shares = treats.groupby('treat')['id'].size() / treats.shape[0]\n\n np.testing.assert_almost_equal(\n treatment_shares, np.array([1 / n_treats] * n_treats), decimal=2\n )\n\n\[email protected](\"probs\", standard_probs)\[email protected](\"stratum_cols\", standard_stratum_cols)\ndef test_stochatreat_probs(probs, stratum_cols, df):\n \"\"\"\n Tests that overall treatment assignment proportions across all strata are as\n intended with unequal treatment assignment probabilities -- relies on the\n Law of Large Numbers, not deterministic\n \"\"\"\n treats = stochatreat(\n data=df,\n stratum_cols=stratum_cols,\n treats=len(probs),\n idx_col=\"id\",\n probs=probs,\n random_state=42,\n )\n treatment_shares = treats.groupby('treat')['id'].size() / treats.shape[0]\n\n np.testing.assert_almost_equal(\n treatment_shares, np.array(probs), decimal=2\n )\n\n\[email protected](\"probs\", probs_no_misfits)\ndef test_stochatreat_no_misfits(probs, df_no_misfits):\n \"\"\"\n Tests that overall treatment assignment proportions across all strata are as\n intended when strata are such that there are no misfits\n \"\"\"\n treats = stochatreat(\n data=df_no_misfits,\n stratum_cols=[\"stratum\"],\n treats=len(probs),\n idx_col=\"id\",\n probs=probs,\n random_state=42,\n )\n treatment_shares = treats.groupby('treat')['id'].size() / treats.shape[0]\n\n np.testing.assert_almost_equal(\n treatment_shares, np.array(probs), decimal=2\n )\n\n\[email 
protected](\"probs\", standard_probs)\ndef test_stochatreat_only_misfits(probs):\n \"\"\"\n Tests that overall treatment assignment proportions across all strata are as\n intended when strata are such that there are only misfits and the number of\n units is sufficiently large -- relies on the Law of Large Numbers, not\n deterministic\n \"\"\"\n N = 10_000\n df = pd.DataFrame(\n data={\n \"id\": np.arange(N),\n \"stratum\": np.arange(N),\n }\n )\n treats = stochatreat(\n data=df,\n stratum_cols=[\"stratum\"],\n treats=len(probs),\n idx_col=\"id\",\n probs=probs,\n random_state=42,\n )\n treatment_shares = treats.groupby('treat')['id'].size() / treats.shape[0]\n\n np.testing.assert_almost_equal(\n treatment_shares, np.array(probs), decimal=2\n )\n\n\n################################################################################\n# within-stratum treatment assignments\n################################################################################\n\ndef get_within_strata_counts(treats):\n \"\"\"Helper function to compute the treatment shares within strata\"\"\"\n treatment_counts = (treats\n .groupby([\"stratum_id\", \"treat\"])[[\"id\"]]\n .count()\n .rename(columns={\"id\": \"treat_count\"})\n .reset_index()\n )\n\n stratum_counts = (treats\n .groupby([\"stratum_id\"])[[\"id\"]]\n .count()\n .rename(columns={\"id\": \"stratum_count\"})\n .reset_index()\n )\n\n counts = pd.merge(\n treatment_counts, stratum_counts, on=\"stratum_id\", how=\"left\"\n )\n\n return counts\n\n\ndef compute_count_diff(treats, probs):\n \"\"\"\n Helper function to compute the treatment counts within strata and line them\n up with required counts, and returns the different treatment counts\n aggregated at the stratum level as well as the dataframe with the different\n counts used in some tests\n \"\"\"\n counts = get_within_strata_counts(treats)\n\n required_props = pd.DataFrame(\n {\"required_prop\": probs, \"treat\": range(len(probs))}\n )\n comp = pd.merge(\n counts, required_props, on=\"treat\", how=\"left\"\n )\n comp[\"desired_counts\"] = comp[\"stratum_count\"] * comp[\"required_prop\"]\n\n comp[\"count_diff\"] = (comp[\"treat_count\"] - comp[\"desired_counts\"]).abs()\n\n return comp\n\n\[email protected](\"n_treats\", [2, 3, 4, 5, 10])\[email protected](\"stratum_cols\", standard_stratum_cols)\ndef test_stochatreat_within_strata_no_probs(n_treats, stratum_cols, df):\n \"\"\"\n Tests that within strata treatment assignment counts are only as far from\n the required counts as misfit assignment randomization allows with equal\n treatment assignment probabilities but a differing number of treatments\n \"\"\"\n probs = n_treats * [1 / n_treats]\n lcm_prob_denominators = n_treats\n treats = stochatreat(\n data=df, \n stratum_cols=stratum_cols, \n treats=n_treats, \n idx_col=\"id\", \n random_state=42\n )\n comp = compute_count_diff(treats, probs)\n\n assert_msg = \"\"\"The counts differences exceed the bound that misfit \n allocation should not exceed\"\"\"\n assert (comp[\"count_diff\"] < lcm_prob_denominators).all(), assert_msg\n\n\[email protected](\"probs\", standard_probs)\[email protected](\"stratum_cols\", standard_stratum_cols)\ndef test_stochatreat_within_strata_probs(probs, stratum_cols, df):\n \"\"\"\n Tests that within strata treatment assignment counts are only as far from\n the required counts as misfit assignment randomization allows with two\n treatments but unequal treatment assignment probabilities\n \"\"\"\n lcm_prob_denominators = get_lcm_prob_denominators(probs)\n treats = stochatreat(\n 
data=df,\n stratum_cols=stratum_cols,\n treats=len(probs),\n idx_col=\"id\",\n probs=probs,\n random_state=42,\n )\n comp = compute_count_diff(treats, probs)\n\n assert_msg = \"\"\"The counts differences exceed the bound that misfit \n allocation should not exceed\"\"\"\n assert (comp[\"count_diff\"] < lcm_prob_denominators).all(), assert_msg\n\n\[email protected](\"probs\", probs_no_misfits)\ndef test_stochatreat_within_strata_no_misfits(probs, df_no_misfits):\n \"\"\"\n Tests that within strata treatment assignment counts are exactly equal to\n the required counts when strata are such that there are no misfits\n \"\"\"\n treats = stochatreat(\n data=df_no_misfits,\n stratum_cols=[\"stratum\"],\n treats=len(probs),\n idx_col=\"id\",\n probs=probs,\n random_state=42,\n )\n comp = compute_count_diff(treats, probs)\n\n assert_msg = \"The required proportions are not reached without misfits\"\n assert (comp[\"count_diff\"] == 0).all(), assert_msg\n\n\[email protected](\"probs\", standard_probs)\[email protected](\"stratum_cols\", standard_stratum_cols)\ndef test_stochatreat_global_strategy(probs, stratum_cols, df):\n treats = stochatreat(\n data=df,\n stratum_cols=stratum_cols,\n treats=len(probs),\n idx_col=\"id\",\n probs=probs,\n random_state=42,\n misfit_strategy=\"global\"\n )\n comp = compute_count_diff(treats, probs)\n\n stratum_count_diff = comp.groupby([\"stratum_id\"])[\"count_diff\"].sum()\n\n assert_msg = \"There is more than one stratum with misfits\"\n assert (stratum_count_diff != 0).sum() <= 1, assert_msg\n\n\[email protected](\"misfit_strategy\", [\"global\", \"stratum\"])\[email protected](\"stratum_cols\", standard_stratum_cols)\ndef test_stochatreat_stratum_ids(df, misfit_strategy, stratum_cols):\n \"\"\"Tests that the function returns the right number of stratum ids\"\"\"\n treats = stochatreat(\n data=df,\n stratum_cols=stratum_cols,\n treats=2,\n idx_col=\"id\",\n random_state=42,\n misfit_strategy=misfit_strategy,\n )\n\n n_unique_strata = len(df[stratum_cols].drop_duplicates())\n\n n_unique_stratum_ids = len(treats[\"stratum_id\"].drop_duplicates())\n\n if misfit_strategy == \"global\":\n # depending on whether there are misfits\n assert (\n (n_unique_stratum_ids == n_unique_strata) or\n (n_unique_stratum_ids - 1 == n_unique_strata)\n )\n else:\n assert n_unique_stratum_ids == n_unique_strata\n\n\[email protected](\"stratum_cols\", standard_stratum_cols)\[email protected](\"misfit_strategy\", [\"global\", \"stratum\"])\ndef test_stochatreat_random_state(df, stratum_cols, misfit_strategy):\n \"\"\"\n Tests that the results are the same on two consecutive calls with the same\n random state\n \"\"\"\n random_state = 42\n treats = []\n for _ in range(2):\n treatments_i = stochatreat(\n data=df,\n stratum_cols=stratum_cols,\n treats=2,\n idx_col=\"id\",\n random_state=random_state,\n misfit_strategy=misfit_strategy,\n )\n treats.append(treatments_i)\n \n pd.testing.assert_series_equal(\n treats[0][\"treat\"], treats[1][\"treat\"]\n )\n\n \[email protected](\"stratum_cols\", standard_stratum_cols)\[email protected](\"misfit_strategy\", [\"global\", \"stratum\"])\ndef test_stochatreat_shuffle_data(df, stratum_cols, misfit_strategy):\n \"\"\"\n Tests that the mapping between idx_col and the assignments is the same on\n two consecutive calls with the same random state and shuffled data points\n \"\"\"\n random_state = 42\n treats = []\n for _ in range(2):\n treatments_i = stochatreat(\n data=df,\n stratum_cols=stratum_cols,\n treats=2,\n idx_col=\"id\",\n 
random_state=random_state,\n misfit_strategy=misfit_strategy,\n )\n treatments_i = treatments_i.sort_values(\"id\")\n treats.append(treatments_i)\n\n df = df.sample(len(df), random_state=random_state)\n \n pd.testing.assert_series_equal(\n treats[0][\"treat\"], treats[1][\"treat\"]\n )\n\n\n\n\n \n\n\n\n"
] | [
[
"pandas.merge",
"pandas.testing.assert_series_equal",
"numpy.arange",
"numpy.array",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
mutazag/misc | [
"dfef362cdd835ef4efd1f2d02e13ff5297ccfc0f"
] | [
"py_merge/mergeexample.py"
] | [
"#%% \n\nimport pandas as pd\n\n#%%\n\ndf1 = pd.read_csv('df1.csv', index_col=0)\n# %%\ndf2 = pd.read_csv('df2.csv', index_col=0)\n# %%\ndf3 = pd.read_csv('df3.csv', index_col=0)\n# %%\ndf1.merge(df2, on='proj_id').merge(df3, on='doc_id')\n# %%\ndf1.merge(df2, on='proj_id', how='left').merge(df3, on='doc_id', how='left')\n# %%\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
mohammadrezabk/eo-learn | [
"8de3cfd64e74c1e4832e585954cdbf0ee9676eb3",
"8de3cfd64e74c1e4832e585954cdbf0ee9676eb3",
"8de3cfd64e74c1e4832e585954cdbf0ee9676eb3",
"8de3cfd64e74c1e4832e585954cdbf0ee9676eb3"
] | [
"features/eolearn/features/radiometric_normalization.py",
"core/eolearn/tests/test_eodata_io.py",
"io/eolearn/tests/test_local_io.py",
"ml_tools/eolearn/ml_tools/classifier.py"
] | [
"\"\"\"\nModule for radiometric normalization\n\nCredits:\nCopyright (c) 2018-2019 Johannes Schmid (GeoVille)\nCopyright (c) 2017-2019 Matej Aleksandrov, Matic Lubej, Devis Peresutti (Sinergise)\n\nThis source code is licensed under the MIT license found in the LICENSE\nfile in the root directory of this source tree.\n\"\"\"\n\nimport numpy as np\n\nfrom eolearn.core import EOTask, FeatureType\n\n\nclass ReferenceScenes(EOTask):\n \"\"\" Creates a layer of reference scenes which have the highest fraction of valid pixels.\n\n The number of reference scenes is limited to a definable number.\n\n Contributor: Johannes Schmid, GeoVille Information Systems GmbH, 2018\n\n :param feature: Name of the eopatch data layer. Needs to be of the FeatureType \"DATA\".\n :type feature: (FeatureType, str) or (FeatureType, str, str)\n :param valid_fraction_feature: Name of the layer containing the valid fraction obtained with the EOTask\n 'AddValidDataFraction'. Needs to be of the FeatureType \"SCALAR\".\n :type valid_fraction_feature: (FeatureType, str)\n :param max_scene_number: Maximum number of reference scenes taken for the creation of the composite. By default,\n the maximum number of scenes equals the number of time frames\n :type max_scene_number: int\n\n \"\"\"\n def __init__(self, feature, valid_fraction_feature, max_scene_number=None):\n self.feature = self._parse_features(feature, new_names=True,\n default_feature_type=FeatureType.DATA,\n rename_function='{}_REFERENCE'.format)\n self.valid_fraction_feature = self._parse_features(valid_fraction_feature,\n default_feature_type=FeatureType.SCALAR)\n self.number = max_scene_number\n\n def execute(self, eopatch):\n feature_type, feature_name, new_feature_name = next(self.feature(eopatch))\n valid_fraction_feature_type, valid_fraction_feature_name = next(self.valid_fraction_feature(eopatch))\n\n valid_frac = list(eopatch[valid_fraction_feature_type][valid_fraction_feature_name].flatten())\n data = eopatch[feature_type][feature_name]\n\n number = data.shape[0] if self.number is None else self.number\n\n eopatch[feature_type][new_feature_name] = np.array([data[x] for _, x in\n sorted(zip(valid_frac, range(data.shape[0])), reverse=True)\n if x <= number-1])\n\n return eopatch\n\n\nclass BaseCompositing(EOTask):\n \"\"\" Base class to create a composite of reference scenes\n\n Contributor: Johannes Schmid, GeoVille Information Systems GmbH, 2018\n\n :param feature: Feature holding the input time-series. Default type is FeatureType.DATA\n :type feature: (FeatureType, str)\n :param feature_composite: Type and name of output composite image. Default type is FeatureType.DATA_TIMELESS\n :type feature_composite: (FeatureType, str)\n :param percentile: Percentile along the time dimension used for compositing. Methods use different percentiles\n :type percentile: int or list\n :param max_index: Value used to flag indices with NaNs. Could be integer or NaN. Default is 255\n :type max_index: int or NaN\n :param interpolation: Method used to compute percentile. Allowed values are {'geoville', 'linear', 'lower',\n 'higher', 'midpoint', 'nearest'}. 'geoville' interpolation performs a custom\n implementation, while the other methods use the numpy `percentile` function. Default is\n 'lower'\n :type interpolation: str\n :param no_data_value: Value in the composite assigned to non valid data points. 
Default is NaN\n :type no_data_value: float or NaN\n \"\"\"\n\n def __init__(self, feature, feature_composite, percentile=None, max_index=255, interpolation='lower',\n no_data_value=np.nan):\n self.feature = self._parse_features(feature,\n default_feature_type=FeatureType.DATA,\n rename_function='{}_COMPOSITE'.format)\n self.composite_type, self.composite_name = next(\n self._parse_features(feature_composite, default_feature_type=FeatureType.DATA_TIMELESS)())\n self.percentile = percentile\n self.max_index = max_index\n self.interpolation = interpolation\n self._index_by_percentile = self._geoville_index_by_percentile \\\n if self.interpolation.lower() == 'geoville' else self._numpy_index_by_percentile\n self.no_data_value = no_data_value\n\n def _numpy_index_by_percentile(self, data, percentile):\n \"\"\" Calculate percentile of numpy stack and return the index of the chosen pixel.\n\n numpy percentile function is used with one of the following interpolations {'linear', 'lower', 'higher',\n 'midpoint', 'nearest'}\n \"\"\"\n data_perc_low = np.nanpercentile(data, percentile, axis=0, interpolation=self.interpolation)\n\n indices = np.empty(data_perc_low.shape, dtype=np.uint8)\n indices[:] = np.nan\n\n abs_diff = np.where(np.isnan(data_perc_low), np.inf, abs(data - data_perc_low))\n\n indices = np.where(np.isnan(data_perc_low), self.max_index, np.nanargmin(abs_diff, axis=0))\n\n return indices\n\n def _geoville_index_by_percentile(self, data, percentile):\n \"\"\" Calculate percentile of numpy stack and return the index of the chosen pixel. \"\"\"\n # no_obs = bn.allnan(arr_tmp[\"data\"], axis=0)\n data_tmp = np.array(data, copy=True)\n valid_obs = np.sum(np.isfinite(data_tmp), axis=0)\n # replace NaN with maximum\n max_val = np.nanmax(data_tmp) + 1\n data_tmp[np.isnan(data_tmp)] = max_val\n # sort - former NaNs will move to the end\n ind_tmp = np.argsort(data_tmp, kind=\"mergesort\", axis=0)\n # desired position as well as floor and ceiling of it\n k_arr = (valid_obs - 1) * (percentile / 100.0)\n k_arr = np.where(k_arr < 0, 0, k_arr)\n f_arr = np.floor(k_arr + 0.5)\n f_arr = f_arr.astype(int)\n # get floor value of reference band and index band\n ind = f_arr.astype(\"int16\")\n y_val, x_val = ind_tmp.shape[1], ind_tmp.shape[2]\n y_val, x_val = np.ogrid[0:y_val, 0:x_val]\n idx = np.where(valid_obs == 0, self.max_index, ind_tmp[ind, y_val, x_val])\n return idx\n\n def _get_reference_band(self, data):\n \"\"\" Extract reference band from input 4D data according to compositing method\n\n :param data: 4D array from which to extract reference band (e.g. blue, maxNDVI, ..)\n :type data: numpy array\n :return: 3D array containing reference band according to compositing method\n \"\"\"\n raise NotImplementedError\n\n def _get_indices(self, data):\n \"\"\" Compute indices along temporal dimension corresponding to the sought percentile\n\n :param data: Input 3D array holding the reference band\n :type data: numpy array\n :return: 2D array holding the temporal index corresponding to percentile\n \"\"\"\n indices = self._index_by_percentile(data, self.percentile)\n return indices\n\n def execute(self, eopatch):\n \"\"\" Compute composite array merging temporal frames according to the compositing method\n\n :param eopatch: eopatch holding time-series\n :return: eopatch with composite image of time-series\n \"\"\"\n feature_type, feature_name = next(self.feature(eopatch))\n data = eopatch[feature_type][feature_name].copy()\n\n # compute band according to compositing method (e.g. 
blue, maxNDVI, maxNDWI)\n reference_bands = self._get_reference_band(data)\n\n # find temporal indices corresponding to pre-defined percentile\n indices = self._get_indices(reference_bands)\n\n # compute composite image selecting values along temporal dimension corresponding to percentile indices\n composite_image = np.empty((data.shape[1:]), np.float32)\n composite_image[:] = self.no_data_value\n for scene_id, scene in enumerate(data):\n composite_image = np.where(np.dstack([indices]) == scene_id, scene, composite_image)\n\n eopatch[self.composite_type][self.composite_name] = composite_image\n\n return eopatch\n\n\nclass BlueCompositing(BaseCompositing):\n \"\"\" Blue band compositing method\n\n - blue (25th percentile of the blue band)\n\n :param blue_idx: Index of blue band in `feature` array\n :type blue_idx: int\n \"\"\"\n def __init__(self, feature, feature_composite, blue_idx, interpolation='lower'):\n super().__init__(feature, feature_composite, percentile=25, interpolation=interpolation)\n self.blue_idx = blue_idx\n if not isinstance(blue_idx, int):\n raise ValueError('Incorrect value of blue band index specified')\n\n def _get_reference_band(self, data):\n \"\"\" Extract the blue band from time-series\n\n :param data: 4D array from which to extract the blue reference band\n :type data: numpy array\n :return: 3D array containing the blue reference band\n \"\"\"\n return data[..., self.blue_idx].astype(\"float32\")\n\n\nclass HOTCompositing(BaseCompositing):\n \"\"\" HOT compositing method\n\n - HOT (Index using bands blue and red)\n\n The HOT index is defined as per\n Zhu, Z., & Woodcock, C. E. (2012). \"Object-based cloud and cloud shadow detection in Landsat imagery.\"\n Remote Sensing of Environment, 118, 83-94.\n\n :param blue_idx: Index of blue band in `feature` array\n :type blue_idx: int\n :param red_idx: Index of red band in `feature` array\n :type red_idx: int\n \"\"\"\n def __init__(self, feature, feature_composite, blue_idx, red_idx, interpolation='lower'):\n super().__init__(feature, feature_composite, percentile=25, interpolation=interpolation)\n self.blue_idx = blue_idx\n self.red_idx = red_idx\n if not isinstance(blue_idx, int) or not isinstance(red_idx, int):\n raise ValueError('Incorrect values of blue and red band indices specified')\n\n def _get_reference_band(self, data):\n \"\"\" Extract the HOT band from time-series\n\n :param data: 4D array from which to extract the HOT reference band\n :type data: numpy array\n :return: 3D array containing the HOT reference band\n \"\"\"\n return data[..., self.blue_idx] - 0.5 * data[..., self.red_idx] - 0.08\n\n\nclass MaxNDVICompositing(BaseCompositing):\n \"\"\" maxNDVI compositing method\n\n - maxNDVI (temporal maximum of NDVI)\n\n :param red_idx: Index of red band in `feature` array\n :type red_idx: int\n :param nir_idx: Index of NIR band in `feature` array\n :type nir_idx: int\n \"\"\"\n def __init__(self, feature, feature_composite, red_idx, nir_idx, interpolation='lower'):\n super().__init__(feature, feature_composite, percentile=[0, 100], interpolation=interpolation)\n self.red_idx = red_idx\n self.nir_idx = nir_idx\n if not isinstance(nir_idx, int) or not isinstance(red_idx, int):\n raise ValueError('Incorrect values of red and NIR band indices specified')\n\n def _get_reference_band(self, data):\n \"\"\" Extract the NDVI band from time-series\n\n :param data: 4D array from which to compute the NDVI reference band\n :type data: numpy array\n :return: 3D array containing the NDVI reference band\n \"\"\"\n nir = 
data[..., self.nir_idx].astype(\"float32\")\n red = data[..., self.red_idx].astype(\"float32\")\n return (nir - red) / (nir + red)\n\n def _get_indices(self, data):\n median = np.nanmedian(data, axis=0)\n indices_min = self._index_by_percentile(data, self.percentile[0])\n indices_max = self._index_by_percentile(data, self.percentile[1])\n indices = np.where(median < -0.05, indices_min, indices_max)\n return indices\n\n\nclass MaxNDWICompositing(BaseCompositing):\n \"\"\" maxNDWI compositing method\n\n - maxNDWI (temporal maximum of NDWI)\n\n :param nir_idx: Index of NIR band in `feature` array\n :type nir_idx: int\n :param swir1_idx: Index of SWIR1 band in `feature` array\n :type swir1_idx: int\n \"\"\"\n def __init__(self, feature, feature_composite, nir_idx, swir1_idx, interpolation='lower'):\n super().__init__(feature, feature_composite, percentile=100, interpolation=interpolation)\n self.nir_idx = nir_idx\n self.swir1_idx = swir1_idx\n if not isinstance(nir_idx, int) or not isinstance(swir1_idx, int):\n raise ValueError('Incorrect values of NIR and SWIR1 band indices specified')\n\n def _get_reference_band(self, data):\n \"\"\" Extract the NDWI band from time-series\n\n :param data: 4D array from which to compute the NDWI reference band\n :type data: numpy array\n :return: 3D array containing the NDWI reference band\n \"\"\"\n nir = data[..., self.nir_idx].astype(\"float32\")\n swir1 = data[..., self.swir1_idx].astype(\"float32\")\n return (nir - swir1) / (nir + swir1)\n\n\nclass MaxRatioCompositing(BaseCompositing):\n \"\"\" maxRatio compositing method\n\n - maxRatio (temporal maximum of a ratio using bands blue, NIR and SWIR)\n\n :param blue_idx: Index of blue band in `feature` array\n :type blue_idx: int\n :param nir_idx: Index of NIR band in `feature` array\n :type nir_idx: int\n :param swir1_idx: Index of SWIR1 band in `feature` array\n :type swir1_idx: int\n \"\"\"\n def __init__(self, feature, feature_composite, blue_idx, nir_idx, swir1_idx, interpolation='lower'):\n super().__init__(feature, feature_composite, percentile=100, interpolation=interpolation)\n self.blue_idx = blue_idx\n self.nir_idx = nir_idx\n self.swir1_idx = swir1_idx\n if not isinstance(blue_idx, int) or not isinstance(nir_idx, int) or not isinstance(swir1_idx, int):\n raise ValueError('Incorrect values for either blue, NIR or SWIR1 band indices specified')\n\n def _get_reference_band(self, data):\n \"\"\" Extract the max-ratio band from time-series\n\n The max-ratio is defined as max(NIR,SWIR1)/BLUE\n\n :param data: 4D array from which to compute the max-ratio reference band\n :type data: numpy array\n :return: 3D array containing the max-ratio reference band\n \"\"\"\n blue = data[..., self.blue_idx].astype(\"float32\")\n nir = data[..., self.nir_idx].astype(\"float32\")\n swir1 = data[..., self.swir1_idx].astype(\"float32\")\n return np.nanmax(np.array([nir, swir1]), axis=0) / blue\n\n\nclass HistogramMatching(EOTask):\n \"\"\" Histogram match of each band of each scene within a time-series with respect to the corresponding band of a\n reference composite.\n\n Contributor: Johannes Schmid, GeoVille Information Systems GmbH, 2018\n\n :param feature: Name of the eopatch data layer that will undergo a histogram match.\n Should be of the FeatureType \"DATA\".\n :type feature: (FeatureType, str) or (FeatureType, str, str)\n :param reference: Name of the eopatch data layer that represents the reference for the histogram match.\n Should be of the FeatureType \"DATA_TIMELESS\".\n :type reference: (FeatureType, 
str)\n \"\"\"\n\n def __init__(self, feature, reference):\n self.feature = self._parse_features(feature, new_names=True,\n default_feature_type=FeatureType.DATA,\n rename_function='{}_NORMALISED'.format)\n self.reference = self._parse_features(reference, default_feature_type=FeatureType.DATA_TIMELESS)\n\n def execute(self, eopatch):\n \"\"\" Perform histogram matching of the time-series with respect to a reference scene\n\n :param eopatch: eopatch holding the time-series and reference data\n :type eopatch: EOPatch\n :return: The same eopatch instance with the normalised time-series\n \"\"\"\n feature_type, feature_name, new_feature_name = next(self.feature(eopatch))\n reference_type, reference_name = next(self.reference(eopatch))\n\n reference_scene = eopatch[reference_type][reference_name]\n # check if band dimension matches\n if reference_scene.shape[-1] != eopatch[feature_type][feature_name].shape[-1]:\n raise ValueError('Time-series and reference scene must have corresponding bands')\n\n eopatch[feature_type][new_feature_name] = np.zeros_like(eopatch[feature_type][feature_name])\n for source_id, source in enumerate(eopatch[feature_type][feature_name]):\n # mask-out same invalid pixels\n src_masked = np.where(np.isnan(reference_scene), np.nan, source)\n ref_masked = np.where(np.isnan(source), np.nan, reference_scene)\n # compute statistics\n std_ref = np.nanstd(ref_masked, axis=(0, 1), dtype=np.float64)\n std_src = np.nanstd(src_masked, axis=(0, 1), dtype=np.float64)\n mean_ref = np.nanmean(ref_masked, axis=(0, 1), dtype=np.float64)\n mean_src = np.nanmean(src_masked, axis=(0, 1), dtype=np.float64)\n # normalise values\n eopatch[feature_type][new_feature_name][source_id] = \\\n source * (std_ref / std_src) + (mean_ref - (mean_src * (std_ref / std_src)))\n\n return eopatch\n",
"\"\"\"\nCredits:\nCopyright (c) 2017-2020 Matej Aleksandrov, Matej Batič, Grega Milčinski, Matic Lubej, Devis Peresutti (Sinergise)\nCopyright (c) 2017-2020 Nejc Vesel, Jovan Višnjić, Anže Zupanc (Sinergise)\n\nThis source code is licensed under the MIT license found in the LICENSE\nfile in the root directory of this source tree.\n\"\"\"\nimport unittest\nimport logging\nimport datetime\nimport os\nimport tempfile\n\nimport numpy as np\nimport fs\nfrom fs.errors import CreateFailed, ResourceNotFound\nfrom fs.tempfs import TempFS\nfrom fs_s3fs import S3FS\nfrom geopandas import GeoDataFrame\nfrom moto import mock_s3\nimport boto3\n\nfrom sentinelhub import BBox, CRS\nfrom eolearn.core import EOPatch, FeatureType, OverwritePermission, SaveTask, LoadTask\n\nlogging.basicConfig(level=logging.INFO)\n\n\n@mock_s3\ndef _create_new_s3_fs():\n \"\"\" Creates a new empty mocked s3 bucket. If one such bucket already exists it deletes it first.\n \"\"\"\n bucket_name = 'mocked-test-bucket'\n s3resource = boto3.resource('s3', region_name='eu-central-1')\n\n bucket = s3resource.Bucket(bucket_name)\n\n if bucket.creation_date: # If bucket already exists\n for key in bucket.objects.all():\n key.delete()\n bucket.delete()\n\n s3resource.create_bucket(Bucket=bucket_name,\n CreateBucketConfiguration={'LocationConstraint': 'eu-central-1'})\n\n return S3FS(bucket_name=bucket_name)\n\n\n@mock_s3\nclass TestEOPatchIO(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n eopatch = EOPatch()\n mask = np.zeros((3, 3, 2), dtype=np.int16)\n data = np.zeros((2, 3, 3, 2), dtype=np.int16)\n eopatch.data_timeless['mask'] = mask\n eopatch.data['data'] = data\n eopatch.timestamp = [datetime.datetime(2017, 1, 1, 10, 4, 7),\n datetime.datetime(2017, 1, 4, 10, 14, 5)]\n eopatch.meta_info['something'] = 'nothing'\n eopatch.meta_info['something-else'] = 'nothing'\n eopatch.bbox = BBox((1, 2, 3, 4), CRS.WGS84)\n eopatch.scalar['my scalar with spaces'] = np.array([[1, 2, 3], [1, 2, 3]])\n eopatch.scalar_timeless['my timeless scalar with spaces'] = np.array([1, 2, 3])\n eopatch.vector['my-df'] = GeoDataFrame({\n 'values': [1, 2],\n 'TIMESTAMP': [datetime.datetime(2017, 1, 1, 10, 4, 7), datetime.datetime(2017, 1, 4, 10, 14, 5)],\n 'geometry': [eopatch.bbox.geometry, eopatch.bbox.geometry]\n }, crs=eopatch.bbox.crs.pyproj_crs())\n\n cls.eopatch = eopatch\n\n cls.filesystem_loaders = [TempFS, _create_new_s3_fs]\n\n def test_saving_to_a_file(self):\n with tempfile.NamedTemporaryFile() as fp:\n with self.assertRaises(CreateFailed):\n self.eopatch.save(fp.name)\n\n def test_saving_in_empty_folder(self):\n for fs_loader in self.filesystem_loaders:\n with fs_loader() as temp_fs:\n\n if isinstance(temp_fs, TempFS):\n self.eopatch.save(temp_fs.root_path)\n else:\n self.eopatch.save('/', filesystem=temp_fs)\n self.assertTrue(temp_fs.exists('/data_timeless/mask.npy'))\n\n subfolder = 'new-subfolder'\n self.eopatch.save('new-subfolder', filesystem=temp_fs)\n self.assertTrue(temp_fs.exists('/{}/bbox.pkl'.format(subfolder)))\n\n def test_saving_in_non_empty_folder(self):\n for fs_loader in self.filesystem_loaders:\n with fs_loader() as temp_fs:\n empty_file = 'foo.txt'\n\n with temp_fs.open(empty_file, 'w'):\n pass\n\n self.eopatch.save('/', filesystem=temp_fs)\n self.assertTrue(temp_fs.exists(empty_file))\n\n self.eopatch.save('/', overwrite_permission=OverwritePermission.OVERWRITE_PATCH, filesystem=temp_fs)\n self.assertFalse(temp_fs.exists(empty_file))\n\n def test_overwriting_non_empty_folder(self):\n for fs_loader in 
self.filesystem_loaders:\n with fs_loader() as temp_fs:\n self.eopatch.save('/', filesystem=temp_fs)\n self.eopatch.save('/', filesystem=temp_fs, overwrite_permission=OverwritePermission.OVERWRITE_FEATURES)\n self.eopatch.save('/', filesystem=temp_fs, overwrite_permission=OverwritePermission.OVERWRITE_PATCH)\n\n add_eopatch = EOPatch()\n add_eopatch.data['some data'] = np.empty((2, 3, 3, 2))\n add_eopatch.save('/', filesystem=temp_fs, overwrite_permission=OverwritePermission.ADD_ONLY)\n with self.assertRaises(ValueError):\n add_eopatch.save('/', filesystem=temp_fs, overwrite_permission=OverwritePermission.ADD_ONLY)\n\n new_eopatch = EOPatch.load('/', filesystem=temp_fs, lazy_loading=False)\n self.assertEqual(new_eopatch, self.eopatch + add_eopatch)\n\n def test_save_load(self):\n for fs_loader in self.filesystem_loaders:\n with fs_loader() as temp_fs:\n self.eopatch.save('/', filesystem=temp_fs)\n eopatch2 = EOPatch.load('/', filesystem=temp_fs)\n self.assertEqual(self.eopatch, eopatch2)\n\n eopatch2.save('/', filesystem=temp_fs, overwrite_permission=1)\n eopatch2 = EOPatch.load('/', filesystem=temp_fs)\n self.assertEqual(self.eopatch, eopatch2)\n\n eopatch2.save('/', filesystem=temp_fs, overwrite_permission=1)\n eopatch2 = EOPatch.load('/', filesystem=temp_fs, lazy_loading=False)\n self.assertEqual(self.eopatch, eopatch2)\n\n features = {FeatureType.DATA_TIMELESS: {'mask'}, FeatureType.TIMESTAMP: ...}\n eopatch2.save('/', filesystem=temp_fs, features=features,\n compress_level=3, overwrite_permission=1)\n eopatch2 = EOPatch.load('/', filesystem=temp_fs, lazy_loading=True)\n self.assertEqual(self.eopatch, eopatch2)\n eopatch3 = EOPatch.load('/', filesystem=temp_fs, lazy_loading=True, features=features)\n self.assertNotEqual(self.eopatch, eopatch3)\n\n def test_save_add_only_features(self):\n features = [\n (FeatureType.DATA_TIMELESS, 'mask'),\n FeatureType.MASK,\n FeatureType.VECTOR,\n (FeatureType.SCALAR, ...),\n (FeatureType.META_INFO, 'something'),\n FeatureType.BBOX\n ]\n\n for fs_loader in self.filesystem_loaders:\n with fs_loader() as temp_fs:\n self.eopatch.save('/', filesystem=temp_fs, features=features, overwrite_permission=0)\n\n def test_overwrite_failure(self):\n eopatch = EOPatch()\n mask = np.arange(3 * 3 * 2).reshape(3, 3, 2)\n eopatch.data_timeless['mask'] = mask\n eopatch.data_timeless['Mask'] = mask\n\n for fs_loader in self.filesystem_loaders:\n with fs_loader() as temp_fs, self.assertRaises(IOError):\n eopatch.save('/', filesystem=temp_fs)\n\n for fs_loader in self.filesystem_loaders:\n with fs_loader() as temp_fs:\n eopatch.save('/', filesystem=temp_fs, features=[(FeatureType.DATA_TIMELESS, 'mask')],\n overwrite_permission=2)\n\n with self.assertRaises(IOError):\n eopatch.save('/', filesystem=temp_fs, features=[(FeatureType.DATA_TIMELESS, 'Mask')],\n overwrite_permission=0)\n\n def test_save_and_load_tasks(self):\n folder = 'foo-folder'\n patch_folder = 'patch-folder'\n for fs_loader in self.filesystem_loaders:\n with fs_loader() as temp_fs:\n temp_fs.makedir(folder)\n\n save_task = SaveTask(folder, filesystem=temp_fs, compress_level=9)\n load_task = LoadTask(folder, filesystem=temp_fs, lazy_loading=False)\n\n saved_eop = save_task(self.eopatch, eopatch_folder=patch_folder)\n bbox_path = fs.path.join(folder, patch_folder, 'bbox.pkl.gz')\n self.assertTrue(temp_fs.exists(bbox_path))\n self.assertEqual(saved_eop, self.eopatch)\n\n eop = load_task(eopatch_folder=patch_folder)\n self.assertEqual(eop, self.eopatch)\n\n def test_fail_saving_nonexistent_feature(self):\n 
features = [(FeatureType.DATA, 'nonexistent')]\n for fs_loader in self.filesystem_loaders:\n with fs_loader() as temp_fs, self.assertRaises(ValueError):\n self.eopatch.save('/', filesystem=temp_fs, features=features)\n\n def test_fail_loading_nonexistent_feature(self):\n for features in [[(FeatureType.DATA, 'nonexistent')], [(FeatureType.META_INFO, 'nonexistent')]]:\n for fs_loader in self.filesystem_loaders:\n with fs_loader() as temp_fs, self.assertRaises(IOError):\n EOPatch.load('/', filesystem=temp_fs, features=features)\n\n def test_nonexistent_location(self):\n path = './folder/subfolder/new-eopatch/'\n empty_eop = EOPatch()\n\n for fs_loader in self.filesystem_loaders:\n with fs_loader() as temp_fs:\n with self.assertRaises(ResourceNotFound):\n EOPatch.load(path, filesystem=temp_fs)\n\n empty_eop.save(path, filesystem=temp_fs)\n\n with TempFS() as temp_fs:\n full_path = os.path.join(temp_fs.root_path, path)\n with self.assertRaises(CreateFailed):\n EOPatch.load(full_path)\n\n load_task = LoadTask(full_path)\n with self.assertRaises(CreateFailed):\n load_task.execute()\n\n empty_eop.save(full_path)\n self.assertTrue(os.path.exists(full_path))\n\n with TempFS() as temp_fs:\n full_path = os.path.join(temp_fs.root_path, path)\n save_task = SaveTask(full_path)\n save_task.execute(empty_eop)\n self.assertTrue(os.path.exists(full_path))\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"\"\"\"\nCredits:\nCopyright (c) 2017-2019 Matej Aleksandrov, Matej Batič, Andrej Burja, Eva Erzin (Sinergise)\nCopyright (c) 2017-2019 Grega Milčinski, Matic Lubej, Devis Peresutti, Jernej Puc, Tomislav Slijepčević (Sinergise)\nCopyright (c) 2017-2019 Blaž Sovdat, Nejc Vesel, Jovan Višnjić, Anže Zupanc, Lojze Žust (Sinergise)\nCopyright (c) 2018-2019 William Ouellette\nCopyright (c) 2019 Drew Bollinger (DevelopmentSeed)\n\nThis source code is licensed under the MIT license found in the LICENSE\nfile in the root directory of this source tree.\n\"\"\"\nimport copy\nimport os\nimport unittest\nfrom unittest.mock import patch\nimport logging\nimport tempfile\nimport datetime\n\nimport numpy as np\nimport boto3\nfrom moto import mock_s3\nfrom fs.errors import ResourceNotFound\n\nfrom sentinelhub import read_data\nfrom sentinelhub.time_utils import serialize_time\n\nfrom eolearn.core import EOPatch, FeatureType\nfrom eolearn.io import ExportToTiff, ImportFromTiff\n\nlogging.basicConfig(level=logging.DEBUG)\n\n\nclass TestExportAndImportTiff(unittest.TestCase):\n \"\"\" Testing if export and then import of the data preserves the data\n \"\"\"\n\n class TestCase:\n \"\"\"\n Container for each test case\n \"\"\"\n\n def __init__(self, name, feature_type, data, bands=None, times=None, expected_times=None):\n self.name = name\n self.feature_type = feature_type\n self.data = data\n self.bands = bands\n self.times = times\n self.expected_times = expected_times\n\n if self.expected_times is None:\n self.expected_times = self.times\n\n def get_expected(self):\n \"\"\" Returns expected data at the end of export-import process\n \"\"\"\n expected = self.data.copy()\n\n if isinstance(self.expected_times, tuple):\n expected = expected[self.expected_times[0]: self.expected_times[1] + 1, ...]\n elif isinstance(self.expected_times, list):\n expected = expected[self.expected_times, ...]\n\n if isinstance(self.bands, tuple):\n expected = expected[..., self.bands[0]: self.bands[1] + 1]\n elif isinstance(self.bands, list):\n expected = expected[..., self.bands]\n\n if expected.dtype == np.int64:\n expected = expected.astype(np.int32)\n\n return expected\n\n def get_expected_timestamp_size(self):\n if self.feature_type.is_timeless():\n return None\n return self.get_expected().shape[0]\n\n @classmethod\n def setUpClass(cls):\n\n cls.eopatch = EOPatch.load(os.path.join(os.path.dirname(os.path.realpath(__file__)),\n '../../../example_data/TestEOPatch'))\n\n dates = cls.eopatch.timestamp\n scalar_array = np.arange(10 * 6, dtype=np.float32).reshape(10, 6)\n mask_array = np.arange(5*3*2*1, dtype=np.uint16).reshape(5, 3, 2, 1)\n data_timeless_array = np.arange(3*2*5, dtype=np.float64).reshape(3, 2, 5)\n data_array = np.arange(10 * 3 * 2 * 6, dtype=np.float32).reshape(10, 3, 2, 6)\n\n cls.test_cases = [\n cls.TestCase('scalar_timeless', FeatureType.SCALAR_TIMELESS, np.arange(3)),\n cls.TestCase('scalar_timeless_list', FeatureType.SCALAR_TIMELESS, np.arange(5), bands=[3, 0, 2]),\n cls.TestCase('scalar_timeless_tuple', FeatureType.SCALAR_TIMELESS, np.arange(6), bands=(1, 4)),\n cls.TestCase('scalar_band_single_time_single', FeatureType.SCALAR, scalar_array, bands=[3], times=[7]),\n cls.TestCase('scalar_band_list_time_list', FeatureType.SCALAR, scalar_array,\n bands=[2, 4, 1, 0], times=[1, 7, 0, 2, 3]),\n cls.TestCase('scalar_band_tuple_time_tuple', FeatureType.SCALAR, scalar_array, bands=(1, 4), times=(2, 8)),\n cls.TestCase('mask_timeless', FeatureType.MASK_TIMELESS, np.arange(3*3*1).reshape(3, 3, 1)),\n 
cls.TestCase('mask_single', FeatureType.MASK, mask_array, times=[4]),\n cls.TestCase('mask_list', FeatureType.MASK, mask_array, times=[4, 2]),\n cls.TestCase('mask_tuple_int', FeatureType.MASK, mask_array, times=(2, 4)),\n cls.TestCase('mask_tuple_datetime', FeatureType.MASK, mask_array, times=(dates[2], dates[4]),\n expected_times=(2, 4)),\n cls.TestCase('mask_tuple_string', FeatureType.MASK, mask_array,\n times=(serialize_time(dates[2]), serialize_time(dates[4])),\n expected_times=(2, 4)),\n cls.TestCase('data_timeless_band_list', FeatureType.DATA_TIMELESS, data_timeless_array, bands=[2, 4, 1, 0]),\n cls.TestCase('data_timeless_band_tuple', FeatureType.DATA_TIMELESS, data_timeless_array, bands=(1, 4)),\n cls.TestCase('data_band_list_time_list', FeatureType.DATA, data_array,\n bands=[2, 4, 1, 0], times=[1, 7, 0, 2, 3]),\n cls.TestCase('data_band_tuple_time_tuple', FeatureType.DATA, data_array, bands=(1, 4), times=(2, 8)),\n cls.TestCase('data_normal', FeatureType.DATA, data_array),\n ]\n\n def test_export_import(self):\n for test_case in self.test_cases:\n with self.subTest(msg='Test case {}'.format(test_case.name)):\n\n self.eopatch[test_case.feature_type][test_case.name] = test_case.data\n\n with tempfile.TemporaryDirectory() as tmp_dir_name:\n tmp_file_name = 'temp_file.tiff'\n tmp_file_name_reproject = 'temp_file_4326.tiff'\n feature = test_case.feature_type, test_case.name\n\n export_task = ExportToTiff(feature, folder=tmp_dir_name,\n band_indices=test_case.bands, date_indices=test_case.times)\n export_task.execute(self.eopatch, filename=tmp_file_name)\n\n export_task = ExportToTiff(feature, folder=tmp_dir_name,\n band_indices=test_case.bands, date_indices=test_case.times,\n crs='EPSG:4326', compress='lzw')\n export_task.execute(self.eopatch, filename=tmp_file_name_reproject)\n\n import_task = ImportFromTiff(feature, folder=tmp_dir_name,\n timestamp_size=test_case.get_expected_timestamp_size())\n\n expected_raster = test_case.get_expected()\n\n new_eop = import_task.execute(filename=tmp_file_name)\n old_eop = import_task.execute(self.eopatch, filename=tmp_file_name)\n\n self.assertTrue(np.array_equal(expected_raster, new_eop[test_case.feature_type][test_case.name]),\n msg='Tiff imported into new EOPatch is not the same as expected')\n self.assertTrue(np.array_equal(expected_raster, old_eop[test_case.feature_type][test_case.name]),\n msg='Tiff imported into old EOPatch is not the same as expected')\n self.assertEqual(expected_raster.dtype, new_eop[test_case.feature_type][test_case.name].dtype,\n msg='Tiff imported into new EOPatch has different dtype as expected')\n\n def test_export2tiff_wrong_format(self):\n data = np.arange(10*3*2*6, dtype=float).reshape(10, 3, 2, 6)\n\n self.eopatch.data['data'] = data\n\n for bands, times in [([2, 'string', 1, 0], [1, 7, 0, 2, 3]),\n ([2, 3, 1, 0], [1, 7, 'string', 2, 3])]:\n with tempfile.TemporaryDirectory() as tmp_dir_name, self.assertRaises(ValueError):\n tmp_file_name = 'temp_file.tiff'\n task = ExportToTiff((FeatureType.DATA, 'data'), folder=tmp_dir_name,\n band_indices=bands, date_indices=times, image_dtype=data.dtype)\n task.execute(self.eopatch, filename=tmp_file_name)\n\n @patch('logging.Logger.warning')\n def test_export2tiff_wrong_feature(self, mocked_logger):\n\n with tempfile.TemporaryDirectory() as tmp_dir_name:\n tmp_file_name = 'temp_file.tiff'\n feature = FeatureType.MASK_TIMELESS, 'feature-not-present'\n\n export_task = ExportToTiff(feature, folder=tmp_dir_name, fail_on_missing=False)\n export_task.execute(self.eopatch, 
filename=tmp_file_name)\n assert mocked_logger.call_count == 1\n val_err_tup, _ = mocked_logger.call_args\n val_err, = val_err_tup\n assert str(val_err) == 'Feature feature-not-present of type FeatureType.MASK_TIMELESS ' \\\n 'was not found in EOPatch'\n\n with self.assertRaises(ValueError):\n export_task_fail = ExportToTiff(feature, folder=tmp_dir_name, fail_on_missing=True)\n export_task_fail.execute(self.eopatch, filename=tmp_file_name)\n\n def test_export2tiff_separate_timestamps(self):\n test_case = self.test_cases[-1]\n eopatch = copy.deepcopy(self.eopatch)\n eopatch[test_case.feature_type][test_case.name] = test_case.data\n eopatch.timestamp = self.eopatch.timestamp[:test_case.data.shape[0]]\n\n with tempfile.TemporaryDirectory() as tmp_dir_name:\n tmp_file_name = 'temp_file_*'\n tmp_file_name_reproject = 'temp_file_4326_%Y%m%d.tif'\n feature = test_case.feature_type, test_case.name\n\n export_task = ExportToTiff(feature,\n band_indices=test_case.bands, date_indices=test_case.times)\n full_path = os.path.join(tmp_dir_name, tmp_file_name)\n export_task.execute(eopatch, filename=full_path)\n\n for timestamp in eopatch.timestamp:\n expected_path = os.path.join(tmp_dir_name, timestamp.strftime('temp_file_%Y%m%dT%H%M%S.tif'))\n self.assertTrue(os.path.exists(expected_path), f'Path {expected_path} does not exist')\n\n full_path = os.path.join(tmp_dir_name, tmp_file_name_reproject)\n export_task = ExportToTiff(feature, folder=full_path,\n band_indices=test_case.bands, date_indices=test_case.times,\n crs='EPSG:4326', compress='lzw')\n export_task.execute(eopatch)\n\n for timestamp in eopatch.timestamp:\n expected_path = os.path.join(tmp_dir_name, timestamp.strftime(tmp_file_name_reproject))\n self.assertTrue(os.path.exists(expected_path), f'Path {expected_path} does not exist')\n\n\nclass TestImportTiff(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.eopatch = EOPatch.load(os.path.join(os.path.dirname(os.path.realpath(__file__)),\n '../../../example_data/TestEOPatch'))\n\n def test_import_tiff_subset(self):\n path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../example_data/import-tiff-test1.tiff')\n\n mask_feature = FeatureType.MASK_TIMELESS, 'TEST_TIF'\n mask_type, mask_name = mask_feature\n\n task = ImportFromTiff(mask_feature, path)\n task.execute(self.eopatch)\n\n tiff_img = read_data(path)\n\n self.assertTrue(np.array_equal(tiff_img[20: 53, 21: 54], self.eopatch[mask_type][mask_name][..., 0]),\n msg='Imported tiff data should be the same as original')\n\n def test_import_tiff_intersecting(self):\n path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../example_data/import-tiff-test2.tiff')\n\n mask_feature = FeatureType.MASK_TIMELESS, 'TEST_TIF'\n mask_type, mask_name = mask_feature\n no_data_value = 1.0\n\n task = ImportFromTiff(mask_feature, path, image_dtype=np.float64, no_data_value=no_data_value)\n task.execute(self.eopatch)\n\n tiff_img = read_data(path)\n\n self.assertTrue(np.array_equal(tiff_img[-6:, :3, :], self.eopatch[mask_type][mask_name][:6, -3:, :]),\n msg='Imported tiff data should be the same as original')\n feature_dtype = self.eopatch[mask_type][mask_name].dtype\n self.assertEqual(feature_dtype, np.float64,\n msg='Feature should have dtype numpy.float64 but {} found'.format(feature_dtype))\n\n self.eopatch[mask_type][mask_name][:6, -3:, :] = no_data_value\n unique_values = list(np.unique(self.eopatch[mask_type][mask_name][:6, -3:, :]))\n self.assertEqual(unique_values, [no_data_value],\n msg='No data values 
should all be equal to {}'.format(no_data_value))\n\n\n@mock_s3\ndef _create_s3_bucket(bucket_name):\n s3resource = boto3.resource('s3', region_name='eu-central-1')\n bucket = s3resource.Bucket(bucket_name)\n\n if bucket.creation_date: # If bucket already exists\n for key in bucket.objects.all():\n key.delete()\n bucket.delete()\n\n s3resource.create_bucket(Bucket=bucket_name,\n CreateBucketConfiguration={'LocationConstraint': 'eu-central-1'})\n\n\n@mock_s3\nclass TestS3ExportAndImport(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n bucket_name = 'mocked-test-bucket'\n _create_s3_bucket(bucket_name)\n\n cls.eopatch = EOPatch.load(os.path.join(os.path.dirname(os.path.realpath(__file__)),\n '../../../example_data/TestEOPatch'))\n cls.path = f's3://{bucket_name}/some-folder'\n\n def test_timeless_feature(self):\n feature = FeatureType.DATA_TIMELESS, 'DEM'\n filename = 'relative-path/my-filename.tiff'\n\n export_task = ExportToTiff(feature, folder=self.path)\n import_task = ImportFromTiff(feature, folder=self.path)\n\n export_task.execute(self.eopatch, filename=filename)\n new_eopatch = import_task.execute(self.eopatch, filename=filename)\n\n self.assertTrue(np.array_equal(new_eopatch[feature], self.eopatch[feature]))\n\n def test_time_dependent_feature(self):\n feature = FeatureType.DATA, 'NDVI'\n filename_export = 'relative-path/*.tiff'\n filename_import = [f'relative-path/{timestamp.strftime(\"%Y%m%dT%H%M%S\")}.tiff'\n for timestamp in self.eopatch.timestamp]\n\n export_task = ExportToTiff(feature, folder=self.path)\n import_task = ImportFromTiff(feature, folder=self.path, timestamp_size=68)\n\n export_task.execute(self.eopatch, filename=filename_export)\n new_eopatch = import_task.execute(filename=filename_import)\n\n self.assertTrue(np.array_equal(new_eopatch[feature], self.eopatch[feature]))\n\n self.eopatch.timestamp[-1] = datetime.datetime(2020, 10, 10)\n filename_import = [f'relative-path/{timestamp.strftime(\"%Y%m%dT%H%M%S\")}.tiff'\n for timestamp in self.eopatch.timestamp]\n\n with self.assertRaises(ResourceNotFound):\n import_task.execute(filename=filename_import)\n\n def test_time_dependent_feature_with_timestamps(self):\n feature = FeatureType.DATA, 'NDVI'\n filename = 'relative-path/%Y%m%dT%H%M%S.tiff'\n\n export_task = ExportToTiff(feature, folder=self.path)\n import_task = ImportFromTiff(feature, folder=self.path)\n\n export_task.execute(self.eopatch, filename=filename)\n new_eopatch = import_task.execute(self.eopatch, filename=filename)\n\n self.assertTrue(np.array_equal(new_eopatch[feature], self.eopatch[feature]))\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"\"\"\"\nModule for classification helper classes and classification task.\n\nCredits:\nCopyright (c) 2017-2019 Matej Aleksandrov, Matej Batič, Andrej Burja, Eva Erzin (Sinergise)\nCopyright (c) 2017-2019 Grega Milčinski, Matic Lubej, Devis Peresutti, Jernej Puc, Tomislav Slijepčević (Sinergise)\nCopyright (c) 2017-2019 Blaž Sovdat, Nejc Vesel, Jovan Višnjić, Anže Zupanc, Lojze Žust (Sinergise)\n\nThis source code is licensed under the MIT license found in the LICENSE\nfile in the root directory of this source tree.\n\"\"\"\n# pylint: disable=invalid-name\n\nimport itertools\nfrom abc import ABC, abstractmethod\n\nimport numpy as np\n\nfrom eolearn.core import EOTask\n\nfrom .utilities import rolling_window\n\n\nclass ImageBaseClassifier(ABC):\n \"\"\" Abstract class for image classifiers\n\n Image Classifier extends the receptive field of trained classifier with smaller\n receptive field over entire image. The classifier's receptive field is\n usually small, i.e.:\n\n - pixel based classifier has receptive field `(1,1)`\n - patch based classifier has receptive field `(num_pixels_y, num_pixels_x)`\n\n Image Classifier divides the image into non-overlapping pieces of same size\n as trained classifier's receptive field and runs classifier over them thus\n producing a classification mask of the same size as image.\n\n The classifier can be of any type as long as it has the following two\n methods implemented:\n\n - `predict(X)`\n - `predict_proba(X)`\n\n This is true for all classifiers that follow scikit-learn's API.\n The APIs of scikit-learn's objects is described\n at: http://scikit-learn.org/stable/developers/contributing.html#apis-of-scikit-learn-objects.\n \"\"\"\n\n def __init__(self, classifier, receptive_field):\n \"\"\"\n :param classifier: The actual trained classifier that will be executed over entire image\n :type classifier: object\n :param receptive_field: Sensitive area of the classifier ((1,1) for pixel based or (n,m) for patch base)\n :type receptive_field: tuple, (n_rows, n_columns)\n \"\"\"\n self.receptive_field = receptive_field\n\n self._check_classifier(classifier)\n self.classifier = classifier\n\n self._samples = None\n self._image_size = None\n\n @staticmethod\n def _check_classifier(classifier):\n \"\"\"\n Check if the classifier implements predict and predict_proba methods.\n \"\"\"\n predict = getattr(classifier, \"predict\", None)\n if not callable(predict):\n raise ValueError('Classifier does not have predict method!')\n\n predict_proba = getattr(classifier, \"predict_proba\", None)\n if not callable(predict_proba):\n raise ValueError('Classifier does not have predict_proba method!')\n\n def _check_image(self, X):\n \"\"\"\n Checks the image size and its compatibility with classifier's receptive field.\n\n At this moment it is required that image size = K * receptive_field. 
This will\n be relaxed in future with the introduction of padding.\n \"\"\"\n\n if (len(X.shape) < 3) or (len(X.shape) > 4):\n raise ValueError('Input has to have shape [n_samples, n_pixels_y, n_pixels_x] '\n 'or [n_samples, n_pixels_y, n_pixels_x, n_bands].')\n\n self._samples = X.shape[0]\n self._image_size = X.shape[1:3]\n\n if (self._image_size[0] % self.receptive_field[0]) or (self._image_size[0] % self.receptive_field[0]):\n raise ValueError('Image (%d,%d) and receptive fields (%d,%d) mismatch.\\n'\n 'Resize your image to be divisible with receptive field.'\n % (self._image_size[0], self._image_size[0], self.receptive_field[0],\n self.receptive_field[1]))\n\n @staticmethod\n def _transform_input(X):\n \"\"\"\n Transform the input in the form expected by the classifier. For example reshape matrix to vector.\n \"\"\"\n return X\n\n @abstractmethod\n def image_predict(self, X):\n \"\"\" Predicts class label for the entire image.\n\n :param X: Data for prediction of shape `(n_samples, n_pixels_y, n_pixels_x, n_bands)`\n :type X: np.ndarray\n :returns: Predicted labels of shape `(n_samples,)` or `(n_samples, n_pixels_y, n_pixels_x)`\n :rtype: np.ndarray\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def image_predict_proba(self, X):\n \"\"\" Predicts class probabilities for the entire image.\n\n :param X: Data for prediction of shape `(n_samples, n_pixels_y, n_pixels_x, n_bands)`\n :type X: np.ndarray\n :returns: Predicted probabilities of shape `(n_samples,)` or `(n_samples, n_pixels_y, n_pixels_x, n_classes)`\n :rtype: np.ndarray\n \"\"\"\n raise NotImplementedError\n\n\nclass ImagePixelClassifier(ImageBaseClassifier):\n \"\"\" Performs a per-pixel classification\n\n It divides the image into individual pixels, runs classifier and collects the result in the shape of the input\n image.\n \"\"\"\n\n def __init__(self, classifier):\n \"\"\"\n :param classifier: The actual trained classifier that will be executed over entire image\n :type classifier: object\n \"\"\"\n ImageBaseClassifier.__init__(self, classifier, (1, 1))\n\n def image_predict(self, X):\n \"\"\" Predicts class label for the entire image.\n\n :param X: Data for prediction of shape `(n_samples, n_pixels_y, n_pixels_x, n_bands)`\n :type X: np.ndarray\n :returns: Predicted labels of shape `(n_samples,)` or `(n_samples, n_pixels_y, n_pixels_x)`\n :rtype: np.ndarray\n \"\"\"\n self._check_image(X)\n\n new_shape = (X.shape[0] * X.shape[1] * X.shape[2],)\n\n if len(X.shape) == 4:\n new_shape += (X.shape[3],)\n\n pixels = X.reshape(new_shape)\n\n predictions = self.classifier.predict(self._transform_input(pixels))\n\n return predictions.reshape(X.shape[0], X.shape[1], X.shape[2])\n\n def image_predict_proba(self, X):\n \"\"\" Predicts class probabilities for the entire image.\n\n :param X: Data for prediction of shape `(n_samples, n_pixels_y, n_pixels_x, n_bands)`\n :type X: np.ndarray\n :returns: Predicted probabilities of shape `(n_samples,)` or `(n_samples, n_pixels_y, n_pixels_x, n_classes)`\n :rtype: np.ndarray\n \"\"\"\n self._check_image(X)\n\n new_shape = (X.shape[0] * X.shape[1] * X.shape[2],)\n\n if len(X.shape) == 4:\n new_shape += (X.shape[3],)\n\n pixels = X.reshape(new_shape)\n\n probabilities = self.classifier.predict_proba(self._transform_input(pixels))\n\n return probabilities.reshape(X.shape[0], X.shape[1], X.shape[2],\n probabilities.shape[1])\n\n\nclass ImagePatchClassifier(ImageBaseClassifier):\n \"\"\" Performs a per-patch classification\n\n It divides the image into non-overlapping patches of same 
size as trained classifier's receptieve field and\n runs classifier over them thus producing a classification mask of the same size as image.\n \"\"\"\n\n def _to_patches(self, X):\n \"\"\" Reshapes input to patches of the size of classifier's receptive field.\n\n For example:\n\n input X shape: [n_samples, n_pixels_y, n_pixels_x, n_bands]\n\n output: [n_samples * n_pixels_y/receptive_field_y * n_pixels_x/receptive_field_x,\n receptive_field_y, receptive_field_x, n_bands]\n \"\"\"\n\n window = self.receptive_field\n asteps = self.receptive_field\n\n if len(X.shape) == 4:\n window += (0,)\n asteps += (1,)\n\n image_view = rolling_window(X, window, asteps)\n\n new_shape = image_view.shape\n\n # this makes a copy of the array? can we do without reshaping?\n image_view = image_view.reshape((new_shape[0] * new_shape[1] * new_shape[2],) + new_shape[3:])\n\n if len(X.shape) == 4:\n image_view = np.moveaxis(image_view, 1, -1)\n\n return image_view, new_shape\n\n def image_predict(self, X):\n \"\"\" Predicts class label for the entire image.\n\n :param X: Data for prediction of shape `(n_samples, n_pixels_y, n_pixels_x, n_bands)`\n :type X: np.ndarray\n :returns: Predicted labels of shape `(n_samples,)` or `(n_samples, n_pixels_y, n_pixels_x)`\n :rtype: np.ndarray\n \"\"\"\n self._check_image(X)\n\n patches, patches_shape = self._to_patches(X)\n\n predictions = self.classifier.predict(self._transform_input(patches))\n\n image_predictions = predictions.reshape(patches_shape[0:3])\n\n image_results = np.zeros((self._samples,) + self._image_size)\n\n nx, ny = self.receptive_field\n row_steps = self._image_size[0] // nx\n col_steps = self._image_size[1] // ny\n\n # how can this be optimised?\n for i, j, k in itertools.product(range(row_steps), range(col_steps), range(self._samples)):\n image_results[k, nx * i:nx * (i + 1), ny * j:ny * (j + 1)] = image_predictions[k, i, j]\n\n return image_results\n\n def image_predict_proba(self, X):\n \"\"\" Predicts class probabilities for the entire image.\n\n :param X: Data for prediction of shape `(n_samples, n_pixels_y, n_pixels_x, n_bands)`\n :type X: np.ndarray\n :returns: Predicted probabilities of shape `(n_samples,)` or `(n_samples, n_pixels_y, n_pixels_x, n_classes)`\n :rtype: np.ndarray\n \"\"\"\n self._check_image(X)\n\n patches, patches_shape = self._to_patches(X)\n\n probabilities = self.classifier.predict_proba(self._transform_input(patches))\n\n image_probabilities = probabilities.reshape(patches_shape[0:3] + (probabilities.shape[1],))\n\n image_results = np.zeros((self._samples,) + self._image_size + (probabilities.shape[1],))\n\n nx, ny = self.receptive_field\n row_steps = self._image_size[0] // nx\n col_steps = self._image_size[1] // ny\n\n # how can this be optimised?\n for i, j, k in itertools.product(range(row_steps), range(col_steps), range(self._samples)):\n image_results[k, nx * i:nx * (i + 1), ny * j:ny * (j + 1), :] = image_probabilities[k, i, j, :]\n\n return image_results\n\n\nclass ImagePixel2PatchClassifier(ImageBaseClassifier):\n \"\"\" Pixel to patch classifier first performs classification on pixel level\n and then combines the results in user defined patches. In case of combining\n probabilities the weighted sum is taken over all pixels in a patch. 
In case\n of predictions the user defines what fraction of pixels within the patch\n has to belong to signal class ot be considered as signal.\n \"\"\"\n\n def __init__(self, classifier, patch_size, mode='mean_prob', target=None, target_threshold=None):\n \"\"\"\n :param classifier: The actual trained classifier that will be executed over entire image\n :type classifier: object\n :param patch_size: A tuple defining `(n_rows, n_columns)`\n :type patch_size: (int, int)\n :param mode: The way predictions are obtained from prediction probabilities\n :type mode: str\n :param target: Target class value. Set the patch class to this target class if its fractional representation\n within this patch is above the target_threshols\n :param target: int or None\n :param target_threshold: A target prediction threshold\n :type target_threshold: float or None\n \"\"\"\n self.pixel_classifier = ImagePixelClassifier(classifier)\n self.patch_size = patch_size\n self.target = target\n self.target_threshold = target_threshold\n\n self.mode = mode\n\n ImageBaseClassifier.__init__(self, classifier, (1, 1))\n\n def _to_patches(self, X):\n \"\"\" Reshapes input to patches of the size of classifier's receptive field.\n\n For example:\n\n input X shape: [n_samples, n_pixels_y, n_pixels_x, n_bands]\n\n output: [n_samples * n_pixels_y/receptive_field_y * n_pixels_x/receptive_field_x,\n receptive_field_y, receptive_field_x, n_bands]\n \"\"\"\n window = self.patch_size\n asteps = self.patch_size\n\n if len(X.shape) == 4:\n window += (0,)\n asteps += (1,)\n\n image_view = rolling_window(X, window, asteps)\n\n new_shape = image_view.shape\n\n return image_view, new_shape\n\n def _target(self, array):\n unique, counts = np.unique(array, return_counts=True)\n valuecount = dict(zip(unique, counts))\n\n return 1 if self.target in valuecount.keys() and \\\n valuecount[self.target] / np.ma.size(array) >= self.target_threshold else 0\n\n def image_predict(self, X):\n \"\"\" Predicts class label for the entire image.\n\n :param X: Data for prediction of shape `(n_samples, n_pixels_y, n_pixels_x, n_bands)`\n :type X: np.ndarray\n :returns: Predicted labels of shape `(n_samples,)` or `(n_samples, n_pixels_y, n_pixels_x)`\n :rtype: np.ndarray\n \"\"\"\n self._check_image(X)\n\n if self.mode == 'majority_class':\n predictions = self.pixel_classifier.image_predict(X)\n\n elif self.mode == 'mean_prob':\n probabilities = self.image_predict_proba(X)\n predictions = (probabilities[..., self.target] > self.target_threshold).astype(int)\n\n patches, _ = self._to_patches(predictions)\n\n row_steps = self._image_size[0] // self.patch_size[0]\n col_steps = self._image_size[1] // self.patch_size[1]\n\n # how can this be optimised?\n for i, j, k in itertools.product(range(row_steps), range(col_steps), range(self._samples)):\n patches[k, i, j] = self._target(patches[k, i, j])\n\n return predictions\n\n def image_predict_proba(self, X):\n \"\"\" Predicts class probabilities for the entire image.\n\n :param X: Data for prediction of shape `(n_samples, n_pixels_y, n_pixels_x, n_bands)`\n :type X: np.ndarray\n :returns: Predicted probabilities of shape `(n_samples,)` or `(n_samples, n_pixels_y, n_pixels_x, n_classes)`\n :rtype: np.ndarray\n \"\"\"\n self._check_image(X)\n\n probabilities = self.pixel_classifier.image_predict_proba(X)\n\n patches, _ = self._to_patches(probabilities)\n\n row_steps = self._image_size[0] // self.patch_size[0]\n col_steps = self._image_size[1] // self.patch_size[1]\n\n ps = self.patch_size[0] * self.patch_size[1]\n\n # how 
can this be optimised?\n for i, j, k in itertools.product(range(row_steps), range(col_steps), range(self._samples)):\n patches[k, i, j, 0] = np.sum(patches[k, i, j, 0]) / ps\n patches[k, i, j, 1] = np.sum(patches[k, i, j, 1]) / ps\n\n return probabilities\n\n\nclass ImageClassificationMaskTask(EOTask):\n \"\"\" This task applies a pixel-based uni-temporal classifier to each image in the patch and appends to each image\n the classification mask.\n \"\"\"\n def __init__(self, input_feature, output_feature, classifier):\n \"\"\" Run a classification task on an EOPatch feature\n\n Classifier is an instance of the ImageBaseClassifier that maps [w, h, d] numpy arrays (d-channel images)\n into [w, h, 1] numpy arrays (classification masks).\n\n :param input_feature: Feature which will be classified\n :type input_feature: (FeatureType, str)\n :param output_feature: Feature where classification results will be saved\n :type output_feature: (FeatureType, str)\n :param classifier: A classifier that works over [n, w, h, d]-dimensional numpy arrays.\n :type classifier: ImageBaseClassifier\n \"\"\"\n self.input_feature = self._parse_features(input_feature)\n self.output_feature = self._parse_features(output_feature)\n self.classifier = classifier\n\n def execute(self, eopatch):\n \"\"\" Transforms an [n, w, h, d] eopatch into an [n, w, h, 1] eopatch by appending the classification mask to it.\n\n :param eopatch: An input EOPatch\n :type eopatch: EOPatch\n :return: Outputs EOPatch with n classification masks appended to out_feature_type with out_feature_name key\n :rtype: EOPatch\n \"\"\"\n in_type, in_name = next(self.input_feature(eopatch))\n # use the parsed output feature, otherwise the result would overwrite the input feature\n out_type, out_name = next(self.output_feature())\n\n eopatch[out_type][out_name] = self.classifier.image_predict(eopatch[in_type][in_name])\n\n return eopatch\n"
] | [
[
"numpy.nanmax",
"numpy.nanpercentile",
"numpy.nanmedian",
"numpy.isfinite",
"numpy.isnan",
"numpy.dstack",
"numpy.nanargmin",
"numpy.zeros_like",
"numpy.nanmean",
"numpy.floor",
"numpy.argsort",
"numpy.nanstd",
"numpy.array",
"numpy.where",
"numpy.empty"
],
[
"numpy.arange",
"numpy.array",
"numpy.zeros",
"numpy.empty"
],
[
"numpy.arange",
"numpy.array_equal",
"numpy.unique"
],
[
"numpy.unique",
"numpy.moveaxis",
"numpy.zeros",
"numpy.sum",
"numpy.ma.size"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.13",
"1.16",
"1.9",
"1.18",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
MohamedAli1995/Cifar-100-Classifier | [
"924704a81ce13062825a88b90b80e8ac2ba45d63"
] | [
"src/base/base_train.py"
] | [
"import tensorflow as tf\n\n\nclass BaseTrain:\n \"\"\"Standard base_train-class for easy multiple-inheritance.\n It is responsible for defining the functions to be implemented with any child.\n\n Attributes:\n sess: Tensorflow session to use.\n model: Model to be trained.\n data: Data_loader object to interact with dataset.\n config: Config object to store data related to training, testing and validation.\n logger: Logger object to use tensorboard.\n \"\"\"\n\n def __init__(self, sess, model, data, config, logger):\n self.model = model\n self.config = config\n self.sess = sess\n self.data = data\n self.logger = logger\n if not self.config.pretrain: # If not pretrain then initialize variables.\n self.init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())\n self.sess.run(self.init)\n\n def train(self):\n \"\"\"Train the model for the number of epochs in config.num_epochs.\n Calls validate_epoch if config.use_val is set to true and per config.val_per_epoch.\n Returns:\n\n \"\"\"\n for cur_epoch in range(self.model.cur_epoch_tensor.eval(self.sess), self.config.num_epochs + 1, 1):\n self.data.prepare_new_epoch_data()\n self.train_epoch()\n if self.config.use_val and (\n cur_epoch % self.config.val_per_epoch == 0 or cur_epoch == self.config.num_epochs):\n self.validate_epoch()\n\n self.sess.run(self.model.increment_cur_epoch_tensor)\n\n def train_epoch(self):\n \"\"\"Implements the logic of training_epoch:\n -Loop over the batches of the training data and call the train step for each.\n -Add any summaries you want using the summary\n \"\"\"\n raise NotImplemented\n\n def train_step(self):\n \"\"\"Implements the logic of the train step:\n -Run the tensorflow session\n -Returns:\n Any of the metrics needs to be summarized.\n \"\"\"\n\n raise NotImplementedError\n\n def validate_epoch(self):\n \"\"\"Implements the logic of validation_epoch:\n -Loop over the batches of the validation data and call the validate step for each.\n -Add any summaries you want using the summary\n \"\"\"\n raise NotImplemented\n\n def validate_step(self):\n \"\"\"Implements the logic of the validate step:\n -Run the tensorflow session\n -Returns:\n Any of the metrics needs to be summarized.\n \"\"\"\n raise NotImplemented\n"
] | [
[
"tensorflow.global_variables_initializer",
"tensorflow.local_variables_initializer"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
sparkma/arrow | [
"62fd703a4ef0abbecb02397a06a630a9dee382d9"
] | [
"python/pyarrow/tests/test_parquet.py"
] | [
"# -*- coding: utf-8 -*-\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom collections import OrderedDict\nimport datetime\nimport decimal\nimport io\nimport json\nimport os\nimport six\nimport pickle\nimport pytest\n\nimport numpy as np\n\nimport pyarrow as pa\nfrom pyarrow.compat import guid, u, BytesIO, unichar, PY2\nfrom pyarrow.pandas_compat import _pandas_api\nfrom pyarrow.tests import util\nfrom pyarrow.filesystem import LocalFileSystem, FileSystem\n\ntry:\n import pyarrow.parquet as pq\nexcept ImportError:\n pq = None\n\n\ntry:\n import pandas as pd\n import pandas.util.testing as tm\n from .pandas_examples import dataframe_with_arrays, dataframe_with_lists\nexcept ImportError:\n pd = tm = None\n\n\n# Marks all of the tests in this module\n# Ignore these with pytest ... -m 'not parquet'\npytestmark = pytest.mark.parquet\n\n\[email protected](scope='module')\ndef datadir(datadir):\n return datadir / 'parquet'\n\n\ndef _write_table(table, path, **kwargs):\n # So we see the ImportError somewhere\n import pyarrow.parquet as pq\n\n if _pandas_api.is_data_frame(table):\n table = pa.Table.from_pandas(table)\n\n pq.write_table(table, path, **kwargs)\n return table\n\n\ndef _read_table(*args, **kwargs):\n return pq.read_table(*args, **kwargs)\n\n\ndef _roundtrip_table(table, read_table_kwargs=None,\n write_table_kwargs=None):\n read_table_kwargs = read_table_kwargs or {}\n write_table_kwargs = write_table_kwargs or {}\n\n buf = io.BytesIO()\n _write_table(table, buf, **write_table_kwargs)\n buf.seek(0)\n return _read_table(buf, **read_table_kwargs)\n\n\ndef _check_roundtrip(table, expected=None, read_table_kwargs=None,\n **write_table_kwargs):\n if expected is None:\n expected = table\n\n read_table_kwargs = read_table_kwargs or {}\n\n # intentionally check twice\n result = _roundtrip_table(table, read_table_kwargs=read_table_kwargs,\n write_table_kwargs=write_table_kwargs)\n assert result.equals(expected)\n result = _roundtrip_table(result, read_table_kwargs=read_table_kwargs,\n write_table_kwargs=write_table_kwargs)\n assert result.equals(expected)\n\n\ndef _roundtrip_pandas_dataframe(df, write_kwargs):\n table = pa.Table.from_pandas(df)\n\n buf = io.BytesIO()\n _write_table(table, buf, **write_kwargs)\n\n buf.seek(0)\n table1 = _read_table(buf)\n return table1.to_pandas()\n\n\[email protected]('dtype', [int, float])\ndef test_single_pylist_column_roundtrip(tempdir, dtype):\n filename = tempdir / 'single_{}_column.parquet'.format(dtype.__name__)\n data = [pa.array(list(map(dtype, range(5))))]\n table = pa.Table.from_arrays(data, names=['a'])\n _write_table(table, filename)\n table_read = _read_table(filename)\n for i in range(table.num_columns):\n col_written = table[i]\n col_read = table_read[i]\n assert table.field(i).name == 
table_read.field(i).name\n assert col_read.num_chunks == 1\n data_written = col_written.chunk(0)\n data_read = col_read.chunk(0)\n assert data_written.equals(data_read)\n\n\ndef alltypes_sample(size=10000, seed=0, categorical=False):\n np.random.seed(seed)\n arrays = {\n 'uint8': np.arange(size, dtype=np.uint8),\n 'uint16': np.arange(size, dtype=np.uint16),\n 'uint32': np.arange(size, dtype=np.uint32),\n 'uint64': np.arange(size, dtype=np.uint64),\n 'int8': np.arange(size, dtype=np.int16),\n 'int16': np.arange(size, dtype=np.int16),\n 'int32': np.arange(size, dtype=np.int32),\n 'int64': np.arange(size, dtype=np.int64),\n 'float32': np.arange(size, dtype=np.float32),\n 'float64': np.arange(size, dtype=np.float64),\n 'bool': np.random.randn(size) > 0,\n # TODO(wesm): Test other timestamp resolutions now that arrow supports\n # them\n 'datetime': np.arange(\"2016-01-01T00:00:00.001\", size,\n dtype='datetime64[ms]'),\n 'str': pd.Series([str(x) for x in range(size)]),\n 'empty_str': [''] * size,\n 'str_with_nulls': [None] + [str(x) for x in range(size - 2)] + [None],\n 'null': [None] * size,\n 'null_list': [None] * 2 + [[None] * (x % 4) for x in range(size - 2)],\n }\n if categorical:\n arrays['str_category'] = arrays['str'].astype('category')\n return pd.DataFrame(arrays)\n\n\[email protected]\[email protected]('chunk_size', [None, 1000])\ndef test_pandas_parquet_2_0_roundtrip(tempdir, chunk_size):\n df = alltypes_sample(size=10000, categorical=True)\n\n filename = tempdir / 'pandas_roundtrip.parquet'\n arrow_table = pa.Table.from_pandas(df)\n assert arrow_table.schema.pandas_metadata is not None\n\n _write_table(arrow_table, filename, version=\"2.0\",\n coerce_timestamps='ms', chunk_size=chunk_size)\n table_read = pq.read_pandas(filename)\n assert table_read.schema.pandas_metadata is not None\n\n assert arrow_table.schema.metadata == table_read.schema.metadata\n\n df_read = table_read.to_pandas()\n tm.assert_frame_equal(df, df_read)\n\n\ndef test_set_data_page_size():\n arr = pa.array([1, 2, 3] * 1000000)\n t = pa.Table.from_arrays([arr], names=['f0'])\n\n # 128K, 256K, 512K\n page_sizes = [2 << 16, 2 << 17, 2 << 18]\n for target_page_size in page_sizes:\n _check_roundtrip(t, data_page_size=target_page_size)\n\n\[email protected]\ndef test_chunked_table_write():\n # ARROW-232\n df = alltypes_sample(size=10)\n\n batch = pa.RecordBatch.from_pandas(df)\n table = pa.Table.from_batches([batch] * 3)\n _check_roundtrip(table, version='2.0')\n\n df, _ = dataframe_with_lists()\n batch = pa.RecordBatch.from_pandas(df)\n table = pa.Table.from_batches([batch] * 3)\n _check_roundtrip(table, version='2.0')\n\n\[email protected]\ndef test_no_memory_map(tempdir):\n df = alltypes_sample(size=10)\n\n table = pa.Table.from_pandas(df)\n _check_roundtrip(table, read_table_kwargs={'memory_map': False},\n version='2.0')\n\n filename = str(tempdir / 'tmp_file')\n with open(filename, 'wb') as f:\n _write_table(table, f, version='2.0')\n table_read = pq.read_pandas(filename, memory_map=False)\n assert table_read.equals(table)\n\n\ndef test_special_chars_filename(tempdir):\n table = pa.Table.from_arrays([pa.array([42])], [\"ints\"])\n filename = \"foo # bar\"\n path = tempdir / filename\n assert not path.exists()\n _write_table(table, str(path))\n assert path.exists()\n table_read = _read_table(str(path))\n assert table_read.equals(table)\n\n\[email protected]\ndef test_empty_table_roundtrip():\n df = alltypes_sample(size=10)\n\n # Create a non-empty table to infer the types correctly, then slice to 0\n table = 
pa.Table.from_pandas(df)\n table = pa.Table.from_arrays(\n [col.chunk(0)[:0] for col in table.itercolumns()],\n names=table.schema.names)\n\n assert table.schema.field_by_name('null').type == pa.null()\n assert table.schema.field_by_name('null_list').type == pa.list_(pa.null())\n _check_roundtrip(table, version='2.0')\n\n\[email protected]\ndef test_empty_table_no_columns():\n df = pd.DataFrame()\n empty = pa.Table.from_pandas(df, preserve_index=False)\n _check_roundtrip(empty)\n\n\ndef test_empty_lists_table_roundtrip():\n # ARROW-2744: Shouldn't crash when writing an array of empty lists\n arr = pa.array([[], []], type=pa.list_(pa.int32()))\n table = pa.Table.from_arrays([arr], [\"A\"])\n _check_roundtrip(table)\n\n\[email protected]\ndef test_pandas_parquet_datetime_tz():\n s = pd.Series([datetime.datetime(2017, 9, 6)])\n s = s.dt.tz_localize('utc')\n\n s.index = s\n\n # Both a column and an index to hit both use cases\n df = pd.DataFrame({'tz_aware': s,\n 'tz_eastern': s.dt.tz_convert('US/Eastern')},\n index=s)\n\n f = BytesIO()\n\n arrow_table = pa.Table.from_pandas(df)\n\n _write_table(arrow_table, f, coerce_timestamps='ms')\n f.seek(0)\n\n table_read = pq.read_pandas(f)\n\n df_read = table_read.to_pandas()\n tm.assert_frame_equal(df, df_read)\n\n\[email protected]\[email protected](six.PY2, reason='datetime.timezone is available since '\n 'python version 3.2')\ndef test_datetime_timezone_tzinfo():\n value = datetime.datetime(2018, 1, 1, 1, 23, 45,\n tzinfo=datetime.timezone.utc)\n df = pd.DataFrame({'foo': [value]})\n\n _roundtrip_pandas_dataframe(df, write_kwargs={})\n\n\[email protected]\ndef test_pandas_parquet_custom_metadata(tempdir):\n df = alltypes_sample(size=10000)\n\n filename = tempdir / 'pandas_roundtrip.parquet'\n arrow_table = pa.Table.from_pandas(df)\n assert b'pandas' in arrow_table.schema.metadata\n\n _write_table(arrow_table, filename, version='2.0', coerce_timestamps='ms')\n\n metadata = pq.read_metadata(filename).metadata\n assert b'pandas' in metadata\n\n js = json.loads(metadata[b'pandas'].decode('utf8'))\n assert js['index_columns'] == [{'kind': 'range',\n 'name': None,\n 'start': 0, 'stop': 10000,\n 'step': 1}]\n\n\[email protected]\ndef test_pandas_parquet_column_multiindex(tempdir):\n df = alltypes_sample(size=10)\n df.columns = pd.MultiIndex.from_tuples(\n list(zip(df.columns, df.columns[::-1])),\n names=['level_1', 'level_2']\n )\n\n filename = tempdir / 'pandas_roundtrip.parquet'\n arrow_table = pa.Table.from_pandas(df)\n assert arrow_table.schema.pandas_metadata is not None\n\n _write_table(arrow_table, filename, version='2.0', coerce_timestamps='ms')\n\n table_read = pq.read_pandas(filename)\n df_read = table_read.to_pandas()\n tm.assert_frame_equal(df, df_read)\n\n\[email protected]\ndef test_pandas_parquet_2_0_roundtrip_read_pandas_no_index_written(tempdir):\n df = alltypes_sample(size=10000)\n\n filename = tempdir / 'pandas_roundtrip.parquet'\n arrow_table = pa.Table.from_pandas(df, preserve_index=False)\n js = arrow_table.schema.pandas_metadata\n assert not js['index_columns']\n # ARROW-2170\n # While index_columns should be empty, columns needs to be filled still.\n assert js['columns']\n\n _write_table(arrow_table, filename, version='2.0', coerce_timestamps='ms')\n table_read = pq.read_pandas(filename)\n\n js = table_read.schema.pandas_metadata\n assert not js['index_columns']\n\n assert arrow_table.schema.metadata == table_read.schema.metadata\n\n df_read = table_read.to_pandas()\n tm.assert_frame_equal(df, df_read)\n\n\[email protected]\ndef 
test_pandas_parquet_1_0_roundtrip(tempdir):\n size = 10000\n np.random.seed(0)\n df = pd.DataFrame({\n 'uint8': np.arange(size, dtype=np.uint8),\n 'uint16': np.arange(size, dtype=np.uint16),\n 'uint32': np.arange(size, dtype=np.uint32),\n 'uint64': np.arange(size, dtype=np.uint64),\n 'int8': np.arange(size, dtype=np.int16),\n 'int16': np.arange(size, dtype=np.int16),\n 'int32': np.arange(size, dtype=np.int32),\n 'int64': np.arange(size, dtype=np.int64),\n 'float32': np.arange(size, dtype=np.float32),\n 'float64': np.arange(size, dtype=np.float64),\n 'bool': np.random.randn(size) > 0,\n 'str': [str(x) for x in range(size)],\n 'str_with_nulls': [None] + [str(x) for x in range(size - 2)] + [None],\n 'empty_str': [''] * size\n })\n filename = tempdir / 'pandas_roundtrip.parquet'\n arrow_table = pa.Table.from_pandas(df)\n _write_table(arrow_table, filename, version='1.0')\n table_read = _read_table(filename)\n df_read = table_read.to_pandas()\n\n # We pass uint32_t as int64_t if we write Parquet version 1.0\n df['uint32'] = df['uint32'].values.astype(np.int64)\n\n tm.assert_frame_equal(df, df_read)\n\n\[email protected]\ndef test_multiple_path_types(tempdir):\n # Test compatibility with PEP 519 path-like objects\n path = tempdir / 'zzz.parquet'\n df = pd.DataFrame({'x': np.arange(10, dtype=np.int64)})\n _write_table(df, path)\n table_read = _read_table(path)\n df_read = table_read.to_pandas()\n tm.assert_frame_equal(df, df_read)\n\n # Test compatibility with plain string paths\n path = str(tempdir) + 'zzz.parquet'\n df = pd.DataFrame({'x': np.arange(10, dtype=np.int64)})\n _write_table(df, path)\n table_read = _read_table(path)\n df_read = table_read.to_pandas()\n tm.assert_frame_equal(df, df_read)\n\n\[email protected]\ndef test_pandas_column_selection(tempdir):\n size = 10000\n np.random.seed(0)\n df = pd.DataFrame({\n 'uint8': np.arange(size, dtype=np.uint8),\n 'uint16': np.arange(size, dtype=np.uint16)\n })\n filename = tempdir / 'pandas_roundtrip.parquet'\n arrow_table = pa.Table.from_pandas(df)\n _write_table(arrow_table, filename)\n table_read = _read_table(filename, columns=['uint8'])\n df_read = table_read.to_pandas()\n\n tm.assert_frame_equal(df[['uint8']], df_read)\n\n # ARROW-4267: Selection of duplicate columns still leads to these columns\n # being read uniquely.\n table_read = _read_table(filename, columns=['uint8', 'uint8'])\n df_read = table_read.to_pandas()\n\n tm.assert_frame_equal(df[['uint8']], df_read)\n\n\ndef _random_integers(size, dtype):\n # We do not generate integers outside the int64 range\n platform_int_info = np.iinfo('int_')\n iinfo = np.iinfo(dtype)\n return np.random.randint(max(iinfo.min, platform_int_info.min),\n min(iinfo.max, platform_int_info.max),\n size=size).astype(dtype)\n\n\ndef _test_dataframe(size=10000, seed=0):\n np.random.seed(seed)\n df = pd.DataFrame({\n 'uint8': _random_integers(size, np.uint8),\n 'uint16': _random_integers(size, np.uint16),\n 'uint32': _random_integers(size, np.uint32),\n 'uint64': _random_integers(size, np.uint64),\n 'int8': _random_integers(size, np.int8),\n 'int16': _random_integers(size, np.int16),\n 'int32': _random_integers(size, np.int32),\n 'int64': _random_integers(size, np.int64),\n 'float32': np.random.randn(size).astype(np.float32),\n 'float64': np.arange(size, dtype=np.float64),\n 'bool': np.random.randn(size) > 0,\n 'strings': [tm.rands(10) for i in range(size)],\n 'all_none': [None] * size,\n 'all_none_category': [None] * size\n })\n # TODO(PARQUET-1015)\n # df['all_none_category'] = 
df['all_none_category'].astype('category')\n return df\n\n\[email protected]\ndef test_pandas_parquet_native_file_roundtrip(tempdir):\n df = _test_dataframe(10000)\n arrow_table = pa.Table.from_pandas(df)\n imos = pa.BufferOutputStream()\n _write_table(arrow_table, imos, version=\"2.0\")\n buf = imos.getvalue()\n reader = pa.BufferReader(buf)\n df_read = _read_table(reader).to_pandas()\n tm.assert_frame_equal(df, df_read)\n\n\[email protected]\ndef test_parquet_incremental_file_build(tempdir):\n df = _test_dataframe(100)\n df['unique_id'] = 0\n\n arrow_table = pa.Table.from_pandas(df, preserve_index=False)\n out = pa.BufferOutputStream()\n\n writer = pq.ParquetWriter(out, arrow_table.schema, version='2.0')\n\n frames = []\n for i in range(10):\n df['unique_id'] = i\n arrow_table = pa.Table.from_pandas(df, preserve_index=False)\n writer.write_table(arrow_table)\n\n frames.append(df.copy())\n\n writer.close()\n\n buf = out.getvalue()\n result = _read_table(pa.BufferReader(buf))\n\n expected = pd.concat(frames, ignore_index=True)\n tm.assert_frame_equal(result.to_pandas(), expected)\n\n\[email protected]\ndef test_read_pandas_column_subset(tempdir):\n df = _test_dataframe(10000)\n arrow_table = pa.Table.from_pandas(df)\n imos = pa.BufferOutputStream()\n _write_table(arrow_table, imos, version=\"2.0\")\n buf = imos.getvalue()\n reader = pa.BufferReader(buf)\n df_read = pq.read_pandas(reader, columns=['strings', 'uint8']).to_pandas()\n tm.assert_frame_equal(df[['strings', 'uint8']], df_read)\n\n\[email protected]\ndef test_pandas_parquet_empty_roundtrip(tempdir):\n df = _test_dataframe(0)\n arrow_table = pa.Table.from_pandas(df)\n imos = pa.BufferOutputStream()\n _write_table(arrow_table, imos, version=\"2.0\")\n buf = imos.getvalue()\n reader = pa.BufferReader(buf)\n df_read = _read_table(reader).to_pandas()\n tm.assert_frame_equal(df, df_read)\n\n\[email protected]\ndef test_pandas_parquet_pyfile_roundtrip(tempdir):\n filename = tempdir / 'pandas_pyfile_roundtrip.parquet'\n size = 5\n df = pd.DataFrame({\n 'int64': np.arange(size, dtype=np.int64),\n 'float32': np.arange(size, dtype=np.float32),\n 'float64': np.arange(size, dtype=np.float64),\n 'bool': np.random.randn(size) > 0,\n 'strings': ['foo', 'bar', None, 'baz', 'qux']\n })\n\n arrow_table = pa.Table.from_pandas(df)\n\n with filename.open('wb') as f:\n _write_table(arrow_table, f, version=\"1.0\")\n\n data = io.BytesIO(filename.read_bytes())\n\n table_read = _read_table(data)\n df_read = table_read.to_pandas()\n tm.assert_frame_equal(df, df_read)\n\n\[email protected]\ndef test_pandas_parquet_configuration_options(tempdir):\n size = 10000\n np.random.seed(0)\n df = pd.DataFrame({\n 'uint8': np.arange(size, dtype=np.uint8),\n 'uint16': np.arange(size, dtype=np.uint16),\n 'uint32': np.arange(size, dtype=np.uint32),\n 'uint64': np.arange(size, dtype=np.uint64),\n 'int8': np.arange(size, dtype=np.int16),\n 'int16': np.arange(size, dtype=np.int16),\n 'int32': np.arange(size, dtype=np.int32),\n 'int64': np.arange(size, dtype=np.int64),\n 'float32': np.arange(size, dtype=np.float32),\n 'float64': np.arange(size, dtype=np.float64),\n 'bool': np.random.randn(size) > 0\n })\n filename = tempdir / 'pandas_roundtrip.parquet'\n arrow_table = pa.Table.from_pandas(df)\n\n for use_dictionary in [True, False]:\n _write_table(arrow_table, filename, version='2.0',\n use_dictionary=use_dictionary)\n table_read = _read_table(filename)\n df_read = table_read.to_pandas()\n tm.assert_frame_equal(df, df_read)\n\n for write_statistics in [True, False]:\n 
_write_table(arrow_table, filename, version='2.0',\n write_statistics=write_statistics)\n table_read = _read_table(filename)\n df_read = table_read.to_pandas()\n tm.assert_frame_equal(df, df_read)\n\n for compression in ['NONE', 'SNAPPY', 'GZIP', 'LZ4', 'ZSTD']:\n _write_table(arrow_table, filename, version='2.0',\n compression=compression)\n table_read = _read_table(filename)\n df_read = table_read.to_pandas()\n tm.assert_frame_equal(df, df_read)\n\n\ndef make_sample_file(table_or_df):\n if isinstance(table_or_df, pa.Table):\n a_table = table_or_df\n else:\n a_table = pa.Table.from_pandas(table_or_df)\n\n buf = io.BytesIO()\n _write_table(a_table, buf, compression='SNAPPY', version='2.0',\n coerce_timestamps='ms')\n\n buf.seek(0)\n return pq.ParquetFile(buf)\n\n\[email protected]\ndef test_parquet_metadata_api():\n df = alltypes_sample(size=10000)\n df = df.reindex(columns=sorted(df.columns))\n df.index = np.random.randint(0, 1000000, size=len(df))\n\n fileh = make_sample_file(df)\n ncols = len(df.columns)\n\n # Series of sniff tests\n meta = fileh.metadata\n repr(meta)\n assert meta.num_rows == len(df)\n assert meta.num_columns == ncols + 1 # +1 for index\n assert meta.num_row_groups == 1\n assert meta.format_version == '2.0'\n assert 'parquet-cpp' in meta.created_by\n assert isinstance(meta.serialized_size, int)\n assert isinstance(meta.metadata, dict)\n\n # Schema\n schema = fileh.schema\n assert meta.schema is schema\n assert len(schema) == ncols + 1 # +1 for index\n repr(schema)\n\n col = schema[0]\n repr(col)\n assert col.name == df.columns[0]\n assert col.max_definition_level == 1\n assert col.max_repetition_level == 0\n assert col.max_repetition_level == 0\n\n assert col.physical_type == 'BOOLEAN'\n assert col.converted_type == 'NONE'\n\n with pytest.raises(IndexError):\n schema[ncols + 1] # +1 for index\n\n with pytest.raises(IndexError):\n schema[-1]\n\n # Row group\n for rg in range(meta.num_row_groups):\n rg_meta = meta.row_group(rg)\n assert isinstance(rg_meta, pq.RowGroupMetaData)\n repr(rg_meta)\n\n for col in range(rg_meta.num_columns):\n col_meta = rg_meta.column(col)\n assert isinstance(col_meta, pq.ColumnChunkMetaData)\n repr(col_meta)\n\n with pytest.raises(IndexError):\n meta.row_group(-1)\n\n with pytest.raises(IndexError):\n meta.row_group(meta.num_row_groups + 1)\n\n rg_meta = meta.row_group(0)\n assert rg_meta.num_rows == len(df)\n assert rg_meta.num_columns == ncols + 1 # +1 for index\n assert rg_meta.total_byte_size > 0\n\n with pytest.raises(IndexError):\n col_meta = rg_meta.column(-1)\n\n with pytest.raises(IndexError):\n col_meta = rg_meta.column(ncols + 2)\n\n col_meta = rg_meta.column(0)\n assert col_meta.file_offset > 0\n assert col_meta.file_path == '' # created from BytesIO\n assert col_meta.physical_type == 'BOOLEAN'\n assert col_meta.num_values == 10000\n assert col_meta.path_in_schema == 'bool'\n assert col_meta.is_stats_set is True\n assert isinstance(col_meta.statistics, pq.Statistics)\n assert col_meta.compression == 'SNAPPY'\n assert col_meta.encodings == ('PLAIN', 'RLE')\n assert col_meta.has_dictionary_page is False\n assert col_meta.dictionary_page_offset is None\n assert col_meta.data_page_offset > 0\n assert col_meta.total_compressed_size > 0\n assert col_meta.total_uncompressed_size > 0\n with pytest.raises(NotImplementedError):\n col_meta.has_index_page\n with pytest.raises(NotImplementedError):\n col_meta.index_page_offset\n\n\[email protected]\[email protected](\n (\n 'data',\n 'type',\n 'physical_type',\n 'min_value',\n 'max_value',\n 
'null_count',\n 'num_values',\n 'distinct_count'\n ),\n [\n ([1, 2, 2, None, 4], pa.uint8(), 'INT32', 1, 4, 1, 4, 0),\n ([1, 2, 2, None, 4], pa.uint16(), 'INT32', 1, 4, 1, 4, 0),\n ([1, 2, 2, None, 4], pa.uint32(), 'INT32', 1, 4, 1, 4, 0),\n ([1, 2, 2, None, 4], pa.uint64(), 'INT64', 1, 4, 1, 4, 0),\n ([-1, 2, 2, None, 4], pa.int8(), 'INT32', -1, 4, 1, 4, 0),\n ([-1, 2, 2, None, 4], pa.int16(), 'INT32', -1, 4, 1, 4, 0),\n ([-1, 2, 2, None, 4], pa.int32(), 'INT32', -1, 4, 1, 4, 0),\n ([-1, 2, 2, None, 4], pa.int64(), 'INT64', -1, 4, 1, 4, 0),\n (\n [-1.1, 2.2, 2.3, None, 4.4], pa.float32(),\n 'FLOAT', -1.1, 4.4, 1, 4, 0\n ),\n (\n [-1.1, 2.2, 2.3, None, 4.4], pa.float64(),\n 'DOUBLE', -1.1, 4.4, 1, 4, 0\n ),\n (\n [u'', u'b', unichar(1000), None, u'aaa'], pa.binary(),\n 'BYTE_ARRAY', b'', unichar(1000).encode('utf-8'), 1, 4, 0\n ),\n (\n [True, False, False, True, True], pa.bool_(),\n 'BOOLEAN', False, True, 0, 5, 0\n ),\n (\n [b'\\x00', b'b', b'12', None, b'aaa'], pa.binary(),\n 'BYTE_ARRAY', b'\\x00', b'b', 1, 4, 0\n ),\n ]\n)\ndef test_parquet_column_statistics_api(data, type, physical_type, min_value,\n max_value, null_count, num_values,\n distinct_count):\n df = pd.DataFrame({'data': data})\n schema = pa.schema([pa.field('data', type)])\n table = pa.Table.from_pandas(df, schema=schema, safe=False)\n fileh = make_sample_file(table)\n\n meta = fileh.metadata\n\n rg_meta = meta.row_group(0)\n col_meta = rg_meta.column(0)\n\n stat = col_meta.statistics\n assert stat.has_min_max\n assert _close(type, stat.min, min_value)\n assert _close(type, stat.max, max_value)\n assert stat.null_count == null_count\n assert stat.num_values == num_values\n # TODO(kszucs) until parquet-cpp API doesn't expose HasDistinctCount\n # method, missing distinct_count is represented as zero instead of None\n assert stat.distinct_count == distinct_count\n assert stat.physical_type == physical_type\n\n\ndef _close(type, left, right):\n if type == pa.float32():\n return abs(left - right) < 1E-7\n elif type == pa.float64():\n return abs(left - right) < 1E-13\n else:\n return left == right\n\n\ndef test_statistics_convert_logical_types(tempdir):\n # ARROW-5166, ARROW-4139\n\n # (min, max, type)\n cases = [(10, 11164359321221007157, pa.uint64()),\n (10, 4294967295, pa.uint32()),\n (u\"ähnlich\", u\"öffentlich\", pa.utf8()),\n (datetime.time(10, 30, 0, 1000), datetime.time(15, 30, 0, 1000),\n pa.time32('ms')),\n (datetime.time(10, 30, 0, 1000), datetime.time(15, 30, 0, 1000),\n pa.time64('us')),\n (datetime.datetime(2019, 6, 24, 0, 0, 0, 1000),\n datetime.datetime(2019, 6, 25, 0, 0, 0, 1000),\n pa.timestamp('ms')),\n (datetime.datetime(2019, 6, 24, 0, 0, 0, 1000),\n datetime.datetime(2019, 6, 25, 0, 0, 0, 1000),\n pa.timestamp('us'))]\n\n for i, (min_val, max_val, typ) in enumerate(cases):\n t = pa.Table.from_arrays([pa.array([min_val, max_val], type=typ)],\n ['col'])\n path = str(tempdir / ('example{}.parquet'.format(i)))\n pq.write_table(t, path, version='2.0')\n pf = pq.ParquetFile(path)\n stats = pf.metadata.row_group(0).column(0).statistics\n assert stats.min == min_val\n assert stats.max == max_val\n\n\ndef test_parquet_write_disable_statistics(tempdir):\n table = pa.Table.from_pydict(\n {'a': pa.array([1, 2, 3]), 'b': pa.array(['a', 'b', 'c'])})\n _write_table(table, tempdir / 'data.parquet')\n meta = pq.read_metadata(tempdir / 'data.parquet')\n for col in [0, 1]:\n cc = meta.row_group(0).column(col)\n assert cc.is_stats_set is True\n assert cc.statistics is not None\n\n _write_table(table, tempdir / 
'data2.parquet', write_statistics=False)\n meta = pq.read_metadata(tempdir / 'data2.parquet')\n for col in [0, 1]:\n cc = meta.row_group(0).column(col)\n assert cc.is_stats_set is False\n assert cc.statistics is None\n\n _write_table(table, tempdir / 'data3.parquet', write_statistics=['a'])\n meta = pq.read_metadata(tempdir / 'data3.parquet')\n cc_a = meta.row_group(0).column(0)\n assert cc_a.is_stats_set is True\n assert cc_a.statistics is not None\n cc_b = meta.row_group(0).column(1)\n assert cc_b.is_stats_set is False\n assert cc_b.statistics is None\n\n\[email protected]\ndef test_compare_schemas():\n df = alltypes_sample(size=10000)\n\n fileh = make_sample_file(df)\n fileh2 = make_sample_file(df)\n fileh3 = make_sample_file(df[df.columns[::2]])\n\n # ParquetSchema\n assert isinstance(fileh.schema, pq.ParquetSchema)\n assert fileh.schema.equals(fileh.schema)\n assert fileh.schema == fileh.schema\n assert fileh.schema.equals(fileh2.schema)\n assert fileh.schema == fileh2.schema\n assert fileh.schema != 'arbitrary object'\n assert not fileh.schema.equals(fileh3.schema)\n assert fileh.schema != fileh3.schema\n\n # ColumnSchema\n assert isinstance(fileh.schema[0], pq.ColumnSchema)\n assert fileh.schema[0].equals(fileh.schema[0])\n assert fileh.schema[0] == fileh.schema[0]\n assert not fileh.schema[0].equals(fileh.schema[1])\n assert fileh.schema[0] != fileh.schema[1]\n assert fileh.schema[0] != 'arbitrary object'\n\n\ndef test_validate_schema_write_table(tempdir):\n # ARROW-2926\n simple_fields = [\n pa.field('POS', pa.uint32()),\n pa.field('desc', pa.string())\n ]\n\n simple_schema = pa.schema(simple_fields)\n\n # simple_table schema does not match simple_schema\n simple_from_array = [pa.array([1]), pa.array(['bla'])]\n simple_table = pa.Table.from_arrays(simple_from_array, ['POS', 'desc'])\n\n path = tempdir / 'simple_validate_schema.parquet'\n\n with pq.ParquetWriter(path, simple_schema,\n version='2.0',\n compression='snappy', flavor='spark') as w:\n with pytest.raises(ValueError):\n w.write_table(simple_table)\n\n\[email protected]\ndef test_column_of_arrays(tempdir):\n df, schema = dataframe_with_arrays()\n\n filename = tempdir / 'pandas_roundtrip.parquet'\n arrow_table = pa.Table.from_pandas(df, schema=schema)\n _write_table(arrow_table, filename, version=\"2.0\", coerce_timestamps='ms')\n table_read = _read_table(filename)\n df_read = table_read.to_pandas()\n tm.assert_frame_equal(df, df_read)\n\n\[email protected]\ndef test_coerce_timestamps(tempdir):\n from collections import OrderedDict\n # ARROW-622\n arrays = OrderedDict()\n fields = [pa.field('datetime64',\n pa.list_(pa.timestamp('ms')))]\n arrays['datetime64'] = [\n np.array(['2007-07-13T01:23:34.123456789',\n None,\n '2010-08-13T05:46:57.437699912'],\n dtype='datetime64[ms]'),\n None,\n None,\n np.array(['2007-07-13T02',\n None,\n '2010-08-13T05:46:57.437699912'],\n dtype='datetime64[ms]'),\n ]\n\n df = pd.DataFrame(arrays)\n schema = pa.schema(fields)\n\n filename = tempdir / 'pandas_roundtrip.parquet'\n arrow_table = pa.Table.from_pandas(df, schema=schema)\n\n _write_table(arrow_table, filename, version=\"2.0\", coerce_timestamps='us')\n table_read = _read_table(filename)\n df_read = table_read.to_pandas()\n\n df_expected = df.copy()\n for i, x in enumerate(df_expected['datetime64']):\n if isinstance(x, np.ndarray):\n df_expected['datetime64'][i] = x.astype('M8[us]')\n\n tm.assert_frame_equal(df_expected, df_read)\n\n with pytest.raises(ValueError):\n _write_table(arrow_table, filename, version='2.0',\n 
coerce_timestamps='unknown')\n\n\[email protected]\ndef test_coerce_timestamps_truncated(tempdir):\n \"\"\"\n ARROW-2555: Test that we can truncate timestamps when coercing if\n explicitly allowed.\n \"\"\"\n dt_us = datetime.datetime(year=2017, month=1, day=1, hour=1, minute=1,\n second=1, microsecond=1)\n dt_ms = datetime.datetime(year=2017, month=1, day=1, hour=1, minute=1,\n second=1)\n\n fields_us = [pa.field('datetime64', pa.timestamp('us'))]\n arrays_us = {'datetime64': [dt_us, dt_ms]}\n\n df_us = pd.DataFrame(arrays_us)\n schema_us = pa.schema(fields_us)\n\n filename = tempdir / 'pandas_truncated.parquet'\n table_us = pa.Table.from_pandas(df_us, schema=schema_us)\n\n _write_table(table_us, filename, version=\"2.0\", coerce_timestamps='ms',\n allow_truncated_timestamps=True)\n table_ms = _read_table(filename)\n df_ms = table_ms.to_pandas()\n\n arrays_expected = {'datetime64': [dt_ms, dt_ms]}\n df_expected = pd.DataFrame(arrays_expected)\n tm.assert_frame_equal(df_expected, df_ms)\n\n\[email protected]\ndef test_column_of_lists(tempdir):\n df, schema = dataframe_with_lists(parquet_compatible=True)\n\n filename = tempdir / 'pandas_roundtrip.parquet'\n arrow_table = pa.Table.from_pandas(df, schema=schema)\n _write_table(arrow_table, filename, version='2.0')\n table_read = _read_table(filename)\n df_read = table_read.to_pandas()\n\n if PY2:\n # assert_frame_equal fails when comparing datetime.date and\n # np.datetime64, even with check_datetimelike_compat=True so\n # convert the values to np.datetime64 instead\n for col in ['date32[day]_list', 'date64[ms]_list']:\n df[col] = df[col].apply(\n lambda x: list(map(np.datetime64, x)) if x else x\n )\n\n tm.assert_frame_equal(df, df_read)\n\n\[email protected]\ndef test_date_time_types(tempdir):\n t1 = pa.date32()\n data1 = np.array([17259, 17260, 17261], dtype='int32')\n a1 = pa.array(data1, type=t1)\n\n t2 = pa.date64()\n data2 = data1.astype('int64') * 86400000\n a2 = pa.array(data2, type=t2)\n\n t3 = pa.timestamp('us')\n start = pd.Timestamp('2001-01-01').value / 1000\n data3 = np.array([start, start + 1, start + 2], dtype='int64')\n a3 = pa.array(data3, type=t3)\n\n t4 = pa.time32('ms')\n data4 = np.arange(3, dtype='i4')\n a4 = pa.array(data4, type=t4)\n\n t5 = pa.time64('us')\n a5 = pa.array(data4.astype('int64'), type=t5)\n\n t6 = pa.time32('s')\n a6 = pa.array(data4, type=t6)\n\n ex_t6 = pa.time32('ms')\n ex_a6 = pa.array(data4 * 1000, type=ex_t6)\n\n t7 = pa.timestamp('ns')\n start = pd.Timestamp('2001-01-01').value\n data7 = np.array([start, start + 1000, start + 2000],\n dtype='int64')\n a7 = pa.array(data7, type=t7)\n\n table = pa.Table.from_arrays([a1, a2, a3, a4, a5, a6, a7],\n ['date32', 'date64', 'timestamp[us]',\n 'time32[s]', 'time64[us]',\n 'time32_from64[s]',\n 'timestamp[ns]'])\n\n # date64 as date32\n # time32[s] to time32[ms]\n expected = pa.Table.from_arrays([a1, a1, a3, a4, a5, ex_a6, a7],\n ['date32', 'date64', 'timestamp[us]',\n 'time32[s]', 'time64[us]',\n 'time32_from64[s]',\n 'timestamp[ns]'])\n\n _check_roundtrip(table, expected=expected, version='2.0')\n\n t0 = pa.timestamp('ms')\n data0 = np.arange(4, dtype='int64')\n a0 = pa.array(data0, type=t0)\n\n t1 = pa.timestamp('us')\n data1 = np.arange(4, dtype='int64')\n a1 = pa.array(data1, type=t1)\n\n t2 = pa.timestamp('ns')\n data2 = np.arange(4, dtype='int64')\n a2 = pa.array(data2, type=t2)\n\n table = pa.Table.from_arrays([a0, a1, a2],\n ['ts[ms]', 'ts[us]', 'ts[ns]'])\n expected = pa.Table.from_arrays([a0, a1, a2],\n ['ts[ms]', 'ts[us]', 'ts[ns]'])\n\n # 
int64 for all timestamps supported by default\n filename = tempdir / 'int64_timestamps.parquet'\n _write_table(table, filename, version='2.0')\n parquet_schema = pq.ParquetFile(filename).schema\n for i in range(3):\n assert parquet_schema.column(i).physical_type == 'INT64'\n read_table = _read_table(filename)\n assert read_table.equals(expected)\n\n t0_ns = pa.timestamp('ns')\n data0_ns = np.array(data0 * 1000000, dtype='int64')\n a0_ns = pa.array(data0_ns, type=t0_ns)\n\n t1_ns = pa.timestamp('ns')\n data1_ns = np.array(data1 * 1000, dtype='int64')\n a1_ns = pa.array(data1_ns, type=t1_ns)\n\n expected = pa.Table.from_arrays([a0_ns, a1_ns, a2],\n ['ts[ms]', 'ts[us]', 'ts[ns]'])\n\n # int96 nanosecond timestamps produced upon request\n filename = tempdir / 'explicit_int96_timestamps.parquet'\n _write_table(table, filename, version='2.0',\n use_deprecated_int96_timestamps=True)\n parquet_schema = pq.ParquetFile(filename).schema\n for i in range(3):\n assert parquet_schema.column(i).physical_type == 'INT96'\n read_table = _read_table(filename)\n assert read_table.equals(expected)\n\n # int96 nanosecond timestamps implied by flavor 'spark'\n filename = tempdir / 'spark_int96_timestamps.parquet'\n _write_table(table, filename, version='2.0',\n flavor='spark')\n parquet_schema = pq.ParquetFile(filename).schema\n for i in range(3):\n assert parquet_schema.column(i).physical_type == 'INT96'\n read_table = _read_table(filename)\n assert read_table.equals(expected)\n\n\ndef test_timestamp_restore_timezone():\n # ARROW-5888, restore timezone from serialized metadata\n ty = pa.timestamp('ms', tz='America/New_York')\n arr = pa.array([1, 2, 3], type=ty)\n t = pa.table([arr], names=['f0'])\n _check_roundtrip(t)\n\n\[email protected]\ndef test_list_of_datetime_time_roundtrip():\n # ARROW-4135\n times = pd.to_datetime(['09:00', '09:30', '10:00', '10:30', '11:00',\n '11:30', '12:00'])\n df = pd.DataFrame({'time': [times.time]})\n _roundtrip_pandas_dataframe(df, write_kwargs={})\n\n\[email protected]\ndef test_parquet_version_timestamp_differences():\n i_s = pd.Timestamp('2010-01-01').value / 1000000000 # := 1262304000\n\n d_s = np.arange(i_s, i_s + 10, 1, dtype='int64')\n d_ms = d_s * 1000\n d_us = d_ms * 1000\n d_ns = d_us * 1000\n\n a_s = pa.array(d_s, type=pa.timestamp('s'))\n a_ms = pa.array(d_ms, type=pa.timestamp('ms'))\n a_us = pa.array(d_us, type=pa.timestamp('us'))\n a_ns = pa.array(d_ns, type=pa.timestamp('ns'))\n\n names = ['ts:s', 'ts:ms', 'ts:us', 'ts:ns']\n table = pa.Table.from_arrays([a_s, a_ms, a_us, a_ns], names)\n\n # Using Parquet version 1.0, seconds should be coerced to milliseconds\n # and nanoseconds should be coerced to microseconds by default\n expected = pa.Table.from_arrays([a_ms, a_ms, a_us, a_us], names)\n _check_roundtrip(table, expected)\n\n # Using Parquet version 2.0, seconds should be coerced to milliseconds\n # and nanoseconds should be retained by default\n expected = pa.Table.from_arrays([a_ms, a_ms, a_us, a_ns], names)\n _check_roundtrip(table, expected, version='2.0')\n\n # Using Parquet version 1.0, coercing to milliseconds or microseconds\n # is allowed\n expected = pa.Table.from_arrays([a_ms, a_ms, a_ms, a_ms], names)\n _check_roundtrip(table, expected, coerce_timestamps='ms')\n\n # Using Parquet version 2.0, coercing to milliseconds or microseconds\n # is allowed\n expected = pa.Table.from_arrays([a_us, a_us, a_us, a_us], names)\n _check_roundtrip(table, expected, version='2.0', coerce_timestamps='us')\n\n # TODO: after pyarrow allows coerce_timestamps='ns', tests 
like the\n # following should pass ...\n\n # Using Parquet version 1.0, coercing to nanoseconds is not allowed\n # expected = None\n # with pytest.raises(NotImplementedError):\n # _roundtrip_table(table, coerce_timestamps='ns')\n\n # Using Parquet version 2.0, coercing to nanoseconds is allowed\n # expected = pa.Table.from_arrays([a_ns, a_ns, a_ns, a_ns], names)\n # _check_roundtrip(table, expected, version='2.0', coerce_timestamps='ns')\n\n # For either Parquet version, coercing to nanoseconds is allowed\n # if Int96 storage is used\n expected = pa.Table.from_arrays([a_ns, a_ns, a_ns, a_ns], names)\n _check_roundtrip(table, expected,\n use_deprecated_int96_timestamps=True)\n _check_roundtrip(table, expected, version='2.0',\n use_deprecated_int96_timestamps=True)\n\n\ndef test_large_list_records():\n # This was fixed in PARQUET-1100\n\n list_lengths = np.random.randint(0, 500, size=50)\n list_lengths[::10] = 0\n\n list_values = [list(map(int, np.random.randint(0, 100, size=x)))\n if i % 8 else None\n for i, x in enumerate(list_lengths)]\n\n a1 = pa.array(list_values)\n\n table = pa.Table.from_arrays([a1], ['int_lists'])\n _check_roundtrip(table)\n\n\ndef test_sanitized_spark_field_names():\n a0 = pa.array([0, 1, 2, 3, 4])\n name = 'prohib; ,\\t{}'\n table = pa.Table.from_arrays([a0], [name])\n\n result = _roundtrip_table(table, write_table_kwargs={'flavor': 'spark'})\n\n expected_name = 'prohib______'\n assert result.schema[0].name == expected_name\n\n\[email protected]\ndef test_spark_flavor_preserves_pandas_metadata():\n df = _test_dataframe(size=100)\n df.index = np.arange(0, 10 * len(df), 10)\n df.index.name = 'foo'\n\n result = _roundtrip_pandas_dataframe(df, {'version': '2.0',\n 'flavor': 'spark'})\n tm.assert_frame_equal(result, df)\n\n\ndef test_fixed_size_binary():\n t0 = pa.binary(10)\n data = [b'fooooooooo', None, b'barooooooo', b'quxooooooo']\n a0 = pa.array(data, type=t0)\n\n table = pa.Table.from_arrays([a0],\n ['binary[10]'])\n _check_roundtrip(table)\n\n\[email protected]\ndef test_multithreaded_read():\n df = alltypes_sample(size=10000)\n\n table = pa.Table.from_pandas(df)\n\n buf = io.BytesIO()\n _write_table(table, buf, compression='SNAPPY', version='2.0')\n\n buf.seek(0)\n table1 = _read_table(buf, use_threads=True)\n\n buf.seek(0)\n table2 = _read_table(buf, use_threads=False)\n\n assert table1.equals(table2)\n\n\[email protected]\ndef test_min_chunksize():\n data = pd.DataFrame([np.arange(4)], columns=['A', 'B', 'C', 'D'])\n table = pa.Table.from_pandas(data.reset_index())\n\n buf = io.BytesIO()\n _write_table(table, buf, chunk_size=-1)\n\n buf.seek(0)\n result = _read_table(buf)\n\n assert result.equals(table)\n\n with pytest.raises(ValueError):\n _write_table(table, buf, chunk_size=0)\n\n\[email protected]\ndef test_pass_separate_metadata():\n # ARROW-471\n df = alltypes_sample(size=10000)\n\n a_table = pa.Table.from_pandas(df)\n\n buf = io.BytesIO()\n _write_table(a_table, buf, compression='snappy', version='2.0')\n\n buf.seek(0)\n metadata = pq.read_metadata(buf)\n\n buf.seek(0)\n\n fileh = pq.ParquetFile(buf, metadata=metadata)\n\n tm.assert_frame_equal(df, fileh.read().to_pandas())\n\n\[email protected]\ndef test_read_single_row_group():\n # ARROW-471\n N, K = 10000, 4\n df = alltypes_sample(size=N)\n\n a_table = pa.Table.from_pandas(df)\n\n buf = io.BytesIO()\n _write_table(a_table, buf, row_group_size=N / K,\n compression='snappy', version='2.0')\n\n buf.seek(0)\n\n pf = pq.ParquetFile(buf)\n\n assert pf.num_row_groups == K\n\n row_groups = 
[pf.read_row_group(i) for i in range(K)]\n result = pa.concat_tables(row_groups)\n tm.assert_frame_equal(df, result.to_pandas())\n\n\[email protected]\ndef test_read_single_row_group_with_column_subset():\n N, K = 10000, 4\n df = alltypes_sample(size=N)\n a_table = pa.Table.from_pandas(df)\n\n buf = io.BytesIO()\n _write_table(a_table, buf, row_group_size=N / K,\n compression='snappy', version='2.0')\n\n buf.seek(0)\n pf = pq.ParquetFile(buf)\n\n cols = list(df.columns[:2])\n row_groups = [pf.read_row_group(i, columns=cols) for i in range(K)]\n result = pa.concat_tables(row_groups)\n tm.assert_frame_equal(df[cols], result.to_pandas())\n\n # ARROW-4267: Selection of duplicate columns still leads to these columns\n # being read uniquely.\n row_groups = [pf.read_row_group(i, columns=cols + cols) for i in range(K)]\n result = pa.concat_tables(row_groups)\n tm.assert_frame_equal(df[cols], result.to_pandas())\n\n\[email protected]\ndef test_scan_contents():\n N, K = 10000, 4\n df = alltypes_sample(size=N)\n a_table = pa.Table.from_pandas(df)\n\n buf = io.BytesIO()\n _write_table(a_table, buf, row_group_size=N / K,\n compression='snappy', version='2.0')\n\n buf.seek(0)\n pf = pq.ParquetFile(buf)\n\n assert pf.scan_contents() == 10000\n assert pf.scan_contents(df.columns[:4]) == 10000\n\n\[email protected]\ndef test_parquet_piece_read(tempdir):\n df = _test_dataframe(1000)\n table = pa.Table.from_pandas(df)\n\n path = tempdir / 'parquet_piece_read.parquet'\n _write_table(table, path, version='2.0')\n\n piece1 = pq.ParquetDatasetPiece(path)\n\n result = piece1.read()\n assert result.equals(table)\n\n\[email protected]\ndef test_parquet_piece_open_and_get_metadata(tempdir):\n df = _test_dataframe(100)\n table = pa.Table.from_pandas(df)\n\n path = tempdir / 'parquet_piece_read.parquet'\n _write_table(table, path, version='2.0')\n\n piece = pq.ParquetDatasetPiece(path)\n table1 = piece.read()\n assert isinstance(table1, pa.Table)\n meta1 = piece.get_metadata()\n assert isinstance(meta1, pq.FileMetaData)\n\n assert table == table1\n\n\ndef test_parquet_piece_basics():\n path = '/baz.parq'\n\n piece1 = pq.ParquetDatasetPiece(path)\n piece2 = pq.ParquetDatasetPiece(path, row_group=1)\n piece3 = pq.ParquetDatasetPiece(\n path, row_group=1, partition_keys=[('foo', 0), ('bar', 1)])\n\n assert str(piece1) == path\n assert str(piece2) == '/baz.parq | row_group=1'\n assert str(piece3) == 'partition[foo=0, bar=1] /baz.parq | row_group=1'\n\n assert piece1 == piece1\n assert piece2 == piece2\n assert piece3 == piece3\n assert piece1 != piece3\n\n\ndef test_partition_set_dictionary_type():\n set1 = pq.PartitionSet('key1', [u('foo'), u('bar'), u('baz')])\n set2 = pq.PartitionSet('key2', [2007, 2008, 2009])\n\n assert isinstance(set1.dictionary, pa.StringArray)\n assert isinstance(set2.dictionary, pa.IntegerArray)\n\n set3 = pq.PartitionSet('key2', [datetime.datetime(2007, 1, 1)])\n with pytest.raises(TypeError):\n set3.dictionary\n\n\[email protected]\ndef test_read_partitioned_directory(tempdir):\n fs = LocalFileSystem.get_instance()\n _partition_test_for_filesystem(fs, tempdir)\n\n\[email protected]\ndef test_create_parquet_dataset_multi_threaded(tempdir):\n fs = LocalFileSystem.get_instance()\n base_path = tempdir\n\n _partition_test_for_filesystem(fs, base_path)\n\n manifest = pq.ParquetManifest(base_path, filesystem=fs,\n metadata_nthreads=1)\n dataset = pq.ParquetDataset(base_path, filesystem=fs, metadata_nthreads=16)\n assert len(dataset.pieces) > 0\n partitions = dataset.partitions\n assert 
len(partitions.partition_names) > 0\n assert partitions.partition_names == manifest.partitions.partition_names\n assert len(partitions.levels) == len(manifest.partitions.levels)\n\n\[email protected]\ndef test_equivalency(tempdir):\n fs = LocalFileSystem.get_instance()\n base_path = tempdir\n\n integer_keys = [0, 1]\n string_keys = ['a', 'b', 'c']\n boolean_keys = [True, False]\n partition_spec = [\n ['integer', integer_keys],\n ['string', string_keys],\n ['boolean', boolean_keys]\n ]\n\n df = pd.DataFrame({\n 'integer': np.array(integer_keys, dtype='i4').repeat(15),\n 'string': np.tile(np.tile(np.array(string_keys, dtype=object), 5), 2),\n 'boolean': np.tile(np.tile(np.array(boolean_keys, dtype='bool'), 5),\n 3),\n }, columns=['integer', 'string', 'boolean'])\n\n _generate_partition_directories(fs, base_path, partition_spec, df)\n\n # Old filters syntax:\n # integer == 1 AND string != b AND boolean == True\n dataset = pq.ParquetDataset(\n base_path, filesystem=fs,\n filters=[('integer', '=', 1), ('string', '!=', 'b'),\n ('boolean', '==', True)]\n )\n table = dataset.read()\n result_df = (table.to_pandas().reset_index(drop=True))\n\n assert 0 not in result_df['integer'].values\n assert 'b' not in result_df['string'].values\n assert False not in result_df['boolean'].values\n\n # filters in disjunctive normal form:\n # (integer == 1 AND string != b AND boolean == True) OR\n # (integer == 2 AND boolean == False)\n # TODO(ARROW-3388): boolean columns are reconstructed as string\n filters = [\n [\n ('integer', '=', 1),\n ('string', '!=', 'b'),\n ('boolean', '==', 'True')\n ],\n [('integer', '=', 0), ('boolean', '==', 'False')]\n ]\n dataset = pq.ParquetDataset(base_path, filesystem=fs, filters=filters)\n table = dataset.read()\n result_df = table.to_pandas().reset_index(drop=True)\n\n # Check that all rows in the DF fulfill the filter\n # Pandas 0.23.x has problems with indexing constant memoryviews in\n # categoricals. Thus we need to make an explicity copy here with np.array.\n df_filter_1 = (np.array(result_df['integer']) == 1) \\\n & (np.array(result_df['string']) != 'b') \\\n & (np.array(result_df['boolean']) == 'True')\n df_filter_2 = (np.array(result_df['integer']) == 0) \\\n & (np.array(result_df['boolean']) == 'False')\n assert df_filter_1.sum() > 0\n assert df_filter_2.sum() > 0\n assert result_df.shape[0] == (df_filter_1.sum() + df_filter_2.sum())\n\n # Check for \\0 in predicate values. 
Until they are correctly implemented\n # in ARROW-3391, they would otherwise lead to weird results with the\n # current code.\n with pytest.raises(NotImplementedError):\n filters = [[('string', '==', b'1\\0a')]]\n pq.ParquetDataset(base_path, filesystem=fs, filters=filters)\n with pytest.raises(NotImplementedError):\n filters = [[('string', '==', u'1\\0a')]]\n pq.ParquetDataset(base_path, filesystem=fs, filters=filters)\n\n\[email protected]\ndef test_cutoff_exclusive_integer(tempdir):\n fs = LocalFileSystem.get_instance()\n base_path = tempdir\n\n integer_keys = [0, 1, 2, 3, 4]\n partition_spec = [\n ['integers', integer_keys],\n ]\n N = 5\n\n df = pd.DataFrame({\n 'index': np.arange(N),\n 'integers': np.array(integer_keys, dtype='i4'),\n }, columns=['index', 'integers'])\n\n _generate_partition_directories(fs, base_path, partition_spec, df)\n\n dataset = pq.ParquetDataset(\n base_path, filesystem=fs,\n filters=[\n ('integers', '<', 4),\n ('integers', '>', 1),\n ]\n )\n table = dataset.read()\n result_df = (table.to_pandas()\n .sort_values(by='index')\n .reset_index(drop=True))\n\n result_list = [x for x in map(int, result_df['integers'].values)]\n assert result_list == [2, 3]\n\n\[email protected]\[email protected](\n raises=TypeError,\n reason='Loss of type information in creation of categoricals.'\n)\ndef test_cutoff_exclusive_datetime(tempdir):\n fs = LocalFileSystem.get_instance()\n base_path = tempdir\n\n date_keys = [\n datetime.date(2018, 4, 9),\n datetime.date(2018, 4, 10),\n datetime.date(2018, 4, 11),\n datetime.date(2018, 4, 12),\n datetime.date(2018, 4, 13)\n ]\n partition_spec = [\n ['dates', date_keys]\n ]\n N = 5\n\n df = pd.DataFrame({\n 'index': np.arange(N),\n 'dates': np.array(date_keys, dtype='datetime64'),\n }, columns=['index', 'dates'])\n\n _generate_partition_directories(fs, base_path, partition_spec, df)\n\n dataset = pq.ParquetDataset(\n base_path, filesystem=fs,\n filters=[\n ('dates', '<', \"2018-04-12\"),\n ('dates', '>', \"2018-04-10\")\n ]\n )\n table = dataset.read()\n result_df = (table.to_pandas()\n .sort_values(by='index')\n .reset_index(drop=True))\n\n expected = pd.Categorical(\n np.array([datetime.date(2018, 4, 11)], dtype='datetime64'),\n categories=np.array(date_keys, dtype='datetime64'))\n\n assert result_df['dates'].values == expected\n\n\[email protected]\ndef test_inclusive_integer(tempdir):\n fs = LocalFileSystem.get_instance()\n base_path = tempdir\n\n integer_keys = [0, 1, 2, 3, 4]\n partition_spec = [\n ['integers', integer_keys],\n ]\n N = 5\n\n df = pd.DataFrame({\n 'index': np.arange(N),\n 'integers': np.array(integer_keys, dtype='i4'),\n }, columns=['index', 'integers'])\n\n _generate_partition_directories(fs, base_path, partition_spec, df)\n\n dataset = pq.ParquetDataset(\n base_path, filesystem=fs,\n filters=[\n ('integers', '<=', 3),\n ('integers', '>=', 2),\n ]\n )\n table = dataset.read()\n result_df = (table.to_pandas()\n .sort_values(by='index')\n .reset_index(drop=True))\n\n result_list = [int(x) for x in map(int, result_df['integers'].values)]\n assert result_list == [2, 3]\n\n\[email protected]\ndef test_inclusive_set(tempdir):\n fs = LocalFileSystem.get_instance()\n base_path = tempdir\n\n integer_keys = [0, 1]\n string_keys = ['a', 'b', 'c']\n boolean_keys = [True, False]\n partition_spec = [\n ['integer', integer_keys],\n ['string', string_keys],\n ['boolean', boolean_keys]\n ]\n\n df = pd.DataFrame({\n 'integer': np.array(integer_keys, dtype='i4').repeat(15),\n 'string': np.tile(np.tile(np.array(string_keys, dtype=object), 
5), 2),\n 'boolean': np.tile(np.tile(np.array(boolean_keys, dtype='bool'), 5),\n 3),\n }, columns=['integer', 'string', 'boolean'])\n\n _generate_partition_directories(fs, base_path, partition_spec, df)\n\n dataset = pq.ParquetDataset(\n base_path, filesystem=fs,\n filters=[('integer', 'in', {1}), ('string', 'in', {'a', 'b'}),\n ('boolean', 'in', {True})]\n )\n table = dataset.read()\n result_df = (table.to_pandas().reset_index(drop=True))\n\n assert 0 not in result_df['integer'].values\n assert 'c' not in result_df['string'].values\n assert False not in result_df['boolean'].values\n\n\[email protected]\ndef test_invalid_pred_op(tempdir):\n fs = LocalFileSystem.get_instance()\n base_path = tempdir\n\n integer_keys = [0, 1, 2, 3, 4]\n partition_spec = [\n ['integers', integer_keys],\n ]\n N = 5\n\n df = pd.DataFrame({\n 'index': np.arange(N),\n 'integers': np.array(integer_keys, dtype='i4'),\n }, columns=['index', 'integers'])\n\n _generate_partition_directories(fs, base_path, partition_spec, df)\n\n with pytest.raises(ValueError):\n pq.ParquetDataset(base_path,\n filesystem=fs,\n filters=[\n ('integers', '=<', 3),\n ])\n\n with pytest.raises(ValueError):\n pq.ParquetDataset(base_path,\n filesystem=fs,\n filters=[\n ('integers', 'in', set()),\n ])\n\n with pytest.raises(ValueError):\n pq.ParquetDataset(base_path,\n filesystem=fs,\n filters=[\n ('integers', '!=', {3}),\n ])\n\n\[email protected]\ndef test_filters_read_table(tempdir):\n # test that filters keyword is passed through in read_table\n fs = LocalFileSystem.get_instance()\n base_path = tempdir\n\n integer_keys = [0, 1, 2, 3, 4]\n partition_spec = [\n ['integers', integer_keys],\n ]\n N = 5\n\n df = pd.DataFrame({\n 'index': np.arange(N),\n 'integers': np.array(integer_keys, dtype='i4'),\n }, columns=['index', 'integers'])\n\n _generate_partition_directories(fs, base_path, partition_spec, df)\n\n table = pq.read_table(\n base_path, filesystem=fs, filters=[('integers', '<', 3)])\n assert table.num_rows == 3\n\n table = pq.read_table(\n base_path, filesystem=fs, filters=[[('integers', '<', 3)]])\n assert table.num_rows == 3\n\n table = pq.read_pandas(\n base_path, filters=[('integers', '<', 3)])\n assert table.num_rows == 3\n\n\[email protected]_fixture\ndef s3_example():\n access_key = os.environ['PYARROW_TEST_S3_ACCESS_KEY']\n secret_key = os.environ['PYARROW_TEST_S3_SECRET_KEY']\n bucket_name = os.environ['PYARROW_TEST_S3_BUCKET']\n\n import s3fs\n fs = s3fs.S3FileSystem(key=access_key, secret=secret_key)\n\n test_dir = guid()\n\n bucket_uri = 's3://{0}/{1}'.format(bucket_name, test_dir)\n fs.mkdir(bucket_uri)\n yield fs, bucket_uri\n fs.rm(bucket_uri, recursive=True)\n\n\[email protected]\[email protected]\ndef test_read_partitioned_directory_s3fs(s3_example):\n from pyarrow.filesystem import S3FSWrapper\n\n fs, bucket_uri = s3_example\n wrapper = S3FSWrapper(fs)\n _partition_test_for_filesystem(wrapper, bucket_uri)\n\n # Check that we can auto-wrap\n dataset = pq.ParquetDataset(bucket_uri, filesystem=fs)\n dataset.read()\n\n\ndef _partition_test_for_filesystem(fs, base_path):\n foo_keys = [0, 1]\n bar_keys = ['a', 'b', 'c']\n partition_spec = [\n ['foo', foo_keys],\n ['bar', bar_keys]\n ]\n N = 30\n\n df = pd.DataFrame({\n 'index': np.arange(N),\n 'foo': np.array(foo_keys, dtype='i4').repeat(15),\n 'bar': np.tile(np.tile(np.array(bar_keys, dtype=object), 5), 2),\n 'values': np.random.randn(N)\n }, columns=['index', 'foo', 'bar', 'values'])\n\n _generate_partition_directories(fs, base_path, partition_spec, df)\n\n dataset = 
pq.ParquetDataset(base_path, filesystem=fs)\n table = dataset.read()\n result_df = (table.to_pandas()\n .sort_values(by='index')\n .reset_index(drop=True))\n\n expected_df = (df.sort_values(by='index')\n .reset_index(drop=True)\n .reindex(columns=result_df.columns))\n expected_df['foo'] = pd.Categorical(df['foo'], categories=foo_keys)\n expected_df['bar'] = pd.Categorical(df['bar'], categories=bar_keys)\n\n assert (result_df.columns == ['index', 'values', 'foo', 'bar']).all()\n\n tm.assert_frame_equal(result_df, expected_df)\n\n\ndef _generate_partition_directories(fs, base_dir, partition_spec, df):\n # partition_spec : list of lists, e.g. [['foo', [0, 1, 2],\n # ['bar', ['a', 'b', 'c']]\n # part_table : a pyarrow.Table to write to each partition\n DEPTH = len(partition_spec)\n\n def _visit_level(base_dir, level, part_keys):\n name, values = partition_spec[level]\n for value in values:\n this_part_keys = part_keys + [(name, value)]\n\n level_dir = base_dir / '{0}={1}'.format(name, value)\n fs.mkdir(level_dir)\n\n if level == DEPTH - 1:\n # Generate example data\n file_path = level_dir / guid()\n\n filtered_df = _filter_partition(df, this_part_keys)\n part_table = pa.Table.from_pandas(filtered_df)\n with fs.open(file_path, 'wb') as f:\n _write_table(part_table, f)\n assert fs.exists(file_path)\n\n (level_dir / '_SUCCESS').touch()\n else:\n _visit_level(level_dir, level + 1, this_part_keys)\n (level_dir / '_SUCCESS').touch()\n\n _visit_level(base_dir, 0, [])\n\n\ndef _test_read_common_metadata_files(fs, base_path):\n N = 100\n df = pd.DataFrame({\n 'index': np.arange(N),\n 'values': np.random.randn(N)\n }, columns=['index', 'values'])\n\n base_path = str(base_path)\n data_path = os.path.join(base_path, 'data.parquet')\n\n table = pa.Table.from_pandas(df)\n\n with fs.open(data_path, 'wb') as f:\n _write_table(table, f)\n\n metadata_path = os.path.join(base_path, '_common_metadata')\n with fs.open(metadata_path, 'wb') as f:\n pq.write_metadata(table.schema, f)\n\n dataset = pq.ParquetDataset(base_path, filesystem=fs)\n assert dataset.common_metadata_path == str(metadata_path)\n\n with fs.open(data_path) as f:\n common_schema = pq.read_metadata(f).schema\n assert dataset.schema.equals(common_schema)\n\n # handle list of one directory\n dataset2 = pq.ParquetDataset([base_path], filesystem=fs)\n assert dataset2.schema.equals(dataset.schema)\n\n\[email protected]\ndef test_read_common_metadata_files(tempdir):\n fs = LocalFileSystem.get_instance()\n _test_read_common_metadata_files(fs, tempdir)\n\n\[email protected]\ndef test_read_metadata_files(tempdir):\n fs = LocalFileSystem.get_instance()\n\n N = 100\n df = pd.DataFrame({\n 'index': np.arange(N),\n 'values': np.random.randn(N)\n }, columns=['index', 'values'])\n\n data_path = tempdir / 'data.parquet'\n\n table = pa.Table.from_pandas(df)\n\n with fs.open(data_path, 'wb') as f:\n _write_table(table, f)\n\n metadata_path = tempdir / '_metadata'\n with fs.open(metadata_path, 'wb') as f:\n pq.write_metadata(table.schema, f)\n\n dataset = pq.ParquetDataset(tempdir, filesystem=fs)\n assert dataset.metadata_path == str(metadata_path)\n\n with fs.open(data_path) as f:\n metadata_schema = pq.read_metadata(f).schema\n assert dataset.schema.equals(metadata_schema)\n\n\[email protected]\ndef test_read_schema(tempdir):\n N = 100\n df = pd.DataFrame({\n 'index': np.arange(N),\n 'values': np.random.randn(N)\n }, columns=['index', 'values'])\n\n data_path = tempdir / 'test.parquet'\n\n table = pa.Table.from_pandas(df)\n _write_table(table, data_path)\n\n read1 
= pq.read_schema(data_path)\n read2 = pq.read_schema(data_path, memory_map=True)\n assert table.schema.equals(read1, check_metadata=False)\n assert table.schema.equals(read2, check_metadata=False)\n\n assert table.schema.metadata[b'pandas'] == read1.metadata[b'pandas']\n\n\ndef _filter_partition(df, part_keys):\n predicate = np.ones(len(df), dtype=bool)\n\n to_drop = []\n for name, value in part_keys:\n to_drop.append(name)\n\n # to avoid pandas warning\n if isinstance(value, (datetime.date, datetime.datetime)):\n value = pd.Timestamp(value)\n\n predicate &= df[name] == value\n\n return df[predicate].drop(to_drop, axis=1)\n\n\[email protected]\ndef test_read_multiple_files(tempdir):\n nfiles = 10\n size = 5\n\n dirpath = tempdir / guid()\n dirpath.mkdir()\n\n test_data = []\n paths = []\n for i in range(nfiles):\n df = _test_dataframe(size, seed=i)\n\n # Hack so that we don't have a dtype cast in v1 files\n df['uint32'] = df['uint32'].astype(np.int64)\n\n path = dirpath / '{}.parquet'.format(i)\n\n table = pa.Table.from_pandas(df)\n _write_table(table, path)\n\n test_data.append(table)\n paths.append(path)\n\n # Write a _SUCCESS.crc file\n (dirpath / '_SUCCESS.crc').touch()\n\n def read_multiple_files(paths, columns=None, use_threads=True, **kwargs):\n dataset = pq.ParquetDataset(paths, **kwargs)\n return dataset.read(columns=columns, use_threads=use_threads)\n\n result = read_multiple_files(paths)\n expected = pa.concat_tables(test_data)\n\n assert result.equals(expected)\n\n # Read with provided metadata\n metadata = pq.read_metadata(paths[0])\n\n result2 = read_multiple_files(paths, metadata=metadata)\n assert result2.equals(expected)\n\n result3 = pa.localfs.read_parquet(dirpath, schema=metadata.schema)\n assert result3.equals(expected)\n\n # Read column subset\n to_read = [0, 2, 6, result.num_columns - 1]\n\n col_names = [result.field(i).name for i in to_read]\n out = pa.localfs.read_parquet(dirpath, columns=col_names)\n expected = pa.Table.from_arrays([result.column(i) for i in to_read],\n names=col_names,\n metadata=result.schema.metadata)\n assert out.equals(expected)\n\n # Read with multiple threads\n pa.localfs.read_parquet(dirpath, use_threads=True)\n\n # Test failure modes with non-uniform metadata\n bad_apple = _test_dataframe(size, seed=i).iloc[:, :4]\n bad_apple_path = tempdir / '{}.parquet'.format(guid())\n\n t = pa.Table.from_pandas(bad_apple)\n _write_table(t, bad_apple_path)\n\n bad_meta = pq.read_metadata(bad_apple_path)\n\n with pytest.raises(ValueError):\n read_multiple_files(paths + [bad_apple_path])\n\n with pytest.raises(ValueError):\n read_multiple_files(paths, metadata=bad_meta)\n\n mixed_paths = [bad_apple_path, paths[0]]\n\n with pytest.raises(ValueError):\n read_multiple_files(mixed_paths, schema=bad_meta.schema)\n\n with pytest.raises(ValueError):\n read_multiple_files(mixed_paths)\n\n\[email protected]\ndef test_dataset_read_pandas(tempdir):\n nfiles = 5\n size = 5\n\n dirpath = tempdir / guid()\n dirpath.mkdir()\n\n test_data = []\n frames = []\n paths = []\n for i in range(nfiles):\n df = _test_dataframe(size, seed=i)\n df.index = np.arange(i * size, (i + 1) * size)\n df.index.name = 'index'\n\n path = dirpath / '{}.parquet'.format(i)\n\n table = pa.Table.from_pandas(df)\n _write_table(table, path)\n test_data.append(table)\n frames.append(df)\n paths.append(path)\n\n dataset = pq.ParquetDataset(dirpath)\n columns = ['uint8', 'strings']\n result = dataset.read_pandas(columns=columns).to_pandas()\n expected = pd.concat([x[columns] for x in frames])\n\n 
tm.assert_frame_equal(result, expected)\n\n\[email protected]\ndef test_dataset_no_memory_map(tempdir):\n # ARROW-2627: Check that we can use ParquetDataset without memory-mapping\n dirpath = tempdir / guid()\n dirpath.mkdir()\n\n df = _test_dataframe(10, seed=0)\n path = dirpath / '{}.parquet'.format(0)\n table = pa.Table.from_pandas(df)\n _write_table(table, path, version='2.0')\n\n # TODO(wesm): Not sure how to easily check that memory mapping is _not_\n # used. Mocking is not especially easy for pa.memory_map\n dataset = pq.ParquetDataset(dirpath, memory_map=False)\n assert dataset.pieces[0].read().equals(table)\n\n\[email protected]\[email protected]('preserve_index', [True, False, None])\ndef test_dataset_read_pandas_common_metadata(tempdir, preserve_index):\n # ARROW-1103\n nfiles = 5\n size = 5\n\n dirpath = tempdir / guid()\n dirpath.mkdir()\n\n test_data = []\n frames = []\n paths = []\n for i in range(nfiles):\n df = _test_dataframe(size, seed=i)\n df.index = pd.Index(np.arange(i * size, (i + 1) * size), name='index')\n\n path = dirpath / '{}.parquet'.format(i)\n\n table = pa.Table.from_pandas(df, preserve_index=preserve_index)\n\n # Obliterate metadata\n table = table.replace_schema_metadata(None)\n assert table.schema.metadata is None\n\n _write_table(table, path)\n test_data.append(table)\n frames.append(df)\n paths.append(path)\n\n # Write _metadata common file\n table_for_metadata = pa.Table.from_pandas(\n df, preserve_index=preserve_index\n )\n pq.write_metadata(table_for_metadata.schema, dirpath / '_metadata')\n\n dataset = pq.ParquetDataset(dirpath)\n columns = ['uint8', 'strings']\n result = dataset.read_pandas(columns=columns).to_pandas()\n expected = pd.concat([x[columns] for x in frames])\n expected.index.name = (\n df.index.name if preserve_index is not False else None)\n tm.assert_frame_equal(result, expected)\n\n\ndef _make_example_multifile_dataset(base_path, nfiles=10, file_nrows=5):\n test_data = []\n paths = []\n for i in range(nfiles):\n df = _test_dataframe(file_nrows, seed=i)\n path = base_path / '{}.parquet'.format(i)\n\n test_data.append(_write_table(df, path))\n paths.append(path)\n return paths\n\n\[email protected]\ndef test_ignore_private_directories(tempdir):\n dirpath = tempdir / guid()\n dirpath.mkdir()\n\n paths = _make_example_multifile_dataset(dirpath, nfiles=10,\n file_nrows=5)\n\n # private directory\n (dirpath / '_impala_staging').mkdir()\n\n dataset = pq.ParquetDataset(dirpath)\n assert set(map(str, paths)) == set(x.path for x in dataset.pieces)\n\n\[email protected]\ndef test_ignore_hidden_files_dot(tempdir):\n dirpath = tempdir / guid()\n dirpath.mkdir()\n\n paths = _make_example_multifile_dataset(dirpath, nfiles=10,\n file_nrows=5)\n\n with (dirpath / '.DS_Store').open('wb') as f:\n f.write(b'gibberish')\n\n with (dirpath / '.private').open('wb') as f:\n f.write(b'gibberish')\n\n dataset = pq.ParquetDataset(dirpath)\n assert set(map(str, paths)) == set(x.path for x in dataset.pieces)\n\n\[email protected]\ndef test_ignore_hidden_files_underscore(tempdir):\n dirpath = tempdir / guid()\n dirpath.mkdir()\n\n paths = _make_example_multifile_dataset(dirpath, nfiles=10,\n file_nrows=5)\n\n with (dirpath / '_committed_123').open('wb') as f:\n f.write(b'abcd')\n\n with (dirpath / '_started_321').open('wb') as f:\n f.write(b'abcd')\n\n dataset = pq.ParquetDataset(dirpath)\n assert set(map(str, paths)) == set(x.path for x in dataset.pieces)\n\n\[email protected]\ndef test_multiindex_duplicate_values(tempdir):\n num_rows = 3\n numbers = 
list(range(num_rows))\n index = pd.MultiIndex.from_arrays(\n [['foo', 'foo', 'bar'], numbers],\n names=['foobar', 'some_numbers'],\n )\n\n df = pd.DataFrame({'numbers': numbers}, index=index)\n table = pa.Table.from_pandas(df)\n\n filename = tempdir / 'dup_multi_index_levels.parquet'\n\n _write_table(table, filename)\n result_table = _read_table(filename)\n assert table.equals(result_table)\n\n result_df = result_table.to_pandas()\n tm.assert_frame_equal(result_df, df)\n\n\[email protected]\ndef test_write_error_deletes_incomplete_file(tempdir):\n # ARROW-1285\n df = pd.DataFrame({'a': list('abc'),\n 'b': list(range(1, 4)),\n 'c': np.arange(3, 6).astype('u1'),\n 'd': np.arange(4.0, 7.0, dtype='float64'),\n 'e': [True, False, True],\n 'f': pd.Categorical(list('abc')),\n 'g': pd.date_range('20130101', periods=3),\n 'h': pd.date_range('20130101', periods=3,\n tz='US/Eastern'),\n 'i': pd.date_range('20130101', periods=3, freq='ns')})\n\n pdf = pa.Table.from_pandas(df)\n\n filename = tempdir / 'tmp_file'\n try:\n _write_table(pdf, filename)\n except pa.ArrowException:\n pass\n\n assert not filename.exists()\n\n\[email protected]\ndef test_noncoerced_nanoseconds_written_without_exception(tempdir):\n # ARROW-1957: the Parquet version 2.0 writer preserves Arrow\n # nanosecond timestamps by default\n n = 9\n df = pd.DataFrame({'x': range(n)},\n index=pd.DatetimeIndex(start='2017-01-01',\n freq='1n',\n periods=n))\n tb = pa.Table.from_pandas(df)\n\n filename = tempdir / 'written.parquet'\n try:\n pq.write_table(tb, filename, version='2.0')\n except Exception:\n pass\n assert filename.exists()\n\n recovered_table = pq.read_table(filename)\n assert tb.equals(recovered_table)\n\n # Loss of data thru coercion (without explicit override) still an error\n filename = tempdir / 'not_written.parquet'\n with pytest.raises(ValueError):\n pq.write_table(tb, filename, coerce_timestamps='ms', version='2.0')\n\n\ndef test_read_non_existent_file(tempdir):\n path = 'non-existent-file.parquet'\n try:\n pq.read_table(path)\n except Exception as e:\n assert path in e.args[0]\n\n\ndef test_read_table_doesnt_warn(datadir):\n with pytest.warns(None) as record:\n pq.read_table(datadir / 'v0.7.1.parquet')\n\n assert len(record) == 0\n\n\ndef _test_write_to_dataset_with_partitions(base_path,\n filesystem=None,\n schema=None,\n index_name=None):\n # ARROW-1400\n output_df = pd.DataFrame({'group1': list('aaabbbbccc'),\n 'group2': list('eefeffgeee'),\n 'num': list(range(10)),\n 'nan': [pd.np.nan] * 10,\n 'date': np.arange('2017-01-01', '2017-01-11',\n dtype='datetime64[D]')})\n cols = output_df.columns.tolist()\n partition_by = ['group1', 'group2']\n output_table = pa.Table.from_pandas(output_df, schema=schema, safe=False,\n preserve_index=False)\n pq.write_to_dataset(output_table, base_path, partition_by,\n filesystem=filesystem)\n\n metadata_path = os.path.join(base_path, '_common_metadata')\n\n if filesystem is not None:\n with filesystem.open(metadata_path, 'wb') as f:\n pq.write_metadata(output_table.schema, f)\n else:\n pq.write_metadata(output_table.schema, metadata_path)\n\n # ARROW-2891: Ensure the output_schema is preserved when writing a\n # partitioned dataset\n dataset = pq.ParquetDataset(base_path,\n filesystem=filesystem,\n validate_schema=True)\n # ARROW-2209: Ensure the dataset schema also includes the partition columns\n dataset_cols = set(dataset.schema.to_arrow_schema().names)\n assert dataset_cols == set(output_table.schema.names)\n\n input_table = dataset.read()\n input_df = input_table.to_pandas()\n\n # 
Read data back in and compare with original DataFrame\n # Partitioned columns added to the end of the DataFrame when read\n input_df_cols = input_df.columns.tolist()\n assert partition_by == input_df_cols[-1 * len(partition_by):]\n\n # Partitioned columns become 'categorical' dtypes\n input_df = input_df[cols]\n for col in partition_by:\n output_df[col] = output_df[col].astype('category')\n assert output_df.equals(input_df)\n\n\ndef _test_write_to_dataset_no_partitions(base_path, filesystem=None):\n # ARROW-1400\n output_df = pd.DataFrame({'group1': list('aaabbbbccc'),\n 'group2': list('eefeffgeee'),\n 'num': list(range(10)),\n 'date': np.arange('2017-01-01', '2017-01-11',\n dtype='datetime64[D]')})\n cols = output_df.columns.tolist()\n output_table = pa.Table.from_pandas(output_df)\n\n if filesystem is None:\n filesystem = LocalFileSystem.get_instance()\n\n # Without partitions, append files to root_path\n n = 5\n for i in range(n):\n pq.write_to_dataset(output_table, base_path,\n filesystem=filesystem)\n output_files = [file for file in filesystem.ls(base_path)\n if file.endswith(\".parquet\")]\n assert len(output_files) == n\n\n # Deduplicated incoming DataFrame should match\n # original outgoing Dataframe\n input_table = pq.ParquetDataset(base_path,\n filesystem=filesystem).read()\n input_df = input_table.to_pandas()\n input_df = input_df.drop_duplicates()\n input_df = input_df[cols]\n assert output_df.equals(input_df)\n\n\[email protected]\ndef test_write_to_dataset_with_partitions(tempdir):\n _test_write_to_dataset_with_partitions(str(tempdir))\n\n\[email protected]\ndef test_write_to_dataset_with_partitions_and_schema(tempdir):\n schema = pa.schema([pa.field('group1', type=pa.string()),\n pa.field('group2', type=pa.string()),\n pa.field('num', type=pa.int64()),\n pa.field('nan', type=pa.int32()),\n pa.field('date', type=pa.timestamp(unit='us'))])\n _test_write_to_dataset_with_partitions(str(tempdir), schema=schema)\n\n\[email protected]\ndef test_write_to_dataset_with_partitions_and_index_name(tempdir):\n _test_write_to_dataset_with_partitions(str(tempdir),\n index_name='index_name')\n\n\[email protected]\ndef test_write_to_dataset_no_partitions(tempdir):\n _test_write_to_dataset_no_partitions(str(tempdir))\n\n\[email protected]\ndef test_write_to_dataset_with_partitions_and_custom_filenames(tempdir):\n output_df = pd.DataFrame({'group1': list('aaabbbbccc'),\n 'group2': list('eefeffgeee'),\n 'num': list(range(10)),\n 'nan': [pd.np.nan] * 10,\n 'date': np.arange('2017-01-01', '2017-01-11',\n dtype='datetime64[D]')})\n partition_by = ['group1', 'group2']\n output_table = pa.Table.from_pandas(output_df)\n path = str(tempdir)\n\n def partition_filename_callback(keys):\n return \"{0}-{1}.parquet\".format(*keys)\n\n pq.write_to_dataset(output_table, path,\n partition_by, partition_filename_callback)\n\n dataset = pq.ParquetDataset(path)\n\n # ARROW-3538: Ensure partition filenames match the given pattern\n # defined in the local function partition_filename_callback\n expected_basenames = [\n 'a-e.parquet', 'a-f.parquet',\n 'b-e.parquet', 'b-f.parquet',\n 'b-g.parquet', 'c-e.parquet'\n ]\n output_basenames = [os.path.basename(p.path) for p in dataset.pieces]\n\n assert sorted(expected_basenames) == sorted(output_basenames)\n\n\[email protected]_memory\ndef test_large_table_int32_overflow():\n size = np.iinfo('int32').max + 1\n\n arr = np.ones(size, dtype='uint8')\n\n parr = pa.array(arr, type=pa.uint8())\n\n table = pa.Table.from_arrays([parr], names=['one'])\n f = io.BytesIO()\n 
_write_table(table, f)\n\n\ndef _simple_table_roundtrip(table):\n stream = pa.BufferOutputStream()\n _write_table(table, stream)\n buf = stream.getvalue()\n return _read_table(buf)\n\n\[email protected]\[email protected]_memory\ndef test_binary_array_overflow_to_chunked():\n # ARROW-3762\n\n # 2^31 + 1 bytes\n values = [b'x'] + [\n b'x' * (1 << 20)\n ] * 2 * (1 << 10)\n df = pd.DataFrame({'byte_col': values})\n\n tbl = pa.Table.from_pandas(df, preserve_index=False)\n read_tbl = _simple_table_roundtrip(tbl)\n\n col0_data = read_tbl[0]\n assert isinstance(col0_data, pa.ChunkedArray)\n\n # Split up into 2GB chunks\n assert col0_data.num_chunks == 2\n\n assert tbl.equals(read_tbl)\n\n\[email protected]\[email protected]_memory\ndef test_list_of_binary_large_cell():\n # ARROW-4688\n data = []\n\n # TODO(wesm): handle chunked children\n # 2^31 - 1 bytes in a single cell\n # data.append([b'x' * (1 << 20)] * 2047 + [b'x' * ((1 << 20) - 1)])\n\n # A little under 2GB in cell each containing approximately 10MB each\n data.extend([[b'x' * 1000000] * 10] * 214)\n\n arr = pa.array(data)\n table = pa.Table.from_arrays([arr], ['chunky_cells'])\n read_table = _simple_table_roundtrip(table)\n assert table.equals(read_table)\n\n\[email protected]\ndef test_index_column_name_duplicate(tempdir):\n data = {\n 'close': {\n pd.Timestamp('2017-06-30 01:31:00'): 154.99958999999998,\n pd.Timestamp('2017-06-30 01:32:00'): 154.99958999999998,\n },\n 'time': {\n pd.Timestamp('2017-06-30 01:31:00'): pd.Timestamp(\n '2017-06-30 01:31:00'\n ),\n pd.Timestamp('2017-06-30 01:32:00'): pd.Timestamp(\n '2017-06-30 01:32:00'\n ),\n }\n }\n path = str(tempdir / 'data.parquet')\n dfx = pd.DataFrame(data).set_index('time', drop=False)\n tdfx = pa.Table.from_pandas(dfx)\n _write_table(tdfx, path)\n arrow_table = _read_table(path)\n result_df = arrow_table.to_pandas()\n tm.assert_frame_equal(result_df, dfx)\n\n\[email protected]\ndef test_parquet_nested_convenience(tempdir):\n # ARROW-1684\n df = pd.DataFrame({\n 'a': [[1, 2, 3], None, [4, 5], []],\n 'b': [[1.], None, None, [6., 7.]],\n })\n\n path = str(tempdir / 'nested_convenience.parquet')\n\n table = pa.Table.from_pandas(df, preserve_index=False)\n _write_table(table, path)\n\n read = pq.read_table(path, columns=['a'])\n tm.assert_frame_equal(read.to_pandas(), df[['a']])\n\n read = pq.read_table(path, columns=['a', 'b'])\n tm.assert_frame_equal(read.to_pandas(), df)\n\n\[email protected]\ndef test_backwards_compatible_index_naming(datadir):\n expected_string = b\"\"\"\\\ncarat cut color clarity depth table price x y z\n 0.23 Ideal E SI2 61.5 55.0 326 3.95 3.98 2.43\n 0.21 Premium E SI1 59.8 61.0 326 3.89 3.84 2.31\n 0.23 Good E VS1 56.9 65.0 327 4.05 4.07 2.31\n 0.29 Premium I VS2 62.4 58.0 334 4.20 4.23 2.63\n 0.31 Good J SI2 63.3 58.0 335 4.34 4.35 2.75\n 0.24 Very Good J VVS2 62.8 57.0 336 3.94 3.96 2.48\n 0.24 Very Good I VVS1 62.3 57.0 336 3.95 3.98 2.47\n 0.26 Very Good H SI1 61.9 55.0 337 4.07 4.11 2.53\n 0.22 Fair E VS2 65.1 61.0 337 3.87 3.78 2.49\n 0.23 Very Good H VS1 59.4 61.0 338 4.00 4.05 2.39\"\"\"\n expected = pd.read_csv(io.BytesIO(expected_string), sep=r'\\s{2,}',\n index_col=None, header=0, engine='python')\n table = _read_table(datadir / 'v0.7.1.parquet')\n result = table.to_pandas()\n tm.assert_frame_equal(result, expected)\n\n\[email protected]\ndef test_backwards_compatible_index_multi_level_named(datadir):\n expected_string = b\"\"\"\\\ncarat cut color clarity depth table price x y z\n 0.23 Ideal E SI2 61.5 55.0 326 3.95 3.98 2.43\n 0.21 Premium E SI1 
59.8 61.0 326 3.89 3.84 2.31\n 0.23 Good E VS1 56.9 65.0 327 4.05 4.07 2.31\n 0.29 Premium I VS2 62.4 58.0 334 4.20 4.23 2.63\n 0.31 Good J SI2 63.3 58.0 335 4.34 4.35 2.75\n 0.24 Very Good J VVS2 62.8 57.0 336 3.94 3.96 2.48\n 0.24 Very Good I VVS1 62.3 57.0 336 3.95 3.98 2.47\n 0.26 Very Good H SI1 61.9 55.0 337 4.07 4.11 2.53\n 0.22 Fair E VS2 65.1 61.0 337 3.87 3.78 2.49\n 0.23 Very Good H VS1 59.4 61.0 338 4.00 4.05 2.39\"\"\"\n expected = pd.read_csv(\n io.BytesIO(expected_string), sep=r'\\s{2,}',\n index_col=['cut', 'color', 'clarity'],\n header=0, engine='python'\n ).sort_index()\n\n table = _read_table(datadir / 'v0.7.1.all-named-index.parquet')\n result = table.to_pandas()\n tm.assert_frame_equal(result, expected)\n\n\[email protected]\ndef test_backwards_compatible_index_multi_level_some_named(datadir):\n expected_string = b\"\"\"\\\ncarat cut color clarity depth table price x y z\n 0.23 Ideal E SI2 61.5 55.0 326 3.95 3.98 2.43\n 0.21 Premium E SI1 59.8 61.0 326 3.89 3.84 2.31\n 0.23 Good E VS1 56.9 65.0 327 4.05 4.07 2.31\n 0.29 Premium I VS2 62.4 58.0 334 4.20 4.23 2.63\n 0.31 Good J SI2 63.3 58.0 335 4.34 4.35 2.75\n 0.24 Very Good J VVS2 62.8 57.0 336 3.94 3.96 2.48\n 0.24 Very Good I VVS1 62.3 57.0 336 3.95 3.98 2.47\n 0.26 Very Good H SI1 61.9 55.0 337 4.07 4.11 2.53\n 0.22 Fair E VS2 65.1 61.0 337 3.87 3.78 2.49\n 0.23 Very Good H VS1 59.4 61.0 338 4.00 4.05 2.39\"\"\"\n expected = pd.read_csv(\n io.BytesIO(expected_string),\n sep=r'\\s{2,}', index_col=['cut', 'color', 'clarity'],\n header=0, engine='python'\n ).sort_index()\n expected.index = expected.index.set_names(['cut', None, 'clarity'])\n\n table = _read_table(datadir / 'v0.7.1.some-named-index.parquet')\n result = table.to_pandas()\n tm.assert_frame_equal(result, expected)\n\n\[email protected]\ndef test_backwards_compatible_column_metadata_handling(datadir):\n expected = pd.DataFrame(\n {'a': [1, 2, 3], 'b': [.1, .2, .3],\n 'c': pd.date_range(\"2017-01-01\", periods=3, tz='Europe/Brussels')})\n expected.index = pd.MultiIndex.from_arrays(\n [['a', 'b', 'c'],\n pd.date_range(\"2017-01-01\", periods=3, tz='Europe/Brussels')],\n names=['index', None])\n\n path = datadir / 'v0.7.1.column-metadata-handling.parquet'\n table = _read_table(path)\n result = table.to_pandas()\n tm.assert_frame_equal(result, expected)\n\n table = _read_table(path, columns=['a'])\n result = table.to_pandas()\n tm.assert_frame_equal(result, expected[['a']].reset_index(drop=True))\n\n\ndef _make_dataset_for_pickling(tempdir, N=100):\n path = tempdir / 'data.parquet'\n fs = LocalFileSystem.get_instance()\n\n df = pd.DataFrame({\n 'index': np.arange(N),\n 'values': np.random.randn(N)\n }, columns=['index', 'values'])\n table = pa.Table.from_pandas(df)\n\n num_groups = 3\n with pq.ParquetWriter(path, table.schema) as writer:\n for i in range(num_groups):\n writer.write_table(table)\n\n reader = pq.ParquetFile(path)\n assert reader.metadata.num_row_groups == num_groups\n\n metadata_path = tempdir / '_metadata'\n with fs.open(metadata_path, 'wb') as f:\n pq.write_metadata(table.schema, f)\n\n dataset = pq.ParquetDataset(tempdir, filesystem=fs)\n assert dataset.metadata_path == str(metadata_path)\n\n return dataset\n\n\[email protected]\[email protected]('pickler', [\n pytest.param(pickle, id='builtin'),\n pytest.param(pytest.importorskip('cloudpickle'), id='cloudpickle')\n])\ndef test_pickle_dataset(tempdir, datadir, pickler):\n def is_pickleable(obj):\n return obj == pickler.loads(pickler.dumps(obj))\n\n dataset = 
_make_dataset_for_pickling(tempdir)\n\n assert is_pickleable(dataset)\n assert is_pickleable(dataset.metadata)\n assert is_pickleable(dataset.metadata.schema)\n assert len(dataset.metadata.schema)\n for column in dataset.metadata.schema:\n assert is_pickleable(column)\n\n for piece in dataset.pieces:\n assert is_pickleable(piece)\n metadata = piece.get_metadata()\n assert metadata.num_row_groups\n for i in range(metadata.num_row_groups):\n assert is_pickleable(metadata.row_group(i))\n\n\[email protected]\ndef test_decimal_roundtrip(tempdir):\n num_values = 10\n\n columns = {}\n for precision in range(1, 39):\n for scale in range(0, precision + 1):\n with util.random_seed(0):\n random_decimal_values = [\n util.randdecimal(precision, scale)\n for _ in range(num_values)\n ]\n column_name = ('dec_precision_{:d}_scale_{:d}'\n .format(precision, scale))\n columns[column_name] = random_decimal_values\n\n expected = pd.DataFrame(columns)\n filename = tempdir / 'decimals.parquet'\n string_filename = str(filename)\n table = pa.Table.from_pandas(expected)\n _write_table(table, string_filename)\n result_table = _read_table(string_filename)\n result = result_table.to_pandas()\n tm.assert_frame_equal(result, expected)\n\n\[email protected]\[email protected](\n raises=pa.ArrowException, reason='Parquet does not support negative scale'\n)\ndef test_decimal_roundtrip_negative_scale(tempdir):\n expected = pd.DataFrame({'decimal_num': [decimal.Decimal('1.23E4')]})\n filename = tempdir / 'decimals.parquet'\n string_filename = str(filename)\n t = pa.Table.from_pandas(expected)\n _write_table(t, string_filename)\n result_table = _read_table(string_filename)\n result = result_table.to_pandas()\n tm.assert_frame_equal(result, expected)\n\n\[email protected]\ndef test_parquet_writer_context_obj(tempdir):\n df = _test_dataframe(100)\n df['unique_id'] = 0\n\n arrow_table = pa.Table.from_pandas(df, preserve_index=False)\n out = pa.BufferOutputStream()\n\n with pq.ParquetWriter(out, arrow_table.schema, version='2.0') as writer:\n\n frames = []\n for i in range(10):\n df['unique_id'] = i\n arrow_table = pa.Table.from_pandas(df, preserve_index=False)\n writer.write_table(arrow_table)\n\n frames.append(df.copy())\n\n buf = out.getvalue()\n result = _read_table(pa.BufferReader(buf))\n\n expected = pd.concat(frames, ignore_index=True)\n tm.assert_frame_equal(result.to_pandas(), expected)\n\n\[email protected]\ndef test_parquet_writer_context_obj_with_exception(tempdir):\n df = _test_dataframe(100)\n df['unique_id'] = 0\n\n arrow_table = pa.Table.from_pandas(df, preserve_index=False)\n out = pa.BufferOutputStream()\n error_text = 'Artificial Error'\n\n try:\n with pq.ParquetWriter(out,\n arrow_table.schema,\n version='2.0') as writer:\n\n frames = []\n for i in range(10):\n df['unique_id'] = i\n arrow_table = pa.Table.from_pandas(df, preserve_index=False)\n writer.write_table(arrow_table)\n frames.append(df.copy())\n if i == 5:\n raise ValueError(error_text)\n except Exception as e:\n assert str(e) == error_text\n\n buf = out.getvalue()\n result = _read_table(pa.BufferReader(buf))\n\n expected = pd.concat(frames, ignore_index=True)\n tm.assert_frame_equal(result.to_pandas(), expected)\n\n\[email protected]\ndef test_zlib_compression_bug():\n # ARROW-3514: \"zlib deflate failed, output buffer too small\"\n table = pa.Table.from_arrays([pa.array(['abc', 'def'])], ['some_col'])\n f = io.BytesIO()\n pq.write_table(table, f, compression='gzip')\n\n f.seek(0)\n roundtrip = pq.read_table(f)\n 
tm.assert_frame_equal(roundtrip.to_pandas(), table.to_pandas())\n\n\[email protected]\ndef test_merging_parquet_tables_with_different_pandas_metadata(tempdir):\n # ARROW-3728: Merging Parquet Files - Pandas Meta in Schema Mismatch\n schema = pa.schema([\n pa.field('int', pa.int16()),\n pa.field('float', pa.float32()),\n pa.field('string', pa.string())\n ])\n df1 = pd.DataFrame({\n 'int': np.arange(3, dtype=np.uint8),\n 'float': np.arange(3, dtype=np.float32),\n 'string': ['ABBA', 'EDDA', 'ACDC']\n })\n df2 = pd.DataFrame({\n 'int': [4, 5],\n 'float': [1.1, None],\n 'string': [None, None]\n })\n table1 = pa.Table.from_pandas(df1, schema=schema, preserve_index=False)\n table2 = pa.Table.from_pandas(df2, schema=schema, preserve_index=False)\n\n assert not table1.schema.equals(table2.schema)\n assert table1.schema.equals(table2.schema, check_metadata=False)\n\n writer = pq.ParquetWriter(tempdir / 'merged.parquet', schema=schema)\n writer.write_table(table1)\n writer.write_table(table2)\n\n\ndef test_empty_row_groups(tempdir):\n # ARROW-3020\n table = pa.Table.from_arrays([pa.array([], type='int32')], ['f0'])\n\n path = tempdir / 'empty_row_groups.parquet'\n\n num_groups = 3\n with pq.ParquetWriter(path, table.schema) as writer:\n for i in range(num_groups):\n writer.write_table(table)\n\n reader = pq.ParquetFile(path)\n assert reader.metadata.num_row_groups == num_groups\n\n for i in range(num_groups):\n assert reader.read_row_group(i).equals(table)\n\n\[email protected]\ndef test_parquet_writer_with_caller_provided_filesystem():\n out = pa.BufferOutputStream()\n\n class CustomFS(FileSystem):\n def __init__(self):\n self.path = None\n self.mode = None\n\n def open(self, path, mode='rb'):\n self.path = path\n self.mode = mode\n return out\n\n fs = CustomFS()\n fname = 'expected_fname.parquet'\n df = _test_dataframe(100)\n table = pa.Table.from_pandas(df, preserve_index=False)\n\n with pq.ParquetWriter(fname, table.schema, filesystem=fs, version='2.0') \\\n as writer:\n writer.write_table(table)\n\n assert fs.path == fname\n assert fs.mode == 'wb'\n assert out.closed\n\n buf = out.getvalue()\n table_read = _read_table(pa.BufferReader(buf))\n df_read = table_read.to_pandas()\n tm.assert_frame_equal(df_read, df)\n\n # Should raise ValueError when filesystem is passed with file-like object\n with pytest.raises(ValueError) as err_info:\n pq.ParquetWriter(pa.BufferOutputStream(), table.schema, filesystem=fs)\n expected_msg = (\"filesystem passed but where is file-like, so\"\n \" there is nothing to open with filesystem.\")\n assert str(err_info) == expected_msg\n\n\ndef test_writing_empty_lists():\n # ARROW-2591: [Python] Segmentation fault issue in pq.write_table\n arr1 = pa.array([[], []], pa.list_(pa.int32()))\n table = pa.Table.from_arrays([arr1], ['list(int32)'])\n _check_roundtrip(table)\n\n\ndef test_write_nested_zero_length_array_chunk_failure():\n # Bug report in ARROW-3792\n cols = OrderedDict(\n int32=pa.int32(),\n list_string=pa.list_(pa.string())\n )\n data = [[], [OrderedDict(int32=1, list_string=('G',)), ]]\n\n # This produces a table with a column like\n # <Column name='list_string' type=ListType(list<item: string>)>\n # [\n # [],\n # [\n # [\n # \"G\"\n # ]\n # ]\n # ]\n #\n # Each column is a ChunkedArray with 2 elements\n my_arrays = [pa.array(batch, type=pa.struct(cols)).flatten()\n for batch in data]\n my_batches = [pa.RecordBatch.from_arrays(batch, pa.schema(cols))\n for batch in my_arrays]\n tbl = pa.Table.from_batches(my_batches, pa.schema(cols))\n 
_check_roundtrip(tbl)\n\n\[email protected]\ndef test_partitioned_dataset(tempdir):\n # ARROW-3208: Segmentation fault when reading a Parquet partitioned dataset\n # to a Parquet file\n path = tempdir / \"ARROW-3208\"\n df = pd.DataFrame({\n 'one': [-1, 10, 2.5, 100, 1000, 1, 29.2],\n 'two': [-1, 10, 2, 100, 1000, 1, 11],\n 'three': [0, 0, 0, 0, 0, 0, 0]\n })\n table = pa.Table.from_pandas(df)\n pq.write_to_dataset(table, root_path=str(path),\n partition_cols=['one', 'two'])\n table = pq.ParquetDataset(path).read()\n pq.write_table(table, path / \"output.parquet\")\n\n\ndef test_read_column_invalid_index():\n table = pa.table([pa.array([4, 5]), pa.array([\"foo\", \"bar\"])],\n names=['ints', 'strs'])\n bio = pa.BufferOutputStream()\n pq.write_table(table, bio)\n f = pq.ParquetFile(bio.getvalue())\n assert f.reader.read_column(0).to_pylist() == [4, 5]\n assert f.reader.read_column(1).to_pylist() == [\"foo\", \"bar\"]\n for index in (-1, 2):\n with pytest.raises((ValueError, IndexError)):\n f.reader.read_column(index)\n\n\ndef test_direct_read_dictionary():\n # ARROW-3325\n repeats = 10\n nunique = 5\n\n data = [\n [tm.rands(10) for i in range(nunique)] * repeats,\n\n ]\n table = pa.table(data, names=['f0'])\n\n bio = pa.BufferOutputStream()\n pq.write_table(table, bio)\n contents = bio.getvalue()\n\n result = pq.read_table(pa.BufferReader(contents),\n read_dictionary=['f0'])\n\n # Compute dictionary-encoded subfield\n expected = pa.table([table[0].dictionary_encode()], names=['f0'])\n assert result.equals(expected)\n\n\ndef test_dataset_read_dictionary(tempdir):\n path = tempdir / \"ARROW-3325-dataset\"\n t1 = pa.table([[tm.rands(10) for i in range(5)] * 10], names=['f0'])\n t2 = pa.table([[tm.rands(10) for i in range(5)] * 10], names=['f0'])\n pq.write_to_dataset(t1, root_path=str(path))\n pq.write_to_dataset(t2, root_path=str(path))\n\n result = pq.ParquetDataset(path, read_dictionary=['f0']).read()\n\n # The order of the chunks is non-deterministic\n ex_chunks = [t1[0].chunk(0).dictionary_encode(),\n t2[0].chunk(0).dictionary_encode()]\n\n assert result[0].num_chunks == 2\n c0, c1 = result[0].chunk(0), result[0].chunk(1)\n if c0.equals(ex_chunks[0]):\n assert c1.equals(ex_chunks[1])\n else:\n assert c0.equals(ex_chunks[1])\n assert c1.equals(ex_chunks[0])\n\n\ndef test_direct_read_dictionary_subfield():\n repeats = 10\n nunique = 5\n\n data = [\n [[tm.rands(10)] for i in range(nunique)] * repeats,\n ]\n table = pa.table(data, names=['f0'])\n\n bio = pa.BufferOutputStream()\n pq.write_table(table, bio)\n contents = bio.getvalue()\n result = pq.read_table(pa.BufferReader(contents),\n read_dictionary=['f0.list.item'])\n\n arr = pa.array(data[0])\n values_as_dict = arr.values.dictionary_encode()\n\n inner_indices = values_as_dict.indices.cast('int32')\n new_values = pa.DictionaryArray.from_arrays(inner_indices,\n values_as_dict.dictionary)\n\n offsets = pa.array(range(51), type='int32')\n expected_arr = pa.ListArray.from_arrays(offsets, new_values)\n expected = pa.table([expected_arr], names=['f0'])\n\n assert result.equals(expected)\n assert result[0].num_chunks == 1\n\n\[email protected]\ndef test_dataset_metadata(tempdir):\n path = tempdir / \"ARROW-1983-dataset\"\n\n # create and write a test dataset\n df = pd.DataFrame({\n 'one': [1, 2, 3],\n 'two': [-1, -2, -3],\n 'three': [[1, 2], [2, 3], [3, 4]],\n })\n table = pa.Table.from_pandas(df)\n\n metadata_list = []\n pq.write_to_dataset(table, root_path=str(path),\n partition_cols=['one', 'two'],\n metadata_collector=metadata_list)\n\n # 
open the dataset and collect metadata from pieces:\n dataset = pq.ParquetDataset(path)\n metadata_list2 = [p.get_metadata() for p in dataset.pieces]\n\n # compare metadata list content:\n assert len(metadata_list) == len(metadata_list2)\n for md, md2 in zip(metadata_list, metadata_list2):\n d = md.to_dict()\n d2 = md2.to_dict()\n # serialized_size is initialized in the reader:\n assert d.pop('serialized_size') == 0\n assert d2.pop('serialized_size') > 0\n assert d == d2\n\n\ndef test_parquet_file_too_small(tempdir):\n path = str(tempdir / \"test.parquet\")\n with pytest.raises(pa.ArrowIOError,\n match='size is 0 bytes'):\n with open(path, 'wb') as f:\n pass\n pq.read_table(path)\n\n with pytest.raises(pa.ArrowIOError,\n match='size is 4 bytes'):\n with open(path, 'wb') as f:\n f.write(b'ffff')\n pq.read_table(path)\n\n\[email protected]\ndef test_categorical_index_survives_roundtrip():\n # ARROW-3652, addressed by ARROW-3246\n df = pd.DataFrame([['a', 'b'], ['c', 'd']], columns=['c1', 'c2'])\n df['c1'] = df['c1'].astype('category')\n df = df.set_index(['c1'])\n\n table = pa.Table.from_pandas(df)\n bos = pa.BufferOutputStream()\n pq.write_table(table, bos)\n ref_df = pq.read_pandas(bos.getvalue()).to_pandas()\n assert isinstance(ref_df.index, pd.CategoricalIndex)\n assert ref_df.index.equals(df.index)\n\n\ndef test_dictionary_array_automatically_read():\n # ARROW-3246\n\n # Make a large dictionary, a little over 4MB of data\n dict_length = 4000\n dict_values = pa.array([('x' * 1000 + '_{}'.format(i))\n for i in range(dict_length)])\n\n num_chunks = 10\n chunk_size = 100\n chunks = []\n for i in range(num_chunks):\n indices = np.random.randint(0, dict_length,\n size=chunk_size).astype(np.int32)\n chunks.append(pa.DictionaryArray.from_arrays(pa.array(indices),\n dict_values))\n\n table = pa.table([pa.chunked_array(chunks)], names=['f0'])\n\n bio = pa.BufferOutputStream()\n pq.write_table(table, bio)\n contents = bio.getvalue()\n result = pq.read_table(pa.BufferReader(contents))\n\n assert result.equals(table)\n\n # The only key in the metadata was the Arrow schema key\n assert result.schema.metadata is None\n\n\[email protected]\ndef test_pandas_categorical_na_type_row_groups():\n # ARROW-5085\n df = pd.DataFrame({\"col\": [None] * 100, \"int\": [1.0] * 100})\n df_category = df.astype({\"col\": \"category\", \"int\": \"category\"})\n table = pa.Table.from_pandas(df)\n table_cat = pa.Table.from_pandas(df_category)\n buf = pa.BufferOutputStream()\n\n # it works\n pq.write_table(table_cat, buf, version=\"2.0\", chunk_size=10)\n result = pq.read_table(buf.getvalue())\n\n # Result is non-categorical\n assert result[0].equals(table[0])\n assert result[1].equals(table[1])\n\n\[email protected]\ndef test_pandas_categorical_roundtrip():\n # ARROW-5480, this was enabled by ARROW-3246\n\n # Have one of the categories unobserved and include a null (-1)\n codes = np.array([2, 0, 0, 2, 0, -1, 2], dtype='int32')\n categories = ['foo', 'bar', 'baz']\n df = pd.DataFrame({'x': pd.Categorical.from_codes(\n codes, categories=categories)})\n\n buf = pa.BufferOutputStream()\n pq.write_table(pa.table(df), buf)\n\n result = pq.read_table(buf.getvalue()).to_pandas()\n assert result.x.dtype == 'category'\n assert (result.x.cat.categories == categories).all()\n tm.assert_frame_equal(result, df)\n\n\[email protected]\ndef test_multi_dataset_metadata(tempdir):\n filenames = [\"ARROW-1983-dataset.0\", \"ARROW-1983-dataset.1\"]\n metapath = str(tempdir / \"_metadata\")\n\n # create a test dataset\n df = pd.DataFrame({\n 
'one': [1, 2, 3],\n 'two': [-1, -2, -3],\n 'three': [[1, 2], [2, 3], [3, 4]],\n })\n table = pa.Table.from_pandas(df)\n\n # write dataset twice and collect/merge metadata\n _meta = None\n for filename in filenames:\n meta = []\n pq.write_table(table, str(tempdir / filename),\n metadata_collector=meta)\n meta[0].set_file_path(filename)\n if _meta is None:\n _meta = meta[0]\n else:\n _meta.append_row_groups(meta[0])\n\n # Write merged metadata-only file\n with open(metapath, \"wb\") as f:\n _meta.write_metadata_file(f)\n\n # Read back the metadata\n meta = pq.read_metadata(metapath)\n md = meta.to_dict()\n _md = _meta.to_dict()\n for key in _md:\n if key != 'serialized_size':\n assert _md[key] == md[key]\n assert _md['num_columns'] == 3\n assert _md['num_rows'] == 6\n assert _md['num_row_groups'] == 2\n assert _md['serialized_size'] == 0\n assert md['serialized_size'] > 0\n\n\[email protected]\ndef test_filter_before_validate_schema(tempdir):\n # ARROW-4076 apply filter before schema validation\n # to avoid checking unneeded schemas\n\n # create partitioned dataset with mismatching schemas which would\n # otherwise raise if first validation all schemas\n dir1 = tempdir / 'A=0'\n dir1.mkdir()\n table1 = pa.Table.from_pandas(pd.DataFrame({'B': [1, 2, 3]}))\n pq.write_table(table1, dir1 / 'data.parquet')\n\n dir2 = tempdir / 'A=1'\n dir2.mkdir()\n table2 = pa.Table.from_pandas(pd.DataFrame({'B': ['a', 'b', 'c']}))\n pq.write_table(table2, dir2 / 'data.parquet')\n\n # read single file using filter\n table = pq.read_table(tempdir, filters=[[('A', '==', 0)]])\n assert table.column('B').equals(pa.chunked_array([[1, 2, 3]]))\n"
] | [
[
"pandas.Categorical.from_codes",
"pandas.concat",
"pandas.to_datetime",
"numpy.random.seed",
"pandas.Timestamp",
"numpy.arange",
"pandas.Categorical",
"pandas.DataFrame",
"pandas.MultiIndex.from_arrays",
"pandas.util.testing.assert_frame_equal",
"numpy.ones",
"pandas.DatetimeIndex",
"numpy.random.randn",
"numpy.iinfo",
"pandas.util.testing.rands",
"pandas.date_range",
"numpy.array",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
drvinceknight/amwoss | [
"8b0bf80f0a06dc5cf9bfeef4b9f9e174ccadf06d"
] | [
"src/assets/sd_vaccine_plots/main.py"
] | [
"from scipy.integrate import solve_ivp\nimport matplotlib.pyplot as plt\nplt.style.use('seaborn-whitegrid')\n\ndef derivatives(t, y, vaccine_rate, birth_rate=0.01):\n \"\"\"Defines the system of differential equations that\n describe the epidemiology model.\n\n Args:\n t: a positive float\n y: a tuple of three integers\n vaccine_rate: a positive float <= 1\n birth_rate: a positive float <= 1\n\n Returns:\n A tuple containing dS, dI, and dR\n \"\"\"\n infection_rate = 0.3\n recovery_rate = 0.02\n death_rate = 0.01\n S, I, R = y\n N = S + I + R\n dSdt = (\n -((infection_rate * S * I) / N)\n + ((1 - vaccine_rate) * birth_rate * N)\n - (death_rate * S)\n )\n dIdt = (\n ((infection_rate * S * I) / N)\n - (recovery_rate * I)\n - (death_rate * I)\n )\n dRdt = (\n (recovery_rate * I)\n - (death_rate * R)\n + (vaccine_rate * birth_rate * N)\n )\n return dSdt, dIdt, dRdt\n\ndef integrate_ode(\n derivative_function,\n t_span,\n y0=(2999, 1, 0),\n vaccine_rate=0.85,\n birth_rate=0.01,\n):\n \"\"\"Numerically solve the system of differential equations.\n\n Args:\n derivative_function: a function returning a tuple\n of three floats\n t_span: endpoints oif the time range to integrate over\n y0: a tuple of three integers (default: (2999, 1, 0))\n vaccine_rate: a positive float <= 1 (default: 0.85)\n birth_rate: a positive float <= 1 (default: 0.01)\n\n Returns:\n A tuple of three arrays\n \"\"\"\n sol = solve_ivp(\n derivative_function,\n t_span,\n y0,\n args=(vaccine_rate, birth_rate),\n )\n ts, S, I, R = sol.t, sol.y[0], sol.y[1], sol.y[2]\n return ts, S, I, R\n\nt_span = [0, 730]\nt, S, I, R = integrate_ode(derivatives, t_span, vaccine_rate=0.0)\n\nfig, ax = plt.subplots(1, figsize=(10, 5))\nax.plot(t, S, label='Susceptible', c='black', linestyle='solid', linewidth=1.75)\nax.plot(t, I, label='Infected', c='black', linestyle='dotted', linewidth=1.75)\nax.plot(t, R, label='Recovered', c='black', linestyle='dashed', linewidth=1.75)\nax.legend(fontsize=14, frameon=True, ncol=3, bbox_to_anchor=(0.85, 1.13))\nax.set_xlabel('Time', fontsize=14)\nax.set_ylabel('People', fontsize=14)\nfig.savefig(\"plot_no_vaccine.pdf\")\n\nt, S, I, R = integrate_ode(derivatives, t_span, vaccine_rate=0.85)\n\nfig, ax = plt.subplots(1, figsize=(10, 5))\nax.plot(t, S, label='Susceptible', c='black', linestyle='solid', linewidth=1.75)\nax.plot(t, I, label='Infected', c='black', linestyle='dotted', linewidth=1.75)\nax.plot(t, R, label='Recovered', c='black', linestyle='dashed', linewidth=1.75)\nax.legend(fontsize=14, frameon=True, ncol=3, bbox_to_anchor=(0.85, 1.13))\nax.set_xlabel('Time', fontsize=14)\nax.set_ylabel('People', fontsize=14)\nfig.savefig(\"plot_with_vaccine.pdf\")"
] | [
[
"matplotlib.pyplot.subplots",
"scipy.integrate.solve_ivp",
"matplotlib.pyplot.style.use"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.9",
"1.5",
"1.2",
"1.7",
"1.0",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
agstub/sglake-detectability | [
"5556250a59d7f500bcee86899dd9a497a368faca"
] | [
"source/boundaryconds.py"
] | [
"#-------------------------------------------------------------------------------\n# This file contains functions that:\n# (1) define the boundaries (ice-air,ice-water,ice-bed) of the mesh, AND...\n# (2) mark the boundaries of the mesh\n#-------------------------------------------------------------------------------\nfrom params import tol,Lngth,Hght\nfrom geometry import bed\nimport numpy as np\nfrom dolfin import *\n\n#-------------------------------------------------------------------------------\n# Define SubDomains for ice-water boundary, ice-bed boundary, inflow (x=0) and\n# outflow (x=Length of domain). The parameter 'tol' is a minimal water depth\n# used to distinguish the ice-water and ice-bed surfaces.\n\nclass WaterBoundary(SubDomain):\n # Ice-water boundary.\n # This boundary is marked first and all of the irrelevant portions are\n # overwritten by the other boundary markers.\n def inside(self, x, on_boundary):\n return (on_boundary and (x[1]<0.5*Hght))\n\nclass BedBoundary(SubDomain):\n # Ice-bed boundary away from the lake; the portions near the lake are overwritten\n # by BasinBoundary.\n # Lifting of ice from the bed *is not* allowed on this boundary.\n def inside(self, x, on_boundary):\n return (on_boundary and ((x[1]-bed(x[0]))<=tol))\n\nclass LeftBoundary(SubDomain):\n # Left boundary\n def inside(self, x, on_boundary):\n return (on_boundary and np.abs(x[0])<tol)\n\nclass RightBoundary(SubDomain):\n # Right boundary\n def inside(self, x, on_boundary):\n return (on_boundary and np.abs(x[0]-Lngth)<tol)\n\n#-------------------------------------------------------------------------------\n\ndef mark_boundary(mesh):\n # Assign markers to each boundary segment (except the upper surface).\n # This is used at each time step to update the markers.\n #\n # Boundary marker numbering convention:\n # 1 - Left boundary\n # 2 - Right boundary\n # 3 - Ice-bed boundary\n # 4 - Ice-water boundary\n #\n # This function returns these markers, which are used to define the\n # boundary integrals and dirichlet conditions.\n\n boundary_markers = MeshFunction('size_t', mesh,dim=1)\n boundary_markers.set_all(0)\n\n # Mark ice-water boundary\n bdryWater = WaterBoundary()\n bdryWater.mark(boundary_markers, 4)\n\n # Mark ice-bed boundary away from lake\n bdryBed = BedBoundary()\n bdryBed.mark(boundary_markers, 3)\n\n # Mark inflow boundary\n bdryLeft = LeftBoundary()\n bdryLeft.mark(boundary_markers, 1)\n\n # Mark outflow boundary\n bdryRight = RightBoundary()\n bdryRight.mark(boundary_markers, 2)\n\n return boundary_markers\n"
] | [
[
"numpy.abs"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
caogao/param | [
"9de2602c894df264a004c352ee16abc14f93da76",
"9de2602c894df264a004c352ee16abc14f93da76",
"9de2602c894df264a004c352ee16abc14f93da76",
"9de2602c894df264a004c352ee16abc14f93da76"
] | [
"train/comms/pt/comms.py",
"train/compute/python/workloads/pytorch/split_table_batched_embeddings_ops.py",
"train/comms/pt/commsTraceReplay.py",
"train/comms/pt/pytorch_dist_backend.py"
] | [
"#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport argparse\nimport logging\nimport time\n\nimport comms_utils\nimport numpy as np\n\n# pytorch\nimport torch\nfrom comms_utils import paramCommsBench, ensureTensorFlush\n\n### TODO: add these to class variables?\nsupportedCollectives = [\n \"reduce\",\n \"all_reduce\",\n \"all_to_all\",\n \"all_to_allv\",\n \"all_gather\",\n \"broadcast\",\n \"reduce_scatter\",\n \"reduce_scatter_base\",\n \"all_gather_base\",\n \"incast\",\n \"multicast\",\n] # , \"scatter\", \"gather\"]\npt2ptPatterns = [\n \"one2one\",\n \"pairwise\",\n]\n\nlogger = logging.getLogger(__name__)\n\n\nclass MultilineFormatter(argparse.ArgumentDefaultsHelpFormatter):\n def _split_lines(self, text, width):\n if text.startswith(\"R|\"):\n return text[2:].splitlines()\n # this is the RawTextHelpFormatter._split_lines\n return argparse.ArgumentDefaultsHelpFormatter._split_lines(self, text, width)\n\n\n# define the collective benchmark\nclass commsCollBench(paramCommsBench):\n def __init__(self):\n super().__init__(supportedNwstacks=[\"pytorch-dist\", \"pytorch-xla-tpu\"])\n\n # def readCollArgs(self, parser):\n def readArgs(self, parser):\n # read the common/basic arguments\n super().readArgs(parser)\n parser.add_argument(\n \"--w\", type=int, default=5, help=\"number of warmup iterations\"\n ) # number of warmup-iterations\n parser.add_argument(\n \"--n\", type=int, default=5, help=\"number of iterations\"\n ) # number of iterations\n # experiment related parameters\n parser.add_argument(\n \"--mode\",\n type=str,\n default=\"comms\",\n help=\"benchmark mode\",\n choices=[\"comms\", \"compute\", \"dlrm\", \"comms-compute\"],\n ) # alternative is DLRM mode or comm-compute mode\n parser.add_argument(\n \"--b\", type=str, default=\"8\", help=\"minimum size, in bytes, to start with\"\n ) # COMMS mode, begin the sweep at.\n parser.add_argument(\n \"--e\", type=str, default=\"64\", help=\"maximum size, in bytes, to end at\"\n ) # COMMS mode, end the sweep at.\n parser.add_argument(\n \"--f\", type=int, default=2, help=\"multiplication factor between sizes\"\n ) # COMMS mode, multiplication factor.\n parser.add_argument(\n \"--collective\",\n type=str,\n default=\"all_reduce\",\n help=\"Collective operation to be evaluated\",\n choices=supportedCollectives,\n ) # collective op to benchmark\n # For comm-compute or compute mode\n parser.add_argument(\n \"--kernel\",\n type=str,\n default=\"gemm\",\n help=\"Compute kernel, used for comms-compute or compute mode\",\n choices=[\"gemm\", \"emb_lookup\"],\n ) # Compute kernel: \"gemm\"\n parser.add_argument(\n \"--num-compute\",\n type=int,\n default=100,\n help=\"one collective for every NUM_COMPUTE compute kernels\",\n ) # Launch one coll for every n compute kernels\n # For GEMM\n parser.add_argument(\n \"--mm-dim\",\n type=int,\n default=100,\n help=\"dimension size for GEMM compute kernel\",\n ) # Matrix multiplication dim n, A[n,n] * B [n,n]\n # For emb lookup\n parser.add_argument(\n \"--emb-dim\",\n type=int,\n default=128,\n help=\"dimension size for Embedding table compute kernel\",\n ) # Embedding table dimension\n parser.add_argument(\n \"--num-embs\",\n type=int,\n default=100000,\n help=\"Embedding table hash size for Embedding table compute kernel\",\n ) # Embedding table hash 
size\n parser.add_argument(\n \"--avg-len\",\n type=int,\n default=28,\n help=\"Average lookup operations per sample\",\n ) # Average #lookup per sample\n parser.add_argument(\n \"--batch-size\",\n type=int,\n default=512,\n help=\"number of samples reading the table concurrently\",\n ) # #Samples reading the table concurrently\n parser.add_argument(\n \"--root\", type=int, default=0, help=\"root process for reduce benchmark\"\n ) # root process for reduce and bcast (and gather, scatter, etc., if support in the future)\n # TODO: check the correctness of root, should be between 0 to [world_size -1]\n parser.add_argument(\n \"--src-ranks\",\n type=str,\n nargs=\"?\",\n help=\"R|src ranks for many-to-one incast pattern or pt2pt.\\n\"\n \"List of ranks separated by comma or a range specified by start:end.\\n\"\n \"Pt2pt one2one should set only one rank.\\n\"\n \"The default value of incast includes all ranks, pt2pt includes rank 0.\",\n ) # optional: group of src ranks in many-to-one incast or pt2pt\n parser.add_argument(\n \"--dst-ranks\",\n type=str,\n nargs=\"?\",\n help=\"R|dst ranks for one-to-many multicast pattern or pt2pt.\\n\"\n \"List of ranks separated by comma or a range specified by start:end.\\n\"\n \"Pt2pt one2one should set only one rank\\n\"\n \"The default value of multicast includes all ranks, pt2pt includes rank 1.\",\n ) # optional: group of dst ranks in one-to-many multicast or pt2pt\n parser.add_argument(\n \"--pair\",\n action=\"store_true\",\n default=False,\n help=\"Toggle to enable collective pair mode\",\n )\n parser.add_argument(\n \"--collective-pair\",\n type=str,\n default=\"all_reduce\",\n help=\"Collective pair operation to be evaluated\",\n choices=supportedCollectives,\n ) # collective op to pair with the other collective, --collective should be non-empty\n parser.add_argument(\n \"--overlap-pair-pgs\",\n action=\"store_true\",\n default=False,\n help=\"Toggle to enable overlapping collective pair with two pgs\",\n ) # overlap collective pair with two pgs\n parser.add_argument(\n \"--pt2pt\",\n type=str,\n default=None,\n help=\"point to point pattern\",\n choices=pt2ptPatterns,\n ) # point to point mode\n parser.add_argument(\n \"--window\",\n type=int,\n default=100,\n help=\"window size for pt2pt throughput test\",\n ) # optional: point to point throughput test window size\n\n return parser.parse_known_args()\n\n def checkArgs(self, args):\n super().checkArgs(args)\n\n if args.pt2pt is not None:\n args.collective = \"pt2pt\"\n if args.pt2pt not in pt2ptPatterns:\n logger.error(\n f\"Specified pt2pt pattern: {args.pt2pt} is not one of the supported pt2pt patterns: {str(pt2ptPatterns)}\"\n )\n comms_utils.gracefulExit()\n\n args.b = comms_utils.parsesize(args.b)\n args.e = comms_utils.parsesize(args.e)\n args.dtype = self.dtypeMap[args.data_type]\n\n if args.b < 1:\n logger.warning(\n f\"Starting size (--b {args.b}) should be greater than 1 byte...fix and continue\"\n )\n args.b = 1\n\n if args.e < args.b:\n logger.warning(\n f\"the begin-size (--b {args.b}) is larger than the end-size (--e {args.e})\"\n )\n\n if args.device == \"cpu\" and args.backend == \"nccl\":\n raise ValueError(f\"NCCL is not supported for device type {args.device}\")\n\n if args.c == 1 and args.z == 0 and args.collective in (\"all_reduce\", \"reduce\", \"reduce_scatter\"):\n logger.warning(\n f\"Data validation is not supported for {args.collective} in non-blocking mode, disabled and continue\"\n )\n args.c = 0\n\n # run a few sanity checks\n if args.bitwidth < 32:\n if args.device 
!= \"cuda\":\n logger.error(\n f\"collective quantization may not be fully supported for {args.device}\"\n )\n comms_utils.checkQuantArgs(\n args.collective,\n args.dtype,\n args.b,\n args.quant_a2a_embedding_dim,\n args.z,\n )\n\n def runColl(self, comm_fn=None, compute_fn=None, comm_fn_pair=None):\n self.backendFuncs.complete_accel_ops(self.collectiveArgs, initOp=True)\n self.backendFuncs.sync_barrier(self.collectiveArgs, desc=\"runColl_begin\")\n\n elapsedTimeNS = 0.0\n is_blocking = not self.collectiveArgs.asyncOp\n enable_comms = False if (comm_fn is None or comm_fn == self.backendFuncs.noop) else True\n enable_compute = False if (compute_fn is None or compute_fn == self.backendFuncs.noop) else True\n enable_comms_pair = False if (comm_fn_pair is None or comm_fn_pair == self.backendFuncs.noop) else True\n\n # for comms pair mode, force async comms for overlapping evaluation\n if enable_comms_pair:\n self.collectiveArgs.asyncOp = True\n for nIter in range(\n self.collectiveArgs.numWarmupIters + self.collectiveArgs.numIters\n ):\n if nIter == self.collectiveArgs.numWarmupIters:\n # Flush non-blocking ops to ensure warmup is really complete\n self.backendFuncs.complete_accel_ops(self.collectiveArgs)\n ensureTensorFlush(self.collectiveArgs.opTensor)\n if enable_comms_pair:\n ensureTensorFlush(self.collectiveArgs.opTensor_pair)\n # Start measuring time after warmup iterations\n elapsedTimeNS = 0.0\n self.collectiveArgs.quant_time.reset()\n self.collectiveArgs.dequant_time.reset()\n # reset tensor values for data validation check\n if enable_comms:\n self.setTensorVal(self.collectiveArgs.opTensor)\n # for blocking mode, do barrier before starting collective\n if is_blocking:\n self.backendFuncs.sync_barrier(self.collectiveArgs)\n\n start = time.monotonic() # available only in py3\n self.collectiveArgs.group = self.backendFuncs.get_next_group()\n comm_fn(self.collectiveArgs)\n # post another collecitve if on comms pair mode, otherwise it's noop\n self.collectiveArgs.group = self.backendFuncs.get_next_group()\n comm_fn_pair(self.collectiveArgs, pair=enable_comms_pair)\n\n if enable_compute:\n for _ in range(self.collectiveArgs.numComputePerColl):\n # TODO: investigate the cache effect\n # Flush the cache\n # _ = torch.rand(6 * 1024 * 1024 // 4).float() * 2 # V100 6MB L2 cache\n compute_fn(self.collectiveArgs)\n if is_blocking: # should be sychronous, wait for the collective\n self.backendFuncs.complete_accel_ops(self.collectiveArgs)\n # Measuring time.\n elapsedTimeNS += (\n time.monotonic() - start\n ) * 1e9 # keeping time in NS, helps in divising data by nanosecond\n\n start = time.monotonic() # available only in py3\n self.backendFuncs.complete_accel_ops(self.collectiveArgs)\n end = time.monotonic() # available only in py3\n\n ensureTensorFlush(self.collectiveArgs.opTensor)\n if enable_comms_pair:\n ensureTensorFlush(self.collectiveArgs.opTensor_pair)\n\n elapsedTimeNS += (\n end - start\n ) * 1e9 # keeping time in NS, helps in divising data by nanoseconds\n\n memSize = self.backendFuncs.get_mem_size(self.collectiveArgs)\n\n avgIterNS, algBW = comms_utils.getAlgBW(\n elapsedTimeNS, memSize, self.collectiveArgs.numIters\n )\n busBW = self.backendFuncs.getBusBW(\n self.collectiveArgs.collective,\n algBW,\n self.collectiveArgs,\n )\n if enable_comms_pair:\n memSize_pair = self.backendFuncs.get_mem_size(\n self.collectiveArgs, pair=enable_comms_pair\n )\n memSize += memSize_pair\n\n _, algBW_pair = comms_utils.getAlgBW(\n elapsedTimeNS, memSize_pair, self.collectiveArgs.numIters\n )\n algBW += 
algBW_pair\n\n busBW += self.backendFuncs.getBusBW(\n self.collectiveArgs.collective_pair,\n algBW_pair,\n self.collectiveArgs,\n )\n\n self.backendFuncs.sync_barrier(self.collectiveArgs, desc=\"runColl_end\")\n\n results = {\n \"timeUS\": avgIterNS / 1e3,\n \"algBW\": algBW,\n \"busBW\": busBW,\n \"memSize\": memSize,\n }\n return results\n\n def runPt2Pt(self):\n self.backendFuncs.complete_accel_ops(self.collectiveArgs, initOp=True)\n # warm-up\n memSize = self.backendFuncs.get_mem_size(self.collectiveArgs)\n self.getPingLatency(self.collectiveArgs.numWarmupIters)\n self.getPingPongLatency(self.collectiveArgs.numWarmupIters)\n self.getUniBW(self.collectiveArgs.numWarmupIters, memSize)\n self.getBiBW(self.collectiveArgs.numWarmupIters, memSize)\n self.backendFuncs.sync_barrier(self.collectiveArgs, \"runpt2pt_begin\")\n # pt2pt benchmark\n pingPerIterNS = self.getPingLatency(self.collectiveArgs.numIters)\n pingPongPerIterNS = self.getPingPongLatency(self.collectiveArgs.numIters)\n avgUniBW = self.getUniBW(self.collectiveArgs.numIters, memSize)\n avgBiBW = self.getBiBW(self.collectiveArgs.numIters, memSize)\n self.backendFuncs.sync_barrier(self.collectiveArgs, \"runpt2pt\")\n results = {\n \"pingPerIterNS\": pingPerIterNS,\n \"pingPongPerIterNS\": pingPongPerIterNS,\n \"avgUniBW\": avgUniBW,\n \"avgBiBW\": avgBiBW,\n \"memSize\": memSize,\n }\n return results\n\n def getPingLatency(self, numIters):\n logger.debug(\n \"STATUS: begin ping test with src_ranks=%s, dst_ranks=%s.\"\n % (self.collectiveArgs.src_ranks, self.collectiveArgs.dst_ranks)\n )\n self.collectiveArgs.asyncOp = False\n # get one-way latency\n pingLatencyNS = []\n for _ in range(numIters):\n self.backendFuncs.sync_barrier(self.collectiveArgs)\n start = time.monotonic()\n if self.collectiveArgs.global_rank in self.collectiveArgs.src_ranks:\n idx = self.collectiveArgs.src_ranks.index(\n self.collectiveArgs.global_rank\n )\n self.backendFuncs.send(\n self.collectiveArgs, self.collectiveArgs.dst_ranks[idx]\n )\n elif self.collectiveArgs.global_rank in self.collectiveArgs.dst_ranks:\n idx = self.collectiveArgs.dst_ranks.index(\n self.collectiveArgs.global_rank\n )\n self.backendFuncs.recv(\n self.collectiveArgs, self.collectiveArgs.src_ranks[idx]\n )\n self.backendFuncs.complete_accel_ops(self.collectiveArgs)\n pingLatencyNS.append(\n (time.monotonic() - start) * 1e9\n ) # keeping time in NS, helps in divising data by nanosecond\n logger.debug(\"STATUS: end ping test.\")\n return pingLatencyNS\n\n def getPingPongLatency(self, numIters):\n logger.debug(\n \"STATUS: begin ping-pong with src_ranks=%s, dst_ranks=%s.\"\n % (self.collectiveArgs.src_ranks, self.collectiveArgs.dst_ranks)\n )\n self.collectiveArgs.asyncOp = False\n # get round-trip latency\n pingPongLatencyNS = []\n for _ in range(numIters):\n self.backendFuncs.sync_barrier(self.collectiveArgs)\n start = time.monotonic()\n if self.collectiveArgs.global_rank in self.collectiveArgs.src_ranks:\n idx = self.collectiveArgs.src_ranks.index(\n self.collectiveArgs.global_rank\n )\n self.backendFuncs.send(\n self.collectiveArgs, self.collectiveArgs.dst_ranks[idx]\n )\n self.backendFuncs.recv(\n self.collectiveArgs, self.collectiveArgs.dst_ranks[idx]\n )\n elif self.collectiveArgs.global_rank in self.collectiveArgs.dst_ranks:\n idx = self.collectiveArgs.dst_ranks.index(\n self.collectiveArgs.global_rank\n )\n self.backendFuncs.recv(\n self.collectiveArgs, self.collectiveArgs.src_ranks[idx]\n )\n self.backendFuncs.send(\n self.collectiveArgs, self.collectiveArgs.src_ranks[idx]\n )\n 
self.backendFuncs.complete_accel_ops(self.collectiveArgs)\n pingPongLatencyNS.append(\n (time.monotonic() - start) * 1e9\n ) # keeping time in NS, helps in divising data by nanosecond\n logger.debug(\"STATUS: end ping-pong test.\")\n return pingPongLatencyNS\n\n def getUniBW(self, numIters, memSize):\n logger.debug(\n \"STATUS: begin UniBW test with src_ranks=%s, dst_ranks=%s.\"\n % (self.collectiveArgs.src_ranks, self.collectiveArgs.dst_ranks)\n )\n self.collectiveArgs.asyncOp = True\n # get unidirectional bandwidth\n uniLatencyNS = []\n for _ in range(numIters):\n self.backendFuncs.sync_barrier(self.collectiveArgs)\n start = time.monotonic()\n for w in range(self.collectiveArgs.window):\n if self.collectiveArgs.global_rank in self.collectiveArgs.src_ranks:\n idx = self.collectiveArgs.src_ranks.index(\n self.collectiveArgs.global_rank\n )\n self.backendFuncs.isend(\n self.collectiveArgs, self.collectiveArgs.dst_ranks[idx], tag=w\n )\n elif self.collectiveArgs.global_rank in self.collectiveArgs.dst_ranks:\n idx = self.collectiveArgs.dst_ranks.index(\n self.collectiveArgs.global_rank\n )\n self.backendFuncs.irecv(\n self.collectiveArgs, self.collectiveArgs.src_ranks[idx], tag=w\n )\n self.backendFuncs.complete_accel_ops(self.collectiveArgs)\n uniLatencyNS.append(\n (time.monotonic() - start) * 1e9\n ) # keeping time in NS, helps in divising data by nanosecond\n uniLatencyNS = [lat / self.collectiveArgs.window for lat in uniLatencyNS]\n uniLatencyNS = np.mean(np.array(uniLatencyNS))\n _, avgUniBW = comms_utils.getAlgBW(uniLatencyNS, memSize, 1)\n logger.debug(\"STATUS: end UniBW test.\")\n return avgUniBW\n\n def getBiBW(self, numIters, memSize):\n logger.debug(\n \"STATUS: begin BiBW test with src_ranks=%s, dst_ranks=%s.\"\n % (self.collectiveArgs.src_ranks, self.collectiveArgs.dst_ranks)\n )\n self.collectiveArgs.asyncOp = True\n # get bidirectional bandwidth\n biLatencyNS = []\n for _ in range(numIters):\n self.backendFuncs.sync_barrier(self.collectiveArgs)\n start = time.monotonic()\n for w in range(self.collectiveArgs.window):\n if self.collectiveArgs.global_rank in self.collectiveArgs.src_ranks:\n idx = self.collectiveArgs.src_ranks.index(\n self.collectiveArgs.global_rank\n )\n self.backendFuncs.isend(\n self.collectiveArgs, self.collectiveArgs.dst_ranks[idx], tag=w\n )\n self.backendFuncs.irecv(\n self.collectiveArgs,\n self.collectiveArgs.dst_ranks[idx],\n tag=w + self.collectiveArgs.window,\n )\n elif self.collectiveArgs.global_rank in self.collectiveArgs.dst_ranks:\n idx = self.collectiveArgs.dst_ranks.index(\n self.collectiveArgs.global_rank\n )\n self.backendFuncs.irecv(\n self.collectiveArgs, self.collectiveArgs.src_ranks[idx], tag=w\n )\n self.backendFuncs.isend(\n self.collectiveArgs,\n self.collectiveArgs.src_ranks[idx],\n tag=w + self.collectiveArgs.window,\n )\n self.backendFuncs.complete_accel_ops(self.collectiveArgs)\n biLatencyNS.append(\n (time.monotonic() - start) * 1e9\n ) # keeping time in NS, helps in divising data by nanosecond\n biLatencyNS = [lat / self.collectiveArgs.window for lat in biLatencyNS]\n biLatencyNS = np.mean(np.array(biLatencyNS))\n _, avgBiBW = comms_utils.getAlgBW(biLatencyNS, 2 * memSize, 1)\n logger.debug(\"STATUS: end UniBW test.\")\n return avgBiBW\n\n def checkPt2PtRanks(self):\n # set default values\n if not self.collectiveArgs.src_ranks:\n self.collectiveArgs.src_ranks = [0]\n if not self.collectiveArgs.dst_ranks:\n self.collectiveArgs.dst_ranks = [1]\n\n # sanity check\n if self.collectiveArgs.pt2pt == \"one2one\":\n if (\n 
len(self.collectiveArgs.src_ranks) > 1\n or len(self.collectiveArgs.dst_ranks) > 1\n ):\n if self.global_rank == 0:\n logger.error(\n \"One2one Pt2Pt requires only a single rank is specified in src_ranks and dst_ranks! \"\n )\n comms_utils.gracefulExit()\n elif self.collectiveArgs.pt2pt == \"pairwise\":\n # pairwise pt2pt requires identical number of ranks in src_ranks and dst_ranks.\n if len(self.collectiveArgs.src_ranks) != len(self.collectiveArgs.dst_ranks):\n if self.global_rank == 0:\n logger.error(\n \"Pairwise Pt2Pt requires identical number of members in src_ranks and dst_ranks! \"\n )\n comms_utils.gracefulExit()\n # pairwise pt2pt does not allow same rank to exist in both groups\n if bool(\n set(self.collectiveArgs.src_ranks).intersection(\n self.collectiveArgs.dst_ranks\n )\n ):\n if self.global_rank == 0:\n logger.error(\n \"Pairwise Pt2Pt requires distinct members in src_ranks and dst_ranks! \"\n )\n comms_utils.gracefulExit()\n\n if self.global_rank == 0:\n print(\n f\"\\t collective={self.collectiveArgs.collective}\\t{self.collectiveArgs.pt2pt}, src_ranks={self.collectiveArgs.src_ranks}, dst_ranks={self.collectiveArgs.dst_ranks}\"\n )\n\n def checkCollectiveRanks(self):\n if self.collectiveArgs.collective == \"incast\":\n # incast: set default value and exclude root\n if not self.collectiveArgs.src_ranks:\n self.collectiveArgs.src_ranks = [*range(self.comm_size)]\n if self.collectiveArgs.srcOrDst in self.collectiveArgs.src_ranks:\n self.collectiveArgs.src_ranks.remove(self.collectiveArgs.srcOrDst)\n elif self.collectiveArgs.collective == \"multicast\":\n # multicast: set default value and exclude root\n if not self.collectiveArgs.dst_ranks:\n self.collectiveArgs.dst_ranks = [*range(self.comm_size)]\n if self.collectiveArgs.srcOrDst in self.collectiveArgs.dst_ranks:\n self.collectiveArgs.dst_ranks.remove(self.collectiveArgs.srcOrDst)\n\n if self.global_rank == 0:\n print(\n f\"\\t collective={self.collectiveArgs.collective}, src_ranks={self.collectiveArgs.src_ranks}, dst_ranks={self.collectiveArgs.dst_ranks}\"\n )\n\n def initCollectiveArgs(self, commsParams):\n # lint was complaining that benchTime was too complex!\n (\n local_rank,\n global_rank,\n world_size,\n group,\n curDevice,\n curHwDevice,\n ) = comms_utils.get_rank_details(\n self.backendFuncs\n ) # Getting ranks from backednFuncs object, since we cannot use MPI (e.g.: TPU) to launch all the processes.\n self.backendFuncs.sayHello() # Informs us where each process is running.\n groups = self.backendFuncs.get_groups()\n num_pgs = len(groups)\n\n self.comm_size = world_size\n self.global_rank = global_rank\n\n comms_utils.fixBeginSize(\n commsParams, world_size\n ) # Ensuring that all-reduce and all-to-all has atleast one member per rank.\n allSizes = comms_utils.getSizes(\n commsParams.beginSize, commsParams.endSize, commsParams.stepFactor\n ) # Given the begin-size, end-size, step-factor what are the message sizes to iterate on.\n\n if global_rank == 0:\n print(\n f\"[Rank {global_rank:>3}] allSizes: {allSizes} local_rank: {local_rank} element_size: {commsParams.element_size}\"\n )\n\n self.collectiveArgs.group = group\n self.collectiveArgs.groups = groups\n self.collectiveArgs.num_pgs = num_pgs\n self.collectiveArgs.device = curDevice\n self.collectiveArgs.world_size = world_size\n self.collectiveArgs.numIters = commsParams.numIters\n self.collectiveArgs.numWarmupIters = commsParams.numWarmupIters\n self.collectiveArgs.global_rank = global_rank\n self.collectiveArgs.backendFuncs = self.backendFuncs\n 
self.collectiveArgs.collective = commsParams.collective\n op = self.backendFuncs.get_reduce_op(\"sum\")\n self.collectiveArgs.op = op\n self.collectiveArgs.srcOrDst = commsParams.srcOrDst\n self.collectiveArgs.src_ranks = commsParams.src_ranks\n self.collectiveArgs.dst_ranks = commsParams.dst_ranks\n self.collectiveArgs.pair = commsParams.pair\n self.collectiveArgs.collective_pair = commsParams.collective_pair\n self.collectiveArgs.pt2pt = commsParams.pt2pt\n self.collectiveArgs.window = commsParams.window\n self.collectiveArgs.asyncOp = False if commsParams.blockingFlag == 1 else True\n\n if commsParams.bitwidth < 32:\n comms_utils.initQuantCommCtx(self.collectiveArgs, commsParams)\n\n if self.collectiveArgs.collective == \"pt2pt\":\n self.checkPt2PtRanks()\n else:\n self.checkCollectiveRanks()\n\n computeFunc = self.backendFuncs.noop\n if (\n commsParams.mode != \"comms\"\n ): # Compute mode related initialization if not in comms-only mode\n if commsParams.kernel == \"gemm\":\n computeFunc = self.backendFuncs.gemm\n\n mm_dim = commsParams.mm_dim\n in1 = np.random.rand(mm_dim, mm_dim)\n MMin1 = torch.FloatTensor(in1).to(curDevice)\n in2 = np.random.rand(mm_dim, mm_dim)\n MMin2 = torch.FloatTensor(in2).to(curDevice)\n in3 = np.random.rand(mm_dim, mm_dim)\n MMin3 = torch.FloatTensor(in3).to(curDevice)\n MMout = self.backendFuncs.alloc_empty(\n [mm_dim, mm_dim], commsParams.dtype, curDevice\n )\n self.collectiveArgs.MMout = MMout\n self.collectiveArgs.MMin1 = MMin1\n self.collectiveArgs.MMin2 = MMin2\n self.collectiveArgs.MMin3 = MMin3\n self.collectiveArgs.numComputePerColl = commsParams.num_compute\n elif commsParams.kernel == \"emb_lookup\":\n computeFunc = self.backendFuncs.emb_lookup\n\n emb_dim = commsParams.emb_dim\n num_embeddings = commsParams.num_embs\n avg_length = commsParams.avg_len\n batch_size = commsParams.batch_size\n print(\n f\"emb_dim {emb_dim} num_embs {num_embeddings} avg_len {avg_length} bs {batch_size}\"\n )\n self.collectiveArgs.EmbWeights = self.backendFuncs.alloc_empty(\n [num_embeddings, emb_dim], torch.double, curDevice\n )\n self.collectiveArgs.TableOffsets = torch.LongTensor(\n [0, num_embeddings]\n ).to(curDevice)\n self.collectiveArgs.Indices = torch.LongTensor(\n np.random.randint(0, num_embeddings - 1, avg_length * batch_size)\n ).to(curDevice)\n lengths = np.ones((1, batch_size)) * avg_length\n flat_lengths = lengths.flatten()\n self.collectiveArgs.Offsets = torch.LongTensor(\n [0] + np.cumsum(flat_lengths).tolist()\n ).to(curDevice)\n self.collectiveArgs.LookupOut = self.backendFuncs.alloc_empty(\n [batch_size, emb_dim], torch.double, curDevice\n )\n self.collectiveArgs.AvgLengths = avg_length\n self.collectiveArgs.numComputePerColl = commsParams.num_compute\n\n return (\n local_rank,\n global_rank,\n world_size,\n group,\n curDevice,\n curHwDevice,\n allSizes,\n computeFunc,\n )\n\n def gatherBenchTime(self, collectiveArgs, commsParams, timeUsElapsedList):\n # Push the list to device, then do an all-gather.\n timeElapsedTensor = torch.tensor(\n timeUsElapsedList, device=self.backendFuncs.get_device()\n )\n collectiveArgs.opTensor = None\n if commsParams.backend != \"xla\":\n timeList = list(torch.ones(\n (self.comm_size,) + timeElapsedTensor.shape,\n dtype=timeElapsedTensor.dtype,\n device=timeElapsedTensor.device,\n ).unbind(0))\n collectiveArgs.opTensor = timeList\n\n collectiveArgs.ipTensor = timeElapsedTensor\n collectiveArgs.asyncOp = False\n collectiveArgs.dataSize = (\n timeElapsedTensor.nelement() * timeElapsedTensor.element_size()\n )\n 
collectiveArgs.numElements = timeElapsedTensor.nelement()\n\n # use allgather as all process group should support it\n self.backendFuncs.all_gather(collectiveArgs)\n self.backendFuncs.complete_accel_ops(collectiveArgs)\n\n return timeList\n\n def printPreamble(self, commsParams):\n logger.debug(f\"\\tcommsParams: {str(commsParams.__dict__)}\")\n header = \"\\n\\tCOMMS-RES\"\n if self.collectiveArgs.collective == \"pt2pt\":\n header += \"{:>15}{:>20}{:>10}{:>10}{:>25}{:>10}{:>10}{:>15}{:>15}{:>18}{:>18}\".format(\n \"size (B)\",\n \"pingLatency(us):p50\",\n \"p75\",\n \"p95\",\n \"pingPongLatency(us):p50\",\n \"p75\",\n \"p95\",\n \"avgUniBW(GB/s)\",\n \"avgBiBW(GB/s)\",\n \"totalUniBW(GB/s)\",\n \"totalBiBW(GB/s)\",\n )\n else:\n if commsParams.bitwidth < 32:\n header += \"-QUANT\\t{:>15}{:>18}{:>25}{:>15}{:>15}{:>15}\".format(\n \"size (B)\",\n \"nElementsPerRank\",\n \"P95 Latency(us): Quant\",\n \"Comms\",\n \"De-Quant\",\n \"Overall\",\n )\n elif not self.collectiveArgs.pair:\n header += (\n \"{:>15}{:>18}{:>18}{:>12}{:>12}{:>12}{:>12}{:>15}{:>12}\".format(\n \"size (B)\",\n \"nElementsPerRank\",\n \"Latency(us):p50\",\n \"p75\",\n \"p95\",\n \"Min\",\n \"Max\",\n \"AlgBW(GB/s)\",\n \"BusBW(GB/s)\",\n )\n )\n else:\n header += \"{:>15}{:>18}{:>22}{:>18}{:>12}{:>12}{:>12}{:>12}{:>15}{:>12}\".format(\n \"total-size (B)\",\n \"nElementsPerRank\",\n \"nElementsPairPerRank\",\n \"Latency(us):p50\",\n \"p75\",\n \"p95\",\n \"Min\",\n \"Max\",\n \"AlgBW(GB/s)\",\n \"BusBW(GB/s)\",\n )\n\n print(header)\n\n def reportBenchTimeCollWithQuant(\n self,\n commsParams,\n results,\n tensorList,\n quantTimeTensorList,\n dequantTimeTensorList,\n ):\n if commsParams.backend == \"xla\":\n latencyAcrossRanks = torch.transpose(tensorList.view(-1, 1), 0, 1)[0]\n latencyAcrossRanks = latencyAcrossRanks.cpu().detach().numpy()\n # quant tensor\n quantLatencyAcrossRanks = torch.transpose(\n quantTimeTensorList.view(-1, 1), 0, 1\n )[0]\n quantLatencyAcrossRanks = quantLatencyAcrossRanks.cpu().detach().numpy()\n # dequant tensor\n dequantLatencyAcrossRanks = torch.transpose(\n dequantTimeTensorList.view(-1, 1), 0, 1\n )[0]\n dequantLatencyAcrossRanks = dequantLatencyAcrossRanks.cpu().detach().numpy()\n else:\n if isinstance(tensorList, list):\n tensorList = [t.cpu().detach().numpy() for t in tensorList]\n latencyAcrossRanks = np.array(tensorList)\n # quant tensor\n quantLatencyAcrossRanks = np.array(quantTimeTensorList)\n # dequant tensor\n dequantLatencyAcrossRanks = np.array(dequantTimeTensorList)\n\n p95 = np.percentile(latencyAcrossRanks, 95)\n\n quant_p95 = np.percentile(quantLatencyAcrossRanks, 95)\n dequant_p95 = np.percentile(dequantLatencyAcrossRanks, 95)\n\n print(\n \"\\tCOMMS-RES-QUANT\\t{:>15}{:>18}{:>25}{:>15}{:>15}{:>15}\".format(\n results[\"memSize\"],\n str(\"%d\" % (results[\"numElements\"])),\n str(\"%.1f\" % (quant_p95)),\n str(\"%.1f\" % (p95 - quant_p95 - dequant_p95)),\n str(\"%.1f\" % (dequant_p95)),\n str(\"%.1f\" % (p95)),\n # str(\"%.3f\" % (algBW)),\n # str(\"%.3f\" % (busBW)),\n )\n )\n\n def reportBenchTime(\n self,\n commsParams,\n results,\n tensorList,\n quantTimeTensorList,\n dequantTimeTensorList,\n ):\n # convernt num_elements to # of elements per rank\n if commsParams.collective in (\"all_to_all\", \"all_to_allv\"):\n results[\"numElements\"] = int(\n results[\"numElements\"] // commsParams.comms_world_info.world_size\n )\n\n if commsParams.collective == \"pt2pt\":\n self.reportBenchTimePt2Pt(commsParams, tensorList, results)\n elif commsParams.bitwidth < 32:\n 
self.reportBenchTimeCollWithQuant(\n commsParams,\n results,\n tensorList,\n quantTimeTensorList,\n dequantTimeTensorList,\n )\n else:\n self.reportBenchTimeColl(commsParams, results, tensorList)\n\n def reportBenchTimeColl(self, commsParams, results, tensorList):\n if commsParams.backend == \"xla\":\n latencyAcrossRanks = torch.transpose(tensorList.view(-1, 1), 0, 1)[0]\n latencyAcrossRanks = latencyAcrossRanks.cpu().detach().numpy()\n else:\n if isinstance(tensorList, list):\n tensorList = [t.cpu().detach().numpy() for t in tensorList]\n latencyAcrossRanks = np.array(tensorList)\n\n logger.debug(f\"Latency across all ranks: {latencyAcrossRanks}\")\n\n # Include only communicating ranks\n if self.collectiveArgs.collective == \"multicast\":\n commRanks = [self.collectiveArgs.srcOrDst] + self.collectiveArgs.dst_ranks\n elif self.collectiveArgs.collective == \"incast\":\n commRanks = [self.collectiveArgs.srcOrDst] + self.collectiveArgs.src_ranks\n else:\n commRanks = range(self.collectiveArgs.world_size)\n\n latencyAcrossCommRanks = latencyAcrossRanks[commRanks]\n logger.debug(\n \"Latency across communicating ranks (%s): %s\"\n % (commRanks, latencyAcrossCommRanks)\n )\n\n p50 = np.percentile(latencyAcrossCommRanks, 50)\n p75 = np.percentile(latencyAcrossCommRanks, 75)\n p95 = np.percentile(latencyAcrossCommRanks, 95)\n minlat = np.amin(latencyAcrossCommRanks)\n maxlat = np.amax(latencyAcrossCommRanks)\n\n # adjust busBW\n busBW = results[\"busBW\"] * (commsParams.bitwidth / 32.0)\n\n if not self.collectiveArgs.pair:\n print(\n \"\\tCOMMS-RES{:>15}{:>18}{:>18}{:>12}{:>12}{:>12}{:>12}{:>15}{:>12}\".format(\n results[\"memSize\"],\n str(\"%d\" % (results[\"numElements\"])),\n str(\"%.1f\" % (p50)),\n str(\"%.1f\" % (p75)),\n str(\"%.1f\" % (p95)),\n str(\"%.1f\" % (minlat)),\n str(\"%.1f\" % (maxlat)),\n str(\"%.3f\" % (results[\"algBW\"])),\n str(\"%.3f\" % (busBW)),\n )\n )\n else:\n # convernt to # of elements per rank\n if commsParams.collective_pair in (\"all_to_all\", \"all_to_allv\"):\n results[\"numElements_pair\"] = int(\n results[\"numElements_pair\"]\n // commsParams.comms_world_info.world_size\n )\n print(\n \"\\tCOMMS-RES{:>15}{:>18}{:>22}{:>18}{:>12}{:>12}{:>12}{:>12}{:>15}{:>12}\".format(\n results[\"memSize\"],\n str(\"%d\" % (results[\"numElements\"])),\n str(\"%d\" % (results[\"numElements_pair\"])),\n str(\"%.1f\" % (p50)),\n str(\"%.1f\" % (p75)),\n str(\"%.1f\" % (p95)),\n str(\"%.1f\" % (minlat)),\n str(\"%.1f\" % (maxlat)),\n str(\"%.3f\" % (results[\"algBW\"])),\n str(\"%.3f\" % (busBW)),\n )\n )\n\n def reportBenchTimePt2Pt(self, commsParams, resultsAcrossRanks, results):\n pingLatencyAcrossRanks = []\n pingPongLatencyAcrossRanks = []\n uniBWAcrossRanks = []\n biBWAcrossRanks = []\n # idx = 0\n for curRankTensor in resultsAcrossRanks:\n pingLatencyAcrossRanks.append(curRankTensor[0].item())\n pingPongLatencyAcrossRanks.append(curRankTensor[1].item())\n uniBWAcrossRanks.append(curRankTensor[2].item())\n biBWAcrossRanks.append(curRankTensor[3].item())\n\n pingLatencyAcrossRanks = np.array(pingLatencyAcrossRanks)\n pingPongLatencyAcrossRanks = np.array(pingPongLatencyAcrossRanks)\n uniBWAcrossRanks = np.array(uniBWAcrossRanks)\n biBWAcrossRanks = np.array(biBWAcrossRanks)\n\n # Include only communicating ranks\n commRanks = self.collectiveArgs.src_ranks + self.collectiveArgs.dst_ranks\n pingLatencyAcrossCommRanks = pingLatencyAcrossRanks[commRanks]\n pingPongLatencyAcrossCommRanks = pingPongLatencyAcrossRanks[commRanks]\n uniBWAcrossCommRanks = 
uniBWAcrossRanks[commRanks]\n biBWAcrossCommRanks = biBWAcrossRanks[commRanks]\n\n logger.debug(\n \"Ping latency across communicating ranks (%s): %s\"\n % (commRanks, pingLatencyAcrossCommRanks)\n )\n logger.debug(\n \"PingPong latency across communicating ranks (%s): %s\"\n % (commRanks, pingPongLatencyAcrossCommRanks)\n )\n logger.debug(\n \"UniBW across all communicating ranks (%s): %s\"\n % (commRanks, uniBWAcrossCommRanks)\n )\n logger.debug(\n \"BiBW across all communicating ranks (%s): %s\"\n % (commRanks, biBWAcrossCommRanks)\n )\n\n avgUniBW = np.mean(uniBWAcrossCommRanks)\n avgBiBW = np.mean(biBWAcrossCommRanks)\n totalUniBW = np.sum(uniBWAcrossCommRanks) / 2\n totalBiBW = np.sum(biBWAcrossCommRanks) / 2\n\n ping_p50 = np.percentile(pingLatencyAcrossCommRanks, 50)\n ping_p75 = np.percentile(pingLatencyAcrossCommRanks, 75)\n ping_p95 = np.percentile(pingLatencyAcrossCommRanks, 95)\n\n ping_pong_p50 = np.percentile(pingPongLatencyAcrossCommRanks, 50)\n ping_pong_p75 = np.percentile(pingPongLatencyAcrossCommRanks, 75)\n ping_pong_p95 = np.percentile(pingPongLatencyAcrossCommRanks, 95)\n\n print(\n \"\\tCOMMS-RES{:>15}{:>20}{:>10}{:>10}{:>25}{:>10}{:>10}{:>15}{:>15}{:>18}{:>18}\".format(\n results[\"memSize\"],\n str(\"%.1f\" % (ping_p50)),\n str(\"%.1f\" % (ping_p75)),\n str(\"%.1f\" % (ping_p95)),\n str(\"%.1f\" % (ping_pong_p50)),\n str(\"%.1f\" % (ping_pong_p75)),\n str(\"%.1f\" % (ping_pong_p95)),\n str(\"%.3f\" % (avgUniBW)),\n str(\"%.3f\" % (avgBiBW)),\n str(\"%.3f\" % (totalUniBW)),\n str(\"%.3f\" % (totalBiBW)),\n )\n )\n\n def benchTime(self, index, commsParams, backendFuncs):\n # Get NW stack specific parameters\n (\n local_rank,\n global_rank,\n world_size,\n group,\n curDevice,\n curHwDevice,\n allSizes,\n computeFunc,\n ) = self.initCollectiveArgs(commsParams)\n\n backendFuncs.sync_barrier(self.collectiveArgs)\n if global_rank == 0:\n self.printPreamble(commsParams)\n\n for curSize in allSizes:\n results = {}\n timeUsElapsedList = []\n quantTimeElapsedList = []\n dequantTimeElapsedList = []\n numElements = int(curSize // commsParams.element_size)\n collectiveFunc = self.backendFuncs.noop\n collectiveFunc_pair = self.backendFuncs.noop\n\n if (\n commsParams.mode != \"compute\"\n ): # comms specific initializations if not in compute-only mode\n # set corresponding function pointers\n if commsParams.collective != \"pt2pt\":\n collectiveFunc = backendFuncs.collectiveFunc[commsParams.collective]\n\n (\n self.collectiveArgs.ipTensor,\n self.collectiveArgs.opTensor,\n ) = self.prepComm(\n curComm={\n \"in_msg_size\": numElements,\n \"out_msg_size\": numElements,\n \"world_size\": world_size,\n },\n commsParams=commsParams,\n )\n\n # Setup the arguments.\n self.collectiveArgs.dataSize = curSize\n self.collectiveArgs.numElements = numElements\n self.collectiveArgs.waitObj = []\n results[\"numElements\"] = numElements\n\n if (\n commsParams.pair and commsParams.mode != \"compute\"\n ): # comms-pair specific initializations if not in compute-only mode:\n # set corresponding function pointers\n collectiveFunc_pair = backendFuncs.collectiveFunc[\n commsParams.collective_pair\n ]\n # TODO: allow user to set specific size\n # Setup the arguments.\n self.collectiveArgs.dataSize_pair = curSize\n self.collectiveArgs.numElements_pair = int(\n self.collectiveArgs.dataSize_pair // commsParams.element_size\n )\n results[\"numElements_pair\"] = self.collectiveArgs.numElements_pair\n (\n self.collectiveArgs.ipTensor_pair,\n self.collectiveArgs.opTensor_pair,\n ) = self.prepComm(\n 
curComm={\n \"in_msg_size\": self.collectiveArgs.numElements_pair,\n \"out_msg_size\": self.collectiveArgs.numElements_pair,\n \"world_size\": world_size,\n },\n commsParams=commsParams,\n )\n\n # self.collectiveArgs has all the information on the experiment.\n if commsParams.collective == \"pt2pt\":\n results.update(self.runPt2Pt())\n\n timeUsElapsedList = [\n np.mean(np.array(results[\"pingPerIterNS\"])) / 1e3,\n np.mean(np.array(results[\"pingPongPerIterNS\"])) / 1e3,\n results[\"avgUniBW\"],\n results[\"avgBiBW\"],\n ] # time in US\n if (\n global_rank in self.collectiveArgs.src_ranks\n or global_rank in self.collectiveArgs.dst_ranks\n ):\n logger.debug(timeUsElapsedList)\n else:\n results.update(\n self.runColl(\n comm_fn=collectiveFunc,\n compute_fn=computeFunc,\n comm_fn_pair=collectiveFunc_pair,\n )\n )\n timeUsElapsedList = [results[\"timeUS\"]]\n\n # perfom data validation check on the final opTensor\n if commsParams.dcheck == 1:\n self.dcheck(commsParams, curSize, self.collectiveArgs.opTensor)\n\n backendFuncs.clear_memory(self.collectiveArgs)\n\n # gather quantization overhead if enabled\n if commsParams.bitwidth < 32:\n # calculate average (de-)quantization overhead\n results[\"quantTimeUS\"] = (\n self.collectiveArgs.quant_time.getTimeUS()\n / self.collectiveArgs.numIters\n )\n results[\"dequantTimeUS\"] = (\n self.collectiveArgs.dequant_time.getTimeUS()\n / self.collectiveArgs.numIters\n )\n quantTimeElapsedList.append(results[\"quantTimeUS\"])\n dequantTimeElapsedList.append(results[\"dequantTimeUS\"])\n\n logger.debug(quantTimeElapsedList)\n quantTimeElapsedList = self.gatherBenchTime(\n self.collectiveArgs, commsParams, quantTimeElapsedList\n )\n dequantTimeElapsedList = self.gatherBenchTime(\n self.collectiveArgs, commsParams, dequantTimeElapsedList\n )\n\n # gather and report performance to stdout\n tensorList = self.gatherBenchTime(\n self.collectiveArgs, commsParams, timeUsElapsedList\n )\n if global_rank == 0:\n self.reportBenchTime(\n commsParams,\n results,\n tensorList,\n quantTimeElapsedList,\n dequantTimeElapsedList,\n )\n\n self.backendFuncs.sync_barrier(\n self.collectiveArgs, desc=f\"curSize_{curSize}\"\n )\n\n comms_utils.clearQuantCommCtx(self.collectiveArgs)\n\n # wait rank 0 reports results to avoid other ranks mess up the output\n self.backendFuncs.sync_barrier(self.collectiveArgs, \"benchtime\")\n\n def runBench(self, comms_world_info, commsParams):\n # Init the desired backend\n if commsParams.nw_stack == \"pytorch-dist\":\n from pytorch_dist_backend import PyTorchDistBackend\n\n backendObj = PyTorchDistBackend(comms_world_info, commsParams)\n elif commsParams.nw_stack == \"pytorch-xla-tpu\":\n from pytorch_tpu_backend import PyTorchTPUBackend\n\n backendObj = PyTorchTPUBackend(comms_world_info, commsParams)\n else:\n logger.error(\"Unsupported NW stack! \")\n comms_utils.gracefulExit()\n\n self.backendFuncs = backendObj\n try:\n backendObj.benchmark_comms()\n except ValueError as ve:\n if commsParams.backend == \"ucc\":\n logger.critical(\"PyTorch UCC not implemented? 
{}\".format(repr(ve)))\n raise\n\n\ndef main():\n collBenchObj = commsCollBench()\n\n ### parse arguments ###\n parser = argparse.ArgumentParser(\n description=\"PARAM-Comm Benchmark\",\n formatter_class=MultilineFormatter,\n )\n args, leftovers = collBenchObj.readArgs(parser)\n\n collBenchObj.checkArgs(args)\n\n comms_env_params = comms_utils.read_comms_env_vars()\n if comms_env_params[\"global_rank\"] == 0:\n print(\"\\t MPI environment: %s \" % (str(comms_env_params)))\n print(\n \"\\t backend: %s nw-stack: %s mode: %s args.b: %d args.e: %d args.f: %d args.z: %s args.master_ip: %s \"\n % (\n args.backend,\n args.nw_stack,\n args.mode,\n args.b,\n args.e,\n args.f,\n args.z,\n args.master_ip,\n )\n )\n\n element_size = torch.ones([1], dtype=args.dtype).element_size()\n comms_world_info = comms_utils.comms_world_info_holder(\n args.master_ip, args.master_port, args.num_tpu_cores, comms_env_params\n )\n\n commsParams = comms_utils.commsParamsHolder(\n args, comms_world_info, element_size, collBenchObj.benchTime\n )\n\n if args.pair and args.overlap_pair_pgs:\n commsParams.num_pgs = 2\n collBenchObj.runBench(comms_world_info, commsParams)\n\n\nif __name__ == \"__main__\":\n main()\n",
"import copy\nimport gc\nimport os\nfrom typing import Any\nfrom typing import (\n Dict,\n Optional,\n)\nfrom typing import List\nfrom typing import Tuple\n\nimport numpy as np\nimport torch\nfrom fbgemm_gpu.split_table_batched_embeddings_ops import (\n CacheAlgorithm,\n OptimType,\n SparseType,\n PoolingMode,\n EmbeddingLocation,\n ComputeDevice,\n SplitTableBatchedEmbeddingBagsCodegen,\n)\n\nfrom ...lib.data import register_data_generator\nfrom ...lib.generator import full_range, TableProduct, IterableList, ListProduct\nfrom ...lib.init_helper import get_logger\nfrom ...lib.iterator import (\n ConfigIterator,\n remove_meta_attr,\n register_config_iterator,\n genericList_to_list,\n)\nfrom ...lib.operator import OperatorInterface, register_operator\n\nlogger = get_logger()\n\n\nclass SplitTableBatchedEmbeddingBagsCodegenInputIterator(ConfigIterator):\n def __init__(\n self,\n configs: Dict[str, Any],\n key: str,\n device: str,\n ):\n super(SplitTableBatchedEmbeddingBagsCodegenInputIterator, self).__init__(\n configs, key, device\n )\n logger.debug(f\"build_input_config: {configs}\")\n build_config = configs[\"build\"]\n logger.debug(f\"build_config: {build_config}\")\n self.num_tables = build_config[\"args\"][0]\n self.rows = build_config[\"args\"][1]\n self.dim = build_config[\"args\"][2]\n self.weighted = build_config[\"args\"][4]\n self.weights_precision = build_config[\"args\"][5]\n self.generator = self._generator()\n\n def _generator(self):\n inputs = self.configs[self.key]\n var_id = 0\n for input in inputs:\n input_config = copy.deepcopy(input)\n args = []\n for arg in input_config[\"args\"]:\n if \"__range__\" in arg:\n arg[\"value\"] = full_range(*arg[\"value\"])\n if \"__list__\" in arg:\n arg[\"value\"] = IterableList(arg[\"value\"])\n args.append(TableProduct(arg))\n\n config_id = 0\n for arg_config in ListProduct(args):\n batch_size = arg_config[0]\n pooling_factor = arg_config[1]\n result = {\n \"args\": [\n self.num_tables,\n self.rows,\n self.dim,\n batch_size,\n pooling_factor,\n self.weighted,\n self.weights_precision,\n ],\n \"kwargs\": {},\n }\n yield (f\"{var_id}_{config_id}\", remove_meta_attr(result))\n config_id += 1\n\n def __next__(self):\n return next(self.generator)\n\n\nregister_config_iterator(\n \"SplitTableBatchedEmbeddingBagsCodegenInputIterator\",\n SplitTableBatchedEmbeddingBagsCodegenInputIterator,\n)\n\n\ndef generate_requests(\n B: int, # batch size\n L: int, # pooling factor\n E: int, # emb size\n offset_start: int, # indices offset from previous generator\n # alpha <= 1.0: use uniform distribution\n # alpha > 1.0: use zjpf distribution\n alpha: float = 1.0,\n weighted: bool = False,\n) -> List[Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]]:\n indices_size = B * L\n # indices\n if alpha == 0:\n # linear sequence by pooling factor\n indices = torch.arange(0, indices_size).long() % L\n elif alpha <= 0.5:\n # linear sequence by embedding size\n indices = torch.arange(0, indices_size).long() % E\n elif alpha <= 1.0:\n indices = torch.randint(\n low=0,\n high=E,\n size=(indices_size,),\n dtype=torch.int64,\n )\n else:\n indices = torch.as_tensor(np.random.zipf(a=alpha, size=indices_size)).long() % E\n\n # offsets\n lengths = np.ones(B, dtype=np.int64) * L\n # here we want to add the start of previous offset to all the offsets\n # if offset_start = 0, we insert it in the beginning\n if offset_start == 0:\n offsets = torch.tensor(np.cumsum([0] + lengths.tolist()))\n else:\n offsets = torch.tensor(offset_start + np.cumsum(lengths))\n\n # 
weights\n weights_tensor = (\n torch.randn(indices_size, dtype=torch.float32) if weighted else None\n )\n\n return (indices, offsets, weights_tensor)\n\n\nclass SplitTableBatchedEmbeddingBagsCodegenInputDataGenerator:\n def get_data(self, config, device):\n logger.debug(f\"data generator config: {config}\")\n # batch size * pooling_factor\n num_tables = config[\"args\"][0][\"value\"]\n if num_tables > 1:\n rows = genericList_to_list(config[\"args\"][1])\n pooling_factors = genericList_to_list(config[\"args\"][4])\n else:\n rows = [config[\"args\"][1][\"value\"]]\n pooling_factors = [config[\"args\"][4][\"value\"]]\n batch_size = config[\"args\"][3][\"value\"]\n weighted = config[\"args\"][5][\"value\"]\n\n indices_list = []\n offsets_list = []\n per_sample_weights_list = []\n offset_start = 0\n distribution = os.getenv(\"split_embedding_distribution\")\n if distribution is None:\n distribution = 1\n logger.debug(f\"distribution = {distribution}\")\n\n target_device = torch.device(device)\n\n indices_file = None\n offsets_file = None\n weights_file = None\n if (\"indices_tensor\" in config[\"args\"][4]) and (\n \"offsets_tensor\" in config[\"args\"][4]\n ):\n indices_file = config[\"args\"][4][\"indices_tensor\"]\n offsets_file = config[\"args\"][4][\"offsets_tensor\"]\n if weighted and \"weights_tensor\" in config[\"args\"][4]:\n weights_file = config[\"args\"][4][\"weights_tensor\"]\n else:\n indices_file = os.getenv(\"split_embedding_indices\")\n offsets_file = os.getenv(\"split_embedding_offsets\")\n if weighted:\n weights_file = os.getenv(\"split_embedding_weights\")\n\n logger.debug(f\"indices_file: {indices_file}, offsets_file: {offsets_file}\")\n if indices_file is not None and offsets_file is not None:\n indices_tensor = torch.load(indices_file, map_location=target_device)\n offsets_tensor = torch.load(offsets_file, map_location=target_device)\n per_sample_weights_tensor = None\n if weights_file:\n per_sample_weights_tensor = torch.load(\n weights_file, map_location=target_device\n )\n else:\n for i in range(num_tables):\n indices, offsets, per_sample_weights = generate_requests(\n batch_size,\n pooling_factors[i],\n rows[i],\n offset_start,\n float(distribution),\n weighted,\n )\n indices_list.append(indices)\n offsets_list.append(offsets)\n # update to the offset_start to the last element of current offset\n offset_start = offsets[-1].item()\n if weighted:\n per_sample_weights_list.append(per_sample_weights)\n\n indices_tensor = torch.cat(indices_list)\n offsets_tensor = torch.cat(offsets_list)\n\n # check for per sample weights\n per_sample_weights_tensor = (\n torch.cat(per_sample_weights_list) if weighted else None\n )\n\n logger.debug(f\"indices: {indices_tensor.shape}\")\n logger.debug(f\"offsets: {offsets_tensor.shape}\")\n if per_sample_weights_tensor is not None:\n logger.debug(\n f\"per_sample_weights: {per_sample_weights_tensor.shape}, {per_sample_weights_tensor}\"\n )\n\n return (\n [\n indices_tensor.to(target_device),\n offsets_tensor.to(target_device),\n per_sample_weights_tensor.to(target_device) if weighted else None,\n ],\n {},\n )\n\n\nregister_data_generator(\n \"SplitTableBatchedEmbeddingBagsCodegenInputDataGenerator\",\n SplitTableBatchedEmbeddingBagsCodegenInputDataGenerator,\n)\n\n# Callable ops are ops can be called in the form of op(*args, **kwargs)\nclass SplitTableBatchedEmbeddingBagsCodegenOp(OperatorInterface):\n def __init__(\n self,\n ):\n super(SplitTableBatchedEmbeddingBagsCodegenOp, self).__init__()\n self.op = None\n self.fwd_out: torch.tensor = 
None\n self.grad_in: torch.tensor = None\n\n def build(\n self,\n num_tables: int,\n rows: int,\n dims: int,\n pooling: int,\n weighted: bool,\n weights_precision: str,\n optimizer: str,\n ):\n logger.debug(\n f\"build: [{num_tables}, {rows}, {dims}, {pooling}, {weighted}, {weights_precision}, {optimizer}]\"\n )\n if num_tables == 1:\n rows_list = [rows]\n dims_list = [dims]\n else:\n rows_list = rows\n dims_list = dims\n if self.device.startswith(\"cpu\"):\n compute_device = ComputeDevice.CPU\n location = EmbeddingLocation.HOST\n elif self.device.startswith(\"cuda\"):\n compute_device = ComputeDevice.CUDA\n location = EmbeddingLocation.DEVICE\n else:\n raise ValueError(f\"Unknown compute device {self.device}\")\n\n # split_table op options from actual runs of\n # caffe2/torch/fb/module_factory/proxy_module/grouped_sharded_embedding_bag.py\n self.op = SplitTableBatchedEmbeddingBagsCodegen(\n [\n (\n rows_list[i],\n dims_list[i],\n location,\n compute_device,\n )\n for i in range(num_tables)\n ],\n optimizer=OptimType(optimizer),\n pooling_mode=PoolingMode(pooling),\n weights_precision=SparseType(weights_precision),\n stochastic_rounding=True,\n cache_algorithm=CacheAlgorithm.LFU,\n cache_load_factor=0.0,\n cache_reserved_memory=12.0,\n )\n logger.debug(f\"op embedding_specs: {self.op.embedding_specs}\")\n\n def cleanup(self):\n logger.debug(\"op cleanup\")\n self.op = None\n self.grad_in = None\n self.fwd_out = None\n gc.collect()\n\n def forward(self, *args, **kwargs):\n self.fwd_out = self.op.forward(args[0], args[1], args[2])\n\n def create_grad(self):\n self.grad_in = torch.ones_like(self.fwd_out)\n\n def backward(self):\n self.fwd_out.backward(self.grad_in)\n\n\nregister_operator(\n \"SplitTableBatchedEmbeddingBagsCodegen\", SplitTableBatchedEmbeddingBagsCodegenOp()\n)\n",
"#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport argparse\nimport json\nimport logging\nimport time\nfrom os import path\nfrom typing import Dict, List, Set\n\nimport comms_utils\nimport numpy as np\nfrom comms_utils import paramCommsBench, paramTimer, paramProfile, paramToCommName\n\nlogger = logging.getLogger(__name__)\n\n\ndef writeCommDetails(commsTracePerf, rank, folder=\"./\"):\n if len(folder) == 0:\n # skip output if the path is explicitly set to \"\"\n return\n comms_file = folder + f\"/replayedCommsPerf.rank{rank}.json\"\n logger.info(f\"[Rank {rank:3}] Writing comms details to {comms_file}\")\n\n saveToLocal = True\n if \"://\" in comms_file:\n saveToLocal = False\n try:\n from internals import writeRemoteTrace as writeFbRemoteTrace\n except ImportError:\n saveToLocal = True\n pass\n else:\n writeFbRemoteTrace(commsTracePerf, remotePath=comms_file)\n\n if saveToLocal:\n try:\n import subprocess\n\n subprocess.check_output(\n [\"mkdir\", \"-p\", str(folder)], universal_newlines=True\n )\n except Exception as err:\n logger.error(\"\\t Error: %s while creating directory: %s \" % (err, folder))\n pass\n with open(comms_file, \"w\") as write_file:\n json.dump(commsTracePerf, write_file, indent=2)\n\n\nclass commsTraceReplayBench(paramCommsBench):\n def __init__(self):\n super().__init__(supportedNwstacks=[\"pytorch-dist\", \"pytorch-xla-tpu\"])\n self.comms_trace = {}\n self.trace_file = \"\"\n self.use_remote_trace = False\n self.is_dry_run = False\n self.shrink = False\n self.max_msg_cnt = 0 # 0 means no limit\n self.num_msg = 0\n self.is_blocking = True\n self.do_warm_up = True\n self.allowList = \"\"\n self.out_path = \"/tmp/paramReplayedTrace\"\n self.colls_per_batch = -1\n\n self.collInMsgSizes: Dict[str, List] = {}\n self.collInUniMsgSizes: Dict[str, Set] = {}\n self.collOutMsgSizes: Dict[str, List] = {}\n self.collOutUniMsgSizes: Dict[str, Set] = {}\n\n self.batchLat = []\n self.collLat: Dict[str, List] = {}\n\n self.comms_blocks: Dict[str, List] = {}\n self.traceWithPerf = []\n self.blockStack = []\n self.totalCommsLatency = 0.0\n\n import torch\n\n self.strToTorchDtype = {\n \"Byte\": torch.uint8,\n \"Float\": torch.float32,\n \"Int\": torch.int32,\n \"Long\": torch.long,\n \"Double\": torch.double,\n \"Half\": torch.half,\n }\n\n def readArgs(self, parser):\n # read the common/basic arguments\n super().readArgs(parser)\n parser.add_argument(\n \"--trace-path\",\n type=str,\n default=\"./\",\n help=\"File path to read the trace. 
All rank read their own trace file unless `--use-one-trace` is used.\",\n )\n parser.add_argument(\n \"--use-one-trace\",\n action=\"store_true\",\n default=False,\n help=\"Toggle to use only one trace for all ranks\",\n )\n parser.add_argument(\n \"--dry-run\",\n action=\"store_true\",\n default=self.is_dry_run,\n help=\"Toggle to only analyze trace without actually replaying collectives\",\n )\n parser.add_argument(\n \"--auto-shrink\",\n action=\"store_true\",\n default=self.shrink,\n help=\"Toggle to shrink message size when it does not match with the current scale (only for debug purpose)\",\n )\n parser.add_argument(\n \"--max-msg-cnt\",\n type=int,\n default=self.max_msg_cnt,\n help=\"Only replay first N operations (0 means no limit)\",\n )\n parser.add_argument(\n \"--no-warm-up\",\n action=\"store_true\",\n default=False,\n help=\"Toggle to disable performing extra replaying for warm-up\",\n )\n parser.add_argument(\n \"--allow-ops\",\n \"--allow-list\",\n type=str,\n default=\"all\",\n help=\"List of desired collectives (separate by comma) to be replayed, e.g., `--allow-ops all_reduce,all_to_allv,wait`, typo or not supported collectives will be ignored.\",\n )\n parser.add_argument(\n \"--output-path\",\n type=str,\n default=self.out_path,\n nargs=\"?\",\n const=\"\",\n help='Output path to write the replayed trace for post performance analysis. Set as empty string, i.e., \"\", to skip output',\n )\n parser.add_argument(\n \"--colls-per-batch\",\n type=int,\n default=self.colls_per_batch,\n help=\"Toggle to set number of consecutive collectives in a batch. This also enables per batch latency stats.\",\n )\n return parser.parse_args()\n\n def checkArgs(self, args):\n super().checkArgs(args)\n\n if (not self.use_remote_trace) and (\n path.exists(self.trace_file) is False\n or path.isfile(self.trace_file) is False\n ):\n raise ValueError(\n f\"Trace file {self.trace_file} not exist or not a file! Please specifiy the correct path using --trace-path\"\n )\n comms_utils.gracefulExit()\n\n def reportBenchTime(self, commsParams):\n # TODO:\n # 1) dry run: output some statistics, e.g., # of msgs, distribtuion of sizes (max, min, avg, p50, p95...ect)\n # 2) normal run: output 1) as well as perf. 
breakdown (e.g., a2a latencies at different phase, some percentages...ect)\n # some basic stats\n print(\n f\"\\n+++++ {len(self.comms_trace)} msgs recorded in {self.trace_file} +++++\\n\"\n )\n\n for curBlock, blockComms in self.comms_blocks.items():\n lat_list = []\n if not self.is_dry_run:\n lat_list = [comm[\"latency_us\"] for comm in blockComms]\n Lats = np.array(lat_list)\n\n logger.info(\n f\"+ {len(blockComms)} comms in block {curBlock}: {Lats.sum():.2f} us in total\"\n )\n\n logger.info(\"\\n{} Message size Statistcs {}\".format(\"=\" * 20, \"=\" * 20))\n\n for (name, collMsgs) in self.collInMsgSizes.items():\n # input tensor\n msgSizes = np.array(collMsgs)\n print(\"-\" * 50)\n print(f\"+ {len(msgSizes)} {name}\")\n print(\"-\" * 50)\n print(\n f\"Size of Input tensors (bytes)\\n {'Total (MB)':>10} {'Max.':>15} {'Min.':>10} {'Average':>13} {'p50':>13} {'p95':>13}\"\n )\n print(\n \"{:>10.2f} {:15.2f} {:10.2f} {:15.2f} {:15.2f} {:15.2f}\".format(\n msgSizes.sum() / 1024 / 1024,\n msgSizes.max(),\n msgSizes.min(),\n np.average(msgSizes),\n np.percentile(msgSizes, 50),\n np.percentile(msgSizes, 95),\n )\n )\n logger.debug(f\" - Used sizes: {sorted(self.collInUniMsgSizes[name])}\")\n\n # output tensor\n msgSizes = np.array(self.collOutMsgSizes[name])\n print(\n f\"Size of Output tensors (bytes)\\n {'Total (MB)':>10} {'Max.':>15} {'Min.':>10} {'Average':>13} {'p50':>13} {'p95':>13}\"\n )\n print(\n \"{:>10.2f} {:15.2f} {:10.2f} {:15.2f} {:15.2f} {:15.2f}\".format(\n msgSizes.sum() / 1024 / 1024,\n msgSizes.max(),\n msgSizes.min(),\n np.average(msgSizes),\n np.percentile(msgSizes, 50),\n np.percentile(msgSizes, 95),\n )\n )\n logger.debug(f\" - Used sizes: {sorted(self.collOutUniMsgSizes[name])}\")\n\n if not self.is_dry_run:\n print(\"\\n{} Performance of replayed comms {}\".format(\"=\" * 20, \"=\" * 20))\n for (coll, lats) in self.collLat.items():\n if len(lats) == 0:\n continue\n\n Lat = np.array(lats)\n print(\n \"{}\\n Replayed {} {} ({:.2f}%): \\n{}\".format(\n \"-\" * 50,\n len(lats),\n coll,\n (Lat.sum() / self.totalCommsLatency) * 100,\n \"-\" * 50,\n )\n )\n\n print(\n f\"Latency (us)\\n {'Total':>10} {'Max.':>10} {'Min.':>10} {'Average':>10} {'p50':>10} {'p95':>10}\"\n )\n print(\n \" {:10.2f} {:10.2f} {:10.2f} {:10.2f} {:10.2f} {:10.2f}\".format(\n Lat.sum(),\n Lat.max(),\n Lat.min(),\n np.average(Lat),\n np.percentile(Lat, 50),\n np.percentile(Lat, 95),\n )\n )\n msgSizeAndLatency = (\n tuple(\n zip(lats, self.collInMsgSizes[coll], self.collOutMsgSizes[coll])\n )\n if coll in self.collInMsgSizes\n else lats\n )\n logger.debug(f\"Latency and size of First ten: {msgSizeAndLatency[:10]}\")\n\n if self.colls_per_batch > 0:\n print(\"\\n{} Batch Latency Performance {}\".format(\"=\" * 20, \"=\" * 20))\n BatchLat = np.array(self.batchLat)\n print(\n f\"Batch Latency (ms)\\n {'Total':>10} {'Max.':>10} {'Min.':>10} {'Average':>10} {'p50':>10} {'p95':>10}\"\n )\n print(\n \" {:10.2f} {:10.2f} {:10.2f} {:10.2f} {:10.2f} {:10.2f}\".format(\n BatchLat.sum(),\n BatchLat.max(),\n BatchLat.min(),\n np.average(BatchLat),\n np.percentile(BatchLat, 50),\n np.percentile(BatchLat, 95),\n )\n )\n\n def initTraceStat(self):\n maxInMsgsize = 0\n maxOutMsgsize = 0\n self.num_msg = len(self.comms_trace)\n self.max_msg_cnt = self.num_msg if self.max_msg_cnt == 0 else self.max_msg_cnt\n # first pass to know the statistics and get required info.\n for curComm in self.comms_trace[: self.max_msg_cnt]:\n # record the current comm\n collName = paramToCommName(curComm[\"comms\"])\n curBlocks = 
curComm[\"marker_stack\"] if \"marker_stack\" in curComm else []\n if collName not in self.collLat.keys():\n self.collLat[collName] = []\n # some ops don't have sizes\n if \"in_msg_size\" in curComm:\n self.collInMsgSizes[collName] = []\n self.collInUniMsgSizes[collName] = set()\n self.collOutMsgSizes[collName] = []\n self.collOutUniMsgSizes[collName] = set()\n if \"in_msg_size\" in curComm:\n self.collInMsgSizes[collName].append(curComm[\"in_msg_size\"])\n self.collInUniMsgSizes[collName].add(curComm[\"in_msg_size\"])\n self.collOutMsgSizes[collName].append(curComm[\"out_msg_size\"])\n self.collOutUniMsgSizes[collName].add(curComm[\"out_msg_size\"])\n maxInMsgsize = max(curComm[\"in_msg_size\"], maxInMsgsize)\n maxOutMsgsize = max(curComm[\"out_msg_size\"], maxOutMsgsize)\n # get info sorted by code block\n for curBlock in curBlocks:\n if curBlock not in self.comms_blocks:\n self.comms_blocks[curBlock] = []\n # only add entries if on dry run, otherwise, we'll deal with later during replay w/ more info\n if self.is_dry_run:\n if collName not in (\"wait\", \"barrier\"):\n self.comms_blocks[curBlock].append(\n {\n \"comms\": collName,\n \"in_msg_size\": curComm[\"in_msg_size\"],\n \"out_msg_size\": curComm[\"out_msg_size\"],\n }\n )\n else:\n self.comms_blocks[curBlock].append(\n {\n \"comms\": collName,\n }\n )\n\n def prepComms(self, curComm, commsParams):\n commOp = paramToCommName(curComm[\"comms\"])\n if commOp in (\"wait\", \"barrier\"):\n return ([], [])\n\n # for all_to_allv, we can shrink the size if running on smaller scale\n # this is for sanity test or debug purpose only since we don't always get to run very large scale\n if self.shrink:\n cur_world_size = self.collectiveArgs.world_size\n real_world_size = cur_world_size\n\n if \"world_size\" in curComm.keys():\n real_world_size = curComm[\"world_size\"]\n else:\n # if the trace does not record world size, we may use a2av splits to infer it\n if commOp == \"all_to_allv\":\n in_split_len = len(curComm[\"in_split\"])\n out_split_len = len(curComm[\"out_split\"])\n if in_split_len > 0:\n real_world_size = in_split_len\n elif out_split_len > 0:\n real_world_size = out_split_len\n\n newNumElemsIn = (curComm[\"in_msg_size\"] // real_world_size) * cur_world_size\n newNumElemsOut = (\n curComm[\"out_msg_size\"] // real_world_size\n ) * cur_world_size\n\n if commOp == \"all_to_allv\":\n curComm[\"out_split\"] = (\n curComm[\"out_split\"][:cur_world_size]\n if (\"out_split\" in curComm.keys())\n else []\n )\n curComm[\"in_split\"] = (\n curComm[\"in_split\"][:cur_world_size]\n if (\"in_split\" in curComm.keys())\n else []\n )\n if len(curComm[\"in_split\"]) > 0:\n newNumElemsIn = sum(curComm[\"in_split\"])\n if len(curComm[\"out_split\"]) > 0:\n newNumElemsOut = sum(curComm[\"out_split\"])\n elif commOp == \"all_gather\":\n newNumElemsOut = newNumElemsIn * cur_world_size\n\n curComm[\"in_msg_size\"] = newNumElemsIn\n curComm[\"out_msg_size\"] = newNumElemsOut\n\n logger.debug(\n f\"shrink message sizes to curInNumElem {curComm['in_msg_size']}, curOutNumElem {curComm['out_msg_size']}\"\n )\n\n commsParams.dtype = self.strToTorchDtype[curComm[\"dtype\"]]\n # allocate and return tensors\n return super().prepComm(curComm, commsParams)\n\n def warmUpBench(self, commsParams):\n for cnt, curComm in enumerate(self.comms_trace[: self.max_msg_cnt]):\n if curComm[\"comms\"] not in self.allowList:\n continue\n if self.backendFuncs.get_global_rank() == 0:\n logger.debug(\n f\"[Rank {self.collectiveArgs.global_rank:3}] Replaying \\n{str(curComm)}\\n\"\n 
)\n print(\n f\"[Warm-up][{cnt} / {self.max_msg_cnt}] Replaying {curComm['comms']:>10}...\",\n end=\"\\r\",\n )\n\n # read fields and prepare the tensors\n (\n self.collectiveArgs.ipTensor,\n self.collectiveArgs.opTensor,\n ) = self.prepComms(curComm, commsParams)\n\n if curComm[\"comms\"] in self.backendFuncs.collectiveFunc.keys():\n self.backendFuncs.collectiveFunc[curComm[\"comms\"]](self.collectiveArgs)\n # skip not supported ops\n\n self.backendFuncs.complete_accel_ops(self.collectiveArgs)\n\n def runComms(self, collName, curBlockStack):\n self.collectiveArgs.quant_time.reset()\n self.collectiveArgs.dequant_time.reset()\n collTimer = paramTimer()\n\n if self.is_blocking:\n self.backendFuncs.sync_barrier(self.collectiveArgs)\n # replay the collective\n with paramProfile(\n timer=collTimer, description=\"# PARAM replay: \" + curBlockStack\n ):\n if collName in self.backendFuncs.collectiveFunc.keys():\n self.backendFuncs.collectiveFunc[collName](\n self.collectiveArgs, retFlag=True\n )\n # skip not supported ops\n\n if self.is_blocking:\n self.backendFuncs.complete_accel_ops(self.collectiveArgs)\n\n # For non-blocking, latency and global_latency are the same\n global_latency = latency = collTimer.getTimeUS()\n\n if self.is_blocking:\n with paramProfile(\n description=\"# PARAM replay barrier # \" + curBlockStack\n ) as bt:\n self.backendFuncs.sync_barrier(self.collectiveArgs)\n\n # We sync the global_latency for blocking\n global_latency = latency + (bt.intervalNS / 1e3)\n\n return (latency, global_latency)\n\n def benchTime(self, commsParams):\n \"\"\"\n The json format is expecting to be either\n {\n \"marker_stack\": [\"## all2all ##\"]\n \"comms\": \"all_to_allv\",\n \"in_msg_size\": 10357149,\n \"out_msg_size\": 23093760,\n \"in_split\": [],\n \"out_split\": [],\n \"dtype\": \"Int\"\n },\n or w/o in/out_split\n {\n \"marker_stack\": [\"## all2all ##\"]\n \"comms\": \"all_reduce\",\n \"in_msg_size\": 1048576,\n \"out_msg_size\": 1048576,\n \"dtype\": \"Int\"\n }\n or wait/barrier\n {\n \"marker_stack\": [\"## all2all ##\"]\n \"comms\": \"wait\",\n }\n NOTE:\n - this format is subject to be changed anytime\n - the unit of all size fields is # of elements (not bytes)\n \"\"\"\n # warm-up\n if self.do_warm_up:\n self.warmUpBench(commsParams)\n\n # sync everything before starting real runs\n self.backendFuncs.sync_barrier(self.collectiveArgs)\n\n if self.backendFuncs.get_global_rank() == 0:\n print(\n f\"\\n+ {self.max_msg_cnt} messages in the trace...replaying (if present) {list(self.allowList)}\"\n )\n for coll, sizes in self.collInMsgSizes.items():\n logger.info(f\"\\t{coll}: {len(sizes)}\")\n\n coll_in_batch_num = 0\n for cnt, curComm in enumerate(self.comms_trace[: self.max_msg_cnt]):\n collName = paramToCommName(curComm[\"comms\"])\n if collName not in self.allowList:\n continue\n\n curBlocks = curComm[\"marker_stack\"] if \"marker_stack\" in curComm else []\n curBlockStack = (\n \" \".join(curBlocks) if len(curBlocks) > 0 else \"Unamed/Unknown\"\n )\n\n if self.backendFuncs.get_global_rank() == 0:\n logger.debug(\n f\"[Rank {self.collectiveArgs.global_rank:3}] Replaying \\n{str(curComm)}\\n\"\n )\n print(f\"[{cnt} / {self.max_msg_cnt}]\", end=\"\\r\")\n\n # read fields and prepare the tensors\n (\n self.collectiveArgs.ipTensor,\n self.collectiveArgs.opTensor,\n ) = self.prepComms(curComm, commsParams)\n\n if self.colls_per_batch > 0 and coll_in_batch_num == 0:\n batch_begin = time.monotonic()\n\n (latency, global_latency) = self.runComms(collName, curBlockStack)\n\n # calculating 
batch latency (batch defined by --colls-per-batch)\n if collName == \"wait\" and self.colls_per_batch > 0:\n coll_in_batch_num += 1\n if coll_in_batch_num == self.colls_per_batch:\n batch_latency = (\n time.monotonic() - batch_begin\n ) * 1e3 # make it millisecond\n coll_in_batch_num = 0\n self.batchLat.append(batch_latency)\n\n # perfom data validation check on the final opTensor\n if self.is_blocking and commsParams.dcheck == 1 and collName not in (\"wait\",\"barrier\"):\n commsParams.collective = collName\n commsParams.srcOrDst = curComm[\"root\"] if \"root\" in curComm else 0\n self.dcheck(commsParams, curComm[\"out_msg_size\"], self.collectiveArgs.opTensor)\n\n self.collLat[collName].append(latency)\n\n curComm[\"seqnum\"] = cnt\n curComm[\"latency_us\"] = latency\n curComm[\"global_latency_us\"] = global_latency\n curComm[\"quant_us\"] = self.collectiveArgs.quant_time.getTimeUS()\n curComm[\"dequant_us\"] = self.collectiveArgs.dequant_time.getTimeUS()\n self.totalCommsLatency += latency\n # Keep a copy of trace with performance (latency) and seqnum\n self.traceWithPerf.append(curComm)\n\n # categorized by the marker\n for curBlock in curBlocks:\n # elem_size = self.collectiveArgs.ipTensor.element_size()\n self.comms_blocks[curBlock].append(curComm)\n\n if self.backendFuncs.get_global_rank() == 0:\n logger.info(\n f\"[{cnt} / {self.max_msg_cnt}] Replayed {collName} in block [{curBlockStack}]... {global_latency:.2f} us\"\n )\n\n # make sure all ops are completed\n self.backendFuncs.sync_barrier(self.collectiveArgs)\n self.backendFuncs.clear_memory(self.collectiveArgs)\n\n def runBench(self, comms_world_info, commsParams):\n \"\"\"Run the comms-replay benchmark:\n 1) Each rank reads its trace\n 2) First pass of the trace to ensure the format is valid and get basic stats\n 3) Execute communication replay [Skip if on dry-run mode]\n 4) report stats and performance (if not dry-run)\n \"\"\"\n logger.info(\n f\"[Rank-{comms_world_info.global_rank}] reading trace from {self.trace_file}\"\n )\n self.comm_size = comms_world_info.world_size\n self.global_rank = comms_world_info.global_rank\n\n self.readTrace(remotePath=self.trace_file)\n\n self.initTraceStat()\n # only setup and perform collectives if not dry run mode\n if not self.is_dry_run:\n self.setBench(comms_world_info, commsParams)\n # start benchmark\n self.benchTime(commsParams)\n elif comms_world_info.global_rank == 0:\n print(\n \"+ Dry run mode...No replaying, Only Rank 0 read and analyze the trace...\"\n )\n\n # rank 0 reports statistics\n if comms_world_info.global_rank == 0:\n self.reportBenchTime(commsParams)\n # writeCommDetails(self.comms_blocks, rank=comms_world_info.global_rank)\n\n if not self.is_dry_run:\n writeCommDetails(\n self.traceWithPerf,\n folder=self.out_path,\n rank=comms_world_info.global_rank,\n )\n # TODO: collect perf. from all ranks to rank 0 and detect any imbalanced perf?\n self.backendFuncs.barrier(self.collectiveArgs)\n self.backendFuncs.complete_accel_ops(self.collectiveArgs)\n\n def setBench(self, comms_world_info, commsParams):\n # init backend and corresponding function pointers\n if commsParams.nw_stack == \"pytorch-dist\":\n from pytorch_dist_backend import PyTorchDistBackend\n\n self.backendFuncs = PyTorchDistBackend(comms_world_info, commsParams)\n elif commsParams.nw_stack == \"pytorch-xla-tpu\":\n from pytorch_tpu_backend import PyTorchTPUBackend\n\n self.backendFuncs = PyTorchTPUBackend(comms_world_info, commsParams)\n else:\n logger.error(\"Unsopported NW stack! 
\")\n comms_utils.gracefulExit()\n\n self.backendFuncs.initialize_backend(\n comms_world_info.master_ip,\n comms_world_info.master_port,\n backend=commsParams.backend,\n )\n self.backendFuncs.sayHello()\n\n # set basic collective info\n (\n local_rank,\n global_rank,\n world_size,\n group,\n curDevice,\n curHwDevice,\n ) = comms_utils.get_rank_details(\n self.backendFuncs\n ) # Getting ranks from backednFuncs object, since we cannot use MPI (e.g.: TPU) to launch all the processes\n\n self.collectiveArgs.group = group\n self.collectiveArgs.device = curDevice\n self.collectiveArgs.world_size = world_size\n self.collectiveArgs.global_rank = global_rank\n self.collectiveArgs.backendFuncs = self.backendFuncs\n # FIXME: 0 is a common case, need this info from trace for more accurate replay\n self.collectiveArgs.srcOrDst = 0\n # FIXME: assuming it's always sum for reduce/allreduce operations\n self.collectiveArgs.op = self.backendFuncs.get_reduce_op(\"sum\")\n # FIXME: alwasy perfom blocking comms; may study non-blocking in the future\n self.collectiveArgs.asyncOp = not self.is_blocking\n self.collectiveArgs.ipTensor = None\n self.collectiveArgs.opTensor = None\n self.collectiveArgs.quant_threshold = commsParams.quant_threshold\n\n # set of collectives to be replayed\n if self.allowList in (\"all\", \"default\", \"*\"):\n self.allowList = self.backendFuncs.collectiveFunc.keys()\n else:\n self.allowList = [paramToCommName(op) for op in self.allowList.split(\",\")]\n\n def initBench(self, comms_world_info, commsParams, args):\n self.is_dry_run = args.dry_run\n self.shrink = args.auto_shrink\n self.max_msg_cnt = args.max_msg_cnt\n self.is_blocking = args.z\n self.do_warm_up = not args.no_warm_up\n self.allowList = args.allow_ops\n self.out_path = args.output_path\n self.colls_per_batch = args.colls_per_batch\n\n if commsParams.bitwidth < 32:\n comms_utils.initQuantCommCtx(self.collectiveArgs, commsParams)\n\n def setTraceFile(self, args, comms_env_params):\n # TODO: file name may get changed later\n if args.use_one_trace:\n self.trace_file = args.trace_path\n else:\n self.trace_file = (\n f\"{args.trace_path}/rank{comms_env_params['global_rank']}.json\"\n )\n # assume the prefix is always \"xxx://\" when reading remote trace, e.g., http://xxx\n if \"://\" in args.trace_path:\n self.use_remote_trace = True\n\n def readTrace(self, remotePath):\n \"\"\"Read trace file from remote server or local disk\"\"\"\n if self.use_remote_trace:\n protocol = remotePath.split(\"://\", 2)[\n 0\n ] # format \"<protocol prefix>://<url or path>\"\n raw_comms_trace = []\n if protocol in (\"http\", \"https\", \"ftp\"):\n raw_comms_trace = comms_utils.commonUrlRead(remotePath=remotePath)\n else:\n try:\n from internals import readRemoteTrace as readFbRemoteTrace\n except ImportError:\n logger.error(\n f\"Not supported protocol for the URL provided {remotePath}\"\n )\n else:\n raw_comms_trace = readFbRemoteTrace(remotePath=remotePath)\n\n self.comms_trace = json.load(raw_comms_trace)\n else:\n # read the json file from local disk\n with open(self.trace_file) as f:\n self.comms_trace = json.load(f)\n\n # additional check the trace format and convert it if needed\n try:\n from internals import fbTraceParser\n except ImportError:\n logger.info(\"FB internals not present, skipping Kineto fbTraceParser\")\n else:\n self.comms_trace = fbTraceParser(\n self.comms_trace, target_rank=self.global_rank\n )\n\n\ndef main():\n\n comms_env_params = comms_utils.read_comms_env_vars()\n\n traceBench = commsTraceReplayBench()\n parser = 
argparse.ArgumentParser(\n description=\"PARAM-Comms Trace Replay Mode\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n\n args = traceBench.readArgs(parser)\n traceBench.setTraceFile(args, comms_env_params)\n traceBench.checkArgs(args)\n\n time.sleep(1)\n comms_world_info = comms_utils.comms_world_info_holder(\n args.master_ip, args.master_port, args.num_tpu_cores, comms_env_params\n )\n commsParams = comms_utils.commsParamsHolderBase(args)\n traceBench.initBench(comms_world_info, commsParams, args)\n traceBench.runBench(comms_world_info, commsParams)\n\n\nif __name__ == \"__main__\":\n main()\n",
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport logging\nimport os\nfrom itertools import cycle\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.nn as nn\nfrom comms_utils import (\n backendFunctions,\n collectiveArgsHolder,\n paramProfile,\n)\n\ntry:\n from internals import all_to_allv_internal, all_to_all_internal\nexcept ImportError:\n pass\n\nlogger = logging.getLogger(__name__)\n\n\ndef _downcast(input, bitwidth):\n if bitwidth == 16:\n return input.to(torch.float16)\n elif bitwidth == 8:\n return input.to(torch.int8)\n else:\n raise NotImplementedError(\"Unsupported bitwidth. Set --bitwidth to 8/16/32\")\n\n\n# a future object or a tensor\n# okay to use float32 because a prior check that ensures\n# the original dtype is float32.\ndef _dequantize(obj):\n if obj is None:\n # invoked in a irrelevant rank\n return None\n elif type(obj) == torch.Tensor:\n # only call to() if it is not a float32 tensor\n if obj.dtype != torch.float32:\n return obj.to(torch.float32)\n else:\n return obj\n else:\n resultTensor = obj.value()[0]\n if resultTensor.dtype != torch.float32:\n return resultTensor.to(torch.float32)\n else:\n return resultTensor\n\n\nclass PyTorchDistBackend(backendFunctions):\n def sayHello(self):\n myhost = os.uname()[1]\n global_rank = self.get_global_rank()\n local_rank = self.get_local_rank()\n world_size = self.get_world_size()\n master_ip = self.comms_world_info.master_ip\n device = self.get_device()\n\n hello_msg = f\"[Rank {global_rank:3}] host {myhost}, device: {device}, local_rank: {local_rank} world_size: {world_size}, master_ip: {master_ip}\"\n\n try:\n from mpi4py import MPI\n except ImportError:\n print(hello_msg)\n else:\n # if mpi4py exists, use mpi to collect info and print prettier message :)\n comm = MPI.COMM_WORLD\n\n all_hello_msgs = comm.gather(hello_msg, root=0)\n if global_rank == 0:\n print(all_hello_msgs)\n\n # Collectives\n def all_reduce(self, collectiveArgs, retFlag=False, pair=False):\n # pair=True mode does not support quantization\n if (\n collectiveArgs.allreduce_qcomm != 32\n and collectiveArgs.allreduce_qcomm > 4\n and collectiveArgs.ipTensor.dtype == torch.float32\n and not pair\n ):\n # note: note that quantized is a new tensor\n # that is not collectiveArgs.ipTensor.\n # this means when all_reduce/reduce finished\n # quantized will hold the result instead of collectiveArgs.ipTensor\n # this is intended because we don't want to allocate new buffers\n # every time we call all_reduce (because if we don't, it will be float16 instead of float32).\n # That also means we can't use the output of quantized all_reduce's for anything other than\n # benchmarking purpose.\n with paramProfile(\n timer=collectiveArgs.quant_time,\n description=\"# PARAM: Allreduce quantization #\",\n ):\n quantized = _downcast(\n collectiveArgs.ipTensor, collectiveArgs.allreduce_qcomm\n )\n else:\n quantized = (\n collectiveArgs.ipTensor if not pair else collectiveArgs.ipTensor_pair\n )\n retObj = dist.all_reduce(\n quantized,\n op=collectiveArgs.op,\n group=collectiveArgs.group,\n async_op=collectiveArgs.asyncOp,\n ) # synchronicity is maintained in runColl\n if (id(quantized) != id(collectiveArgs.ipTensor)) and not pair:\n if collectiveArgs.asyncOp:\n retObj = retObj.get_future().then(_dequantize)\n else:\n with paramProfile(\n timer=collectiveArgs.dequant_time,\n description=\"# PARAM: Allreduce 
de-quantization #\",\n ):\n retObj = _dequantize(quantized)\n\n if collectiveArgs.asyncOp:\n collectiveArgs.waitObj.append(retObj)\n\n if retFlag:\n return retObj\n\n def reduce(self, collectiveArgs, retFlag=False, pair=False):\n # pair=True mode does not support quantization\n if collectiveArgs.reduce_qcomm != 32 and not pair:\n assert collectiveArgs.ipTensor.dtype == torch.float32\n with paramProfile(\n timer=collectiveArgs.quant_time,\n description=\"# PARAM: Reduce quantization #\",\n ):\n quantized = _downcast(\n collectiveArgs.ipTensor, collectiveArgs.allreduce_qcomm\n )\n else:\n quantized = (\n collectiveArgs.ipTensor if not pair else collectiveArgs.ipTensor_pair\n )\n retObj = dist.reduce(\n quantized,\n dst=collectiveArgs.srcOrDst,\n op=collectiveArgs.op,\n group=collectiveArgs.group,\n async_op=collectiveArgs.asyncOp,\n ) # synchronicity is maintained in runColl\n if collectiveArgs.reduce_qcomm != 32 and not pair:\n if collectiveArgs.asyncOp:\n retObj = retObj.get_future().then(_dequantize)\n else:\n with paramProfile(\n timer=collectiveArgs.dequant_time,\n description=\"# PARAM: Reduce de-quantization #\",\n ):\n retObj = _dequantize(quantized)\n\n if collectiveArgs.asyncOp:\n collectiveArgs.waitObj.append(retObj)\n\n if retFlag:\n return retObj\n\n def all_to_all(\n self, collectiveArgs: collectiveArgsHolder, retFlag=False, pair=False\n ):\n # pair=True mode does not support quantization\n if collectiveArgs.all2all_qcomm and not pair:\n work = all_to_all_internal(collectiveArgs)\n else:\n work = dist.all_to_all_single(\n collectiveArgs.opTensor if not pair else collectiveArgs.opTensor_pair,\n collectiveArgs.ipTensor if not pair else collectiveArgs.ipTensor_pair,\n None,\n None,\n group=collectiveArgs.group,\n async_op=collectiveArgs.asyncOp,\n )\n\n if collectiveArgs.asyncOp:\n collectiveArgs.waitObj.append(work)\n\n if retFlag:\n return work\n\n def all_to_allv(self, collectiveArgs, retFlag=False, pair=False):\n # pair=True mode does not support quantization\n if (\n collectiveArgs.all2all_qcomm\n and collectiveArgs.ipTensor.dtype == torch.float32\n and (\n collectiveArgs.opTensor.nelement() >= collectiveArgs.quant_threshold\n or collectiveArgs.ipTensor.nelement() >= collectiveArgs.quant_threshold\n )\n and not pair\n ):\n work = all_to_allv_internal(collectiveArgs)\n else:\n work = dist.all_to_all_single(\n collectiveArgs.opTensor if not pair else collectiveArgs.opTensor_pair,\n collectiveArgs.ipTensor if not pair else collectiveArgs.ipTensor_pair,\n collectiveArgs.opTensor_split\n if not pair\n else collectiveArgs.opTensor_split_pair,\n collectiveArgs.ipTensor_split\n if not pair\n else collectiveArgs.ipTensor_split_pair,\n group=collectiveArgs.group,\n async_op=collectiveArgs.asyncOp,\n )\n\n if collectiveArgs.asyncOp:\n collectiveArgs.waitObj.append(work)\n\n if retFlag:\n return work\n\n def all_gather(self, collectiveArgs, retFlag=False, pair=False):\n retObj = dist.all_gather(\n tensor_list=collectiveArgs.opTensor\n if not pair\n else collectiveArgs.opTensor_pair,\n tensor=collectiveArgs.ipTensor\n if not pair\n else collectiveArgs.ipTensor_pair,\n group=collectiveArgs.group,\n async_op=collectiveArgs.asyncOp,\n ) # synchronicity is maintained in runColl\n\n if collectiveArgs.asyncOp:\n collectiveArgs.waitObj.append(retObj)\n\n if retFlag:\n return retObj\n\n def reduce_scatter(self, collectiveArgs, retFlag=False, pair=False):\n retObj = dist.reduce_scatter(\n output=collectiveArgs.opTensor,\n input_list=collectiveArgs.ipTensor,\n group=collectiveArgs.group,\n 
async_op=collectiveArgs.asyncOp,\n ) # synchronicity is maintained in runColl\n\n if collectiveArgs.asyncOp:\n collectiveArgs.waitObj.append(retObj)\n\n if retFlag:\n return retObj\n\n def reduce_scatter_base(self, collectiveArgs, retFlag=False, pair=False):\n retObj = dist._reduce_scatter_base(\n output=collectiveArgs.opTensor,\n input=collectiveArgs.ipTensor,\n group=collectiveArgs.group,\n async_op=collectiveArgs.asyncOp,\n ) # synchronicity is maintained in runColl\n\n if collectiveArgs.asyncOp:\n collectiveArgs.waitObj.append(retObj)\n\n if retFlag:\n return retObj\n\n def all_gather_base(self, collectiveArgs, retFlag=False, pair=False):\n retObj = dist._all_gather_base(\n output_tensor=collectiveArgs.opTensor,\n input_tensor=collectiveArgs.ipTensor,\n group=collectiveArgs.group,\n async_op=collectiveArgs.asyncOp,\n ) # synchronicity is maintained in runColl\n\n if collectiveArgs.asyncOp:\n collectiveArgs.waitObj.append(retObj)\n\n if retFlag:\n return retObj\n\n def gather(self, collectiveArgs, retFlag=False):\n retObj = dist.gather(\n tensor=collectiveArgs.ipTensor,\n gather_list=collectiveArgs.opTensor,\n dst=collectiveArgs.srcOrDst,\n group=collectiveArgs.group,\n async_op=collectiveArgs.asyncOp,\n )\n\n if collectiveArgs.asyncOp:\n collectiveArgs.waitObj.append(retObj)\n\n if retFlag:\n return retObj\n\n # Many-to-one pattern\n def incast(self, collectiveArgs):\n if collectiveArgs.global_rank == collectiveArgs.srcOrDst:\n # root receives tensor from each of user-specified source ranks\n for idx, src_rank in enumerate(collectiveArgs.src_ranks):\n retObj = dist.irecv(\n tensor=collectiveArgs.opTensor[idx],\n src=src_rank,\n group=collectiveArgs.group,\n tag=0,\n )\n collectiveArgs.waitObj.append(retObj)\n # complete outstanding irecvs if blocking\n if not collectiveArgs.asyncOp:\n self.complete_accel_ops(collectiveArgs, devSync=False)\n elif collectiveArgs.global_rank in collectiveArgs.src_ranks:\n # send local tensor to root\n if collectiveArgs.asyncOp:\n self.isend(collectiveArgs, collectiveArgs.srcOrDst)\n else:\n self.send(collectiveArgs, collectiveArgs.srcOrDst)\n\n def broadcast(self, collectiveArgs, retFlag=False, pair=False):\n retObj = dist.broadcast(\n tensor=collectiveArgs.opTensor\n if not pair\n else collectiveArgs.opTensor_pair,\n src=collectiveArgs.srcOrDst,\n group=collectiveArgs.group,\n async_op=collectiveArgs.asyncOp,\n ) # synchronicity is maintained in runColl\n\n if collectiveArgs.asyncOp:\n collectiveArgs.waitObj.append(retObj)\n\n if retFlag:\n return retObj\n\n # One-to-many pattern\n def multicast(self, collectiveArgs):\n if collectiveArgs.global_rank == collectiveArgs.srcOrDst:\n # root sends tensor to each of user-specified destination ranks\n for dst_rank in collectiveArgs.dst_ranks:\n self.isend(collectiveArgs, dst_rank)\n # complete outstanding isends if blocking\n if not collectiveArgs.asyncOp:\n self.complete_accel_ops(collectiveArgs, devSync=False)\n elif collectiveArgs.global_rank in collectiveArgs.dst_ranks:\n # recvs tensor from root\n if collectiveArgs.asyncOp:\n self.irecv(collectiveArgs, collectiveArgs.srcOrDst)\n else:\n self.recv(collectiveArgs, collectiveArgs.srcOrDst)\n\n def send(self, collectiveArgs, dst_rank, retFlag=False, tag=0):\n dist.send(\n tensor=collectiveArgs.ipTensor,\n dst=dst_rank,\n group=collectiveArgs.group,\n tag=tag,\n )\n\n def recv(self, collectiveArgs, src_rank, retFlag=False, tag=0):\n dist.recv(\n tensor=collectiveArgs.opTensor,\n src=src_rank,\n group=collectiveArgs.group,\n tag=tag,\n )\n\n def isend(self, 
collectiveArgs, dst_rank, retFlag=False, tag=0):\n retObj = dist.isend(\n tensor=collectiveArgs.ipTensor,\n dst=dst_rank,\n group=collectiveArgs.group,\n tag=tag,\n )\n\n collectiveArgs.waitObj.append(retObj)\n\n if retFlag:\n return retObj\n\n def irecv(self, collectiveArgs, src_rank, retFlag=False, tag=0):\n retObj = dist.irecv(\n tensor=collectiveArgs.opTensor,\n src=src_rank,\n group=collectiveArgs.group,\n tag=tag,\n )\n\n collectiveArgs.waitObj.append(retObj)\n\n if retFlag:\n return retObj\n\n def device_sync(self, collectiveArgs):\n dev_str = (\n self.commsParams[\"device\"]\n if isinstance(self.commsParams, dict)\n else self.commsParams.device\n )\n if dev_str == \"cuda\":\n torch.cuda.synchronize(collectiveArgs.device)\n\n def complete_accel_ops(self, collectiveArgs, initOp=False, devSync=True):\n if initOp is True:\n temp = torch.ones([1], device=collectiveArgs.device)\n dist.all_reduce(temp)\n for waitReq in collectiveArgs.waitObj:\n if waitReq is not None:\n waitReq.wait()\n collectiveArgs.waitObj.clear()\n\n if devSync:\n self.device_sync(collectiveArgs)\n\n # retFlag not used\n def complete_single_op(self, collectiveArgs, retFlag=False):\n \"\"\"only wait the first op in the queue\"\"\"\n if len(collectiveArgs.waitObj) > 0:\n waitReq = collectiveArgs.waitObj.pop(0)\n if waitReq is not None:\n waitReq.wait()\n\n # to ensure GPU collective is completed\n self.device_sync(collectiveArgs)\n\n def barrier(self, collectiveArgs, name=\"dummy\", retFlag=False):\n retObj = dist.barrier(collectiveArgs.group, async_op=collectiveArgs.asyncOp)\n\n if collectiveArgs.asyncOp:\n collectiveArgs.waitObj.append(retObj)\n\n if retFlag:\n return retObj\n\n def sync_barrier(self, collectiveArgs, desc=\"dummy\"):\n self.barrier(collectiveArgs, name=desc)\n self.complete_accel_ops(collectiveArgs)\n\n def get_reduce_op(self, opName):\n if opName == \"sum\":\n return dist.ReduceOp.SUM\n elif opName == \"max\":\n return dist.ReduceOp.MAX\n else:\n return dist.ReduceOp.SUM\n\n # Compute functions\n def compute_mm(self, collectiveArgs):\n self.gemm(collectiveArgs)\n\n def gemm(self, collectiveArgs):\n # Matrix multiplication as compute kernel\n collectiveArgs.MMout = torch.mm(collectiveArgs.MMin1, collectiveArgs.MMin2)\n\n # Memory related\n def get_mem_size(self, collectiveArgs, pair=False):\n _sizeBytes = 0\n # opTensor could be a list of tensor for all_gather/gather/incast, get the aggregated size\n if isinstance(collectiveArgs.opTensor, list):\n _sizeBytes = sum(\n [t.nelement() * t.element_size() for t in collectiveArgs.opTensor]\n )\n # reduce scatter\n elif isinstance(collectiveArgs.ipTensor, list):\n _sizeBytes = sum(\n [t.nelement() * t.element_size() for t in collectiveArgs.ipTensor]\n )\n # reduce_scatter_base should use input tensor for total memory size\n elif collectiveArgs.collective == \"reduce_scatter_base\":\n _sizeBytes = (\n collectiveArgs.ipTensor.nelement()\n * collectiveArgs.ipTensor.element_size()\n )\n else:\n _sizeBytes = (\n collectiveArgs.opTensor.nelement()\n * collectiveArgs.opTensor.element_size()\n )\n if pair:\n if isinstance(collectiveArgs.opTensor_pair, list):\n _sizeBytes = sum(\n [\n t.nelement() * t.element_size()\n for t in collectiveArgs.opTensor_pair\n ]\n )\n else:\n _sizeBytes = (\n collectiveArgs.opTensor_pair.nelement()\n * collectiveArgs.opTensor_pair.element_size()\n )\n\n return _sizeBytes\n\n def alloc_random(\n self, sizeArr, curRankDevice=\"cuda\", dtype=torch.float32, scaleFactor=1.0\n ):\n if dtype in (torch.uint8, torch.int16, torch.int32, 
torch.long):\n ipTensor = torch.randint(\n low=0, high=10, size=sizeArr, device=curRankDevice, dtype=dtype\n )\n else:\n ipTensor = torch.rand(sizeArr, device=curRankDevice, dtype=dtype)\n if (scaleFactor) != 0:\n ipTensor = ipTensor / scaleFactor\n return ipTensor\n\n def alloc_embedding_tables(self, n, m, curRankDevice, dtype):\n EE = nn.EmbeddingBag(n, m, mode=\"sum\", sparse=True)\n\n W = np.random.uniform(\n low=-(np.sqrt(1 / n)), high=np.sqrt(1 / n), size=(n, m)\n ).astype(np.float32)\n # approach 1\n\n EE.weight.data = torch.tensor(\n W, dtype=dtype, requires_grad=True, device=curRankDevice\n )\n return EE\n\n def alloc_empty(self, sizeArr, dtype, curRankDevice):\n return torch.empty(sizeArr, device=curRankDevice, dtype=dtype)\n\n def clear_memory(self, collectiveArgs):\n del collectiveArgs.ipTensor\n del collectiveArgs.opTensor\n if collectiveArgs.ipTensor_pair is not None:\n del collectiveArgs.ipTensor_pair\n del collectiveArgs.opTensor_pair\n\n torch.cuda.empty_cache()\n\n # Getting world-size and other information.\n def get_local_rank(self):\n return self.comms_world_info.local_rank\n\n def get_global_rank(self):\n return self.comms_world_info.global_rank\n\n def get_world_size(self):\n return self.comms_world_info.world_size\n\n def get_device(self):\n \"\"\"get current device: 'cpu' or 'cuda'\"\"\"\n # TODO: this is a temporary workaround; need to unify the type of commsParams in comms and dlrm\n dev_str = (\n self.commsParams[\"device\"]\n if isinstance(self.commsParams, dict)\n else self.commsParams.device\n )\n my_dev = torch.device(dev_str)\n if dev_str == \"cuda\":\n # explicitly select the device ordinal based on the local rank\n ordinal = self.get_local_rank()\n if self.get_local_rank() == -1:\n logger.warning(\n \"Cannot determine device ordinal since LOCAL_RANK is -1. Try GPU 0 and continue. 
\"\n )\n ordinal = 0\n my_dev = torch.device(f\"cuda:{ordinal}\")\n elif dev_str != \"cpu\":\n # sanity check, such error should be catched when parsing arguments\n raise ValueError(f\"{dev_str} is not a valid device option\")\n\n return my_dev\n\n def get_hw_device(self):\n self.get_device()\n\n def get_default_group(self):\n # return the world group to always perform collectives on default PG\n return dist.GroupMember.WORLD\n\n def get_groups(self):\n return self.groups\n\n def get_next_group(self):\n return next(self.round_robin_group)\n\n def set_device(self):\n \"\"\"set current device: 'cpu' or 'cuda'\"\"\"\n dev_str = (\n self.commsParams[\"device\"]\n if isinstance(self.commsParams, dict)\n else self.commsParams.device\n )\n if dev_str.startswith(\"cuda\"):\n if self.get_local_rank() > torch.cuda.device_count():\n raise ValueError(\n \"Insufficient #GPUs: \"\n f\"available {torch.cuda.device_count()} \"\n f\"requested {self.get_local_rank()}\"\n )\n torch.cuda.set_device(self.get_local_rank())\n\n logger.info(f\"rank {self.get_global_rank()} set torch device to {dev_str}\")\n\n # Init functions\n def __init__(self, comms_world_info, commsParams):\n super().__init__()\n self.comms_world_info = comms_world_info\n self.commsParams = commsParams\n # extra ops supported (Note these are not supported in pytorch_tpu_backend.py)\n self.collectiveFunc[\"wait\"] = self.complete_single_op\n self.collectiveFunc[\"send\"] = self.send\n self.collectiveFunc[\"recv\"] = self.recv\n self.collectiveFunc[\"isend\"] = self.isend\n self.collectiveFunc[\"irecv\"] = self.irecv\n self.collectiveFunc[\n \"pt2pt\"\n ] = self.noop # dummy entry to support pt2pt benchmark\n\n backend = (\n self.commsParams[\"backend\"]\n if isinstance(self.commsParams, dict)\n else self.commsParams.backend\n )\n # Import ucc plugin\n if backend == \"ucc\":\n # try OSS/setup.py\n try:\n import torch_ucc # noqa\n except ImportError:\n try:\n from ucc_plugin import initialize_ucc_plugin\n except ImportError:\n raise RuntimeError(\"Unable to import initialize_ucc_plugin\")\n else:\n initialize_ucc_plugin(backend)\n # Import Fairring\n if backend == \"fairring\":\n try:\n import fairring # noqa\n except ImportError:\n raise RuntimeError(\"Unable to import Fairring\")\n\n def initialize_backend(self, master_ip, master_port, backend=\"gloo\"):\n # Set CUDA device before initializing backend\n # Required for backends that don't do lazy initialization, e.g. UCC\n self.set_device()\n\n global_rank = self.get_global_rank()\n world_size = self.get_world_size()\n\n # Torch initializaiton\n # NOTE: MASTER_ADDR and MASTER_PORT should be set already in `comms_utils.py`\n if world_size > 0:\n os.environ[\"WORLD_SIZE\"] = str(world_size)\n if global_rank >= 0:\n os.environ[\"RANK\"] = str(global_rank)\n\n # default group\n dist.init_process_group(backend, rank=global_rank, world_size=world_size)\n self.groups = []\n self.groups.append(self.get_default_group())\n\n # non-default groups\n for _ in range(1, self.commsParams.num_pgs):\n pg = dist.new_group(backend=backend)\n self.groups.append(pg)\n\n self.round_robin_group = cycle(self.groups)\n\n def benchmark_comms(self):\n self.initialize_backend(\n self.comms_world_info.master_ip,\n self.comms_world_info.master_port,\n self.commsParams.backend,\n )\n index = 0 # used in TPU, where it is not initialized!\n self.commsParams.benchTime(index, self.commsParams, self)\n return\n\n def __del__(self):\n if dist.is_initialized():\n dist.destroy_process_group()\n pass\n"
] | [
[
"numpy.amax",
"torch.LongTensor",
"torch.ones",
"numpy.amin",
"numpy.cumsum",
"numpy.percentile",
"numpy.ones",
"numpy.mean",
"numpy.random.rand",
"torch.FloatTensor",
"numpy.array",
"numpy.sum",
"numpy.random.randint"
],
[
"torch.randint",
"torch.load",
"torch.cat",
"torch.randn",
"numpy.cumsum",
"numpy.random.zipf",
"numpy.ones",
"torch.arange",
"torch.device",
"torch.ones_like"
],
[
"numpy.array",
"numpy.percentile",
"numpy.average"
],
[
"torch.distributed.broadcast",
"torch.randint",
"numpy.sqrt",
"torch.device",
"torch.distributed.irecv",
"torch.nn.EmbeddingBag",
"torch.cuda.synchronize",
"torch.mm",
"torch.ones",
"torch.distributed.init_process_group",
"torch.distributed._reduce_scatter_base",
"torch.distributed.barrier",
"torch.tensor",
"torch.rand",
"torch.distributed.send",
"torch.empty",
"torch.distributed.is_initialized",
"torch.cuda.empty_cache",
"torch.distributed._all_gather_base",
"torch.distributed.destroy_process_group",
"torch.distributed.isend",
"torch.cuda.device_count",
"torch.distributed.all_to_all_single",
"torch.distributed.all_gather",
"torch.distributed.recv",
"torch.distributed.reduce",
"torch.distributed.new_group",
"torch.distributed.gather",
"torch.distributed.all_reduce",
"torch.distributed.reduce_scatter"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
rjgpinel/mime-release | [
"26a850c4ba5b702b86d068995614163338fb01df"
] | [
"mime/agent/script_agent_augmented.py"
] | [
"import itertools\nimport types\nimport numpy as np\n\nimport torch\nimport click\nimport gym\nimport time\nimport yaml\n\nfrom robos2r.model import build_model\nfrom .agent import Agent\nfrom .script_agent import ScriptAgent, make_noised\nfrom .utils import Rate\nfrom PIL import Image\nfrom pathlib import Path\nfrom einops import rearrange\nfrom torchvision import transforms as T\n\n\[email protected](help=\"script_agent env_name [options]\")\[email protected](\"env_name\", type=str)\[email protected](\"-s\", \"--seed\", default=0, help=\"seed\")\[email protected](\"-t\", \"--times-repeat\", default=1, help=\"times to repeat the script\")\[email protected](\"-n\", \"--add-noise\", is_flag=True, help=\"adding noise to actions or not\")\[email protected](\n \"-sc\",\n \"--skill-collection/--no-skill-collection\",\n is_flag=True,\n help=\"whether to show the skills collection\",\n)\ndef main(env_name, seed, times_repeat, add_noise, skill_collection):\n print(\"Loading Augmentor model...\")\n diffaug_model_path = \"/home/rgarciap/Remote/models/diffs2r_new/resnet_adam_lr_1e-3_lraug0.01_bs_64_L8/\"\n diffaug_model_path = Path(diffaug_model_path)\n diffaug_cfg_path = diffaug_model_path / \"config.yml\"\n\n with open(str(diffaug_cfg_path), \"rb\") as f:\n diffaug_cfg = yaml.load(f, Loader=yaml.FullLoader)\n\n model_cfg = dict(\n name=\"diffaug\",\n reg_output_size=3,\n aug_pipeline=diffaug_cfg[\"aug_pipeline\"],\n multi=diffaug_cfg[\"multi_pipeline\"],\n num_layers=diffaug_cfg[\"num_layers\"],\n gumbel=diffaug_cfg[\"gumbel\"],\n backbone_name=diffaug_cfg[\"backbone_name\"],\n )\n diffaug_model = build_model(model_cfg)\n diffaug_ckp_path = diffaug_model_path / \"best_checkpoint.pth\"\n checkpoint = torch.load(str(diffaug_ckp_path), map_location=\"cpu\")\n diffaug_model.load_state_dict(checkpoint[\"model\"])\n augmentor = diffaug_model.augmentor\n augmentor.to(\"cpu\")\n augmentor.eval()\n print(\"Model loaded\")\n\n env = gym.make(env_name)\n scene = env.unwrapped.scene\n scene.renders(True)\n if skill_collection:\n scene.skill_data_collection = True\n env.seed(seed)\n for _ in range(times_repeat):\n obs = env.reset()\n\n agent = ScriptAgent(env)\n import matplotlib.pyplot as plt\n\n done = False\n i = 0\n rate = Rate(scene.dt)\n action = agent.get_action()\n if add_noise:\n make_noised(action)\n frames = []\n j = 0\n while not done and action is not None:\n obs, reward, done, info = env.step(action)\n\n im = T.ToTensor()(obs[\"rgb0\"]).unsqueeze(0)\n mask = torch.tensor(obs[\"mask0\"]).unsqueeze(0)\n\n im, mask = augmentor((im, mask))\n im = rearrange(im.detach().detach().squeeze(0).numpy(), \"c h w -> h w c\")\n im = Image.fromarray((im * 255).astype(np.uint8))\n im.save(f\"0/output{j}.jpeg\")\n j += 1\n action = agent.get_action()\n if add_noise and action is not None:\n make_noised(action)\n\n if action is None:\n info[\"failure_message\"] = \"End of Script.\"\n if not info[\"success\"]:\n click.secho(\n \"Failure Seed {}: {}\".format(seed, info[\"failure_message\"]), fg=\"red\"\n )\n\n print(\"Success\", info[\"success\"])\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"torch.tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
zhengxiawu/Transformer | [
"8cad013913254ea4e06c4a8d460d9f2cf42df086",
"8cad013913254ea4e06c4a8d460d9f2cf42df086"
] | [
"Embed.py",
"Sublayers.py"
] | [
"import torch\nimport torch.nn as nn\nimport math\nfrom torch.autograd import Variable\n\n\nclass Embedder(nn.Module):\n def __init__(self, vocab_size, d_model):\n super().__init__()\n self.d_model = d_model\n self.embed = nn.Embedding(vocab_size, d_model)\n\n def forward(self, x):\n return self.embed(x)\n\n\nclass PositionalEncoder(nn.Module):\n def __init__(self, d_model, max_seq_len=200, dropout=0.1):\n super().__init__()\n self.d_model = d_model\n self.dropout = nn.Dropout(dropout)\n # create constant 'pe' matrix with values dependant on\n # pos and i\n pe = torch.zeros(max_seq_len, d_model)\n for pos in range(max_seq_len):\n for i in range(0, d_model, 2):\n pe[pos, i] = \\\n math.sin(pos / (10000 ** ((2 * i)/d_model)))\n pe[pos, i + 1] = \\\n math.cos(pos / (10000 ** ((2 * (i + 1))/d_model)))\n pe = pe.unsqueeze(0)\n self.register_buffer('pe', pe)\n\n def forward(self, x):\n # make embeddings relatively larger\n x = x * math.sqrt(self.d_model)\n # add constant to embedding\n seq_len = x.size(1)\n pe = Variable(self.pe[:, :seq_len], requires_grad=False)\n if x.is_cuda:\n pe.cuda()\n x = x + pe\n return self.dropout(x)\n",
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport math\n\n\nclass Norm(nn.Module):\n def __init__(self, d_model, eps=1e-6):\n super().__init__()\n\n self.size = d_model\n\n # create two learnable parameters to calibrate normalisation\n self.alpha = nn.Parameter(torch.ones(self.size))\n self.bias = nn.Parameter(torch.zeros(self.size))\n\n self.eps = eps\n\n def forward(self, x):\n norm = self.alpha * (x - x.mean(dim=-1, keepdim=True)) \\\n / (x.std(dim=-1, keepdim=True) + self.eps) + self.bias\n return norm\n\n\ndef attention(q, k, v, d_k, mask=None, dropout=None):\n\n scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k)\n\n if mask is not None:\n mask = mask.unsqueeze(1)\n scores = scores.masked_fill(mask == 0, -1e9)\n\n scores = F.softmax(scores, dim=-1)\n\n if dropout is not None:\n scores = dropout(scores)\n\n output = torch.matmul(scores, v)\n return output\n\n\nclass MultiHeadAttention(nn.Module):\n def __init__(self, heads, d_model, dropout=0.1):\n super().__init__()\n\n self.d_model = d_model\n self.d_k = d_model // heads\n self.h = heads\n\n self.q_linear = nn.Linear(d_model, d_model)\n self.v_linear = nn.Linear(d_model, d_model)\n self.k_linear = nn.Linear(d_model, d_model)\n\n self.dropout = nn.Dropout(dropout)\n self.out = nn.Linear(d_model, d_model)\n\n def forward(self, q, k, v, mask=None):\n\n bs = q.size(0)\n\n # perform linear operation and split into N heads\n k = self.k_linear(k).view(bs, -1, self.h, self.d_k)\n q = self.q_linear(q).view(bs, -1, self.h, self.d_k)\n v = self.v_linear(v).view(bs, -1, self.h, self.d_k)\n\n # transpose to get dimensions bs * N * sl * d_model\n k = k.transpose(1, 2)\n q = q.transpose(1, 2)\n v = v.transpose(1, 2)\n\n # calculate attention using function we will define next\n scores = attention(q, k, v, self.d_k, mask, self.dropout)\n # concatenate heads and put through final linear layer\n concat = scores.transpose(1, 2).contiguous().view(bs, -1, self.d_model)\n output = self.out(concat)\n\n return output\n\n\nclass FeedForward(nn.Module):\n def __init__(self, d_model, d_ff=2048, dropout=0.1):\n super().__init__()\n\n # We set d_ff as a default to 2048\n self.linear_1 = nn.Linear(d_model, d_ff)\n self.dropout = nn.Dropout(dropout)\n self.linear_2 = nn.Linear(d_ff, d_model)\n\n def forward(self, x):\n x = self.dropout(F.relu(self.linear_1(x)))\n x = self.linear_2(x)\n return x\n"
] | [
[
"torch.autograd.Variable",
"torch.nn.Dropout",
"torch.nn.Embedding",
"torch.zeros"
],
[
"torch.nn.functional.softmax",
"torch.nn.Dropout",
"torch.ones",
"torch.zeros",
"torch.nn.Linear",
"torch.matmul"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ds4dm/GraphRL | [
"b5b1519f6dd92b401625d51add9ae5829004a30b"
] | [
"rl/train_a2c_mc.py"
] | [
"import torch\nimport torch.optim as optm\nimport torch.nn.functional as F\n\nimport numpy as np\nfrom torch.autograd import Variable\nfrom torch.utils.data import Dataset, DataLoader\nfrom data.graph import Graph\nfrom collections import namedtuple\n\nSavedAction = namedtuple('SavedAction', ['log_prob', 'value_current'])\n\n# Mont Carlo methods\nclass TrainModel_MC:\n\n def __init__(self, model, train_dataset, val_dataset, max_grad_norm=2, use_cuda=False):\n self.model = model\n self.train_dataset = train_dataset\n self.val_dataset = val_dataset\n self.max_grad_norm = max_grad_norm\n self.use_cuda = use_cuda\n\n self.train_loader = DataLoader(train_dataset, shuffle=True, num_workers=1, batch_size=1, collate_fn=lambda x: x)\n self.val_loader = DataLoader(val_dataset, shuffle=True, num_workers=1, batch_size=1, collate_fn=lambda x: x)\n\n self.epochs = 0\n self.beta = 0.9\n self.eps = np.finfo(np.float32).eps.item()\n\n\n def train_and_validate(self, n_epochs, lr_actor, lr_critic, gamma=0.99, use_critic=True):\n\n self.actor_optim = optm.Adam(self.model.actor.parameters(), lr=lr_actor)\n\n print(use_critic)\n if use_critic:\n self.critic_optim = optm.Adam(self.model.critic.parameters(), lr=lr_critic)\n self.critic_loss_criterion = torch.nn.MSELoss()\n else:\n baseline = torch.zeros(1)\n if self.use_cuda:\n baseline = baseline.cuda()\n\n for epoch in range(1):\n\n n_graphs_proceed = 0\n for X in self.train_loader:\n for x in X:\n\n self.model.train()\n ratio_gcn2mind = []\n ratio_gcn2rand = []\n\n for epoch in range(n_epochs):\n\n rewards_mindegree = 0 # number of added edges\n rewards_random = 0\n x_mind = Graph(x.M)\n x_rand = Graph(x.M)\n x_rl = Graph(x.M)\n\n # loop for training while eliminating a graph iteratively\n for i in range(x.n - 2):\n\n # baseline1: compute return of min degree\n if i % 100 == 0:\n print('iterations {}'.format(i))\n node_mind, d_min = x_mind.min_degree(x_mind.M)\n rewards_mindegree += x_mind.eliminate_node(node_mind, reduce=True)\n\n # baseline2: compute return of random\n rewards_random += x_rand.eliminate_node(np.random.randint(low=0, high=x_rand.n), reduce=True)\n\n # call actor-critic model\n\n action, log_prob, reward, value_current, value_next, x_rl = self.model(x_rl) # forward propagation,action: node selected, reward: nb edges added\n self.model.rewards.append(reward)\n self.model.actions.append(action)\n self.model.saved_actions.append(SavedAction(log_prob, value_current))\n\n R = 0\n actor_losses = []\n critic_losses = []\n returns = []\n\n # compute sampled return for each step\n for r in self.model.rewards[::-1]:\n R = r + gamma * R\n returns.insert(0, R)\n returns = torch.tensor(returns)\n returns = (returns - returns.mean()) / (returns.std() + self.eps)\n saved_actions = self.model.saved_actions\n # compute cummulated loss of actor and critic of one graph\n for (log_prob, value_current), R in zip(saved_actions, returns):\n if use_critic:\n advantage = R - value_current\n critic_losses.append(-value_current* advantage)\n # critic_losses.append(self.critic_loss_criterion(value_current, torch.Tensor([R.detach()])))\n else:\n advantage = R - baseline\n actor_losses.append(log_prob * advantage.detach()) # the return here is discounted nb of added edges,\n # hence, it actually represents loss\n # step update of actor\n self.actor_optim.zero_grad()\n actor_loss = torch.stack(actor_losses).sum()\n actor_loss.backward(retain_graph=True)\n self.actor_optim.step()\n\n # step update of critic\n if use_critic:\n self.critic_optim.zero_grad()\n critic_closs = 
torch.stack(critic_losses).sum()\n critic_closs.backward()\n self.critic_optim.step()\n else:\n baseline = baseline.detach()\n\n rewards_gcn = sum(self.model.rewards)\n\n _ratio_gcn2mind = rewards_gcn / rewards_mindegree\n _ratio_gcn2rand = rewards_gcn / rewards_random\n\n print('graph {:04d}'.format(n_graphs_proceed), 'epoch {:04d}'.format(epoch),\n 'gcn2mind ratio {}'.format(_ratio_gcn2mind),\n 'value {}'.format(saved_actions[0].value_current),\n 'R {}'.format(returns[0]))\n print('graph {:04d}'.format(n_graphs_proceed), 'epoch {:04d}'.format(epoch),\n 'gcn2rand ratio {}'.format(_ratio_gcn2rand))\n\n ratio_gcn2mind.append(_ratio_gcn2mind)\n ratio_gcn2rand.append(_ratio_gcn2rand)\n del self.model.rewards[:]\n del self.model.actions[:]\n del self.model.saved_actions[:]\n\n ratio_gcn2mind = np.array(ratio_gcn2mind).reshape(-1)\n ratio_gcn2rand = np.array(ratio_gcn2rand).reshape(-1)\n\n min_ratio_gcn2mind = np.min(ratio_gcn2mind)\n max_ratio_gcn2mind = np.max(ratio_gcn2mind)\n av_ratio_gcn2mind = np.sum(ratio_gcn2mind)/ n_epochs\n\n min_ratio_gcn2rand = np.min(ratio_gcn2rand)\n max_ratio_gcn2rand = np.max(ratio_gcn2rand)\n av_ratio_gcn2rand = np.sum(ratio_gcn2rand) / n_epochs\n\n print('graph {:04d}'.format(n_graphs_proceed), 'gcn2mind{:04d}',\n 'min_ratio {}'.format(min_ratio_gcn2mind),\n 'max_ratio {}'.format(max_ratio_gcn2mind),\n 'av_ratio {}'.format(av_ratio_gcn2mind))\n print('graph {:04d}'.format(n_graphs_proceed), 'gcn2rand{:04d}',\n 'min_ratio {}'.format(min_ratio_gcn2rand),\n 'max_ratio {}'.format(max_ratio_gcn2rand),\n 'av_ratio {}'.format(av_ratio_gcn2rand),\n 'nb graph proceeded {}'.format(n_graphs_proceed))\n\n n_graphs_proceed += len(X)\n\n # ratio_gcn2mind = np.array(ratio_gcn2mind).reshape(-1)\n # ratio_gcn2rand = np.array(ratio_gcn2rand).reshape(-1)\n #\n # total_ratio_gcn2mind = np.sum(ratio_gcn2mind)\n # total_ratio_gcn2rand = np.sum(ratio_gcn2rand)\n #\n # min_ratio_gcn2mind = np.min(ratio_gcn2mind)\n # max_ratio_gcn2mind = np.max(ratio_gcn2mind)\n # av_ratio_gcn2mind = total_ratio_gcn2mind / n_graphs_proceed\n #\n # min_ratio_gcn2rand = np.min(ratio_gcn2rand)\n # max_ratio_gcn2rand = np.max(ratio_gcn2rand)\n # av_ratio_gcn2rand = total_ratio_gcn2rand / n_graphs_proceed\n #\n # print('epoch {:04d}'.format(epoch), 'gcn2mind{:04d}',\n # 'min_ratio {}'.format(min_ratio_gcn2mind),\n # 'max_ratio {}'.format(max_ratio_gcn2mind),\n # 'av_ratio {}'.format(av_ratio_gcn2mind))\n # print('epoch {:04d}'.format(epoch), 'gcn2rand{:04d}',\n # 'min_ratio {}'.format(min_ratio_gcn2rand),\n # 'max_ratio {}'.format(max_ratio_gcn2rand),\n # 'av_ratio {}'.format(av_ratio_gcn2rand),\n # 'nb graph proceeded {}'.format(n_graphs_proceed))\n"
] | [
[
"numpy.sum",
"numpy.min",
"torch.zeros",
"torch.utils.data.DataLoader",
"numpy.finfo",
"torch.tensor",
"numpy.max",
"torch.stack",
"numpy.array",
"torch.nn.MSELoss",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
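Reading aid (not part of the dataset row above): the escaped code in the preceding row implements a Monte Carlo actor-critic trainer whose core step is a backward pass over collected rewards to build normalized discounted returns. The short sketch below re-states just that pass in plain Python/PyTorch so it can be read outside the escaped string; the function name `discounted_returns` and the sample reward list are illustrative assumptions, not identifiers from the row.

```python
import torch

def discounted_returns(rewards, gamma=0.99, eps=1e-8):
    """Backward discounted-return pass, then standardization, as in the row above."""
    R = 0.0
    returns = []
    for r in rewards[::-1]:          # iterate rewards from last step to first
        R = r + gamma * R
        returns.insert(0, R)         # keep returns in forward (time) order
    returns = torch.tensor(returns)
    # Zero-mean / unit-variance normalization with a small eps for stability.
    return (returns - returns.mean()) / (returns.std() + eps)

# Illustrative usage with a made-up reward sequence.
print(discounted_returns([1.0, 0.0, 2.0]))
```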
JeremieMelo/ADEPT | [
"f79f518197798735cb684b373e11cdcc8a80d872"
] | [
"unitest/test_supermesh.py"
] | [
"'''\nDescription:\nAuthor: Jiaqi Gu ([email protected])\nDate: 2021-09-27 23:48:01\nLastEditors: Jiaqi Gu ([email protected])\nLastEditTime: 2022-02-26 02:22:52\n'''\nimport torch\nfrom core.models.layers.super_mesh import super_layer_name_dict\n\ndef test():\n device=torch.device(\"cuda:0\")\n p, q, k = 2, 2, 4\n x = torch.eye(k, dtype=torch.cfloat, device=device).unsqueeze(0).repeat(q,1,1).permute(1,0,2).contiguous()\n sigma = torch.ones(p,q,k, device=device)\n # x [bs, q, k]\n\n arch = dict(\n n_waveguides=k,\n n_front_share_waveguides=k,\n n_front_share_ops=k,\n n_blocks=4,\n n_layers_per_block=2,\n n_front_share_blocks=2,\n share_ps=\"row_col\",\n interleave_dc=True,\n )\n sample_arch = [\n k//3,1,\n k//2,1,\n k//2,1,\n k//2,1,\n 4\n ]\n layer = super_layer_name_dict[\"ps_dc_cr\"](arch, device=device)\n super_ps_layers = layer.build_ps_layser(grid_dim_x=q, grid_dim_y=p)\n for m in super_ps_layers:\n # m.reset_parameters(alg=\"identity\")\n m.reset_parameters(alg=\"uniform\")\n layer.set_sample_arch(sample_arch)\n print(layer)\n layer.set_identity_cr()\n layer.build_sampling_coefficients()\n layer.set_gumbel_temperature(0.1)\n layer.set_aux_skip_path(0)\n layer.build_arch_mask()\n U,V = layer.get_UV(super_ps_layers, q, p)\n print(U, U.size())\n print(U[0,0].conj().t().matmul(U[0,0]))\n print(V)\n print(V[0,0].conj().t().matmul(V[0,0]))\n weight = layer.get_weight_matrix(super_ps_layers, sigma)\n print(weight)\n weight.sum().backward()\n print(super_ps_layers[0].weight.grad.norm(p=2))\n print(layer.super_layers_all[0].weight.grad.norm(p=2))\n\n print(layer.super_layers_all[1].weight.grad.norm(p=2))\n\n\nif __name__ == \"__main__\":\n test()\n"
] | [
[
"torch.device",
"torch.eye",
"torch.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
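Reading aid (not part of the dataset row above): the ADEPT test in the preceding row prints products like `U[0,0].conj().t().matmul(U[0,0])` to eyeball whether the recovered factors are unitary. The hedged sketch below shows that same check as a standalone helper on an ordinary complex tensor; `is_unitary` is an illustrative name and not an ADEPT API, and the identity matrix is only a stand-in input.

```python
import torch

def is_unitary(U, atol=1e-5):
    """True when U^H @ U is numerically the identity (same check as the test prints)."""
    k = U.shape[-1]
    eye = torch.eye(k, dtype=U.dtype, device=U.device)
    return bool(((U.conj().transpose(-2, -1) @ U) - eye).abs().max() < atol)

# Trivially unitary stand-in input: the complex identity, as the test also constructs.
print(is_unitary(torch.eye(4, dtype=torch.cfloat)))
```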
nbortolotti/tflite-tpu-experiences | [
"8f613e059335d1d90886282f005261917fd9cfd3"
] | [
"inference_exploration/cpu/main.py"
] | [
"import os\nimport numpy as np\nimport PIL.Image as Image\nimport matplotlib.pylab as plt\nimport time\n\nimport tensorflow as tf\nimport tensorflow_hub as hub\nfrom tensorflow.keras import layers\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\n\ndef image_analysis(classifier, image_shape, img_array):\n result = classifier.predict(img_array[np.newaxis, ...])\n # result.shape\n\n predicted_class = np.argmax(result[0], axis=-1)\n return predicted_class\n\n\ndef main():\n classifier_url = \"https://tfhub.dev/google/imagenet/mobilenet_v2_100_224/classification/4\"\n image_shape = (224, 224)\n classifier = tf.keras.Sequential([\n hub.KerasLayer(classifier_url, input_shape=image_shape + (3,))\n ])\n\n img_file = tf.keras.utils.get_file('image.jpg', 'https://storage.googleapis.com/demostration_images/2.jpg')\n img = Image.open(img_file).resize(image_shape)\n\n img_array = np.array(img) / 255.0\n # img_array.shape\n\n predicted_class = image_analysis(classifier, image_shape, img_array)\n\n labels_path = tf.keras.utils.get_file('ImageNetLabels.txt',\n 'https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt')\n imagenet_labels = np.array(open(labels_path).read().splitlines())\n #\n # plt.imshow(img_array)\n # plt.axis('off')\n # predicted_class_name = imagenet_labels[predicted_class]\n # _ = plt.title(\"Prediction: \" + predicted_class_name.title())\n # plt.show()\n for _ in range(5):\n inferenceTime(img_array, classifier)\n\n\n# explore time to do the inference\ndef inferenceTime(image, mClassifier):\n start = time.time()\n result = mClassifier.predict(image[np.newaxis, ...])\n end = time.time()\n print((end - start)*1000) #milliseconds\n\n # predicted_class = np.argmax(result[0], axis=-1)\n # predicted_class_name = mLabels[predicted_class]\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.array",
"numpy.argmax",
"tensorflow.keras.utils.get_file"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
}
] |
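Reading aid (not part of the dataset row above): the preceding row times `classifier.predict(...)` with `time.time()` and prints the elapsed milliseconds. The sketch below mirrors that wall-clock timing pattern generically, using `time.perf_counter()` and a stand-in workload so it runs offline; the helper name `time_call_ms` and the `sum` workload are illustrative assumptions, not code from the repo.

```python
import time

def time_call_ms(fn, *args, repeats=5):
    """Print wall-clock milliseconds for each of `repeats` calls to fn(*args)."""
    for _ in range(repeats):
        start = time.perf_counter()
        fn(*args)
        elapsed_ms = (time.perf_counter() - start) * 1000.0
        print(elapsed_ms)

# Stand-in workload instead of classifier.predict(img_array[np.newaxis, ...]).
time_call_ms(sum, range(100_000))
```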
FaceThePirate/pyspeckit | [
"734b9f81d440ca3a6db9bf68e9409dbddb52d08b"
] | [
"pyspeckit/spectrum/readers/read_class.py"
] | [
"\"\"\"\n------------------------\nGILDAS CLASS file reader\n------------------------\n\nRead a CLASS file into an :class:`pyspeckit.spectrum.ObsBlock`\n\"\"\"\nfrom __future__ import print_function\nfrom six.moves import xrange\nfrom six import iteritems\nimport six\nimport astropy.io.fits as pyfits\nimport numpy\nimport numpy as np\nfrom numpy import pi\nfrom astropy import log\n# from astropy.time import Time\nfrom astropy import units as u\nimport pyspeckit\nimport sys\nimport re\ntry:\n from astropy.utils.console import ProgressBar\nexcept ImportError:\n ProgressBar = lambda x: None\n ProgressBar.update = lambda x: None\nimport struct\n\nimport time\n\n# 'range' is needed as a keyword\nirange = range\n\ndef print_timing(func):\n \"\"\"\n Prints execution time of decorated function.\n Included here because CLASS files can take a little while to read;\n this should probably be replaced with a progressbar\n \"\"\"\n def wrapper(*arg,**kwargs):\n t1 = time.time()\n res = func(*arg,**kwargs)\n t2 = time.time()\n log.info('%s took %0.5g s' % (func.__name__, (t2-t1)))\n return res\n wrapper.__doc__ = func.__doc__\n return wrapper\n\ndef ensure_bytes(string):\n \"\"\"\n Ensure a given string is in byte form\n \"\"\"\n if six.PY3:\n return bytes(string, 'utf-8')\n else:\n return str(string)\n\n\"\"\" Specification: http://iram.fr/IRAMFR/GILDAS/doc/html/class-html/node58.html \"\"\"\nfiletype_dict = {'1A ':'Multiple_IEEE',\n '1 ':'Multiple_Vax',\n '1B ':'Multiple_EEEI',\n '2A ':'v2',\n '2 ':'v2',\n '2B ':'v2',\n '9A ':'Single_IEEE',\n '9 ':'Single_Vax',\n '9B ':'Single_EEEI'}\nfor key in list(filetype_dict.keys()):\n filetype_dict[ensure_bytes(key)] = filetype_dict[key]\n\nfileversion_dict = {'1A ':'v1',\n '2A ':'v2',\n '9A ':'v1', # untested\n }\nfor key in list(fileversion_dict.keys()):\n fileversion_dict[ensure_bytes(key)] = fileversion_dict[key]\n\nrecord_lengths = {'1A': 512,\n '2A': 1024*4}\n\nheader_id_numbers = {0: 'USER CODE',\n -1: 'COMMENT',\n -2: 'GENERAL',\n -3: 'POSITION',\n -4: 'SPECTRO',\n -5: 'BASELINE',\n -6: 'HISTORY',\n -7: 'UNKNOWN-APEX',\n # -8: 'SWITCH',\n -9: 'GAUSSFIT', # \"private\"; see class-interfaces-private.f90\n -10: 'DRIFT',\n -11: 'BEAMSWITCH', # \"private\"; see class-interfaces-private.f90\n -12: 'SHELLFIT', # \"private\"; see class-interfaces-private.f90\n -13: 'NH3FIT', # \"private\"; see class-interfaces-private.f90\n -14: 'CALIBRATION',\n -18: 'ABSFIT', # \"private\"; see class-interfaces-private.f90\n }\n\nheader_id_lengths = {-2: 9, # may really be 10?\n -3: 17,\n -4: 17,\n -5: None, # variable length\n -6: 3, # variable length\n -14: 25,\n }\n\n# from packages/classic/lib/classic_mod.f90\nfiledescv2_nw1=14\n\n\n\"\"\"\nGENERAL\n integer(kind=obsnum_length) :: num ! [ ] Observation number\n integer(kind=4) :: ver ! [ ] Version number\n integer(kind=4) :: teles(3) ! [ ] Telescope name\n integer(kind=4) :: dobs ! [MJD-60549] Date of observation\n integer(kind=4) :: dred ! [MJD-60549] Date of reduction\n integer(kind=4) :: typec ! [ code] Type of coordinates\n integer(kind=4) :: kind ! [ code] Type of data\n integer(kind=4) :: qual ! [ code] Quality of data\n integer(kind=4) :: subscan ! [ ] Subscan number\n integer(kind=obsnum_length) :: scan ! [ ] Scan number\n ! Written in the entry\n real(kind=8) :: ut ! 1-2 [ rad] UT of observation\n real(kind=8) :: st ! 3-4 [ rad] LST of observation\n real(kind=4) :: az ! 5 [ rad] Azimuth\n real(kind=4) :: el ! 6 [ rad] Elevation\n real(kind=4) :: tau ! 7 [neper] Opacity\n real(kind=4) :: tsys ! 
8 [ K] System temperature\n real(kind=4) :: time ! 9 [ s] Integration time\n ! Not in this section in file\n integer(kind=4) :: xunit ! [ code] X unit (if X coordinates section is present)\n ! NOT in data ---\n character(len=12) :: cdobs ! [string] Duplicate of dobs\n character(len=12) :: cdred ! [string] Duplicate of dred\n\n\"\"\"\n\nkeys_lengths = {\n 'unknown': [\n #('NUM' ,1,'int32'), # Observation number\n ('VER' ,1,'int32'), # Version number\n ('TELES' ,3,'|S12') , # Telescope name\n ('DOBS' ,1,'int32'), # Date of observation\n ('DRED' ,1,'int32'), # Date of reduction\n ('TYPEC' ,1,'int32'), # Type of coordinates\n ('KIND' ,1,'int32'), # Type of data\n ('QUAL' ,1,'int32'), # Quality of data\n ('SCAN' ,1,'int32'), # Scan number\n ('SUBSCAN' ,1,'int32'), # Subscan number\n ],\n\n 'COMMENT': [ # -1\n ('LTEXT',1,'int32'), # integer(kind=4) :: ltext ! Length of comment\n ('CTEXT',1024//4,'|S1024'), # character ctext*1024 ! Comment string\n ],\n\n 'GENERAL': [ # -2\n ('UT' ,2,'float64'), # rad UT of observation\n ('ST' ,2,'float64'), # rad LST of observation\n ('AZ' ,1,'float32'), # rad Azimuth\n ('EL' ,1,'float32'), # rad Elevation\n ('TAU' ,1,'float32'), # neper Opacity\n ('TSYS' ,1,'float32'), # K System temperature\n ('TIME' ,1,'float32'), # s Integration time\n # XUNIT should not be there?\n #( 'XUNIT' ,1,'int32'), # code X unit (if xcoord_sec is present)\n ] ,\n 'POSITION': [ # -3\n ('SOURC',3,'|S12') , # [ ] Source name\n ('EPOCH',1,'float32'), # [ ] Epoch of coordinates\n ('LAM' ,2,'float64'), #[rad] Lambda\n ('BET' ,2,'float64'), #[rad] Beta\n ('LAMOF',1,'float32'), # [rad] Offset in Lambda\n ('BETOF',1,'float32'), # [rad] Offset in Beta\n ('PROJ' ,1,'int32') , # [rad] Projection system\n ('SL0P' ,1,'float64'), # lambda of descriptive system # MAY NOT EXIST IN OLD CLASS\n ('SB0P' ,1,'float64'), # beta of descriptive system # MAY NOT EXIST IN OLD CLASS\n ('SK0P' ,1,'float64'), # angle of descriptive system # MAY NOT EXIST IN OLD CLASS\n ],\n 'SPECTRO': [ # -4\n #('align' ,1,'int32'), # [ ] Alignment padding\n ('LINE' ,3,'|S12'), # [ ] Line name\n ('RESTF' ,2,'float64'), # [ MHz] Rest frequency\n ('NCHAN' ,1,'int32'), # [ ] Number of channels\n ('RCHAN' ,1,'float32'), # [ ] Reference channels\n ('FRES' ,1,'float32'), # [ MHz] Frequency resolution\n ('FOFF' ,1,'float32'), # [ MHz] Frequency offset\n ('VRES' ,1,'float32'), # [km/s] Velocity resolution\n ('VOFF' ,1,'float32'), # [km/s] Velocity at reference channel\n ('BAD' ,1,'float32'), # [ ] Blanking value\n #('ALIGN_1',1,'int32'), # [ ] Alignment padding\n ('IMAGE' ,2,'float64'), # [ MHz] Image frequency\n #('ALIGN_2',1,'int32'), # [ ] Alignment padding\n ('VTYPE' ,1,'int32'), # [code] Type of velocity\n ('DOPPLER',2,'float64'), # [ ] Doppler factor = -V/c (CLASS convention)\n ],\n 'CALIBRATION': [ # -14\n ('ALIGN',1,'int32'), # BUFFER (it's a zero - it is not declared in the docs!!!!)\n ('BEEFF',1,'float32'), # [ ] Beam efficiency\n ('FOEFF',1,'float32'), # [ ] Forward efficiency\n ('GAINI',1,'float32'), # [ ] Image/Signal gain ratio\n ('H2OMM',1,'float32'), # [ mm] Water vapor content\n ('PAMB',1,'float32'), # [ hPa] Ambient pressure\n ('TAMB',1,'float32'), # [ K] Ambient temperature\n ('TATMS',1,'float32'), # [ K] Atmosphere temp. in signal band\n ('TCHOP',1,'float32'), # [ K] Chopper temperature\n ('TCOLD',1,'float32'), # [ K] Cold load temperature\n ('TAUS',1,'float32'), # [neper] Opacity in signal band\n ('TAUI',1,'float32'), # [neper] Opacity in image band\n ('TATMI',1,'float32'), # [ K] Atmosphere temp. 
in image band\n ('TREC',1,'float32'), # [ K] Receiver temperature\n ('CMODE',1,'int32'), # [ code] Calibration mode\n ('ATFAC',1,'float32'), # [ ] Applied calibration factor\n ('ALTI',1,'float32'), # [ m] Site elevation\n ('COUNT',3,'3float32'), # [count] Power of Atm., Chopp., Cold\n ('LCALOF',1,'float32'), # [ rad] Longitude offset for sky measurement\n ('BCALOF',1,'float32'), # [ rad] Latitude offset for sky measurement\n ('GEOLONG',1,'float64'), # [ rad] Geographic longitude of observatory # MAY NOT EXIST IN OLD CLASS\n ('GEOLAT',1,'float64'), # [ rad] Geographic latitude of observatory # MAY NOT EXIST IN OLD CLASS\n ],\n 'BASELINE':[\n ('DEG',1,'int32'), #! [ ] Degree of last baseline\n ('SIGFI',1,'float32'), #! [Int. unit] Sigma\n ('AIRE',1,'float32'), #! [Int. unit] Area under windows\n ('NWIND',1,'int32'), #! [ ] Number of line windows\n # WARNING: These should probably have 'n', the second digit, = NWIND\n # The docs are really unclear about this, they say \"W1(MWIND)\"\n ('W1MWIND',1,'float32'), #! [km/s] Lower limits of windows\n ('W2MWIND',1,'float32'), #! [km/s] Upper limits of windows\n ('SINUS',3,'float32'), #![] Sinus baseline results\n ],\n\n 'DRIFT':[ # 16?\n ('FREQ',1,'float64') , #! [ MHz] Rest frequency real(kind=8) ::\n ('WIDTH',1,'float32'), #! [ MHz] Bandwidth real(kind=4) ::\n ('NPOIN',1,'int32') , #! [ ] Number of data points integer(kind=4) ::\n ('RPOIN',1,'float32'), #! [ ] Reference point real(kind=4) ::\n ('TREF',1,'float32') , #! [ ?] Time at reference real(kind=4) ::\n ('AREF',1,'float32') , #! [ rad] Angular offset at ref. real(kind=4) ::\n ('APOS',1,'float32') , #! [ rad] Position angle of drift real(kind=4) ::\n ('TRES',1,'float32') , #! [ ?] Time resolution real(kind=4) ::\n ('ARES',1,'float32') , #! [ rad] Angular resolution real(kind=4) ::\n ('BAD',1,'float32') , #! [ ] Blanking value real(kind=4) ::\n ('CTYPE',1,'int32') , #! [code] Type of offsets integer(kind=4) ::\n ('CIMAG',1,'float64'), #! [ MHz] Image frequency real(kind=8) ::\n ('COLLA',1,'float32'), #! [ ?] Collimation error Az real(kind=4) ::\n ('COLLE',1,'float32'), #! [ ?] 
Collimation error El real(kind=4) ::\n ],\n\n }\n\ndef _read_bytes(f, n):\n '''Read the next `n` bytes (from idlsave)'''\n return f.read(n)\n\n\"\"\"\nWarning: UNCLEAR what endianness should be!\nNumpy seemed to get it right, and I think numpy assumes NATIVE endianness\n\"\"\"\n\ndef _read_byte(f):\n '''Read a single byte (from idlsave)'''\n return numpy.uint8(struct.unpack('=B', f.read(4)[:1])[0])\n\ndef _read_int16(f):\n '''Read a signed 16-bit integer (from idlsave)'''\n return numpy.int16(struct.unpack('=h', f.read(4)[2:4])[0])\n\ndef _read_int32(f):\n '''Read a signed 32-bit integer (from idlsave)'''\n return numpy.int32(struct.unpack('=i', f.read(4))[0])\n\ndef _read_int64(f):\n '''Read a signed 64-bit integer '''\n return numpy.int64(struct.unpack('=q', f.read(8))[0])\n\ndef _read_float32(f):\n '''Read a 32-bit float (from idlsave)'''\n return numpy.float32(struct.unpack('=f', f.read(4))[0])\n\ndef _align_32(f):\n '''Align to the next 32-bit position in a file (from idlsave)'''\n\n pos = f.tell()\n if pos % 4 != 0:\n f.seek(pos + 4 - pos % 4)\n return\n\ndef _read_word(f,length):\n if length > 0:\n chars = _read_bytes(f, length)\n _align_32(f)\n else:\n chars = None\n return chars\n\ndef _read_int(f):\n return struct.unpack('i',f.read(4))\n\ndef is_ascii(s):\n \"\"\"Check if there are non-ascii characters in Unicode string\n\n Parameters\n ----------\n s : str\n The string to be checked\n\n Returns\n -------\n is_ascii : bool\n Returns True if all characters in the string are ascii. False\n otherwise.\n \"\"\"\n return len(s) == len(s.decode('ascii').encode('utf-8'))\n\ndef is_all_null(s):\n return all(x=='\\x00' for x in s) or all(x==b'\\x00' for x in s)\n\n\n\"\"\"\nfrom clic_file.f90: v1, v2\n integer(kind=4) :: bloc ! 1 : observation address [records] integer(kind=8) :: bloc ! 1- 2: observation address [records] integer(kind=4) :: bloc ! 1 : block read from index\n integer(kind=4) :: num ! 2 : observation number integer(kind=4) :: word ! 3 : address offset [4-bytes] integer(kind=4) :: num ! 2 : number read\n integer(kind=4) :: ver ! 3 : observation version integer(kind=4) :: ver ! 4 : observation version integer(kind=4) :: ver ! 3 : version read from index\n integer(kind=4) :: sourc(3) ! 4- 6: source name integer(kind=8) :: num ! 5- 6: observation number character(len=12) :: csour ! 4- 6: source read from index\n integer(kind=4) :: line(3) ! 7- 9: line name integer(kind=4) :: sourc(3) ! 7- 9: source name character(len=12) :: cline ! 7- 9: line read from index\n integer(kind=4) :: teles(3) ! 10-12: telescope name integer(kind=4) :: line(3) ! 10-12: line name character(len=12) :: ctele ! 10-12: telescope read from index\n integer(kind=4) :: dobs ! 13 : observation date [class_date] integer(kind=4) :: teles(3) ! 13-15: telescope name integer(kind=4) :: dobs ! 13 : date obs. read from index\n integer(kind=4) :: dred ! 14 : reduction date [class_date] integer(kind=4) :: dobs ! 16 : observation date [class_date] integer(kind=4) :: dred ! 14 : date red. read from index\n real(kind=4) :: off1 ! 15 : lambda offset [radian] integer(kind=4) :: dred ! 17 : reduction date [class_date] real(kind=4) :: off1 ! 15 : read offset 1\n real(kind=4) :: off2 ! 16 : beta offset [radian] real(kind=4) :: off1 ! 18 : lambda offset [radian] real(kind=4) :: off2 ! 16 : read offset 2\n integer(kind=4) :: typec ! 17 : coordinates types real(kind=4) :: off2 ! 19 : beta offset [radian] integer(kind=4) :: type ! 17 : type of read offsets\n integer(kind=4) :: kind ! 18 : data kind integer(kind=4) :: typec ! 
20 : coordinates types integer(kind=4) :: kind ! 18 : type of observation\n integer(kind=4) :: qual ! 19 : data quality integer(kind=4) :: kind ! 21 : data kind integer(kind=4) :: qual ! 19 : Quality read from index\n integer(kind=4) :: scan ! 20 : scan number integer(kind=4) :: qual ! 22 : data quality integer(kind=4) :: scan ! 20 : Scan number read from index\n integer(kind=4) :: proc ! 21 : procedure type integer(kind=4) :: scan ! 23 : scan number real(kind=4) :: posa ! 21 : Position angle\n integer(kind=4) :: itype ! 22 : observation type integer(kind=4) :: proc ! 24 : procedure type integer(kind=4) :: subscan ! 22 : Subscan number\n real(kind=4) :: houra ! 23 : hour angle [radian] integer(kind=4) :: itype ! 25 : observation type integer(kind=4) :: pad(10) ! 23-32: Pad to 32 words\n integer(kind=4) :: project ! 24 : project name real(kind=4) :: houra ! 26 : hour angle [radian]\n integer(kind=4) :: pad1 ! 25 : unused word integer(kind=4) :: project(2) ! 27 : project name\n integer(kind=4) :: bpc ! 26 : baseline bandpass cal status integer(kind=4) :: bpc ! 29 : baseline bandpass cal status\n integer(kind=4) :: ic ! 27 : instrumental cal status integer(kind=4) :: ic ! 30 : instrumental cal status\n integer(kind=4) :: recei ! 28 : receiver number integer(kind=4) :: recei ! 31 : receiver number\n real(kind=4) :: ut ! 29 : UT [s] real(kind=4) :: ut ! 32 : UT [s]\n integer(kind=4) :: pad2(3) ! 30-32: padding to 32 4-bytes word\n\nequivalently\n\n integer(kind=obsnum_length) :: num ! [ ] Observation number\n integer(kind=4) :: ver ! [ ] Version number\n integer(kind=4) :: teles(3) ! [ ] Telescope name\n integer(kind=4) :: dobs ! [MJD-60549] Date of observation\n integer(kind=4) :: dred ! [MJD-60549] Date of reduction\n integer(kind=4) :: typec ! [ code] Type of coordinates\n integer(kind=4) :: kind ! [ code] Type of data\n integer(kind=4) :: qual ! [ code] Quality of data\n integer(kind=4) :: subscan ! [ ] Subscan number\n integer(kind=obsnum_length) :: scan ! [ ] Scan number\n\"\"\"\n\n\"\"\"\nindex.f90:\n\n call conv%read%i8(data(1), indl%bloc, 1) ! bloc\n call conv%read%i4(data(3), indl%word, 1) ! word\n call conv%read%i8(data(4), indl%num, 1) ! num\n call conv%read%i4(data(6), indl%ver, 1) ! ver\n call conv%read%cc(data(7), indl%csour, 3) ! csour\n call conv%read%cc(data(10),indl%cline, 3) ! cline\n call conv%read%cc(data(13),indl%ctele, 3) ! ctele\n call conv%read%i4(data(16),indl%dobs, 1) ! dobs\n call conv%read%i4(data(17),indl%dred, 1) ! dred\n call conv%read%r4(data(18),indl%off1, 1) ! off1\n call conv%read%r4(data(19),indl%off2, 1) ! off2\n call conv%read%i4(data(20),indl%type, 1) ! type\n call conv%read%i4(data(21),indl%kind, 1) ! kind\n call conv%read%i4(data(22),indl%qual, 1) ! qual\n call conv%read%r4(data(23),indl%posa, 1) ! posa\n call conv%read%i8(data(24),indl%scan, 1) ! scan\n call conv%read%i4(data(26),indl%subscan,1) ! subscan\n if (isv3) then\n call conv%read%r8(data(27),indl%ut, 1) ! 
ut\n else\n\"\"\"\n\ndef _read_indices(f, file_description):\n #if file_description['version'] in (1,2):\n # extension_positions = (file_description['aex']-1)*file_description['reclen']*4\n # all_indices = {extension:\n # [_read_index(f,\n # filetype=file_description['version'],\n # entry=ii,\n # #position=position,\n # )\n # for ii in range(file_description['lex1'])]\n # for extension,position in enumerate(extension_positions)\n # if position > 0\n # }\n\n #elif file_description['version'] == 1:\n extension_positions = ((file_description['aex'].astype('int64')-1)\n *file_description['reclen']*4)\n all_indices = [_read_index(f,\n filetype=file_description['version'],\n # 1-indexed files\n entry_number=ii+1,\n file_description=file_description,\n )\n for ii in range(file_description['xnext']-1)]\n #else:\n # raise ValueError(\"Invalid file version {0}\".format(file_description['version']))\n\n\n return all_indices\n\n\ndef _find_index(entry_number, file_description, return_position=False):\n if file_description['gex'] == 10:\n kex=(entry_number-1)//file_description['lex1'] + 1\n else:\n # exponential growth:\n #kex = gi8_dicho(file_description['nex'], file_description['lexn'], entry_number) - 1\n kex = len([xx for xx in file_description['lexn'] if xx<entry_number])\n\n ken = entry_number - file_description['lexn'][kex-1]\n #! Find ken (relative entry number in the extension, starts from 1)\n #ken = entry_num - file%desc%lexn(kex-1)\n\n kb = ((ken-1)*file_description['lind'])//file_description['reclen']\n #kb = ((ken-1)*file%desc%lind)/file%desc%reclen ! In the extension, the\n # ! relative record position (as an offset, starts from 0) where the\n # ! Entry Index starts. NB: there can be a non-integer number of Entry\n # ! Indexes per record\n\n # Subtract 1: 'aex' is 1-indexed\n kbl = (file_description['aex'][kex-1]+kb)-1\n # kbl = file%desc%aex(kex)+kb ! The absolute record number where the Entry Index goes\n\n k = ((ken-1)*file_description['lind']) % file_description['reclen']\n #k = mod((ken-1)*file%desc%lind,file%desc%reclen)+1 ! = in the record, the\n # ! 
first word of the Entry Index of the entry number 'entry_num'\n\n\n if return_position:\n return (kbl*file_description['reclen']+k)*4\n else:\n return kbl,k\n\n\ndef _read_index(f, filetype='v1', DEBUG=False, clic=False, position=None,\n entry_number=None, file_description=None):\n\n if position is not None:\n f.seek(position)\n if entry_number is not None:\n indpos = _find_index(entry_number, file_description, return_position=True)\n f.seek(indpos)\n\n x0 = f.tell()\n\n if filetype in ('1A ','v1', 1):\n log.debug('Index filetype 1A')\n index = {\n \"XBLOC\":_read_int32(f),\n \"XNUM\":_read_int32(f),\n \"XVER\":_read_int32(f),\n \"XSOURC\":_read_word(f,12),\n \"XLINE\":_read_word(f,12),\n \"XTEL\":_read_word(f,12),\n \"XDOBS\":_read_int32(f),\n \"XDRED\":_read_int32(f),\n \"XOFF1\":_read_float32(f),# \t first offset (real, radians)\n \"XOFF2\":_read_float32(f),# \t second offset (real, radians)\n \"XTYPE\":_read_int32(f),# \t coordinate system ('EQ'', 'GA', 'HO')\n \"XKIND\":_read_int32(f),# \t Kind of observation (0: spectral, 1: continuum, )\n \"XQUAL\":_read_int32(f),# \t Quality (0-9)\n \"XSCAN\":_read_int32(f),# \t Scan number\n }\n index['BLOC'] = index['XBLOC'] # v2 compatibility\n index['WORD'] = 1 # v2 compatibility\n index['SOURC'] = index['CSOUR'] = index['XSOURC']\n index['DOBS'] = index['CDOBS'] = index['XDOBS']\n index['CTELE'] = index['XTEL']\n index['LINE'] = index['XLINE']\n index['OFF1'] = index['XOFF1']\n index['OFF2'] = index['XOFF2']\n index['QUAL'] = index['XQUAL']\n index['SCAN'] = index['XSCAN']\n index['KIND'] = index['XKIND']\n if clic: # use header set up in clic\n nextchunk = {\n \"XPROC\":_read_int32(f),# \"procedure type\"\n \"XITYPE\":_read_int32(f),#\n \"XHOURANG\":_read_float32(f),#\n \"XPROJNAME\":_read_int32(f),#\n \"XPAD1\":_read_int32(f),\n \"XBPC\" :_read_int32(f),\n \"XIC\" :_read_int32(f),\n \"XRECEI\" :_read_int32(f),\n \"XUT\":_read_float32(f),\n \"XPAD2\":numpy.fromfile(f,count=3,dtype='int32') # BLANK is NOT ALLOWED!!! It is a special KW\n }\n else:\n nextchunk = {\"XPOSA\":_read_float32(f),\n \"XSUBSCAN\":_read_int32(f),\n 'XPAD2': numpy.fromfile(f,count=10,dtype='int32'),\n }\n nextchunk['SUBSCAN'] = nextchunk['XSUBSCAN']\n nextchunk['POSA'] = nextchunk['XPOSA']\n index.update(nextchunk)\n if (f.tell() - x0 != 128):\n missed_bits = (f.tell()-x0)\n X = f.read(128-missed_bits)\n if DEBUG: print(\"read_index missed %i bits: %s\" % (128-missed_bits,X))\n #raise IndexError(\"read_index did not successfully read 128 bytes at %i. Read %i bytes.\" % (x0,f.tell()-x0))\n if any(not is_ascii(index[x]) for x in ('XSOURC','XLINE','XTEL')):\n raise ValueError(\"Invalid index read from {0}.\".format(x0))\n elif filetype in ('2A ','v2', 2):\n log.debug('Index filetype 2A')\n index = {\n \"BLOC\" : _read_int64(f) , #(data(1), 1) ! bloc\n \"WORD\" : _read_int32(f) , #(data(3), 1) ! word\n \"NUM\" : _read_int64(f) , #(data(4), 1) ! num\n \"VER\" : _read_int32(f) , #(data(6), 1) ! ver\n \"CSOUR\" : _read_word(f,12), #(data(7), 3) ! csour\n \"CLINE\" : _read_word(f,12), #(data(10), 3) ! cline\n \"CTELE\" : _read_word(f,12), #(data(13), 3) ! ctele\n \"DOBS\" : _read_int32(f) , #(data(16), 1) ! dobs\n \"DRED\" : _read_int32(f) , #(data(17), 1) ! dred\n \"OFF1\" : _read_float32(f), #(data(18), 1) ! off1\n \"OFF2\" : _read_float32(f), #(data(19), 1) ! off2\n \"TYPE\" : _read_int32(f) , #(data(20), 1) ! type\n \"KIND\" : _read_int32(f) , #(data(21), 1) ! kind\n \"QUAL\" : _read_int32(f) , #(data(22), 1) ! qual\n \"POSA\" : _read_float32(f), #(data(23), 1) ! 
posa\n \"SCAN\" : _read_int64(f) , #(data(24), 1) ! scan\n \"SUBSCAN\": _read_int32(f) , #(data(26), 1) ! subscan\n }\n #last24bits = f.read(24)\n #log.debug(\"Read 24 bits: '{0}'\".format(last24bits))\n if any((is_all_null(index[x]) or not is_ascii(index[x]))\n for x in ('CSOUR','CLINE','CTELE')):\n raise ValueError(\"Invalid index read from {0}.\".format(x0))\n index['SOURC'] = index['XSOURC'] = index['CSOUR']\n index['LINE'] = index['XLINE'] = index['CLINE']\n index['XKIND'] = index['KIND']\n try:\n index['DOBS'] = index['XDOBS'] = index['CDOBS']\n except KeyError:\n index['CDOBS'] = index['XDOBS'] = index['DOBS']\n\n else:\n raise NotImplementedError(\"Filetype {0} not implemented.\".format(filetype))\n\n # from kernel/lib/gsys/date.f90: gag_julda\n index['MJD'] = index['DOBS'] + 60549\n class_dobs = index['DOBS']\n index['DOBS'] = ((class_dobs + 365*2025)/365.2425 + 1)\n # SLOW\n #index['DATEOBS'] = Time(index['DOBS'], format='jyear')\n #index['DATEOBSS'] = index['DATEOBS'].iso\n\n log.debug(\"Indexing finished at {0}\".format(f.tell()))\n return index\n\ndef _read_header(f, type=0, position=None):\n \"\"\"\n Read a header entry from a CLASS file\n (helper function)\n \"\"\"\n if position is not None:\n f.seek(position)\n if type in keys_lengths:\n hdrsec = [(x[0],numpy.fromfile(f,count=1,dtype=x[2])[0])\n for x in keys_lengths[type]]\n return dict(hdrsec)\n else:\n return {}\n raise ValueError(\"Unrecognized type {0}\".format(type))\n\ndef _read_first_record(f):\n f.seek(0)\n filetype = f.read(4)\n if fileversion_dict[filetype] == 'v1':\n return _read_first_record_v1(f)\n elif fileversion_dict[filetype] == 'v2':\n return _read_first_record_v2(f)\n else:\n raise ValueError(\"Unrecognized filetype {0}\".format(filetype))\n\ndef _read_first_record_v1(f, record_length_words=128):\n r\"\"\"\n Position & Parameter & Fortran Kind & Purpose \\\\\n \\hline\n 1 & {\\tt code} & Character*4 & File code \\\\\n 2 & {\\tt next} & Integer*4 & Next free record \\\\\n 3 & {\\tt lex} & Integer*4 & Length of first extension (number of entries) \\\\\n 4 & {\\tt nex} & Integer*4 & Number of extensions \\\\\n 5 & {\\tt xnext} & Integer*4 & Next available entry number \\\\\n 6:2*{\\tt reclen} & {\\tt ex(:)} & Integer*4 & Array of extension addresses\n\n from classic_mod.f90:\n integer(kind=4) :: code ! 1 File code\n integer(kind=4) :: next ! 2 Next free record\n integer(kind=4) :: lex ! 3 Extension length (number of entries)\n integer(kind=4) :: nex ! 4 Number of extensions\n integer(kind=4) :: xnext ! 5 Next available entry number\n integer(kind=4) :: aex(mex_v1) ! 
6:256 Extension addresses\n\n from old (<dec2013) class, file.f90:\n read(ilun,rec=1,err=11,iostat=ier) ibx%code,ibx%next, &\n & ibx%ilex,ibx%imex,ibx%xnext\n\n also uses filedesc_v1tov2 from classic/lib/file.f90\n \"\"\"\n\n# OLD NOTES\n# hdr = header\n# hdr.update(obshead) # re-overwrite things\n# hdr.update({'OBSNUM':obsnum,'RECNUM':spcount})\n# hdr.update({'RA':hdr['LAM']/pi*180,'DEC':hdr['BET']/pi*180})\n# hdr.update({'RAoff':hdr['LAMOF']/pi*180,'DECoff':hdr['BETOF']/pi*180})\n# hdr.update({'OBJECT':hdr['SOURC'].strip()})\n# hdr.update({'BUNIT':'Tastar'})\n# hdr.update({'EXPOSURE':hdr['TIME']})\n\n\n f.seek(0)\n file_description = {\n 'code': f.read(4),\n 'next': _read_int32(f),\n 'lex': _read_int32(f),\n 'nex': _read_int32(f),\n 'xnext': _read_int32(f),\n 'gex': 10.,\n 'vind': 1, # classic_vind_v1 packages/classic/lib/classic_mod.f90\n 'version': 1,\n 'nextrec': 3,\n 'nextword': 1,\n 'lind': 32, #classic_lind_v1 packages/classic/lib/classic_mod.f90\n 'kind': 'unknown',\n 'flags': 0,\n }\n file_description['reclen'] = record_length_words # should be 128w = 512 bytes\n ex = np.fromfile(f, count=(record_length_words*2-5), dtype='int32')\n file_description['ex'] = ex[ex!=0]\n file_description['nextrec'] = file_description['next'] # this can't be...\n file_description['lex1'] = file_description['lex'] # number of entries\n file_description['lexn'] = (np.arange(file_description['nex']+1) *\n file_description['lex1'])\n file_description['nentries'] = np.sum(file_description['lexn'])\n file_description['aex'] = file_description['ex'][:file_description['nex']]\n #file_description['version'] = fileversion_dict[file_description['code']]\n assert f.tell() == 1024\n # Something is not quite right with the 'ex' parsing\n #assert len(file_description['ex']) == file_description['nex']\n return file_description\n\ndef _read_first_record_v2(f):\n r\"\"\" packages/classic/lib/file.f90\n Position & Parameter & Fortran Kind & Purpose & Unit \\\\\n \\hline\n 1 & {\\tt code} & Character*4 & File code & - \\\\\n 2 & {\\tt reclen} & Integer*4 & Record length & words \\\\\n 3 & {\\tt kind} & Integer*4 & File kind & - \\\\\n 4 & {\\tt vind} & Integer*4 & Index version & - \\\\\n 5 & {\\tt lind} & Integer*4 & Index length & words \\\\\n 6 & {\\tt flags} & Integer*4 & Bit flags. \\#1: single or multiple, & - \\\\\n & & & \\#2-32: provision (0-filled) & \\\\\n \\hline\n 7:8 & {\\tt xnext} & Integer*8 & Next available entry number & - \\\\\n 9:10 & {\\tt nextrec} & Integer*8 & Next record which contains free space & record \\\\\n 11 & {\\tt nextword} & Integer*4 & Next free word in this record & word \\\\\n \\hline\n 12 & {\\tt lex1} & Integer*4 & Length of first extension index & entries \\\\\n 13 & {\\tt nex} & Integer*4 & Number of extensions & - \\\\\n 14 & {\\tt gex} & Integer*4 & Extension growth rule & - \\\\\n 15:{\\tt reclen} & {\\tt aex(:)} & Integer*8 & Array of extension addresses & record\n \"\"\"\n f.seek(0)\n file_description = {\n 'code': f.read(4),\n 'reclen': _read_int32(f),\n 'kind': _read_int32(f),\n 'vind': _read_int32(f),\n 'lind': _read_int32(f),\n 'flags': _read_int32(f),\n 'xnext': _read_int64(f),\n 'nextrec': _read_int64(f),\n 'nextword': _read_int32(f),\n 'lex1': _read_int32(f),\n 'nex': _read_int32(f),\n 'gex': _read_int32(f),\n }\n file_description['lexn'] = [0]\n if file_description['gex'] == 10:\n for ii in range(1, file_description['nex']+1):\n file_description['lexn'].append(file_description['lexn'][-1]+file_description['lex1'])\n else:\n #! Exponential growth. 
Only growth with mantissa 2.0 is supported\n for ii in range(1, file_description['nex']):\n # I don't know what the fortran does here!!!\n # ahh, maybe 2_8 means int(2, dtype='int64')\n nent = int(file_description['lex1'] * 2**(ii-1))\n #nent = int(file%desc%lex1,kind=8) * 2_8**(iex-1)\n file_description['lexn'].append(file_description['lexn'][-1]+nent)\n #file%desc%lexn(iex) = file%desc%lexn(iex-1) + nent\n file_description['nentries'] = np.sum(file_description['lexn'])\n record_length_words = file_description['reclen']\n aex = numpy.fromfile(f, count=(record_length_words-15)//2, dtype='int64')\n file_description['aex'] = aex[aex!=0]\n assert len(file_description['aex']) == file_description['nex']\n file_description['version'] = 2\n return file_description\n\ndef gi8_dicho(ninp,lexn,xval,ceil=True):\n \"\"\"\n ! @ public\n ! Find ival such as\n ! X(ival-1) < xval <= X(ival) (ceiling mode)\n ! or\n ! X(ival) <= xval < X(ival+1) (floor mode)\n ! for input data ordered. Use a dichotomic search for that.\n call gi8_dicho(nex,file%desc%lexn,entry_num,.true.,kex,error)\n \"\"\"\n #integer(kind=size_length), intent(in) :: np ! Number of input points\n #integer(kind=8), intent(in) :: x(np) ! Input ordered Values\n #integer(kind=8), intent(in) :: xval ! The value we search for\n #logical, intent(in) :: ceil ! Ceiling or floor mode?\n #integer(kind=size_length), intent(out) :: ival ! Position in the array\n #logical, intent(inout) :: error ! Logical error flag\n iinf = 1\n isup = ninp\n #! Ceiling mode\n while isup > (iinf+1):\n imid = int(np.floor((isup + iinf)/2.))\n if (lexn[imid-1] < xval):\n iinf = imid\n else:\n isup = imid\n ival = isup\n return ival\n\ndef _read_obshead(f, file_description, position=None, verbose=False):\n if file_description['version'] == 1:\n return _read_obshead_v1(f, position=position, verbose=verbose)\n if file_description['version'] == 2:\n return _read_obshead_v2(f, position=position)\n else:\n raise ValueError(\"Invalid file version {0}.\".\n format(file_description['version']))\n\ndef _read_obshead_v2(f, position=None):\n \"\"\"\n ! Version 2 (public)\n integer(kind=4), parameter :: entrydescv2_nw1=11 ! Number of words, in 1st part\n integer(kind=4), parameter :: entrydescv2_nw2=5 ! Number of words for 1 section in 2nd part\n type classic_entrydesc_t\n sequence\n integer(kind=4) :: code ! 1 : code observation icode\n integer(kind=4) :: version ! 2 : observation version\n integer(kind=4) :: nsec ! 3 : number of sections\n integer(kind=4) :: pad1 ! - : memory padding (not in data)\n integer(kind=8) :: nword ! 4- 5: number of words\n integer(kind=8) :: adata ! 6- 7: data address\n integer(kind=8) :: ldata ! 8- 9: data length\n integer(kind=8) :: xnum ! 10-11: entry number\n ! Out of the 'sequence' block:\n integer(kind=4) :: msec ! Not in data: maximum number of sections the\n ! Observation Index can hold\n integer(kind=4) :: pad2 ! Memory padding for 8 bytes alignment\n integer(kind=4) :: seciden(classic_maxsec) ! Section Numbers (on disk: 1 to ed%nsec)\n integer(kind=8) :: secleng(classic_maxsec) ! Section Lengths (on disk: 1 to ed%nsec)\n integer(kind=8) :: secaddr(classic_maxsec) ! Section Addresses (on disk: 1 to ed%nsec)\n end type classic_entrydesc_t\n \"\"\"\n if position is not None:\n f.seek(position)\n else:\n position = f.tell()\n IDcode = f.read(4)\n if IDcode.strip() != b'2':\n raise IndexError(\"Observation Header reading failure at {0}. 
\"\n \"Record does not appear to be an observation header.\".\n format(position))\n f.seek(position)\n\n entrydescv2_nw1 = 11\n entrydescv2_nw2 = 5\n obshead = {\n 'CODE': f.read(4),\n 'VERSION': _read_int32(f),\n 'NSEC': _read_int32(f),\n #'_blank': _read_int32(f),\n 'NWORD': _read_int64(f),\n 'ADATA': _read_int64(f),\n 'LDATA': _read_int64(f),\n 'XNUM': _read_int64(f),\n #'MSEC': _read_int32(f),\n #'_blank2': _read_int32(f),\n }\n section_numbers = np.fromfile(f, count=obshead['NSEC'], dtype='int32')\n section_lengths = np.fromfile(f, count=obshead['NSEC'], dtype='int64')\n section_addresses = np.fromfile(f, count=obshead['NSEC'], dtype='int64')\n\n return obshead['XNUM'],obshead,dict(zip(section_numbers,section_addresses))\n\ndef _read_obshead_v1(f, position=None, verbose=False):\n \"\"\"\n Read the observation header of a CLASS file\n (helper function for read_class; should not be used independently)\n \"\"\"\n if position is not None:\n f.seek(position)\n IDcode = f.read(4)\n if IDcode.strip() != b'2':\n raise IndexError(\"Observation Header reading failure at {0}. \"\n \"Record does not appear to be an observation header.\".\n format(f.tell() - 4))\n (nblocks, nbyteob, data_address, nheaders, data_length, obindex, nsec,\n obsnum) = numpy.fromfile(f, count=8, dtype='int32')\n if verbose:\n print(\"nblocks,nbyteob,data_address,data_length,nheaders,obindex,nsec,obsnum\",nblocks,nbyteob,data_address,data_length,nheaders,obindex,nsec,obsnum)\n print(\"DATA_LENGTH: \",data_length)\n\n seccodes = numpy.fromfile(f,count=nsec,dtype='int32')\n # Documentation says addresses then length: It is apparently wrong\n seclen = numpy.fromfile(f,count=nsec,dtype='int32')\n secaddr = numpy.fromfile(f,count=nsec,dtype='int32')\n if verbose:\n print(\"Section codes, addresses, lengths: \",seccodes,secaddr,seclen)\n\n hdr = {'NBLOCKS':nblocks, 'NBYTEOB':nbyteob, 'DATAADDR':data_address,\n 'DATALEN':data_length, 'NHEADERS':nheaders, 'OBINDEX':obindex,\n 'NSEC':nsec, 'OBSNUM':obsnum}\n\n #return obsnum,seccodes\n return obsnum,hdr,dict(zip(seccodes,secaddr))\n\n# THIS IS IN READ_OBSHEAD!!!\n# def _read_preheader(f):\n# \"\"\"\n# Not entirely clear what this is, but it is stuff that precedes the actual data\n#\n# Looks something like this:\n# array([ 1, -2, -3, -4, -14,\n# 9, 17, 18, 25, 55,\n# 64, 81, 99, -1179344801, 979657591,\n#\n# -2, -3, -4, -14 indicate the 4 header types\n# 9,17,18,25 *MAY* indicate the number of bytes in each\n#\n#\n# HOW is it indicated how many entries there are?\n# \"\"\"\n# # 13 comes from counting 1, -2,....99 above\n# numbers = np.fromfile(f, count=13, dtype='int32')\n# sections = [n for n in numbers if n in header_id_numbers]\n# return sections\n\ndef downsample_1d(myarr,factor,estimator=np.mean, weight=None):\n \"\"\"\n Downsample a 1D array by averaging over *factor* pixels.\n Crops right side if the shape is not a multiple of factor.\n\n This code is pure numpy and should be fast.\n\n keywords:\n estimator - default to mean. You can downsample by summing or\n something else if you want a different estimator\n (e.g., downsampling error: you want to sum & divide by sqrt(n))\n weight: np.ndarray\n An array of weights to use for the downsampling. If None,\n assumes uniform 1\n \"\"\"\n if myarr.ndim != 1:\n raise ValueError(\"Only works on 1d data. 
Says so in the title.\")\n xs = myarr.size\n crarr = myarr[:xs-(xs % int(factor))]\n if weight is None:\n dsarr = estimator(np.concatenate([[crarr[i::factor] for i in\n range(factor)]]),axis=0)\n else:\n dsarr = estimator(np.concatenate([[crarr[i::factor]*weight[i::factor] for i in\n range(factor)]]),axis=0)\n warr = estimator(np.concatenate([[weight[i::factor] for i in\n range(factor)]]),axis=0)\n dsarr = dsarr/warr\n return dsarr\n\n# unit test\ndef test_downsample1d():\n data = np.arange(10)\n weight = np.ones(10)\n weight[5]=0\n assert np.all(downsample_1d(data, 2, weight=weight, estimator=np.mean) ==\n np.array([0.5, 2.5, 4.0, 6.5, 8.5]))\n\ndef read_observation(f, obsid, file_description=None, indices=None,\n my_memmap=None, memmap=True, verbose=False):\n if isinstance(f, str):\n f = open(f,'rb')\n opened = True\n if memmap:\n my_memmap = numpy.memmap(f, offset=0, dtype='float32',\n mode='r')\n else:\n my_memmap = None\n elif my_memmap is None and memmap:\n raise ValueError(\"Must pass in a memmap object if passing in a file object.\")\n else:\n opened = False\n\n if file_description is None:\n file_description = _read_first_record(f)\n\n if indices is None:\n indices = _read_indices(f, file_description)\n\n index = indices[obsid]\n\n obs_position = (index['BLOC']-1)*file_description['reclen']*4 + (index['WORD']-1)*4\n log.debug(\"Reading observation at position {0}\".format(obs_position))\n obsnum,obshead,sections = _read_obshead(f, file_description,\n position=obs_position,\n verbose=verbose)\n header = obshead\n\n datastart = 0\n for section_id,section_address in iteritems(sections):\n # Section addresses are 1-indexed byte addresses\n # in the current \"block\"\n sec_position = obs_position + (section_address-1)*4\n temp_hdr = _read_header(f, type=header_id_numbers[section_id],\n position=sec_position)\n header.update(temp_hdr)\n datastart = max(datastart,f.tell())\n\n hdr = header\n hdr.update(obshead) # re-overwrite things\n hdr.update({'OBSNUM':obsnum,'RECNUM':obsid})\n hdr.update({'RA':hdr['LAM']/pi*180,'DEC':hdr['BET']/pi*180})\n hdr.update({'RAoff':hdr['LAMOF']/pi*180,'DECoff':hdr['BETOF']/pi*180})\n hdr.update({'OBJECT':hdr['SOURC'].strip()})\n hdr.update({'BUNIT':'Tastar'})\n hdr.update({'EXPOSURE':float(hdr['TIME'])})\n hdr['HDRSTART'] = obs_position\n hdr['DATASTART'] = datastart\n hdr.update(indices[obsid])\n # Define MJD as mid-exposure time in MJD\n hdr.update({'OBSDATE': hdr['MJD'] + hdr['UT']/2./pi})\n\n # Apparently the data are still valid in this case?\n #if hdr['XNUM'] != obsid+1:\n # log.error(\"The spectrum read was {0} but {1} was requested.\".\n # format(hdr['XNUM']-1, obsid))\n\n if hdr['KIND'] == 1: # continuum\n nchan = hdr['NPOIN']\n elif 'NCHAN' in hdr:\n nchan = hdr['NCHAN']\n else:\n log.error(\"No NCHAN in header. This is not a spectrum.\")\n import ipdb; ipdb.set_trace()\n # There may be a 1-channel offset? 
CHECK!!!\n # (changed by 1 pixel - October 14, 2014)\n # (changed back - October 21, 2014 - I think the ends are just bad, but not\n # zero.)\n f.seek(datastart-1)\n spec = _read_spectrum(f, position=datastart-1, nchan=nchan,\n memmap=memmap, my_memmap=my_memmap)\n\n if opened:\n f.close()\n\n return spec, hdr\n\ndef _read_spectrum(f, position, nchan, my_memmap=None, memmap=True):\n if position != f.tell():\n log.warning(\"Reading data from {0}, but the file is wound \"\n \"to {1}.\".format(position, f.tell()))\n if memmap:\n here = position\n #spectrum = numpy.memmap(filename, offset=here, dtype='float32',\n # mode='r', shape=(nchan,))\n spectrum = my_memmap[here//4:here//4+nchan]\n f.seek(here+nchan*4)\n else:\n f.seek(position)\n spectrum = numpy.fromfile(f,count=nchan,dtype='float32')\n\n return spectrum\n\ndef _spectrum_from_header(fileobj, header, memmap=None):\n return _read_spectrum(fileobj, position=header['DATASTART'],\n nchan=header['NCHAN'] if 'NCHAN' in hdr else hdr['NPOIN'],\n my_memmap=memmap)\n\ndef clean_header(header):\n newheader = {}\n for k in header:\n if not isinstance(header[k], (int, float, str)):\n if isinstance(header[k], np.ndarray) and header[k].size > 1:\n if header[k].size > 10:\n raise ValueError(\"Large array being put in header. That's no good. key={0}\".format(k))\n for ii,val in enumerate(header[k]):\n newheader[k[:7]+str(ii)] = val\n else:\n newheader[k[:8]] = str(header[k])\n else:\n newheader[k[:8]] = header[k]\n\n return newheader\n\nclass ClassObject(object):\n def __init__(self, filename, verbose=False):\n t0 = time.time()\n self._file = open(filename, 'rb')\n self.file_description = _read_first_record(self._file)\n self.allind = _read_indices(self._file, self.file_description)\n self._data = np.memmap(self._file, dtype='float32', mode='r')\n if verbose: log.info(\"Setting _spectra\")\n self._spectra = LazyItem(self)\n t1 = time.time()\n if verbose: log.info(\"Setting posang. t={0}\".format(t1-t0))\n self.set_posang()\n t2 = time.time()\n if verbose: log.info(\"Identifying otf scans. t={0}\".format(t2-t1))\n self._identify_otf_scans(verbose=verbose)\n t3 = time.time()\n #self._load_all_spectra()\n if verbose:\n log.info(\"Loaded CLASS object with {3} indices. 
Time breakdown:\"\n \" {0}s for indices, \"\n \"{1}s for posang, and {2}s for OTF scan identification\"\n .format(t1-t0, t2-t1, t3-t2, len(self.allind)))\n\n\n def __repr__(self):\n s = \"\\n\".join([\"{k}: {v}\".format(k=k,v=v)\n for k,v in iteritems(self.getinfo())])\n return \"ClassObject({id}) with {nspec} entries\\n\".format(id=id(self),\n nspec=len(self.allind)) + s\n\n def getinfo(self, allsources=False):\n info = dict(\n tels = self.tels,\n lines = self.lines,\n scans = self.scans,\n sources = self.sources if allsources else self.sci_sources,\n )\n return info\n\n def set_posang(self):\n h0 = self.headers[0]\n for h in self.headers:\n dx = h['OFF1'] - h0['OFF1']\n dy = h['OFF2'] - h0['OFF2']\n h['COMPPOSA'] = np.arctan2(dy,dx)*180/np.pi\n h0 = h\n\n\n def _identify_otf_scans(self, verbose=False):\n h0 = self.allind[0]\n st = 0\n otfscan = 0\n posangs = [h['COMPPOSA'] for h in self.allind]\n if verbose:\n pb = ProgressBar(len(self.allind))\n\n for ii,h in enumerate(self.allind):\n if (h['SCAN'] != h0['SCAN']\n or h['SOURC'] != h0['SOURC']):\n\n h0['FIRSTSCAN'] = st\n cpa = np.median(posangs[st:ii])\n for hh in self.allind[st:ii]:\n hh['SCANPOSA'] = cpa % 180\n st = ii\n if h['SCAN'] == h0['SCAN']:\n h0['OTFSCAN'] = otfscan\n otfscan += 1\n h['OTFSCAN'] = otfscan\n else:\n otfscan = 0\n h['OTFSCAN'] = otfscan\n else:\n h['OTFSCAN'] = otfscan\n\n if verbose:\n pb.update(ii)\n\n def listscans(self, source=None, telescope=None, out=sys.stdout):\n minid=0\n scan = -1\n sourc = \"\"\n #tel = ''\n minoff1,maxoff1 = np.inf,-np.inf\n minoff2,maxoff2 = np.inf,-np.inf\n ttlangle,nangle = 0.0,0\n print(\"{entries:15s} {SOURC:12s} {XTEL:12s} {SCAN:>8s} {SUBSCAN:>8s} \"\n \"[ {RAmin:>12s}, {RAmax:>12s} ] \"\n \"[ {DECmin:>12s}, {DECmax:>12s} ] \"\n \"{angle:>12s} {SCANPOSA:>12s} {OTFSCAN:>8s} {TSYS:>8s} {UTD:>12s}\"\n .format(entries='Scans', SOURC='Source', XTEL='Telescope',\n SCAN='Scan', SUBSCAN='Subscan',\n RAmin='min(RA)', RAmax='max(RA)',\n DECmin='min(DEC)', DECmax='max(DEC)',\n SCANPOSA='Scan PA',\n angle='Angle', OTFSCAN='OTFscan',\n TSYS='TSYS', UTD='UTD'),\n file=out)\n\n data_rows = []\n\n for ii,row in enumerate(self.headers):\n if (row['SCAN'] == scan\n and row['SOURC'] == sourc\n #and row['XTEL'] == tel\n ):\n minoff1 = min(minoff1, row['OFF1'])\n maxoff1 = max(maxoff1, row['OFF1'])\n minoff2 = min(minoff2, row['OFF2'])\n maxoff2 = max(maxoff2, row['OFF2'])\n ttlangle += np.arctan2(row['OFF2'] - prevrow['OFF2'],\n row['OFF1'] - prevrow['OFF1'])%np.pi\n nangle += 1\n prevrow = row\n\n else:\n if scan == -1:\n scan = row['SCAN']\n sourc = row['SOURC']\n #tel = row['XTEL']\n prevrow = row\n continue\n\n ok = True\n if source is not None:\n if isinstance(source, (list,tuple)):\n ok = ok and any(re.search((s), prevrow['SOURC'])\n for s in source)\n else:\n ok = ok and re.search((source), prevrow['SOURC'])\n if telescope is not None:\n ok = ok and re.search((telescope), prevrow['XTEL'])\n if ok:\n data = dict(RAmin=minoff1*180/np.pi*3600,\n RAmax=maxoff1*180/np.pi*3600,\n DECmin=minoff2*180/np.pi*3600,\n DECmax=maxoff2*180/np.pi*3600,\n angle=(ttlangle/nangle)*180/np.pi if nangle>0 else 0,\n e0=minid,\n e1=ii-1,\n #TSYS=row['TSYS'] if 'TSYS' in row else '--',\n UTD=row['DOBS']+row['UT'] if 'UT' in row else -99,\n **prevrow)\n print(\"{e0:7d}-{e1:7d} {SOURC:12s} {XTEL:12s} {SCAN:8d} {SUBSCAN:8d} \"\n \"[ {RAmin:12f}, {RAmax:12f} ] \"\n \"[ {DECmin:12f}, {DECmax:12f} ] \"\n \"{angle:12.1f} {SCANPOSA:12.1f} {OTFSCAN:8d}\"\n \" {TSYS:>8.1f} {UTD:12f}\".\n format(**data),\n file=out)\n\n 
data_rows.append(data)\n\n minoff1,maxoff1 = np.inf,-np.inf\n minoff2,maxoff2 = np.inf,-np.inf\n ttlangle,nangle = 0.0,0\n scan = row['SCAN']\n sourc = row['SOURC']\n #tel = row['XTEL']\n minid = ii\n\n return data\n\n @property\n def tels(self):\n if hasattr(self,'_tels'):\n return self._tels\n else:\n self._tels = set([h['CTELE'] for h in self.allind])\n #testing if CTELE even works\n return self._tels\n\n @property\n def sources(self):\n if hasattr(self,'_source'):\n return self._source\n else:\n self._source = set([h['SOURC'] for h in self.allind])\n return self._source\n\n @property\n def scans(self):\n if hasattr(self,'_scan'):\n return self._scan\n else:\n self._scan = set([h['SCAN'] for h in self.allind])\n return self._scan\n\n @property\n def sci_sources(self):\n return set([s for s in self.sources\n if s[:4] not in ('SKY-', 'TSYS', 'TCAL', 'TREC', 'HOT-',\n 'COLD')])\n\n @property\n def lines(self):\n if hasattr(self,'_lines'):\n return self._lines\n else:\n self._lines = set([h['LINE'] for h in self.allind])\n return self._lines\n\n def _load_all_spectra(self, indices=None):\n if indices is None:\n indices = range(self.file_description['xnext']-1)\n\n if hasattr(self, '_loaded_indices'):\n indices_set = set(indices)\n indices_to_load = (indices_set.difference(self._loaded_indices))\n self._loaded_indices = self._loaded_indices.union(indices_set)\n\n if any(indices_to_load):\n pb = ProgressBar(len(indices_to_load))\n for ii,k in enumerate(xrange(indices_to_load)):\n self._spectra[k]\n pb.update(ii)\n\n else:\n self._loaded_indices = set(indices)\n\n self._spectra.load_all()\n\n\n @property\n def spectra(self):\n return [x[0] for x in self._spectra]\n\n @property\n def headers(self):\n return [self._spectra[ii][1]\n if ii in self._spectra else x\n for ii,x in enumerate(self.allind)]\n\n def select_spectra(self,\n all=None,\n line=None,\n linere=None,\n linereflags=re.IGNORECASE,\n number=None,\n scan=None,\n offset=None,\n source=None,\n sourcere=None,\n sourcereflags=re.IGNORECASE,\n range=None,\n quality=None,\n telescope=None,\n telescopere=None,\n telescopereflags=re.IGNORECASE,\n subscan=None,\n entry=None,\n posang=None,\n #observed=None,\n #reduced=None,\n frequency=None,\n section=None,\n user=None,\n include_old_versions=False,\n ):\n \"\"\"\n Parameters\n ----------\n include_old_versions: bool\n Include spectra with XVER numbers <0? 
These are CLASS spectra that\n have been \"overwritten\" (re-reduced?)\n \"\"\"\n if entry is not None and len(entry)==2:\n return irange(entry[0], entry[1])\n\n if frequency is not None:\n self._load_all_spectra()\n\n sel = [(re.search(re.escape(ensure_bytes(line)), h['LINE'], re.IGNORECASE)\n if line is not None else True) and\n (re.search(ensure_bytes(linere), h['LINE'], linereflags)\n if linere is not None else True) and\n (h['SCAN'] == scan if scan is not None else True) and\n ((h['OFF1'] == offset or\n h['OFF2'] == offset) if offset is not None else True) and\n (re.search(re.escape(ensure_bytes(source)), h['CSOUR'], re.IGNORECASE)\n if source is not None else True) and\n (re.search(ensure_bytes(sourcere), h['CSOUR'], sourcereflags)\n if sourcere is not None else True) and\n (h['OFF1']>range[0] and h['OFF1'] < range[1] and\n h['OFF2']>range[2] and h['OFF2'] < range[3]\n if range is not None and len(range)==4 else True) and\n (h['QUAL'] == quality if quality is not None else True) and\n (re.search(re.escape(ensure_bytes(telescope)), h['CTELE'], re.IGNORECASE)\n if telescope is not None else True) and\n (re.search(ensure_bytes(telescopere), h['CTELE'], telescopereflags)\n if telescopere is not None else True) and\n (h['SUBSCAN']==subscan if subscan is not None else True) and\n ('RESTF' in h and # Need to check that it IS a spectrum: continuum data can't be accessed this way\n h['RESTF'] > frequency[0] and\n h['RESTF'] < frequency[1]\n if frequency is not None and len(frequency)==2\n else True) and\n (h['COMPPOSA']%180 > posang[0] and\n h['COMPPOSA']%180 < posang[1]\n if posang is not None and len(posang)==2\n else True) and\n # 1A uses XVER, 2A uses VER. If neither are present, it's\n # probably not a valid spectrum?\n (h.get('XVER', h.get('VER', -999)) > 0\n if not include_old_versions else True)\n for h in self.headers\n ]\n\n return [ii for ii,k in enumerate(sel) if k]\n\n def get_spectra(self, progressbar=True, **kwargs):\n selected_indices = self.select_spectra(**kwargs)\n\n if not any(selected_indices):\n raise ValueError(\"Selection yielded empty.\")\n\n self._spectra.load(selected_indices, progressbar=progressbar)\n return [self._spectra[ii] for ii in selected_indices]\n\n def get_pyspeckit_spectra(self, progressbar=True, **kwargs):\n\n spdata = self.get_spectra(progressbar=progressbar, **kwargs)\n\n spectra = [pyspeckit.Spectrum(data=data,\n xarr=make_axis(header),\n header=clean_header(header))\n for data,header in spdata]\n\n return spectra\n\n\n def read_observations(self, observation_indices, progressbar=True):\n self._spectra.load(observation_indices, progressbar=progressbar)\n return [self._spectra[ii] for ii in observation_indices]\n\n\n@print_timing\ndef read_class(filename, downsample_factor=None, sourcename=None,\n telescope=None, line=None, posang=None, verbose=False,\n flag_array=None):\n \"\"\"\n Read a binary class file.\n Based on the\n `GILDAS CLASS file type Specification\n <http://iram.fr/IRAMFR/GILDAS/doc/html/class-html/node58.html>`_\n\n Parameters\n ----------\n filename: str\n downsample_factor: None or int\n Factor by which to downsample data by averaging. Useful for\n overresolved data.\n sourcename: str or list of str\n Source names to match to the data (uses regex)\n telescope: str or list of str\n 'XTEL' or 'TELE' parameters: the telescope & instrument\n line: str or list of str\n The line name\n posang: tuple of 2 floats\n The first float is the minimum value for the position angle. 
The second\n float is the maximum value for the position angle.\n verbose: bool\n Log messages with severity INFO\n flag_array: np.ndarray\n An array with the same shape as the data used to flag out\n (remove) data when downsampling. True = flag out\n \"\"\"\n classobj = ClassObject(filename)\n\n if not isinstance(sourcename, (list,tuple)):\n sourcename = [sourcename]\n if not isinstance(telescope, (list,tuple)):\n telescope = [telescope]\n if not isinstance(line, (list,tuple)):\n line = [line]\n\n spectra,headers = [],[]\n if verbose:\n log.info(\"Reading...\")\n selection = [ii\n for source in sourcename\n for tel in telescope\n for li in line\n for ii in classobj.select_spectra(sourcere=source,\n telescope=tel,\n line=li,\n posang=posang)]\n\n sphdr = classobj.read_observations(selection)\n if len(sphdr) == 0:\n return None\n spec,hdr = zip(*sphdr)\n spectra += spec\n headers += hdr\n\n indexes = headers\n\n weight = ~flag_array if flag_array is not None else None\n\n if downsample_factor is not None:\n if verbose:\n log.info(\"Downsampling...\")\n spectra = [downsample_1d(spec, downsample_factor,\n weight=weight)\n for spec in ProgressBar(spectra)]\n headers = [downsample_header(h, downsample_factor)\n for h in ProgressBar(headers)]\n\n for hdr in headers:\n stringify_header(hdr)\n\n return spectra,headers,indexes\n\ndef stringify_header(header):\n from six import string_types, integer_types\n import string\n FITS_allowed_types = (string_types + integer_types +\n (float, complex, bool, np.floating, np.integer,\n np.complexfloating, np.bool_))\n bad_chars = string.printable[96:]\n badcharre = re.compile(\"[{0}]\".format(bad_chars))\n for key, value in header.items():\n if isinstance(value, bytes):\n header[key] = value.decode()\n elif not isinstance(value, FITS_allowed_types):\n header[key] = badcharre.sub(\"\", str(header[key]))\n\ndef downsample_header(hdr, downsample_factor):\n for k in ('NCHAN','NPOIN','DATALEN'):\n if k in hdr:\n hdr[k] = int((hdr[k] / downsample_factor))\n # maybe wrong? h['RCHAN'] = (h['RCHAN']-1) / downsample_factor + 1\n scalefactor = 1./downsample_factor\n hdr['RCHAN'] = (hdr['RCHAN']-1)*scalefactor + 0.5 + scalefactor/2.\n for kw in ['FRES','VRES']:\n if kw in hdr:\n hdr[kw] *= downsample_factor\n return hdr\n\ndef make_axis(header,imagfreq=False):\n \"\"\"\n Create a :class:`pyspeckit.spectrum.units.SpectroscopicAxis` from the CLASS \"header\"\n \"\"\"\n from .. 
import units\n\n rest_frequency = header.get('RESTF')\n xunits = 'MHz'\n nchan = header.get('NCHAN')\n voff = header.get('VOFF')\n foff = header.get('FOFF')\n doppler = header.get('DOPPLER')\n fres = header.get('FRES')\n refchan = header.get('RCHAN')\n imfreq = header.get('IMAGE')\n\n if foff in (None, 0.0) and voff not in (None, 0.0):\n # Radio convention\n foff = -voff/2.997924580e5 * rest_frequency\n\n if not imagfreq:\n xarr = rest_frequency + foff + (numpy.arange(1, nchan+1) - refchan) * fres\n XAxis = units.SpectroscopicAxis(xarr,unit='MHz',refX=rest_frequency*u.MHz)\n else:\n xarr = imfreq - (numpy.arange(1, nchan+1) - refchan) * fres\n XAxis = units.SpectroscopicAxis(xarr,unit='MHz',refX=imfreq*u.MHz)\n\n return XAxis\n\n@print_timing\ndef class_to_obsblocks(filename, telescope, line, datatuple=None, source=None,\n imagfreq=False, DEBUG=False, **kwargs):\n \"\"\"\n Load an entire CLASS observing session into a list of ObsBlocks based on\n matches to the 'telescope', 'line' and 'source' names\n\n Parameters\n ----------\n filename : string\n The Gildas CLASS data file to read the spectra from.\n telescope : list\n List of telescope names to be matched.\n line : list\n List of line names to be matched.\n source : list (optional)\n List of source names to be matched. Defaults to None.\n imagfreq : bool\n Create a SpectroscopicAxis with the image frequency.\n \"\"\"\n if datatuple is None:\n spectra,header,indexes = read_class(filename, **kwargs)\n else:\n spectra,header,indexes = datatuple\n\n obslist = []\n lastscannum = -1\n spectrumlist = None\n for sp,hdr,ind in zip(spectra,header,indexes):\n hdr.update(ind)\n # this is slow but necessary...\n H = pyfits.Header()\n for k,v in iteritems(hdr):\n if hasattr(v,\"__len__\") and not isinstance(v,str):\n # make an array of header entries, but this\n # supports only up to 10 of them...\n if len(v) > 1:\n if len(v) < 10:\n for ii,vv in enumerate(v):\n newkey = k[:7]+str(ii)\n H[newkey] = vv\n elif len(v) < 100:\n for ii,vv in enumerate(v):\n newkey = k[:6]+str(ii)\n H[newkey] = vv\n else:\n raise ValueError(\"Too many entries for {0}\".format(k))\n else:\n H[k] = v[0]\n #elif not any(x in str(v).lower() for x in ('comment', 'end', 'history')):\n # # do not try to add comments...\n # This commented out block used to attempt to reject comments\n # using a private regex in the old pyfits which no longer exists.\n # I don't know if it was necessary.\n else:\n H[k] = v\n scannum = hdr['SCAN']\n if 'XTEL' in hdr and hdr['XTEL'].strip() not in telescope:\n continue\n if hdr['LINE'].strip() not in line:\n continue\n if (source is not None) and (hdr['SOURC'].strip() not in source):\n continue\n hdr['RESTFREQ'] = hdr.get('RESTF')\n H['RESTFREQ'] = hdr.get('RESTF')\n\n #print \"Did not skip %s,%s. 
Scannum, last: %i,%i\" % (hdr['XTEL'],hdr['LINE'],scannum,lastscannum)\n\n if scannum != lastscannum:\n lastscannum = scannum\n if spectrumlist is not None:\n obslist.append(pyspeckit.ObsBlock(spectrumlist))\n xarr = make_axis(hdr,imagfreq=imagfreq)\n spectrumlist = [(\n pyspeckit.Spectrum(xarr=xarr,\n header=H,\n data=sp))]\n else:\n spectrumlist.append(\n pyspeckit.Spectrum(xarr=xarr,\n header=H,\n data=sp))\n\n return obslist\n\nclass LazyItem(object):\n \"\"\"\n Simple lazy spectrum-retriever wrapper\n \"\"\"\n def __init__(self, parent):\n self.parent = parent\n self.sphdr = {}\n self.nind = len(self.parent.allind)\n self.nloaded = 0\n\n def __repr__(self):\n return (\"Set of {0} spectra & headers, {1} loaded\"\n \" ({2:0.2f}%)\".format(self.nind, self.nloaded,\n (float(self.nloaded)/self.nind)*100))\n\n def load_all(self, progressbar=True):\n self.load(range(self.nind))\n\n def load(self, indices, progressbar=True):\n pb = ProgressBar(len(indices))\n counter = 0\n for k in indices:\n self[k]\n counter += 1\n pb.update(counter)\n\n def __getitem__(self, key):\n if key in self.sphdr:\n return self.sphdr[key]\n elif isinstance(key, slice):\n return [self[k] for k in xrange(key.start or 0,\n key.end or len(self.parent.allind),\n key.step or 1)]\n else:\n sphd = read_observation(self.parent._file, key,\n file_description=self.parent.file_description,\n indices=self.parent.allind,\n my_memmap=self.parent._data)\n # Update the header with OTFSCAN and POSANG info\n sphd[1].update(self.parent.allind[key])\n self.sphdr[key] = sphd\n self.nloaded += 1\n return sphd\n\n def __iter__(self):\n return self.next()\n\n def __next__(self):\n for k in self.spheader:\n yield self.spheader[k]\n\n def __contains__(self, key):\n return key in self.sphdr\n\n\n\n@print_timing\ndef class_to_spectra(filename, datatuple=None, **kwargs):\n \"\"\"\n Load each individual spectrum within a CLASS file into a list of Spectrum\n objects\n \"\"\"\n if datatuple is None:\n spectra,header,indexes = read_class(filename, **kwargs)\n else:\n spectra,header,indexes = datatuple\n\n spectrumlist = []\n for sp,hdr,ind in zip(spectra,header,indexes):\n hdr.update(ind)\n xarr = make_axis(hdr)\n spectrumlist.append(\n pyspeckit.Spectrum(xarr=xarr,\n header=hdr,\n data=sp))\n\n return pyspeckit.Spectra(spectrumlist)\n\ndef tests():\n \"\"\"\n Tests are specific to the machine on which this code was developed.\n \"\"\"\n fn1 = '/Users/adam/work/bolocam/hht/class_003.smt'\n #fn1 = '/Users/adam/work/bolocam/hht/class_001.smt'\n #fn1 = '/Users/adam/work/bolocam/hht/test_SMT-F1M-VU-20824-073.cls'\n #fn2 = '/Users/adam/work/bolocam/hht/test_SMT-F1M-VU-79472+203.cls'\n #F1 = read_class(fn1)#,DEBUG=True)\n #F2 = read_class(fn2)\n n2hp = class_to_obsblocks(fn1,telescope=['SMT-F1M-HU','SMT-F1M-VU'],line=['N2HP(3-2)','N2H+(3-2)'])\n hcop = class_to_obsblocks(fn1,telescope=['SMT-F1M-HL','SMT-F1M-VL'],line=['HCOP(3-2)','HCO+(3-2)'])\n"
] | [
[
"numpy.fromfile",
"numpy.arange",
"numpy.memmap",
"numpy.median",
"numpy.ones",
"numpy.arctan2",
"numpy.floor",
"numpy.array",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
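The make_axis routine in the record above turns CLASS header keywords (RESTF, NCHAN, VOFF, FOFF, FRES, RCHAN) into a frequency axis; the same arithmetic can be sketched with plain NumPy. The header values below are hypothetical placeholders, and the radio-convention fallback for FOFF mirrors the formula used in the record.

import numpy as np

# Hypothetical CLASS-style header (placeholder values, not real data).
header = {'RESTF': 230538.0, 'NCHAN': 2048, 'VOFF': 0.0,
          'FOFF': 0.0, 'FRES': 0.122, 'RCHAN': 1024.5}

rest_frequency = header['RESTF']   # rest frequency in MHz
nchan = header['NCHAN']            # number of channels
foff = header['FOFF']              # frequency offset in MHz
voff = header['VOFF']              # velocity offset in km/s

# Radio convention: derive the frequency offset from the velocity offset
# when only VOFF is set, as make_axis does in the record above.
if foff in (None, 0.0) and voff not in (None, 0.0):
    foff = -voff / 2.997924580e5 * rest_frequency

# Channels are numbered 1..NCHAN and RCHAN is the reference channel.
xarr = rest_frequency + foff + (np.arange(1, nchan + 1) - header['RCHAN']) * header['FRES']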
Tarpelite/BERT_self_training | [
"f50ff015f0d3669b5d927a6d28d8a08201c101b6"
] | [
"examples/ner/run_ner_strain.py"
] | [
"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" Fine-tuning the library models for named entity recognition on CoNLL-2003 (Bert or Roberta). \"\"\"\n\n\nimport argparse\nimport glob\nimport logging\nimport os\nimport random\n\nimport numpy as np\nimport torch\nfrom seqeval.metrics import f1_score, precision_score, recall_score\nfrom torch.nn import CrossEntropyLoss\nfrom torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset\nfrom torch.utils.data.distributed import DistributedSampler\nfrom tqdm import tqdm, trange\nimport pickle\n# from pudb import set_trace\n# set_trace()\n\nfrom transformers import (\n MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,\n WEIGHTS_NAME,\n AdamW,\n AutoConfig,\n AutoModelForTokenClassification,\n AutoTokenizer,\n get_linear_schedule_with_warmup,\n)\nfrom utils_ner import convert_examples_to_features, get_labels, read_examples_from_file\n\n\ntry:\n from torch.utils.tensorboard import SummaryWriter\nexcept ImportError:\n from tensorboardX import SummaryWriter\n\n\nlogger = logging.getLogger(__name__)\n\nMODEL_CONFIG_CLASSES = list(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.keys())\nMODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)\n\nALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in MODEL_CONFIG_CLASSES), ())\n\nTOKENIZER_ARGS = [\"do_lower_case\", \"strip_accents\", \"keep_accents\", \"use_fast\"]\n\n\ndef set_seed(args):\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if args.n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n\n\ndef train(args, train_dataset, model, tokenizer, labels, pad_token_label_id):\n \"\"\" Train the model \"\"\"\n if args.local_rank in [-1, 0]:\n tb_writer = SummaryWriter()\n\n args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)\n train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)\n train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)\n\n if args.max_steps > 0:\n t_total = args.max_steps\n args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1\n else:\n t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs\n\n if args.warmup_ratio > 0:\n args.warmup_steps = int(t_total * args.warmup_ratio)\n # Prepare optimizer and schedule (linear warmup and decay)\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": args.weight_decay,\n },\n {\"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], \"weight_decay\": 0.0},\n ]\n optimizer = AdamW(optimizer_grouped_parameters, 
lr=args.learning_rate, eps=args.adam_epsilon)\n scheduler = get_linear_schedule_with_warmup(\n optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total\n )\n\n # Check if saved optimizer or scheduler states exist\n if os.path.isfile(os.path.join(args.model_name_or_path, \"optimizer.pt\")) and os.path.isfile(\n os.path.join(args.model_name_or_path, \"scheduler.pt\")\n ):\n # Load in optimizer and scheduler states\n optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, \"optimizer.pt\")))\n scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, \"scheduler.pt\")))\n\n if args.fp16:\n try:\n from apex import amp\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.\")\n model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)\n\n # multi-gpu training (should be after apex fp16 initialization)\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Distributed training (should be after apex fp16 initialization)\n if args.local_rank != -1:\n model = torch.nn.parallel.DistributedDataParallel(\n model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True\n )\n\n # Train!\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_dataset))\n logger.info(\" Num Epochs = %d\", args.num_train_epochs)\n logger.info(\" Instantaneous batch size per GPU = %d\", args.per_gpu_train_batch_size)\n logger.info(\n \" Total train batch size (w. parallel, distributed & accumulation) = %d\",\n args.train_batch_size\n * args.gradient_accumulation_steps\n * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),\n )\n logger.info(\" Gradient Accumulation steps = %d\", args.gradient_accumulation_steps)\n logger.info(\" Total optimization steps = %d\", t_total)\n\n global_step = 0\n epochs_trained = 0\n steps_trained_in_current_epoch = 0\n # Check if continuing training from a checkpoint\n if os.path.exists(args.model_name_or_path):\n # set global_step to gobal_step of last saved checkpoint from model path\n try:\n global_step = int(args.model_name_or_path.split(\"-\")[-1].split(\"/\")[0])\n except ValueError:\n global_step = 0\n epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)\n steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)\n\n logger.info(\" Continuing training from checkpoint, will skip to saved global_step\")\n logger.info(\" Continuing training from epoch %d\", epochs_trained)\n logger.info(\" Continuing training from global step %d\", global_step)\n logger.info(\" Will skip the first %d steps in the first epoch\", steps_trained_in_current_epoch)\n\n tr_loss, logging_loss = 0.0, 0.0\n model.zero_grad()\n train_iterator = trange(\n epochs_trained, int(args.num_train_epochs), desc=\"Epoch\", disable=args.local_rank not in [-1, 0]\n )\n set_seed(args) # Added here for reproductibility\n for _ in train_iterator:\n epoch_iterator = tqdm(train_dataloader, desc=\"Iter(loss=X.XXX, lr=X.XXXXXXXX)\", disable=args.local_rank not in [-1, 0])\n for step, batch in enumerate(epoch_iterator):\n\n # Skip past any already trained steps if resuming training\n if steps_trained_in_current_epoch > 0:\n steps_trained_in_current_epoch -= 1\n continue\n\n model.train()\n batch = tuple(t.to(args.device) for t in batch)\n inputs = {\"input_ids\": batch[0], \"attention_mask\": batch[1], 
\"soft_labels\": batch[3]}\n if args.model_type != \"distilbert\":\n inputs[\"token_type_ids\"] = (\n batch[2] if args.model_type in [\"bert\", \"xlnet\"] else None\n ) # XLM and RoBERTa don\"t use segment_ids\n\n outputs = model(**inputs)\n loss = outputs[0] # model outputs are always tuple in pytorch-transformers (see doc)\n\n if args.n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu parallel training\n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gradient_accumulation_steps\n\n if args.fp16:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n tr_loss += loss.item()\n\n if (step + 1) % args.gradient_accumulation_steps == 0:\n epoch_iterator.set_description('Iter (loss=%5.3f) lr=%9.7f' % (loss.item(), scheduler.get_lr()[0]))\n if args.fp16:\n torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)\n else:\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n\n optimizer.step()\n scheduler.step() # Update learning rate schedule\n model.zero_grad()\n global_step += 1\n\n if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:\n # Log metrics\n if (\n args.local_rank == -1 and args.evaluate_during_training\n ): # Only evaluate when single GPU otherwise metrics may not average well\n results, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode=\"dev\")\n for key, value in results.items():\n tb_writer.add_scalar(\"eval_{}\".format(key), value, global_step)\n tb_writer.add_scalar(\"lr\", scheduler.get_lr()[0], global_step)\n tb_writer.add_scalar(\"loss\", (tr_loss - logging_loss) / args.logging_steps, global_step)\n logging_loss = tr_loss\n\n if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:\n # Save model checkpoint\n output_dir = os.path.join(args.output_dir, \"checkpoint-{}\".format(global_step))\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n model_to_save = (\n model.module if hasattr(model, \"module\") else model\n ) # Take care of distributed/parallel training\n model_to_save.save_pretrained(output_dir)\n tokenizer.save_pretrained(output_dir)\n\n torch.save(args, os.path.join(output_dir, \"training_args.bin\"))\n logger.info(\"Saving model checkpoint to %s\", output_dir)\n\n torch.save(optimizer.state_dict(), os.path.join(output_dir, \"optimizer.pt\"))\n torch.save(scheduler.state_dict(), os.path.join(output_dir, \"scheduler.pt\"))\n logger.info(\"Saving optimizer and scheduler states to %s\", output_dir)\n\n if args.max_steps > 0 and global_step > args.max_steps:\n epoch_iterator.close()\n break\n if args.max_steps > 0 and global_step > args.max_steps:\n train_iterator.close()\n break\n\n if args.local_rank in [-1, 0]:\n tb_writer.close()\n\n return global_step, tr_loss / global_step\n\n\ndef evaluate(args, model, tokenizer, labels, pad_token_label_id, mode, prefix=\"\"):\n eval_dataset = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode=mode)\n\n args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)\n # Note that DistributedSampler samples randomly\n eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)\n eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)\n\n # multi-gpu evaluate\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Eval!\n logger.info(\"***** Running 
evaluation %s *****\", prefix)\n logger.info(\" Num examples = %d\", len(eval_dataset))\n logger.info(\" Batch size = %d\", args.eval_batch_size)\n eval_loss = 0.0\n nb_eval_steps = 0\n preds = None\n out_label_ids = None\n model.eval()\n for batch in tqdm(eval_dataloader, desc=\"Evaluating\"):\n batch = tuple(t.to(args.device) for t in batch)\n\n with torch.no_grad():\n inputs = {\"input_ids\": batch[0], \"attention_mask\": batch[1], \"labels\": batch[3]}\n if args.model_type != \"distilbert\":\n inputs[\"token_type_ids\"] = (\n batch[2] if args.model_type in [\"bert\", \"xlnet\"] else None\n ) # XLM and RoBERTa don\"t use segment_ids\n outputs = model(**inputs)\n tmp_eval_loss, logits = outputs[:2]\n\n if args.n_gpu > 1:\n tmp_eval_loss = tmp_eval_loss.mean() # mean() to average on multi-gpu parallel evaluating\n\n eval_loss += tmp_eval_loss.item()\n nb_eval_steps += 1\n if preds is None:\n preds = logits.detach().cpu().numpy()\n out_label_ids = inputs[\"labels\"].detach().cpu().numpy()\n else:\n preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)\n out_label_ids = np.append(out_label_ids, inputs[\"labels\"].detach().cpu().numpy(), axis=0)\n\n eval_loss = eval_loss / nb_eval_steps\n preds = np.argmax(preds, axis=2)\n\n label_map = {i: label for i, label in enumerate(labels)}\n\n out_label_list = [[] for _ in range(out_label_ids.shape[0])]\n preds_list = [[] for _ in range(out_label_ids.shape[0])]\n\n for i in range(out_label_ids.shape[0]):\n for j in range(out_label_ids.shape[1]):\n if out_label_ids[i, j] != pad_token_label_id:\n out_label_list[i].append(label_map[out_label_ids[i][j]])\n preds_list[i].append(label_map[preds[i][j]])\n\n print(\"preds:\", preds_list[0])\n print(\"labels:\", out_label_list[0])\n results = {\n \"loss\": eval_loss,\n \"precision\": precision_score(out_label_list, preds_list),\n \"recall\": recall_score(out_label_list, preds_list),\n \"f1\": f1_score(out_label_list, preds_list),\n }\n\n logger.info(\"***** Eval results %s *****\", prefix)\n for key in sorted(results.keys()):\n logger.info(\" %s = %s\", key, str(results[key]))\n\n return results, preds_list\n\n\ndef load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode):\n if args.local_rank not in [-1, 0] and not evaluate:\n torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache\n\n # Load data features from cache or dataset file\n \n \n logger.info(\"Creating features from dataset file at %s\", args.data_dir)\n examples = read_examples_from_file(args.eval_file, mode)\n features = convert_examples_to_features(\n examples,\n labels,\n args.max_seq_length,\n tokenizer,\n cls_token_at_end=bool(args.model_type in [\"xlnet\"]),\n # xlnet has a cls token at the end\n cls_token=tokenizer.cls_token,\n cls_token_segment_id=2 if args.model_type in [\"xlnet\"] else 0,\n sep_token=tokenizer.sep_token,\n sep_token_extra=bool(args.model_type in [\"roberta\"]),\n # roberta uses an extra separator b/w pairs of sentences, cf. 
github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805\n pad_on_left=bool(args.model_type in [\"xlnet\"]),\n # pad on the left for xlnet\n pad_token=tokenizer.pad_token_id,\n pad_token_segment_id=tokenizer.pad_token_type_id,\n pad_token_label_id=pad_token_label_id,\n )\n \n\n if args.local_rank == 0 and not evaluate:\n torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache\n\n # Convert to Tensors and build dataset\n all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)\n all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)\n all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)\n all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)\n\n dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)\n return dataset\n\n\n\n\ndef main():\n parser = argparse.ArgumentParser()\n\n # Required parameters\n parser.add_argument(\n \"--data_dir\",\n default=None,\n type=str,\n help=\"The input data dir. Should contain the training files for the CoNLL-2003 NER task.\",\n )\n parser.add_argument(\n \"--model_type\",\n default=None,\n type=str,\n required=True,\n help=\"Model type selected in the list: \" + \", \".join(MODEL_TYPES),\n )\n parser.add_argument(\n \"--model_name_or_path\",\n default=None,\n type=str,\n required=True,\n help=\"Path to pre-trained model or shortcut name selected in the list: \" + \", \".join(ALL_MODELS),\n )\n parser.add_argument(\n \"--output_dir\",\n default=None,\n type=str,\n required=True,\n help=\"The output directory where the model predictions and checkpoints will be written.\",\n )\n\n # Other parameters\n parser.add_argument(\n \"--labels\",\n default=\"\",\n type=str,\n help=\"Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.\",\n )\n parser.add_argument(\n \"--config_name\", default=\"\", type=str, help=\"Pretrained config name or path if not the same as model_name\"\n )\n parser.add_argument(\n \"--tokenizer_name\",\n default=\"\",\n type=str,\n help=\"Pretrained tokenizer name or path if not the same as model_name\",\n )\n parser.add_argument(\n \"--cache_dir\",\n default=\"\",\n type=str,\n help=\"Where do you want to store the pre-trained models downloaded from s3\",\n )\n parser.add_argument(\n \"--max_seq_length\",\n default=128,\n type=int,\n help=\"The maximum total input sequence length after tokenization. 
Sequences longer \"\n \"than this will be truncated, sequences shorter will be padded.\",\n )\n parser.add_argument(\"--do_train\", action=\"store_true\", help=\"Whether to run training.\")\n parser.add_argument(\"--do_eval\", action=\"store_true\", help=\"Whether to run eval on the dev set.\")\n parser.add_argument(\"--do_predict\", action=\"store_true\", help=\"Whether to run predictions on the test set.\")\n parser.add_argument(\n \"--evaluate_during_training\",\n action=\"store_true\",\n help=\"Whether to run evaluation during training at each logging step.\",\n )\n parser.add_argument(\n \"--do_lower_case\", action=\"store_true\", help=\"Set this flag if you are using an uncased model.\"\n )\n parser.add_argument(\n \"--keep_accents\", action=\"store_const\", const=True, help=\"Set this flag if model is trained with accents.\"\n )\n parser.add_argument(\n \"--strip_accents\", action=\"store_const\", const=True, help=\"Set this flag if model is trained without accents.\"\n )\n parser.add_argument(\"--use_fast\", action=\"store_const\", const=True, help=\"Set this flag to use fast tokenization.\")\n parser.add_argument(\"--per_gpu_train_batch_size\", default=8, type=int, help=\"Batch size per GPU/CPU for training.\")\n parser.add_argument(\n \"--per_gpu_eval_batch_size\", default=8, type=int, help=\"Batch size per GPU/CPU for evaluation.\"\n )\n parser.add_argument(\n \"--gradient_accumulation_steps\",\n type=int,\n default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\",\n )\n parser.add_argument(\"--learning_rate\", default=5e-5, type=float, help=\"The initial learning rate for Adam.\")\n parser.add_argument(\"--weight_decay\", default=0.0, type=float, help=\"Weight decay if we apply some.\")\n parser.add_argument(\"--adam_epsilon\", default=1e-8, type=float, help=\"Epsilon for Adam optimizer.\")\n parser.add_argument(\"--max_grad_norm\", default=1.0, type=float, help=\"Max gradient norm.\")\n parser.add_argument(\n \"--num_train_epochs\", default=3.0, type=float, help=\"Total number of training epochs to perform.\"\n )\n parser.add_argument(\n \"--max_steps\",\n default=-1,\n type=int,\n help=\"If > 0: set total number of training steps to perform. 
Override num_train_epochs.\",\n )\n parser.add_argument(\"--warmup_steps\", default=0, type=int, help=\"Linear warmup over warmup_steps.\")\n\n parser.add_argument(\"--logging_steps\", type=int, default=500, help=\"Log every X updates steps.\")\n parser.add_argument(\"--save_steps\", type=int, default=500, help=\"Save checkpoint every X updates steps.\")\n\n parser.add_argument(\"--logits_file\", type=str, default=\"\")\n parser.add_argument(\"--eval_file\", type=str, default=\"\")\n parser.add_argument(\"--warmup_ratio\", type=float, default=0.1)\n\n parser.add_argument(\n \"--eval_all_checkpoints\",\n action=\"store_true\",\n help=\"Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number\",\n )\n parser.add_argument(\"--no_cuda\", action=\"store_true\", help=\"Avoid using CUDA when available\")\n parser.add_argument(\n \"--overwrite_output_dir\", action=\"store_true\", help=\"Overwrite the content of the output directory\"\n )\n parser.add_argument(\n \"--overwrite_cache\", action=\"store_true\", help=\"Overwrite the cached training and evaluation sets\"\n )\n parser.add_argument(\"--seed\", type=int, default=42, help=\"random seed for initialization\")\n\n \n\n parser.add_argument(\n \"--fp16\",\n action=\"store_true\",\n help=\"Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit\",\n )\n parser.add_argument(\n \"--fp16_opt_level\",\n type=str,\n default=\"O1\",\n help=\"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].\"\n \"See details at https://nvidia.github.io/apex/amp.html\",\n )\n parser.add_argument(\"--local_rank\", type=int, default=-1, help=\"For distributed training: local_rank\")\n parser.add_argument(\"--server_ip\", type=str, default=\"\", help=\"For distant debugging.\")\n parser.add_argument(\"--server_port\", type=str, default=\"\", help=\"For distant debugging.\")\n args = parser.parse_args()\n\n if (\n os.path.exists(args.output_dir)\n and os.listdir(args.output_dir)\n and args.do_train\n and not args.overwrite_output_dir\n ):\n raise ValueError(\n \"Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.\".format(\n args.output_dir\n )\n )\n\n # Setup distant debugging if needed\n if args.server_ip and args.server_port:\n # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script\n import ptvsd\n\n print(\"Waiting for debugger attach\")\n ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)\n ptvsd.wait_for_attach()\n\n # Setup CUDA, GPU & distributed training\n if args.local_rank == -1 or args.no_cuda:\n device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()\n else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs\n torch.cuda.set_device(args.local_rank)\n device = torch.device(\"cuda\", args.local_rank)\n torch.distributed.init_process_group(backend=\"nccl\")\n args.n_gpu = 1\n args.device = device\n\n # Setup logging\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,\n )\n logger.warning(\n \"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s\",\n args.local_rank,\n device,\n args.n_gpu,\n bool(args.local_rank != -1),\n args.fp16,\n )\n\n # Set seed\n set_seed(args)\n\n # Prepare CONLL-2003 task\n labels = get_labels(args.labels)\n num_labels = len(labels)\n # Use cross entropy ignore index as padding label id so that only real label ids contribute to the loss later\n pad_token_label_id = CrossEntropyLoss().ignore_index\n\n # Load pretrained model and tokenizer\n if args.local_rank not in [-1, 0]:\n torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab\n\n args.model_type = args.model_type.lower()\n config = AutoConfig.from_pretrained(\n args.config_name if args.config_name else args.model_name_or_path,\n num_labels=num_labels,\n id2label={str(i): label for i, label in enumerate(labels)},\n label2id={label: i for i, label in enumerate(labels)},\n cache_dir=args.cache_dir if args.cache_dir else None,\n )\n tokenizer_args = {k: v for k, v in vars(args).items() if v is not None and k in TOKENIZER_ARGS}\n logger.info(\"Tokenizer arguments: %s\", tokenizer_args)\n tokenizer = AutoTokenizer.from_pretrained(\n args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,\n cache_dir=args.cache_dir if args.cache_dir else None,\n **tokenizer_args,\n )\n model = AutoModelForTokenClassification.from_pretrained(\n args.model_name_or_path,\n from_tf=bool(\".ckpt\" in args.model_name_or_path),\n config=config,\n cache_dir=args.cache_dir if args.cache_dir else None,\n )\n\n if args.local_rank == 0:\n torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab\n\n model.to(args.device)\n\n logger.info(\"Training/evaluation parameters %s\", args)\n\n # Training\n if args.do_train:\n # train_dataset = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode=\"train\")\n with open(args.logits_file, \"rb\") as f:\n datasets = pickle.load(f)\n \n all_input_ids = torch.tensor(datasets[0], dtype=torch.long)\n all_input_mask = torch.tensor(datasets[1], dtype=torch.long)\n all_segment_ids = torch.tensor(datasets[2], dtype=torch.long)\n all_ner_logits = torch.tensor(datasets[3], dtype=torch.float)\n\n train_dataset = 
TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_ner_logits)\n\n global_step, tr_loss = train(args, train_dataset, model, tokenizer, labels, pad_token_label_id)\n logger.info(\" global_step = %s, average loss = %s\", global_step, tr_loss)\n\n # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()\n if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):\n # Create output directory if needed\n if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:\n os.makedirs(args.output_dir)\n\n logger.info(\"Saving model checkpoint to %s\", args.output_dir)\n # Save a trained model, configuration and tokenizer using `save_pretrained()`.\n # They can then be reloaded using `from_pretrained()`\n model_to_save = (\n model.module if hasattr(model, \"module\") else model\n ) # Take care of distributed/parallel training\n model_to_save.save_pretrained(args.output_dir)\n tokenizer.save_pretrained(args.output_dir)\n\n # Good practice: save your training arguments together with the trained model\n torch.save(args, os.path.join(args.output_dir, \"training_args.bin\"))\n\n # Evaluation\n results = {}\n if args.do_eval and args.local_rank in [-1, 0]:\n tokenizer = AutoTokenizer.from_pretrained(args.output_dir, **tokenizer_args)\n checkpoints = [args.output_dir]\n if args.eval_all_checkpoints:\n checkpoints = list(\n os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + \"/**/\" + WEIGHTS_NAME, recursive=True))\n )\n logging.getLogger(\"pytorch_transformers.modeling_utils\").setLevel(logging.WARN) # Reduce logging\n logger.info(\"Evaluate the following checkpoints: %s\", checkpoints)\n for checkpoint in checkpoints:\n global_step = checkpoint.split(\"-\")[-1] if len(checkpoints) > 1 else \"\"\n model = AutoModelForTokenClassification.from_pretrained(checkpoint)\n model.to(args.device)\n result, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode=\"dev\", prefix=global_step)\n if global_step:\n result = {\"{}_{}\".format(global_step, k): v for k, v in result.items()}\n results.update(result)\n output_eval_file = os.path.join(args.output_dir, \"eval_results.txt\")\n with open(output_eval_file, \"w\") as writer:\n for key in sorted(results.keys()):\n writer.write(\"{} = {}\\n\".format(key, str(results[key])))\n\n if args.do_predict and args.local_rank in [-1, 0]:\n tokenizer = AutoTokenizer.from_pretrained(args.output_dir, **tokenizer_args)\n model = AutoModelForTokenClassification.from_pretrained(args.output_dir)\n model.to(args.device)\n result, predictions = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode=\"test\")\n # Save results\n output_test_results_file = os.path.join(args.output_dir, \"test_results.txt\")\n with open(output_test_results_file, \"w\") as writer:\n for key in sorted(result.keys()):\n writer.write(\"{} = {}\\n\".format(key, str(result[key])))\n # Save predictions\n output_test_predictions_file = os.path.join(args.output_dir, \"test_predictions.txt\")\n with open(output_test_predictions_file, \"w\") as writer:\n with open(os.path.join(args.data_dir, \"test.txt\"), \"r\") as f:\n example_id = 0\n for line in f:\n if line.startswith(\"-DOCSTART-\") or line == \"\" or line == \"\\n\":\n writer.write(line)\n if not predictions[example_id]:\n example_id += 1\n elif predictions[example_id]:\n output_line = line.split()[0] + \" \" + predictions[example_id].pop(0) + \"\\n\"\n writer.write(output_line)\n else:\n logger.warning(\"Maximum sequence length 
exceeded: No prediction for '%s'.\", line.split()[0])\n\n return results\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"torch.utils.data.DataLoader",
"torch.no_grad",
"torch.cuda.manual_seed_all",
"torch.cuda.is_available",
"torch.device",
"torch.distributed.get_rank",
"torch.nn.CrossEntropyLoss",
"torch.distributed.init_process_group",
"torch.utils.data.distributed.DistributedSampler",
"torch.utils.data.TensorDataset",
"torch.distributed.barrier",
"torch.tensor",
"numpy.argmax",
"torch.cuda.device_count",
"torch.distributed.get_world_size",
"torch.nn.parallel.DistributedDataParallel",
"numpy.random.seed",
"torch.cuda.set_device",
"torch.manual_seed",
"torch.utils.data.SequentialSampler",
"torch.utils.data.RandomSampler",
"torch.nn.DataParallel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
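The run_ner_strain.py entry above trains on teacher logits loaded from a pickle instead of hard labels. A minimal sketch of that dataset construction, assuming the pickle holds four parallel arrays in the same order the script reads from its --logits_file argument, could look like this (the file name and batch size are placeholders).

import pickle

import torch
from torch.utils.data import DataLoader, RandomSampler, TensorDataset

# "teacher_logits.pkl" is a hypothetical path standing in for --logits_file.
with open("teacher_logits.pkl", "rb") as f:
    input_ids, input_mask, segment_ids, ner_logits = pickle.load(f)

train_dataset = TensorDataset(
    torch.tensor(input_ids, dtype=torch.long),
    torch.tensor(input_mask, dtype=torch.long),
    torch.tensor(segment_ids, dtype=torch.long),
    torch.tensor(ner_logits, dtype=torch.float),  # soft labels from the teacher model
)

train_loader = DataLoader(train_dataset,
                          sampler=RandomSampler(train_dataset),
                          batch_size=8)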
chipmuenk/acoustics | [
"c85ac95a10c09d7fa15d63b2bdb24acab89fec60",
"c85ac95a10c09d7fa15d63b2bdb24acab89fec60"
] | [
"code/LTI/Demos/Tex_matplotlib.py",
"python-acoustics/tests/test_imaging.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 14 14:15:52 2012\n\nPlot mit TeX-Formatierung der Labels\n(LaTeX muss auf dem Rechner installiert sein)\n\"\"\"\n\nimport numpy as np\nfrom matplotlib import rc\nimport matplotlib.pyplot as plt\n\nrc('text', usetex=True)\nplt.figure(1)\nax = plt.axes([0.1, 0.1, 0.8, 0.7])\nt = np.arange(0.0, 1.0+0.01, 0.01)\ns = np.cos(2*2*np.pi*t)+2\nplt.plot(t, s)\n\nplt.xlabel(r'\\textbf{Time (s)}')\nplt.ylabel(r'\\textit{Voltage} (mV)',fontsize=16)\nplt.title(r\"\\TeX\\ is Number $\\displaystyle\\sum_{n=1}^\\infty\\frac{-e^{i\\pi}}{2^n}$!\",\n fontsize=16, color='r')\nplt.grid(True)\nplt.savefig('tex_demo')\n\nplt.show()",
"import numpy as np\n\nimport pytest\n\n\nhas_matplotlib = pytest.importorskip(\"matplotlib\")\n\nif has_matplotlib: \n from acoustics.bands import octave, third\n from acoustics.imaging import plot_octave, plot_third, plot_bands\n\n\ndef setup_module(imaging):\n imaging.octaves = octave(16, 16000)\n imaging.thirds = third(63, 8000)\n imaging.tl_oct = np.array([3, 4, 5, 12, 15, 24, 28, 23, 35, 45, 55])\n imaging.tl_third = np.array([0, 0, 0, 1, 1, 2, 3, 5, 8, 13, 21,\n 32, 41, 47, 46, 44, 58, 77, 61, 75, 56, 54])\n imaging.title = 'Title'\n imaging.label = 'Label'\n\n\ndef test_plot_octave():\n plot_octave(tl_oct, octaves)\n\n\ndef test_plot_octave_kHz():\n plot_octave(tl_oct, octaves, kHz=True, xlabel=label, ylabel=label,\n title=title, separator='.')\n\n\ndef test_plot_third_octave():\n plot_third(tl_third, thirds, marker='s', separator=',')\n\n\ndef test_plot_third_octave_kHz():\n plot_third(tl_third, thirds, marker='s', kHz=True, xlabel=label,\n ylabel=label, title=title)\n\n\ndef test_plot_band_oct():\n plot_bands(tl_oct, octaves, axes=None, band_type='octave')\n\n\ndef teardown_module(imaging):\n pass\n"
] | [
[
"matplotlib.pyplot.title",
"numpy.arange",
"numpy.cos",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.rc",
"matplotlib.pyplot.figure"
],
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
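The test_imaging.py entry above exercises the python-acoustics plotting helpers. Assuming that package is installed, the calls it makes can be reproduced directly; the label and title strings below are placeholders, and the keyword arguments follow the test functions in the record.

import numpy as np
import matplotlib.pyplot as plt

from acoustics.bands import octave
from acoustics.imaging import plot_octave

# Octave-band centre frequencies from 16 Hz to 16 kHz, as in setup_module above.
octaves = octave(16, 16000)
tl_oct = np.array([3, 4, 5, 12, 15, 24, 28, 23, 35, 45, 55])  # one level per band

plot_octave(tl_oct, octaves)  # default Hz axis
plot_octave(tl_oct, octaves, kHz=True, xlabel='Band', ylabel='Level (dB)',
            title='Octave-band levels', separator='.')
plt.show()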
kuldeepaman/tf-pose | [
"8050912c52a7b4f3c8a2656f267d47ba21d093f6",
"8050912c52a7b4f3c8a2656f267d47ba21d093f6"
] | [
"scripts/pyqtgraph-develop/examples/MultiPlotWidget.py",
"scripts/pyqtgraph-develop/examples/ScaleBar.py"
] | [
"#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\n## Add path to library (just for examples; you do not need this)\r\nimport initExample\r\n\r\n\r\nfrom scipy import random\r\nfrom numpy import linspace\r\nfrom pyqtgraph.Qt import QtGui, QtCore\r\nimport pyqtgraph as pg\r\nfrom pyqtgraph import MultiPlotWidget\r\ntry:\r\n from pyqtgraph.metaarray import *\r\nexcept:\r\n print(\"MultiPlot is only used with MetaArray for now (and you do not have the metaarray package)\")\r\n exit()\r\n\r\napp = QtGui.QApplication([])\r\nmw = QtGui.QMainWindow()\r\nmw.resize(800,800)\r\npw = MultiPlotWidget()\r\nmw.setCentralWidget(pw)\r\nmw.show()\r\n\r\ndata = random.normal(size=(3, 1000)) * np.array([[0.1], [1e-5], [1]])\r\nma = MetaArray(data, info=[\r\n {'name': 'Signal', 'cols': [\r\n {'name': 'Col1', 'units': 'V'}, \r\n {'name': 'Col2', 'units': 'A'}, \r\n {'name': 'Col3'},\r\n ]}, \r\n {'name': 'Time', 'values': linspace(0., 1., 1000), 'units': 's'}\r\n ])\r\npw.plot(ma)\r\n\r\n## Start Qt event loop unless running in interactive mode.\r\nif __name__ == '__main__':\r\n import sys\r\n if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):\r\n QtGui.QApplication.instance().exec_()\r\n\r\n",
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nDemonstrates ScaleBar\r\n\"\"\"\r\nimport initExample ## Add path to library (just for examples; you do not need this)\r\n\r\nimport pyqtgraph as pg\r\nfrom pyqtgraph.Qt import QtCore, QtGui\r\nimport numpy as np\r\n\r\npg.mkQApp()\r\nwin = pg.GraphicsLayoutWidget(show=True)\r\nwin.setWindowTitle('pyqtgraph example: ScaleBar')\r\n\r\nvb = win.addViewBox()\r\nvb.setAspectLocked()\r\n\r\nimg = pg.ImageItem()\r\nimg.setImage(np.random.normal(size=(100,100)))\r\nimg.scale(0.01, 0.01)\r\nvb.addItem(img)\r\n\r\nscale = pg.ScaleBar(size=0.1)\r\nscale.setParentItem(vb)\r\nscale.anchor((1, 1), (1, 1), offset=(-20, -20))\r\n\r\n## Start Qt event loop unless running in interactive mode or using pyside.\r\nif __name__ == '__main__':\r\n import sys\r\n if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):\r\n QtGui.QApplication.instance().exec_()\r\n"
] | [
[
"scipy.random.normal",
"numpy.linspace"
],
[
"numpy.random.normal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
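The MultiPlotWidget example in the record above builds three channels with very different amplitudes by broadcasting a column of scale factors over a single noise array; the same construction, stripped of the Qt and MetaArray dependencies, is just a few NumPy lines.

import numpy as np

# One (3, 1000) array: three signals, each row scaled by its own factor
# via broadcasting, matching the data built in the MultiPlotWidget example.
scales = np.array([[0.1], [1e-5], [1.0]])        # column vector, one scale per channel
data = np.random.normal(size=(3, 1000)) * scales
t = np.linspace(0.0, 1.0, 1000)                  # shared time axis in seconds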
lilies/Cirq | [
"519b8b70ba4d2d92d1c034c398161ebdbd23e2e7",
"519b8b70ba4d2d92d1c034c398161ebdbd23e2e7",
"519b8b70ba4d2d92d1c034c398161ebdbd23e2e7",
"519b8b70ba4d2d92d1c034c398161ebdbd23e2e7"
] | [
"cirq/contrib/svg/svg_test.py",
"cirq/optimizers/two_qubit_decompositions_test.py",
"cirq/experiments/qubit_characterizations_test.py",
"cirq/protocols/has_stabilizer_effect_protocol_test.py"
] | [
"import pytest\nimport numpy as np\n\nimport cirq\nfrom cirq.contrib.svg import circuit_to_svg\n\n\ndef test_svg():\n a, b, c = cirq.LineQubit.range(3)\n\n svg_text = circuit_to_svg(\n cirq.Circuit(\n cirq.CNOT(a, b),\n cirq.CZ(b, c),\n cirq.SWAP(a, c),\n cirq.PhasedXPowGate(exponent=0.123, phase_exponent=0.456).on(c),\n cirq.Z(a),\n cirq.measure(a, b, c, key='z'),\n cirq.MatrixGate(np.eye(2)).on(a),\n ))\n assert '<svg' in svg_text\n assert '</svg>' in svg_text\n\n\ndef test_svg_noise():\n noise_model = cirq.ConstantQubitNoiseModel(cirq.DepolarizingChannel(p=1e-3))\n q = cirq.LineQubit(0)\n circuit = cirq.Circuit(cirq.X(q))\n circuit = cirq.Circuit(noise_model.noisy_moments(circuit.moments, [q]))\n svg = circuit_to_svg(circuit)\n assert '>D(0.001)</text>' in svg\n\n\ndef test_validation():\n with pytest.raises(ValueError):\n circuit_to_svg(cirq.Circuit())\n\n q0 = cirq.LineQubit(0)\n with pytest.raises(ValueError):\n circuit_to_svg(\n cirq.Circuit([cirq.Moment([cirq.X(q0)]),\n cirq.Moment([])]))\n",
"# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport cmath\nimport random\n\nimport numpy as np\nimport pytest\n\nimport cirq\nfrom cirq import value\nfrom cirq.optimizers.two_qubit_decompositions import (\n _parity_interaction, _is_trivial_angle\n)\n\n\[email protected]('rad,expected', (lambda err, largeErr: [\n (np.pi/4, True),\n (np.pi/4 + err, True),\n (np.pi/4 + largeErr, False),\n (np.pi/4 - err, True),\n (np.pi/4 - largeErr, False),\n (-np.pi/4, True),\n (-np.pi/4 + err, True),\n (-np.pi/4 + largeErr, False),\n (-np.pi/4 - err, True),\n (-np.pi/4 - largeErr, False),\n (0, True),\n (err, True),\n (largeErr, False),\n (-err, True),\n (-largeErr, False),\n (np.pi/8, False),\n (-np.pi/8, False),\n])(1e-8*2/3, 1e-8*4/3))\ndef test_is_trivial_angle(rad, expected):\n tolerance = 1e-8\n out = _is_trivial_angle(rad, tolerance)\n assert out == expected, 'rad = {}'.format(rad)\n\n\ndef _operations_to_matrix(operations, qubits):\n return cirq.Circuit(operations).unitary(\n qubit_order=cirq.QubitOrder.explicit(qubits),\n qubits_that_should_be_present=qubits)\n\n\ndef _random_single_partial_cz_effect():\n return cirq.dot(\n cirq.kron(cirq.testing.random_unitary(2),\n cirq.testing.random_unitary(2)),\n np.diag([1, 1, 1, cmath.exp(2j * random.random() * np.pi)]),\n cirq.kron(cirq.testing.random_unitary(2),\n cirq.testing.random_unitary(2)))\n\n\ndef _random_double_partial_cz_effect():\n return cirq.dot(\n cirq.kron(cirq.testing.random_unitary(2),\n cirq.testing.random_unitary(2)),\n np.diag([1, 1, 1, cmath.exp(2j * random.random() * np.pi)]),\n cirq.kron(cirq.testing.random_unitary(2),\n cirq.testing.random_unitary(2)),\n np.diag([1, 1, 1, cmath.exp(2j * random.random() * np.pi)]),\n cirq.kron(cirq.testing.random_unitary(2),\n cirq.testing.random_unitary(2)))\n\n\ndef _random_double_full_cz_effect():\n return cirq.dot(\n cirq.kron(cirq.testing.random_unitary(2),\n cirq.testing.random_unitary(2)),\n cirq.unitary(cirq.CZ),\n cirq.kron(cirq.testing.random_unitary(2),\n cirq.testing.random_unitary(2)),\n cirq.unitary(cirq.CZ),\n cirq.kron(cirq.testing.random_unitary(2),\n cirq.testing.random_unitary(2)))\n\n\ndef assert_cz_depth_below(operations, threshold, must_be_full):\n total_cz = 0\n\n for op in operations:\n assert len(op.qubits) <= 2\n if len(op.qubits) == 2:\n assert isinstance(op.gate, cirq.CZPowGate)\n e = value.canonicalize_half_turns(op.gate.exponent)\n if must_be_full:\n assert e == 1\n total_cz += abs(e)\n\n assert total_cz <= threshold\n\n\ndef assert_ops_implement_unitary(q0, q1, operations, intended_effect,\n atol=0.01):\n actual_effect = _operations_to_matrix(operations, (q0, q1))\n assert cirq.allclose_up_to_global_phase(actual_effect, intended_effect,\n atol=atol)\n\n\[email protected]('max_partial_cz_depth,max_full_cz_depth,effect', [\n (0, 0, np.eye(4)),\n (0, 0, np.array([\n [0, 0, 0, 1],\n [0, 0, 1, 0],\n [0, 1, 0, 0],\n [1, 0, 0, 0j],\n ])),\n (0, 0, cirq.unitary(cirq.CZ**0.00000001)),\n\n (0.5, 2, cirq.unitary(cirq.CZ**0.5)),\n\n (1, 
1, cirq.unitary(cirq.CZ)),\n (1, 1, cirq.unitary(cirq.CNOT)),\n (1, 1, np.array([\n [1, 0, 0, 1j],\n [0, 1, 1j, 0],\n [0, 1j, 1, 0],\n [1j, 0, 0, 1],\n ]) * np.sqrt(0.5)),\n (1, 1, np.array([\n [1, 0, 0, -1j],\n [0, 1, -1j, 0],\n [0, -1j, 1, 0],\n [-1j, 0, 0, 1],\n ]) * np.sqrt(0.5)),\n (1, 1, np.array([\n [1, 0, 0, 1j],\n [0, 1, -1j, 0],\n [0, -1j, 1, 0],\n [1j, 0, 0, 1],\n ]) * np.sqrt(0.5)),\n\n (1.5, 3, cirq.map_eigenvalues(cirq.unitary(cirq.SWAP),\n lambda e: e**0.5)),\n\n (2, 2, cirq.unitary(cirq.SWAP).dot(cirq.unitary(cirq.CZ))),\n\n (3, 3, cirq.unitary(cirq.SWAP)),\n (3, 3, np.array([\n [0, 0, 0, 1],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [1, 0, 0, 0j],\n ])),\n] + [\n (1, 2, _random_single_partial_cz_effect()) for _ in range(10)\n] + [\n (2, 2, _random_double_full_cz_effect()) for _ in range(10)\n] + [\n (2, 3, _random_double_partial_cz_effect()) for _ in range(10)\n] + [\n (3, 3, cirq.testing.random_unitary(4)) for _ in range(10)\n])\ndef test_two_to_ops_equivalent_and_bounded_for_known_and_random(\n max_partial_cz_depth,\n max_full_cz_depth,\n effect):\n q0 = cirq.NamedQubit('q0')\n q1 = cirq.NamedQubit('q1')\n\n operations_with_partial = cirq.two_qubit_matrix_to_operations(\n q0, q1, effect, True)\n operations_with_full = cirq.two_qubit_matrix_to_operations(\n q0, q1, effect, False)\n\n assert_ops_implement_unitary(q0, q1, operations_with_partial, effect)\n assert_ops_implement_unitary(q0, q1, operations_with_full, effect)\n\n assert_cz_depth_below(operations_with_partial, max_partial_cz_depth, False)\n assert_cz_depth_below(operations_with_full, max_full_cz_depth, True)\n\n\ndef test_trivial_parity_interaction_corner_case():\n q0 = cirq.NamedQubit('q0')\n q1 = cirq.NamedQubit('q1')\n nearPi4 = np.pi/4 * 0.99\n tolerance = 1e-2\n circuit = cirq.Circuit(_parity_interaction(q0, q1, -nearPi4, tolerance))\n assert len(circuit) == 2\n\n\ndef test_kak_decomposition_depth_full_cz():\n a, b = cirq.LineQubit.range(2)\n\n # Random.\n u = cirq.testing.random_unitary(4)\n operations_with_full = cirq.two_qubit_matrix_to_operations(a, b, u, False)\n c = cirq.Circuit(operations_with_full)\n # 3 CZ, 3+1 PhasedX, 1 Z\n assert len(c) <= 8\n\n # Double-axis interaction.\n u = cirq.unitary(cirq.Circuit(cirq.CNOT(a, b), cirq.CNOT(b, a)))\n operations_with_part = cirq.two_qubit_matrix_to_operations(a, b, u, False)\n c = cirq.Circuit(operations_with_part)\n # 2 CZ, 2+1 PhasedX, 1 Z\n assert len(c) <= 6\n\n # Test unoptimized/un-cleaned length of Double-axis interaction.\n u = cirq.unitary(cirq.Circuit(cirq.CNOT(a, b), cirq.CNOT(b, a)))\n operations_with_part = cirq.two_qubit_matrix_to_operations(a, b, u, False,\n 1e-8, False)\n c = cirq.Circuit(operations_with_part)\n assert len(c) > 6 # Length should be 13 with extra Pauli gates\n\n # Partial single-axis interaction.\n u = cirq.unitary(cirq.CNOT**0.1)\n operations_with_part = cirq.two_qubit_matrix_to_operations(a, b, u, False)\n c = cirq.Circuit(operations_with_part)\n # 2 CZ, 2+1 PhasedX, 1 Z\n assert len(c) <= 6\n\n # Full single-axis interaction.\n u = cirq.unitary(cirq.ControlledGate(cirq.Y))\n operations_with_part = cirq.two_qubit_matrix_to_operations(a, b, u, False)\n c = cirq.Circuit(operations_with_part)\n # 1 CZ, 1+1 PhasedX, 1 Z\n assert len(c) <= 4\n\n\ndef test_kak_decomposition_depth_partial_cz():\n a, b = cirq.LineQubit.range(2)\n\n # Random.\n u = cirq.testing.random_unitary(4)\n operations_with_full = cirq.two_qubit_matrix_to_operations(a, b, u, True)\n c = cirq.Circuit(operations_with_full)\n # 3 CP, 3+1 PhasedX, 1 Z\n assert len(c) <= 
8\n\n # Double-axis interaction.\n u = cirq.unitary(cirq.Circuit(cirq.CNOT(a, b), cirq.CNOT(b, a)))\n operations_with_part = cirq.two_qubit_matrix_to_operations(a, b, u, True)\n c = cirq.Circuit(operations_with_part)\n # 2 CP, 2+1 PhasedX, 1 Z\n assert len(c) <= 6\n\n # Partial single-axis interaction.\n u = cirq.unitary(cirq.CNOT**0.1)\n operations_with_part = cirq.two_qubit_matrix_to_operations(a, b, u, True)\n c = cirq.Circuit(operations_with_part)\n # 1 CP, 1+1 PhasedX, 1 Z\n assert len(c) <= 4\n\n # Full single-axis interaction.\n u = cirq.unitary(cirq.ControlledGate(cirq.Y))\n operations_with_part = cirq.two_qubit_matrix_to_operations(a, b, u, True)\n c = cirq.Circuit(operations_with_part)\n # 1 CP, 1+1 PhasedX, 1 Z\n assert len(c) <= 4\n",
"# Copyright 2019 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport pytest\n\nimport matplotlib.pyplot as plt\n\nimport cirq\nimport cirq.experiments.qubit_characterizations as ceqc\nfrom cirq import GridQubit\nfrom cirq import circuits, ops, sim\nfrom cirq.experiments import (rabi_oscillations,\n single_qubit_randomized_benchmarking,\n two_qubit_randomized_benchmarking,\n single_qubit_state_tomography,\n two_qubit_state_tomography)\n\n\ndef test_rabi_oscillations():\n # Check that the excited state population matches the ideal case within a\n # small statistical error.\n simulator = sim.Simulator()\n qubit = GridQubit(0, 0)\n results = rabi_oscillations(simulator, qubit, np.pi, repetitions=1000)\n data = np.asarray(results.data)\n angles = data[:, 0]\n actual_pops = data[:, 1]\n target_pops = 0.5 - 0.5 * np.cos(angles)\n rms_err = np.sqrt(np.mean((target_pops - actual_pops) ** 2))\n assert rms_err < 0.1\n\n\ndef test_single_qubit_cliffords():\n I = np.eye(2)\n X = np.array([[0, 1], [1, 0]])\n Y = np.array([[0, -1j], [1j, 0]])\n Z = np.diag([1, -1])\n PAULIS = (I, X, Y, Z)\n\n def is_pauli(u):\n return any(cirq.equal_up_to_global_phase(u, p) for p in PAULIS)\n\n cliffords = ceqc._single_qubit_cliffords()\n assert len(cliffords.c1_in_xy) == 24\n assert len(cliffords.c1_in_xz) == 24\n\n def unitary(gates):\n U = np.eye(2)\n for gate in gates:\n U = cirq.unitary(gate) @ U\n return U\n\n xy_unitaries = [unitary(gates) for gates in cliffords.c1_in_xy]\n xz_unitaries = [unitary(gates) for gates in cliffords.c1_in_xz]\n\n def check_distinct(unitaries):\n n = len(unitaries)\n for i in range(n):\n for j in range(i + 1, n):\n Ui, Uj = unitaries[i], unitaries[j]\n assert not cirq.allclose_up_to_global_phase(Ui, Uj), f'{i}, {j}'\n\n # Check that unitaries in each decomposition are distinct.\n check_distinct(xy_unitaries)\n check_distinct(xz_unitaries)\n\n # Check that each decomposition gives the same set of unitaries.\n for Uxy in xy_unitaries:\n assert any(\n cirq.allclose_up_to_global_phase(Uxy, Uxz) for Uxz in xz_unitaries)\n\n # Check that each unitary fixes the Pauli group.\n for u in xy_unitaries:\n for p in PAULIS:\n assert is_pauli(u @ p @ u.conj().T), str(u)\n\n # Check that XZ decomposition has at most one X gate per clifford.\n for gates in cliffords.c1_in_xz:\n num_x = len([gate for gate in gates if isinstance(gate, cirq.XPowGate)])\n num_z = len([gate for gate in gates if isinstance(gate, cirq.ZPowGate)])\n assert num_x + num_z == len(gates)\n assert num_x <= 1\n\n\ndef test_single_qubit_randomized_benchmarking():\n # Check that the ground state population at the end of the Clifford\n # sequences is always unity.\n simulator = sim.Simulator()\n qubit = GridQubit(0, 0)\n num_cfds = range(5, 20, 5)\n results = single_qubit_randomized_benchmarking(simulator,\n qubit,\n num_clifford_range=num_cfds,\n repetitions=100)\n g_pops = np.asarray(results.data)[:, 1]\n assert np.isclose(np.mean(g_pops), 1.0)\n\n\ndef 
test_two_qubit_randomized_benchmarking():\n # Check that the ground state population at the end of the Clifford\n # sequences is always unity.\n simulator = sim.Simulator()\n q_0 = GridQubit(0, 0)\n q_1 = GridQubit(0, 1)\n num_cfds = [5, 10]\n results = two_qubit_randomized_benchmarking(simulator,\n q_0,\n q_1,\n num_clifford_range=num_cfds,\n num_circuits=10,\n repetitions=100)\n g_pops = np.asarray(results.data)[:, 1]\n assert np.isclose(np.mean(g_pops), 1.0)\n\n\ndef test_single_qubit_state_tomography():\n # Check that the density matrices of the output states of X/2, Y/2 and\n # H + Y gates closely match the ideal cases.\n simulator = sim.Simulator()\n qubit = GridQubit(0, 0)\n\n circuit_1 = circuits.Circuit(ops.X(qubit)**0.5)\n circuit_2 = circuits.Circuit(ops.Y(qubit)**0.5)\n circuit_3 = circuits.Circuit(ops.H(qubit), ops.Y(qubit))\n\n act_rho_1 = single_qubit_state_tomography(simulator, qubit, circuit_1,\n 1000).data\n act_rho_2 = single_qubit_state_tomography(simulator, qubit, circuit_2,\n 1000).data\n act_rho_3 = single_qubit_state_tomography(simulator, qubit, circuit_3,\n 1000).data\n\n tar_rho_1 = np.array([[0.5, 0.5j], [-0.5j, 0.5]])\n tar_rho_2 = np.array([[0.5, 0.5], [0.5, 0.5]])\n tar_rho_3 = np.array([[0.5, -0.5], [-0.5, 0.5]])\n\n np.testing.assert_almost_equal(act_rho_1, tar_rho_1, decimal=1)\n np.testing.assert_almost_equal(act_rho_2, tar_rho_2, decimal=1)\n np.testing.assert_almost_equal(act_rho_3, tar_rho_3, decimal=1)\n\n\ndef test_two_qubit_state_tomography():\n # Check that the density matrices of the four Bell states closely match\n # the ideal cases. In addition, check that the output states of\n # single-qubit rotations (H, H), (X/2, Y/2), (Y/2, X/2) have the correct\n # density matrices.\n\n simulator = sim.Simulator()\n q_0 = GridQubit(0, 0)\n q_1 = GridQubit(0, 1)\n\n circuit_00 = circuits.Circuit(ops.H(q_0), ops.CNOT(q_0, q_1))\n circuit_01 = circuits.Circuit(ops.X(q_1), ops.H(q_0), ops.CNOT(q_0, q_1))\n circuit_10 = circuits.Circuit(ops.X(q_0), ops.H(q_0), ops.CNOT(q_0, q_1))\n circuit_11 = circuits.Circuit(ops.X(q_0), ops.X(q_1), ops.H(q_0),\n ops.CNOT(q_0, q_1))\n circuit_hh = circuits.Circuit(ops.H(q_0), ops.H(q_1))\n circuit_xy = circuits.Circuit(ops.X(q_0)**0.5, ops.Y(q_1)**0.5)\n circuit_yx = circuits.Circuit(ops.Y(q_0)**0.5, ops.X(q_1)**0.5)\n\n act_rho_00 = two_qubit_state_tomography(simulator, q_0, q_1, circuit_00,\n 1000).data\n act_rho_01 = two_qubit_state_tomography(simulator, q_0, q_1, circuit_01,\n 1000).data\n act_rho_10 = two_qubit_state_tomography(simulator, q_0, q_1, circuit_10,\n 1000).data\n act_rho_11 = two_qubit_state_tomography(simulator, q_0, q_1, circuit_11,\n 1000).data\n act_rho_hh = two_qubit_state_tomography(simulator, q_0, q_1, circuit_hh,\n 1000).data\n act_rho_xy = two_qubit_state_tomography(simulator, q_0, q_1, circuit_xy,\n 1000).data\n act_rho_yx = two_qubit_state_tomography(simulator, q_0, q_1, circuit_yx,\n 1000).data\n\n tar_rho_00 = np.outer([1.0, 0, 0, 1.0], [1.0, 0, 0, 1.0]) * 0.5\n tar_rho_01 = np.outer([0, 1.0, 1.0, 0], [0, 1.0, 1.0, 0]) * 0.5\n tar_rho_10 = np.outer([1.0, 0, 0, -1.0], [1.0, 0, 0, -1.0]) * 0.5\n tar_rho_11 = np.outer([0, 1.0, -1.0, 0], [0, 1.0, -1.0, 0]) * 0.5\n tar_rho_hh = np.outer([0.5, 0.5, 0.5, 0.5], [0.5, 0.5, 0.5, 0.5])\n tar_rho_xy = np.outer([0.5, 0.5, -0.5j, -0.5j], [0.5, 0.5, 0.5j, 0.5j])\n tar_rho_yx = np.outer([0.5, -0.5j, 0.5, -0.5j], [0.5, 0.5j, 0.5, 0.5j])\n\n np.testing.assert_almost_equal(act_rho_00, tar_rho_00, decimal=1)\n np.testing.assert_almost_equal(act_rho_01, tar_rho_01, 
decimal=1)\n np.testing.assert_almost_equal(act_rho_10, tar_rho_10, decimal=1)\n np.testing.assert_almost_equal(act_rho_11, tar_rho_11, decimal=1)\n np.testing.assert_almost_equal(act_rho_hh, tar_rho_hh, decimal=1)\n np.testing.assert_almost_equal(act_rho_xy, tar_rho_xy, decimal=1)\n np.testing.assert_almost_equal(act_rho_yx, tar_rho_yx, decimal=1)\n\n\ndef test_tomography_plot_raises_for_incorrect_number_of_axes():\n simulator = sim.Simulator()\n qubit = GridQubit(0, 0)\n circuit = circuits.Circuit(ops.X(qubit)**0.5)\n result = single_qubit_state_tomography(simulator, qubit, circuit, 1000)\n with pytest.raises(TypeError): # ax is not a List[plt.Axes]\n ax = plt.subplot()\n result.plot(ax)\n with pytest.raises(ValueError):\n _, axes = plt.subplots(1, 3)\n result.plot(axes)\n",
"# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\n\nimport cirq\n\n\nclass No:\n pass\n\n\nclass No1:\n\n def _has_stabilizer_effect_(self):\n return NotImplemented\n\n\nclass No2:\n\n def _has_stabilizer_effect_(self):\n return None\n\n\nclass No3:\n\n def _has_stabilizer_effect_(self):\n return False\n\n\nclass Yes:\n\n def _has_stabilizer_effect_(self):\n return True\n\n\nclass EmptyOp(cirq.Operation):\n \"\"\"A trivial operation.\"\"\"\n\n @property\n def qubits(self):\n # coverage: ignore\n return ()\n\n def with_qubits(self, *new_qubits):\n # coverage: ignore\n return self\n\n\nclass NoOp(EmptyOp):\n\n @property\n def gate(self):\n return No()\n\n\nclass NoOp1(EmptyOp):\n\n @property\n def gate(self):\n return No1()\n\n\nclass NoOp2(EmptyOp):\n\n @property\n def gate(self):\n return No2()\n\n\nclass NoOp3(EmptyOp):\n\n @property\n def gate(self):\n return No3()\n\n\nclass YesOp(EmptyOp):\n\n @property\n def gate(self):\n return Yes()\n\n\nclass OpWithUnitary(EmptyOp):\n\n def __init__(self, unitary):\n self.unitary = unitary\n\n def _unitary_(self):\n return self.unitary\n\n\ndef test_inconclusive():\n assert not cirq.has_stabilizer_effect(object())\n assert not cirq.has_stabilizer_effect('boo')\n assert not cirq.has_stabilizer_effect(cirq.SingleQubitGate())\n assert not cirq.has_stabilizer_effect(No())\n assert not cirq.has_stabilizer_effect(NoOp())\n\n\ndef test_via_has_stabilizer_effect_method():\n assert not cirq.has_stabilizer_effect(No1())\n assert not cirq.has_stabilizer_effect(No2())\n assert not cirq.has_stabilizer_effect(No3())\n assert cirq.has_stabilizer_effect(Yes())\n\n\ndef test_via_gate_of_op():\n assert cirq.has_stabilizer_effect(YesOp())\n assert not cirq.has_stabilizer_effect(NoOp1())\n assert not cirq.has_stabilizer_effect(NoOp2())\n assert not cirq.has_stabilizer_effect(NoOp3())\n\n\ndef test_via_unitary():\n op1 = OpWithUnitary(np.array([[0, 1], [1, 0]]))\n assert cirq.has_stabilizer_effect(op1)\n\n op2 = OpWithUnitary(np.array([[0, 1j], [1j, 0]]))\n assert cirq.has_stabilizer_effect(op2)\n\n op3 = OpWithUnitary(np.array([[1, 0], [0, np.sqrt(1j)]]))\n assert not cirq.has_stabilizer_effect(op3)\n\n\ndef test_via_unitary_not_supported():\n # Unitaries larger than 2x2 are not yet supported.\n op = OpWithUnitary(cirq.unitary(cirq.CNOT))\n assert not cirq.has_stabilizer_effect(op)\n assert not cirq.has_stabilizer_effect(op)"
] | [
[
"numpy.eye"
],
[
"numpy.eye",
"numpy.array",
"numpy.sqrt"
],
[
"numpy.diag",
"numpy.asarray",
"numpy.eye",
"matplotlib.pyplot.subplots",
"numpy.cos",
"numpy.testing.assert_almost_equal",
"matplotlib.pyplot.subplot",
"numpy.mean",
"numpy.outer",
"numpy.array"
],
[
"numpy.array",
"numpy.sqrt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
playerkk/HoiTransformer | [
"b710216d6b338863ebe9d40a96765ab52780cefa"
] | [
"models/backbone.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\"\"\"\nBackbone modules.\n\"\"\"\nimport torch\nimport torch.nn.functional as F\nimport torchvision\nfrom torch import nn\nfrom torchvision.models._utils import IntermediateLayerGetter\nfrom typing import Dict, List\n\nfrom util.misc import NestedTensor, is_main_process\n\nfrom .position_encoding import build_position_encoding\n\n\nclass FrozenBatchNorm2d(torch.nn.Module):\n \"\"\"\n BatchNorm2d where the batch statistics and the affine parameters are fixed.\n\n Copy-paste from torchvision.misc.ops with added eps before rqsrt,\n without which any other models than torchvision.models.resnet[18,34,50,101]\n produce nans.\n \"\"\"\n\n def __init__(self, n):\n super(FrozenBatchNorm2d, self).__init__()\n self.register_buffer(\"weight\", torch.ones(n))\n self.register_buffer(\"bias\", torch.zeros(n))\n self.register_buffer(\"running_mean\", torch.zeros(n))\n self.register_buffer(\"running_var\", torch.ones(n))\n\n def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,\n missing_keys, unexpected_keys, error_msgs):\n num_batches_tracked_key = prefix + 'num_batches_tracked'\n if num_batches_tracked_key in state_dict:\n del state_dict[num_batches_tracked_key]\n\n super(FrozenBatchNorm2d, self)._load_from_state_dict(\n state_dict, prefix, local_metadata, strict,\n missing_keys, unexpected_keys, error_msgs)\n\n def forward(self, x):\n # move reshapes to the beginning\n # to make it fuser-friendly\n w = self.weight.reshape(1, -1, 1, 1)\n b = self.bias.reshape(1, -1, 1, 1)\n rv = self.running_var.reshape(1, -1, 1, 1)\n rm = self.running_mean.reshape(1, -1, 1, 1)\n eps = 1e-5\n scale = w * (rv + eps).rsqrt()\n bias = b - rm * scale\n return x * scale + bias\n\n\nclass BackboneBase(nn.Module):\n\n def __init__(self, backbone: nn.Module, train_backbone: bool, num_channels: int, return_interm_layers: bool):\n super().__init__()\n for name, parameter in backbone.named_parameters():\n if not train_backbone or 'layer2' not in name and 'layer3' not in name and 'layer4' not in name:\n parameter.requires_grad_(False)\n if return_interm_layers:\n return_layers = {\"layer1\": \"0\", \"layer2\": \"1\", \"layer3\": \"2\", \"layer4\": \"3\"}\n else:\n return_layers = {'layer4': \"0\"}\n self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)\n self.num_channels = num_channels\n\n def forward(self, tensor_list: NestedTensor):\n xs = self.body(tensor_list.tensors)\n out: Dict[str, NestedTensor] = {}\n for name, x in xs.items():\n m = tensor_list.mask\n assert m is not None\n mask = F.interpolate(m[None].float(), size=x.shape[-2:]).to(torch.bool)[0]\n out[name] = NestedTensor(x, mask)\n return out\n\n\nclass Backbone(BackboneBase):\n \"\"\"ResNet backbone with frozen BatchNorm.\"\"\"\n def __init__(self, name: str,\n train_backbone: bool,\n return_interm_layers: bool,\n dilation: bool):\n backbone = getattr(torchvision.models, name)(\n replace_stride_with_dilation=[False, False, dilation],\n pretrained=is_main_process(), norm_layer=FrozenBatchNorm2d)\n num_channels = 512 if name in ('resnet18', 'resnet34') else 2048\n super().__init__(backbone, train_backbone, num_channels, return_interm_layers)\n\n\nclass Joiner(nn.Sequential):\n def __init__(self, backbone, position_embedding):\n super().__init__(backbone, position_embedding)\n\n def forward(self, tensor_list: NestedTensor):\n xs = self[0](tensor_list)\n out: List[NestedTensor] = []\n pos = []\n for name, x in xs.items():\n out.append(x)\n # position 
encoding\n pos.append(self[1](x).to(x.tensors.dtype))\n\n return out, pos\n\n\ndef build_backbone(args):\n position_embedding = build_position_encoding(args)\n train_backbone = args.lr_backbone > 0\n return_interm_layers = False # args.masks\n backbone = Backbone(args.backbone, train_backbone, return_interm_layers, False)\n model = Joiner(backbone, position_embedding)\n model.num_channels = backbone.num_channels\n return model\n"
] | [
[
"torch.ones",
"torch.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
DonCammne/OpenSeesPyAssistant | [
"f380f0f2a2f3d1336320bd8d26fa5efe00a12134"
] | [
"DataManagement.py"
] | [
"\"\"\"\nModule with the parent abstract class DataManagement. \\n\nCarmine Schipani, 2021\n\"\"\"\n\nfrom abc import ABC, abstractmethod\nfrom OpenSeesPyAssistant.ErrorHandling import *\nimport numpy as np\n\n\nclass DataManagement(ABC):\n \"\"\"\n Abstract parent class for data management.\n Using the associated MATLAB class \\n\n LOAD_CLASS.m \\n\n for the postprocessing in MATLAB, allowing for simpler and more reliable data management because the parameters\n from the OpenSeesPy analysis are imported automatically. \n \"\"\"\n\n def SaveData(self, f):\n \"\"\"\n Function that lists in the command window and saves in a opened file text \"f\" the data from the \"self\" class that calls it. \n Example: call this function after this line: \\n \n with open(FileName, 'w') as f:\n\n @param f (io.TextIOWrapper): Opened file to write into\n\n @exception WrongDimension: The number of lists in the list self.data needs to be 2\n \"\"\"\n if len(self.data[0]) != 2: raise WrongDimension() \n \n delimiter = \"##############################\" # 30 times #\n col_delimiter = \"\\t\" # tab\n for data_line in self.data:\n f.write('\\n')\n for col in data_line:\n if type(col) == np.ndarray:\n tmp_str = np.array_str(col, max_line_width = np.inf)\n else:\n tmp_str = str(col)\n f.write(tmp_str)\n f.write(col_delimiter)\n f.write('\\n')\n f.write('NEW INFO SECTION DELIMITER \\t')\n f.write(delimiter)\n\n @abstractmethod\n def ShowInfo(self):\n \"\"\"\n Abstract method that shows the data stored in the class in the command window.\n In some cases, it's possible to plot some information (for example the curve of the material model).\n \"\"\"\n pass\n\n @abstractmethod\n def ReInit(self):\n \"\"\"\n Abstract method that computes the value of the parameters with respect of the arguments. \\n\n Use after changing the value of argument inside the class (to update the values accordingly). \\n\n This function can be very useful in combination with the function \"deepcopy()\" from the module \"copy\". \\n\n Be careful that the parameter self.Initialized is also copied, thus it is safer to copy the class before the method that calls the actual OpenSees commands (and initialise the object).\n \"\"\"\n pass\n\n @abstractmethod\n def UpdateStoredData(self):\n \"\"\"\n Abstract method used to define and update the self.data member variable. \\n\n This member variable (self.data) is a list of lists with 2 entries (info_name and info_value)\n and for each list is stored a different member variable of the class. \\n\n Useful to debug the model, export data, copy object.\n \"\"\"\n pass"
] | [
[
"numpy.array_str"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
lonelu/Metalprot_learning | [
"8edb2c3e4f6ba129a409d75fd4d15ceb3a9e307b"
] | [
"src/extractor/make_bb_info_mats.py"
] | [
"from numpy.core.numeric import full\nfrom numpy.lib.function_base import append\nimport prody as pr\nimport os\nimport numpy\nimport matplotlib as mpl\nimport pylab\nfrom itertools import combinations, combinations_with_replacement\nfrom docopt import docopt\nimport itertools\nimport pickle\nimport sys\nfrom scipy.linalg.basic import matrix_balance\nfrom scipy.spatial.distance import cdist\n\nfrom . import ligand_database as ld\nfrom . import features_pdb2dihe as fpdh\n\nmetal_sel = 'ion or name NI MN ZN CO CU MG FE' \n\n\n#TO DO: create artificial aa in the 4th aa.\ndef get_atg(full_pdb):\n\t'''\n\tprody atomgroup will be used to calc bb info.\n\tIf the contact aa is at terminal, then the shape of the dist matrix will be < 12. So contact aa will be copied and added.\n\t'''\n\n\tmetal = full_pdb.select(metal_sel)[0]\n\tcontact_aas = full_pdb.select('protein and not carbon and not hydrogen and within 2.83 of resindex ' + str(metal.getResindex()))\n\tcontact_aa_resinds = numpy.unique(contact_aas.getResindices()) \n\textention = 1 \n\n\tcoords = []\n\tresnames = []\n\tnames = []\n\tresnums = []\n\tresn = 1\n\tfor resind in contact_aa_resinds:\t\t\n\t\text_inds = ld.extend_res_indices([resind], full_pdb, extend =extention)\n\n\t\t#In some cases, the contact aa is at terminal. We can add more aa to match the shape.\n\t\tif len(ext_inds) == 2:\n\t\t\tif ext_inds[0] == resind:\n\t\t\t\text_inds.insert(0, resind)\n\t\t\telse:\n\t\t\t\text_inds.append(resind)\n\t\tif len(ext_inds) == 1:\n\t\t\text_inds.append(resind)\n\t\t\text_inds.append(resind)\n\n\t\tfor ind in ext_inds:\n\t\t\taa = full_pdb.select('resindex ' + str(ind))\n\t\t\tcoords.extend(aa.getCoords())\n\t\t\tresnames.extend(aa.getResnames())\n\t\t\tnames.extend(aa.getNames())\n\t\t\tresnums.extend([resn for _i in range(len(aa))])\n\t\t\tresn += 1\n\n\n\tif len(contact_aa_resinds) == 3:\n\t\tcoords.extend([])\n\t\tresnames.extend([])\n\t\tnames.extend([])\n\t\tresnums.extend([])\n\n\t#ag = pr.AtomGroup('-'.join([str(p) for p in per]))\n\tag = pr.AtomGroup('0-1-2-3')\n\tag.setCoords(coords)\n\tag.setResnums(resnums)\n\tag.setResnames(resnames)\n\tag.setNames(names)\n\n\treturn ag\n\n\ndef get_atgs(full_pdb, contain_metal = True):\n\t'''\n\tprody atomgroup will be used to calc bb info.\n\tIf the contact aa is at terminal, then the shape of the dist matrix will be < 12. So contact aa will be copied and added.\n\t'''\n\tif contain_metal:\n\t\tmetal = full_pdb.select(metal_sel)[0]\n\t\tcontact_aas = full_pdb.select('protein and not carbon and not hydrogen and within 2.83 of resindex ' + str(metal.getResindex()))\t\n\telse:\n\t\t#TO DO: it is not quite right here if the pdb happened to have more HIS-CYS-GLU-ASP. Skip now.\n\t\tcontact_aas = full_pdb.select('resname HIS CYS GLU ASP')\n\t\tif not contact_aas and len(numpy.unique(contact_aas.getResindices())) > 4: \n\t\t\treturn []\n\n\tcontact_aa_resinds = numpy.unique(contact_aas.getResindices()) \n\n\textention = 1 \n\t\n\t# TO DO: If the len of contact_ass is not 4...\n\tags = []\n\t#for per in itertools.permutations(range(len(contact_aa_resinds))):\n\tfor per in [range(len(contact_aa_resinds))]:\n\t\tprint(per)\n\n\t\tcoords = []\n\t\tresnames = []\n\t\tnames = []\n\t\tresnums = []\n\t\tresn = 1\n\t\tfor idx in per:\n\t\t\tresind = contact_aa_resinds[idx]\n\n\t\t\text_inds = ld.extend_res_indices([resind], full_pdb, extend =extention)\n\n\t\t\t#In some cases, the contact aa is at terminal. 
We can add more aa to match the shape.\n\t\t\tif len(ext_inds) == 2:\n\t\t\t\tif ext_inds[0] == resind:\n\t\t\t\t\text_inds.insert(0, resind)\n\t\t\t\telse:\n\t\t\t\t\text_inds.append(resind)\n\t\t\tif len(ext_inds) == 1:\n\t\t\t\text_inds.append(resind)\n\t\t\t\text_inds.append(resind)\n\n\t\t\tfor ind in ext_inds:\n\t\t\t\taa = full_pdb.select('resindex ' + str(ind))\n\t\t\t\tcoords.extend(aa.getCoords())\n\t\t\t\tresnames.extend(aa.getResnames())\n\t\t\t\tnames.extend(aa.getNames())\n\t\t\t\tresnums.extend([resn for _i in range(len(aa))])\n\t\t\t\tresn += 1\n\n\t\tag = pr.AtomGroup('-'.join([str(p) for p in per]))\n\t\tag.setCoords(coords)\n\t\tag.setResnums(resnums)\n\t\tag.setResnames(resnames)\n\t\tag.setNames(names)\n\n\t\tags.append(ag)\n\n\treturn ags\n\n\n\ndef get_bb_dist_seq(core):\n\t'''\n\tIf we know N CA C, The coords of CB could be calcualted. So we may not need CB coords.\n\n\t'''\n\n\tn_coords = core.select('name N').getCoords()\n\n\tc_coords = core.select('name C').getCoords()\n\n\tca_coords = core.select('name CA').getCoords()\n\n\n\tn_n = cdist(n_coords, n_coords)\n\n\tc_c = cdist(c_coords, c_coords)\n\n\tca_ca = cdist(ca_coords, ca_coords)\n\n\tcb_coords = []\n\n\tfor i in range(len(n_coords)):\n\t\tCa = ca_coords[i]\n\t\tC = c_coords[i]\n\t\tN = n_coords[i]\n\n\t\tb = Ca - N\n\t\tc = C - Ca\n\t\ta = numpy.cross(b, c)\n\t\tCb = -0.58273431*a + 0.56802827*b - 0.54067466*c + Ca\n\n\t\tcb_coords.append(Cb)\n\n\tcb_coords = core.select('name CB').getCoords()\n\n\tcb_cb = cdist(cb_coords, cb_coords)\n\n\treturn n_n, c_c, ca_ca, cb_cb\n\n\ndef get_dihe(ag):\n\t'''\n\tPlease check features_pdb2dihe.py.\n\tOnly the contact aa will be extracted.\n\t'''\n\t\n\tnres = len(ag.select('name CA'))\n\tprint(nres)\n\tdist, _omega, _theta_asym, _phi_asym = fpdh.get_neighbors(ag, nres, 20.0) \n\n\t#TO DO: extract info, only the contact aa matters?!\n\tomega = numpy.zeros((nres, nres))\n\ttheta_asym = numpy.zeros((nres, nres))\n\tphi_asym = numpy.zeros((nres, nres))\n\tfor i in range(1, nres, 3):\n\t\tfor j in range(1, nres, 3):\n\t\t\tomega[i, j] = _omega[i, j]\n\t\t\ttheta_asym[i, j] = _theta_asym[i, j]\n\t\t\tphi_asym[i, j] = _phi_asym[i, j]\n\n\treturn omega, theta_asym, phi_asym\n\n\ndef get_seq_mat(ag, matrix_size = 12):\n\n\tseq = ag.select('name CA').getResnames()\n\n\tthreelettercodes = ['ALA', 'ARG', 'ASN', 'ASP', 'CYS', 'GLU', 'GLN', 'GLY', 'HIS', 'ILE', 'LEU', 'LYS', 'MET',\\\n\t\t\t\t\t\t'PHE', 'PRO', 'SER', 'THR', 'TRP', 'TYR', 'VAL']\n\n\tseq_channels = numpy.zeros([40, matrix_size, matrix_size], dtype=int)\n\n\n\tfor i in range(len(seq)):\n\t\taa = seq[i]\n\t\ttry:\n\t\t\tidx = threelettercodes.index(aa)\n\t\texcept:\n\t\t\tprint('Resname of following atom not found: {}'.format(aa))\n\t\t\tcontinue\n\t\tfor j in range(len(seq)):\n\t\t\tseq_channels[idx][i][j] = 1 # horizontal rows of 1's in first 20 channels\n\t\t\tseq_channels[idx+20][j][i] = 1 # vertical columns of 1's in next 20 channels\n\t\t\n\treturn seq_channels\n\n\ndef mk_full_mats(ag, matrix_size = 12):\n\tnres = len(ag.select('name CA'))\n\n\tn_n, c_c, ca_ca, cb_cb = get_bb_dist_seq(ag)\n\n\tomega, theta_asym, phi_asym = get_dihe(ag)\n\n\tseq_mats = get_seq_mat(ag, matrix_size)\n\n\tfull_mat = numpy.zeros((47, matrix_size, matrix_size))\n\n\t# Make sure the shape of each matrix is smaller than the matrix_size.\n\n\tfull_mat[0,0:n_n.shape[0], 0:n_n.shape[1]] = n_n\n\tfull_mat[1,0:c_c.shape[0], 0:c_c.shape[1]] = c_c\n\tfull_mat[2,0:ca_ca.shape[0], 0:ca_ca.shape[1]] = ca_ca\n\tfull_mat[3,0:cb_cb.shape[0], 
0:cb_cb.shape[1]] = cb_cb\n\n\tfull_mat[4,0:omega.shape[0], 0:omega.shape[1]] = omega\n\tfull_mat[5,0:theta_asym.shape[0], 0:theta_asym.shape[1]] = theta_asym\n\tfull_mat[6,0:phi_asym.shape[0], 0:phi_asym.shape[1]] = phi_asym\n\n\tfor i in range(7, 47):\n\t\tfull_mat[i, :, :] = seq_mats[i - 7]\n\n\treturn full_mat\n\n\ndef write_pickle_file(full_mat, pdb, ag, out_folder, tag = ''):\n\t\"\"\"\n\tWrites a pickle file containing the input numpy array into the current permutation's folder.\n\tCurrently using this only to save the full matrix (all 46 channels).\n\t\"\"\"\n\tnumpy.set_printoptions(threshold=numpy.inf)\n\tpdb_name = pdb.split('.')[0]\n\t\n\tpkl_file = out_folder + pdb_name + '_full_mat_' + ag.getTitle() + tag + '.pkl'\n\n\twith open(pkl_file, 'wb') as f:\n\t\tprint(pkl_file)\n\t\tpickle.dump(full_mat, f)\n\n\treturn\n\n\ndef write_dist_mat_file(mat, pdb, ag, out_folder, tag = ''):\n\t\"\"\"\n\tWrites out a file containing the distance matrix\n\t\"\"\"\n\t# output_folder = 'core_contact_maps/dist_mat_txt_folder/'\n\n\tnumpy.set_printoptions(threshold=numpy.inf)\n\n\tdist_mat_file = pdb.split('.')[0]\n\n\tdist_mat_file = out_folder + dist_mat_file + '_full_mat_' + ag.getTitle() + tag + '.txt'\n\n\twith open(dist_mat_file, 'w') as open_file:\n\t\tfor i in mat:\n\t\t\topen_file.write(str(i) + '\\n')\n\n\treturn\n\n\ndef run_mk_bb_info_mats(workdir, out_path, mat_size = 12, top = 1000, contain_metal = True, opts = None):\n\n\tos.makedirs(out_path, exist_ok=True)\n\n\tcount = 0\n\n\terrors = ''\n\t\n\tfor pdb_name in os.listdir(workdir):\n\n\t\tif count >= top:\n\t\t\tbreak\n\n\t\tif '.pdb' not in pdb_name:\n\t\t\tcontinue\n\n\t\tpdb_file = workdir + pdb_name\n\n\t\tpdb = pr.parsePDB(pdb_file)\n\n\t\tags = get_atgs(pdb, contain_metal)\n\t\n\t\tfor ag in ags:\n\t\t\ttry:\n\t\t\t\t#TO DO: currently, only consider 3 or 4 aa binding.\n\t\t\t\tif len(ag.select('name CA'))> 12 or len(ag.select('name CA')) < 7:\n\t\t\t\t\tprint(pdb_name + ' not used. ')\n\t\t\t\t\tcontinue\n\t\t\t\tfull_mat = mk_full_mats(ag, mat_size)\n\t\t\t\twrite_dist_mat_file(full_mat, pdb_name, ag, out_path)\n\t\t\t\twrite_pickle_file(full_mat, pdb_name, ag, out_path)\n\n\t\t\t\tcount += 1\n\t\t\texcept:\n\t\t\t\tprint('error: ' + pdb_name)\n\t\t\t\terrors += pdb_name + '\\n'\n\n\t\t\tif count >= top:\n\t\t\t\tbreak\n\t\n\twith open(out_path + '_error.txt', 'w') as f:\n\t\tf.write(errors)\n\n\treturn\n\n\n\n"
] | [
[
"numpy.set_printoptions",
"numpy.zeros",
"scipy.spatial.distance.cdist",
"numpy.cross"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
s123600g/openfaceInstallscript | [
"962b4b89c5626318b5701d7297d49df3423b0fe4"
] | [
"InstallOpenface/fix_sklearn/label.py"
] | [
"# Authors: Alexandre Gramfort <[email protected]>\n# Mathieu Blondel <[email protected]>\n# Olivier Grisel <[email protected]>\n# Andreas Mueller <[email protected]>\n# Joel Nothman <[email protected]>\n# Hamzeh Alsalhi <[email protected]>\n# License: BSD 3 clause\n\nfrom collections import defaultdict\nimport itertools\nimport array\n\nimport numpy as np\nimport scipy.sparse as sp\n\nfrom ..base import BaseEstimator, TransformerMixin\n\nfrom ..utils.fixes import sparse_min_max\nfrom ..utils import column_or_1d\nfrom ..utils.validation import check_array\nfrom ..utils.validation import check_is_fitted\nfrom ..utils.validation import _num_samples\nfrom ..utils.multiclass import unique_labels\nfrom ..utils.multiclass import type_of_target\n\nfrom ..externals import six\n\nzip = six.moves.zip\nmap = six.moves.map\n\n__all__ = [\n 'label_binarize',\n 'LabelBinarizer',\n 'LabelEncoder',\n 'MultiLabelBinarizer',\n]\n\n\nclass LabelEncoder(BaseEstimator, TransformerMixin):\n \"\"\"Encode labels with value between 0 and n_classes-1.\n\n Read more in the :ref:`User Guide <preprocessing_targets>`.\n\n Attributes\n ----------\n classes_ : array of shape (n_class,)\n Holds the label for each class.\n\n Examples\n --------\n `LabelEncoder` can be used to normalize labels.\n\n >>> from sklearn import preprocessing\n >>> le = preprocessing.LabelEncoder()\n >>> le.fit([1, 2, 2, 6])\n LabelEncoder()\n >>> le.classes_\n array([1, 2, 6])\n >>> le.transform([1, 1, 2, 6]) #doctest: +ELLIPSIS\n array([0, 0, 1, 2]...)\n >>> le.inverse_transform([0, 0, 1, 2])\n array([1, 1, 2, 6])\n\n It can also be used to transform non-numerical labels (as long as they are\n hashable and comparable) to numerical labels.\n\n >>> le = preprocessing.LabelEncoder()\n >>> le.fit([\"paris\", \"paris\", \"tokyo\", \"amsterdam\"])\n LabelEncoder()\n >>> list(le.classes_)\n ['amsterdam', 'paris', 'tokyo']\n >>> le.transform([\"tokyo\", \"tokyo\", \"paris\"]) #doctest: +ELLIPSIS\n array([2, 2, 1]...)\n >>> list(le.inverse_transform([2, 2, 1]))\n ['tokyo', 'tokyo', 'paris']\n\n See also\n --------\n sklearn.preprocessing.OneHotEncoder : encode categorical integer features\n using a one-hot aka one-of-K scheme.\n \"\"\"\n\n def fit(self, y):\n \"\"\"Fit label encoder\n\n Parameters\n ----------\n y : array-like of shape (n_samples,)\n Target values.\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n y = column_or_1d(y, warn=True)\n self.classes_ = np.unique(y)\n return self\n\n def fit_transform(self, y):\n \"\"\"Fit label encoder and return encoded labels\n\n Parameters\n ----------\n y : array-like of shape [n_samples]\n Target values.\n\n Returns\n -------\n y : array-like of shape [n_samples]\n \"\"\"\n y = column_or_1d(y, warn=True)\n self.classes_, y = np.unique(y, return_inverse=True)\n return y\n\n def transform(self, y):\n \"\"\"Transform labels to normalized encoding.\n\n Parameters\n ----------\n y : array-like of shape [n_samples]\n Target values.\n\n Returns\n -------\n y : array-like of shape [n_samples]\n \"\"\"\n check_is_fitted(self, 'classes_')\n y = column_or_1d(y, warn=True)\n\n classes = np.unique(y)\n if len(np.intersect1d(classes, self.classes_)) < len(classes):\n diff = np.setdiff1d(classes, self.classes_)\n # raise ValueError(\"y contains new labels: %s\" % str(diff))\n raise ValueError(\"y contains previously unseen labels: % s\" % str(diff))\n return np.searchsorted(self.classes_, y)\n\n def inverse_transform(self, y):\n \"\"\"Transform labels back to original encoding.\n\n Parameters\n 
----------\n y : numpy array of shape [n_samples]\n Target values.\n\n Returns\n -------\n y : numpy array of shape [n_samples]\n \"\"\"\n check_is_fitted(self, 'classes_')\n\n diff = np.setdiff1d(y, np.arange(len(self.classes_)))\n # if diff:\n # raise ValueError(\"y contains new labels: %s\" % str(diff))\n if len(diff):\n raise ValueError(\"y contains previously unseen labels: %s\" % str(diff))\n y = np.asarray(y)\n return self.classes_[y]\n\n\nclass LabelBinarizer(BaseEstimator, TransformerMixin):\n \"\"\"Binarize labels in a one-vs-all fashion\n\n Several regression and binary classification algorithms are\n available in the scikit. A simple way to extend these algorithms\n to the multi-class classification case is to use the so-called\n one-vs-all scheme.\n\n At learning time, this simply consists in learning one regressor\n or binary classifier per class. In doing so, one needs to convert\n multi-class labels to binary labels (belong or does not belong\n to the class). LabelBinarizer makes this process easy with the\n transform method.\n\n At prediction time, one assigns the class for which the corresponding\n model gave the greatest confidence. LabelBinarizer makes this easy\n with the inverse_transform method.\n\n Read more in the :ref:`User Guide <preprocessing_targets>`.\n\n Parameters\n ----------\n\n neg_label : int (default: 0)\n Value with which negative labels must be encoded.\n\n pos_label : int (default: 1)\n Value with which positive labels must be encoded.\n\n sparse_output : boolean (default: False)\n True if the returned array from transform is desired to be in sparse\n CSR format.\n\n Attributes\n ----------\n\n classes_ : array of shape [n_class]\n Holds the label for each class.\n\n y_type_ : str,\n Represents the type of the target data as evaluated by\n utils.multiclass.type_of_target. 
Possible type are 'continuous',\n 'continuous-multioutput', 'binary', 'multiclass',\n 'multiclass-multioutput', 'multilabel-indicator', and 'unknown'.\n\n sparse_input_ : boolean,\n True if the input data to transform is given as a sparse matrix, False\n otherwise.\n\n Examples\n --------\n >>> from sklearn import preprocessing\n >>> lb = preprocessing.LabelBinarizer()\n >>> lb.fit([1, 2, 6, 4, 2])\n LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)\n >>> lb.classes_\n array([1, 2, 4, 6])\n >>> lb.transform([1, 6])\n array([[1, 0, 0, 0],\n [0, 0, 0, 1]])\n\n Binary targets transform to a column vector\n\n >>> lb = preprocessing.LabelBinarizer()\n >>> lb.fit_transform(['yes', 'no', 'no', 'yes'])\n array([[1],\n [0],\n [0],\n [1]])\n\n Passing a 2D matrix for multilabel classification\n\n >>> import numpy as np\n >>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]]))\n LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)\n >>> lb.classes_\n array([0, 1, 2])\n >>> lb.transform([0, 1, 2, 1])\n array([[1, 0, 0],\n [0, 1, 0],\n [0, 0, 1],\n [0, 1, 0]])\n\n See also\n --------\n label_binarize : function to perform the transform operation of\n LabelBinarizer with fixed classes.\n sklearn.preprocessing.OneHotEncoder : encode categorical integer features\n using a one-hot aka one-of-K scheme.\n \"\"\"\n\n def __init__(self, neg_label=0, pos_label=1, sparse_output=False):\n if neg_label >= pos_label:\n raise ValueError(\"neg_label={0} must be strictly less than \"\n \"pos_label={1}.\".format(neg_label, pos_label))\n\n if sparse_output and (pos_label == 0 or neg_label != 0):\n raise ValueError(\"Sparse binarization is only supported with non \"\n \"zero pos_label and zero neg_label, got \"\n \"pos_label={0} and neg_label={1}\"\n \"\".format(pos_label, neg_label))\n\n self.neg_label = neg_label\n self.pos_label = pos_label\n self.sparse_output = sparse_output\n\n def fit(self, y):\n \"\"\"Fit label binarizer\n\n Parameters\n ----------\n y : array of shape [n_samples,] or [n_samples, n_classes]\n Target values. The 2-d matrix should only contain 0 and 1,\n represents multilabel classification.\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n self.y_type_ = type_of_target(y)\n if 'multioutput' in self.y_type_:\n raise ValueError(\"Multioutput target data is not supported with \"\n \"label binarization\")\n if _num_samples(y) == 0:\n raise ValueError('y has 0 samples: %r' % y)\n\n self.sparse_input_ = sp.issparse(y)\n self.classes_ = unique_labels(y)\n return self\n\n def fit_transform(self, y):\n \"\"\"Fit label binarizer and transform multi-class labels to binary\n labels.\n\n The output of transform is sometimes referred to as\n the 1-of-K coding scheme.\n\n Parameters\n ----------\n y : array or sparse matrix of shape [n_samples,] or \\\n [n_samples, n_classes]\n Target values. The 2-d matrix should only contain 0 and 1,\n represents multilabel classification. Sparse matrix can be\n CSR, CSC, COO, DOK, or LIL.\n\n Returns\n -------\n Y : array or CSR matrix of shape [n_samples, n_classes]\n Shape will be [n_samples, 1] for binary problems.\n \"\"\"\n return self.fit(y).transform(y)\n\n def transform(self, y):\n \"\"\"Transform multi-class labels to binary labels\n\n The output of transform is sometimes referred to by some authors as\n the 1-of-K coding scheme.\n\n Parameters\n ----------\n y : array or sparse matrix of shape [n_samples,] or \\\n [n_samples, n_classes]\n Target values. The 2-d matrix should only contain 0 and 1,\n represents multilabel classification. 
Sparse matrix can be\n CSR, CSC, COO, DOK, or LIL.\n\n Returns\n -------\n Y : numpy array or CSR matrix of shape [n_samples, n_classes]\n Shape will be [n_samples, 1] for binary problems.\n \"\"\"\n check_is_fitted(self, 'classes_')\n\n y_is_multilabel = type_of_target(y).startswith('multilabel')\n if y_is_multilabel and not self.y_type_.startswith('multilabel'):\n raise ValueError(\"The object was not fitted with multilabel\"\n \" input.\")\n\n return label_binarize(y, self.classes_,\n pos_label=self.pos_label,\n neg_label=self.neg_label,\n sparse_output=self.sparse_output)\n\n def inverse_transform(self, Y, threshold=None):\n \"\"\"Transform binary labels back to multi-class labels\n\n Parameters\n ----------\n Y : numpy array or sparse matrix with shape [n_samples, n_classes]\n Target values. All sparse matrices are converted to CSR before\n inverse transformation.\n\n threshold : float or None\n Threshold used in the binary and multi-label cases.\n\n Use 0 when ``Y`` contains the output of decision_function\n (classifier).\n Use 0.5 when ``Y`` contains the output of predict_proba.\n\n If None, the threshold is assumed to be half way between\n neg_label and pos_label.\n\n Returns\n -------\n y : numpy array or CSR matrix of shape [n_samples] Target values.\n\n Notes\n -----\n In the case when the binary labels are fractional\n (probabilistic), inverse_transform chooses the class with the\n greatest value. Typically, this allows to use the output of a\n linear model's decision_function method directly as the input\n of inverse_transform.\n \"\"\"\n check_is_fitted(self, 'classes_')\n\n if threshold is None:\n threshold = (self.pos_label + self.neg_label) / 2.\n\n if self.y_type_ == \"multiclass\":\n y_inv = _inverse_binarize_multiclass(Y, self.classes_)\n else:\n y_inv = _inverse_binarize_thresholding(Y, self.y_type_,\n self.classes_, threshold)\n\n if self.sparse_input_:\n y_inv = sp.csr_matrix(y_inv)\n elif sp.issparse(y_inv):\n y_inv = y_inv.toarray()\n\n return y_inv\n\n\ndef label_binarize(y, classes, neg_label=0, pos_label=1, sparse_output=False):\n \"\"\"Binarize labels in a one-vs-all fashion\n\n Several regression and binary classification algorithms are\n available in the scikit. 
A simple way to extend these algorithms\n to the multi-class classification case is to use the so-called\n one-vs-all scheme.\n\n This function makes it possible to compute this transformation for a\n fixed set of class labels known ahead of time.\n\n Parameters\n ----------\n y : array-like\n Sequence of integer labels or multilabel data to encode.\n\n classes : array-like of shape [n_classes]\n Uniquely holds the label for each class.\n\n neg_label : int (default: 0)\n Value with which negative labels must be encoded.\n\n pos_label : int (default: 1)\n Value with which positive labels must be encoded.\n\n sparse_output : boolean (default: False),\n Set to true if output binary array is desired in CSR sparse format\n\n Returns\n -------\n Y : numpy array or CSR matrix of shape [n_samples, n_classes]\n Shape will be [n_samples, 1] for binary problems.\n\n Examples\n --------\n >>> from sklearn.preprocessing import label_binarize\n >>> label_binarize([1, 6], classes=[1, 2, 4, 6])\n array([[1, 0, 0, 0],\n [0, 0, 0, 1]])\n\n The class ordering is preserved:\n\n >>> label_binarize([1, 6], classes=[1, 6, 4, 2])\n array([[1, 0, 0, 0],\n [0, 1, 0, 0]])\n\n Binary targets transform to a column vector\n\n >>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes'])\n array([[1],\n [0],\n [0],\n [1]])\n\n See also\n --------\n LabelBinarizer : class used to wrap the functionality of label_binarize and\n allow for fitting to classes independently of the transform operation\n \"\"\"\n if not isinstance(y, list):\n # XXX Workaround that will be removed when list of list format is\n # dropped\n y = check_array(y, accept_sparse='csr', ensure_2d=False, dtype=None)\n else:\n if _num_samples(y) == 0:\n raise ValueError('y has 0 samples: %r' % y)\n if neg_label >= pos_label:\n raise ValueError(\"neg_label={0} must be strictly less than \"\n \"pos_label={1}.\".format(neg_label, pos_label))\n\n if (sparse_output and (pos_label == 0 or neg_label != 0)):\n raise ValueError(\"Sparse binarization is only supported with non \"\n \"zero pos_label and zero neg_label, got \"\n \"pos_label={0} and neg_label={1}\"\n \"\".format(pos_label, neg_label))\n\n # To account for pos_label == 0 in the dense case\n pos_switch = pos_label == 0\n if pos_switch:\n pos_label = -neg_label\n\n y_type = type_of_target(y)\n if 'multioutput' in y_type:\n raise ValueError(\"Multioutput target data is not supported with label \"\n \"binarization\")\n if y_type == 'unknown':\n raise ValueError(\"The type of target data is not known\")\n\n n_samples = y.shape[0] if sp.issparse(y) else len(y)\n n_classes = len(classes)\n classes = np.asarray(classes)\n\n if y_type == \"binary\":\n if n_classes == 1:\n if sparse_output:\n return sp.csr_matrix((n_samples, 1), dtype=int)\n else:\n Y = np.zeros((len(y), 1), dtype=np.int)\n Y += neg_label\n return Y\n elif len(classes) >= 3:\n y_type = \"multiclass\"\n\n sorted_class = np.sort(classes)\n if (y_type == \"multilabel-indicator\" and classes.size != y.shape[1]):\n raise ValueError(\"classes {0} missmatch with the labels {1}\"\n \"found in the data\".format(classes, unique_labels(y)))\n\n if y_type in (\"binary\", \"multiclass\"):\n y = column_or_1d(y)\n\n # pick out the known labels from y\n y_in_classes = np.in1d(y, classes)\n y_seen = y[y_in_classes]\n indices = np.searchsorted(sorted_class, y_seen)\n indptr = np.hstack((0, np.cumsum(y_in_classes)))\n\n data = np.empty_like(indices)\n data.fill(pos_label)\n Y = sp.csr_matrix((data, indices, indptr),\n shape=(n_samples, n_classes))\n elif 
y_type == \"multilabel-indicator\":\n Y = sp.csr_matrix(y)\n if pos_label != 1:\n data = np.empty_like(Y.data)\n data.fill(pos_label)\n Y.data = data\n else:\n raise ValueError(\"%s target data is not supported with label \"\n \"binarization\" % y_type)\n\n if not sparse_output:\n Y = Y.toarray()\n Y = Y.astype(int, copy=False)\n\n if neg_label != 0:\n Y[Y == 0] = neg_label\n\n if pos_switch:\n Y[Y == pos_label] = 0\n else:\n Y.data = Y.data.astype(int, copy=False)\n\n # preserve label ordering\n if np.any(classes != sorted_class):\n indices = np.searchsorted(sorted_class, classes)\n Y = Y[:, indices]\n\n if y_type == \"binary\":\n if sparse_output:\n Y = Y.getcol(-1)\n else:\n Y = Y[:, -1].reshape((-1, 1))\n\n return Y\n\n\ndef _inverse_binarize_multiclass(y, classes):\n \"\"\"Inverse label binarization transformation for multiclass.\n\n Multiclass uses the maximal score instead of a threshold.\n \"\"\"\n classes = np.asarray(classes)\n\n if sp.issparse(y):\n # Find the argmax for each row in y where y is a CSR matrix\n\n y = y.tocsr()\n n_samples, n_outputs = y.shape\n outputs = np.arange(n_outputs)\n row_max = sparse_min_max(y, 1)[1]\n row_nnz = np.diff(y.indptr)\n\n y_data_repeated_max = np.repeat(row_max, row_nnz)\n # picks out all indices obtaining the maximum per row\n y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data)\n\n # For corner case where last row has a max of 0\n if row_max[-1] == 0:\n y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)])\n\n # Gets the index of the first argmax in each row from y_i_all_argmax\n index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1])\n # first argmax of each row\n y_ind_ext = np.append(y.indices, [0])\n y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]]\n # Handle rows of all 0\n y_i_argmax[np.where(row_nnz == 0)[0]] = 0\n\n # Handles rows with max of 0 that contain negative numbers\n samples = np.arange(n_samples)[(row_nnz > 0) &\n (row_max.ravel() == 0)]\n for i in samples:\n ind = y.indices[y.indptr[i]:y.indptr[i + 1]]\n y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0]\n\n return classes[y_i_argmax]\n else:\n return classes.take(y.argmax(axis=1), mode=\"clip\")\n\n\ndef _inverse_binarize_thresholding(y, output_type, classes, threshold):\n \"\"\"Inverse label binarization transformation using thresholding.\"\"\"\n\n if output_type == \"binary\" and y.ndim == 2 and y.shape[1] > 2:\n raise ValueError(\"output_type='binary', but y.shape = {0}\".\n format(y.shape))\n\n if output_type != \"binary\" and y.shape[1] != len(classes):\n raise ValueError(\"The number of class is not equal to the number of \"\n \"dimension of y.\")\n\n classes = np.asarray(classes)\n\n # Perform thresholding\n if sp.issparse(y):\n if threshold > 0:\n if y.format not in ('csr', 'csc'):\n y = y.tocsr()\n y.data = np.array(y.data > threshold, dtype=np.int)\n y.eliminate_zeros()\n else:\n y = np.array(y.toarray() > threshold, dtype=np.int)\n else:\n y = np.array(y > threshold, dtype=np.int)\n\n # Inverse transform data\n if output_type == \"binary\":\n if sp.issparse(y):\n y = y.toarray()\n if y.ndim == 2 and y.shape[1] == 2:\n return classes[y[:, 1]]\n else:\n if len(classes) == 1:\n return np.repeat(classes[0], len(y))\n else:\n return classes[y.ravel()]\n\n elif output_type == \"multilabel-indicator\":\n return y\n\n else:\n raise ValueError(\"{0} format is not supported\".format(output_type))\n\n\nclass MultiLabelBinarizer(BaseEstimator, TransformerMixin):\n \"\"\"Transform between iterable of iterables and a multilabel 
format\n\n Although a list of sets or tuples is a very intuitive format for multilabel\n data, it is unwieldy to process. This transformer converts between this\n intuitive format and the supported multilabel format: a (samples x classes)\n binary matrix indicating the presence of a class label.\n\n Parameters\n ----------\n classes : array-like of shape [n_classes] (optional)\n Indicates an ordering for the class labels\n\n sparse_output : boolean (default: False),\n Set to true if output binary array is desired in CSR sparse format\n\n Attributes\n ----------\n classes_ : array of labels\n A copy of the `classes` parameter where provided,\n or otherwise, the sorted set of classes found when fitting.\n\n Examples\n --------\n >>> from sklearn.preprocessing import MultiLabelBinarizer\n >>> mlb = MultiLabelBinarizer()\n >>> mlb.fit_transform([(1, 2), (3,)])\n array([[1, 1, 0],\n [0, 0, 1]])\n >>> mlb.classes_\n array([1, 2, 3])\n\n >>> mlb.fit_transform([set(['sci-fi', 'thriller']), set(['comedy'])])\n array([[0, 1, 1],\n [1, 0, 0]])\n >>> list(mlb.classes_)\n ['comedy', 'sci-fi', 'thriller']\n\n See also\n --------\n sklearn.preprocessing.OneHotEncoder : encode categorical integer features\n using a one-hot aka one-of-K scheme.\n \"\"\"\n def __init__(self, classes=None, sparse_output=False):\n self.classes = classes\n self.sparse_output = sparse_output\n\n def fit(self, y):\n \"\"\"Fit the label sets binarizer, storing `classes_`\n\n Parameters\n ----------\n y : iterable of iterables\n A set of labels (any orderable and hashable object) for each\n sample. If the `classes` parameter is set, `y` will not be\n iterated.\n\n Returns\n -------\n self : returns this MultiLabelBinarizer instance\n \"\"\"\n if self.classes is None:\n classes = sorted(set(itertools.chain.from_iterable(y)))\n else:\n classes = self.classes\n dtype = np.int if all(isinstance(c, int) for c in classes) else object\n self.classes_ = np.empty(len(classes), dtype=dtype)\n self.classes_[:] = classes\n return self\n\n def fit_transform(self, y):\n \"\"\"Fit the label sets binarizer and transform the given label sets\n\n Parameters\n ----------\n y : iterable of iterables\n A set of labels (any orderable and hashable object) for each\n sample. If the `classes` parameter is set, `y` will not be\n iterated.\n\n Returns\n -------\n y_indicator : array or CSR matrix, shape (n_samples, n_classes)\n A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in\n `y[i]`, and 0 otherwise.\n \"\"\"\n if self.classes is not None:\n return self.fit(y).transform(y)\n\n # Automatically increment on new class\n class_mapping = defaultdict(int)\n class_mapping.default_factory = class_mapping.__len__\n yt = self._transform(y, class_mapping)\n\n # sort classes and reorder columns\n tmp = sorted(class_mapping, key=class_mapping.get)\n\n # (make safe for tuples)\n dtype = np.int if all(isinstance(c, int) for c in tmp) else object\n class_mapping = np.empty(len(tmp), dtype=dtype)\n class_mapping[:] = tmp\n self.classes_, inverse = np.unique(class_mapping, return_inverse=True)\n # ensure yt.indices keeps its current dtype\n yt.indices = np.array(inverse[yt.indices], dtype=yt.indices.dtype,\n copy=False)\n\n if not self.sparse_output:\n yt = yt.toarray()\n\n return yt\n\n def transform(self, y):\n \"\"\"Transform the given label sets\n\n Parameters\n ----------\n y : iterable of iterables\n A set of labels (any orderable and hashable object) for each\n sample. 
If the `classes` parameter is set, `y` will not be\n iterated.\n\n Returns\n -------\n y_indicator : array or CSR matrix, shape (n_samples, n_classes)\n A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in\n `y[i]`, and 0 otherwise.\n \"\"\"\n check_is_fitted(self, 'classes_')\n\n class_to_index = dict(zip(self.classes_, range(len(self.classes_))))\n yt = self._transform(y, class_to_index)\n\n if not self.sparse_output:\n yt = yt.toarray()\n\n return yt\n\n def _transform(self, y, class_mapping):\n \"\"\"Transforms the label sets with a given mapping\n\n Parameters\n ----------\n y : iterable of iterables\n class_mapping : Mapping\n Maps from label to column index in label indicator matrix\n\n Returns\n -------\n y_indicator : sparse CSR matrix, shape (n_samples, n_classes)\n Label indicator matrix\n \"\"\"\n indices = array.array('i')\n indptr = array.array('i', [0])\n for labels in y:\n indices.extend(set(class_mapping[label] for label in labels))\n indptr.append(len(indices))\n data = np.ones(len(indices), dtype=int)\n\n return sp.csr_matrix((data, indices, indptr),\n shape=(len(indptr) - 1, len(class_mapping)))\n\n def inverse_transform(self, yt):\n \"\"\"Transform the given indicator matrix into label sets\n\n Parameters\n ----------\n yt : array or sparse matrix of shape (n_samples, n_classes)\n A matrix containing only 1s ands 0s.\n\n Returns\n -------\n y : list of tuples\n The set of labels for each sample such that `y[i]` consists of\n `classes_[j]` for each `yt[i, j] == 1`.\n \"\"\"\n check_is_fitted(self, 'classes_')\n\n if yt.shape[1] != len(self.classes_):\n raise ValueError('Expected indicator for {0} classes, but got {1}'\n .format(len(self.classes_), yt.shape[1]))\n\n if sp.issparse(yt):\n yt = yt.tocsr()\n if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0:\n raise ValueError('Expected only 0s and 1s in label indicator.')\n return [tuple(self.classes_.take(yt.indices[start:end]))\n for start, end in zip(yt.indptr[:-1], yt.indptr[1:])]\n else:\n unexpected = np.setdiff1d(yt, [0, 1])\n if len(unexpected) > 0:\n raise ValueError('Expected only 0s and 1s in label indicator. '\n 'Also got {0}'.format(unexpected))\n return [tuple(self.classes_.compress(indicators)) for indicators\n in yt]\n"
] | [
[
"numpy.asarray",
"numpy.in1d",
"numpy.cumsum",
"numpy.any",
"numpy.searchsorted",
"numpy.where",
"scipy.sparse.issparse",
"numpy.unique",
"numpy.empty_like",
"numpy.arange",
"numpy.flatnonzero",
"numpy.intersect1d",
"numpy.diff",
"numpy.repeat",
"scipy.sparse.csr_matrix",
"numpy.append",
"numpy.array",
"numpy.sort",
"numpy.setdiff1d"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
yangfengKAUST/cnn-text-classification-tf | [
"5f552df9887e57a4bc5638b3d36d7393254d2644"
] | [
"generate_embeddings.py"
] | [
"import numpy as np\nimport pickle\nimport argparse\nimport re\n\n\"\"\"\nConvert pre-trained Glove embeddings into npy file\nRun using:\npython3 generate_embeddings.py -d data/glove.6B.300d.txt --npy_output data/embeddings.npy --dict_output data/vocab.pckl --dict_whitelist data/polaritydata.vocab\n\"\"\"\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--dataset', '-d', type=str, required=True)\n parser.add_argument('--npy_output', type=str, required=True)\n parser.add_argument('--dict_output', type=str, required=True)\n parser.add_argument('--dict_whitelist', type=str, required=True)\n parser.add_argument('--dump_frequency', type=int, default=10000)\n return parser.parse_args()\n\n\ndef main():\n args = parse_args()\n\n # reserve 0 for unknown words\n data = {\n '': 0\n }\n embeddings = [\n np.zeros((300), dtype=np.float32)\n ]\n\n float_re = re.compile(' [-+]?[0-9]*\\.?[0-9]+([eE][-+]?[0-9]+)?')\n\n with open(args.dict_whitelist) as wfile:\n whitelist = [line.strip() for line in wfile]\n\n print(\"Building vocabulary ...\")\n\n with open(args.dataset) as ofile, \\\n open(args.dict_output, 'wb') as dfile, \\\n open(args.npy_output, 'wb') as nfile:\n idx = 1\n for line in ofile:\n pos = next(re.finditer(float_re, line)).start()\n word, vector = line[:pos], line[pos + 1:].split()\n\n if word not in whitelist:\n continue\n\n if word in data:\n print('Possible duplicate at {} in {}'.format(idx, line))\n continue\n\n embedding = np.fromiter([float(d) for d in vector], np.float32)\n\n if embedding.shape != (300,):\n print('Shape is {}'.format(embedding.shape))\n print(line)\n embeddings.append(embedding)\n data[word] = idx\n\n idx += 1\n\n if not idx % args.dump_frequency:\n np.save(nfile, np.array(embeddings))\n embeddings.clear()\n\n np.save(nfile, np.array(embeddings))\n pickle.dump(data, dfile)\n\n print(\"Vocabulary saved, size is {} words\".format(idx))\n\n\nif __name__ == '__main__':\n main()"
] | [
[
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
renmengye/inc-few-shot-attractor-public | [
"c560d5a81480cb22d903fa746ab0cfc2eb964e4c",
"c560d5a81480cb22d903fa746ab0cfc2eb964e4c",
"c560d5a81480cb22d903fa746ab0cfc2eb964e4c"
] | [
"fewshot/data/compress_tiered_imagenet.py",
"fewshot/models/multi_task_model.py",
"fewshot/models/resnet_base.py"
] | [
"import cv2\nimport numpy as np\nimport six\nimport sys\nimport pickle as pkl\n\nfrom tqdm import tqdm\n\n\ndef compress(path, output):\n with np.load(path, mmap_mode=\"r\", encoding='latin1') as data:\n images = data[\"images\"]\n array = []\n for ii in tqdm(six.moves.xrange(images.shape[0]), desc='compress'):\n im = images[ii]\n im_str = cv2.imencode('.png', im)[1]\n array.append(im_str)\n with open(output, 'wb') as f:\n pkl.dump(array, f, protocol=pkl.HIGHEST_PROTOCOL)\n\n\ndef decompress(path, output):\n try:\n with open(output, 'rb') as f:\n array = pkl.load(f, encoding='bytes')\n except:\n with open(output, 'rb') as f:\n array = pkl.load(f)\n images = np.zeros([len(array), 84, 84, 3], dtype=np.uint8)\n for ii, item in tqdm(enumerate(array), desc='decompress'):\n im = cv2.imdecode(item, 1)\n images[ii] = im\n np.savez(path, images=images)\n\n\ndef main():\n if sys.argv[1] == 'compress':\n compress(sys.argv[2], sys.argv[3])\n elif sys.argv[1] == 'decompress':\n decompress(sys.argv[2], sys.argv[3])\n\n\nif __name__ == '__main__':\n main()\n",
"\"\"\"Multi-task model.\nA primary model for representation learning and a secondary model for few-shot\ntransfer.\n\nAuthor: Mengye Ren ([email protected])\n\"\"\"\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport horovod.tensorflow as hvd\nimport numpy as np\nimport tensorflow as tf\n\nfrom horovod.tensorflow.mpi_ops import allgather\n\nfrom fewshot.models.kmeans_utils import compute_logits, compute_logits_cosine\nfrom fewshot.models.model_factory import get_model\nfrom fewshot.models.nnlib import weight_variable\nfrom fewshot.utils.logger import get as get_logger\n\nlog = get_logger()\n\n\nclass MultiTaskModel(object):\n \"\"\"A model with both regular classification branch and few-shot branch.\"\"\"\n\n def __init__(self,\n config,\n x,\n y,\n x_b,\n y_b,\n x_b_v,\n y_b_v,\n num_classes_a,\n num_classes_b,\n is_training=True,\n ext_wts=None,\n y_sel=None,\n w_class_a=None,\n b_class_a=None):\n self._config = config\n self._is_training = is_training\n self._num_classes_a = num_classes_a\n self._num_classes_b = num_classes_b\n\n if config.backbone_class == 'resnet_backbone':\n bb_config = config.resnet_config\n else:\n assert False, 'Not supported'\n opt_config = config.optimizer_config\n proto_config = config.protonet_config\n transfer_config = config.transfer_config\n\n self._backbone = get_model(config.backbone_class, bb_config)\n self._inputs = x\n self._labels = y\n if opt_config.num_gpu > 1:\n self._labels_all = allgather(self._labels)\n else:\n self._labels_all = self._labels\n self._inputs_b = x_b\n self._labels_b = y_b\n self._inputs_b_v = x_b_v\n self._labels_b_v = y_b_v\n if opt_config.num_gpu > 1:\n self._labels_b_v_all = allgather(self._labels_b_v)\n else:\n self._labels_b_v_all = self._labels_b_v\n self._y_sel = y_sel\n self._mask = tf.placeholder(tf.bool, [], name='mask')\n\n # global_step = tf.get_variable(\n # 'global_step', shape=[], dtype=tf.int64, trainable=False)\n global_step = tf.contrib.framework.get_or_create_global_step()\n self._global_step = global_step\n log.info('LR decay steps {}'.format(opt_config.lr_decay_steps))\n log.info('LR list {}'.format(opt_config.lr_list))\n learn_rate = tf.train.piecewise_constant(\n global_step, list(\n np.array(opt_config.lr_decay_steps).astype(np.int64)),\n list(opt_config.lr_list))\n self._learn_rate = learn_rate\n\n opt = self.get_optimizer(opt_config.optimizer, learn_rate)\n if opt_config.num_gpu > 1:\n opt = hvd.DistributedOptimizer(opt)\n\n with tf.name_scope('TaskA'):\n h_a = self.backbone(x, is_training=is_training, ext_wts=ext_wts)\n self._h_a = h_a\n\n # Apply BN ops.\n bn_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n\n with tf.name_scope('TaskB'):\n x_b_all = tf.concat([x_b, x_b_v], axis=0)\n if ext_wts is not None:\n h_b_all = self.backbone(\n x_b_all, is_training=is_training, reuse=True, ext_wts=ext_wts)\n else:\n h_b_all = self.backbone(x_b_all, is_training=is_training, reuse=True)\n\n with tf.name_scope('TaskA'):\n # Calculates hidden activation size.\n h_shape = h_a.get_shape()\n h_size = 1\n for ss in h_shape[1:]:\n h_size *= int(ss)\n\n if w_class_a is None:\n if ext_wts is not None:\n w_class_a = weight_variable(\n [h_size, num_classes_a],\n init_method='numpy',\n dtype=tf.float32,\n init_param={'val': np.transpose(ext_wts['w_class_a'])},\n wd=config.wd,\n name='w_class_a')\n b_class_a = weight_variable([],\n init_method='numpy',\n dtype=tf.float32,\n init_param={'val': ext_wts['b_class_a']},\n wd=0e0,\n name='b_class_a')\n else:\n w_class_a = 
weight_variable([h_size, num_classes_a],\n init_method='truncated_normal',\n dtype=tf.float32,\n init_param={'stddev': 0.01},\n wd=bb_config.wd,\n name='w_class_a')\n b_class_a = weight_variable([num_classes_a],\n init_method='constant',\n init_param={'val': 0.0},\n name='b_class_a')\n self._w_class_a_orig = w_class_a\n self._b_class_a_orig = b_class_a\n else:\n assert b_class_a is not None\n w_class_a_orig = weight_variable([h_size, num_classes_a],\n init_method='truncated_normal',\n dtype=tf.float32,\n init_param={'stddev': 0.01},\n wd=bb_config.wd,\n name='w_class_a')\n b_class_a_orig = weight_variable([num_classes_a],\n init_method='constant',\n init_param={'val': 0.0},\n name='b_class_a')\n self._w_class_a_orig = w_class_a_orig\n self._b_class_a_orig = b_class_a_orig\n\n self._w_class_a = w_class_a\n self._b_class_a = b_class_a\n num_classes_a_dyn = tf.cast(tf.shape(b_class_a)[0], tf.int64)\n num_classes_a_dyn32 = tf.shape(b_class_a)[0]\n\n if proto_config.cosine_a:\n if proto_config.cosine_tau:\n if ext_wts is None:\n init_val = 10.0\n else:\n init_val = ext_wts['tau'][0]\n tau = weight_variable([],\n init_method='constant',\n init_param={'val': init_val},\n name='tau')\n else:\n tau = tf.constant(1.0)\n w_class_a_norm = self._normalize(w_class_a, 0)\n h_a_norm = self._normalize(h_a, 1)\n dot = tf.matmul(h_a_norm, w_class_a_norm)\n if ext_wts is not None:\n dot += b_class_a\n logits_a = tau * dot\n else:\n logits_a = tf.matmul(h_a, w_class_a) + b_class_a\n self._prediction_a = logits_a\n if opt_config.num_gpu > 1:\n self._prediction_a_all = allgather(self._prediction_a)\n else:\n self._prediction_a_all = self._prediction_a\n\n xent_a = tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits=logits_a, labels=y)\n cost_a = tf.reduce_mean(xent_a, name='xent')\n self._cost_a = cost_a\n cost_a += self._decay()\n correct_a = tf.equal(tf.argmax(logits_a, axis=1), y)\n self._correct_a = correct_a\n self._acc_a = tf.reduce_mean(tf.cast(correct_a, cost_a.dtype))\n\n with tf.name_scope('TaskB'):\n h_b = h_b_all[:tf.shape(x_b)[0]]\n h_b_v = h_b_all[tf.shape(x_b)[0]:]\n\n # Add new axes for the `batch` dimension.\n h_b_ = tf.expand_dims(h_b, 0)\n h_b_v_ = tf.expand_dims(h_b_v, 0)\n y_b_ = tf.expand_dims(y_b, 0)\n y_b_v_ = tf.expand_dims(y_b_v, 0)\n\n if transfer_config.old_and_new:\n protos_b = self._compute_protos(num_classes_b, h_b_,\n y_b_ - num_classes_a)\n else:\n protos_b = self._compute_protos(num_classes_b, h_b_, y_b_)\n\n w_class_a_ = tf.expand_dims(tf.transpose(w_class_a), 0)\n if proto_config.protos_phi:\n w_p1 = weight_variable([h_size],\n init_method='constant',\n dtype=tf.float32,\n init_param={'val': 1.0},\n wd=bb_config.wd,\n name='w_p1')\n if proto_config.cosine_attention:\n w_q = weight_variable([h_size, h_size],\n init_method='truncated_normal',\n dtype=tf.float32,\n init_param={'stddev': 0.1},\n wd=bb_config.wd,\n name='w_q')\n k_b = weight_variable([num_classes_a, h_size],\n init_method='truncated_normal',\n dtype=tf.float32,\n init_param={'stddev': 0.1},\n wd=bb_config.wd,\n name='k_b')\n tau_q = weight_variable([],\n init_method='constant',\n init_param={'val': 10.0},\n name='tau_q')\n if transfer_config.old_and_new:\n w_class_b = self._compute_protos_attend_fix(\n num_classes_b, h_b_, y_b_ - num_classes_a_dyn, w_q, tau_q, k_b,\n self._w_class_a_orig)\n else:\n w_class_b = self._compute_protos_attend_fix(\n num_classes_b, h_b_, y_b_, w_q, tau_q, k_b, self._w_class_a_orig)\n assert proto_config.protos_phi\n w_p2 = weight_variable([h_size],\n init_method='constant',\n 
dtype=tf.float32,\n init_param={'val': 1.0},\n wd=bb_config.wd,\n name='w_p2')\n self._k_b = tf.expand_dims(w_p2, 1) * self._w_class_a_orig\n self._k_b2 = k_b\n self.bias = w_class_b\n self.new_protos = w_p1 * protos_b\n self.new_bias = w_p2 * w_class_b\n w_class_b = w_p1 * protos_b + w_p2 * w_class_b\n self.protos = protos_b\n self.w_class_b_final = w_class_b\n else:\n w_class_b = protos_b\n if proto_config.protos_phi:\n w_class_b = w_p1 * w_class_b\n\n self._w_class_b = w_class_b\n\n if transfer_config.old_and_new:\n w_class_all = tf.concat([w_class_a_, w_class_b], axis=1)\n else:\n w_class_all = w_class_b\n\n if proto_config.cosine_softmax_tau:\n tau_b = weight_variable([],\n init_method='constant',\n init_param={'val': 10.0},\n name='tau_b')\n else:\n tau_b = tf.constant(1.0)\n\n if proto_config.similarity == 'euclidean':\n logits_b_v = compute_logits(w_class_all, h_b_v_)\n elif proto_config.similarity == 'cosine':\n logits_b_v = tau_b * compute_logits_cosine(w_class_all, h_b_v_)\n else:\n raise ValueError('Unknown similarity')\n self._logits_b_v = logits_b_v\n self._prediction_b = logits_b_v[0]\n if opt_config.num_gpu > 1:\n self._prediction_b_all = allgather(self._prediction_b)\n else:\n self._prediction_b_all = self._prediction_b\n\n # Mask out the old classes.\n def mask_fn():\n bin_mask = tf.expand_dims(\n tf.reduce_sum(\n tf.one_hot(y_sel, num_classes_a + num_classes_b),\n 0,\n keep_dims=True), 0)\n logits_b_v_m = logits_b_v * (1.0 - bin_mask)\n logits_b_v_m -= bin_mask * 100.0\n return logits_b_v_m\n\n if transfer_config.old_and_new:\n logits_b_v = tf.cond(self._mask, mask_fn, lambda: logits_b_v)\n xent_b_v = tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits=logits_b_v, labels=y_b_v_)\n cost_b = tf.reduce_mean(xent_b_v, name='xent')\n self._cost_b = cost_b\n\n if transfer_config.old_and_new:\n total_cost = cost_b\n else:\n total_cost = (transfer_config.cost_a_ratio * cost_a +\n transfer_config.cost_b_ratio * cost_b)\n self._total_cost = total_cost\n\n if not transfer_config.meta_only:\n # assert False, 'let us go for pretrained model first'\n var_list = tf.trainable_variables()\n var_list = list(filter(lambda x: 'phi' in x.name, var_list))\n layers = self.config.transfer_config.meta_layers\n if layers == \"all\":\n pass\n elif layers == \"4\":\n keywords = ['TaskB', 'unit_4_']\n filter_fn = lambda x: any([kw in x.name for kw in keywords])\n var_list = list(filter(filter_fn, var_list))\n else:\n raise ValueError('Unknown finetune layers {}'.format(layers))\n [log.info('Slow weights {}'.format(v.name)) for v in var_list]\n else:\n var_list = []\n\n if proto_config.cosine_softmax_tau:\n var_list += [tau_b]\n\n if proto_config.cosine_attention:\n var_list += [w_q, tau_q, k_b, w_p2]\n\n if proto_config.protos_phi:\n var_list += [w_p1]\n\n if transfer_config.train_wclass_a:\n if proto_config.similarity == 'euclidean':\n var_list += [w_class_a, b_class_a]\n elif proto_config.similarity == 'cosine':\n var_list += [w_class_a]\n\n if is_training:\n grads_and_vars = opt.compute_gradients(total_cost, var_list)\n with tf.control_dependencies(bn_ops):\n [log.info('BN op {}'.format(op.name)) for op in bn_ops]\n train_op = opt.apply_gradients(grads_and_vars, global_step=global_step)\n\n grads_and_vars_b = opt.compute_gradients(cost_b, var_list)\n with tf.control_dependencies(bn_ops):\n train_op_b = opt.apply_gradients(\n grads_and_vars_b, global_step=global_step)\n\n with tf.control_dependencies(bn_ops):\n train_op_a = opt.minimize(cost_a, global_step=global_step)\n self._train_op = 
train_op\n self._train_op_a = train_op_a\n self._train_op_b = train_op_b\n self._initializer = tf.global_variables_initializer()\n self._w_class_a = w_class_a\n\n def _compute_protos_attend_fix(self, nclasses, h_train, y_train, w_q, tau_q,\n k_b, w_class_a):\n h_ = tf.matmul(tf.squeeze(h_train), w_q)\n # [B, D] * [D * K] = [B, K]\n # h_dot_kb = tf.matmul(h_, k_b, transpose_b=True)\n # h_dot_kb /= tf.sqrt(tf.reduce_sum(tf.square(h_), axis=1, keep_dims=True))\n # h_dot_kb /= tf.transpose(\n # tf.sqrt(tf.reduce_sum(tf.square(k_b), axis=1, keep_dims=True)))\n h_norm = self._normalize(h_, 1)\n k_b_norm = self._normalize(k_b, 1)\n h_dot_kb = tf.matmul(h_norm, k_b_norm, transpose_b=True)\n h_dot_kb *= tau_q\n attend = tf.nn.softmax(h_dot_kb)\n # [B, K] * [K, D] = [B, D]\n h_attend = tf.matmul(attend, w_class_a, transpose_b=True)\n # [1, B, D]\n h_attend = tf.expand_dims(h_attend, 0)\n # [1, K, D]\n return self._compute_protos(nclasses, h_attend, y_train)\n\n def _compute_protos_attend(self, nclasses, h_train, y_train, w_q, tau_q, k_b,\n w_class_a):\n h_ = tf.matmul(tf.squeeze(h_train), w_q)\n # [B, D] * [D * K] = [B, K]\n # h_dot_kb = tf.matmul(h_, k_b, transpose_b=True)\n # h_dot_kb /= tf.sqrt(tf.reduce_sum(tf.square(h_), axis=1, keep_dims=True))\n # h_dot_kb /= tf.transpose(\n # tf.sqrt(tf.reduce_sum(tf.square(k_b), axis=1, keep_dims=True)))\n h_norm = self._normalize(h_, 1)\n k_b_norm = self._normalize(k_b, 1)\n h_dot_kb = tf.matmul(h_norm, k_b_norm, transpose_b=True)\n h_dot_kb *= tau_q\n attend = tf.nn.softmax(h_dot_kb)\n # [B, K] * [K, D] = [B, D]\n h_attend = tf.matmul(h_dot_kb, w_class_a, transpose_b=True)\n # [1, B, D]\n h_attend = tf.expand_dims(h_attend, 0)\n # [1, K, D]\n return self._compute_protos(nclasses, h_attend, y_train)\n\n def _compute_protos(self, nclasses, h_train, y_train):\n \"\"\"Computes the prototypes, cluster centers.\n Args:\n nclasses: Int. Number of classes.\n h_train: [B, N, D], Train features.\n y_train: [B, N], Train class labels.\n Returns:\n protos: [B, K, D], Test prediction.\n \"\"\"\n protos = [None] * nclasses\n for kk in range(nclasses):\n # [B, N, 1]\n ksel = tf.expand_dims(tf.cast(tf.equal(y_train, kk), h_train.dtype), 2)\n # [B, N, D]\n protos[kk] = tf.reduce_sum(h_train * ksel, [1], keep_dims=True)\n protos[kk] /= tf.reduce_sum(ksel, [1, 2], keep_dims=True)\n protos = tf.concat(protos, axis=1) # [B, K, D]\n return protos\n\n def _decay(self):\n wd_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n log.info('Weight decay variables')\n [log.info(x) for x in wd_losses]\n log.info('Total length: {}'.format(len(wd_losses)))\n if len(wd_losses) > 0:\n return tf.add_n(wd_losses)\n else:\n log.warning('No weight decay variables!')\n return 0.0\n\n def get_fdict(self, task_a_data=None, task_b_data=None):\n \"\"\"Make a feed dict.\"\"\"\n fdict = {}\n\n if task_a_data is not None:\n x_a, y_a = task_a_data\n fdict[self.inputs] = x_a\n fdict[self.labels] = y_a\n\n if task_b_data is not None:\n fdict[self.inputs_b] = task_b_data.x_train\n fdict[self.labels_b] = task_b_data.y_train\n fdict[self.inputs_b_v] = task_b_data.x_test\n fdict[self.labels_b_v] = task_b_data.y_test\n\n if task_b_data.y_sel is not None:\n fdict[self._y_sel] = task_b_data.y_sel\n fdict[self._mask] = True\n else:\n fdict[self._y_sel] = np.zeros([self._num_classes_b], dtype=np.int64)\n fdict[self._mask] = False\n return fdict\n\n def get_optimizer(self, optname, learn_rate):\n \"\"\"Gets an optimizer.\n\n Args:\n optname: String. 
Name of the optimizer.\n \"\"\"\n if optname == 'adam':\n opt = tf.train.AdamOptimizer(learn_rate)\n elif optname == 'momentum':\n opt = tf.train.MomentumOptimizer(learn_rate, 0.9)\n elif optname == 'nesterov':\n opt = tf.train.MomentumOptimizer(learn_rate, 0.9, use_nesterov=True)\n else:\n raise ValueError('Unknown optimizer {}'.format(optname))\n return opt\n\n def initialize(self, sess):\n \"\"\"Initialize model.\"\"\"\n sess.run(self._initializer)\n\n def eval_step_a(self, sess, task_a_data):\n \"\"\"Evaluate one step on task A.\"\"\"\n x_a, y_a = task_a_data\n fdict = {self.inputs: x_a, self.labels: y_a}\n prediction_a, y_a = sess.run([self.prediction_a_all, self.labels_all],\n feed_dict=fdict)\n return prediction_a, y_a\n\n def eval_step_b(self, sess, task_b_data):\n \"\"\"Evaluate one step on task B.\"\"\"\n prediction_b, y_b = sess.run(\n [self.prediction_b_all, self.labels_b_v_all],\n feed_dict={\n self.inputs_b: task_b_data.x_train,\n self.labels_b: task_b_data.y_train,\n self.inputs_b_v: task_b_data.x_test,\n self.labels_b_v: task_b_data.y_test\n })\n return prediction_b, y_b\n\n def eval_step(self, sess, task_a_data, task_b_data):\n \"\"\"Evaluate one step.\"\"\"\n prediction_a = self.eval_step_a(sess, task_a_data)\n prediction_b = self.eval_step_b(sess, task_b_data)\n return prediction_a, prediction_b\n\n def train_step(self, sess, task_a_data, task_b_data):\n \"\"\"Train a single step, for optimizing a combined loss.\"\"\"\n fdict = self.get_fdict(task_a_data, task_b_data)\n cost_a, cost_b, total_cost, _ = sess.run(\n [self.cost_a, self.cost_b, self.total_cost, self.train_op],\n feed_dict=fdict)\n return cost_a, 0.0, cost_b\n\n def train_step_a(self, sess, task_a_data):\n \"\"\"Train a single step on task A.\"\"\"\n x_a, y_a = task_a_data\n fdict = {self.inputs: x_a, self.labels: y_a}\n cost_a, _ = sess.run([self.cost_a, self.train_op_a], feed_dict=fdict)\n return cost_a\n\n def train_step_b(self, sess, task_b_data):\n \"\"\"Train a single step on task B.\"\"\"\n fdict = {\n self.inputs_b: task_b_data.x_train,\n self.labels_b: task_b_data.y_train,\n self.inputs_b_v: task_b_data.x_test,\n self.labels_b_v: task_b_data.y_test\n }\n if task_b_data.y_sel is not None:\n fdict[self._y_sel] = task_b_data.y_sel\n fdict[self._mask] = True\n else:\n fdict[self._y_sel] = np.zeros([self._num_classes_b], dtype=np.int64)\n fdict[self._mask] = False\n cost_b, _ = sess.run([self.cost_b, self.train_op_b], feed_dict=fdict)\n return cost_b\n\n def _get_optimizer(self, optname, learn_rate):\n \"\"\"Gets an optimizer.\"\"\"\n if optname == 'adam':\n opt = tf.train.AdamOptimizer(learn_rate)\n elif optname == 'momentum':\n opt = tf.train.MomentumOptimizer(learn_rate, 0.9)\n elif optname == 'nesterov':\n opt = tf.train.MomentumOptimizer(learn_rate, 0.9, use_nesterov=True)\n else:\n raise ValueError('Unknown optimizer')\n return opt\n\n def _normalize(self, x, axis, eps=1e-5):\n return x / (\n tf.sqrt(tf.reduce_sum(tf.square(x), axis=axis, keep_dims=True)) + 1e-5)\n\n @property\n def backbone(self):\n return self._backbone\n\n @property\n def inputs(self):\n return self._inputs\n\n @property\n def labels(self):\n return self._labels\n\n @property\n def labels_all(self):\n return self._labels_all\n\n @property\n def inputs_b(self):\n return self._inputs_b\n\n @property\n def labels_b(self):\n return self._labels_b\n\n @property\n def inputs_b_v(self):\n return self._inputs_b_v\n\n @property\n def labels_b_v(self):\n return self._labels_b_v\n\n @property\n def labels_b_v_all(self):\n return 
self._labels_b_v_all\n\n @property\n def cost_a(self):\n return self._cost_a\n\n @property\n def cost_b(self):\n return self._cost_b\n\n @property\n def total_cost(self):\n return self._total_cost\n\n @property\n def train_op(self):\n return self._train_op\n\n @property\n def config(self):\n return self._config\n\n @property\n def learn_rate(self):\n return self._learn_rate\n\n @property\n def train_op_a(self):\n return self._train_op_a\n\n @property\n def train_op_b(self):\n return self._train_op_b\n\n @property\n def h_a(self):\n return self._h_a\n\n @property\n def prediction_a(self):\n return self._prediction_a\n\n @property\n def prediction_a_all(self):\n return self._prediction_a_all\n\n @property\n def prediction_b(self):\n return self._prediction_b\n\n @property\n def prediction_b_all(self):\n return self._prediction_b_all\n\n @property\n def num_classes_a(self):\n return self._num_classes_a\n\n @property\n def num_classes_b(self):\n return self._num_classes_b\n\n @property\n def w_class_a(self):\n return self._w_class_a\n",
"from __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom fewshot.models.nnlib import weight_variable\nfrom fewshot.models.backbone import Backbone\nfrom fewshot.utils.logger import get as get_logger\n\nlog = get_logger()\n\n\nclass ResnetBase(Backbone):\n\n def _batch_norm_slow(self, name, x, is_training=True):\n \"\"\"A slow version of batch normalization, allows self-defined variables.\"\"\"\n if self.config.data_format == 'NCHW':\n axis = 1\n axes = [0, 2, 3]\n else:\n axis = -1\n axes = [0, 1, 2]\n\n shape = [int(x.get_shape()[axis])]\n vs = 'BatchNorm'\n\n decay = 0.999\n eps = 0.001\n\n def get_vars(vs):\n with tf.variable_scope(vs):\n beta = self._weight_variable(\n shape,\n name=\"beta\",\n init_method='constant',\n init_param={'val': 0.0})\n gamma = self._weight_variable(\n shape,\n name=\"gamma\",\n init_method='constant',\n init_param={'val': 1.0})\n emean = self._weight_variable(\n shape,\n name='moving_mean',\n trainable=False,\n dtype=x.dtype,\n init_method='constant',\n init_param={'val': 0.0})\n evar = self._weight_variable(\n shape,\n name='moving_variance',\n trainable=False,\n dtype=x.dtype,\n init_method='constant',\n init_param={'val': 0.0})\n mean, var = tf.nn.moments(x, axes=axes)\n if self.config.data_format == 'NCHW':\n gamma_ = tf.reshape(gamma, [1, -1, 1, 1])\n beta_ = tf.reshape(beta, [1, -1, 1, 1])\n mean_ = tf.reshape(mean, [1, -1, 1, 1])\n var_ = tf.reshape(var, [1, -1, 1, 1])\n emean_ = tf.reshape(emean, [1, -1, 1, 1])\n evar_ = tf.reshape(evar, [1, -1, 1, 1])\n else:\n gamma_ = gamma\n beta_ = beta\n mean_ = mean\n var_ = var\n emean_ = emean\n evar_ = evar\n ema_mean_op = tf.assign_sub(emean, (emean - mean) * (1 - decay))\n ema_var_op = tf.assign_sub(evar, (evar - var) * (1 - decay))\n tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, ema_mean_op)\n tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, ema_var_op)\n return mean_, var_, emean_, evar_, beta_, gamma_\n\n try:\n mean_, var_, emean_, evar_, beta_, gamma_ = get_vars('BatchNorm')\n except Exception as e:\n log.error(e)\n log.error('Try another batch norm')\n mean_, var_, emean_, evar_, beta_, gamma_ = get_vars('BatchNorm_1')\n if is_training:\n return tf.nn.batch_normalization(x, mean_, var_, beta_, gamma_, eps)\n else:\n return tf.nn.batch_normalization(x, emean_, evar_, beta_, gamma_, eps)\n\n def _batch_norm(self, name, x, is_training=True):\n \"\"\"Batch normalization.\"\"\"\n if self._slow_bn:\n return self._batch_norm_slow(name, x, is_training=is_training)\n else:\n return tf.contrib.layers.batch_norm(\n x,\n fused=True,\n data_format=self.config.data_format,\n is_training=is_training,\n scale=True)\n\n def _group_norm_slow(self, name, x, is_training=True):\n \"\"\"Slow version of group norm, allow self defined variables.\"\"\"\n with tf.variable_scope(name):\n if self.config.data_format == 'NCHW':\n channels_axis = 1\n reduction_axes = (2, 3)\n elif self.config.data_format == 'NHWC':\n channels_axis = 3\n reduction_axes = (1, 2)\n x_shape = tf.shape(x)\n x_shape_list = [-1, x_shape[1], x_shape[2], x_shape[3]]\n axes_before_channel = x_shape_list[:channels_axis]\n axes_after_channel = x_shape_list[channels_axis + 1:]\n G = self.config.num_norm_groups\n C = int(x.shape[channels_axis])\n shape_after = axes_before_channel + [G, C // G] + axes_after_channel\n x_reshape = tf.reshape(x, shape_after)\n moment_axes = [channels_axis + 1]\n for a in reduction_axes:\n if a > channels_axis:\n moment_axes.append(a + 1)\n else:\n 
moment_axes.append(a)\n\n beta = self._weight_variable([C],\n name=\"beta\",\n init_method='constant',\n init_param={'val': 0.0})\n gamma = self._weight_variable([C],\n name=\"gamma\",\n init_method='constant',\n init_param={'val': 1.0})\n beta_shape = [1, 1, 1, 1, 1]\n beta_shape[channels_axis] = G\n beta_shape[channels_axis + 1] = C // G\n beta_reshape = tf.reshape(beta, beta_shape)\n gamma_reshape = tf.reshape(gamma, beta_shape)\n mean, variance = tf.nn.moments(x_reshape, moment_axes, keep_dims=True)\n log.info('Moment axes {}'.format(moment_axes))\n log.info(variance.shape)\n log.info(x_reshape.shape)\n log.info(mean.shape)\n epsilon = 1e-6\n gain = tf.rsqrt(variance + epsilon)\n offset = -mean * gain\n gain *= gamma_reshape\n offset *= gamma_reshape\n offset += beta_reshape\n normed = x_reshape * gain + offset\n return tf.reshape(normed, x_shape)\n\n def _group_norm(self, name, x, is_training=True):\n # if self._slow_bn:\n return self._group_norm_slow(name, x, is_training=is_training)\n if self.config.data_format == 'NCHW':\n channels_axis = -3\n reduction_axes = (-2, -1)\n elif self.config.data_format == 'NHWC':\n channels_axis = -1\n reduction_axes = (-3, -2)\n # print(x, x.name)\n normed = tf.contrib.layers.group_norm(\n x,\n groups=self.config.num_norm_groups,\n channels_axis=channels_axis,\n reduction_axes=reduction_axes,\n trainable=is_training,\n scope=name)\n return normed\n\n def _normalize(self, name, x, is_training=True):\n \"\"\"Normalize the activations\"\"\"\n if self.config.normalization == \"batch_norm\":\n return self._batch_norm(name, x, is_training=is_training)\n elif self.config.normalization == \"group_norm\":\n return self._group_norm(name, x, is_training=is_training)\n\n def _possible_downsample(self, x, in_filter, out_filter, stride):\n \"\"\"Downsample the feature map using average pooling, if the filter size\n does not match.\"\"\"\n if stride[2] > 1:\n with tf.variable_scope(\"downsample\"):\n x = tf.nn.avg_pool(\n x,\n stride,\n stride,\n padding=\"SAME\",\n data_format=self.config.data_format)\n\n if in_filter < out_filter:\n pad_ = [(out_filter - in_filter) // 2, (out_filter - in_filter) // 2]\n with tf.variable_scope(\"pad\"):\n if self.config.data_format == 'NHWC':\n x = tf.pad(x, [[0, 0], [0, 0], [0, 0], pad_])\n else:\n x = tf.pad(x, [[0, 0], pad_, [0, 0], [0, 0]])\n return x\n\n def _possible_bottleneck_downsample(self,\n x,\n in_filter,\n out_filter,\n stride,\n is_training=True):\n \"\"\"Downsample projection layer, if the filter size does not match.\"\"\"\n if stride[2] > 1 or in_filter != out_filter:\n x = self._conv(\"project\", x, 1, in_filter, out_filter, stride)\n if self.config.version == \"v1\":\n x = self._normalize(\"project_bn\", x, is_training=is_training)\n return x\n\n def _residual_inner(self,\n x,\n in_filter,\n out_filter,\n stride,\n no_activation=False,\n is_training=True):\n \"\"\"Transformation applied on residual units.\"\"\"\n if self.config.version == \"v2\":\n with tf.variable_scope(\"sub1\"):\n if not no_activation:\n x = self._normalize(\"bn1\", x, is_training=is_training)\n x = self._relu(\"relu1\", x)\n x = self._conv(\"conv1\", x, 3, in_filter, out_filter, stride)\n with tf.variable_scope(\"sub2\"):\n x = self._normalize(\"bn2\", x, is_training=is_training)\n x = self._relu(\"relu2\", x)\n x = self._conv(\"conv2\", x, 3, out_filter, out_filter,\n self._stride_arr(1))\n else:\n with tf.variable_scope(\"sub1\"):\n x = self._conv(\"conv2\", x, 3, in_filter, out_filter, stride)\n x = self._normalize(\"bn1\", x, 
is_training=is_training)\n x = self._relu(\"relu1\", x)\n with tf.variable_scope(\"sub2\"):\n x = self._conv(\"conv2\", x, 3, out_filter, out_filter,\n self._stride_arr(1))\n x = self._normalize(\"bn2\", x, is_training=is_training)\n return x\n\n def _bottleneck_residual_inner(self,\n x,\n in_filter,\n out_filter,\n stride,\n no_activation=False,\n is_training=True):\n \"\"\"Transformation applied on bottleneck residual units.\"\"\"\n if self.config.version == \"v2\":\n with tf.variable_scope(\"sub1\"):\n if not no_activation:\n x = self._normalize(\"bn1\", x, is_training=is_training)\n x = self._relu(\"relu1\", x)\n x = self._conv(\"conv1\", x, 1, in_filter, out_filter // 4, stride)\n with tf.variable_scope(\"sub2\"):\n x = self._normalize(\"bn2\", x, is_training)\n x = self._relu(\"relu2\", x)\n x = self._conv(\"conv2\", x, 3, out_filter // 4, out_filter // 4,\n self._stride_arr(1))\n with tf.variable_scope(\"sub3\"):\n x = self._normalize(\"bn3\", x, is_training=is_training)\n x = self._relu(\"relu3\", x)\n x = self._conv(\"conv3\", x, 1, out_filter // 4, out_filter,\n self._stride_arr(1))\n elif self.config.version == \"v1\":\n with tf.variable_scope(\"sub1\"):\n x = self._conv(\"conv1\", x, 1, in_filter, out_filter // 4, stride)\n x = self._normalize(\"bn1\", x, is_training=is_training)\n x = self._relu(\"relu1\", x)\n with tf.variable_scope(\"sub2\"):\n x = self._conv(\"conv2\", x, 3, out_filter // 4, out_filter // 4,\n self._stride_arr(1))\n x = self._normalize(\"bn1\", x, is_training=is_training)\n x = self._relu(\"relu1\", x)\n with tf.variable_scope(\"sub3\"):\n x = self._conv(\"conv3\", x, 1, out_filter // 4, out_filter,\n self._stride_arr(1))\n x = self._normalize(\"bn3\", x, is_training=is_training)\n else:\n raise ValueError(\"Unkonwn version\")\n return x\n\n def _residual_inner2(self,\n x,\n in_filter,\n out_filter,\n stride,\n no_activation=False,\n is_training=True):\n \"\"\"Transformation applied on residual units.\"\"\"\n # This is SNAIL Resnet\n with tf.variable_scope(\"sub1\"):\n x = self._conv(\"conv1\", x, 3, in_filter, out_filter, self._stride_arr(1))\n x = self._normalize(\"bn1\", x, is_training=is_training)\n x = self._relu(\"relu1\", x)\n with tf.variable_scope(\"sub2\"):\n x = self._conv(\"conv2\", x, 3, out_filter, out_filter,\n self._stride_arr(1))\n x = self._normalize(\"bn2\", x, is_training=is_training)\n x = self._relu(\"relu2\", x)\n with tf.variable_scope(\"sub3\"):\n x = self._conv(\"conv3\", x, 3, out_filter, out_filter,\n self._stride_arr(1))\n x = self._normalize(\"bn3\", x, is_training=is_training)\n return x\n\n def _residual(self,\n x,\n in_filter,\n out_filter,\n stride,\n no_activation=False,\n is_training=True,\n add_relu=True):\n \"\"\"Residual unit with 2 sub layers.\"\"\"\n orig_x = x\n x = self._residual_inner(\n x,\n in_filter,\n out_filter,\n stride,\n no_activation=no_activation,\n is_training=is_training)\n x += self._possible_downsample(orig_x, in_filter, out_filter, stride)\n if self.config.version == \"v1\" and add_relu:\n x = self._relu(\"relu3\", x)\n # x = debug_identity(x)\n return x\n\n def _residual2(self,\n x,\n in_filter,\n out_filter,\n stride,\n no_activation=False,\n is_training=True,\n add_relu=True):\n \"\"\"Residual unit with 2 sub layers.\"\"\"\n orig_x = self._conv(\"proj\", x, 1, in_filter, out_filter,\n self._stride_arr(1))\n x = self._residual_inner2(\n x,\n in_filter,\n out_filter,\n self._stride_arr(1),\n no_activation=no_activation,\n is_training=is_training)\n x = tf.nn.max_pool(\n x + orig_x,\n 
self._stride_arr(2),\n stride,\n padding='SAME',\n data_format=self.config.data_format)\n if is_training:\n x = tf.nn.dropout(x, keep_prob=0.9)\n return x\n\n def _residual3(self,\n x,\n in_filter,\n out_filter,\n stride,\n no_activation=False,\n is_training=True,\n add_relu=True):\n \"\"\"Residual unit with 2 sub layers.\"\"\"\n orig_x = self._conv(\"proj\", x, 1, in_filter, out_filter,\n self._stride_arr(1))\n x = self._residual_inner2(\n x,\n in_filter,\n out_filter,\n self._stride_arr(1),\n no_activation=no_activation,\n is_training=is_training)\n x = tf.nn.max_pool(\n x + orig_x,\n self._stride_arr(2),\n stride,\n padding='SAME',\n data_format=self.config.data_format)\n return x\n\n def _bottleneck_residual(self,\n x,\n in_filter,\n out_filter,\n stride,\n no_activation=False,\n is_training=True,\n add_relu=True):\n \"\"\"Bottleneck resisual unit with 3 sub layers.\"\"\"\n orig_x = x\n x = self._bottleneck_residual_inner(\n x,\n in_filter,\n out_filter,\n stride,\n no_activation=no_activation,\n is_training=is_training)\n x += self._possible_bottleneck_downsample(\n orig_x, in_filter, out_filter, stride, is_training=is_training)\n if self.config.version == \"v1\" and add_relu:\n x = self._relu(\"relu3\", x)\n return x\n\n def _conv(self, name, x, filter_size, in_filters, out_filters, strides):\n \"\"\"Convolution.\"\"\"\n with tf.variable_scope(name):\n if self.config.filter_initialization == \"normal\":\n n = filter_size * filter_size * out_filters\n init_method = \"truncated_normal\"\n init_param = {\"mean\": 0, \"stddev\": np.sqrt(2.0 / n)}\n elif self.config.filter_initialization == \"uniform\":\n init_method = \"uniform_scaling\"\n init_param = {\"factor\": 1.0}\n kernel = self._weight_variable(\n [filter_size, filter_size, in_filters, out_filters],\n init_method=init_method,\n init_param=init_param,\n wd=self.config.wd,\n dtype=self.dtype,\n name=\"w\")\n return tf.nn.conv2d(\n x,\n kernel,\n strides,\n padding=\"SAME\",\n data_format=self.config.data_format)\n\n def _fully_connected(self, x, out_dim):\n \"\"\"FullyConnected layer for final output.\"\"\"\n x_shape = x.get_shape()\n d = x_shape[1]\n w = self._weight_variable(\n [d, out_dim],\n init_method=\"uniform_scaling\",\n # init_param={\"factor\": 1.0},\n init_param={\"factor\": 1 / np.sqrt(float(out_dim))},\n wd=self.config.wd,\n dtype=self.dtype,\n name=\"w\")\n b = self._weight_variable([out_dim],\n init_method=\"constant\",\n init_param={\"val\": 0.0},\n name=\"b\",\n dtype=self.dtype)\n return tf.nn.xw_plus_b(x, w, b)\n\n def _weight_variable(self,\n shape,\n init_method=None,\n dtype=tf.float32,\n init_param=None,\n wd=None,\n name=None,\n trainable=True,\n seed=0):\n \"\"\"Wrapper to declare variables. 
Default on CPU.\"\"\"\n if self._ext_wts is None:\n return weight_variable(\n shape,\n init_method=init_method,\n dtype=dtype,\n init_param=init_param,\n wd=wd,\n name=name,\n trainable=trainable,\n seed=seed)\n else:\n assert self._slow_bn, \"Must enable slow BN\"\n assert name is not None # Use name to retrieve the variable name\n vs = tf.get_variable_scope()\n var_name = vs.name + '/' + name\n if var_name in self._ext_wts:\n log.info('Found variable {} in external weights'.format(var_name))\n return self._ext_wts[var_name]\n else:\n log.error('Not found variable {} in external weights'.format(var_name))\n raise ValueError('Variable not found')\n\n def _stride_arr(self, stride):\n \"\"\"Map a stride scalar to the stride array for tf.nn.conv2d.\"\"\"\n if self.config.data_format == 'NCHW':\n return [1, 1, stride, stride]\n else:\n return [1, stride, stride, 1]\n\n def _relu(self, name, x):\n if self.config.leaky_relu > 0.0:\n return tf.nn.leaky_relu(x, alpha=self.config.leaky_relu, name=name)\n else:\n return tf.nn.relu(x, name=name)\n\n def _init_conv(self, x, n_filters, is_training=True):\n \"\"\"Build initial conv layers.\"\"\"\n config = self.config\n init_filter = config.init_filter\n with tf.variable_scope(\"init\"):\n h = self._conv(\"init_conv\", x, init_filter, self.config.num_channel,\n n_filters, self._stride_arr(config.init_stride))\n h = self._normalize(\"init_bn\", h, is_training=is_training)\n h = self._relu(\"init_relu\", h)\n # Max-pooling is used in ImageNet experiments to further reduce\n # dimensionality.\n if config.init_max_pool:\n h = tf.nn.max_pool(\n h,\n self._stride_arr(3),\n self._stride_arr(2),\n padding=\"SAME\",\n data_format=self.config.data_format)\n return h\n\n def _global_avg_pool(self, x, keep_dims=False):\n if self.config.data_format == 'NCHW':\n return tf.reduce_mean(x, [2, 3], keep_dims=keep_dims)\n else:\n return tf.reduce_mean(x, [1, 2], keep_dims=keep_dims)\n"
] | [
[
"numpy.load",
"numpy.savez"
],
[
"tensorflow.cond",
"tensorflow.concat",
"tensorflow.control_dependencies",
"tensorflow.reduce_sum",
"tensorflow.cast",
"tensorflow.equal",
"tensorflow.train.AdamOptimizer",
"tensorflow.add_n",
"tensorflow.get_collection",
"tensorflow.squeeze",
"tensorflow.train.MomentumOptimizer",
"tensorflow.name_scope",
"tensorflow.square",
"tensorflow.trainable_variables",
"tensorflow.argmax",
"numpy.zeros",
"tensorflow.matmul",
"tensorflow.shape",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.one_hot",
"numpy.transpose",
"numpy.array",
"tensorflow.nn.softmax",
"tensorflow.transpose",
"tensorflow.constant",
"tensorflow.reduce_mean",
"tensorflow.expand_dims",
"tensorflow.contrib.framework.get_or_create_global_step"
],
[
"numpy.sqrt",
"tensorflow.pad",
"tensorflow.contrib.layers.group_norm",
"tensorflow.nn.conv2d",
"tensorflow.nn.moments",
"tensorflow.nn.dropout",
"tensorflow.nn.xw_plus_b",
"tensorflow.nn.batch_normalization",
"tensorflow.shape",
"tensorflow.nn.avg_pool",
"tensorflow.contrib.layers.batch_norm",
"tensorflow.add_to_collection",
"tensorflow.nn.leaky_relu",
"tensorflow.nn.relu",
"tensorflow.reduce_mean",
"tensorflow.reshape",
"tensorflow.assign_sub",
"tensorflow.variable_scope",
"tensorflow.rsqrt",
"tensorflow.get_variable_scope"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
bupt-ipcr/RL4Net | [
"b1b694361c688f5e0055148a0cdcb4c6253cd7bd"
] | [
"rl4net/envs/power_allocation/test_pa_rb_env.py"
] | [
"from .pa_rb_env import (\n PAEnv,\n Node\n)\nimport numpy as np\nfrom pathlib import Path\n\nlog2 = np.log2\n\ncues = {\n 0: Node(0.1, 0, 'cue'),\n 1: Node(-0.1, 0, 'cue'),\n}\ndevices = {\n 0: {\n 't_device': Node(0, 0.5, 't_device'),\n 'r_devices': {\n 0: Node(0, 0.6, 'r_device')\n }\n },\n 1: {\n 't_device': Node(0, -0.5, 't_device'),\n 'r_devices': {\n 0: Node(0, -0.6, 'r_device')\n }\n }\n}\n\n\ndef equal(unit, target):\n tolerance = 1e-6 * np.ones_like(target)\n return (np.abs(unit - target) < tolerance).all()\n\n\ndef test_init_pos():\n \"\"\"test position constraint\"\"\"\n env = PAEnv(n_level=4)\n\n def dis(node, target):\n return np.sqrt(\n (node.x - target.x) ** 2 +\n (node.y - target.y) ** 2\n )\n # test bs cues\n assert all(\n env.r_bs <= dis(usr, env.station) <= env.R_bs\n for usr in env.cues.values()\n )\n\n # test devices\n for cluster in env.devices.values():\n t_device, r_devices = cluster['t_device'], cluster['r_devices']\n\n assert env.r_bs <= dis(t_device, env.station) <= (\n env.R_bs - env.R_dev)\n assert all(\n env.r_dev <= dis(r_device, t_device) <= env.R_dev\n for r_device in r_devices.values()\n )\n\n\ndef test_jakes():\n # TODO test stastic features of jakes\n # target_std, target_mean = 0.429, 1.253 # Rayleigh Distribution\n\n # x_len, y_len, Ns = H_set.shape\n # h_std = np.mean([\n # H_set[x, y, :].std()\n # for x in range(x_len)\n # for y in range(y_len)\n # ])\n # assert (h_std - target_std) / target_std < 0.1\n\n # h_mean = np.mean([\n # H_set[x, y, :].mean()\n # for x in range(x_len)\n # for y in range(y_len)\n # ])\n # assert (h_mean - target_mean) / target_mean < 0.05\n pass\n\n\ndef test_init_path_loss():\n \"\"\"test distance, since lognormal is random\"\"\"\n env = PAEnv(n_level=4, n_pair=2, m_cue=2)\n env.cues = cues\n env.devices = devices\n env.init_path_loss()\n distance_matrix = env.distance_matrix\n target_dis = np.array(\n [\n [0.1, 1.1, np.sqrt(0.26), np.sqrt(0.26), 0.5],\n [1.1, 0.1, np.sqrt(0.26), np.sqrt(0.26), 0.5],\n [0.6, 0.6, 0.1, 0.1, 0.503],\n [np.sqrt(0.37), np.sqrt(0.37), 0.503, 0.2, 0.1],\n [np.sqrt(0.37), np.sqrt(0.37), 0.2, 0.503, 0.1],\n ]\n )\n assert equal(distance_matrix, target_dis)\n\n\ndef test_get_recv_powers():\n \"\"\"test get_recv_powers\"\"\"\n env = PAEnv(n_level=4, n_pair=2, m_cue=1)\n power = np.array([\n [0.01, 0],\n [0, 0.01],\n [0.1, 0],\n [0, 0.1],\n ])\n emit_powers = np.tile(np.expand_dims(power, axis=1),\n (1, env.n_channel, 1))\n fading = np.array([\n [1.1e-2, 1.2e-2, 1.3e-2, 1.4e-2],\n [2.1e-2, 2.2e-2, 2.3e-2, 2.4e-2],\n [3.1e-2, 3.2e-2, 3.3e-2, 3.4e-2],\n [4.1e-2, 4.2e-2, 4.3e-2, 4.4e-2],\n ])\n recv_powers = env.get_recv_powers(emit_powers, fading)\n target_recv_powers = np.array([\n [[1.1e-4, 0], [1.2e-4, 0], [1.3e-4, 0], [1.4e-4, 0]],\n [[0, 2.1e-4], [0, 2.2e-4], [0, 2.3e-4], [0, 2.4e-4]],\n [[3.1e-3, 0], [3.2e-3, 0], [3.3e-3, 0], [3.4e-3, 0]],\n [[0, 4.1e-3], [0, 4.2e-3], [0, 4.3e-3], [0, 4.4e-3]],\n ])\n assert equal(recv_powers, target_recv_powers)\n\n\ndef test_get_rates():\n \"\"\"test get_rates\"\"\"\n env = PAEnv(n_level=4, n_pair=2, m_cue=1)\n recv_powers = np.array([\n [[1.1e-4, 0], [1.2e-4, 0], [1.3e-4, 0], [1.4e-4, 0]],\n [[0, 2.1e-4], [0, 2.2e-4], [0, 2.3e-4], [0, 2.4e-4]],\n [[3.1e-3, 0], [3.2e-3, 0], [3.3e-3, 0], [3.4e-3, 0]],\n [[0, 4.1e-3], [0, 4.2e-3], [0, 4.3e-3], [0, 4.4e-3]],\n ])\n rates = env.get_rates(recv_powers)\n _rate = np.array([\n log2(1+1.1/31), log2(1+2.2/42), log2(1+33/1.3), log2(1+44/2.4)\n ])\n target_rates = (_rate * np.ones((env.n_channel, env.n_channel))).T\n assert 
equal(rates, target_rates)\n\n\ndef test_get_indices():\n \"\"\"test get_indices\"\"\"\n env = PAEnv(n_level=4, n_pair=2, m_cue=1, sorter=\"recv_power\",\n m_state=2)\n power = np.array([\n [0.01, 0],\n [0, 0.01],\n [0.1, 0],\n [0, 0.1],\n ])\n emit_powers = np.tile(np.expand_dims(power, axis=1),\n (1, env.n_channel, 1))\n fading = np.array([\n [1.1e-2, 1.2e-2, 1.3e-2, 1.4e-2],\n [2.1e-2, 2.2e-2, 2.3e-2, 2.4e-2],\n [3.1e-2, 3.2e-2, 3.3e-2, 3.4e-2],\n [4.1e-2, 4.2e-2, 4.3e-2, 4.4e-2],\n ])\n recv_powers = env.get_recv_powers(emit_powers, fading)\n rates = env.get_rates(recv_powers)\n metrics = emit_powers, recv_powers, rates, fading\n # rx_indice don't need test\n tx_indice, rx_indice = env.get_indices(*metrics)\n target_tx_indice = np.array([\n [3, 3, 3, 2],\n [0, 1, 2, 3]\n ])\n assert equal(tx_indice, target_tx_indice)\n\n\ndef test_get_rewards():\n env = PAEnv(n_level=4, n_pair=2, m_cue=1, sorter=\"recv_power\",\n m_state=2)\n power = np.array([\n [0.01, 0],\n [0, 0.01],\n [0.1, 0],\n [0, 0.1],\n ])\n emit_powers = np.tile(np.expand_dims(power, axis=1),\n (1, env.n_channel, 1))\n fading = np.array([\n [1.1e-2, 1.2e-2, 1.3e-2, 1.4e-2],\n [2.1e-2, 2.2e-2, 2.3e-2, 2.4e-2],\n [3.1e-2, 3.2e-2, 3.3e-2, 3.4e-2],\n [4.1e-2, 4.2e-2, 4.3e-2, 4.4e-2],\n ])\n recv_powers = env.get_recv_powers(emit_powers, fading)\n rates = env.get_rates(recv_powers)\n\n metrics = emit_powers, recv_powers, rates, fading\n indices = env.get_indices(*metrics)\n rewards = env.get_rewards(rates, indices)\n target_rewards = np.array([\n log2(1+1.1/31) + log2(1+44/2.4),\n log2(1+2.2/42) + log2(1+44/2.4),\n log2(1+33/1.3) + log2(1+44/2.4),\n log2(1+44/2.4) + log2(1+33/1.3),\n ])[:2]\n\n assert equal(rewards, target_rewards)\n\n\ndef test_get_states():\n # test m_state\n env = PAEnv(n_level=4, n_pair=2, m_cue=1,\n m_state=8, metrics=['emit_power', 'recv_power', 'rate'],\n sorter='recv_power')\n assert env.m_state == 4\n\n env = PAEnv(n_level=4, n_pair=2, m_cue=1,\n m_state=2, metrics=['emit_power', 'recv_power', 'rate'],\n sorter='recv_power')\n power = np.array([\n [0.01, 0],\n [0, 0.01],\n [0.1, 0],\n [0, 0.1],\n ])\n emit_powers = np.tile(np.expand_dims(power, axis=1),\n (1, env.n_channel, 1))\n fading = np.array([\n [1.1e-2, 1.2e-2, 1.3e-2, 1.4e-2],\n [2.1e-2, 2.2e-2, 2.3e-2, 2.4e-2],\n [3.1e-2, 3.2e-2, 3.3e-2, 3.4e-2],\n [4.1e-2, 4.2e-2, 4.3e-2, 4.4e-2],\n ])\n recv_powers = env.get_recv_powers(emit_powers, fading)\n rates = env.get_rates(recv_powers)\n\n metrics = emit_powers, recv_powers, rates, fading\n indices = env.get_indices(*metrics)\n states = env.get_states(*metrics, indices=indices)\n _recv = np.array([\n [[1.1e-4, 0], [1.2e-4, 0], [1.3e-4, 0], [1.4e-4, 0]],\n [[0, 2.1e-4], [0, 2.2e-4], [0, 2.3e-4], [0, 2.4e-4]],\n [[3.1e-3, 0], [3.2e-3, 0], [3.3e-3, 0], [3.4e-3, 0]],\n [[0, 4.1e-3], [0, 4.2e-3], [0, 4.3e-3], [0, 4.4e-3]],\n ])\n _rate = np.array([\n log2(1+1.1/31), log2(1+2.2/42), log2(1+33/1.3), log2(1+44/2.4)\n ])\n target_states = np.array([\n np.concatenate([power[3],power[0],_recv[3][0],_recv[0][0],[_rate[3], _rate[0]]]),\n np.concatenate([power[3],power[1],_recv[3][1],_recv[1][1],[_rate[3], _rate[1]]]),\n np.concatenate([power[3],power[2],_recv[3][2],_recv[2][2],[_rate[3], _rate[2]]]),\n np.concatenate([power[2],power[3],_recv[2][3],_recv[3][3],[_rate[2], _rate[3]]]),\n ])[:2]\n assert equal(states, target_states)\n\n\ndef test_sorter():\n # now only recv_power can be sorter\n pass\n\n\ndef test_seed():\n env = PAEnv(n_level=4, m_cue=1, seed=123)\n # this is func in PAEnv to random pos\n\n def 
random_point(min_r, radius, ox=0, oy=0):\n theta = np.random.random() * 2 * np.pi\n r = np.random.uniform(min_r, radius**2)\n x, y = np.cos(theta) * np.sqrt(r), np.sin(theta) * np.sqrt(r)\n return ox + x, oy + y\n np.random.seed(123)\n target_x, target_y = random_point(env.r_bs, env.R_bs)\n usr = env.cues[0]\n assert all((target_x == usr.x, target_y == usr.y))\n\n\ndef test_action():\n env = PAEnv(n_level=10, seed=799345)\n n_actions = env.n_actions\n n_channel, n_pair = env.n_channel, env.n_pair\n # normal\n env.reset()\n np.random.seed(799345)\n action = np.random.randint(0, n_actions, (n_channel, ))\n s_, r, d, i = env.step(action, unit='dBm')\n assert i['rate'] == 3.4741923099965257\n # only D2D actions is enough\n env.reset()\n np.random.seed(799345)\n action = np.random.randint(0, n_actions, (n_pair, ))\n s_, r, d, i = env.step(action, unit='dBm')\n assert i['rate'] == 3.4741923099965257 \n # other action dim raises error\n env.reset()\n np.random.seed(799345)\n action = np.random.randint(0, n_actions, (n_pair - 1, ))\n try:\n s_, r, d, i = env.step(action, unit='dBm')\n except ValueError as e:\n msg = f\"length of action should be n_channel({env.n_channel})\" \\\n f\" or n_pair({n_pair}), but is {len(action)}\"\n assert e.args[0] == msg\n\n env.reset()\n np.random.seed(799345)\n action = np.random.randint(0, n_actions, (n_channel, ))\n s_, r, d, i = env.step(action, unit='mW')\n assert i['rate'] == 3.4928823957853856\n # TODO add test of continuous action\n\n\ndef test_step():\n env = PAEnv(n_level=10)\n n_actions, n_states = env.n_actions, env.n_states\n assert n_actions == 40\n assert n_states == 304\n env.reset()\n action = env.sample()\n env.step(action, unit='dBm')\n # action = env.sample()\n action = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0])\n env.step(action, unit='mW')\n action = env.sample()\n try:\n env.step(action, unit='xx')\n except ValueError as e:\n msg = f\"unit should in ['dBm', 'mW'], but is xx\"\n assert e.args[0] == msg\n fig: Path() = env.render()\n if fig.exists():\n fig.unlink()\n\n\nif __name__ == '__main__':\n test_action()\n"
] | [
[
"numpy.expand_dims",
"numpy.ones_like",
"numpy.sqrt",
"numpy.random.seed",
"numpy.abs",
"numpy.random.random",
"numpy.cos",
"numpy.ones",
"numpy.concatenate",
"numpy.sin",
"numpy.random.uniform",
"numpy.array",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Chicco94/crypto-bot | [
"edbc22477544a25d8eb0c90cdd5f03345f11db68"
] | [
"src/trainer.py"
] | [
"from sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\nimport sqlalchemy\nfrom config.config import symbol,backward_steps\nimport joblib\nfrom df_functions import *\n\ndef prepare_single_dataset(df,remove_from_heads:int,remove_from_tails:int,label:int):\n df_copy = df.copy()\n for _ in range(remove_from_tails):\n remove_row_from_tail(df_copy)\n for _ in range(remove_from_heads):\n remove_row_from_head(df_copy)\n add_id(df_copy)\n df_copy.time = df_copy.time.apply(lambda x: x.value)\n df_copy.rename(columns={\"time\": \"time{}\".format(label)\n , \"price\": \"price{}\".format(label)\n , \"quantity\":\"quantity{}\".format(label)}\n ,inplace=True)\n df_copy.drop(columns=['symbol'],inplace=True)\n return df_copy\n \n\ndef prepare_dataset(df,steps:int):\n datasets = []\n for i in range(1,steps):\n datasets.append(prepare_single_dataset(df,steps-i,i-1,i))\n df_target = prepare_single_dataset(df,0,steps-1,steps)\n\n result = datasets.pop()\n while len(datasets)>0:\n result = pd.merge(result, datasets.pop(), on=\"ID\")\n\n target = df_target['price{}'.format(steps)]\n return result,target\n\ndef main():\n # open database\n engine = sqlalchemy.create_engine('sqlite:///data/{}_stream.db'.format(symbol))\n df = pd.read_sql(symbol,engine)\n # prepare dataset\n source,target = prepare_dataset(df,backward_steps)\n # train model\n model = LinearRegression()\n X_train,X_test,y_train,y_test = train_test_split(source,target,test_size=0.33)\n model.fit(X_train,y_train)\n # evaluate model\n score = model.score(X_test,y_test)\n print('score: ',score)\n # save model\n filename = 'models/model_{}.sav'.format(score)\n joblib.dump(model, filename)\n\n #model = joblib.load(filename)\n\nif __name__=='__main__':\n main() "
] | [
[
"sklearn.model_selection.train_test_split",
"sklearn.linear_model.LinearRegression",
"pandas.read_sql"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
robinzixuan/Dialog_Act_Bert_Classification | [
"014cc8df0545e5bf85a22127e63e8490f3aa9012"
] | [
"data & result/history.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jun 30 20:49:32 2019\n\n@author: rluo\n\"\"\"\n\nimport keras\nimport matplotlib.pyplot as plt\nfrom keras.models import load_model\nimport pickle\n\nhistory = pickle.load(open('history.p','rb'))\nplt.plot(history['loss'])\n#plt.plot(history['val_loss'])\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('Epoch')\nplt.legend(['train', 'test'], loc='upper left');\nplt.plot(history['acc'])\n#plt.plot(history['val_acc'])\nplt.title('model accuracy')\nplt.ylabel('acc')\nplt.xlabel('Epoch')\nplt.legend(['train', 'test'], loc='upper left');\n\n\n\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kareem1925/qiskit-aqua | [
"7056f9bdd9ece32c41e162faecdcd24cf483da6f",
"7056f9bdd9ece32c41e162faecdcd24cf483da6f",
"7056f9bdd9ece32c41e162faecdcd24cf483da6f"
] | [
"test/optimization/test_vertex_cover.py",
"qiskit/aqua/operators/evolutions/pauli_trotter_evolution.py",
"qiskit/aqua/operators/primitive_ops/pauli_op.py"
] | [
"# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2018, 2020.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\" Test Vertex Cover \"\"\"\n\nimport unittest\nfrom test.optimization import QiskitOptimizationTestCase\nimport numpy as np\nfrom qiskit import BasicAer\n\nfrom qiskit.aqua import aqua_globals, QuantumInstance\nfrom qiskit.optimization.applications.ising import vertex_cover\nfrom qiskit.optimization.applications.ising.common import random_graph, sample_most_likely\nfrom qiskit.aqua.algorithms import NumPyMinimumEigensolver, VQE\nfrom qiskit.aqua.components.variational_forms import RYRZ\nfrom qiskit.aqua.components.optimizers import SPSA\n\n\nclass TestVertexCover(QiskitOptimizationTestCase):\n \"\"\"Cplex Ising tests.\"\"\"\n\n def setUp(self):\n super().setUp()\n self.seed = 100\n aqua_globals.random_seed = self.seed\n self.num_nodes = 3\n self.w = random_graph(self.num_nodes, edge_prob=0.8, weight_range=10)\n self.qubit_op, self.offset = vertex_cover.get_operator(self.w)\n\n def _brute_force(self):\n # brute-force way\n def bitfield(n, length):\n result = np.binary_repr(n, length)\n return [int(digit) for digit in result] # [2:] to chop off the \"0b\" part\n\n nodes = self.num_nodes\n maximum = 2 ** nodes\n minimal_v = np.inf\n for i in range(maximum):\n cur = bitfield(i, nodes)\n\n cur_v = vertex_cover.check_full_edge_coverage(np.array(cur), self.w)\n if cur_v:\n nonzerocount = np.count_nonzero(cur)\n if nonzerocount < minimal_v:\n minimal_v = nonzerocount\n\n return minimal_v\n\n def test_vertex_cover(self):\n \"\"\" Vertex Cover test \"\"\"\n algo = NumPyMinimumEigensolver(self.qubit_op, aux_operators=[])\n result = algo.run()\n x = sample_most_likely(result.eigenstate)\n sol = vertex_cover.get_graph_solution(x)\n np.testing.assert_array_equal(sol, [0, 1, 1])\n oracle = self._brute_force()\n self.assertEqual(np.count_nonzero(sol), oracle)\n\n def test_vertex_cover_vqe(self):\n \"\"\" Vertex Cover VQE test \"\"\"\n aqua_globals.random_seed = self.seed\n\n result = VQE(self.qubit_op,\n RYRZ(self.qubit_op.num_qubits, depth=3),\n SPSA(max_trials=200),\n max_evals_grouped=2).run(\n QuantumInstance(BasicAer.get_backend('qasm_simulator'),\n seed_simulator=aqua_globals.random_seed,\n seed_transpiler=aqua_globals.random_seed))\n\n x = sample_most_likely(result['eigvecs'][0])\n sol = vertex_cover.get_graph_solution(x)\n oracle = self._brute_force()\n self.assertEqual(np.count_nonzero(sol), oracle)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2020.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\" PauliTrotterEvolution Class \"\"\"\n\nfrom typing import Optional, Union\nimport logging\nimport numpy as np\n\nfrom ..operator_base import OperatorBase\nfrom ..operator_globals import Z, I\nfrom .evolution_base import EvolutionBase\nfrom ..list_ops.list_op import ListOp\nfrom ..list_ops.summed_op import SummedOp\nfrom ..primitive_ops.pauli_op import PauliOp\nfrom ..primitive_ops.primitive_op import PrimitiveOp\nfrom ..converters.pauli_basis_change import PauliBasisChange\n# TODO uncomment when we implement Abelian grouped evolution.\n# from ..converters.abelian_grouper import AbelianGrouper\nfrom .evolved_op import EvolvedOp\nfrom .trotterizations.trotterization_base import TrotterizationBase\nfrom .trotterizations.trotterization_factory import TrotterizationFactory\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass PauliTrotterEvolution(EvolutionBase):\n r\"\"\"\n An Evolution algorithm replacing exponentiated sums of Paulis by changing them each to the\n Z basis, rotating with an rZ, changing back, and trotterizing.\n\n More specifically, we compute basis change circuits for each Pauli into a single-qubit Z,\n evolve the Z by the desired evolution time with an rZ gate, and change the basis back using\n the adjoint of the original basis change circuit. For sums of Paulis, the individual Pauli\n evolution circuits are composed together by Trotterization scheme.\n \"\"\"\n\n def __init__(self,\n trotter_mode: Optional[Union[str, TrotterizationBase]] = 'trotter',\n reps: Optional[int] = 1,\n # TODO uncomment when we implement Abelian grouped evolution.\n # group_paulis: Optional[bool] = False\n ) -> None:\n \"\"\"\n Args:\n trotter_mode: A string ('trotter', 'suzuki', or 'qdrift') to pass to the\n TrotterizationFactory, or a TrotterizationBase, indicating how to combine\n individual Pauli evolution circuits to equal the exponentiation of the Pauli sum.\n reps: How many Trotterization repetitions to make, to improve the approximation\n accuracy.\n # TODO uncomment when we implement Abelian grouped evolution.\n # group_paulis: Whether to group Pauli sums into Abelian\n # sub-groups, so a single diagonalization circuit can be used for each group\n # rather than each Pauli.\n \"\"\"\n\n if isinstance(trotter_mode, TrotterizationBase):\n self._trotter = trotter_mode\n else:\n self._trotter = TrotterizationFactory.build(mode=trotter_mode, reps=reps)\n\n # TODO uncomment when we implement Abelian grouped evolution.\n # self._grouper = AbelianGrouper() if group_paulis else None\n\n @property\n def trotter(self) -> TrotterizationBase:\n \"\"\" TrotterizationBase used to evolve SummedOps. \"\"\"\n return self._trotter\n\n @trotter.setter\n def trotter(self, trotter: TrotterizationBase) -> None:\n \"\"\" Set TrotterizationBase used to evolve SummedOps. 
\"\"\"\n self._trotter = trotter\n\n def convert(self, operator: OperatorBase) -> OperatorBase:\n r\"\"\"\n Traverse the operator, replacing ``EvolvedOps`` with ``CircuitOps`` containing\n trotterized evolutions equalling the exponentiation of -i * operator.\n\n Args:\n operator: The Operator to convert.\n\n Returns:\n The converted operator.\n \"\"\"\n # TODO uncomment when we implement Abelian grouped evolution.\n # if self._grouper:\n # # Sort into commuting groups\n # operator = self._grouper.convert(operator).reduce()\n return self._recursive_convert(operator)\n\n def _recursive_convert(self, operator: OperatorBase) -> OperatorBase:\n if isinstance(operator, EvolvedOp):\n if not {'Pauli'} == operator.primitive_strings():\n logger.warning('Evolved Hamiltonian is not composed of only Paulis, converting to '\n 'Pauli representation, which can be expensive.')\n # Setting massive=False because this conversion is implicit. User can perform this\n # action on the Hamiltonian with massive=True explicitly if they so choose.\n # TODO explore performance to see whether we should avoid doing this repeatedly\n pauli_ham = operator.primitive.to_pauli_op(massive=False)\n operator = EvolvedOp(pauli_ham, coeff=operator.coeff)\n\n if isinstance(operator.primitive, SummedOp):\n # TODO uncomment when we implement Abelian grouped evolution.\n # if operator.primitive.abelian:\n # return self.evolution_for_abelian_paulisum(operator.primitive)\n # else:\n trotterized = self.trotter.convert(operator.primitive)\n return self._recursive_convert(trotterized)\n elif isinstance(operator.primitive, PauliOp):\n return self.evolution_for_pauli(operator.primitive)\n # Covers ListOp, ComposedOp, TensoredOp\n elif isinstance(operator.primitive, ListOp):\n converted_ops = [self._recursive_convert(op) for op in operator.primitive.oplist]\n return operator.primitive.__class__(converted_ops, coeff=operator.coeff)\n elif isinstance(operator, ListOp):\n return operator.traverse(self.convert).reduce()\n\n return operator\n\n def evolution_for_pauli(self, pauli_op: PauliOp) -> PrimitiveOp:\n r\"\"\"\n Compute evolution Operator for a single Pauli using a ``PauliBasisChange``.\n\n Args:\n pauli_op: The ``PauliOp`` to evolve.\n\n Returns:\n A ``PrimitiveOp``, either the evolution ``CircuitOp`` or a ``PauliOp`` equal to the\n identity if pauli_op is the identity.\n \"\"\"\n\n def replacement_fn(cob_instr_op, dest_pauli_op):\n z_evolution = dest_pauli_op.exp_i()\n # Remember, circuit composition order is mirrored operator composition order.\n return cob_instr_op.adjoint().compose(z_evolution).compose(cob_instr_op)\n\n # Note: PauliBasisChange will pad destination with identities\n # to produce correct CoB circuit\n sig_bits = np.logical_or(pauli_op.primitive.z, pauli_op.primitive.x)\n a_sig_bit = int(max(np.extract(sig_bits, np.arange(pauli_op.num_qubits)[::-1])))\n destination = (I.tensorpower(a_sig_bit)) ^ (Z * pauli_op.coeff)\n cob = PauliBasisChange(destination_basis=destination, replacement_fn=replacement_fn)\n return cob.convert(pauli_op)\n\n # TODO implement Abelian grouped evolution.\n def evolution_for_abelian_paulisum(self, op_sum: SummedOp) -> PrimitiveOp:\n \"\"\" Evolution for abelian pauli sum \"\"\"\n raise NotImplementedError\n",
"# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2020.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\" PauliOp Class \"\"\"\n\nfrom typing import Union, Optional, Set\nimport logging\nimport numpy as np\nfrom scipy.sparse import spmatrix\n\nfrom qiskit import QuantumCircuit\nfrom qiskit.circuit import ParameterExpression, Instruction\nfrom qiskit.quantum_info import Pauli\nfrom qiskit.circuit.library import RZGate, RYGate, RXGate, XGate, YGate, ZGate, IGate\n\nfrom ..operator_base import OperatorBase\nfrom .primitive_op import PrimitiveOp\nfrom ..list_ops.summed_op import SummedOp\nfrom ..list_ops.composed_op import ComposedOp\nfrom ..list_ops.tensored_op import TensoredOp\nfrom ..legacy.weighted_pauli_operator import WeightedPauliOperator\n\nlogger = logging.getLogger(__name__)\nPAULI_GATE_MAPPING = {'X': XGate(), 'Y': YGate(), 'Z': ZGate(), 'I': IGate()}\n\n\nclass PauliOp(PrimitiveOp):\n \"\"\" Class for Operators backed by Terra's ``Pauli`` module.\n\n \"\"\"\n\n def __init__(self,\n primitive: Union[Pauli] = None,\n coeff: Optional[Union[int, float, complex, ParameterExpression]] = 1.0) -> None:\n \"\"\"\n Args:\n primitive: The Pauli which defines the behavior of the underlying function.\n coeff: A coefficient multiplying the primitive.\n\n Raises:\n TypeError: invalid parameters.\n \"\"\"\n if not isinstance(primitive, Pauli):\n raise TypeError(\n 'PauliOp can only be instantiated with Paulis, not {}'.format(type(primitive)))\n super().__init__(primitive, coeff=coeff)\n\n def primitive_strings(self) -> Set[str]:\n return {'Pauli'}\n\n @property\n def num_qubits(self) -> int:\n return len(self.primitive)\n\n def add(self, other: OperatorBase) -> OperatorBase:\n if not self.num_qubits == other.num_qubits:\n raise ValueError(\n 'Sum over operators with different numbers of qubits, {} and {}, is not well '\n 'defined'.format(self.num_qubits, other.num_qubits))\n\n if isinstance(other, PauliOp) and self.primitive == other.primitive:\n return PauliOp(self.primitive, coeff=self.coeff + other.coeff)\n\n return SummedOp([self, other])\n\n def adjoint(self) -> OperatorBase:\n return PauliOp(self.primitive, coeff=np.conj(self.coeff))\n\n def equals(self, other: OperatorBase) -> bool:\n if not isinstance(other, PauliOp) or not self.coeff == other.coeff:\n return False\n\n return self.primitive == other.primitive\n\n def tensor(self, other: OperatorBase) -> OperatorBase:\n # Both Paulis\n if isinstance(other, PauliOp):\n # Copying here because Terra's Pauli kron is in-place.\n op_copy = Pauli(x=other.primitive.x, z=other.primitive.z)\n # NOTE!!! 
REVERSING QISKIT ENDIANNESS HERE\n return PauliOp(op_copy.kron(self.primitive), coeff=self.coeff * other.coeff)\n\n # pylint: disable=cyclic-import,import-outside-toplevel\n from .circuit_op import CircuitOp\n if isinstance(other, CircuitOp):\n return self.to_circuit_op().tensor(other)\n\n return TensoredOp([self, other])\n\n def compose(self, other: OperatorBase) -> OperatorBase:\n other = self._check_zero_for_composition_and_expand(other)\n\n # If self is identity, just return other.\n if not any(self.primitive.x + self.primitive.z):\n return other * self.coeff\n\n # Both Paulis\n if isinstance(other, PauliOp):\n product, phase = Pauli.sgn_prod(self.primitive, other.primitive)\n return PrimitiveOp(product, coeff=self.coeff * other.coeff * phase)\n\n # pylint: disable=cyclic-import,import-outside-toplevel\n from .circuit_op import CircuitOp\n from ..state_fns.circuit_state_fn import CircuitStateFn\n if isinstance(other, (CircuitOp, CircuitStateFn)):\n return self.to_circuit_op().compose(other)\n\n return ComposedOp([self, other])\n\n def to_matrix(self, massive: bool = False) -> np.ndarray:\n if self.num_qubits > 16 and not massive:\n raise ValueError(\n 'to_matrix will return an exponentially large matrix, '\n 'in this case {0}x{0} elements.'\n ' Set massive=True if you want to proceed.'.format(2 ** self.num_qubits))\n\n return self.primitive.to_matrix() * self.coeff\n\n def to_spmatrix(self) -> spmatrix:\n \"\"\" Returns SciPy sparse matrix representation of the Operator.\n\n Returns:\n CSR sparse matrix representation of the Operator.\n\n Raises:\n ValueError: invalid parameters.\n \"\"\"\n return self.primitive.to_spmatrix() * self.coeff\n\n def __str__(self) -> str:\n prim_str = str(self.primitive)\n if self.coeff == 1.0:\n return prim_str\n else:\n return \"{} * {}\".format(self.coeff, prim_str)\n\n def eval(self,\n front: Union[str, dict, np.ndarray,\n OperatorBase] = None) -> Union[OperatorBase, float, complex]:\n if front is None:\n return self.to_matrix_op()\n\n # pylint: disable=import-outside-toplevel,cyclic-import\n from ..state_fns.state_fn import StateFn\n from ..state_fns.dict_state_fn import DictStateFn\n from ..state_fns.circuit_state_fn import CircuitStateFn\n from ..list_ops.list_op import ListOp\n from .circuit_op import CircuitOp\n\n new_front = None\n\n # For now, always do this. 
If it's not performant, we can be more granular.\n if not isinstance(front, OperatorBase):\n front = StateFn(front, is_measurement=False)\n\n if isinstance(front, ListOp) and front.distributive:\n new_front = front.combo_fn([self.eval(front.coeff * front_elem)\n for front_elem in front.oplist])\n\n elif isinstance(front, DictStateFn):\n new_dict = {}\n corrected_x_bits = self.primitive.x[::-1]\n corrected_z_bits = self.primitive.z[::-1]\n\n for bstr, v in front.primitive.items():\n bitstr = np.asarray(list(bstr)).astype(np.int).astype(np.bool)\n new_b_str = np.logical_xor(bitstr, corrected_x_bits)\n new_str = ''.join(map(str, 1 * new_b_str))\n z_factor = np.product(1 - 2 * np.logical_and(bitstr, corrected_z_bits))\n y_factor = np.product(np.sqrt(1 - 2 * np.logical_and(corrected_x_bits,\n corrected_z_bits) + 0j))\n new_dict[new_str] = (v * z_factor * y_factor) + new_dict.get(new_str, 0)\n new_front = StateFn(new_dict, coeff=self.coeff * front.coeff)\n\n elif isinstance(front, StateFn) and front.is_measurement:\n raise ValueError('Operator composed with a measurement is undefined.')\n\n # Composable types with PauliOp\n elif isinstance(front, (PauliOp, CircuitOp, CircuitStateFn)):\n new_front = self.compose(front)\n\n # Covers VectorStateFn and OperatorStateFn\n elif isinstance(front, OperatorBase):\n new_front = self.to_matrix_op().eval(front.to_matrix_op())\n\n return new_front\n\n def exp_i(self) -> OperatorBase:\n \"\"\" Return a ``CircuitOp`` equivalent to e^-iH for this operator H. \"\"\"\n # if only one qubit is significant, we can perform the evolution\n corrected_x = self.primitive.x[::-1]\n corrected_z = self.primitive.z[::-1]\n # pylint: disable=import-outside-toplevel,no-member\n sig_qubits = np.logical_or(corrected_x, corrected_z)\n if np.sum(sig_qubits) == 0:\n # e^I is just a global phase, but we can keep track of it! 
Should we?\n # For now, just return identity\n return PauliOp(self.primitive)\n if np.sum(sig_qubits) == 1:\n sig_qubit_index = sig_qubits.tolist().index(True)\n # Y rotation\n if corrected_x[sig_qubit_index] and corrected_z[sig_qubit_index]:\n rot_op = PrimitiveOp(RYGate(self.coeff))\n # Z rotation\n elif corrected_z[sig_qubit_index]:\n rot_op = PrimitiveOp(RZGate(self.coeff))\n # X rotation\n elif corrected_x[sig_qubit_index]:\n rot_op = PrimitiveOp(RXGate(self.coeff))\n\n from ..operator_globals import I\n left_pad = I.tensorpower(sig_qubit_index)\n right_pad = I.tensorpower(self.num_qubits - sig_qubit_index - 1)\n # Need to use overloaded operators here in case left_pad == I^0\n return left_pad ^ rot_op ^ right_pad\n else:\n from ..evolutions.evolved_op import EvolvedOp\n return EvolvedOp(self)\n\n def __hash__(self) -> int:\n # Need this to be able to easily construct AbelianGraphs\n return id(self)\n\n def commutes(self, other_op: OperatorBase) -> bool:\n \"\"\" Returns whether self commutes with other_op.\n\n Args:\n other_op: An ``OperatorBase`` with which to evaluate whether self commutes.\n\n Returns:\n A bool equaling whether self commutes with other_op\n\n \"\"\"\n if not isinstance(other_op, PauliOp):\n return False\n # Don't use compose because parameters will break this\n self_bits = self.primitive.z.astype(int) + 2 * self.primitive.x.astype(int)\n other_bits = other_op.primitive.z.astype(int) + 2 * other_op.primitive.x.astype(int)\n return all((self_bits * other_bits) * (self_bits - other_bits) == 0)\n\n def to_circuit(self) -> QuantumCircuit:\n # If Pauli equals identity, don't skip the IGates\n is_identity = sum(self.primitive.x + self.primitive.z) == 0\n\n # Note: Reversing endianness!!\n qc = QuantumCircuit(len(self.primitive))\n for q, pauli_str in enumerate(reversed(self.primitive.to_label())):\n gate = PAULI_GATE_MAPPING[pauli_str]\n if not pauli_str == 'I' or is_identity:\n qc.append(gate, qargs=[q])\n return qc\n\n def to_instruction(self) -> Instruction:\n # TODO should we just do the following because performance of adding and deleting IGates\n # doesn't matter?\n # (Reduce removes extra IGates).\n # return PrimitiveOp(self.primitive.to_instruction(), coeff=self.coeff).reduce()\n\n return self.to_circuit().to_instruction()\n\n def to_pauli_op(self, massive: bool = False) -> OperatorBase:\n return self\n\n def to_legacy_op(self, massive: bool = False) -> WeightedPauliOperator:\n if isinstance(self.coeff, ParameterExpression):\n try:\n coeff = float(self.coeff)\n except TypeError:\n raise TypeError('Cannot convert Operator with unbound parameter {} to Legacy '\n 'Operator'.format(self.coeff))\n else:\n coeff = self.coeff\n return WeightedPauliOperator(paulis=[(coeff, self.primitive)])\n"
] | [
[
"numpy.testing.assert_array_equal",
"numpy.binary_repr",
"numpy.array",
"numpy.count_nonzero"
],
[
"numpy.logical_or",
"numpy.arange"
],
[
"numpy.logical_xor",
"numpy.conj",
"numpy.logical_or",
"numpy.logical_and",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
oliverfaustino/NRPG-DataManager | [
"71064cb79be304f712aabcceebd6647121d2cb6c"
] | [
"modulos/utils.py"
] | [
"import pyperclip\nimport pandas as pd\n\nfrom modulos.conecao import *\n\n\n\ndef copiar(objeto): # função para copiar os objetos para área de transferência\n global copiar # para resolver o porblema UnboundLocalError: local variable 'copiar' referenced before assignment:\n opcao = int(input('Deseja copiar para área de transferência? \"1\" para sim e qualquer tecla para não\\n\\nR: ')) \n if opcao == 1:\n copiar = pyperclip.copy(objeto)\n print('\\nCopiado com sucesso!') \n else:\n pass\n return copiar\n\n\n\n\ndef select(sql): # função que decta qual tipo de ação eu desejo fazer\n try:\n df = pd.read_sql_query(sql, con=engine).to_string(index=False)\n \n finally:\n pass\n\n return df\n"
] | [
[
"pandas.read_sql_query"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
ljch2018/allennlp | [
"63ba3fb28897578d4798039d1713e2b7995eb753"
] | [
"allennlp/models/semantic_parsing/atis/atis_semantic_parser.py"
] | [
"import logging\nfrom typing import Any, Dict, List, Tuple\n\nimport difflib\nimport sqlparse\nfrom overrides import overrides\nimport torch\n\nfrom allennlp.common.util import pad_sequence_to_length\nfrom allennlp.data import Vocabulary\nfrom allennlp.data.fields.production_rule_field import ProductionRuleArray\nfrom allennlp.semparse.executors import SqlExecutor\nfrom allennlp.models.model import Model\nfrom allennlp.modules import Attention, Seq2SeqEncoder, TextFieldEmbedder, \\\n Embedding\nfrom allennlp.nn import util\nfrom allennlp.semparse.worlds import AtisWorld\nfrom allennlp.semparse.contexts.sql_context_utils import action_sequence_to_sql\nfrom allennlp.state_machines.states import GrammarBasedState\nfrom allennlp.state_machines.transition_functions.linking_transition_function import LinkingTransitionFunction\nfrom allennlp.state_machines import BeamSearch\nfrom allennlp.state_machines.trainers import MaximumMarginalLikelihood\nfrom allennlp.state_machines.states import GrammarStatelet, RnnStatelet\nfrom allennlp.training.metrics import Average\n\nlogger = logging.getLogger(__name__) # pylint: disable=invalid-name\n\n\[email protected](\"atis_parser\")\nclass AtisSemanticParser(Model):\n \"\"\"\n Parameters\n ----------\n vocab : ``Vocabulary``\n utterance_embedder : ``TextFieldEmbedder``\n Embedder for utterances.\n action_embedding_dim : ``int``\n Dimension to use for action embeddings.\n encoder : ``Seq2SeqEncoder``\n The encoder to use for the input utterance.\n decoder_beam_search : ``BeamSearch``\n Beam search used to retrieve best sequences after training.\n max_decoding_steps : ``int``\n When we're decoding with a beam search, what's the maximum number of steps we should take?\n This only applies at evaluation time, not during training.\n input_attention: ``Attention``\n We compute an attention over the input utterance at each step of the decoder, using the\n decoder hidden state as the query. Passed to the transition function.\n add_action_bias : ``bool``, optional (default=True)\n If ``True``, we will learn a bias weight for each action that gets used when predicting\n that action, in addition to its embedding.\n dropout : ``float``, optional (default=0)\n If greater than 0, we will apply dropout with this probability after all encoders (pytorch\n LSTMs do not apply dropout to their last layer).\n rule_namespace : ``str``, optional (default=rule_labels)\n The vocabulary namespace to use for production rules. The default corresponds to the\n default used in the dataset reader, so you likely don't need to modify this.\n database_file: ``str``, optional (default=/atis/atis.db)\n The path of the SQLite database when evaluating SQL queries. 
SQLite is disk based, so we need\n the file location to connect to it.\n \"\"\"\n def __init__(self,\n vocab: Vocabulary,\n utterance_embedder: TextFieldEmbedder,\n action_embedding_dim: int,\n encoder: Seq2SeqEncoder,\n decoder_beam_search: BeamSearch,\n max_decoding_steps: int,\n input_attention: Attention,\n add_action_bias: bool = True,\n training_beam_size: int = None,\n dropout: float = 0.0,\n rule_namespace: str = 'rule_labels',\n database_file='/atis/atis.db') -> None:\n # Atis semantic parser init\n super().__init__(vocab)\n self._utterance_embedder = utterance_embedder\n self._encoder = encoder\n self._max_decoding_steps = max_decoding_steps\n self._add_action_bias = add_action_bias\n if dropout > 0:\n self._dropout = torch.nn.Dropout(p=dropout)\n else:\n self._dropout = lambda x: x\n self._rule_namespace = rule_namespace\n self._exact_match = Average()\n self._valid_sql_query = Average()\n self._action_similarity = Average()\n self._denotation_accuracy = Average()\n\n self._executor = SqlExecutor(database_file)\n self._action_padding_index = -1 # the padding value used by IndexField\n num_actions = vocab.get_vocab_size(self._rule_namespace)\n if self._add_action_bias:\n input_action_dim = action_embedding_dim + 1\n else:\n input_action_dim = action_embedding_dim\n self._action_embedder = Embedding(num_embeddings=num_actions, embedding_dim=input_action_dim)\n self._output_action_embedder = Embedding(num_embeddings=num_actions, embedding_dim=action_embedding_dim)\n\n\n # This is what we pass as input in the first step of decoding, when we don't have a\n # previous action, or a previous utterance attention.\n self._first_action_embedding = torch.nn.Parameter(torch.FloatTensor(action_embedding_dim))\n self._first_attended_utterance = torch.nn.Parameter(torch.FloatTensor(encoder.get_output_dim()))\n torch.nn.init.normal_(self._first_action_embedding)\n torch.nn.init.normal_(self._first_attended_utterance)\n\n self._num_entity_types = 2 # TODO(kevin): get this in a more principled way somehow?\n self._entity_type_decoder_embedding = Embedding(self._num_entity_types, action_embedding_dim)\n\n self._beam_search = decoder_beam_search\n self._decoder_trainer = MaximumMarginalLikelihood(training_beam_size)\n self._transition_function = LinkingTransitionFunction(encoder_output_dim=self._encoder.get_output_dim(),\n action_embedding_dim=action_embedding_dim,\n input_attention=input_attention,\n predict_start_type_separately=False,\n add_action_bias=self._add_action_bias,\n dropout=dropout)\n\n @overrides\n def forward(self, # type: ignore\n utterance: Dict[str, torch.LongTensor],\n world: List[AtisWorld],\n actions: List[List[ProductionRuleArray]],\n linking_scores: torch.Tensor,\n target_action_sequence: torch.LongTensor = None,\n sql_queries: List[List[str]] = None) -> Dict[str, torch.Tensor]:\n # pylint: disable=arguments-differ\n \"\"\"\n We set up the initial state for the decoder, and pass that state off to either a DecoderTrainer,\n if we're training, or a BeamSearch for inference, if we're not.\n\n Parameters\n ----------\n utterance : Dict[str, torch.LongTensor]\n The output of ``TextField.as_array()`` applied on the utterance ``TextField``. This will\n be passed through a ``TextFieldEmbedder`` and then through an encoder.\n world : ``List[AtisWorld]``\n We use a ``MetadataField`` to get the ``World`` for each input instance. 
Because of\n how ``MetadataField`` works, this gets passed to us as a ``List[AtisWorld]``,\n actions : ``List[List[ProductionRuleArray]]``\n A list of all possible actions for each ``World`` in the batch, indexed into a\n ``ProductionRuleArray`` using a ``ProductionRuleField``. We will embed all of these\n and use the embeddings to determine which action to take at each timestep in the\n decoder.\n linking_scores: ``torch.Tensor``\n A matrix of the linking the utterance tokens and the entities. This is a binary matrix that\n is deterministically generated where each entry indicates whether a token generated an entity.\n This tensor has shape ``(batch_size, num_entities, num_utterance_tokens)``.\n target_action_sequence : torch.Tensor, optional (default=None)\n The action sequence for the correct action sequence, where each action is an index into the list\n of possible actions. This tensor has shape ``(batch_size, sequence_length, 1)``. We remove the\n trailing dimension.\n sql_queries : List[List[str]], optional (default=None)\n A list of the SQL queries that are given during training or validation.\n \"\"\"\n initial_state = self._get_initial_state(utterance, world, actions, linking_scores)\n batch_size = linking_scores.shape[0]\n if target_action_sequence is not None:\n # Remove the trailing dimension (from ListField[ListField[IndexField]]).\n target_action_sequence = target_action_sequence.squeeze(-1)\n target_mask = target_action_sequence != self._action_padding_index\n else:\n target_mask = None\n\n if self.training:\n # target_action_sequence is of shape (batch_size, 1, sequence_length) here after we unsqueeze it for\n # the MML trainer.\n return self._decoder_trainer.decode(initial_state,\n self._transition_function,\n (target_action_sequence.unsqueeze(1), target_mask.unsqueeze(1)))\n else:\n # TODO(kevin) Move some of this functionality to a separate method for computing validation outputs.\n action_mapping = {}\n for batch_index, batch_actions in enumerate(actions):\n for action_index, action in enumerate(batch_actions):\n action_mapping[(batch_index, action_index)] = action[0]\n outputs: Dict[str, Any] = {'action_mapping': action_mapping}\n outputs['linking_scores'] = linking_scores\n if target_action_sequence is not None:\n outputs['loss'] = self._decoder_trainer.decode(initial_state,\n self._transition_function,\n (target_action_sequence.unsqueeze(1),\n target_mask.unsqueeze(1)))['loss']\n num_steps = self._max_decoding_steps\n # This tells the state to start keeping track of debug info, which we'll pass along in\n # our output dictionary.\n initial_state.debug_info = [[] for _ in range(batch_size)]\n best_final_states = self._beam_search.search(num_steps,\n initial_state,\n self._transition_function,\n keep_final_unfinished_states=False)\n outputs['best_action_sequence'] = []\n outputs['debug_info'] = []\n outputs['entities'] = []\n outputs['predicted_sql_query'] = []\n outputs['sql_queries'] = []\n outputs['utterance'] = []\n outputs['tokenized_utterance'] = []\n\n for i in range(batch_size):\n # Decoding may not have terminated with any completed valid SQL queries, if `num_steps`\n # isn't long enough (or if the model is not trained enough and gets into an\n # infinite action loop).\n if i not in best_final_states:\n self._exact_match(0)\n self._denotation_accuracy(0)\n self._valid_sql_query(0)\n self._action_similarity(0)\n outputs['predicted_sql_query'].append('')\n continue\n\n best_action_indices = best_final_states[i][0].action_history[0]\n\n action_strings = 
[action_mapping[(i, action_index)]\n for action_index in best_action_indices]\n predicted_sql_query = action_sequence_to_sql(action_strings)\n\n if target_action_sequence is not None:\n # Use a Tensor, not a Variable, to avoid a memory leak.\n targets = target_action_sequence[i].data\n sequence_in_targets = 0\n sequence_in_targets = self._action_history_match(best_action_indices, targets)\n self._exact_match(sequence_in_targets)\n\n similarity = difflib.SequenceMatcher(None, best_action_indices, targets)\n self._action_similarity(similarity.ratio())\n\n if sql_queries and sql_queries[i]:\n denotation_correct = self._executor.evaluate_sql_query(predicted_sql_query, sql_queries[i])\n self._denotation_accuracy(denotation_correct)\n outputs['sql_queries'].append(sql_queries[i])\n\n outputs['utterance'].append(world[i].utterances[-1])\n outputs['tokenized_utterance'].append([token.text\n for token in world[i].tokenized_utterances[-1]])\n outputs['entities'].append(world[i].entities)\n outputs['best_action_sequence'].append(action_strings)\n outputs['predicted_sql_query'].append(sqlparse.format(predicted_sql_query, reindent=True))\n outputs['debug_info'].append(best_final_states[i][0].debug_info[0]) # type: ignore\n return outputs\n\n def _get_initial_state(self,\n utterance: Dict[str, torch.LongTensor],\n worlds: List[AtisWorld],\n actions: List[List[ProductionRuleArray]],\n linking_scores: torch.Tensor) -> GrammarBasedState:\n embedded_utterance = self._utterance_embedder(utterance)\n utterance_mask = util.get_text_field_mask(utterance).float()\n\n batch_size = embedded_utterance.size(0)\n num_entities = max([len(world.entities) for world in worlds])\n\n # entity_types: tensor with shape (batch_size, num_entities)\n entity_types, _ = self._get_type_vector(worlds, num_entities, embedded_utterance)\n\n # (batch_size, num_utterance_tokens, embedding_dim)\n encoder_input = embedded_utterance\n\n # (batch_size, utterance_length, encoder_output_dim)\n encoder_outputs = self._dropout(self._encoder(encoder_input, utterance_mask))\n\n # This will be our initial hidden state and memory cell for the decoder LSTM.\n final_encoder_output = util.get_final_encoder_states(encoder_outputs,\n utterance_mask,\n self._encoder.is_bidirectional())\n memory_cell = encoder_outputs.new_zeros(batch_size, self._encoder.get_output_dim())\n initial_score = embedded_utterance.data.new_zeros(batch_size)\n\n # To make grouping states together in the decoder easier, we convert the batch dimension in\n # all of our tensors into an outer list. For instance, the encoder outputs have shape\n # `(batch_size, utterance_length, encoder_output_dim)`. We need to convert this into a list\n # of `batch_size` tensors, each of shape `(utterance_length, encoder_output_dim)`. 
Then we\n # won't have to do any index selects, or anything, we'll just do some `torch.cat()`s.\n initial_score_list = [initial_score[i] for i in range(batch_size)]\n encoder_output_list = [encoder_outputs[i] for i in range(batch_size)]\n utterance_mask_list = [utterance_mask[i] for i in range(batch_size)]\n initial_rnn_state = []\n for i in range(batch_size):\n initial_rnn_state.append(RnnStatelet(final_encoder_output[i],\n memory_cell[i],\n self._first_action_embedding,\n self._first_attended_utterance,\n encoder_output_list,\n utterance_mask_list))\n\n initial_grammar_state = [self._create_grammar_state(worlds[i],\n actions[i],\n linking_scores[i],\n entity_types[i])\n for i in range(batch_size)]\n\n initial_state = GrammarBasedState(batch_indices=list(range(batch_size)),\n action_history=[[] for _ in range(batch_size)],\n score=initial_score_list,\n rnn_state=initial_rnn_state,\n grammar_state=initial_grammar_state,\n possible_actions=actions,\n debug_info=None)\n return initial_state\n\n @staticmethod\n def _get_type_vector(worlds: List[AtisWorld],\n num_entities: int,\n tensor: torch.Tensor = None) -> Tuple[torch.LongTensor, Dict[int, int]]:\n \"\"\"\n Produces the encoding for each entity's type. In addition, a map from a flattened entity\n index to type is returned to combine entity type operations into one method.\n\n Parameters\n ----------\n worlds : ``List[AtisWorld]``\n num_entities : ``int``\n tensor : ``torch.Tensor``\n Used for copying the constructed list onto the right device.\n\n Returns\n -------\n A ``torch.LongTensor`` with shape ``(batch_size, num_entities, num_types)``.\n entity_types : ``Dict[int, int]``\n This is a mapping from ((batch_index * num_entities) + entity_index) to entity type id.\n \"\"\"\n entity_types = {}\n batch_types = []\n\n for batch_index, world in enumerate(worlds):\n types = []\n entities = [('number', entity)\n if 'number' or 'time_range' in entity\n else ('string', entity)\n for entity in world.entities]\n\n for entity_index, entity in enumerate(entities):\n # We need numbers to be first, then strings, since our entities are going to be\n # sorted. 
We do a split by type and then a merge later, and it relies on this sorting.\n if entity[0] == 'number':\n entity_type = 1\n else:\n entity_type = 0\n types.append(entity_type)\n\n # For easier lookups later, we're actually using a _flattened_ version\n # of (batch_index, entity_index) for the key, because this is how the\n # linking scores are stored.\n flattened_entity_index = batch_index * num_entities + entity_index\n entity_types[flattened_entity_index] = entity_type\n padded = pad_sequence_to_length(types, num_entities, lambda: 0)\n batch_types.append(padded)\n\n return tensor.new_tensor(batch_types, dtype=torch.long), entity_types\n\n @staticmethod\n def _action_history_match(predicted: List[int], targets: torch.LongTensor) -> int:\n # TODO(mattg): this could probably be moved into a FullSequenceMatch metric, or something.\n # Check if target is big enough to cover prediction (including start/end symbols)\n if len(predicted) > targets.size(0):\n return 0\n predicted_tensor = targets.new_tensor(predicted)\n targets_trimmed = targets[:len(predicted)]\n # Return 1 if the predicted sequence is anywhere in the list of targets.\n return predicted_tensor.equal(targets_trimmed)\n\n @staticmethod\n def is_nonterminal(token: str):\n if token[0] == '\"' and token[-1] == '\"':\n return False\n return True\n\n\n @overrides\n def get_metrics(self, reset: bool = False) -> Dict[str, float]:\n \"\"\"\n We track four metrics here:\n\n 1. exact_match, which is the percentage of the time that our best output action sequence\n matches the SQL query exactly.\n\n 2. denotation_acc, which is the percentage of examples where we get the correct\n denotation. This is the typical \"accuracy\" metric, and it is what you should usually\n report in an experimental result. You need to be careful, though, that you're\n computing this on the full data, and not just the subset that can be parsed. (make sure\n you pass \"keep_if_unparseable=True\" to the dataset reader, which we do for validation data,\n but not training data).\n\n 3. valid_sql_query, which is the percentage of time that decoding actually produces a\n valid SQL query. We might not produce a valid SQL query if the decoder gets\n into a repetitive loop, or we're trying to produce a super long SQL query and run\n out of time steps, or something.\n\n 4. action_similarity, which is how similar the action sequence predicted is to the actual\n action sequence. This is basically a soft measure of exact_match.\n \"\"\"\n return {\n 'exact_match': self._exact_match.get_metric(reset),\n 'denotation_acc': self._denotation_accuracy.get_metric(reset),\n 'valid_sql_query': self._valid_sql_query.get_metric(reset),\n 'action_similarity': self._action_similarity.get_metric(reset)\n }\n\n def _create_grammar_state(self,\n world: AtisWorld,\n possible_actions: List[ProductionRuleArray],\n linking_scores: torch.Tensor,\n entity_types: torch.Tensor) -> GrammarStatelet:\n \"\"\"\n This method creates the GrammarStatelet object that's used for decoding. Part of creating\n that is creating the `valid_actions` dictionary, which contains embedded representations of\n all of the valid actions. So, we create that here as well.\n\n The inputs to this method are for a `single instance in the batch`; none of the tensors we\n create here are batched. We grab the global action ids from the input\n ``ProductionRuleArrays``, and we use those to embed the valid actions for every\n non-terminal type. 
We use the input ``linking_scores`` for non-global actions.\n\n Parameters\n ----------\n world : ``AtisWorld``\n From the input to ``forward`` for a single batch instance.\n possible_actions : ``List[ProductionRuleArray]``\n From the input to ``forward`` for a single batch instance.\n linking_scores : ``torch.Tensor``\n Assumed to have shape ``(num_entities, num_utterance_tokens)`` (i.e., there is no batch\n dimension).\n entity_types : ``torch.Tensor``\n Assumed to have shape ``(num_entities,)`` (i.e., there is no batch dimension).\n \"\"\"\n action_map = {}\n for action_index, action in enumerate(possible_actions):\n action_string = action[0]\n action_map[action_string] = action_index\n\n valid_actions = world.valid_actions\n entity_map = {}\n entities = world.entities\n\n for entity_index, entity in enumerate(entities):\n entity_map[entity] = entity_index\n\n translated_valid_actions: Dict[str, Dict[str, Tuple[torch.Tensor, torch.Tensor, List[int]]]] = {}\n for key, action_strings in valid_actions.items():\n translated_valid_actions[key] = {}\n # `key` here is a non-terminal from the grammar, and `action_strings` are all the valid\n # productions of that non-terminal. We'll first split those productions by global vs.\n # linked action.\n\n action_indices = [action_map[action_string] for action_string in action_strings]\n production_rule_arrays = [(possible_actions[index], index) for index in action_indices]\n global_actions = []\n linked_actions = []\n for production_rule_array, action_index in production_rule_arrays:\n if production_rule_array[1]:\n global_actions.append((production_rule_array[2], action_index))\n else:\n linked_actions.append((production_rule_array[0], action_index))\n\n if global_actions:\n global_action_tensors, global_action_ids = zip(*global_actions)\n global_action_tensor = entity_types.new_tensor(torch.cat(global_action_tensors, dim=0),\n dtype=torch.long)\n global_input_embeddings = self._action_embedder(global_action_tensor)\n global_output_embeddings = self._output_action_embedder(global_action_tensor)\n translated_valid_actions[key]['global'] = (global_input_embeddings,\n global_output_embeddings,\n list(global_action_ids))\n if linked_actions:\n linked_rules, linked_action_ids = zip(*linked_actions)\n entities = linked_rules\n entity_ids = [entity_map[entity] for entity in entities]\n entity_linking_scores = linking_scores[entity_ids]\n entity_type_tensor = entity_types[entity_ids]\n entity_type_embeddings = self._entity_type_decoder_embedding(entity_type_tensor)\n entity_type_embeddings = entity_types.new_tensor(entity_type_embeddings, dtype=torch.float)\n translated_valid_actions[key]['linked'] = (entity_linking_scores,\n entity_type_embeddings,\n list(linked_action_ids))\n\n return GrammarStatelet(['statement'],\n {},\n translated_valid_actions,\n {},\n self.is_nonterminal,\n reverse_productions=False)\n\n @overrides\n def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:\n \"\"\"\n This method overrides ``Model.decode``, which gets called after ``Model.forward``, at test\n time, to finalize predictions. 
This is (confusingly) a separate notion from the \"decoder\"\n in \"encoder/decoder\", where that decoder logic lives in ``TransitionFunction``.\n\n This method trims the output predictions to the first end symbol, replaces indices with\n corresponding tokens, and adds a field called ``predicted_actions`` to the ``output_dict``.\n \"\"\"\n action_mapping = output_dict['action_mapping']\n best_actions = output_dict[\"best_action_sequence\"]\n debug_infos = output_dict['debug_info']\n batch_action_info = []\n for batch_index, (predicted_actions, debug_info) in enumerate(zip(best_actions, debug_infos)):\n instance_action_info = []\n for predicted_action, action_debug_info in zip(predicted_actions, debug_info):\n action_info = {}\n action_info['predicted_action'] = predicted_action\n considered_actions = action_debug_info['considered_actions']\n probabilities = action_debug_info['probabilities']\n actions = []\n for action, probability in zip(considered_actions, probabilities):\n if action != -1:\n actions.append((action_mapping[(batch_index, action)], probability))\n actions.sort()\n considered_actions, probabilities = zip(*actions)\n action_info['considered_actions'] = considered_actions\n action_info['action_probabilities'] = probabilities\n action_info['utterance_attention'] = action_debug_info.get('question_attention', [])\n instance_action_info.append(action_info)\n batch_action_info.append(instance_action_info)\n output_dict[\"predicted_actions\"] = batch_action_info\n return output_dict\n"
] | [
[
"torch.FloatTensor",
"torch.nn.Dropout",
"torch.nn.init.normal_",
"torch.cat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jdammers/mne-python | [
"1dc1502215a53385cda15c6c336fcc4341dc4d3b",
"1dc1502215a53385cda15c6c336fcc4341dc4d3b",
"1dc1502215a53385cda15c6c336fcc4341dc4d3b",
"1dc1502215a53385cda15c6c336fcc4341dc4d3b",
"1dc1502215a53385cda15c6c336fcc4341dc4d3b"
] | [
"mne/bem.py",
"mne/decoding/tests/test_time_frequency.py",
"mne/inverse_sparse/tests/test_gamma_map.py",
"mne/preprocessing/tests/test_eeglab_infomax.py",
"mne/time_frequency/tests/test_multitaper.py"
] | [
"# Authors: Alexandre Gramfort <[email protected]>\n# Matti Hamalainen <[email protected]>\n# Eric Larson <[email protected]>\n# Lorenzo De Santis <[email protected]>\n#\n# License: BSD (3-clause)\n\nfrom functools import partial\nimport glob\nimport os\nimport os.path as op\nimport shutil\nfrom copy import deepcopy\n\nimport numpy as np\nfrom scipy import linalg\n\nfrom .transforms import _ensure_trans, apply_trans\nfrom .io import Info\nfrom .io.constants import FIFF\nfrom .io.write import (start_file, start_block, write_float, write_int,\n write_float_matrix, write_int_matrix, end_block,\n end_file)\nfrom .io.tag import find_tag\nfrom .io.tree import dir_tree_find\nfrom .io.open import fiff_open\nfrom .surface import (read_surface, write_surface, complete_surface_info,\n _compute_nearest, _get_ico_surface, read_tri,\n _fast_cross_nd_sum, _get_solids)\nfrom .utils import verbose, logger, run_subprocess, get_subjects_dir, warn, _pl\nfrom .fixes import einsum\nfrom .externals.six import string_types\n\n\n# ############################################################################\n# Compute BEM solution\n\n# The following approach is based on:\n#\n# de Munck JC: \"A linear discretization of the volume conductor boundary\n# integral equation using analytically integrated elements\",\n# IEEE Trans Biomed Eng. 1992 39(9) : 986 - 990\n#\n\n\nclass ConductorModel(dict):\n \"\"\"BEM or sphere model.\"\"\"\n\n def __repr__(self): # noqa: D105\n if self['is_sphere']:\n center = ', '.join('%0.1f' % (x * 1000.) for x in self['r0'])\n rad = self.radius\n if rad is None: # no radius / MEG only\n extra = 'Sphere (no layers): r0=[%s] mm' % center\n else:\n extra = ('Sphere (%s layer%s): r0=[%s] R=%1.f mm'\n % (len(self['layers']) - 1, _pl(self['layers']),\n center, rad * 1000.))\n else:\n extra = ('BEM (%s layer%s)' % (len(self['surfs']),\n _pl(self['surfs'])))\n return '<ConductorModel | %s>' % extra\n\n def copy(self):\n \"\"\"Return copy of ConductorModel instance.\"\"\"\n return deepcopy(self)\n\n @property\n def radius(self):\n \"\"\"Sphere radius if an EEG sphere model.\"\"\"\n if not self['is_sphere']:\n raise RuntimeError('radius undefined for BEM')\n return None if len(self['layers']) == 0 else self['layers'][-1]['rad']\n\n\ndef _calc_beta(rk, rk_norm, rk1, rk1_norm):\n \"\"\"Compute coefficients for calculating the magic vector omega.\"\"\"\n rkk1 = rk1[0] - rk[0]\n size = np.linalg.norm(rkk1)\n rkk1 /= size\n num = rk_norm + np.dot(rk, rkk1)\n den = rk1_norm + np.dot(rk1, rkk1)\n res = np.log(num / den) / size\n return res\n\n\ndef _lin_pot_coeff(fros, tri_rr, tri_nn, tri_area):\n \"\"\"Compute the linear potential matrix element computations.\"\"\"\n omega = np.zeros((len(fros), 3))\n\n # we replicate a little bit of the _get_solids code here for speed\n # (we need some of the intermediate values later)\n v1 = tri_rr[np.newaxis, 0, :] - fros\n v2 = tri_rr[np.newaxis, 1, :] - fros\n v3 = tri_rr[np.newaxis, 2, :] - fros\n triples = _fast_cross_nd_sum(v1, v2, v3)\n l1 = np.linalg.norm(v1, axis=1)\n l2 = np.linalg.norm(v2, axis=1)\n l3 = np.linalg.norm(v3, axis=1)\n ss = l1 * l2 * l3\n ss += einsum('ij,ij,i->i', v1, v2, l3)\n ss += einsum('ij,ij,i->i', v1, v3, l2)\n ss += einsum('ij,ij,i->i', v2, v3, l1)\n solids = np.arctan2(triples, ss)\n\n # We *could* subselect the good points from v1, v2, v3, triples, solids,\n # l1, l2, and l3, but there are *very* few bad points. So instead we do\n # some unnecessary calculations, and then omit them from the final\n # solution. 
These three lines ensure we don't get invalid values in\n # _calc_beta.\n bad_mask = np.abs(solids) < np.pi / 1e6\n l1[bad_mask] = 1.\n l2[bad_mask] = 1.\n l3[bad_mask] = 1.\n\n # Calculate the magic vector vec_omega\n beta = [_calc_beta(v1, l1, v2, l2)[:, np.newaxis],\n _calc_beta(v2, l2, v3, l3)[:, np.newaxis],\n _calc_beta(v3, l3, v1, l1)[:, np.newaxis]]\n vec_omega = (beta[2] - beta[0]) * v1\n vec_omega += (beta[0] - beta[1]) * v2\n vec_omega += (beta[1] - beta[2]) * v3\n\n area2 = 2.0 * tri_area\n n2 = 1.0 / (area2 * area2)\n # leave omega = 0 otherwise\n # Put it all together...\n yys = [v1, v2, v3]\n idx = [0, 1, 2, 0, 2]\n for k in range(3):\n diff = yys[idx[k - 1]] - yys[idx[k + 1]]\n zdots = _fast_cross_nd_sum(yys[idx[k + 1]], yys[idx[k - 1]], tri_nn)\n omega[:, k] = -n2 * (area2 * zdots * 2. * solids -\n triples * (diff * vec_omega).sum(axis=-1))\n # omit the bad points from the solution\n omega[bad_mask] = 0.\n return omega\n\n\ndef _correct_auto_elements(surf, mat):\n \"\"\"Improve auto-element approximation.\"\"\"\n pi2 = 2.0 * np.pi\n tris_flat = surf['tris'].ravel()\n misses = pi2 - mat.sum(axis=1)\n for j, miss in enumerate(misses):\n # How much is missing?\n n_memb = len(surf['neighbor_tri'][j])\n # The node itself receives one half\n mat[j, j] = miss / 2.0\n # The rest is divided evenly among the member nodes...\n miss /= (4.0 * n_memb)\n members = np.where(j == tris_flat)[0]\n mods = members % 3\n offsets = np.array([[1, 2], [-1, 1], [-1, -2]])\n tri_1 = members + offsets[mods, 0]\n tri_2 = members + offsets[mods, 1]\n for t1, t2 in zip(tri_1, tri_2):\n mat[j, tris_flat[t1]] += miss\n mat[j, tris_flat[t2]] += miss\n return\n\n\ndef _fwd_bem_lin_pot_coeff(surfs):\n \"\"\"Calculate the coefficients for linear collocation approach.\"\"\"\n # taken from fwd_bem_linear_collocation.c\n nps = [surf['np'] for surf in surfs]\n np_tot = sum(nps)\n coeff = np.zeros((np_tot, np_tot))\n offsets = np.cumsum(np.concatenate(([0], nps)))\n for si_1, surf1 in enumerate(surfs):\n rr_ord = np.arange(nps[si_1])\n for si_2, surf2 in enumerate(surfs):\n logger.info(\" %s (%d) -> %s (%d) ...\" %\n (_bem_explain_surface(surf1['id']), nps[si_1],\n _bem_explain_surface(surf2['id']), nps[si_2]))\n tri_rr = surf2['rr'][surf2['tris']]\n tri_nn = surf2['tri_nn']\n tri_area = surf2['tri_area']\n submat = coeff[offsets[si_1]:offsets[si_1 + 1],\n offsets[si_2]:offsets[si_2 + 1]] # view\n for k in range(surf2['ntri']):\n tri = surf2['tris'][k]\n if si_1 == si_2:\n skip_idx = ((rr_ord == tri[0]) |\n (rr_ord == tri[1]) |\n (rr_ord == tri[2]))\n else:\n skip_idx = list()\n # No contribution from a triangle that\n # this vertex belongs to\n # if sidx1 == sidx2 and (tri == j).any():\n # continue\n # Otherwise do the hard job\n coeffs = _lin_pot_coeff(surf1['rr'], tri_rr[k], tri_nn[k],\n tri_area[k])\n coeffs[skip_idx] = 0.\n submat[:, tri] -= coeffs\n if si_1 == si_2:\n _correct_auto_elements(surf1, submat)\n return coeff\n\n\ndef _fwd_bem_multi_solution(solids, gamma, nps):\n \"\"\"Do multi surface solution.\n\n * Invert I - solids/(2*M_PI)\n * Take deflation into account\n * The matrix is destroyed after inversion\n * This is the general multilayer case\n \"\"\"\n pi2 = 1.0 / (2 * np.pi)\n n_tot = np.sum(nps)\n assert solids.shape == (n_tot, n_tot)\n nsurf = len(nps)\n defl = 1.0 / n_tot\n # Modify the matrix\n offsets = np.cumsum(np.concatenate(([0], nps)))\n for si_1 in range(nsurf):\n for si_2 in range(nsurf):\n mult = pi2 if gamma is None else pi2 * gamma[si_1, si_2]\n slice_j = slice(offsets[si_1], 
offsets[si_1 + 1])\n slice_k = slice(offsets[si_2], offsets[si_2 + 1])\n solids[slice_j, slice_k] = defl - solids[slice_j, slice_k] * mult\n solids += np.eye(n_tot)\n return linalg.inv(solids, overwrite_a=True)\n\n\ndef _fwd_bem_homog_solution(solids, nps):\n \"\"\"Make a homogeneous solution.\"\"\"\n return _fwd_bem_multi_solution(solids, None, nps)\n\n\ndef _fwd_bem_ip_modify_solution(solution, ip_solution, ip_mult, n_tri):\n \"\"\"Modify the solution according to the IP approach.\"\"\"\n n_last = n_tri[-1]\n mult = (1.0 + ip_mult) / ip_mult\n\n logger.info(' Combining...')\n offsets = np.cumsum(np.concatenate(([0], n_tri)))\n for si in range(len(n_tri)):\n # Pick the correct submatrix (right column) and multiply\n sub = solution[offsets[si]:offsets[si + 1], np.sum(n_tri[:-1]):]\n # Multiply\n sub -= 2 * np.dot(sub, ip_solution)\n\n # The lower right corner is a special case\n sub[-n_last:, -n_last:] += mult * ip_solution\n\n # Final scaling\n logger.info(' Scaling...')\n solution *= ip_mult\n return\n\n\ndef _fwd_bem_linear_collocation_solution(m):\n \"\"\"Compute the linear collocation potential solution.\"\"\"\n # first, add surface geometries\n for surf in m['surfs']:\n complete_surface_info(surf, copy=False, verbose=False)\n\n logger.info('Computing the linear collocation solution...')\n logger.info(' Matrix coefficients...')\n coeff = _fwd_bem_lin_pot_coeff(m['surfs'])\n m['nsol'] = len(coeff)\n logger.info(\" Inverting the coefficient matrix...\")\n nps = [surf['np'] for surf in m['surfs']]\n m['solution'] = _fwd_bem_multi_solution(coeff, m['gamma'], nps)\n if len(m['surfs']) == 3:\n ip_mult = m['sigma'][1] / m['sigma'][2]\n if ip_mult <= FIFF.FWD_BEM_IP_APPROACH_LIMIT:\n logger.info('IP approach required...')\n logger.info(' Matrix coefficients (homog)...')\n coeff = _fwd_bem_lin_pot_coeff([m['surfs'][-1]])\n logger.info(' Inverting the coefficient matrix (homog)...')\n ip_solution = _fwd_bem_homog_solution(coeff,\n [m['surfs'][-1]['np']])\n logger.info(' Modify the original solution to incorporate '\n 'IP approach...')\n _fwd_bem_ip_modify_solution(m['solution'], ip_solution, ip_mult,\n nps)\n m['bem_method'] = FIFF.FWD_BEM_LINEAR_COLL\n logger.info(\"Solution ready.\")\n\n\n@verbose\ndef make_bem_solution(surfs, verbose=None):\n \"\"\"Create a BEM solution using the linear collocation approach.\n\n Parameters\n ----------\n surfs : list of dict\n The BEM surfaces to use (`from make_bem_model`)\n verbose : bool, str, int, or None\n If not None, override default verbose level (see :func:`mne.verbose`\n and :ref:`Logging documentation <tut_logging>` for more).\n\n Returns\n -------\n bem : instance of ConductorModel\n The BEM solution.\n\n Notes\n -----\n .. 
versionadded:: 0.10.0\n\n See Also\n --------\n make_bem_model\n read_bem_surfaces\n write_bem_surfaces\n read_bem_solution\n write_bem_solution\n \"\"\"\n logger.info('Approximation method : Linear collocation\\n')\n if isinstance(surfs, string_types):\n # Load the surfaces\n logger.info('Loading surfaces...')\n surfs = read_bem_surfaces(surfs)\n bem = ConductorModel(is_sphere=False, surfs=surfs)\n _add_gamma_multipliers(bem)\n if len(bem['surfs']) == 3:\n logger.info('Three-layer model surfaces loaded.')\n elif len(bem['surfs']) == 1:\n logger.info('Homogeneous model surface loaded.')\n else:\n raise RuntimeError('Only 1- or 3-layer BEM computations supported')\n _check_bem_size(bem['surfs'])\n _fwd_bem_linear_collocation_solution(bem)\n logger.info('BEM geometry computations complete.')\n return bem\n\n\n# ############################################################################\n# Make BEM model\n\ndef _ico_downsample(surf, dest_grade):\n \"\"\"Downsample the surface if isomorphic to a subdivided icosahedron.\"\"\"\n n_tri = len(surf['tris'])\n found = -1\n bad_msg = (\"A surface with %d triangles cannot be isomorphic with a \"\n \"subdivided icosahedron.\" % n_tri)\n if n_tri % 20 != 0:\n raise RuntimeError(bad_msg)\n n_tri = n_tri // 20\n found = int(round(np.log(n_tri) / np.log(4)))\n if n_tri != 4 ** found:\n raise RuntimeError(bad_msg)\n del n_tri\n\n if dest_grade > found:\n raise RuntimeError('For this surface, decimation grade should be %d '\n 'or less, not %s.' % (found, dest_grade))\n\n source = _get_ico_surface(found)\n dest = _get_ico_surface(dest_grade, patch_stats=True)\n del dest['tri_cent']\n del dest['tri_nn']\n del dest['neighbor_tri']\n del dest['tri_area']\n if not np.array_equal(source['tris'], surf['tris']):\n raise RuntimeError('The source surface has a matching number of '\n 'triangles but ordering is wrong')\n logger.info('Going from %dth to %dth subdivision of an icosahedron '\n '(n_tri: %d -> %d)' % (found, dest_grade, len(surf['tris']),\n len(dest['tris'])))\n # Find the mapping\n dest['rr'] = surf['rr'][_get_ico_map(source, dest)]\n return dest\n\n\ndef _get_ico_map(fro, to):\n \"\"\"Get a mapping between ico surfaces.\"\"\"\n nearest, dists = _compute_nearest(fro['rr'], to['rr'], return_dists=True)\n n_bads = (dists > 5e-3).sum()\n if n_bads > 0:\n raise RuntimeError('No matching vertex for %d destination vertices'\n % (n_bads))\n return nearest\n\n\ndef _order_surfaces(surfs):\n \"\"\"Reorder the surfaces.\"\"\"\n if len(surfs) != 3:\n return surfs\n # we have three surfaces\n surf_order = [FIFF.FIFFV_BEM_SURF_ID_HEAD,\n FIFF.FIFFV_BEM_SURF_ID_SKULL,\n FIFF.FIFFV_BEM_SURF_ID_BRAIN]\n ids = np.array([surf['id'] for surf in surfs])\n if set(ids) != set(surf_order):\n raise RuntimeError('bad surface ids: %s' % ids)\n order = [np.where(ids == id_)[0][0] for id_ in surf_order]\n surfs = [surfs[idx] for idx in order]\n return surfs\n\n\ndef _assert_complete_surface(surf, incomplete='raise'):\n \"\"\"Check the sum of solid angles as seen from inside.\"\"\"\n # from surface_checks.c\n tot_angle = 0.\n # Center of mass....\n cm = surf['rr'].mean(axis=0)\n logger.info('%s CM is %6.2f %6.2f %6.2f mm' %\n (_surf_name[surf['id']],\n 1000 * cm[0], 1000 * cm[1], 1000 * cm[2]))\n tot_angle = _get_solids(surf['rr'][surf['tris']], cm[np.newaxis, :])[0]\n prop = tot_angle / (2 * np.pi)\n if np.abs(prop - 1.0) > 1e-5:\n msg = ('Surface %s is not complete (sum of solid angles '\n 'yielded %g, should be 1.)'\n % (_surf_name[surf['id']], prop))\n if incomplete == 'raise':\n 
raise RuntimeError(msg)\n else:\n warn(msg)\n\n\n_surf_name = {\n FIFF.FIFFV_BEM_SURF_ID_HEAD: 'outer skin ',\n FIFF.FIFFV_BEM_SURF_ID_SKULL: 'outer skull',\n FIFF.FIFFV_BEM_SURF_ID_BRAIN: 'inner skull',\n FIFF.FIFFV_BEM_SURF_ID_UNKNOWN: 'unknown ',\n}\n\n\ndef _assert_inside(fro, to):\n \"\"\"Check one set of points is inside a surface.\"\"\"\n # this is \"is_inside\" in surface_checks.c\n tot_angle = _get_solids(to['rr'][to['tris']], fro['rr'])\n if (np.abs(tot_angle / (2 * np.pi) - 1.0) > 1e-5).any():\n raise RuntimeError('Surface %s is not completely inside surface %s'\n % (_surf_name[fro['id']], _surf_name[to['id']]))\n\n\ndef _check_surfaces(surfs, incomplete='raise'):\n \"\"\"Check that the surfaces are complete and non-intersecting.\"\"\"\n for surf in surfs:\n _assert_complete_surface(surf, incomplete=incomplete)\n # Then check the topology\n for surf_1, surf_2 in zip(surfs[:-1], surfs[1:]):\n logger.info('Checking that %s surface is inside %s surface...' %\n (_surf_name[surf_2['id']], _surf_name[surf_1['id']]))\n _assert_inside(surf_2, surf_1)\n\n\ndef _check_surface_size(surf):\n \"\"\"Check that the coordinate limits are reasonable.\"\"\"\n sizes = surf['rr'].max(axis=0) - surf['rr'].min(axis=0)\n if (sizes < 0.05).any():\n raise RuntimeError('Dimensions of the surface %s seem too small '\n '(%9.5f mm). Maybe the the unit of measure is '\n 'meters instead of mm' %\n (_surf_name[surf['id']], 1000 * sizes.min()))\n\n\ndef _check_thicknesses(surfs):\n \"\"\"Compute how close we are.\"\"\"\n for surf_1, surf_2 in zip(surfs[:-1], surfs[1:]):\n min_dist = _compute_nearest(surf_1['rr'], surf_2['rr'],\n return_dists=True)[0]\n min_dist = min_dist.min()\n logger.info('Checking distance between %s and %s surfaces...' %\n (_surf_name[surf_1['id']], _surf_name[surf_2['id']]))\n logger.info('Minimum distance between the %s and %s surfaces is '\n 'approximately %6.1f mm' %\n (_surf_name[surf_1['id']], _surf_name[surf_2['id']],\n 1000 * min_dist))\n\n\ndef _surfaces_to_bem(surfs, ids, sigmas, ico=None, rescale=True,\n incomplete='raise'):\n \"\"\"Convert surfaces to a BEM.\"\"\"\n # equivalent of mne_surf2bem\n # surfs can be strings (filenames) or surface dicts\n if len(surfs) not in (1, 3) or not (len(surfs) == len(ids) ==\n len(sigmas)):\n raise ValueError('surfs, ids, and sigmas must all have the same '\n 'number of elements (1 or 3)')\n surf = list(surfs)\n for si, surf in enumerate(surfs):\n if isinstance(surf, string_types):\n surfs[si] = read_surface(surf, return_dict=True)[-1]\n # Downsampling if the surface is isomorphic with a subdivided icosahedron\n if ico is not None:\n for si, surf in enumerate(surfs):\n surfs[si] = _ico_downsample(surf, ico)\n for surf, id_ in zip(surfs, ids):\n surf['id'] = id_\n surf['coord_frame'] = surf.get('coord_frame', FIFF.FIFFV_COORD_MRI)\n surf.update(np=len(surf['rr']), ntri=len(surf['tris']))\n if rescale:\n surf['rr'] /= 1000. 
# convert to meters\n\n # Shifting surfaces is not implemented here...\n\n # Order the surfaces for the benefit of the topology checks\n for surf, sigma in zip(surfs, sigmas):\n surf['sigma'] = sigma\n surfs = _order_surfaces(surfs)\n\n # Check topology as best we can\n _check_surfaces(surfs, incomplete=incomplete)\n for surf in surfs:\n _check_surface_size(surf)\n _check_thicknesses(surfs)\n logger.info('Surfaces passed the basic topology checks.')\n return surfs\n\n\n@verbose\ndef make_bem_model(subject, ico=4, conductivity=(0.3, 0.006, 0.3),\n subjects_dir=None, verbose=None):\n \"\"\"Create a BEM model for a subject.\n\n .. note:: To get a single layer bem corresponding to the --homog flag in\n the command line tool set the ``conductivity`` parameter\n to a list/tuple with a single value (e.g. [0.3]).\n\n Parameters\n ----------\n subject : str\n The subject.\n ico : int | None\n The surface ico downsampling to use, e.g. 5=20484, 4=5120, 3=1280.\n If None, no subsampling is applied.\n conductivity : array of int, shape (3,) or (1,)\n The conductivities to use for each shell. Should be a single element\n for a one-layer model, or three elements for a three-layer model.\n Defaults to ``[0.3, 0.006, 0.3]``. The MNE-C default for a\n single-layer model would be ``[0.3]``.\n subjects_dir : string, or None\n Path to SUBJECTS_DIR if it is not set in the environment.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see :func:`mne.verbose`\n and :ref:`Logging documentation <tut_logging>` for more).\n\n Returns\n -------\n surfaces : list of dict\n The BEM surfaces. Use `make_bem_solution` to turn these into a\n `ConductorModel` suitable for forward calculation.\n\n Notes\n -----\n .. versionadded:: 0.10.0\n\n See Also\n --------\n make_bem_solution\n make_sphere_model\n read_bem_surfaces\n write_bem_surfaces\n \"\"\"\n conductivity = np.array(conductivity, float)\n if conductivity.ndim != 1 or conductivity.size not in (1, 3):\n raise ValueError('conductivity must be 1D array-like with 1 or 3 '\n 'elements')\n subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)\n subject_dir = op.join(subjects_dir, subject)\n bem_dir = op.join(subject_dir, 'bem')\n inner_skull = op.join(bem_dir, 'inner_skull.surf')\n outer_skull = op.join(bem_dir, 'outer_skull.surf')\n outer_skin = op.join(bem_dir, 'outer_skin.surf')\n surfaces = [inner_skull, outer_skull, outer_skin]\n ids = [FIFF.FIFFV_BEM_SURF_ID_BRAIN,\n FIFF.FIFFV_BEM_SURF_ID_SKULL,\n FIFF.FIFFV_BEM_SURF_ID_HEAD]\n logger.info('Creating the BEM geometry...')\n if len(conductivity) == 1:\n surfaces = surfaces[:1]\n ids = ids[:1]\n surfaces = _surfaces_to_bem(surfaces, ids, conductivity, ico)\n _check_bem_size(surfaces)\n logger.info('Complete.\\n')\n return surfaces\n\n\n# ############################################################################\n# Compute EEG sphere model\n\ndef _fwd_eeg_get_multi_sphere_model_coeffs(m, n_terms):\n \"\"\"Get the model depended weighting factor for n.\"\"\"\n nlayer = len(m['layers'])\n if nlayer in (0, 1):\n return 1.\n\n # Initialize the arrays\n c1 = np.zeros(nlayer - 1)\n c2 = np.zeros(nlayer - 1)\n cr = np.zeros(nlayer - 1)\n cr_mult = np.zeros(nlayer - 1)\n for k in range(nlayer - 1):\n c1[k] = m['layers'][k]['sigma'] / m['layers'][k + 1]['sigma']\n c2[k] = c1[k] - 1.0\n cr_mult[k] = m['layers'][k]['rel_rad']\n cr[k] = cr_mult[k]\n cr_mult[k] *= cr_mult[k]\n\n coeffs = np.zeros(n_terms - 1)\n for n in range(1, n_terms):\n # Increment the radius coefficients\n for k in 
range(nlayer - 1):\n cr[k] *= cr_mult[k]\n\n # Multiply the matrices\n M = np.eye(2)\n n1 = n + 1.0\n for k in range(nlayer - 2, -1, -1):\n M = np.dot([[n + n1 * c1[k], n1 * c2[k] / cr[k]],\n [n * c2[k] * cr[k], n1 + n * c1[k]]], M)\n num = n * (2.0 * n + 1.0) ** (nlayer - 1)\n coeffs[n - 1] = num / (n * M[1, 1] + n1 * M[1, 0])\n return coeffs\n\n\ndef _compose_linear_fitting_data(mu, u):\n \"\"\"Get the linear fitting data.\"\"\"\n # y is the data to be fitted (nterms-1 x 1)\n # M is the model matrix (nterms-1 x nfit-1)\n for k in range(u['nterms'] - 1):\n k1 = k + 1\n mu1n = np.power(mu[0], k1)\n u['y'][k] = u['w'][k] * (u['fn'][k1] - mu1n * u['fn'][0])\n for p in range(u['nfit'] - 1):\n u['M'][k][p] = u['w'][k] * (np.power(mu[p + 1], k1) - mu1n)\n\n\ndef _compute_linear_parameters(mu, u):\n \"\"\"Compute the best-fitting linear parameters.\"\"\"\n _compose_linear_fitting_data(mu, u)\n uu, sing, vv = linalg.svd(u['M'], full_matrices=False)\n\n # Compute the residuals\n u['resi'] = u['y'].copy()\n\n vec = np.empty(u['nfit'] - 1)\n for p in range(u['nfit'] - 1):\n vec[p] = np.dot(uu[:, p], u['y'])\n for k in range(u['nterms'] - 1):\n u['resi'][k] -= uu[k, p] * vec[p]\n vec[p] = vec[p] / sing[p]\n\n lambda_ = np.zeros(u['nfit'])\n for p in range(u['nfit'] - 1):\n sum_ = 0.\n for q in range(u['nfit'] - 1):\n sum_ += vv[q, p] * vec[q]\n lambda_[p + 1] = sum_\n lambda_[0] = u['fn'][0] - np.sum(lambda_[1:])\n rv = np.dot(u['resi'], u['resi']) / np.dot(u['y'], u['y'])\n return rv, lambda_\n\n\ndef _one_step(mu, u):\n \"\"\"Evaluate the residual sum of squares fit for one set of mu values.\"\"\"\n if np.abs(mu).max() > 1.0:\n return 1.0\n\n # Compose the data for the linear fitting, compute SVD, then residuals\n _compose_linear_fitting_data(mu, u)\n u['uu'], u['sing'], u['vv'] = linalg.svd(u['M'])\n u['resi'][:] = u['y'][:]\n for p in range(u['nfit'] - 1):\n dot = np.dot(u['uu'][p], u['y'])\n for k in range(u['nterms'] - 1):\n u['resi'][k] = u['resi'][k] - u['uu'][p, k] * dot\n\n # Return their sum of squares\n return np.dot(u['resi'], u['resi'])\n\n\ndef _fwd_eeg_fit_berg_scherg(m, nterms, nfit):\n \"\"\"Fit the Berg-Scherg equivalent spherical model dipole parameters.\"\"\"\n from scipy.optimize import fmin_cobyla\n assert nfit >= 2\n u = dict(y=np.zeros(nterms - 1), resi=np.zeros(nterms - 1),\n nfit=nfit, nterms=nterms, M=np.zeros((nterms - 1, nfit - 1)))\n\n # (1) Calculate the coefficients of the true expansion\n u['fn'] = _fwd_eeg_get_multi_sphere_model_coeffs(m, nterms + 1)\n\n # (2) Calculate the weighting\n f = (min([layer['rad'] for layer in m['layers']]) /\n max([layer['rad'] for layer in m['layers']]))\n\n # correct weighting\n k = np.arange(1, nterms + 1)\n u['w'] = np.sqrt((2.0 * k + 1) * (3.0 * k + 1.0) /\n k) * np.power(f, (k - 1.0))\n u['w'][-1] = 0\n\n # Do the nonlinear minimization, constraining mu to the interval [-1, +1]\n mu_0 = np.random.RandomState(0).rand(nfit) * f\n fun = partial(_one_step, u=u)\n max_ = 1. 
- 2e-4 # adjust for fmin_cobyla \"catol\" that not all scipy have\n cons = [(lambda x: max_ - np.abs(x[ii])) for ii in range(nfit)]\n mu = fmin_cobyla(fun, mu_0, cons, rhobeg=0.5, rhoend=5e-3, disp=0)\n\n # (6) Do the final step: calculation of the linear parameters\n rv, lambda_ = _compute_linear_parameters(mu, u)\n order = np.argsort(mu)[::-1]\n mu, lambda_ = mu[order], lambda_[order] # sort: largest mu first\n\n m['mu'] = mu\n # This division takes into account the actual conductivities\n m['lambda'] = lambda_ / m['layers'][-1]['sigma']\n m['nfit'] = nfit\n return rv\n\n\n@verbose\ndef make_sphere_model(r0=(0., 0., 0.04), head_radius=0.09, info=None,\n relative_radii=(0.90, 0.92, 0.97, 1.0),\n sigmas=(0.33, 1.0, 0.004, 0.33), verbose=None):\n \"\"\"Create a spherical model for forward solution calculation.\n\n Parameters\n ----------\n r0 : array-like | str\n Head center to use (in head coordinates). If 'auto', the head\n center will be calculated from the digitization points in info.\n head_radius : float | str | None\n If float, compute spherical shells for EEG using the given radius.\n If 'auto', estimate an approriate radius from the dig points in Info,\n If None, exclude shells (single layer sphere model).\n info : instance of Info | None\n Measurement info. Only needed if ``r0`` or ``head_radius`` are\n ``'auto'``.\n relative_radii : array-like\n Relative radii for the spherical shells.\n sigmas : array-like\n Sigma values for the spherical shells.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see :func:`mne.verbose`\n and :ref:`Logging documentation <tut_logging>` for more).\n\n Returns\n -------\n sphere : instance of ConductorModel\n The resulting spherical conductor model.\n\n Notes\n -----\n .. versionadded:: 0.9.0\n\n See Also\n --------\n make_bem_model\n make_bem_solution\n \"\"\"\n for name in ('r0', 'head_radius'):\n param = locals()[name]\n if isinstance(param, string_types):\n if param != 'auto':\n raise ValueError('%s, if str, must be \"auto\" not \"%s\"'\n % (name, param))\n relative_radii = np.array(relative_radii, float).ravel()\n sigmas = np.array(sigmas, float).ravel()\n if len(relative_radii) != len(sigmas):\n raise ValueError('relative_radii length (%s) must match that of '\n 'sigmas (%s)' % (len(relative_radii),\n len(sigmas)))\n if len(sigmas) <= 1 and head_radius is not None:\n raise ValueError('at least 2 sigmas must be supplied if '\n 'head_radius is not None, got %s' % (len(sigmas),))\n if (isinstance(r0, string_types) and r0 == 'auto') or \\\n (isinstance(head_radius, string_types) and head_radius == 'auto'):\n if info is None:\n raise ValueError('Info must not be None for auto mode')\n head_radius_fit, r0_fit = fit_sphere_to_headshape(info, units='m')[:2]\n if isinstance(r0, string_types):\n r0 = r0_fit\n if isinstance(head_radius, string_types):\n head_radius = head_radius_fit\n sphere = ConductorModel(is_sphere=True, r0=np.array(r0),\n coord_frame=FIFF.FIFFV_COORD_HEAD)\n sphere['layers'] = list()\n if head_radius is not None:\n # Eventually these could be configurable...\n relative_radii = np.array(relative_radii, float)\n sigmas = np.array(sigmas, float)\n order = np.argsort(relative_radii)\n relative_radii = relative_radii[order]\n sigmas = sigmas[order]\n for rel_rad, sig in zip(relative_radii, sigmas):\n # sort layers by (relative) radius, and scale radii\n layer = dict(rad=rel_rad, sigma=sig)\n layer['rel_rad'] = layer['rad'] = rel_rad\n sphere['layers'].append(layer)\n\n # scale the radii\n R = 
sphere['layers'][-1]['rad']\n rR = sphere['layers'][-1]['rel_rad']\n for layer in sphere['layers']:\n layer['rad'] /= R\n layer['rel_rad'] /= rR\n\n #\n # Setup the EEG sphere model calculations\n #\n\n # Scale the relative radii\n for k in range(len(relative_radii)):\n sphere['layers'][k]['rad'] = (head_radius *\n sphere['layers'][k]['rel_rad'])\n rv = _fwd_eeg_fit_berg_scherg(sphere, 200, 3)\n logger.info('\\nEquiv. model fitting -> RV = %g %%' % (100 * rv))\n for k in range(3):\n logger.info('mu%d = %g lambda%d = %g'\n % (k + 1, sphere['mu'][k], k + 1,\n sphere['layers'][-1]['sigma'] *\n sphere['lambda'][k]))\n logger.info('Set up EEG sphere model with scalp radius %7.1f mm\\n'\n % (1000 * head_radius,))\n return sphere\n\n\n# #############################################################################\n# Sphere fitting\n\n_dig_kind_dict = {\n 'cardinal': FIFF.FIFFV_POINT_CARDINAL,\n 'hpi': FIFF.FIFFV_POINT_HPI,\n 'eeg': FIFF.FIFFV_POINT_EEG,\n 'extra': FIFF.FIFFV_POINT_EXTRA,\n}\n_dig_kind_rev = dict((val, key) for key, val in _dig_kind_dict.items())\n_dig_kind_ints = tuple(_dig_kind_dict.values())\n\n\n@verbose\ndef fit_sphere_to_headshape(info, dig_kinds='auto', units='m', verbose=None):\n \"\"\"Fit a sphere to the headshape points to determine head center.\n\n Parameters\n ----------\n info : instance of Info\n Measurement info.\n dig_kinds : list of str | str\n Kind of digitization points to use in the fitting. These can be any\n combination of ('cardinal', 'hpi', 'eeg', 'extra'). Can also\n be 'auto' (default), which will use only the 'extra' points if\n enough (more than 10) are available, and if not, uses 'extra' and\n 'eeg' points.\n units : str\n Can be \"m\" (default) or \"mm\".\n\n .. versionadded:: 0.12\n\n verbose : bool, str, int, or None\n If not None, override default verbose level (see :func:`mne.verbose`\n and :ref:`Logging documentation <tut_logging>` for more).\n\n Returns\n -------\n radius : float\n Sphere radius.\n origin_head: ndarray, shape (3,)\n Head center in head coordinates.\n origin_device: ndarray, shape (3,)\n Head center in device coordinates.\n\n Notes\n -----\n This function excludes any points that are low and frontal\n (``z < 0 and y > 0``) to improve the fit.\n \"\"\"\n if not isinstance(units, string_types) or units not in ('m', 'mm'):\n raise ValueError('units must be a \"m\" or \"mm\"')\n radius, origin_head, origin_device = _fit_sphere_to_headshape(\n info, dig_kinds)\n if units == 'mm':\n radius *= 1e3\n origin_head *= 1e3\n origin_device *= 1e3\n return radius, origin_head, origin_device\n\n\n@verbose\ndef get_fitting_dig(info, dig_kinds='auto', verbose=None):\n \"\"\"Get digitization points suitable for sphere fitting.\n\n Parameters\n ----------\n info : instance of Info\n The measurement info.\n dig_kinds : list of str | str\n Kind of digitization points to use in the fitting. These can be any\n combination of ('cardinal', 'hpi', 'eeg', 'extra'). Can also\n be 'auto' (default), which will use only the 'extra' points if\n enough (more than 10) are available, and if not, uses 'extra' and\n 'eeg' points.\n verbose : bool, str or None\n If not None, override default verbose level\n\n Returns\n -------\n dig : array, shape (n_pts, 3)\n The digitization points (in head coordinates) to use for fitting.\n\n Notes\n -----\n This will exclude digitization locations that have ``z < 0 and y > 0``,\n i.e. points on the nose and below the nose on the face.\n\n .. 
versionadded:: 0.14\n \"\"\"\n if not isinstance(info, Info):\n raise TypeError('info must be an instance of Info not %s' % type(info))\n if info['dig'] is None:\n raise RuntimeError('Cannot fit headshape without digitization '\n ', info[\"dig\"] is None')\n if isinstance(dig_kinds, string_types):\n if dig_kinds == 'auto':\n # try \"extra\" first\n try:\n return get_fitting_dig(info, 'extra')\n except ValueError:\n pass\n return get_fitting_dig(info, ('extra', 'eeg'))\n else:\n dig_kinds = (dig_kinds,)\n # convert string args to ints (first make dig_kinds mutable in case tuple)\n dig_kinds = list(dig_kinds)\n for di, d in enumerate(dig_kinds):\n dig_kinds[di] = _dig_kind_dict.get(d, d)\n if dig_kinds[di] not in _dig_kind_ints:\n raise ValueError('dig_kinds[#%d] (%s) must be one of %s'\n % (di, d, sorted(list(_dig_kind_dict.keys()))))\n\n # get head digization points of the specified kind(s)\n hsp = [p['r'] for p in info['dig'] if p['kind'] in dig_kinds]\n if any(p['coord_frame'] != FIFF.FIFFV_COORD_HEAD for p in info['dig']):\n raise RuntimeError('Digitization points not in head coordinates, '\n 'contact mne-python developers')\n\n # exclude some frontal points (nose etc.)\n hsp = np.array([p for p in hsp if not (p[2] < -1e-6 and p[1] > 1e-6)])\n\n if len(hsp) <= 10:\n kinds_str = ', '.join(['\"%s\"' % _dig_kind_rev[d]\n for d in sorted(dig_kinds)])\n msg = ('Only %s head digitization points of the specified kind%s (%s,)'\n % (len(hsp), _pl(dig_kinds), kinds_str))\n if len(hsp) < 4:\n raise ValueError(msg + ', at least 4 required')\n else:\n warn(msg + ', fitting may be inaccurate')\n return hsp\n\n\n@verbose\ndef _fit_sphere_to_headshape(info, dig_kinds, verbose=None):\n \"\"\"Fit a sphere to the given head shape.\"\"\"\n hsp = get_fitting_dig(info, dig_kinds)\n radius, origin_head = _fit_sphere(np.array(hsp), disp=False)\n # compute origin in device coordinates\n head_to_dev = _ensure_trans(info['dev_head_t'], 'head', 'meg')\n origin_device = apply_trans(head_to_dev, origin_head)\n logger.info('Fitted sphere radius:'.ljust(30) + '%0.1f mm'\n % (radius * 1e3,))\n # 99th percentile on Wikipedia for Giabella to back of head is 21.7cm,\n # i.e. 
108mm \"radius\", so let's go with 110mm\n # en.wikipedia.org/wiki/Human_head#/media/File:HeadAnthropometry.JPG\n if radius > 0.110:\n warn('Estimated head size (%0.1f mm) exceeded 99th '\n 'percentile for adult head size' % (1e3 * radius,))\n # > 2 cm away from head center in X or Y is strange\n if np.linalg.norm(origin_head[:2]) > 0.02:\n warn('(X, Y) fit (%0.1f, %0.1f) more than 20 mm from '\n 'head frame origin' % tuple(1e3 * origin_head[:2]))\n logger.info('Origin head coordinates:'.ljust(30) +\n '%0.1f %0.1f %0.1f mm' % tuple(1e3 * origin_head))\n logger.info('Origin device coordinates:'.ljust(30) +\n '%0.1f %0.1f %0.1f mm' % tuple(1e3 * origin_device))\n return radius, origin_head, origin_device\n\n\ndef _fit_sphere(points, disp='auto'):\n \"\"\"Fit a sphere to an arbitrary set of points.\"\"\"\n from scipy.optimize import fmin_cobyla\n if isinstance(disp, string_types) and disp == 'auto':\n disp = True if logger.level <= 20 else False\n # initial guess for center and radius\n radii = (np.max(points, axis=1) - np.min(points, axis=1)) / 2.\n radius_init = radii.mean()\n center_init = np.median(points, axis=0)\n\n # optimization\n x0 = np.concatenate([center_init, [radius_init]])\n\n def cost_fun(center_rad):\n d = np.linalg.norm(points - center_rad[:3], axis=1) - center_rad[3]\n d *= d\n return d.sum()\n\n def constraint(center_rad):\n return center_rad[3] # radius must be >= 0\n\n x_opt = fmin_cobyla(cost_fun, x0, constraint, rhobeg=radius_init,\n rhoend=radius_init * 1e-6, disp=disp)\n\n origin = x_opt[:3]\n radius = x_opt[3]\n return radius, origin\n\n\ndef _check_origin(origin, info, coord_frame='head', disp=False):\n \"\"\"Check or auto-determine the origin.\"\"\"\n if isinstance(origin, string_types):\n if origin != 'auto':\n raise ValueError('origin must be a numerical array, or \"auto\", '\n 'not %s' % (origin,))\n if coord_frame == 'head':\n R, origin = fit_sphere_to_headshape(info, verbose=False,\n units='m')[:2]\n logger.info(' Automatic origin fit: head of radius %0.1f mm'\n % (R * 1000.,))\n del R\n else:\n origin = (0., 0., 0.)\n origin = np.array(origin, float)\n if origin.shape != (3,):\n raise ValueError('origin must be a 3-element array')\n if disp:\n origin_str = ', '.join(['%0.1f' % (o * 1000) for o in origin])\n msg = (' Using origin %s mm in the %s frame'\n % (origin_str, coord_frame))\n if coord_frame == 'meg' and info['dev_head_t'] is not None:\n o_dev = apply_trans(info['dev_head_t'], origin)\n origin_str = ', '.join('%0.1f' % (o * 1000,) for o in o_dev)\n msg += ' (%s mm in the head frame)' % (origin_str,)\n logger.info(msg)\n return origin\n\n\n# ############################################################################\n# Create BEM surfaces\n\n@verbose\ndef make_watershed_bem(subject, subjects_dir=None, overwrite=False,\n volume='T1', atlas=False, gcaatlas=False, preflood=None,\n show=False, verbose=None):\n \"\"\"Create BEM surfaces using the FreeSurfer watershed algorithm.\n\n Parameters\n ----------\n subject : str\n Subject name (required)\n subjects_dir : str\n Directory containing subjects data. If None use\n the Freesurfer SUBJECTS_DIR environment variable.\n overwrite : bool\n Write over existing files\n volume : str\n Defaults to T1\n atlas : bool\n Specify the --atlas option for mri_watershed\n gcaatlas : bool\n Use the subcortical atlas\n preflood : int\n Change the preflood height\n show : bool\n Show surfaces to visually inspect all three BEM surfaces (recommended).\n\n .. 
versionadded:: 0.12\n\n verbose : bool, str or None\n If not None, override default verbose level\n\n Notes\n -----\n .. versionadded:: 0.10\n \"\"\"\n from .viz.misc import plot_bem\n env, mri_dir = _prepare_env(subject, subjects_dir,\n requires_freesurfer=True)[:2]\n\n subjects_dir = env['SUBJECTS_DIR']\n subject_dir = op.join(subjects_dir, subject)\n mri_dir = op.join(subject_dir, 'mri')\n T1_dir = op.join(mri_dir, volume)\n T1_mgz = op.join(mri_dir, volume + '.mgz')\n bem_dir = op.join(subject_dir, 'bem')\n ws_dir = op.join(subject_dir, 'bem', 'watershed')\n if not op.isdir(bem_dir):\n os.makedirs(bem_dir)\n if not op.isdir(T1_dir) and not op.isfile(T1_mgz):\n raise RuntimeError('Could not find the MRI data')\n if op.isdir(ws_dir):\n if not overwrite:\n raise RuntimeError('%s already exists. Use the --overwrite option'\n ' to recreate it.' % ws_dir)\n else:\n shutil.rmtree(ws_dir)\n # put together the command\n cmd = ['mri_watershed']\n if preflood:\n cmd += [\"-h\", \"%s\" % int(preflood)]\n\n if gcaatlas:\n cmd += ['-atlas', '-T1', '-brain_atlas', env['FREESURFER_HOME'] +\n '/average/RB_all_withskull_2007-08-08.gca',\n subject_dir + '/mri/transforms/talairach_with_skull.lta']\n elif atlas:\n cmd += ['-atlas']\n if op.exists(T1_mgz):\n cmd += ['-useSRAS', '-surf', op.join(ws_dir, subject), T1_mgz,\n op.join(ws_dir, 'ws')]\n else:\n cmd += ['-useSRAS', '-surf', op.join(ws_dir, subject), T1_dir,\n op.join(ws_dir, 'ws')]\n # report and run\n logger.info('\\nRunning mri_watershed for BEM segmentation with the '\n 'following parameters:\\n\\n'\n 'SUBJECTS_DIR = %s\\n'\n 'SUBJECT = %s\\n'\n 'Results dir = %s\\n' % (subjects_dir, subject, ws_dir))\n os.makedirs(op.join(ws_dir, 'ws'))\n run_subprocess(cmd, env=env)\n\n if op.isfile(T1_mgz):\n new_info = _extract_volume_info(T1_mgz)\n if new_info is None:\n warn('nibabel is required to replace the volume info. Volume info'\n 'not updated in the written surface.')\n new_info = dict()\n surfs = ['brain', 'inner_skull', 'outer_skull', 'outer_skin']\n for s in surfs:\n surf_ws_out = op.join(ws_dir, '%s_%s_surface' % (subject, s))\n\n rr, tris, volume_info = read_surface(surf_ws_out,\n read_metadata=True)\n volume_info.update(new_info) # replace volume info, 'head' stays\n\n write_surface(s, rr, tris, volume_info=volume_info)\n # Create symbolic links\n surf_out = op.join(bem_dir, '%s.surf' % s)\n if not overwrite and op.exists(surf_out):\n skip_symlink = True\n else:\n if op.exists(surf_out):\n os.remove(surf_out)\n _symlink(surf_ws_out, surf_out)\n skip_symlink = False\n\n if skip_symlink:\n logger.info(\"Unable to create all symbolic links to .surf files \"\n \"in bem folder. Use --overwrite option to recreate \"\n \"them.\")\n dest = op.join(bem_dir, 'watershed')\n else:\n logger.info(\"Symbolic links to .surf files created in bem folder\")\n dest = bem_dir\n\n logger.info(\"\\nThank you for waiting.\\nThe BEM triangulations for this \"\n \"subject are now available at:\\n%s.\" % dest)\n\n # Write a head file for coregistration\n fname_head = op.join(bem_dir, subject + '-head.fif')\n if op.isfile(fname_head):\n os.remove(fname_head)\n\n surf = _surfaces_to_bem([op.join(ws_dir, subject + '_outer_skin_surface')],\n [FIFF.FIFFV_BEM_SURF_ID_HEAD], sigmas=[1])\n write_bem_surfaces(fname_head, surf)\n\n # Show computed BEM surfaces\n if show:\n plot_bem(subject=subject, subjects_dir=subjects_dir,\n orientation='coronal', slices=None, show=True)\n\n logger.info('Created %s\\n\\nComplete.' 
% (fname_head,))\n\n\ndef _extract_volume_info(mgz, raise_error=True):\n \"\"\"Extract volume info from a mgz file.\"\"\"\n try:\n import nibabel as nib\n except ImportError:\n return # warning raised elsewhere\n header = nib.load(mgz).header\n vol_info = dict()\n version = header['version']\n if version == 1:\n version = '%s # volume info valid' % version\n else:\n raise ValueError('Volume info invalid.')\n vol_info['valid'] = version\n vol_info['filename'] = mgz\n vol_info['volume'] = header['dims'][:3]\n vol_info['voxelsize'] = header['delta']\n vol_info['xras'], vol_info['yras'], vol_info['zras'] = header['Mdc'].T\n vol_info['cras'] = header['Pxyz_c']\n return vol_info\n\n\n# ############################################################################\n# Read\n\n@verbose\ndef read_bem_surfaces(fname, patch_stats=False, s_id=None, verbose=None):\n \"\"\"Read the BEM surfaces from a FIF file.\n\n Parameters\n ----------\n fname : string\n The name of the file containing the surfaces.\n patch_stats : bool, optional (default False)\n Calculate and add cortical patch statistics to the surfaces.\n s_id : int | None\n If int, only read and return the surface with the given s_id.\n An error will be raised if it doesn't exist. If None, all\n surfaces are read and returned.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see :func:`mne.verbose`\n and :ref:`Logging documentation <tut_logging>` for more).\n\n Returns\n -------\n surf: list | dict\n A list of dictionaries that each contain a surface. If s_id\n is not None, only the requested surface will be returned.\n\n See Also\n --------\n write_bem_surfaces, write_bem_solution, make_bem_model\n \"\"\"\n # Default coordinate frame\n coord_frame = FIFF.FIFFV_COORD_MRI\n # Open the file, create directory\n f, tree, _ = fiff_open(fname)\n with f as fid:\n # Find BEM\n bem = dir_tree_find(tree, FIFF.FIFFB_BEM)\n if bem is None or len(bem) == 0:\n raise ValueError('BEM data not found')\n\n bem = bem[0]\n # Locate all surfaces\n bemsurf = dir_tree_find(bem, FIFF.FIFFB_BEM_SURF)\n if bemsurf is None:\n raise ValueError('BEM surface data not found')\n\n logger.info(' %d BEM surfaces found' % len(bemsurf))\n # Coordinate frame possibly at the top level\n tag = find_tag(fid, bem, FIFF.FIFF_BEM_COORD_FRAME)\n if tag is not None:\n coord_frame = tag.data\n # Read all surfaces\n if s_id is not None:\n surf = [_read_bem_surface(fid, bsurf, coord_frame, s_id)\n for bsurf in bemsurf]\n surf = [s for s in surf if s is not None]\n if not len(surf) == 1:\n raise ValueError('surface with id %d not found' % s_id)\n else:\n surf = list()\n for bsurf in bemsurf:\n logger.info(' Reading a surface...')\n this = _read_bem_surface(fid, bsurf, coord_frame)\n surf.append(this)\n logger.info('[done]')\n logger.info(' %d BEM surfaces read' % len(surf))\n for this in surf:\n if patch_stats or this['nn'] is None:\n complete_surface_info(this, copy=False)\n return surf[0] if s_id is not None else surf\n\n\ndef _read_bem_surface(fid, this, def_coord_frame, s_id=None):\n \"\"\"Read one bem surface.\"\"\"\n # fid should be open as a context manager here\n res = dict()\n # Read all the interesting stuff\n tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_ID)\n\n if tag is None:\n res['id'] = FIFF.FIFFV_BEM_SURF_ID_UNKNOWN\n else:\n res['id'] = int(tag.data)\n\n if s_id is not None and res['id'] != s_id:\n return None\n\n tag = find_tag(fid, this, FIFF.FIFF_BEM_SIGMA)\n res['sigma'] = 1.0 if tag is None else float(tag.data)\n\n tag = find_tag(fid, this, 
FIFF.FIFF_BEM_SURF_NNODE)\n if tag is None:\n raise ValueError('Number of vertices not found')\n\n res['np'] = int(tag.data)\n\n tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_NTRI)\n if tag is None:\n raise ValueError('Number of triangles not found')\n res['ntri'] = int(tag.data)\n\n tag = find_tag(fid, this, FIFF.FIFF_MNE_COORD_FRAME)\n if tag is None:\n tag = find_tag(fid, this, FIFF.FIFF_BEM_COORD_FRAME)\n if tag is None:\n res['coord_frame'] = def_coord_frame\n else:\n res['coord_frame'] = tag.data\n else:\n res['coord_frame'] = tag.data\n\n # Vertices, normals, and triangles\n tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_NODES)\n if tag is None:\n raise ValueError('Vertex data not found')\n\n res['rr'] = tag.data.astype(np.float) # XXX : double because of mayavi bug\n if res['rr'].shape[0] != res['np']:\n raise ValueError('Vertex information is incorrect')\n\n tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NORMALS)\n if tag is None:\n tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_NORMALS)\n if tag is None:\n res['nn'] = None\n else:\n res['nn'] = tag.data.copy()\n if res['nn'].shape[0] != res['np']:\n raise ValueError('Vertex normal information is incorrect')\n\n tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_TRIANGLES)\n if tag is None:\n raise ValueError('Triangulation not found')\n\n res['tris'] = tag.data - 1 # index start at 0 in Python\n if res['tris'].shape[0] != res['ntri']:\n raise ValueError('Triangulation information is incorrect')\n\n return res\n\n\n@verbose\ndef read_bem_solution(fname, verbose=None):\n \"\"\"Read the BEM solution from a file.\n\n Parameters\n ----------\n fname : string\n The file containing the BEM solution.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see :func:`mne.verbose`\n and :ref:`Logging documentation <tut_logging>` for more).\n\n Returns\n -------\n bem : instance of ConductorModel\n The BEM solution.\n\n See Also\n --------\n write_bem_solution, read_bem_surfaces, write_bem_surfaces,\n make_bem_solution\n \"\"\"\n # mirrors fwd_bem_load_surfaces from fwd_bem_model.c\n logger.info('Loading surfaces...')\n bem_surfs = read_bem_surfaces(fname, patch_stats=True, verbose=False)\n if len(bem_surfs) == 3:\n logger.info('Three-layer model surfaces loaded.')\n needed = np.array([FIFF.FIFFV_BEM_SURF_ID_HEAD,\n FIFF.FIFFV_BEM_SURF_ID_SKULL,\n FIFF.FIFFV_BEM_SURF_ID_BRAIN])\n if not all(x['id'] in needed for x in bem_surfs):\n raise RuntimeError('Could not find necessary BEM surfaces')\n # reorder surfaces as necessary (shouldn't need to?)\n reorder = [None] * 3\n for x in bem_surfs:\n reorder[np.where(x['id'] == needed)[0][0]] = x\n bem_surfs = reorder\n elif len(bem_surfs) == 1:\n if not bem_surfs[0]['id'] == FIFF.FIFFV_BEM_SURF_ID_BRAIN:\n raise RuntimeError('BEM Surfaces not found')\n logger.info('Homogeneous model surface loaded.')\n\n # convert from surfaces to solution\n bem = ConductorModel(is_sphere=False, surfs=bem_surfs)\n logger.info('\\nLoading the solution matrix...\\n')\n f, tree, _ = fiff_open(fname)\n with f as fid:\n # Find the BEM data\n nodes = dir_tree_find(tree, FIFF.FIFFB_BEM)\n if len(nodes) == 0:\n raise RuntimeError('No BEM data in %s' % fname)\n bem_node = nodes[0]\n\n # Approximation method\n tag = find_tag(f, bem_node, FIFF.FIFF_BEM_APPROX)\n if tag is None:\n raise RuntimeError('No BEM solution found in %s' % fname)\n method = tag.data[0]\n if method not in (FIFF.FIFFV_BEM_APPROX_CONST,\n FIFF.FIFFV_BEM_APPROX_LINEAR):\n raise RuntimeError('Cannot handle BEM approximation method : 
%d'\n % method)\n\n tag = find_tag(fid, bem_node, FIFF.FIFF_BEM_POT_SOLUTION)\n dims = tag.data.shape\n if len(dims) != 2:\n raise RuntimeError('Expected a two-dimensional solution matrix '\n 'instead of a %d dimensional one' % dims[0])\n\n dim = 0\n for surf in bem['surfs']:\n if method == FIFF.FIFFV_BEM_APPROX_LINEAR:\n dim += surf['np']\n else: # method == FIFF.FIFFV_BEM_APPROX_CONST\n dim += surf['ntri']\n\n if dims[0] != dim or dims[1] != dim:\n raise RuntimeError('Expected a %d x %d solution matrix instead of '\n 'a %d x %d one' % (dim, dim, dims[1], dims[0]))\n sol = tag.data\n nsol = dims[0]\n\n bem['solution'] = sol\n bem['nsol'] = nsol\n bem['bem_method'] = method\n\n # Gamma factors and multipliers\n _add_gamma_multipliers(bem)\n kind = {\n FIFF.FIFFV_BEM_APPROX_CONST: 'constant collocation',\n FIFF.FIFFV_BEM_APPROX_LINEAR: 'linear_collocation',\n }[bem['bem_method']]\n logger.info('Loaded %s BEM solution from %s', kind, fname)\n return bem\n\n\ndef _add_gamma_multipliers(bem):\n \"\"\"Add gamma and multipliers in-place.\"\"\"\n bem['sigma'] = np.array([surf['sigma'] for surf in bem['surfs']])\n # Dirty trick for the zero conductivity outside\n sigma = np.r_[0.0, bem['sigma']]\n bem['source_mult'] = 2.0 / (sigma[1:] + sigma[:-1])\n bem['field_mult'] = sigma[1:] - sigma[:-1]\n # make sure subsequent \"zip\"s work correctly\n assert len(bem['surfs']) == len(bem['field_mult'])\n bem['gamma'] = ((sigma[1:] - sigma[:-1])[np.newaxis, :] /\n (sigma[1:] + sigma[:-1])[:, np.newaxis])\n\n\n_surf_dict = {'inner_skull': FIFF.FIFFV_BEM_SURF_ID_BRAIN,\n 'outer_skull': FIFF.FIFFV_BEM_SURF_ID_SKULL,\n 'head': FIFF.FIFFV_BEM_SURF_ID_HEAD}\n\n\ndef _bem_find_surface(bem, id_):\n \"\"\"Find surface from already-loaded BEM.\"\"\"\n if isinstance(id_, string_types):\n name = id_\n id_ = _surf_dict[id_]\n else:\n name = _bem_explain_surface(id_)\n idx = np.where(np.array([s['id'] for s in bem['surfs']]) == id_)[0]\n if len(idx) != 1:\n raise RuntimeError('BEM model does not have the %s triangulation'\n % name.replace('_', ' '))\n return bem['surfs'][idx[0]]\n\n\ndef _bem_explain_surface(id_):\n \"\"\"Return a string corresponding to the given surface ID.\"\"\"\n _rev_dict = dict((val, key) for key, val in _surf_dict.items())\n return _rev_dict[id_]\n\n\n# ############################################################################\n# Write\n\ndef write_bem_surfaces(fname, surfs):\n \"\"\"Write BEM surfaces to a fiff file.\n\n Parameters\n ----------\n fname : str\n Filename to write.\n surfs : dict | list of dict\n The surfaces, or a single surface.\n \"\"\"\n if isinstance(surfs, dict):\n surfs = [surfs]\n with start_file(fname) as fid:\n start_block(fid, FIFF.FIFFB_BEM)\n write_int(fid, FIFF.FIFF_BEM_COORD_FRAME, surfs[0]['coord_frame'])\n _write_bem_surfaces_block(fid, surfs)\n end_block(fid, FIFF.FIFFB_BEM)\n end_file(fid)\n\n\ndef _write_bem_surfaces_block(fid, surfs):\n \"\"\"Write bem surfaces to open file handle.\"\"\"\n for surf in surfs:\n start_block(fid, FIFF.FIFFB_BEM_SURF)\n write_float(fid, FIFF.FIFF_BEM_SIGMA, surf['sigma'])\n write_int(fid, FIFF.FIFF_BEM_SURF_ID, surf['id'])\n write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, surf['coord_frame'])\n write_int(fid, FIFF.FIFF_BEM_SURF_NNODE, surf['np'])\n write_int(fid, FIFF.FIFF_BEM_SURF_NTRI, surf['ntri'])\n write_float_matrix(fid, FIFF.FIFF_BEM_SURF_NODES, surf['rr'])\n # index start at 0 in Python\n write_int_matrix(fid, FIFF.FIFF_BEM_SURF_TRIANGLES,\n surf['tris'] + 1)\n if 'nn' in surf and surf['nn'] is not None and len(surf['nn']) > 0:\n 
write_float_matrix(fid, FIFF.FIFF_BEM_SURF_NORMALS, surf['nn'])\n end_block(fid, FIFF.FIFFB_BEM_SURF)\n\n\ndef write_bem_solution(fname, bem):\n \"\"\"Write a BEM model with solution.\n\n Parameters\n ----------\n fname : str\n The filename to use.\n bem : instance of ConductorModel\n The BEM model with solution to save.\n\n See Also\n --------\n read_bem_solution\n \"\"\"\n _check_bem_size(bem['surfs'])\n with start_file(fname) as fid:\n start_block(fid, FIFF.FIFFB_BEM)\n # Coordinate frame (mainly for backward compatibility)\n write_int(fid, FIFF.FIFF_BEM_COORD_FRAME,\n bem['surfs'][0]['coord_frame'])\n # Surfaces\n _write_bem_surfaces_block(fid, bem['surfs'])\n # The potential solution\n if 'solution' in bem:\n if bem['bem_method'] != FIFF.FWD_BEM_LINEAR_COLL:\n raise RuntimeError('Only linear collocation supported')\n write_int(fid, FIFF.FIFF_BEM_APPROX, FIFF.FIFFV_BEM_APPROX_LINEAR)\n write_float_matrix(fid, FIFF.FIFF_BEM_POT_SOLUTION,\n bem['solution'])\n end_block(fid, FIFF.FIFFB_BEM)\n end_file(fid)\n\n\n# #############################################################################\n# Create 3-Layers BEM model from Flash MRI images\n\ndef _prepare_env(subject, subjects_dir, requires_freesurfer):\n \"\"\"Prepare an env object for subprocess calls.\"\"\"\n env = os.environ.copy()\n if requires_freesurfer and not os.environ.get('FREESURFER_HOME'):\n raise RuntimeError('I cannot find freesurfer. The FREESURFER_HOME '\n 'environment variable is not set.')\n\n if not isinstance(subject, string_types):\n raise TypeError('The subject argument must be set')\n\n subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)\n if not op.isdir(subjects_dir):\n raise RuntimeError('Could not find the MRI data directory \"%s\"'\n % subjects_dir)\n subject_dir = op.join(subjects_dir, subject)\n if not op.isdir(subject_dir):\n raise RuntimeError('Could not find the subject data directory \"%s\"'\n % (subject_dir,))\n env['SUBJECT'] = subject\n env['SUBJECTS_DIR'] = subjects_dir\n mri_dir = op.join(subject_dir, 'mri')\n bem_dir = op.join(subject_dir, 'bem')\n return env, mri_dir, bem_dir\n\n\n@verbose\ndef convert_flash_mris(subject, flash30=True, convert=True, unwarp=False,\n subjects_dir=None, verbose=None):\n \"\"\"Convert DICOM files for use with make_flash_bem.\n\n Parameters\n ----------\n subject : str\n Subject name.\n flash30 : bool\n Use 30-degree flip angle data.\n convert : bool\n Assume that the Flash MRI images have already been converted\n to mgz files.\n unwarp : bool\n Run grad_unwarp with -unwarp option on each of the converted\n data sets. It requires FreeSurfer's MATLAB toolbox to be properly\n installed.\n subjects_dir : string, or None\n Path to SUBJECTS_DIR if it is not set in the environment.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see :func:`mne.verbose`\n and :ref:`Logging documentation <tut_logging>` for more).\n\n Notes\n -----\n Before running this script do the following:\n (unless convert=False is specified)\n\n 1. Copy all of your FLASH images in a single directory <source> and\n create a directory <dest> to hold the output of mne_organize_dicom\n 2. cd to <dest> and run\n $ mne_organize_dicom <source>\n to create an appropriate directory structure\n 3. Create symbolic links to make flash05 and flash30 point to the\n appropriate series:\n $ ln -s <FLASH 5 series dir> flash05\n $ ln -s <FLASH 30 series dir> flash30\n Some partition formats (e.g. 
FAT32) do not support symbolic links.\n In this case, copy the file to the appropriate series:\n $ cp <FLASH 5 series dir> flash05\n $ cp <FLASH 30 series dir> flash30\n 4. cd to the directory where flash05 and flash30 links are\n 5. Set SUBJECTS_DIR and SUBJECT environment variables appropriately\n 6. Run this script\n\n This function assumes that the Freesurfer segmentation of the subject\n has been completed. In particular, the T1.mgz and brain.mgz MRI volumes\n should be, as usual, in the subject's mri directory.\n \"\"\"\n env, mri_dir = _prepare_env(subject, subjects_dir,\n requires_freesurfer=True)[:2]\n curdir = os.getcwd()\n # Step 1a : Data conversion to mgz format\n if not op.exists(op.join(mri_dir, 'flash', 'parameter_maps')):\n os.makedirs(op.join(mri_dir, 'flash', 'parameter_maps'))\n echos_done = 0\n if convert:\n logger.info(\"\\n---- Converting Flash images ----\")\n echos = ['001', '002', '003', '004', '005', '006', '007', '008']\n if flash30:\n flashes = ['05']\n else:\n flashes = ['05', '30']\n #\n missing = False\n for flash in flashes:\n for echo in echos:\n if not op.isdir(op.join('flash' + flash, echo)):\n missing = True\n if missing:\n echos = ['002', '003', '004', '005', '006', '007', '008', '009']\n for flash in flashes:\n for echo in echos:\n if not op.isdir(op.join('flash' + flash, echo)):\n raise RuntimeError(\"Directory %s is missing.\"\n % op.join('flash' + flash, echo))\n #\n for flash in flashes:\n for echo in echos:\n if not op.isdir(op.join('flash' + flash, echo)):\n raise RuntimeError(\"Directory %s is missing.\"\n % op.join('flash' + flash, echo))\n sample_file = glob.glob(op.join('flash' + flash, echo, '*'))[0]\n dest_file = op.join(mri_dir, 'flash',\n 'mef' + flash + '_' + echo + '.mgz')\n # do not redo if already present\n if op.isfile(dest_file):\n logger.info(\"The file %s is already there\")\n else:\n cmd = ['mri_convert', sample_file, dest_file]\n run_subprocess(cmd, env=env)\n echos_done += 1\n # Step 1b : Run grad_unwarp on converted files\n os.chdir(op.join(mri_dir, \"flash\"))\n files = glob.glob(\"mef*.mgz\")\n if unwarp:\n logger.info(\"\\n---- Unwarp mgz data sets ----\")\n for infile in files:\n outfile = infile.replace(\".mgz\", \"u.mgz\")\n cmd = ['grad_unwarp', '-i', infile, '-o', outfile, '-unwarp',\n 'true']\n run_subprocess(cmd, env=env)\n # Clear parameter maps if some of the data were reconverted\n if echos_done > 0 and op.exists(\"parameter_maps\"):\n shutil.rmtree(\"parameter_maps\")\n logger.info(\"\\nParameter maps directory cleared\")\n if not op.exists(\"parameter_maps\"):\n os.makedirs(\"parameter_maps\")\n # Step 2 : Create the parameter maps\n if flash30:\n logger.info(\"\\n---- Creating the parameter maps ----\")\n if unwarp:\n files = glob.glob(\"mef05*u.mgz\")\n if len(os.listdir('parameter_maps')) == 0:\n cmd = ['mri_ms_fitparms'] + files + ['parameter_maps']\n run_subprocess(cmd, env=env)\n else:\n logger.info(\"Parameter maps were already computed\")\n # Step 3 : Synthesize the flash 5 images\n logger.info(\"\\n---- Synthesizing flash 5 images ----\")\n os.chdir('parameter_maps')\n if not op.exists('flash5.mgz'):\n cmd = ['mri_synthesize', '20 5 5', 'T1.mgz', 'PD.mgz',\n 'flash5.mgz']\n run_subprocess(cmd, env=env)\n os.remove('flash5_reg.mgz')\n else:\n logger.info(\"Synthesized flash 5 volume is already there\")\n else:\n logger.info(\"\\n---- Averaging flash5 echoes ----\")\n os.chdir('parameter_maps')\n if unwarp:\n files = glob.glob(\"mef05*u.mgz\")\n else:\n files = glob.glob(\"mef05*.mgz\")\n cmd = 
['mri_average', '-noconform', files, 'flash5.mgz']\n run_subprocess(cmd, env=env)\n if op.exists('flash5_reg.mgz'):\n os.remove('flash5_reg.mgz')\n\n # Go back to initial directory\n os.chdir(curdir)\n\n\n@verbose\ndef make_flash_bem(subject, overwrite=False, show=True, subjects_dir=None,\n flash_path=None, verbose=None):\n \"\"\"Create 3-Layer BEM model from prepared flash MRI images.\n\n Parameters\n ----------\n subject : str\n Subject name.\n overwrite : bool\n Write over existing .surf files in bem folder.\n show : bool\n Show surfaces to visually inspect all three BEM surfaces (recommended).\n subjects_dir : string, or None\n Path to SUBJECTS_DIR if it is not set in the environment.\n flash_path : str | None\n Path to the flash images. If None (default), mri/flash/parameter_maps\n within the subject reconstruction is used.\n\n .. versionadded:: 0.13.0\n\n verbose : bool, str, int, or None\n If not None, override default verbose level (see :func:`mne.verbose`\n and :ref:`Logging documentation <tut_logging>` for more).\n\n Notes\n -----\n This program assumes that FreeSurfer is installed and sourced properly.\n\n This function extracts the BEM surfaces (outer skull, inner skull, and\n outer skin) from multiecho FLASH MRI data with spin angles of 5 and 30\n degrees, in mgz format.\n\n See Also\n --------\n convert_flash_mris\n \"\"\"\n from .viz.misc import plot_bem\n\n is_test = os.environ.get('MNE_SKIP_FS_FLASH_CALL', False)\n\n env, mri_dir, bem_dir = _prepare_env(subject, subjects_dir,\n requires_freesurfer=True)\n\n if flash_path is None:\n flash_path = op.join(mri_dir, 'flash', 'parameter_maps')\n else:\n flash_path = op.abspath(flash_path)\n curdir = os.getcwd()\n subjects_dir = env['SUBJECTS_DIR']\n\n logger.info('\\nProcessing the flash MRI data to produce BEM meshes with '\n 'the following parameters:\\n'\n 'SUBJECTS_DIR = %s\\n'\n 'SUBJECT = %s\\n'\n 'Result dir = %s\\n' % (subjects_dir, subject,\n op.join(bem_dir, 'flash')))\n # Step 4 : Register with MPRAGE\n logger.info(\"\\n---- Registering flash 5 with MPRAGE ----\")\n flash5 = op.join(flash_path, 'flash5.mgz')\n flash5_reg = op.join(flash_path, 'flash5_reg.mgz')\n if not op.exists(flash5_reg):\n if op.exists(op.join(mri_dir, 'T1.mgz')):\n ref_volume = op.join(mri_dir, 'T1.mgz')\n else:\n ref_volume = op.join(mri_dir, 'T1')\n cmd = ['fsl_rigid_register', '-r', ref_volume, '-i', flash5,\n '-o', flash5_reg]\n run_subprocess(cmd, env=env)\n else:\n logger.info(\"Registered flash 5 image is already there\")\n # Step 5a : Convert flash5 into COR\n logger.info(\"\\n---- Converting flash5 volume into COR format ----\")\n shutil.rmtree(op.join(mri_dir, 'flash5'), ignore_errors=True)\n os.makedirs(op.join(mri_dir, 'flash5'))\n if not is_test: # CIs don't have freesurfer, skipped when testing.\n cmd = ['mri_convert', flash5_reg, op.join(mri_dir, 'flash5')]\n run_subprocess(cmd, env=env)\n # Step 5b and c : Convert the mgz volumes into COR\n os.chdir(mri_dir)\n convert_T1 = False\n if not op.isdir('T1') or len(glob.glob(op.join('T1', 'COR*'))) == 0:\n convert_T1 = True\n convert_brain = False\n if not op.isdir('brain') or len(glob.glob(op.join('brain', 'COR*'))) == 0:\n convert_brain = True\n logger.info(\"\\n---- Converting T1 volume into COR format ----\")\n if convert_T1:\n if not op.isfile('T1.mgz'):\n raise RuntimeError(\"Both T1 mgz and T1 COR volumes missing.\")\n os.makedirs('T1')\n cmd = ['mri_convert', 'T1.mgz', 'T1']\n run_subprocess(cmd, env=env)\n else:\n logger.info(\"T1 volume is already in COR format\")\n 
logger.info(\"\\n---- Converting brain volume into COR format ----\")\n if convert_brain:\n if not op.isfile('brain.mgz'):\n raise RuntimeError(\"Both brain mgz and brain COR volumes missing.\")\n os.makedirs('brain')\n cmd = ['mri_convert', 'brain.mgz', 'brain']\n run_subprocess(cmd, env=env)\n else:\n logger.info(\"Brain volume is already in COR format\")\n # Finally ready to go\n if not is_test: # CIs don't have freesurfer, skipped when testing.\n logger.info(\"\\n---- Creating the BEM surfaces ----\")\n cmd = ['mri_make_bem_surfaces', subject]\n run_subprocess(cmd, env=env)\n\n logger.info(\"\\n---- Converting the tri files into surf files ----\")\n os.chdir(bem_dir)\n if not op.exists('flash'):\n os.makedirs('flash')\n os.chdir('flash')\n surfs = ['inner_skull', 'outer_skull', 'outer_skin']\n for surf in surfs:\n shutil.move(op.join(bem_dir, surf + '.tri'), surf + '.tri')\n\n nodes, tris = read_tri(surf + '.tri', swap=True)\n vol_info = _extract_volume_info(flash5_reg)\n if vol_info is None:\n warn('nibabel is required to update the volume info. Volume info '\n 'omitted from the written surface.')\n else:\n vol_info['head'] = np.array([20])\n write_surface(surf + '.surf', nodes, tris, volume_info=vol_info)\n\n # Cleanup section\n logger.info(\"\\n---- Cleaning up ----\")\n os.chdir(bem_dir)\n os.remove('inner_skull_tmp.tri')\n os.chdir(mri_dir)\n if convert_T1:\n shutil.rmtree('T1')\n logger.info(\"Deleted the T1 COR volume\")\n if convert_brain:\n shutil.rmtree('brain')\n logger.info(\"Deleted the brain COR volume\")\n shutil.rmtree('flash5')\n logger.info(\"Deleted the flash5 COR volume\")\n # Create symbolic links to the .surf files in the bem folder\n logger.info(\"\\n---- Creating symbolic links ----\")\n os.chdir(bem_dir)\n for surf in surfs:\n surf = surf + '.surf'\n if not overwrite and op.exists(surf):\n skip_symlink = True\n else:\n if op.exists(surf):\n os.remove(surf)\n _symlink(op.join('flash', surf), op.join(surf))\n skip_symlink = False\n if skip_symlink:\n logger.info(\"Unable to create all symbolic links to .surf files \"\n \"in bem folder. Use --overwrite option to recreate them.\")\n dest = op.join(bem_dir, 'flash')\n else:\n logger.info(\"Symbolic links to .surf files created in bem folder\")\n dest = bem_dir\n logger.info(\"\\nThank you for waiting.\\nThe BEM triangulations for this \"\n \"subject are now available at:\\n%s.\\nWe hope the BEM meshes \"\n \"created will facilitate your MEG and EEG data analyses.\"\n % dest)\n # Show computed BEM surfaces\n if show:\n plot_bem(subject=subject, subjects_dir=subjects_dir,\n orientation='coronal', slices=None, show=True)\n\n # Go back to initial directory\n os.chdir(curdir)\n\n\ndef _check_bem_size(surfs):\n \"\"\"Check bem surface sizes.\"\"\"\n if len(surfs) > 1 and surfs[0]['np'] > 10000:\n warn('The bem surfaces have %s data points. 5120 (ico grade=4) '\n 'should be enough. Dense 3-layer bems may not save properly.' %\n surfs[0]['np'])\n\n\ndef _symlink(src, dest):\n \"\"\"Create a symlink.\"\"\"\n try:\n os.symlink(src, dest)\n except OSError:\n warn('Could not create symbolic link %s. Check that your partition '\n 'handles symbolic links. The file will be copied instead.' % dest)\n shutil.copy(src, dest)\n",
"# Author: Jean-Remi King, <[email protected]>\n#\n# License: BSD (3-clause)\n\n\nimport numpy as np\nfrom numpy.testing import assert_array_equal\nfrom nose.tools import assert_raises\nfrom mne.utils import requires_sklearn\nfrom mne.decoding.time_frequency import TimeFrequency\n\n\n@requires_sklearn\ndef test_timefrequency():\n from sklearn.base import clone\n # Init\n n_freqs = 3\n freqs = np.linspace(20, 30, n_freqs)\n tf = TimeFrequency(freqs, sfreq=100)\n for output in ['avg_power', 'foo', None]:\n assert_raises(ValueError, TimeFrequency, freqs, output=output)\n tf = clone(tf)\n\n # Fit\n n_epochs, n_chans, n_times = 10, 2, 100\n X = np.random.rand(n_epochs, n_chans, n_times)\n tf.fit(X, None)\n\n # Transform\n tf = TimeFrequency(freqs, sfreq=100)\n tf.fit_transform(X, None)\n # 3-D X\n Xt = tf.transform(X)\n assert_array_equal(Xt.shape, [n_epochs, n_chans, n_freqs, n_times])\n # 2-D X\n Xt = tf.transform(X[:, 0, :])\n assert_array_equal(Xt.shape, [n_epochs, n_freqs, n_times])\n # 3-D with decim\n tf = TimeFrequency(freqs, sfreq=100, decim=2)\n Xt = tf.transform(X)\n assert_array_equal(Xt.shape, [n_epochs, n_chans, n_freqs, n_times // 2])\n",
"# Author: Martin Luessi <[email protected]>\n#\n# License: Simplified BSD\n\nimport os.path as op\n\nfrom nose.tools import assert_true, assert_raises\nimport pytest\nimport numpy as np\nfrom numpy.testing import (assert_array_almost_equal, assert_equal,\n assert_allclose)\n\nimport mne\nfrom mne.datasets import testing\nfrom mne import (read_cov, read_forward_solution, read_evokeds,\n convert_forward_solution)\nfrom mne.cov import regularize\nfrom mne.inverse_sparse import gamma_map\nfrom mne.inverse_sparse.mxne_inverse import make_stc_from_dipoles\nfrom mne import pick_types_forward\nfrom mne.utils import run_tests_if_main\nfrom mne.dipole import Dipole\n\ndata_path = testing.data_path(download=False)\nfname_evoked = op.join(data_path, 'MEG', 'sample',\n 'sample_audvis-ave.fif')\nfname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis-cov.fif')\nfname_fwd = op.join(data_path, 'MEG', 'sample',\n 'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')\nsubjects_dir = op.join(data_path, 'subjects')\n\n\ndef _check_stc(stc, evoked, idx, ratio=50.):\n \"\"\"Helper to check correctness\"\"\"\n assert_array_almost_equal(stc.times, evoked.times, 5)\n amps = np.sum(stc.data ** 2, axis=1)\n order = np.argsort(amps)[::-1]\n amps = amps[order]\n verts = np.concatenate(stc.vertices)[order]\n assert_equal(idx, verts[0], err_msg=str(list(verts)))\n assert_true(amps[0] > ratio * amps[1], msg=str(amps[0] / amps[1]))\n\n\ndef _check_stcs(stc1, stc2):\n \"\"\"Helper to check correctness\"\"\"\n assert_allclose(stc1.times, stc2.times)\n assert_allclose(stc1.data, stc2.data)\n assert_allclose(stc1.vertices[0], stc2.vertices[0])\n assert_allclose(stc1.vertices[1], stc2.vertices[1])\n assert_allclose(stc1.tmin, stc2.tmin)\n assert_allclose(stc1.tstep, stc2.tstep)\n\n\[email protected]\[email protected]_testing_data\ndef test_gamma_map():\n \"\"\"Test Gamma MAP inverse\"\"\"\n forward = read_forward_solution(fname_fwd)\n forward = convert_forward_solution(forward, surf_ori=True)\n\n forward = pick_types_forward(forward, meg=False, eeg=True)\n evoked = read_evokeds(fname_evoked, condition=0, baseline=(None, 0),\n proj=False)\n evoked.resample(50, npad=100)\n evoked.crop(tmin=0.1, tmax=0.16) # crop to window around peak\n\n cov = read_cov(fname_cov)\n cov = regularize(cov, evoked.info)\n\n alpha = 0.5\n stc = gamma_map(evoked, forward, cov, alpha, tol=1e-4,\n xyz_same_gamma=True, update_mode=1)\n _check_stc(stc, evoked, 68477)\n\n stc = gamma_map(evoked, forward, cov, alpha, tol=1e-4,\n xyz_same_gamma=False, update_mode=1)\n _check_stc(stc, evoked, 82010)\n\n dips = gamma_map(evoked, forward, cov, alpha, tol=1e-4,\n xyz_same_gamma=False, update_mode=1,\n return_as_dipoles=True)\n assert_true(isinstance(dips[0], Dipole))\n stc_dip = make_stc_from_dipoles(dips, forward['src'])\n _check_stcs(stc, stc_dip)\n\n # force fixed orientation\n stc = gamma_map(evoked, forward, cov, alpha, tol=1e-4,\n xyz_same_gamma=False, update_mode=2,\n loose=0, return_residual=False)\n _check_stc(stc, evoked, 85739, 20)\n\n\[email protected]\[email protected]_testing_data\ndef test_gamma_map_vol_sphere():\n \"\"\"Gamma MAP with a sphere forward and volumic source space\"\"\"\n evoked = read_evokeds(fname_evoked, condition=0, baseline=(None, 0),\n proj=False)\n evoked.resample(50, npad=100)\n evoked.crop(tmin=0.1, tmax=0.16) # crop to window around peak\n\n cov = read_cov(fname_cov)\n cov = regularize(cov, evoked.info)\n\n info = evoked.info\n sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=0.080)\n src = 
mne.setup_volume_source_space(subject=None, pos=15., mri=None,\n sphere=(0.0, 0.0, 0.0, 80.0),\n bem=None, mindist=5.0,\n exclude=2.0)\n fwd = mne.make_forward_solution(info, trans=None, src=src, bem=sphere,\n eeg=False, meg=True)\n\n alpha = 0.5\n assert_raises(ValueError, gamma_map, evoked, fwd, cov, alpha,\n loose=0, return_residual=False)\n\n assert_raises(ValueError, gamma_map, evoked, fwd, cov, alpha,\n loose=0.2, return_residual=False)\n\n stc = gamma_map(evoked, fwd, cov, alpha, tol=1e-4,\n xyz_same_gamma=False, update_mode=2,\n return_residual=False)\n\n assert_array_almost_equal(stc.times, evoked.times, 5)\n\n # Compare orientation obtained using fit_dipole and gamma_map\n # for a simulated evoked containing a single dipole\n stc = mne.VolSourceEstimate(50e-9 * np.random.RandomState(42).randn(1, 4),\n vertices=stc.vertices[:1],\n tmin=stc.tmin,\n tstep=stc.tstep)\n evoked_dip = mne.simulation.simulate_evoked(fwd, stc, info, cov, nave=1e9,\n use_cps=True)\n\n dip_gmap = gamma_map(evoked_dip, fwd, cov, 0.1, return_as_dipoles=True)\n\n amp_max = [np.max(d.amplitude) for d in dip_gmap]\n dip_gmap = dip_gmap[np.argmax(amp_max)]\n assert_true(dip_gmap[0].pos[0] in src[0]['rr'][stc.vertices])\n\n dip_fit = mne.fit_dipole(evoked_dip, cov, sphere)[0]\n assert_true(np.abs(np.dot(dip_fit.ori[0], dip_gmap.ori[0])) > 0.99)\n\nrun_tests_if_main()\n",
"import os.path as op\n\nimport numpy as np\nfrom numpy.testing import assert_almost_equal\nimport pytest\n\nfrom scipy.linalg import svd, pinv\nimport scipy.io as sio\n\nfrom mne.io import read_raw_fif\nfrom mne import pick_types\nfrom mne.preprocessing.infomax_ import infomax\nfrom mne.utils import random_permutation, run_tests_if_main\nfrom mne.datasets import testing\n\nbase_dir = op.join(op.dirname(__file__), 'data')\n\n\ndef generate_data_for_comparing_against_eeglab_infomax(ch_type, random_state):\n \"\"\"Generate data.\"\"\"\n\n data_dir = op.join(testing.data_path(download=False), 'MEG', 'sample')\n raw_fname = op.join(data_dir, 'sample_audvis_trunc_raw.fif')\n\n raw = read_raw_fif(raw_fname, preload=True)\n\n if ch_type == 'eeg':\n picks = pick_types(raw.info, meg=False, eeg=True, exclude='bads')\n else:\n picks = pick_types(raw.info, meg=ch_type,\n eeg=False, exclude='bads')\n\n # select a small number of channels for the test\n number_of_channels_to_use = 5\n idx_perm = random_permutation(picks.shape[0], random_state)\n picks = picks[idx_perm[:number_of_channels_to_use]]\n\n raw.filter(1, 45, picks=picks, filter_length='10s',\n l_trans_bandwidth=0.5, h_trans_bandwidth=0.5,\n phase='zero-double', fir_window='hann',\n fir_design='firwin2') # use the old way\n X = raw[picks, :][0][:, ::20]\n\n # Subtract the mean\n mean_X = X.mean(axis=1)\n X -= mean_X[:, None]\n\n # pre_whitening: z-score\n X /= np.std(X)\n\n T = X.shape[1]\n cov_X = np.dot(X, X.T) / T\n\n # Let's whiten the data\n U, D, _ = svd(cov_X)\n W = np.dot(U, U.T / np.sqrt(D)[:, None])\n Y = np.dot(W, X)\n\n return Y\n\n\[email protected]\[email protected]_testing_data\ndef test_mne_python_vs_eeglab():\n \"\"\" Test eeglab vs mne_python infomax code.\"\"\"\n random_state = 42\n\n methods = ['infomax', 'extended_infomax']\n ch_types = ['eeg', 'mag']\n for ch_type in ch_types:\n Y = generate_data_for_comparing_against_eeglab_infomax(\n ch_type, random_state)\n N, T = Y.shape\n for method in methods:\n eeglab_results_file = ('eeglab_%s_results_%s_data.mat'\n % (method,\n dict(eeg='eeg', mag='meg')[ch_type]))\n\n # For comparasion against eeglab, make sure the following\n # parameters have the same value in mne_python and eeglab:\n #\n # - starting point\n # - random state\n # - learning rate\n # - block size\n # - blowup parameter\n # - blowup_fac parameter\n # - tolerance for stopping the algorithm\n # - number of iterations\n # - anneal_step parameter\n #\n # Notes:\n # * By default, eeglab whiten the data using \"sphering transform\"\n # instead of pca. The mne_python infomax code does not\n # whiten the data. To make sure both mne_python and eeglab starts\n # from the same point (i.e., the same matrix), we need to make\n # sure to whiten the data outside, and pass these whiten data to\n # mne_python and eeglab. 
Finally, we need to tell eeglab that\n # the input data is already whiten, this can be done by calling\n # eeglab with the following syntax:\n #\n # % Run infomax\n # [unmixing,sphere,meanvar,bias,signs,lrates,sources,y] = ...\n # runica( Y, 'sphering', 'none');\n #\n # % Run extended infomax\n # [unmixing,sphere,meanvar,bias,signs,lrates,sources,y] = ...\n # runica( Y, 'sphering', 'none', 'extended', 1);\n #\n # By calling eeglab using the former code, we are using its\n # default parameters, which are specified below in the section\n # \"EEGLAB default parameters\".\n #\n # * eeglab does not expose a parameter for fixing the random state.\n # Therefore, to accomplish this, we need to edit the runica.m\n # file located at /path_to_eeglab/functions/sigprocfunc/runica.m\n #\n # i) Comment the line related with the random number generator\n # (line 812).\n # ii) Then, add the following line just below line 812:\n # rng(42); %use 42 as random seed.\n #\n # * eeglab does not have the parameter \"n_small_angle\",\n # so we need to disable it for making a fair comparison.\n #\n # * Finally, we need to take the unmixing matrix estimated by the\n # mne_python infomax implementation and order the components\n # in the same way that eeglab does. This is done below in the\n # section \"Order the components in the same way that eeglab does\"\n\n # EEGLAB default parameters\n l_rate_eeglab = 0.00065 / np.log(N)\n block_eeglab = int(np.ceil(np.min([5 * np.log(T), 0.3 * T])))\n blowup_eeglab = 1e9\n blowup_fac_eeglab = 0.8\n max_iter_eeglab = 512\n\n if method == 'infomax':\n anneal_step_eeglab = 0.9\n use_extended = False\n\n elif method == 'extended_infomax':\n anneal_step_eeglab = 0.98\n use_extended = True\n\n w_change_eeglab = 1e-7 if N > 32 else 1e-6\n\n # Call mne_python infomax version using the following sintax\n # to obtain the same result than eeglab version\n unmixing = infomax(\n Y.T, extended=use_extended, random_state=random_state,\n max_iter=max_iter_eeglab, l_rate=l_rate_eeglab,\n block=block_eeglab, w_change=w_change_eeglab,\n blowup=blowup_eeglab, blowup_fac=blowup_fac_eeglab,\n n_small_angle=None, anneal_step=anneal_step_eeglab)\n\n # Order the components in the same way that eeglab does\n sources = np.dot(unmixing, Y)\n mixing = pinv(unmixing)\n\n mvar = np.sum(mixing ** 2, axis=0) * \\\n np.sum(sources ** 2, axis=1) / (N * T - 1)\n windex = np.argsort(mvar)[::-1]\n\n unmixing_ordered = unmixing[windex, :]\n\n # Load the eeglab results, then compare the unmixing matrices\n # estimated by mne_python and eeglab. To make the comparison use\n # the \\ell_inf norm:\n # ||unmixing_mne_python - unmixing_eeglab||_inf\n\n eeglab_data = sio.loadmat(op.join(base_dir, eeglab_results_file))\n unmixing_eeglab = eeglab_data['unmixing_eeglab']\n\n maximum_difference = np.max(np.abs(unmixing_ordered -\n unmixing_eeglab))\n\n assert_almost_equal(maximum_difference, 1e-12, decimal=10)\n\nrun_tests_if_main()\n",
"from distutils.version import LooseVersion\nimport warnings\n\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_array_almost_equal\n\nfrom mne.time_frequency import psd_multitaper\nfrom mne.time_frequency.multitaper import dpss_windows\nfrom mne.utils import requires_nitime\nfrom mne.io import RawArray\nfrom mne import create_info\n\n\n@requires_nitime\ndef test_dpss_windows():\n \"\"\"Test computation of DPSS windows.\"\"\"\n\n import nitime as ni\n N = 1000\n half_nbw = 4\n Kmax = int(2 * half_nbw)\n\n dpss, eigs = dpss_windows(N, half_nbw, Kmax, low_bias=False)\n with warnings.catch_warnings(record=True): # conversions\n dpss_ni, eigs_ni = ni.algorithms.dpss_windows(N, half_nbw, Kmax)\n\n assert_array_almost_equal(dpss, dpss_ni)\n assert_array_almost_equal(eigs, eigs_ni)\n\n dpss, eigs = dpss_windows(N, half_nbw, Kmax, interp_from=200,\n low_bias=False)\n with warnings.catch_warnings(record=True): # conversions\n dpss_ni, eigs_ni = ni.algorithms.dpss_windows(N, half_nbw, Kmax,\n interp_from=200)\n\n assert_array_almost_equal(dpss, dpss_ni)\n assert_array_almost_equal(eigs, eigs_ni)\n\n\n@requires_nitime\ndef test_multitaper_psd():\n \"\"\"Test multi-taper PSD computation.\"\"\"\n import nitime as ni\n for n_times in (100, 101):\n n_channels = 5\n data = np.random.RandomState(0).randn(n_channels, n_times)\n sfreq = 500\n info = create_info(n_channels, sfreq, 'eeg')\n raw = RawArray(data, info)\n pytest.raises(ValueError, psd_multitaper, raw, sfreq,\n normalization='foo')\n ni_5 = (LooseVersion(ni.__version__) >= LooseVersion('0.5'))\n norm = 'full' if ni_5 else 'length'\n for adaptive, n_jobs in zip((False, True, True), (1, 1, 2)):\n psd, freqs = psd_multitaper(raw, adaptive=adaptive,\n n_jobs=n_jobs,\n normalization=norm)\n with warnings.catch_warnings(record=True): # nitime integers\n freqs_ni, psd_ni, _ = ni.algorithms.spectral.multi_taper_psd(\n data, sfreq, adaptive=adaptive, jackknife=False)\n assert_array_almost_equal(psd, psd_ni, decimal=4)\n if n_times % 2 == 0:\n # nitime's frequency definitions must be incorrect,\n # they give the same values for 100 and 101 samples\n assert_array_almost_equal(freqs, freqs_ni)\n with pytest.raises(ValueError, match='use a value of at least'):\n psd_multitaper(raw, bandwidth=4.9)\n"
] | [
[
"numpy.dot",
"scipy.linalg.svd",
"numpy.sqrt",
"numpy.arctan2",
"numpy.concatenate",
"numpy.max",
"numpy.where",
"numpy.arange",
"numpy.eye",
"scipy.linalg.inv",
"numpy.zeros",
"numpy.log",
"numpy.power",
"numpy.min",
"numpy.median",
"scipy.optimize.fmin_cobyla",
"numpy.argsort",
"numpy.array",
"numpy.random.RandomState",
"numpy.sum",
"numpy.abs",
"numpy.array_equal",
"numpy.linalg.norm",
"numpy.empty"
],
[
"numpy.testing.assert_array_equal",
"sklearn.base.clone",
"numpy.random.rand",
"numpy.linspace"
],
[
"numpy.dot",
"numpy.concatenate",
"numpy.max",
"numpy.argmax",
"numpy.testing.assert_allclose",
"numpy.argsort",
"numpy.random.RandomState",
"numpy.sum",
"numpy.testing.assert_array_almost_equal"
],
[
"numpy.dot",
"scipy.linalg.pinv",
"scipy.linalg.svd",
"numpy.log",
"numpy.sqrt",
"numpy.abs",
"numpy.testing.assert_almost_equal",
"numpy.std",
"numpy.argsort",
"numpy.sum"
],
[
"numpy.random.RandomState",
"numpy.testing.assert_array_almost_equal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.12",
"0.14",
"0.15"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.12",
"0.14",
"0.15"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
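As a hedged aside on the MNE-Python BEM/sphere-fitting code in the record above: a minimal usage sketch for fit_sphere_to_headshape(), assuming an MNE-Python installation and a measurement file that contains head digitization points; the file name below is a placeholder and not part of the record.

import mne
from mne.bem import fit_sphere_to_headshape

# Placeholder path: any .fif file whose Info carries digitization points would do.
info = mne.io.read_info('sample_audvis_raw.fif')

# dig_kinds='auto' uses only the 'extra' headshape points when enough (> 10)
# are available, otherwise falls back to 'extra' + 'eeg' (see get_fitting_dig
# in the record above).
radius, origin_head, origin_device = fit_sphere_to_headshape(
    info, dig_kinds='auto', units='mm')
print('Fitted sphere radius: %0.1f mm' % radius)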
TencentYoutuResearch/SelfSupervisedLearning-DSM | [
"655a0a23a47bf2559f3d435384ae59a8871a5ff5"
] | [
"src/augment/basic_augmentation/noise.py"
] | [
"import torch\nimport torch.nn as nn\n\n\n\"\"\"\nusage\n z_rand = generate_noise([1,nzx,nzy], device=opt.device)\n z_rand = z_rand.expand(1,3,Z_opt.shape[2],Z_opt.shape[3])\n z_prev1 = 0.95*Z_opt +0.05*z_rand\n\"\"\"\n\n\ndef upsampling(im, sx, sy):\n m = nn.Upsample(size=[round(sx), round(sy)], mode='bilinear', align_corners=True)\n return m(im)\n\n\ndef generate_noise(size, num_samp=1, device='cuda', type='gaussian', scale=1):\n if type == 'gaussian':\n noise = torch.randn(num_samp, size[0], round(size[1]/scale), round(size[2]/scale))\n noise = upsampling(noise, size[1], size[2])\n if type == 'gaussian_mixture':\n noise1 = torch.randn(num_samp, size[0], size[1], size[2]) + 5\n noise2 = torch.randn(num_samp, size[0], size[1], size[2])\n noise = noise1 + noise2\n if type == 'uniform':\n noise = torch.randn(num_samp, size[0], size[1], size[2])\n return noise\n"
] | [
[
"torch.randn"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
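A minimal usage sketch for generate_noise() from the noise.py record above, assuming the repository root is on PYTHONPATH so the record's file_path can be imported as a module (that import path is an assumption, not something stated in the record).

import torch
from src.augment.basic_augmentation.noise import generate_noise

# Gaussian noise is drawn at half resolution (scale=2) and bilinearly
# upsampled back to the requested spatial size by upsampling().
z_rand = generate_noise([1, 64, 64], num_samp=1, device='cpu',
                        type='gaussian', scale=2)
print(z_rand.shape)  # torch.Size([1, 1, 64, 64])

# Blend with a fixed tensor, following the usage note at the top of the record.
z_opt = torch.zeros_like(z_rand)
z_prev = 0.95 * z_opt + 0.05 * z_rand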
NoeSamaille/medical-detection-toolkit | [
"232d3d1444ccaac04e15a00d8030390560236871",
"232d3d1444ccaac04e15a00d8030390560236871"
] | [
"utils/exp_utils.py",
"experiments/lidc_exp/data_loader.py"
] | [
"#!/usr/bin/env python\n# Copyright 2018 Division of Medical Image Computing, German Cancer Research Center (DKFZ).\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nfrom typing import Iterable, Tuple, Any, Union\nimport os, sys\nimport subprocess\nfrom multiprocessing import Process\n\nimport importlib.util\nimport pickle\n\nimport logging\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom collections import OrderedDict\nimport numpy as np\nimport torch\nimport pandas as pd\n\ndef split_off_process(target, *args, daemon: bool=False, **kwargs):\n \"\"\"Start a process that won't block parent script.\n No join(), no return value. If daemon=False: before parent exits, it waits for this to finish.\n :param target: the target function of the process.\n :params *args: args to pass to target.\n :param daemon: if False: before parent exits, it waits for this process to finish.\n :params **kwargs: kwargs to pass to target.\n \"\"\"\n p = Process(target=target, args=tuple(args), kwargs=kwargs, daemon=daemon)\n p.start()\n return p\n\ndef get_formatted_duration(seconds: float, format: str=\"hms\") -> str:\n \"\"\"Format a time in seconds.\n :param format: \"hms\" for hours mins secs or \"ms\" for min secs.\n \"\"\"\n mins, secs = divmod(seconds, 60)\n if format == \"ms\":\n t = \"{:d}m:{:02d}s\".format(int(mins), int(secs))\n elif format == \"hms\":\n h, mins = divmod(mins, 60)\n t = \"{:d}h:{:02d}m:{:02d}s\".format(int(h), int(mins), int(secs))\n else:\n raise Exception(\"Format {} not available, only 'hms' or 'ms'\".format(format))\n return t\n\nclass CombinedLogger(object):\n \"\"\"Combine console and tensorboard logger and record system metrics.\n \"\"\"\n\n def __init__(self, name: str, log_dir: str, server_env: bool=True, fold: Union[int, str]=\"all\"):\n self.pylogger = logging.getLogger(name)\n self.tboard = SummaryWriter(log_dir=os.path.join(log_dir, \"tboard\"))\n self.log_dir = log_dir\n self.fold = str(fold)\n self.server_env = server_env\n\n self.pylogger.setLevel(logging.DEBUG)\n self.log_file = os.path.join(log_dir, \"fold_\"+self.fold, 'exec.log')\n os.makedirs(os.path.dirname(self.log_file), exist_ok=True)\n self.pylogger.addHandler(logging.FileHandler(self.log_file))\n if not server_env:\n self.pylogger.addHandler(ColorHandler())\n else:\n self.pylogger.addHandler(logging.StreamHandler())\n self.pylogger.propagate = False\n\n def __getattr__(self, attr):\n \"\"\"delegate all undefined method requests to objects of\n this class in order pylogger, tboard (first find first serve).\n E.g., combinedlogger.add_scalars(...) 
should trigger self.tboard.add_scalars(...)\n \"\"\"\n for obj in [self.pylogger, self.tboard]:\n if attr in dir(obj):\n return getattr(obj, attr)\n print(\"logger attr not found\")\n\n def set_logfile(self, fold: Union[int, str, None]=None, log_file: Union[str, None]=None):\n if fold is not None:\n self.fold = str(fold)\n if log_file is None:\n self.log_file = os.path.join(self.log_dir, \"fold_\"+self.fold, 'exec.log')\n else:\n self.log_file = log_file\n os.makedirs(os.path.dirname(self.log_file), exist_ok=True)\n for hdlr in self.pylogger.handlers:\n hdlr.close()\n self.pylogger.handlers = []\n self.pylogger.addHandler(logging.FileHandler(self.log_file))\n if not self.server_env:\n self.pylogger.addHandler(ColorHandler())\n else:\n self.pylogger.addHandler(logging.StreamHandler())\n\n def metrics2tboard(self, metrics, global_step=None, suptitle=None):\n \"\"\"\n :param metrics: {'train': dataframe, 'val':df}, df as produced in\n evaluator.py.evaluate_predictions\n \"\"\"\n # print(\"metrics\", metrics)\n if global_step is None:\n global_step = len(metrics['train'][list(metrics['train'].keys())[0]]) - 1\n if suptitle is not None:\n suptitle = str(suptitle)\n else:\n suptitle = \"Fold_\" + str(self.fold)\n\n for key in ['train', 'val']:\n # series = {k:np.array(v[-1]) for (k,v) in metrics[key].items() if not np.isnan(v[-1]) and not 'Bin_Stats' in k}\n loss_series = {}\n mon_met_series = {}\n for tag, val in metrics[key].items():\n val = val[-1] # maybe remove list wrapping, recording in evaluator?\n if 'loss' in tag.lower() and not np.isnan(val):\n loss_series[\"{}\".format(tag)] = val\n elif not np.isnan(val):\n mon_met_series[\"{}\".format(tag)] = val\n\n self.tboard.add_scalars(suptitle + \"/Losses/{}\".format(key), loss_series, global_step)\n self.tboard.add_scalars(suptitle + \"/Monitor_Metrics/{}\".format(key), mon_met_series, global_step)\n self.tboard.add_scalars(suptitle + \"/Learning_Rate\", metrics[\"lr\"], global_step)\n return\n\n def __del__(self): # otherwise might produce multiple prints e.g. in ipython console\n for hdlr in self.pylogger.handlers:\n hdlr.close()\n self.pylogger.handlers = []\n del self.pylogger\n self.tboard.flush()\n # close somehow prevents main script from exiting\n # maybe revise this issue in a later pytorch version\n #self.tboard.close()\n\n\ndef get_logger(exp_dir: str, server_env: bool=False) -> CombinedLogger:\n \"\"\"\n creates logger instance. writing out info to file, to terminal and to tensorboard.\n :param exp_dir: experiment directory, where exec.log file is stored.\n :param server_env: True if operating in server environment (e.g., gpu cluster)\n :return: custom CombinedLogger instance.\n \"\"\"\n log_dir = os.path.join(exp_dir, \"logs\")\n logger = CombinedLogger('medicaldetectiontoolkit', log_dir, server_env=server_env)\n print(\"Logging to {}\".format(logger.log_file))\n return logger\n\n\ndef prep_exp(dataset_path, exp_path, server_env, use_stored_settings=True, is_training=True):\n \"\"\"\n I/O handling, creating of experiment folder structure. Also creates a snapshot of configs/model scripts and copies them to the exp_dir.\n This way the exp_dir contains all info needed to conduct an experiment, independent to changes in actual source code. Thus, training/inference of this experiment can be started at anytime. Therefore, the model script is copied back to the source code dir as tmp_model (tmp_backbone).\n Provides robust structure for cloud deployment.\n :param dataset_path: path to source code for specific data set. (e.g. 
medicaldetectiontoolkit/lidc_exp)\n :param exp_path: path to experiment directory.\n :param server_env: boolean flag. pass to configs script for cloud deployment.\n :param use_stored_settings: boolean flag. When starting training: If True, starts training from snapshot in existing experiment directory, else creates experiment directory on the fly using configs/model scripts from source code.\n :param is_training: boolean flag. distinguishes train vs. inference mode.\n :return:\n \"\"\"\n\n if is_training:\n if use_stored_settings:\n cf_file = import_module('cf_file', os.path.join(exp_path, 'configs.py'))\n cf = cf_file.configs(server_env)\n # in this mode, previously saved model and backbone need to be found in exp dir.\n if not os.path.isfile(os.path.join(exp_path, 'mdt_model.py')) or \\\n not os.path.isfile(os.path.join(exp_path, 'backbone.py')):\n raise Exception(\n \"Selected use_stored_settings option but no model and/or backbone source files exist in exp dir.\")\n cf.model_path = os.path.join(exp_path, 'mdt_model.py')\n cf.backbone_path = os.path.join(exp_path, 'backbone.py')\n else:\n # this case overwrites settings files in exp dir, i.e., default_configs, configs, backbone, model\n os.makedirs(exp_path, exist_ok=True)\n # run training with source code info and copy snapshot of model to exp_dir for later testing (overwrite scripts if exp_dir already exists.)\n subprocess.call('cp {} {}'.format('default_configs.py', os.path.join(exp_path, 'default_configs.py')),\n shell=True)\n subprocess.call(\n 'cp {} {}'.format(os.path.join(dataset_path, 'configs.py'), os.path.join(exp_path, 'configs.py')),\n shell=True)\n cf_file = import_module('cf_file', os.path.join(dataset_path, 'configs.py'))\n cf = cf_file.configs(server_env)\n subprocess.call('cp {} {}'.format(cf.model_path, os.path.join(exp_path, 'mdt_model.py')), shell=True)\n subprocess.call('cp {} {}'.format(cf.backbone_path, os.path.join(exp_path, 'backbone.py')), shell=True)\n if os.path.isfile(os.path.join(exp_path, \"fold_ids.pickle\")):\n subprocess.call('rm {}'.format(os.path.join(exp_path, \"fold_ids.pickle\")), shell=True)\n\n else:\n # testing, use model and backbone stored in exp dir.\n cf_file = import_module('cf_file', os.path.join(exp_path, 'configs.py'))\n cf = cf_file.configs(server_env)\n cf.model_path = os.path.join(exp_path, 'mdt_model.py')\n cf.backbone_path = os.path.join(exp_path, 'backbone.py')\n\n\n cf.exp_dir = exp_path\n cf.test_dir = os.path.join(cf.exp_dir, 'test')\n cf.plot_dir = os.path.join(cf.exp_dir, 'plots')\n if not os.path.exists(cf.test_dir):\n os.mkdir(cf.test_dir)\n if not os.path.exists(cf.plot_dir):\n os.mkdir(cf.plot_dir)\n cf.experiment_name = exp_path.split(\"/\")[-1]\n cf.created_fold_id_pickle = False\n\n return cf\n\n\n\ndef import_module(name: str, path: str):\n \"\"\"\n correct way of importing a module dynamically in python 3.\n :param name: name given to module instance.\n :param path: path to module.\n :return: module: returned module instance.\n \"\"\"\n spec = importlib.util.spec_from_file_location(name, path)\n module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(module)\n return module\n\n\ndef set_params_flag(module: torch.nn.Module, flag: Tuple[str, Any], check_overwrite: bool = True) -> torch.nn.Module:\n \"\"\"Set an attribute for all passed module parameters.\n\n :param flag: tuple (str attribute name : attr value)\n :param check_overwrite: if True, assert that attribute not already exists.\n\n \"\"\"\n for param in module.parameters():\n if 
check_overwrite:\n assert not hasattr(param, flag[0]), \\\n \"param {} already has attr {} (w/ val {})\".format(param, flag[0], getattr(param, flag[0]))\n setattr(param, flag[0], flag[1])\n return module\n\ndef parse_params_for_optim(net: torch.nn.Module, weight_decay: float = 0., exclude_from_wd: Iterable = (\"norm\",)) -> list:\n \"\"\"Split network parameters into weight-decay dependent groups for the optimizer.\n :param net: network.\n :param weight_decay: weight decay value for the parameters that it is applied to. excluded parameters will have\n weight decay 0.\n :param exclude_from_wd: List of strings of parameter-group names to exclude from weight decay. Options: \"norm\", \"bias\".\n :return:\n \"\"\"\n if weight_decay is None:\n weight_decay = 0.\n # pytorch implements parameter groups as dicts {'params': ...} and\n # weight decay as p.data.mul_(1 - group['lr'] * group['weight_decay'])\n norm_types = [torch.nn.BatchNorm1d, torch.nn.BatchNorm2d, torch.nn.BatchNorm3d,\n torch.nn.InstanceNorm1d, torch.nn.InstanceNorm2d, torch.nn.InstanceNorm3d,\n torch.nn.LayerNorm, torch.nn.GroupNorm, torch.nn.SyncBatchNorm, torch.nn.LocalResponseNorm]\n level_map = {\"bias\": \"weight\",\n \"norm\": \"module\"}\n type_map = {\"norm\": norm_types}\n\n exclude_from_wd = [str(name).lower() for name in exclude_from_wd]\n exclude_weight_names = [k for k, v in level_map.items() if k in exclude_from_wd and v == \"weight\"]\n exclude_module_types = tuple([type_ for k, v in level_map.items() if (k in exclude_from_wd and v == \"module\")\n for type_ in type_map[k]])\n\n if exclude_from_wd:\n print(\"excluding {} from weight decay.\".format(exclude_from_wd))\n\n for module in net.modules():\n if isinstance(module, exclude_module_types):\n set_params_flag(module, (\"no_wd\", True))\n for param_name, param in net.named_parameters():\n if np.any([ename in param_name for ename in exclude_weight_names]):\n setattr(param, \"no_wd\", True)\n\n with_dec, no_dec = [], []\n for param in net.parameters():\n if hasattr(param, \"no_wd\") and param.no_wd == True:\n no_dec.append(param)\n else:\n with_dec.append(param)\n orig_ps = sum(p.numel() for p in net.parameters())\n with_ps = sum(p.numel() for p in with_dec)\n wo_ps = sum(p.numel() for p in no_dec)\n assert orig_ps == with_ps + wo_ps, \"orig n parameters {} unequals sum of with wd {} and w/o wd {}.\"\\\n .format(orig_ps, with_ps, wo_ps)\n\n groups = [{'params': gr, 'weight_decay': wd} for (gr, wd) in [(no_dec, 0.), (with_dec, weight_decay)] if len(gr)>0]\n return groups\n\n\nclass ModelSelector:\n '''\n saves a checkpoint after each epoch as 'last_state' (can be loaded to continue interrupted training).\n saves the top-k (k=cf.save_n_models) ranked epochs. 
In inference, predictions of multiple epochs can be ensembled to improve performance.\n '''\n\n def __init__(self, cf, logger):\n\n self.cf = cf\n self.saved_epochs = [-1] * cf.save_n_models\n self.logger = logger\n\n def run_model_selection(self, net: torch.nn.Module, optimizer: torch.optim.Optimizer,\n monitor_metrics: dict, epoch: int):\n\n # take the mean over all selection criteria in each epoch\n non_nan_scores = np.mean(np.array([[0 if (ii is None or np.isnan(ii)) else ii for ii in monitor_metrics['val'][sc]] for sc in self.cf.model_selection_criteria]), 0)\n epochs_scores = [ii for ii in non_nan_scores[1:]]\n # ranking of epochs according to model_selection_criterion\n epoch_ranking = np.argsort(epochs_scores, kind=\"stable\")[::-1] + 1 #epochs start at 1\n # if set in configs, epochs < min_save_thresh are discarded from saving process.\n epoch_ranking = epoch_ranking[epoch_ranking >= self.cf.min_save_thresh]\n\n # check if current epoch is among the top-k epochs.\n if epoch in epoch_ranking[:self.cf.save_n_models]:\n\n save_dir = os.path.join(self.cf.fold_dir, '{}_best_checkpoint'.format(epoch))\n if not os.path.exists(save_dir):\n os.mkdir(save_dir)\n\n torch.save(net.state_dict(), os.path.join(save_dir, 'params.pth'))\n with open(os.path.join(save_dir, 'monitor_metrics.pickle'), 'wb') as handle:\n pickle.dump(monitor_metrics, handle)\n # save epoch_ranking to keep info for inference.\n np.save(os.path.join(self.cf.fold_dir, 'epoch_ranking'), epoch_ranking[:self.cf.save_n_models])\n np.save(os.path.join(save_dir, 'epoch_ranking'), epoch_ranking[:self.cf.save_n_models])\n\n self.logger.info(\n \"saving current epoch {} at rank {}\".format(epoch, np.argwhere(epoch_ranking == epoch)))\n # delete params of the epoch that just fell out of the top-k epochs.\n for se in [int(ii.split('_')[0]) for ii in os.listdir(self.cf.fold_dir) if 'best_checkpoint' in ii]:\n if se in epoch_ranking[self.cf.save_n_models:]:\n subprocess.call('rm -rf {}'.format(os.path.join(self.cf.fold_dir, '{}_best_checkpoint'.format(se))), shell=True)\n self.logger.info('deleting epoch {} at rank {}'.format(se, np.argwhere(epoch_ranking == se)))\n\n state = {\n 'epoch': epoch,\n 'state_dict': net.state_dict(),\n 'optimizer': optimizer.state_dict(),\n }\n\n # save checkpoint of current epoch.\n save_dir = os.path.join(self.cf.fold_dir, 'last_checkpoint'.format(epoch))\n if not os.path.exists(save_dir):\n os.mkdir(save_dir)\n torch.save(state, os.path.join(save_dir, 'params.pth'))\n np.save(os.path.join(save_dir, 'epoch_ranking'), epoch_ranking[:self.cf.save_n_models])\n with open(os.path.join(save_dir, 'monitor_metrics.pickle'), 'wb') as handle:\n pickle.dump(monitor_metrics, handle)\n return os.path.join(os.path.join(self.cf.fold_dir, f'{epoch_ranking[0]}_best_checkpoint'))\n\n\n\ndef load_checkpoint(checkpoint_path: str, net: torch.nn.Module, optimizer: torch.optim.Optimizer) -> Tuple:\n\n checkpoint = torch.load(os.path.join(checkpoint_path, 'params.pth'))\n net.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n with open(os.path.join(checkpoint_path, 'monitor_metrics.pickle'), 'rb') as handle:\n monitor_metrics = pickle.load(handle)\n starting_epoch = checkpoint['epoch'] + 1\n return starting_epoch, net, optimizer, monitor_metrics\n\n\n\ndef prepare_monitoring(cf):\n \"\"\"\n creates dictionaries, where train/val metrics are stored.\n \"\"\"\n metrics = {}\n # first entry for loss dict accounts for epoch starting at 1.\n metrics['train'] = OrderedDict()\n 
metrics['val'] = OrderedDict()\n metric_classes = []\n if 'rois' in cf.report_score_level:\n metric_classes.extend([v for k, v in cf.class_dict.items()])\n if 'patient' in cf.report_score_level:\n metric_classes.extend(['patient'])\n for cl in metric_classes:\n metrics['train'][cl + '_ap'] = [np.nan]\n metrics['val'][cl + '_ap'] = [np.nan]\n if cl == 'patient':\n metrics['train'][cl + '_auc'] = [np.nan]\n metrics['val'][cl + '_auc'] = [np.nan]\n\n return metrics\n\n\n\ndef create_csv_output(results_list, cf, logger):\n \"\"\"\n Write out test set predictions to .csv file. output format is one line per prediction:\n PatientID | PredictionID | [y1 x1 y2 x2 (z1) (z2)] | score | pred_classID\n Note, that prediction coordinates correspond to images as loaded for training/testing and need to be adapted when\n plotted over raw data (before preprocessing/resampling).\n :param results_list: [[patient_results, patient_id], [patient_results, patient_id], ...]\n \"\"\"\n\n logger.info('creating csv output file at {}'.format(os.path.join(cf.test_dir, 'results.csv')))\n predictions_df = pd.DataFrame(columns = ['patientID', 'predictionID', 'coords', 'score', 'pred_classID'])\n for r in results_list:\n\n pid = r[1]\n\n #optionally load resampling info from preprocessing to match output predictions with raw data.\n #with open(os.path.join(cf.exp_dir, 'test_resampling_info', pid), 'rb') as handle:\n # resampling_info = pickle.load(handle)\n\n for bix, box in enumerate(r[0][0]):\n if box[\"box_type\"] == \"gt\":\n continue\n assert box['box_type'] == 'det', box['box_type']\n coords = box['box_coords']\n score = box['box_score']\n pred_class_id = box['box_pred_class_id']\n out_coords = []\n if score >= cf.min_det_thresh:\n out_coords.append(coords[0]) #* resampling_info['scale'][0])\n out_coords.append(coords[1]) #* resampling_info['scale'][1])\n out_coords.append(coords[2]) #* resampling_info['scale'][0])\n out_coords.append(coords[3]) #* resampling_info['scale'][1])\n if len(coords) > 4:\n out_coords.append(coords[4]) #* resampling_info['scale'][2] + resampling_info['z_crop'])\n out_coords.append(coords[5]) #* resampling_info['scale'][2] + resampling_info['z_crop'])\n\n predictions_df.loc[len(predictions_df)] = [pid, bix, out_coords, score, pred_class_id]\n try:\n fold = cf.fold\n except:\n fold = 'hold_out'\n predictions_df.to_csv(os.path.join(cf.exp_dir, 'results_{}.csv'.format(fold)), index=False)\n\n\n\nclass _AnsiColorizer(object):\n \"\"\"\n A colorizer is an object that loosely wraps around a stream, allowing\n callers to write text to the stream in a particular color.\n\n Colorizer classes must implement C{supported()} and C{write(text, color)}.\n \"\"\"\n _colors = dict(black=30, red=31, green=32, yellow=33,\n blue=34, magenta=35, cyan=36, white=37, default=39)\n\n def __init__(self, stream):\n self.stream = stream\n\n @classmethod\n def supported(cls, stream=sys.stdout):\n \"\"\"\n A class method that returns True if the current platform supports\n coloring terminal output using this method. 
Returns False otherwise.\n \"\"\"\n if not stream.isatty():\n return False # auto color only on TTYs\n try:\n import curses\n except ImportError:\n return False\n else:\n try:\n try:\n return curses.tigetnum(\"colors\") > 2\n except curses.error:\n curses.setupterm()\n return curses.tigetnum(\"colors\") > 2\n except:\n raise\n # guess false in case of error\n return False\n\n def write(self, text, color):\n \"\"\"\n Write the given text to the stream in the given color.\n\n @param text: Text to be written to the stream.\n\n @param color: A string label for a color. e.g. 'red', 'white'.\n \"\"\"\n color = self._colors[color]\n self.stream.write('\\x1b[%sm%s\\x1b[0m' % (color, text))\n\n\n\nclass ColorHandler(logging.StreamHandler):\n\n\n def __init__(self, stream=sys.stdout):\n super(ColorHandler, self).__init__(_AnsiColorizer(stream))\n\n def emit(self, record):\n msg_colors = {\n logging.DEBUG: \"green\",\n logging.INFO: \"default\",\n logging.WARNING: \"red\",\n logging.ERROR: \"red\"\n }\n color = msg_colors.get(record.levelno, \"blue\")\n self.stream.write(record.msg + \"\\n\", color)\n\n",
"#!/usr/bin/env python\n# Copyright 2018 Division of Medical Image Computing, German Cancer Research Center (DKFZ).\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n'''\nExample Data Loader for the LIDC data set. This dataloader expects preprocessed data in .npy or .npz files per patient and\na pandas dataframe in the same directory containing the meta-info e.g. file paths, labels, foregound slice-ids.\n'''\n\n\nimport numpy as np\nimport os\nfrom collections import OrderedDict\nimport pandas as pd\nimport pickle\nimport time\nimport subprocess\n\n# batch generator tools from https://github.com/MIC-DKFZ/batchgenerators\nfrom batchgenerators.dataloading.data_loader import SlimDataLoaderBase\nfrom batchgenerators.transforms.spatial_transforms import MirrorTransform as Mirror\nfrom batchgenerators.transforms.abstract_transforms import Compose\nfrom batchgenerators.dataloading.multi_threaded_augmenter import MultiThreadedAugmenter\nfrom batchgenerators.dataloading import SingleThreadedAugmenter\nfrom batchgenerators.transforms.spatial_transforms import SpatialTransform\nfrom batchgenerators.transforms.crop_and_pad_transforms import CenterCropTransform\nfrom batchgenerators.transforms.utility_transforms import ConvertSegToBoundingBoxCoordinates\n\nimport utils.dataloader_utils as dutils\nimport utils.exp_utils as utils\n\ndef get_train_generators(cf, logger):\n \"\"\"\n wrapper function for creating the training batch generator pipeline. returns the train/val generators.\n selects patients according to cv folds (generated by first run/fold of experiment):\n splits the data into n-folds, where 1 split is used for val, 1 split for testing and the rest for training. 
(inner loop test set)\n If cf.hold_out_test_set is True, adds the test split to the training data.\n \"\"\"\n all_data = load_dataset(cf, logger)\n all_pids_list = np.unique([v['pid'] for (k, v) in all_data.items() if 'AUG' not in v['pid']])\n\n splits_file = os.path.join(cf.exp_dir, 'fold_ids.pickle')\n if not os.path.exists(splits_file) and not cf.created_fold_id_pickle:\n fg = dutils.fold_generator(seed=cf.seed, n_splits=cf.n_cv_splits, len_data=len(all_pids_list)).get_fold_names()\n with open(splits_file, 'wb') as handle:\n pickle.dump(fg, handle)\n cf.created_fold_id_pickle = True\n else:\n with open(splits_file, 'rb') as handle:\n fg = pickle.load(handle)\n\n train_ix, val_ix, test_ix, _ = fg[cf.fold]\n\n if cf.train_set_proportion < 1.0:\n train_ix = np.random.choice(train_ix, int(cf.train_set_proportion*len(train_ix)))\n\n train_pids = [all_pids_list[ix] for ix in train_ix]\n val_pids = [all_pids_list[ix] for ix in val_ix]\n\n if cf.hold_out_test_set:\n train_pids += [all_pids_list[ix] for ix in test_ix]\n\n if cf.gan_dataset:\n gan_pids = [v['pid'].replace('-AUG', '') for (k, v) in all_data.items() if 'AUG' in v['pid']]\n gan_pids = np.intersect1d(gan_pids, train_pids)\n gan_pids = [gpid + '-AUG' for gpid in gan_pids]\n if cf.gan_prop > 0.0:\n np.random.shuffle(gan_pids)\n gan_pids = gan_pids[:int(np.floor(len(train_pids) * cf.gan_prop))]\n train_pids = np.concatenate((train_pids, gan_pids))\n\n train_data = {k: v for (k, v) in all_data.items() if any(p == v['pid'] for p in train_pids)}\n val_data = {k: v for (k, v) in all_data.items() if any(p == v['pid'] for p in val_pids)}\n\n logger.info(\"data set loaded with: {} train / {} val / {} test patients\".format(len(train_pids), len(val_ix), len(test_ix)))\n batch_gen = {}\n batch_gen['train'] = create_data_gen_pipeline(train_data, cf=cf, is_training=True)\n batch_gen['val_sampling'] = create_data_gen_pipeline(val_data, cf=cf, is_training=False)\n if cf.val_mode == 'val_patient':\n batch_gen['val_patient'] = PatientBatchIterator(val_data, cf=cf)\n batch_gen['n_val'] = len(val_ix) if cf.max_val_patients is None else min(len(val_ix), cf.max_val_patients)\n else:\n batch_gen['n_val'] = cf.num_val_batches\n\n return batch_gen\n\n\ndef get_test_generator(cf, logger):\n \"\"\"\n wrapper function for creating the test batch generator pipeline.\n selects patients according to cv folds (generated by first run/fold of experiment)\n If cf.hold_out_test_set is True, gets the data from an external folder instead.\n \"\"\"\n if cf.hold_out_test_set:\n pp_name = cf.pp_test_name\n test_ix = None\n else:\n pp_name = None\n with open(os.path.join(cf.exp_dir, 'fold_ids.pickle'), 'rb') as handle:\n fold_list = pickle.load(handle)\n _, _, test_ix, _ = fold_list[cf.fold]\n # warnings.warn('WARNING: using validation set for testing!!!')\n\n test_data = load_dataset(cf, logger, test_ix, pp_data_path=cf.pp_test_data_path, pp_name=pp_name)\n logger.info(\"data set loaded with: {} test patients\".format(len(test_ix)))\n batch_gen = {}\n batch_gen['test'] = PatientBatchIterator(test_data, cf=cf)\n batch_gen['n_test'] = len(test_ix) if cf.max_test_patients==\"all\" else \\\n min(cf.max_test_patients, len(test_ix))\n return batch_gen\n\n\ndef get_pred_generator(cf, logger):\n \"\"\"\n wrapper function for creating the prediction batch generator pipeline.\n \"\"\"\n test_data = load_dataset(cf, logger, patient_path=cf.patient_path)\n logger.info(f\"data set loaded with: {cf.patient_path}\")\n batch_gen = {}\n batch_gen['pred'] = PatientBatchIterator(test_data, 
cf=cf)\n batch_gen['n_test'] = 1\n return batch_gen\n\n\n\ndef load_dataset(cf, logger, subset_ixs=None, pp_data_path=None, pp_name=None, patient_path=None):\n \"\"\"\n loads the dataset. if deployed in cloud also copies and unpacks the data to the working directory.\n :param subset_ixs: subset indices to be loaded from the dataset. used e.g. for testing to only load the test folds.\n :return: data: dictionary with one entry per patient (in this case per patient-breast, since they are treated as\n individual images for training) each entry is a dictionary containing respective meta-info as well as paths to the preprocessed\n numpy arrays to be loaded during batch-generation\n \"\"\"\n data = OrderedDict()\n if patient_path is not None:\n # Load data of specific patient (e.g. for prediction)\n filename=patient_path\n pid=os.path.splitext(os.path.basename(filename))[0]\n filenameNifty=filename.replace('.npy', '_nifti.npy')\n if os.path.exists(filenameNifty):\n seg = filenameNifty\n else:\n seg = None\n data[pid] = {'data': filename, 'seg': seg, 'pid': pid, 'class_target': []}\n else:\n if pp_data_path is None:\n pp_data_path = cf.pp_data_path\n if pp_name is None:\n pp_name = cf.pp_name\n if cf.server_env:\n copy_data = True\n target_dir = os.path.join(cf.data_dest, pp_name)\n if not os.path.exists(target_dir):\n cf.data_source_dir = pp_data_path\n os.makedirs(target_dir)\n subprocess.call('rsync -av {} {}'.format(\n os.path.join(cf.data_source_dir, cf.input_df_name), os.path.join(target_dir, cf.input_df_name)), shell=True)\n logger.info('created target dir and info df at {}'.format(os.path.join(target_dir, cf.input_df_name)))\n\n elif subset_ixs is None:\n copy_data = False\n\n pp_data_path = target_dir\n\n\n p_df = pd.read_pickle(os.path.join(pp_data_path, cf.input_df_name))\n\n if cf.select_prototype_subset is not None:\n prototype_pids = p_df.pid.tolist()[:cf.select_prototype_subset]\n p_df = p_df[p_df.pid.isin(prototype_pids)]\n logger.warning('WARNING: using prototyping data subset!!!')\n\n if subset_ixs is not None:\n pids = p_df.pid.tolist()\n pids = [pid for pid in pids if 'AUG' not in pid] # cannot load a gan patient in subset\n subset_pids = [np.unique(pids)[ix] for ix in subset_ixs]\n p_df = p_df[p_df.pid.isin(subset_pids)]\n logger.info('subset: selected {} instances from df'.format(len(p_df)))\n\n if cf.server_env:\n if copy_data:\n copy_and_unpack_data(logger, p_df.pid.tolist(), cf.fold_dir, cf.data_source_dir, target_dir)\n\n class_targets = p_df['class_target'].tolist()\n pids = p_df.pid.tolist()\n imgs = [os.path.join(pp_data_path, '{}_img.npy'.format(pid)) for pid in pids]\n segs = [os.path.join(pp_data_path,'{}_rois.npy'.format(pid)) for pid in pids]\n\n for ix, pid in enumerate(pids):\n # for the experiment conducted here, malignancy scores are binarized: (benign: 1-2, malignant: 3-5)\n #targets = [1 if ii >= 3 else 0 for ii in class_targets[ix]]\n targets = [0 for ii in class_targets[ix]] # Target is binarized (presence of nodules)\n # targets = [1 for ii in class_targets[ix]] # Single target here: presence of nodule\n data[pid] = {'data': imgs[ix], 'seg': segs[ix], 'pid': pid, 'class_target': targets}\n data[pid]['fg_slices'] = p_df['fg_slices'].tolist()[ix]\n\n return data\n\n\n\ndef create_data_gen_pipeline(patient_data, cf, is_training=True):\n \"\"\"\n create mutli-threaded train/val/test batch generation and augmentation pipeline.\n :param patient_data: dictionary containing one dictionary per patient in the train/test subset.\n :param is_training: (optional) 
whether to perform data augmentation (training) or not (validation/testing)\n :return: multithreaded_generator\n \"\"\"\n\n # Set n_workers if too high\n cf.n_workers = min(cf.n_workers, int(np.ceil(len(patient_data) / cf.batch_size)))\n\n # create instance of batch generator as first element in pipeline.\n data_gen = BatchGenerator(patient_data, batch_size=cf.batch_size, cf=cf)\n\n # add transformations to pipeline.\n my_transforms = []\n if is_training:\n mirror_transform = Mirror(axes=np.arange(cf.dim))\n my_transforms.append(mirror_transform)\n spatial_transform = SpatialTransform(patch_size=cf.patch_size[:cf.dim],\n patch_center_dist_from_border=cf.da_kwargs['rand_crop_dist'],\n do_elastic_deform=cf.da_kwargs['do_elastic_deform'],\n alpha=cf.da_kwargs['alpha'], sigma=cf.da_kwargs['sigma'],\n do_rotation=cf.da_kwargs['do_rotation'], angle_x=cf.da_kwargs['angle_x'],\n angle_y=cf.da_kwargs['angle_y'], angle_z=cf.da_kwargs['angle_z'],\n do_scale=cf.da_kwargs['do_scale'], scale=cf.da_kwargs['scale'],\n random_crop=cf.da_kwargs['random_crop'])\n\n my_transforms.append(spatial_transform)\n else:\n my_transforms.append(CenterCropTransform(crop_size=cf.patch_size[:cf.dim]))\n\n my_transforms.append(ConvertSegToBoundingBoxCoordinates(cf.dim, get_rois_from_seg_flag=False, class_specific_seg_flag=cf.class_specific_seg_flag))\n all_transforms = Compose(my_transforms)\n # multithreaded_generator = SingleThreadedAugmenter(data_gen, all_transforms)\n multithreaded_generator = MultiThreadedAugmenter(data_gen, all_transforms, num_processes=cf.n_workers, seeds=range(cf.n_workers))\n return multithreaded_generator\n\n\nclass BatchGenerator(SlimDataLoaderBase):\n \"\"\"\n creates the training/validation batch generator. Samples n_batch_size patients (draws a slice from each patient if 2D)\n from the data set while maintaining foreground-class balance. Returned patches are cropped/padded to pre_crop_size.\n Actual patch_size is obtained after data augmentation.\n :param data: data dictionary as provided by 'load_dataset'.\n :param batch_size: number of patients to sample for the batch\n :return dictionary containing the batch data (b, c, y, x(, z)) / seg (b, 1, y, x(, z)) / pids / class_target\n \"\"\"\n def __init__(self, data, batch_size, cf):\n super(BatchGenerator, self).__init__(data, batch_size, number_of_threads_in_multithreaded=cf.n_workers)\n\n self.cf = cf\n self.crop_margin = np.array(self.cf.patch_size)/8. 
#min distance of ROI center to edge of cropped_patch.\n self.p_fg = 0.5\n self.init_pos = 0\n self.thread_workload = int(np.ceil(len(self._data) / self.number_of_threads_in_multithreaded))\n self.cur_position = 0\n self.seed = 0\n self.was_initialized = False\n self.data_ixs = np.arange(len(self._data))\n\n def reset(self):\n np.random.seed(self.seed)\n self.seed = self.seed + 1\n np.random.shuffle(self.data_ixs)\n # print(f\"Data (thread {self.thread_id}) shuffled\")\n self.init_pos = self.thread_id * self.thread_workload\n self.cur_position = self.init_pos\n self.was_initialized = True\n\n def generate_train_batch(self):\n\n batch_data, batch_segs, batch_pids, batch_targets, batch_patient_labels = [], [], [], [], []\n class_targets_list = [v['class_target'] for (k, v) in self._data.items()]\n\n if self.cf.head_classes > 2:\n # samples patients towards equilibrium of foreground classes on a roi-level (after randomly sampling the ratio \"batch_sample_slack).\n batch_ixs = dutils.get_class_balanced_patients(\n class_targets_list, self.batch_size, self.cf.head_classes - 1, slack_factor=self.cf.batch_sample_slack)\n else:\n # TODO: batch_ixs = np.random.choice(len(class_targets_list), self.batch_size)\n if not self.was_initialized:\n self.reset()\n start_ix = self.cur_position\n last_ix = start_ix + self.batch_size\n if start_ix < self.init_pos + self.thread_workload:\n if last_ix > self.init_pos + self.thread_workload:\n last_ix = self.init_pos + self.thread_workload\n self.cur_position = last_ix\n batch_ixs = self.data_ixs[start_ix:last_ix] % len(self._data)\n if len(batch_ixs) < self.batch_size:\n batch_ixs = np.concatenate((batch_ixs, np.arange(self.batch_size-len(batch_ixs))))\n # print(f'thread {self.thread_id}: {batch_ixs}')\n else:\n self.reset()\n raise StopIteration\n\n patients = list(self._data.items())\n\n for b in batch_ixs:\n patient = patients[b][1]\n\n # data shape: from (z,y,x) to (c, y, x, z).\n data = np.transpose(np.load(patient['data'], mmap_mode='r'), axes=(1, 2, 0))[np.newaxis]\n seg = np.transpose(np.load(patient['seg'], mmap_mode='r'), axes=(1, 2, 0))\n batch_pids.append(patient['pid'])\n batch_targets.append(patient['class_target'])\n\n if self.cf.dim == 2:\n # draw random slice from patient while oversampling slices containing foreground objects with p_fg.\n if len(patient['fg_slices']) > 0:\n fg_prob = self.p_fg / len(patient['fg_slices'])\n bg_prob = (1 - self.p_fg) / (data.shape[3] - len(patient['fg_slices']))\n slices_prob = [fg_prob if ix in patient['fg_slices'] else bg_prob for ix in range(data.shape[3])]\n slice_id = np.random.choice(data.shape[3], p=slices_prob)\n else:\n slice_id = np.random.choice(data.shape[3])\n\n # if set to not None, add neighbouring slices to each selected slice in channel dimension.\n if self.cf.n_3D_context is not None:\n padded_data = dutils.pad_nd_image(data[0], [(data.shape[-1] + (self.cf.n_3D_context*2))], mode='constant')\n padded_slice_id = slice_id + self.cf.n_3D_context\n data = (np.concatenate([padded_data[..., ii][np.newaxis] for ii in range(\n padded_slice_id - self.cf.n_3D_context, padded_slice_id + self.cf.n_3D_context + 1)], axis=0))\n else:\n data = data[..., slice_id]\n seg = seg[..., slice_id]\n\n # pad data if smaller than pre_crop_size.\n if np.any([data.shape[dim + 1] < ps for dim, ps in enumerate(self.cf.pre_crop_size)]):\n new_shape = [np.max([data.shape[dim + 1], ps]) for dim, ps in enumerate(self.cf.pre_crop_size)]\n data = dutils.pad_nd_image(data, new_shape, mode='constant')\n seg = 
dutils.pad_nd_image(seg, new_shape, mode='constant')\n\n # crop patches of size pre_crop_size, while sampling patches containing foreground with p_fg.\n crop_dims = [dim for dim, ps in enumerate(self.cf.pre_crop_size) if data.shape[dim + 1] > ps]\n if len(crop_dims) > 0:\n fg_prob_sample = np.random.rand(1)\n # with p_fg: sample random pixel from random ROI and shift center by random value.\n if fg_prob_sample < self.p_fg and np.sum(seg) > 0:\n seg_ixs = np.argwhere(seg == np.random.choice(np.unique(seg)[1:], 1))\n roi_anchor_pixel = seg_ixs[np.random.choice(seg_ixs.shape[0], 1)][0]\n assert seg[tuple(roi_anchor_pixel)] > 0\n # sample the patch center coords. constrained by edges of images - pre_crop_size /2. And by\n # distance to the desired ROI < patch_size /2.\n # (here final patch size to account for center_crop after data augmentation).\n sample_seg_center = {}\n for ii in crop_dims:\n low = np.max((self.cf.pre_crop_size[ii]//2, roi_anchor_pixel[ii] - (self.cf.patch_size[ii]//2 - self.crop_margin[ii])))\n high = np.min((data.shape[ii + 1] - self.cf.pre_crop_size[ii]//2,\n roi_anchor_pixel[ii] + (self.cf.patch_size[ii]//2 - self.crop_margin[ii])))\n # happens if lesion on the edge of the image. dont care about roi anymore,\n # just make sure pre-crop is inside image.\n if low >= high:\n low = data.shape[ii + 1] // 2 - (data.shape[ii + 1] // 2 - self.cf.pre_crop_size[ii] // 2)\n high = data.shape[ii + 1] // 2 + (data.shape[ii + 1] // 2 - self.cf.pre_crop_size[ii] // 2)\n sample_seg_center[ii] = np.random.randint(low=low, high=high)\n\n else:\n # not guaranteed to be empty. probability of emptiness depends on the data.\n sample_seg_center = {ii: np.random.randint(low=self.cf.pre_crop_size[ii]//2,\n high=data.shape[ii + 1] - self.cf.pre_crop_size[ii]//2) for ii in crop_dims}\n\n for ii in crop_dims:\n min_crop = int(sample_seg_center[ii] - self.cf.pre_crop_size[ii] // 2)\n max_crop = int(sample_seg_center[ii] + self.cf.pre_crop_size[ii] // 2)\n data = np.take(data, indices=range(min_crop, max_crop), axis=ii + 1)\n seg = np.take(seg, indices=range(min_crop, max_crop), axis=ii)\n\n batch_data.append(data)\n batch_segs.append(seg[np.newaxis])\n\n data = np.array(batch_data)\n seg = np.array(batch_segs).astype(np.uint8)\n class_target = np.array(batch_targets)\n return {'data': data, 'seg': seg, 'pid': batch_pids, 'class_target': class_target}\n\n\n\nclass PatientBatchIterator(SlimDataLoaderBase):\n \"\"\"\n creates a test generator that iterates over entire given dataset returning 1 patient per batch.\n Can be used for monitoring if cf.val_mode = 'patient_val' for a monitoring closer to actualy evaluation (done in 3D),\n if willing to accept speed-loss during training.\n :return: out_batch: dictionary containing one patient with batch_size = n_3D_patches in 3D or\n batch_size = n_2D_patches in 2D .\n \"\"\"\n def __init__(self, data, cf): #threads in augmenter\n super(PatientBatchIterator, self).__init__(data, 0)\n self.cf = cf\n self.patient_ix = 0\n self.dataset_pids = [v['pid'] for (k, v) in data.items()]\n self.patch_size = cf.patch_size\n if len(self.patch_size) == 2:\n self.patch_size = self.patch_size + [1]\n\n\n def generate_train_batch(self):\n\n\n pid = self.dataset_pids[self.patient_ix]\n patient = self._data[pid]\n data = np.transpose(np.load(patient['data'], mmap_mode='r'), axes=(1, 2, 0))[np.newaxis] # (c, y, x, z)\n seg = np.transpose(np.load(patient['seg'], mmap_mode='r'), axes=(1, 2, 0)) if patient['seg'] is not None else np.zeros(data.shape)\n batch_class_targets = 
np.array([patient['class_target']])\n\n # pad data if smaller than patch_size seen during training.\n if np.any([data.shape[dim + 1] < ps for dim, ps in enumerate(self.patch_size)]):\n new_shape = [data.shape[0]] + [np.max([data.shape[dim + 1], self.patch_size[dim]]) for dim, ps in enumerate(self.patch_size)]\n data = dutils.pad_nd_image(data, new_shape) # use 'return_slicer' to crop image back to original shape.\n seg = dutils.pad_nd_image(seg, new_shape)\n\n # get 3D targets for evaluation, even if network operates in 2D. 2D predictions will be merged to 3D in predictor.\n if self.cf.dim == 3 or self.cf.merge_2D_to_3D_preds:\n out_data = data[np.newaxis]\n out_seg = seg[np.newaxis, np.newaxis]\n out_targets = batch_class_targets\n\n batch_3D = {'data': out_data, 'seg': out_seg, 'class_target': out_targets, 'pid': pid}\n converter = ConvertSegToBoundingBoxCoordinates(dim=3, get_rois_from_seg_flag=False, class_specific_seg_flag=self.cf.class_specific_seg_flag)\n batch_3D = converter(**batch_3D)\n batch_3D.update({'patient_bb_target': batch_3D['bb_target'],\n 'patient_roi_labels': batch_3D['class_target'],\n 'original_img_shape': out_data.shape})\n\n if self.cf.dim == 2:\n out_data = np.transpose(data, axes=(3, 0, 1, 2)) # (z, c, y, x )\n out_seg = np.transpose(seg, axes=(2, 0, 1))[:, np.newaxis]\n out_targets = np.array(np.repeat(batch_class_targets, out_data.shape[0], axis=0))\n\n # if set to not None, add neighbouring slices to each selected slice in channel dimension.\n if self.cf.n_3D_context is not None:\n slice_range = range(self.cf.n_3D_context, out_data.shape[0] + self.cf.n_3D_context)\n out_data = np.pad(out_data, ((self.cf.n_3D_context, self.cf.n_3D_context), (0, 0), (0, 0), (0, 0)), 'constant', constant_values=0)\n out_data = np.array(\n [np.concatenate([out_data[ii] for ii in range(\n slice_id - self.cf.n_3D_context, slice_id + self.cf.n_3D_context + 1)], axis=0) for slice_id in\n slice_range])\n\n batch_2D = {'data': out_data, 'seg': out_seg, 'class_target': out_targets, 'pid': pid}\n converter = ConvertSegToBoundingBoxCoordinates(dim=2, get_rois_from_seg_flag=False, class_specific_seg_flag=self.cf.class_specific_seg_flag)\n batch_2D = converter(**batch_2D)\n\n if self.cf.merge_2D_to_3D_preds:\n batch_2D.update({'patient_bb_target': batch_3D['patient_bb_target'],\n 'patient_roi_labels': batch_3D['patient_roi_labels'],\n 'original_img_shape': out_data.shape})\n else:\n batch_2D.update({'patient_bb_target': batch_2D['bb_target'],\n 'patient_roi_labels': batch_2D['class_target'],\n 'original_img_shape': out_data.shape})\n\n out_batch = batch_3D if self.cf.dim == 3 else batch_2D\n patient_batch = out_batch\n\n # crop patient-volume to patches of patch_size used during training. 
stack patches up in batch dimension.\n # in this case, 2D is treated as a special case of 3D with patch_size[z] = 1.\n if np.any([data.shape[dim + 1] > self.patch_size[dim] for dim in range(3)]):\n patch_crop_coords_list = dutils.get_patch_crop_coords(data[0], self.patch_size)\n new_img_batch, new_seg_batch, new_class_targets_batch = [], [], []\n\n for cix, c in enumerate(patch_crop_coords_list):\n\n seg_patch = seg[c[0]:c[1], c[2]: c[3], c[4]:c[5]]\n new_seg_batch.append(seg_patch)\n\n # if set to not None, add neighbouring slices to each selected slice in channel dimension.\n # correct patch_crop coordinates by added slices of 3D context.\n if self.cf.dim == 2 and self.cf.n_3D_context is not None:\n tmp_c_5 = c[5] + (self.cf.n_3D_context * 2)\n if cix == 0:\n data = np.pad(data, ((0, 0), (0, 0), (0, 0), (self.cf.n_3D_context, self.cf.n_3D_context)), 'constant', constant_values=0)\n else:\n tmp_c_5 = c[5]\n\n new_img_batch.append(data[:, c[0]:c[1], c[2]:c[3], c[4]:tmp_c_5])\n\n data = np.array(new_img_batch) # (n_patches, c, x, y, z)\n seg = np.array(new_seg_batch)[:, np.newaxis] # (n_patches, 1, x, y, z)\n batch_class_targets = np.repeat(batch_class_targets, len(patch_crop_coords_list), axis=0)\n\n if self.cf.dim == 2:\n if self.cf.n_3D_context is not None:\n data = np.transpose(data[:, 0], axes=(0, 3, 1, 2))\n else:\n # all patches have z dimension 1 (slices). discard dimension\n data = data[..., 0]\n seg = seg[..., 0]\n\n patch_batch = {'data': data, 'seg': seg, 'class_target': batch_class_targets, 'pid': pid}\n patch_batch['patch_crop_coords'] = np.array(patch_crop_coords_list)\n patch_batch['patient_bb_target'] = patient_batch['patient_bb_target']\n patch_batch['patient_roi_labels'] = patient_batch['patient_roi_labels']\n patch_batch['original_img_shape'] = patient_batch['original_img_shape']\n\n if self.cf.patient_path is None:\n converter = ConvertSegToBoundingBoxCoordinates(self.cf.dim, get_rois_from_seg_flag=False, class_specific_seg_flag=self.cf.class_specific_seg_flag)\n patch_batch = converter(**patch_batch)\n out_batch = patch_batch\n\n self.patient_ix += 1\n if self.patient_ix == len(self.dataset_pids):\n self.patient_ix = 0\n\n return out_batch\n\n\n\ndef copy_and_unpack_data(logger, pids, fold_dir, source_dir, target_dir):\n\n start_time = time.time()\n with open(os.path.join(fold_dir, 'file_list.txt'), 'w') as handle:\n for pid in pids:\n handle.write('{}_img.npz\\n'.format(pid))\n handle.write('{}_rois.npz\\n'.format(pid))\n\n subprocess.call('rsync -av --files-from {} {} {}'.format(os.path.join(fold_dir, 'file_list.txt'),\n source_dir, target_dir), shell=True)\n n_threads = 8\n dutils.unpack_dataset(target_dir, threads=n_threads)\n copied_files = os.listdir(target_dir)\n t = utils.get_formatted_duration(time.time() - start_time)\n logger.info(\"\\ncopying and unpacking data set finished using {} threads.\\n{} files in target dir: {}. 
Took {}\\n\"\n .format(n_threads, len(copied_files), target_dir, t))\n\n\nif __name__==\"__main__\":\n\n total_stime = time.time()\n\n cf_file = utils.import_module(\"cf\", \"configs.py\")\n cf = cf_file.configs()\n\n cf.created_fold_id_pickle = False\n cf.exp_dir = \"dev/\"\n cf.plot_dir = cf.exp_dir + \"plots\"\n os.makedirs(cf.exp_dir, exist_ok=True)\n cf.fold = 0\n logger = utils.get_logger(cf.exp_dir)\n\n #batch_gen = get_train_generators(cf, logger)\n #train_batch = next(batch_gen[\"train\"])\n\n test_gen = get_test_generator(cf, logger)\n test_batch = next(test_gen[\"test\"])\n\n mins, secs = divmod((time.time() - total_stime), 60)\n h, mins = divmod(mins, 60)\n t = \"{:d}h:{:02d}m:{:02d}s\".format(int(h), int(mins), int(secs))\n print(\"{} total runtime: {}\".format(os.path.split(__file__)[1], t))\n\n"
] | [
[
"numpy.isnan",
"pandas.DataFrame",
"numpy.argwhere",
"numpy.any",
"numpy.argsort"
],
[
"numpy.pad",
"numpy.random.seed",
"numpy.unique",
"numpy.random.choice",
"numpy.arange",
"numpy.min",
"numpy.random.shuffle",
"numpy.concatenate",
"numpy.intersect1d",
"numpy.max",
"numpy.random.rand",
"numpy.transpose",
"numpy.load",
"numpy.repeat",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
theBraindonor/chicago-crime-arrests | [
"64cdb82fbe828d1316cf945b67ddc205ef190293"
] | [
"model/experiment/gaussian_naive_bayes_model.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n Experiment with a gaussian naive bayes model with a variety of balancing techniques on the cleaned data set\n\"\"\"\n\n__author__ = \"John Hoff\"\n__email__ = \"[email protected]\"\n__copyright__ = \"Copyright 2019, John Hoff\"\n__license__ = \"Creative Commons Attribution-ShareAlike 4.0 International License\"\n__version__ = \"1.0\"\n\nfrom imblearn.combine import SMOTEENN\nfrom imblearn.over_sampling import SMOTE\nfrom imblearn.under_sampling import RandomUnderSampler\n\nfrom sklearn.naive_bayes import GaussianNB\n\nfrom utility import Runner\nfrom model import load_clean_sample_data_frame, binned_geo_one_hot_data_mapper\n\n\nsample = None\nfit_increment = 10000\n\n\ndef test_gaussian_naive_bayes():\n runner = Runner(\n 'model/experiment/output/gaussian_naive_bayes_basic',\n load_clean_sample_data_frame(),\n 'arrest',\n GaussianNB()\n )\n runner.run_classification_experiment(\n sample=sample,\n record_predict_proba=True,\n transformer=binned_geo_one_hot_data_mapper,\n fit_increment=fit_increment,\n n_jobs=1\n )\n\n runner = Runner(\n 'model/experiment/output/gaussian_naive_bayes_under_sampled',\n load_clean_sample_data_frame(),\n 'arrest',\n GaussianNB()\n )\n runner.run_classification_experiment(\n sample=sample,\n record_predict_proba=True,\n transformer=binned_geo_one_hot_data_mapper,\n fit_increment=fit_increment,\n n_jobs=1,\n sampling=RandomUnderSampler()\n )\n\n runner = Runner(\n 'model/experiment/output/gaussian_naive_bayes_over_sampled',\n load_clean_sample_data_frame(),\n 'arrest',\n GaussianNB()\n )\n runner.run_classification_experiment(\n sample=sample,\n record_predict_proba=True,\n transformer=binned_geo_one_hot_data_mapper,\n fit_increment=fit_increment,\n n_jobs=1,\n sampling=SMOTE()\n )\n\n runner = Runner(\n 'model/experiment/output/gaussian_naive_bayes_combine_sampled',\n load_clean_sample_data_frame(),\n 'arrest',\n GaussianNB()\n )\n runner.run_classification_experiment(\n sample=sample,\n record_predict_proba=True,\n transformer=binned_geo_one_hot_data_mapper,\n fit_increment=fit_increment,\n n_jobs=1,\n sampling=SMOTEENN()\n )\n\n\nif __name__ == '__main__':\n test_gaussian_naive_bayes()\n"
] | [
[
"sklearn.naive_bayes.GaussianNB"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
tejas-9er/SVM-vs-LSSVM | [
"e44f63458680c39df370ddfcdf22e8c450d23128"
] | [
"data/LSSVM.py"
] | [
"import numpy as np\nimport scipy\nfrom scipy.sparse import linalg\nfrom sklearn.metrics import accuracy_score\n\nclass LSSVM:\n def __init__(self, kernel = 'linear', C = 1.0,gamma = 1.0, d = 2.0):\n kernels = {\n 'rbf':self.rbf,\n 'poly':self.polynomial,\n 'linear':self.linear\n }\n \n self.kernel = kernels[kernel]\n self.C = C\n self.gamma = 1.0\n self.d = d\n \n #Build the gram matrix\n def build_kernel_matrix(self, X, y):\n instances, dimensions = X.shape\n\n gram_matrix = np.zeros((instances,instances))\n #computing the gram matrix, involves going over the dataset and computing pairwise kernel function\n for i in range(0, instances):\n for j in range(0, instances):\n \n gram_matrix[i, j] = self.kernel(X[i], X[j])\n return gram_matrix\n\n def fit(self, X, y):\n\n self.kernel_matrix = self.build_kernel_matrix(X,y)\n identity_matrix = np.identity(X.shape[0])\n #We wish to solve Ax = B, so we begin by defining the matrices A, B\n A = np.zeros((X.shape[0]+1, X.shape[0]+1))\n B = np.ones(((X.shape[0]+1,1)))\n\n A[0][0] = 0\n A[0,1:X.shape[0]+1] = np.hstack((np.ones(X.shape[0])))\n A[1:X.shape[0]+1,0] = np.ones(X.shape[0])\n A[1:X.shape[0]+1,1:X.shape[0]+1] = self.kernel_matrix + identity_matrix / self.C\n \n #B is a column vector. \n B[0][0] = 0\n B[1:X.shape[0]+1,0] = y\n\n solution = scipy.sparse.linalg.cg(A,B)\n\n self.bias = solution[:-1]\n \n solution = solution[:-1]\n self.support_vector_alphas = []\n self.support_vector_labels = []\n self.support_vectors = []\n for index,alpha in enumerate(solution[0]):\n if(alpha > 1e-3):\n self.support_vector_alphas.append(alpha)\n self.support_vector_labels.append(y[index])\n self.support_vectors.append(X[index])\n #define kernels\n def linear(self, x1, x2):\n return np.dot(x1, x2.T)\n\n def polynomial(self, x1, x2):\n return (np.dot(x1, x2.T) ** self.d)\n \n def rbf(self,xi,xj):\n return np.exp(-self.gamma * np.linalg.norm(xi-xj)**2)\n\n def predict(self,X_test):\n predictions = []\n \n for instance in X_test:\n\n for index, sv in enumerate(self.support_vectors):\n prediction = np.sum(self.support_vector_alphas[index] * self.support_vector_labels[index] * self.kernel(sv,instance) + self.bias)\n \n predictions.append(np.sign(prediction).astype(int))\n\n return np.array(predictions)"
] | [
[
"numpy.dot",
"scipy.sparse.linalg.cg",
"numpy.linalg.norm",
"numpy.ones",
"numpy.sign",
"numpy.identity",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
gcruchon/test-opencv | [
"fdf7cb7a86f5606ca6df6170107a0264fbc43e9c"
] | [
"chapter1-cam.py"
] | [
"import cv2\nimport numpy as np\ncap = cv2.VideoCapture(0)\nkernel = np.ones((5, 5), np.uint8)\n\nwhile True:\n success, img = cap.read()\n cv2.imshow(\"Cam\", cv2.Canny(img, 100, 100))\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n"
] | [
[
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
diable201/ComputerVision | [
"5ee153363fa6757d3cd8b1add3e5d48b01a499e2"
] | [
"Lectures/lec_05/genSymbolImg.py"
] | [
"import cv2\nimport numpy as np\nfrom random import randint, uniform\nimport string, random\n\n\ndef addNoise(image): \n row,col = image.shape\n s_vs_p = 0.4\n amount = 0.01\n out = np.copy(image)\n # Salt mode\n num_salt = np.ceil(amount * image.size * s_vs_p)\n coords = [np.random.randint(0, i - 1, int(num_salt))\n for i in image.shape]\n out[tuple(coords)] = 1\n\n # Pepper mode\n num_pepper = np.ceil(amount* image.size * (1. - s_vs_p))\n coords = [np.random.randint(0, i - 1, int(num_pepper)) for i in image.shape]\n out[tuple(coords)] = 0\n return out\n\n\n# def addLines(img):\n# for i in range(randint(0,2)):\n# y1 = randint(0, img.shape[0])\n# y2 = randint(0, img.shape[0])\n# cv2.line(img, (0, y1), (img.shape[1], y2), 0, 1)\n\n\ndef addBlur(img, kw, kh):\n return cv2.blur(img, (kw, kh))\n\n\ndef text_generator(chars, size = 8):\n return ''.join(random.choice(chars) for _ in range(size))\n\n\ndef addText(img, chars, font, size, line_size):\n\n text = text_generator(chars, 1) \n\n cv2.putText(img, text, (0, img.shape[0]-4), font, size, (0, 0, 255), line_size, cv2.LINE_AA)\n\n return text\n\nsizes = [(70,58),(40,35),(75,70),(70,70),(70,70),(50,50)]\n\ndef genSymbolImg(chars = string.ascii_uppercase + string.digits,\n font = None,\n line_size = None,\n blur = None,\n kw = None, \n kh = None):\n\n if font is None:\n font = randint(0, 5)\n\n # if size is None:\n # size = uniform(2.5, 3.5)\n\n if line_size is None:\n line_size = randint(1, 3)\n\n if blur is None:\n blur = randint(0, 1)\n\n if kw is None:\n kw = randint(3, 9)\n\n if kh is None:\n kh = randint(3, 9)\n\n\n genImg = np.full(sizes[font], 255, dtype= np.uint8)\n\n text = addText(genImg, chars, font, 3, line_size)\n\n if randint(0, 1):\n genImg = addNoise(genImg)\n \n # if lines:\n # addLines(genImg)\n\n if blur:\n genImg = addBlur(genImg, kw, kh)\n\n\n return genImg, text\n\n\n\nif __name__ == '__main__':\n\n for i in xrange(10000):\n img, text = genSymbolImg(kw = 5, kh = 5, blur = 1)\n print(text)\n\n cv2.imshow(\"W\", img)\n k = cv2.waitKey(0)\n if k == 27:\n break"
] | [
[
"numpy.ceil",
"numpy.copy",
"numpy.full"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
cradesto/pystella | [
"f6f44ed12d9648585a52a09e15d494daa4c70c59"
] | [
"tau.py"
] | [
"#!/usr/bin/env python3\n\nimport argparse\nimport logging\n\nimport numpy as np\nimport pystella as ps\nfrom pystella.model.sn_tau import StellaTauDetail\n\nmpl_logger = logging.getLogger('matplotlib')\nmpl_logger.setLevel(logging.WARNING)\n\n__author__ = 'bakl'\n\n# todo Show filters\n# todo show valuse for filters\n# todo compute SED = 4 pi R^2 sig T^4\n\n\ndef plot_tau_moments(tau, moments=None, xlim=None):\n import matplotlib.pyplot as plt\n\n moments = moments or np.exp(np.linspace(np.log(0.5), np.log(400.), 40))\n\n fig, (axV, axT) = plt.subplots(2, figsize=(12, 12), sharex=True, gridspec_kw={'hspace': 0})\n axV.set_title(tau.Name)\n axV.set_xlabel('')\n axV.set_ylabel('Velocity [1000 km/s]')\n\n axT.set_xlabel('Radius [cm]')\n axT.set_ylabel('Temperature [K]')\n\n for i, time in enumerate(moments):\n b = tau.block_nearest(time)\n n = int(2 - np.log10(max(1e-03, abs(b.Time)))) # if b.Time >= 10. else 4 # label format\n p = axV.semilogx(b.R, b.V8, label=\"t= {:.{}f}\".format(b.Time, n))\n color = p[0].get_color()\n axT.loglog(b.R, b.T, label=\"t={:.2f}\".format(time), color=color)\n\n axV.legend(frameon=False)\n\n if xlim is not None:\n axT.set_xlim(xlim)\n axV.set_xlim(xlim)\n\n fig.tight_layout()\n return fig\n\n\ndef plot_bands(ax, bnames, amp=30, alpha=0.5):\n \"\"\"Plot the filter responses\"\"\"\n color_dic = ps.band.colors()\n res = {}\n for bname in bnames:\n b = ps.band.band_by_name(bname)\n wl = b.wl * ps.phys.cm_to_angs\n ax.plot(wl, b.resp_wl*amp, color_dic[bname], alpha=alpha)\n\n wl_eff = b.wl_eff_angs\n ax.axvline(x=wl_eff, ymin=0., ymax=0.99, linestyle='--', color=color_dic[bname], alpha=alpha)\n ax.text(wl_eff, 10, bname, fontsize=12)\n ax.text(wl_eff*.95, 3, \"{:.0f}\".format(wl_eff), fontsize=6)\n res[bname] = (wl_eff, color_dic[bname])\n return res\n\n\ndef plot_tau_phot(tau_data, pars, tau_ph, xlim=None, title='', bnames=None):\n \"\"\"\n Plot photosphere as Func(nu). 
Maybe: R, V, V8, T\n :param pars: the parameters of photosphere\n :param tau_data: the data at the optical depth tau_ph\n :param tau_ph: the photosphere location\n :param xlim: wave length interval [A]\n :param title: the plot title\n :param bnames: array of filter names to show the filter responses\n :return: figure\n \"\"\"\n import matplotlib.pyplot as plt\n\n def fr2wv(nu):\n return ps.phys.c / nu * ps.phys.cm_to_angs\n\n fig, axs = plt.subplots(len(pars)+1, figsize=(12, 12), sharex=True, gridspec_kw={'hspace': 0})\n\n # Setup\n ax = axs[0]\n ax.set_ylabel(r'Zone ($\\tau_{{ph}}= {:.2f}$)'.format(tau_ph))\n ax.set_title(title)\n ax.xaxis.set_ticks_position('top')\n # ax.xaxis.tick_top()\n # ax.tick_params(axis=\"x\", direction=\"in\", pad=-22)\n # ax.tick_params(direction='in')\n\n for i, p in enumerate(pars, 1):\n ax = axs[i]\n ax.set_ylabel(r'{}$_{{ph}}$'.format(p))\n if i < len(axs)-1:\n ax.set_xlabel('')\n ax.tick_params(which='both', top=False, bottom=False)\n else:\n ax.set_xlabel('Wavelength [A]')\n\n # Plot Zone_ph\n colors = []\n for j, (t, freq, y) in enumerate(tau_data[StellaTauDetail.col_zon]):\n axzon = axs[0]\n n = int(3 - np.log10(max(1e-03, abs(t)))) # label format\n lbl = \"t= {:.{}f} d\".format(t, n)\n\n ll = axzon.semilogx(fr2wv(freq), y, label=lbl)\n color = ll[0].get_color()\n colors.append(color)\n\n bnames_waves = None\n if bnames is not None:\n ylim = axzon.get_ylim()\n bnames_waves = plot_bands(axzon, bnames, amp=ylim[1]*0.25, alpha=0.5)\n\n # Plot other params\n for i, p in enumerate(pars, 1):\n is_log = p.startswith('log')\n p_data = p.replace('log', '') if is_log else p\n ax = axs[i]\n for j, (t, freq, y) in enumerate(tau_data[p_data]):\n x = fr2wv(freq)\n if is_log:\n ax.loglog(x, y, color=colors[j])\n else:\n ax.semilogx(x, y, color=colors[j])\n\n if bnames_waves is not None:\n for bn, (wl, col) in bnames_waves.items():\n ax.axvline(x=wl, ymin=0., ymax=0.99, linestyle='--', color=col, alpha=0.5)\n\n # Post-plotting\n for i, ax in enumerate(axs):\n ax.tick_params(which='both', left=True, right=True, direction=\"in\")\n # ax.grid(axis=\"x\", color=\"grey\", alpha=.5, linewidth=1, linestyle=\":\")\n\n if xlim is not None:\n ax.set_xlim(xlim)\n\n axs[0].legend(frameon=False)\n\n fig.tight_layout()\n return fig\n\n\ndef get_parser(times='0.1:1:10:25:65', bnames='U:B:V:R'):\n parser = argparse.ArgumentParser(description='Standard Candle Method.')\n print(\" Plot the tau-wave diagram for STELLA models\")\n parser.add_argument('-b', '--band',\n nargs='?',\n required=False,\n # default=bnames,\n const=bnames,\n type=str,\n dest=\"bnames\",\n help=\"-b <bands>: string. If set only -b BNAMES is {}\".format(bnames))\n parser.add_argument('-i', '--input',\n required=True,\n dest=\"input\",\n help=\"Model name, example: cat_R450_M15_Ni007\")\n parser.add_argument('-p', '--path',\n required=False,\n type=str,\n default=False,\n dest=\"path\",\n help=\"Model directory\")\n parser.add_argument('-ph', '--phot',\n required=False,\n type=str,\n default=False,\n dest=\"phot\",\n help='Plot photosphere parameter. Maybe: R, V, V8, T. Example: -ph R:V8:T ' \n 'You may use prefix log, e.g. logT or logV8')\n parser.add_argument('-s', '--save',\n action='store_const',\n const=True,\n dest=\"is_save\",\n help=\"To save the result plot to pdf-file. Format: tau_[name]_t[times].pdf.\")\n parser.add_argument('-t', '--time',\n required=False,\n type=str,\n default=times,\n dest=\"times\",\n help=\"Plot tau snap for selected time moments. 
Default: {0}\".format(times))\n parser.add_argument('--tau_ph',\n required=False,\n type=float,\n default=2./3.,\n dest=\"tau_ph\",\n help=\"The optical depth at the photosphere. Default: 2/3\")\n parser.add_argument('-x', '--xlim',\n required=False,\n type=str,\n default=None,\n dest=\"xlim\",\n help=\"wave length interval [A]. Example: 1.:25e3. Default: all waves in the tau-file\")\n parser.add_argument('-w', '--write',\n required=False,\n type=str,\n default=None,\n dest=\"write_prefix\",\n help=\"The prefix of file + -ParamName.dat\")\n\n return parser\n\n\ndef str2float(s):\n return list(map(float, s.split(':')))\n\n\ndef main():\n import os\n import sys\n try:\n import matplotlib.pyplot as plt\n except ImportError:\n plt = None\n\n ps.Band.load_settings()\n\n model_ext = '.tau'\n\n parser = get_parser()\n args, unknownargs = parser.parse_known_args()\n\n path = os.getcwd()\n if args.path:\n path = os.path.expanduser(args.path)\n\n # Set model names\n fname = None\n if args.input:\n fname = args.input.strip()\n fname = fname.replace(model_ext, '')\n\n if fname is None:\n parser.print_help()\n sys.exit(2)\n\n model = ps.Stella(fname, path=path)\n\n if not model.is_tau:\n print(\"No tau-data for: \" + str(model))\n return None\n\n fig = None\n xlim = None\n fplot = None\n print('\\n Arguments')\n times = str2float(args.times)\n print(' The time moments: ', args.times)\n print(' The optical depth ', args.tau_ph)\n if args.phot:\n print(' The photospheric parameters ', args.phot)\n if args.xlim is not None:\n xlim = str2float(args.xlim)\n print(\" xlim: \", xlim)\n # Set band names\n bnames = ('B',)\n ps.Band.load_settings()\n if args.bnames:\n bnames = []\n for bname in args.bnames.split('-'):\n if not ps.band.is_exist(bname):\n print('No such band: ' + bname)\n parser.print_help()\n sys.exit(2)\n bnames.append(bname)\n\n tau = model.get_tau().load(is_info=False)\n print('\\n Loaded data from {}'.format(tau.FName))\n print('Model has Nzone= {} Ntimes= {}'.format(tau.Nzon, tau.Ntimes))\n print(\"The model time interval: {:.3e} - {:3e} days\".format(min(tau.Times), max(tau.Times)))\n print(\"The bnames are {}\".format(', '.join(bnames)))\n # print(tau.Wl2angs)\n # tau = b.Tau\n # print(tau.shape)\n\n ###\n # Plot\n if args.phot:\n pars = args.phot.split(':')\n if isinstance(pars, str):\n pars = [pars]\n pars_data = [p.replace('log', '') for p in pars]\n tau_data = tau.params_ph(pars=pars_data, moments=times, tau_ph=args.tau_ph)\n\n if args.write_prefix:\n fwrite = os.path.expanduser(args.write_prefix)\n tau.data_save(fwrite, tau_data, pars_data)\n else:\n # Print parameters\n print('\\nPhotospheric parameters:')\n for ii, p in enumerate(pars_data):\n print('{:9s} {}'.format('t_real', ' '.join([f'{p}_{b:10s}' for b in bnames])))\n for i, (t, freq, y) in enumerate(tau_data[p]):\n s = '{:9.4f} '.format(t)\n for bname in bnames:\n b = ps.band.band_by_name(bname)\n fr_eff = b.freq_eff\n idx = (np.abs(freq - fr_eff)).argmin()\n s += ' {:10e}'.format( y[idx])\n print(s)\n # Plot\n fig = plot_tau_phot(tau_data, pars, tau_ph=args.tau_ph, xlim=xlim, title=tau.Name, bnames=bnames)\n fplot = os.path.expanduser(\"~/tau_{}_{}.pdf\".format(fname, str.replace(args.phot, ':', '-')))\n else:\n fig = plot_tau_moments(tau, moments=times, xlim=xlim)\n\n if args.is_save:\n if fplot is None:\n fplot = os.path.expanduser(\"~/tau_{0}_t{1}.pdf\".format(fname, str.replace(args.times, ':', '-')))\n print(\"Save plot to {0}\".format(fplot))\n fig.savefig(fplot, bbox_inches='tight')\n else:\n plt.show()\n\n\nif 
__name__ == '__main__':\n main()\n"
] | [
[
"numpy.abs",
"numpy.log",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ares201005/qiskit-aer | [
"fb3bab00ab810e73ad333b0f538fa6c3c53f054e"
] | [
"test/terra/backends/qasm_simulator/qasm_snapshot.py"
] | [
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2018, 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\"\"\"\nQasmSimulator Integration Tests for Snapshot instructions\n\"\"\"\n\nimport logging\nimport itertools as it\nimport numpy as np\n\nfrom qiskit import QuantumCircuit\nfrom qiskit.compiler import assemble\nfrom qiskit.quantum_info import DensityMatrix, Pauli, Operator\nfrom qiskit.providers.aer import QasmSimulator\nfrom qiskit.providers.aer import AerError\n\nfrom test.terra.reference.ref_snapshot_state import (\n snapshot_state_circuits_deterministic, snapshot_state_counts_deterministic,\n snapshot_state_pre_measure_statevector_deterministic,\n snapshot_state_post_measure_statevector_deterministic,\n snapshot_state_circuits_nondeterministic,\n snapshot_state_counts_nondeterministic,\n snapshot_state_pre_measure_statevector_nondeterministic,\n snapshot_state_post_measure_statevector_nondeterministic)\nfrom test.terra.reference.ref_snapshot_probabilities import (\n snapshot_probabilities_circuits, snapshot_probabilities_counts,\n snapshot_probabilities_labels_qubits,\n snapshot_probabilities_post_meas_probs,\n snapshot_probabilities_pre_meas_probs)\nfrom test.terra.reference.ref_snapshot_expval import (\n snapshot_expval_circuits, snapshot_expval_counts, snapshot_expval_labels,\n snapshot_expval_post_meas_values, snapshot_expval_pre_meas_values)\n\n\nclass QasmSnapshotStatevectorTests:\n \"\"\"QasmSimulator snapshot statevector tests.\"\"\"\n\n SIMULATOR = QasmSimulator()\n SUPPORTED_QASM_METHODS = [\n 'automatic', 'statevector', 'statevector_gpu', 'statevector_thrust',\n 'matrix_product_state'\n ]\n BACKEND_OPTS = {}\n\n def statevector_snapshots(self, data, label):\n \"\"\"Format snapshots as list of Numpy arrays\"\"\"\n snaps = data.get(\"snapshots\", {}).get(\"statevector\", {}).get(label, [])\n statevecs = []\n for snap in snaps:\n self.assertIsInstance(snap, np.ndarray)\n statevecs.append(snap)\n return statevecs\n\n def test_snapshot_statevector_pre_measure_det(self):\n \"\"\"Test snapshot statevector before deterministic final measurement\"\"\"\n shots = 10\n label = \"snap\"\n counts_targets = snapshot_state_counts_deterministic(shots)\n statevec_targets = snapshot_state_pre_measure_statevector_deterministic(\n )\n circuits = snapshot_state_circuits_deterministic(label,\n 'statevector',\n post_measure=False)\n\n qobj = assemble(circuits, self.SIMULATOR, shots=shots)\n job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)\n result = job.result()\n success = getattr(result, 'success', False)\n method = self.BACKEND_OPTS.get('method', 'automatic')\n if method not in QasmSnapshotStatevectorTests.SUPPORTED_QASM_METHODS:\n self.assertFalse(success)\n else:\n self.assertTrue(success)\n self.compare_counts(result, circuits, counts_targets, delta=0)\n # Check snapshots\n for j, circuit in enumerate(circuits):\n data = result.data(circuit)\n snaps = self.statevector_snapshots(data, label)\n self.assertTrue(len(snaps), 1)\n target = statevec_targets[j]\n value = snaps[0]\n self.assertTrue(np.allclose(value, target))\n\n def test_snapshot_statevector_pre_measure_nondet(self):\n \"\"\"Test 
snapshot statevector before non-deterministic final measurement\"\"\"\n shots = 100\n label = \"snap\"\n counts_targets = snapshot_state_counts_nondeterministic(shots)\n statevec_targets = snapshot_state_pre_measure_statevector_nondeterministic(\n )\n circuits = snapshot_state_circuits_nondeterministic(label,\n 'statevector',\n post_measure=False)\n\n qobj = assemble(circuits, self.SIMULATOR, shots=shots)\n job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)\n result = job.result()\n success = getattr(result, 'success', False)\n method = self.BACKEND_OPTS.get('method', 'automatic')\n if method not in QasmSnapshotStatevectorTests.SUPPORTED_QASM_METHODS:\n self.assertFalse(success)\n else:\n self.assertTrue(success)\n self.compare_counts(result,\n circuits,\n counts_targets,\n delta=0.2 * shots)\n # Check snapshots\n for j, circuit in enumerate(circuits):\n data = result.data(circuit)\n snaps = self.statevector_snapshots(data, label)\n self.assertTrue(len(snaps), 1)\n target = statevec_targets[j]\n value = snaps[0]\n self.assertTrue(np.allclose(value, target))\n\n def test_snapshot_statevector_post_measure_det(self):\n \"\"\"Test snapshot statevector after deterministic final measurement\"\"\"\n shots = 10\n label = \"snap\"\n counts_targets = snapshot_state_counts_deterministic(shots)\n statevec_targets = snapshot_state_post_measure_statevector_deterministic(\n )\n circuits = snapshot_state_circuits_deterministic(label,\n 'statevector',\n post_measure=True)\n\n qobj = assemble(circuits, self.SIMULATOR, memory=True, shots=shots)\n job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)\n result = job.result()\n success = getattr(result, 'success', False)\n method = self.BACKEND_OPTS.get('method', 'automatic')\n if method not in QasmSnapshotStatevectorTests.SUPPORTED_QASM_METHODS:\n logging.getLogger().setLevel(logging.CRITICAL)\n self.assertFalse(success)\n else:\n self.assertTrue(success)\n self.compare_counts(result, circuits, counts_targets, delta=0)\n # Check snapshots\n for i, circuit in enumerate(circuits):\n data = result.data(circuit)\n snaps = self.statevector_snapshots(data, label)\n for j, mem in enumerate(data['memory']):\n target = statevec_targets[i].get(mem)\n self.assertTrue(np.allclose(snaps[j], target))\n\n def test_snapshot_statevector_post_measure_nondet(self):\n \"\"\"Test snapshot statevector after non-deterministic final measurement\"\"\"\n shots = 100\n label = \"snap\"\n counts_targets = snapshot_state_counts_nondeterministic(shots)\n statevec_targets = snapshot_state_post_measure_statevector_nondeterministic(\n )\n circuits = snapshot_state_circuits_nondeterministic(label,\n 'statevector',\n post_measure=True)\n\n qobj = assemble(circuits, self.SIMULATOR, memory=True, shots=shots)\n job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)\n result = job.result()\n success = getattr(result, 'success', False)\n method = self.BACKEND_OPTS.get('method', 'automatic')\n if method not in QasmSnapshotStatevectorTests.SUPPORTED_QASM_METHODS:\n self.assertFalse(success)\n else:\n self.assertTrue(success)\n self.compare_counts(result,\n circuits,\n counts_targets,\n delta=0.2 * shots)\n # Check snapshots\n for i, circuit in enumerate(circuits):\n data = result.data(circuit)\n snaps = self.statevector_snapshots(data, label)\n for j, mem in enumerate(data['memory']):\n target = statevec_targets[i].get(mem)\n self.assertTrue(np.allclose(snaps[j], target))\n\n\nclass QasmSnapshotStabilizerTests:\n \"\"\"QasmSimulator method snapshot stabilizer 
tests.\"\"\"\n\n SIMULATOR = QasmSimulator()\n SUPPORTED_QASM_METHODS = ['automatic', 'stabilizer']\n BACKEND_OPTS = {}\n\n @staticmethod\n def stabilizer_snapshots(data, label):\n \"\"\"Get stabilizer snapshots\"\"\"\n return data.get(\"snapshots\", {}).get(\"stabilizer\", {}).get(label, [])\n\n @staticmethod\n def stabilizes_statevector(stabilizer, statevector):\n \"\"\"Return True if two stabilizer states are equal.\"\"\"\n # Get stabilizer and destabilizers and convert to sets\n for stab in stabilizer:\n if stab[0] == '-':\n pauli_mat = -1 * Pauli.from_label(stab[1:]).to_matrix()\n else:\n pauli_mat = Pauli.from_label(stab).to_matrix()\n val = statevector.conj().dot(pauli_mat.dot(statevector))\n if not np.isclose(val, 1):\n return False\n return True\n\n def test_snapshot_stabilizer_pre_measure_det(self):\n \"\"\"Test snapshot stabilizer before deterministic final measurement\"\"\"\n shots = 10\n label = \"snap\"\n counts_targets = snapshot_state_counts_deterministic(shots)\n statevec_targets = snapshot_state_pre_measure_statevector_deterministic(\n )\n circuits = snapshot_state_circuits_deterministic(label,\n 'stabilizer',\n post_measure=False)\n\n qobj = assemble(circuits, self.SIMULATOR, shots=shots)\n job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)\n result = job.result()\n success = getattr(result, 'success', False)\n method = self.BACKEND_OPTS.get('method', 'automatic')\n if method not in QasmSnapshotStabilizerTests.SUPPORTED_QASM_METHODS:\n self.assertFalse(success)\n else:\n self.assertTrue(success)\n self.compare_counts(result, circuits, counts_targets, delta=0)\n # Check snapshots\n for j, circuit in enumerate(circuits):\n data = result.data(circuit)\n snaps = self.stabilizer_snapshots(data, label)\n self.assertEqual(len(snaps), 1)\n statevec = statevec_targets[j]\n stabilizer = snaps[0]\n self.assertTrue(\n self.stabilizes_statevector(stabilizer, statevec))\n\n def test_snapshot_stabilizer_pre_measure_nondet(self):\n \"\"\"Test snapshot stabilizer before non-deterministic final measurement\"\"\"\n shots = 100\n label = \"snap\"\n counts_targets = snapshot_state_counts_nondeterministic(shots)\n statevec_targets = snapshot_state_pre_measure_statevector_nondeterministic(\n )\n circuits = snapshot_state_circuits_nondeterministic(label,\n 'stabilizer',\n post_measure=False)\n\n qobj = assemble(circuits, self.SIMULATOR, shots=shots)\n job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)\n result = job.result()\n success = getattr(result, 'success', False)\n method = self.BACKEND_OPTS.get('method', 'automatic')\n if method not in QasmSnapshotStabilizerTests.SUPPORTED_QASM_METHODS:\n self.assertFalse(success)\n else:\n self.assertTrue(success)\n self.compare_counts(result,\n circuits,\n counts_targets,\n delta=0.2 * shots)\n # Check snapshots\n for j, circuit in enumerate(circuits):\n data = result.data(circuit)\n snaps = self.stabilizer_snapshots(data, label)\n self.assertEqual(len(snaps), 1)\n statevec = statevec_targets[j]\n stabilizer = snaps[0]\n self.assertTrue(\n self.stabilizes_statevector(stabilizer, statevec))\n\n def test_snapshot_stabilizer_post_measure_det(self):\n \"\"\"Test snapshot stabilizer after deterministic final measurement\"\"\"\n shots = 10\n label = \"snap\"\n counts_targets = snapshot_state_counts_deterministic(shots)\n statevec_targets = snapshot_state_post_measure_statevector_deterministic(\n )\n circuits = snapshot_state_circuits_deterministic(label,\n 'stabilizer',\n post_measure=True)\n\n qobj = assemble(circuits, 
self.SIMULATOR, memory=True, shots=shots)\n job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)\n result = job.result()\n success = getattr(result, 'success', False)\n method = self.BACKEND_OPTS.get('method', 'automatic')\n if method not in QasmSnapshotStabilizerTests.SUPPORTED_QASM_METHODS:\n self.assertFalse(success)\n else:\n self.assertTrue(success)\n self.compare_counts(result, circuits, counts_targets, delta=0)\n # Check snapshots\n for i, circuit in enumerate(circuits):\n data = result.data(circuit)\n snaps = self.stabilizer_snapshots(data, label)\n for j, mem in enumerate(data['memory']):\n statevec = statevec_targets[i].get(mem)\n stabilizer = snaps[j]\n self.assertTrue(\n self.stabilizes_statevector(stabilizer, statevec))\n\n def test_snapshot_stabilizer_post_measure_nondet(self):\n \"\"\"Test snapshot stabilizer after non-deterministic final measurement\"\"\"\n shots = 100\n label = \"snap\"\n counts_targets = snapshot_state_counts_nondeterministic(shots)\n statevec_targets = snapshot_state_post_measure_statevector_nondeterministic(\n )\n circuits = snapshot_state_circuits_nondeterministic(label,\n 'stabilizer',\n post_measure=True)\n\n qobj = assemble(circuits, self.SIMULATOR, memory=True, shots=shots)\n job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)\n result = job.result()\n success = getattr(result, 'success', False)\n method = self.BACKEND_OPTS.get('method', 'automatic')\n if method not in QasmSnapshotStabilizerTests.SUPPORTED_QASM_METHODS:\n self.assertFalse(success)\n else:\n self.assertTrue(success)\n self.compare_counts(result,\n circuits,\n counts_targets,\n delta=0.2 * shots)\n # Check snapshots\n for i, circuit in enumerate(circuits):\n data = result.data(circuit)\n snaps = self.stabilizer_snapshots(data, label)\n for j, mem in enumerate(data['memory']):\n statevec = statevec_targets[i].get(mem)\n stabilizer = snaps[j]\n self.assertTrue(\n self.stabilizes_statevector(stabilizer, statevec))\n\n\nclass QasmSnapshotDensityMatrixTests:\n \"\"\"QasmSimulator snapshot density matrix tests.\"\"\"\n\n SIMULATOR = QasmSimulator()\n SUPPORTED_QASM_METHODS = [\n 'automatic', 'density_matrix', 'density_matrix_gpu',\n 'density_matrix_thrust'\n ]\n BACKEND_OPTS = {}\n\n def density_snapshots(self, data, label):\n \"\"\"Format snapshots as list of Numpy arrays\"\"\"\n # Check snapshot entry exists in data\n snaps = data.get(\"snapshots\", {}).get(\"density_matrix\",\n {}).get(label, [])\n # Convert nested lists to numpy arrays\n output = {}\n for snap_dict in snaps:\n memory = snap_dict['memory']\n self.assertIsInstance(snap_dict['value'], np.ndarray)\n output[memory] = snap_dict['value']\n return output\n\n def test_snapshot_density_matrix_pre_measure_det(self):\n \"\"\"Test snapshot density matrix before deterministic final measurement\"\"\"\n shots = 10\n label = \"snap\"\n counts_targets = snapshot_state_counts_deterministic(shots)\n statevec_targets = snapshot_state_pre_measure_statevector_deterministic(\n )\n circuits = snapshot_state_circuits_deterministic(label,\n 'density_matrix',\n post_measure=False)\n\n qobj = assemble(circuits, self.SIMULATOR, shots=shots)\n job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)\n result = job.result()\n success = getattr(result, 'success', False)\n method = self.BACKEND_OPTS.get('method', 'automatic')\n if method not in QasmSnapshotDensityMatrixTests.SUPPORTED_QASM_METHODS:\n self.assertFalse(success)\n else:\n self.assertTrue(success)\n self.compare_counts(result, circuits, counts_targets, 
delta=0)\n # Check snapshots\n for j, circuit in enumerate(circuits):\n data = result.data(circuit)\n snaps = self.density_snapshots(data, label)\n self.assertTrue(len(snaps), 1)\n target = np.outer(statevec_targets[j],\n statevec_targets[j].conj())\n # Pre-measurement all memory bits should be 0\n value = snaps.get('0x0')\n self.assertTrue(np.allclose(value, target))\n\n def test_snapshot_density_matrix_pre_measure_nondet(self):\n \"\"\"Test snapshot density matrix before non-deterministic final measurement\"\"\"\n shots = 100\n label = \"snap\"\n counts_targets = snapshot_state_counts_nondeterministic(shots)\n statevec_targets = snapshot_state_pre_measure_statevector_nondeterministic(\n )\n circuits = snapshot_state_circuits_nondeterministic(label,\n 'density_matrix',\n post_measure=False)\n\n qobj = assemble(circuits, self.SIMULATOR, shots=shots)\n job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)\n result = job.result()\n success = getattr(result, 'success', False)\n method = self.BACKEND_OPTS.get('method', 'automatic')\n if method not in QasmSnapshotDensityMatrixTests.SUPPORTED_QASM_METHODS:\n self.assertFalse(success)\n else:\n self.assertTrue(success)\n self.compare_counts(result,\n circuits,\n counts_targets,\n delta=0.2 * shots)\n # Check snapshots\n for j, circuit in enumerate(circuits):\n data = result.data(circuit)\n snaps = self.density_snapshots(data, label)\n self.assertTrue(len(snaps), 1)\n target = np.outer(statevec_targets[j],\n statevec_targets[j].conj())\n value = snaps.get('0x0')\n self.assertTrue(np.allclose(value, target))\n\n def test_snapshot_density_matrix_post_measure_det(self):\n \"\"\"Test snapshot density matrix after deterministic final measurement\"\"\"\n shots = 10\n label = \"snap\"\n counts_targets = snapshot_state_counts_deterministic(shots)\n statevec_targets = snapshot_state_post_measure_statevector_deterministic(\n )\n circuits = snapshot_state_circuits_deterministic(label,\n 'density_matrix',\n post_measure=True)\n\n qobj = assemble(circuits, self.SIMULATOR, memory=True, shots=shots)\n job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)\n result = job.result()\n success = getattr(result, 'success', False)\n method = self.BACKEND_OPTS.get('method', 'automatic')\n if method not in QasmSnapshotDensityMatrixTests.SUPPORTED_QASM_METHODS:\n self.assertFalse(success)\n else:\n self.assertTrue(success)\n self.compare_counts(result, circuits, counts_targets, delta=0)\n # Check snapshots\n for i, circuit in enumerate(circuits):\n data = result.data(circuit)\n snaps = self.density_snapshots(data, label)\n for j, mem in enumerate(data['memory']):\n target = statevec_targets[i].get(mem)\n target = np.outer(target, target.conj())\n value = snaps.get(mem)\n self.assertTrue(np.allclose(value, target))\n\n def test_snapshot_density_matrix_post_measure_nondet(self):\n \"\"\"Test snapshot density matrix after non-deterministic final measurement\"\"\"\n shots = 100\n label = \"snap\"\n counts_targets = snapshot_state_counts_nondeterministic(shots)\n statevec_targets = snapshot_state_post_measure_statevector_nondeterministic(\n )\n circuits = snapshot_state_circuits_nondeterministic(label,\n 'density_matrix',\n post_measure=True)\n\n qobj = assemble(circuits, self.SIMULATOR, memory=True, shots=shots)\n job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)\n result = job.result()\n success = getattr(result, 'success', False)\n method = self.BACKEND_OPTS.get('method', 'automatic')\n if method not in 
QasmSnapshotDensityMatrixTests.SUPPORTED_QASM_METHODS:\n self.assertFalse(success)\n else:\n self.assertTrue(success)\n self.compare_counts(result,\n circuits,\n counts_targets,\n delta=0.2 * shots)\n # Check snapshots\n for i, circuit in enumerate(circuits):\n data = result.data(circuit)\n snaps = self.density_snapshots(data, label)\n for j, mem in enumerate(data['memory']):\n target = statevec_targets[i].get(mem)\n target = np.outer(target, target.conj())\n value = snaps.get(mem)\n self.assertTrue(np.allclose(value, target))\n\n\nclass QasmSnapshotProbabilitiesTests:\n \"\"\"QasmSimulator snapshot probabilities tests.\"\"\"\n\n SIMULATOR = QasmSimulator()\n SUPPORTED_QASM_METHODS = [\n 'automatic',\n 'statevector',\n 'statevector_gpu',\n 'statevector_thrust',\n 'stabilizer',\n 'density_matrix',\n 'density_matrix_gpu',\n 'density_matrix_thrust',\n 'matrix_product_state',\n ]\n BACKEND_OPTS = {}\n\n @staticmethod\n def probability_snapshots(data, labels):\n \"\"\"Format snapshots as nested dicts\"\"\"\n # Check snapshot entry exists in data\n output = {}\n for label in labels:\n snaps = data.get(\"snapshots\", {}).get(\"probabilities\",\n {}).get(label, [])\n output[label] = {\n snap_dict['memory']: snap_dict['value']\n for snap_dict in snaps\n }\n return output\n\n def test_snapshot_probabilities_pre_measure(self):\n \"\"\"Test snapshot probabilities before final measurement\"\"\"\n shots = 1000\n labels = list(snapshot_probabilities_labels_qubits().keys())\n counts_targets = snapshot_probabilities_counts(shots)\n prob_targets = snapshot_probabilities_pre_meas_probs()\n\n circuits = snapshot_probabilities_circuits(post_measure=False)\n\n qobj = assemble(circuits, self.SIMULATOR, shots=shots)\n job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)\n result = job.result()\n success = getattr(result, 'success', False)\n method = self.BACKEND_OPTS.get('method', 'automatic')\n if method not in QasmSnapshotProbabilitiesTests.SUPPORTED_QASM_METHODS:\n self.assertFalse(success)\n else:\n self.assertTrue(success)\n self.compare_counts(result,\n circuits,\n counts_targets,\n delta=0.1 * shots)\n # Check snapshots\n for j, circuit in enumerate(circuits):\n data = result.data(circuit)\n all_snapshots = self.probability_snapshots(data, labels)\n for label in labels:\n snaps = all_snapshots.get(label, {})\n self.assertTrue(len(snaps), 1)\n for memory, value in snaps.items():\n target = prob_targets[j].get(label, {}).get(memory, {})\n self.assertDictAlmostEqual(value, target, delta=1e-7)\n\n def test_snapshot_probabilities_post_measure(self):\n \"\"\"Test snapshot probabilities after final measurement\"\"\"\n shots = 1000\n labels = list(snapshot_probabilities_labels_qubits().keys())\n counts_targets = snapshot_probabilities_counts(shots)\n prob_targets = snapshot_probabilities_post_meas_probs()\n\n circuits = snapshot_probabilities_circuits(post_measure=True)\n\n qobj = assemble(circuits, self.SIMULATOR, shots=shots)\n job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)\n result = job.result()\n success = getattr(result, 'success', False)\n method = self.BACKEND_OPTS.get('method', 'automatic')\n if method not in QasmSnapshotProbabilitiesTests.SUPPORTED_QASM_METHODS:\n self.assertFalse(success)\n else:\n self.assertTrue(success)\n self.compare_counts(result,\n circuits,\n counts_targets,\n delta=0.1 * shots)\n # Check snapshots\n for j, circuit in enumerate(circuits):\n data = result.data(circuit)\n all_snapshots = self.probability_snapshots(data, labels)\n for label in 
labels:\n snaps = all_snapshots.get(label, {})\n for memory, value in snaps.items():\n target = prob_targets[j].get(label, {}).get(memory, {})\n self.assertDictAlmostEqual(value, target, delta=1e-7)\n\n\nclass QasmSnapshotExpValPauliTests:\n \"\"\"QasmSimulator snapshot pauli expectation value tests.\"\"\"\n\n SIMULATOR = QasmSimulator()\n SUPPORTED_QASM_METHODS = [\n 'automatic', 'statevector', 'statevector_gpu', 'statevector_thrust',\n 'density_matrix', 'density_matrix_gpu', 'density_matrix_thrust',\n 'matrix_product_state', 'stabilizer'\n ]\n BACKEND_OPTS = {}\n\n @staticmethod\n def expval_snapshots(data, labels):\n \"\"\"Format snapshots as nested dicts\"\"\"\n # Check snapshot entry exists in data\n output = {}\n for label in labels:\n snaps = data.get(\"snapshots\", {}).get(\"expectation_value\",\n {}).get(label, [])\n # Convert list into dict\n inner = {}\n for snap_dict in snaps:\n val = snap_dict['value']\n inner[snap_dict['memory']] = val\n output[label] = inner\n return output\n\n def test_snapshot_expval_pauli_pre_measure(self):\n \"\"\"Test snapshot expectation value (pauli) before final measurement\"\"\"\n shots = 1000\n labels = snapshot_expval_labels()\n counts_targets = snapshot_expval_counts(shots)\n value_targets = snapshot_expval_pre_meas_values()\n\n circuits = snapshot_expval_circuits(pauli=True, post_measure=False)\n\n qobj = assemble(circuits, self.SIMULATOR, shots=shots)\n job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)\n result = job.result()\n success = getattr(result, 'success', False)\n method = self.BACKEND_OPTS.get('method', 'automatic')\n if method not in QasmSnapshotExpValPauliTests.SUPPORTED_QASM_METHODS:\n self.assertFalse(success)\n else:\n self.assertTrue(success)\n self.compare_counts(result,\n circuits,\n counts_targets,\n delta=0.1 * shots)\n # Check snapshots\n for j, circuit in enumerate(circuits):\n data = result.data(circuit)\n all_snapshots = self.expval_snapshots(data, labels)\n for label in labels:\n snaps = all_snapshots.get(label, {})\n self.assertTrue(len(snaps), 1)\n for memory, value in snaps.items():\n target = value_targets[j].get(label,\n {}).get(memory, {})\n self.assertAlmostEqual(value, target, delta=1e-7)\n\n def test_snapshot_expval_pauli_post_measure(self):\n \"\"\"Test snapshot expectation value (pauli) after final measurement\"\"\"\n shots = 1000\n labels = snapshot_expval_labels()\n counts_targets = snapshot_expval_counts(shots)\n value_targets = snapshot_expval_post_meas_values()\n\n circuits = snapshot_expval_circuits(pauli=True, post_measure=True)\n\n qobj = assemble(circuits, self.SIMULATOR, shots=shots)\n job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)\n result = job.result()\n success = getattr(result, 'success', False)\n method = self.BACKEND_OPTS.get('method', 'automatic')\n if method not in QasmSnapshotExpValPauliTests.SUPPORTED_QASM_METHODS:\n self.assertFalse(success)\n else:\n self.assertTrue(success)\n self.compare_counts(result,\n circuits,\n counts_targets,\n delta=0.1 * shots)\n # Check snapshots\n for j, circuit in enumerate(circuits):\n data = result.data(circuit)\n all_snapshots = self.expval_snapshots(data, labels)\n for label in labels:\n snaps = all_snapshots.get(label, {})\n self.assertTrue(len(snaps), 1)\n for memory, value in snaps.items():\n target = value_targets[j].get(label,\n {}).get(memory, {})\n self.assertAlmostEqual(value, target, delta=1e-7)\n\n\nclass QasmSnapshotExpvalPauliNCTests:\n \"\"\"QasmSimulator snapshot pauli expectation value tests on random 
states.\"\"\"\n\n SIMULATOR = QasmSimulator()\n SUPPORTED_QASM_METHODS = [\n 'automatic', 'statevector', 'statevector_gpu', 'statevector_thrust',\n 'density_matrix', 'density_matrix_gpu', 'density_matrix_thrust',\n 'matrix_product_state',\n ]\n BACKEND_OPTS = {}\n\n def general_test(self, pauli, num_qubits=None, seed=None):\n \"\"\"General test case\"\"\"\n pauli_qubits = list(range(len(pauli)))\n if num_qubits is None:\n num_qubits = len(pauli_qubits)\n\n # Prepare random N-qubit product input state\n # from seed\n rng = np.random.default_rng(seed)\n params = rng.uniform(-1, 1, size=(num_qubits, 3))\n init_circ = QuantumCircuit(num_qubits)\n for i, par in enumerate(params):\n init_circ.u3(*par, i)\n\n # Compute the target expectation value\n rho = DensityMatrix.from_instruction(init_circ)\n op = Operator.from_label(pauli)\n target = np.trace(Operator(rho).compose(op, pauli_qubits).data)\n\n # Simulate expectation value\n qc = init_circ.copy()\n qc.snapshot_expectation_value('final', [(1, pauli)], pauli_qubits)\n qobj = assemble(qc)\n result = self.SIMULATOR.run(\n qobj, backend_options=self.BACKEND_OPTS).result()\n self.assertTrue(getattr(result, 'success', False))\n snapshots = result.data(0).get('snapshots', {})\n self.assertIn('expectation_value', snapshots)\n self.assertIn('final', snapshots['expectation_value'])\n expval = snapshots.get('expectation_value', {})['final'][0]['value']\n self.assertAlmostEqual(expval, target)\n\n def test_pauli1(self):\n \"\"\"Test all 1-qubit Pauli snapshots.\"\"\"\n seed = 100\n for tup in ['I', 'X', 'Y', 'Z']:\n pauli = ''.join(reversed(tup))\n with self.subTest(msg='Pauli {}'.format(pauli)):\n self.general_test(pauli, num_qubits=3, seed=seed)\n\n def test_pauli2(self):\n \"\"\"Test all 2-qubit Pauli snapshots.\"\"\"\n seed = 100\n for tup in it.product(['I', 'X', 'Y', 'Z'], repeat=2):\n pauli = ''.join(reversed(tup))\n with self.subTest(msg='Pauli {}'.format(pauli)):\n self.general_test(pauli, num_qubits=3, seed=seed)\n\n def test_pauli3(self):\n \"\"\"Test all 3-qubit Pauli snapshots.\"\"\"\n seed = 100\n for tup in it.product(['I', 'X', 'Y', 'Z'], repeat=3):\n pauli = ''.join(reversed(tup))\n with self.subTest(msg='Pauli {}'.format(pauli)):\n self.general_test(pauli, num_qubits=3, seed=seed)\n\n\nclass QasmSnapshotExpValMatrixTests:\n \"\"\"QasmSimulator snapshot pauli expectation value tests.\"\"\"\n\n SIMULATOR = QasmSimulator()\n SUPPORTED_QASM_METHODS = [\n 'automatic', 'statevector', 'statevector_gpu', 'statevector_thrust',\n 'matrix_product_state'\n ]\n BACKEND_OPTS = {}\n\n @staticmethod\n def expval_snapshots(data, labels):\n \"\"\"Format snapshots as nested dicts\"\"\"\n # Check snapshot entry exists in data\n output = {}\n for label in labels:\n snaps = data.get(\"snapshots\", {}).get(\"expectation_value\",\n {}).get(label, [])\n # Convert list into dict\n inner = {}\n for snap_dict in snaps:\n inner[snap_dict['memory']] = snap_dict['value']\n output[label] = inner\n return output\n\n def test_snapshot_expval_matrix_pre_measure(self):\n \"\"\"Test snapshot expectation value (matrix) before final measurement\"\"\"\n shots = 1000\n labels = snapshot_expval_labels()\n counts_targets = snapshot_expval_counts(shots)\n value_targets = snapshot_expval_pre_meas_values()\n\n circuits = snapshot_expval_circuits(pauli=False, post_measure=False)\n\n qobj = assemble(circuits, self.SIMULATOR, shots=shots)\n job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)\n result = job.result()\n success = getattr(result, 'success', False)\n method = 
self.BACKEND_OPTS.get('method', 'automatic')\n if method not in QasmSnapshotExpValMatrixTests.SUPPORTED_QASM_METHODS:\n self.assertFalse(success)\n else:\n self.assertTrue(success)\n self.compare_counts(result,\n circuits,\n counts_targets,\n delta=0.1 * shots)\n # Check snapshots\n for j, circuit in enumerate(circuits):\n data = result.data(circuit)\n all_snapshots = self.expval_snapshots(data, labels)\n for label in labels:\n snaps = all_snapshots.get(label, {})\n self.assertTrue(len(snaps), 1)\n for memory, value in snaps.items():\n target = value_targets[j].get(label,\n {}).get(memory, {})\n self.assertAlmostEqual(value, target, delta=1e-7)\n\n def test_snapshot_expval_matrix_post_measure(self):\n \"\"\"Test snapshot expectation value (matrix) after final measurement\"\"\"\n shots = 1000\n labels = snapshot_expval_labels()\n counts_targets = snapshot_expval_counts(shots)\n value_targets = snapshot_expval_post_meas_values()\n\n circuits = snapshot_expval_circuits(pauli=False, post_measure=True)\n\n qobj = assemble(circuits, self.SIMULATOR, shots=shots)\n job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)\n result = job.result()\n success = getattr(result, 'success', False)\n method = self.BACKEND_OPTS.get('method', 'automatic')\n if method not in QasmSnapshotExpValMatrixTests.SUPPORTED_QASM_METHODS:\n self.assertFalse(success)\n else:\n self.assertTrue(success)\n self.compare_counts(result,\n circuits,\n counts_targets,\n delta=0.1 * shots)\n # Check snapshots\n for j, circuit in enumerate(circuits):\n data = result.data(circuit)\n all_snapshots = self.expval_snapshots(data, labels)\n for label in labels:\n snaps = all_snapshots.get(label, {})\n self.assertTrue(len(snaps), 1)\n for memory, value in snaps.items():\n target = value_targets[j].get(label,\n {}).get(memory, {})\n self.assertAlmostEqual(value, target, delta=1e-7)\n"
] | [
[
"numpy.allclose",
"numpy.random.default_rng",
"numpy.isclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
oshiooshi/cirneco | [
"f71f1cd583bf6e290d7b8e74f148f06cadd39d63"
] | [
"samoyed_ts/nmt.py"
] | [
"import torch\n# import torchtext\nimport torch.nn as nn\n# from torchtext.vocab import Vocab, build_vocab_from_iterator\n# from torchtext.utils import unicode_csv_reader\n# from torchtext.data.datasets_utils import _RawTextIterableDataset\nfrom torch import Tensor\nfrom typing import Iterable, List\n# import sentencepiece as spm\n# import io\nimport math\nimport vocab\n\nSEED = 1234\ntorch.manual_seed(SEED)\ntorch.cuda.manual_seed(SEED)\n\n# 特殊トークンの定義\nUNK_IDX, PAD_IDX, SOS_IDX, EOS_IDX = 0, 1, 2, 3\nspecial_symbols = ['<unk>', '<pad>', '<sos>', '<eos>', '<blk>', '</blk>', '<sep>']\n\nMAX_LEN=80\n# sp = spm.SentencePieceProcessor(model_file='corpus_Python-JPN/p3/p3.model')\n\n# def jpn_tokenizer(text):\n# ss = [tok.replace('▁', '') for tok in sp.encode(text, out_type=str)][:MAX_LEN]\n# return [s for s in ss if len(s) != 0]\n\n# def py_tokenizer(text):\n# return [tok for tok in text.split()][:MAX_LEN]\n\nfrom torch.nn.utils.rnn import pad_sequence\n\n# 連続した操作をまとめて行うためのヘルパー関数\ndef sequential_transforms(*transforms):\n def func(txt_input):\n for transform in transforms:\n txt_input = transform(txt_input)\n return txt_input\n return func\n\n# SOS/EOSトークンを追加し、入力配列のインデックス用のテンソルを作成\ndef tensor_transform(token_ids: List[int]):\n return torch.cat((torch.tensor([SOS_IDX]), \n torch.tensor(token_ids), \n torch.tensor([EOS_IDX])))\n\n## Transformer の定義\n\nfrom torch.nn import (TransformerEncoder, TransformerDecoder,\n TransformerEncoderLayer, TransformerDecoderLayer)\n\nclass PositionalEncoding(nn.Module):\n def __init__(self, \n emb_size: int, \n dropout: float, \n maxlen: int = 5000):\n super(PositionalEncoding, self).__init__()\n den = torch.exp(- torch.arange(0, emb_size, 2) * math.log(10000) / emb_size)\n pos = torch.arange(0, maxlen).reshape(maxlen, 1)\n pos_embedding = torch.zeros((maxlen, emb_size))\n pos_embedding[:, 0::2] = torch.sin(pos * den)\n pos_embedding[:, 1::2] = torch.cos(pos * den)\n pos_embedding = pos_embedding.unsqueeze(-2)\n\n self.dropout = nn.Dropout(dropout)\n self.register_buffer('pos_embedding', pos_embedding)\n\n def forward(self, token_embedding: Tensor):\n return self.dropout(token_embedding + \n self.pos_embedding[:token_embedding.size(0),:])\n\nclass TokenEmbedding(nn.Module):\n def __init__(self, vocab_size: int, emb_size):\n super(TokenEmbedding, self).__init__()\n self.embedding = nn.Embedding(vocab_size, emb_size)\n self.emb_size = emb_size\n def forward(self, tokens: Tensor):\n return self.embedding(tokens.long()) * math.sqrt(self.emb_size)\n\nclass Seq2SeqTransformer(nn.Module):\n def __init__(self, \n num_encoder_layers: int, \n num_decoder_layers: int,\n emb_size: int, \n nhead: int, \n src_vocab_size: int, \n tgt_vocab_size: int,\n dim_feedforward: int = 512, \n dropout: float = 0.1):\n super(Seq2SeqTransformer, self).__init__()\n encoder_layer = TransformerEncoderLayer(d_model=emb_size, nhead=nhead,\n dim_feedforward=dim_feedforward)\n self.transformer_encoder = TransformerEncoder(encoder_layer, num_layers=num_encoder_layers)\n decoder_layer = TransformerDecoderLayer(d_model=emb_size, nhead=nhead,\n dim_feedforward=dim_feedforward)\n self.transformer_decoder = TransformerDecoder(decoder_layer, num_layers=num_decoder_layers)\n \n self.generator = nn.Linear(emb_size, tgt_vocab_size)\n self.src_tok_emb = TokenEmbedding(src_vocab_size, emb_size)\n self.tgt_tok_emb = TokenEmbedding(tgt_vocab_size, emb_size)\n self.positional_encoding = PositionalEncoding(emb_size, dropout=dropout)\n\n def forward(self, \n src: Tensor, \n tgt: Tensor, \n src_mask: Tensor,\n 
tgt_mask: Tensor, \n src_padding_mask: Tensor,\n tgt_padding_mask: Tensor, \n memory_key_padding_mask: Tensor):\n src_emb = self.positional_encoding(self.src_tok_emb(src))\n tgt_emb = self.positional_encoding(self.tgt_tok_emb(tgt))\n memory = self.transformer_encoder(src_emb, src_mask, src_padding_mask)\n outs = self.transformer_decoder(tgt_emb, memory, tgt_mask, None,\n tgt_padding_mask, memory_key_padding_mask)\n return self.generator(outs)\n\n def encode(self, src: Tensor, src_mask: Tensor):\n return self.transformer_encoder(self.positional_encoding(\n self.src_tok_emb(src)), src_mask)\n\n def decode(self, tgt: Tensor, memory: Tensor, tgt_mask: Tensor):\n return self.transformer_decoder(self.positional_encoding(\n self.tgt_tok_emb(tgt)), memory,\n tgt_mask)\n\nDEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\n### Masking\n## 異なるマスク処理を行う2つの関数を定義\n\n# モデルが予測を行う際に、未来の単語を見ないようにするためのマスク\ndef generate_square_subsequent_mask(sz):\n mask = (torch.triu(torch.ones((sz, sz), device=DEVICE)) == 1).transpose(0, 1)\n mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))\n return mask\n\n# ソースとターゲットのパディングトークンを隠すためのマスク\ndef create_mask(src, tgt):\n src_seq_len = src.shape[0]\n tgt_seq_len = tgt.shape[0]\n\n tgt_mask = generate_square_subsequent_mask(tgt_seq_len)\n src_mask = torch.zeros((src_seq_len, src_seq_len), device=DEVICE).type(torch.bool)\n\n src_padding_mask = (src == PAD_IDX).transpose(0, 1)\n tgt_padding_mask = (tgt == PAD_IDX).transpose(0, 1)\n return src_mask, tgt_mask, src_padding_mask, tgt_padding_mask\n\ndef greedy_decode(model, src, src_mask, max_len, beamsize, start_symbol):\n src = src.to(DEVICE)\n src_mask = src_mask.to(DEVICE)\n\n memory = model.encode(src, src_mask)\n ys = torch.ones(1, 1).fill_(start_symbol).type(torch.long).to(DEVICE)\n for i in range(max_len-1):\n memory = memory.to(DEVICE)\n tgt_mask = (generate_square_subsequent_mask(ys.size(0))\n .type(torch.bool)).to(DEVICE)\n out = model.decode(ys, memory, tgt_mask)\n out = out.transpose(0, 1)\n prob = model.generator(out[:, -1]) # prob.size() の実行結果 : torch.Size([1, 1088]) => 1088 はTGT のVOCAV_SIZE\n next_prob, next_word = prob.topk(k=beamsize, dim=1)\n # print(next_word)\n # print(next_prob)\n\n next_word = next_word[:, 0] # greedy なので、もっとも確率が高いものを選ぶ\n next_word = next_word.item() # 要素の値を取得 (int に変換)\n\n ys = torch.cat([ys,\n torch.ones(1, 1).type_as(src.data).fill_(next_word)], dim=0)\n if next_word == EOS_IDX:\n break\n return ys\n\nclass NMT(object):\n src_vocab: object\n tgt_vocab: object\n\n def __init__(self, src_vocab='kujira', tgt_vocab='python'):\n self.src_vocab = vocab.load_vocab(src_vocab)\n self.tgt_vocab = vocab.load_vocab(tgt_vocab)\n tokenizer = vocab.tokenizer_from_vocab(self.src_vocab)\n self.src_transform = sequential_transforms(tokenizer, #Tokenization\n self.src_vocab, #Numericalization\n tensor_transform) # Add SOS/EOS and create tensor\n\n # パラメータの定義\n self.SRC_VOCAB_SIZE = len(self.src_vocab)\n self.TGT_VOCAB_SIZE = len(self.tgt_vocab)\n self.EMB_SIZE = 512 # BERT の次元に揃えれば良いよ\n self.NHEAD = 8\n self.FFN_HID_DIM = 512\n self.BATCH_SIZE = 128\n self.NUM_ENCODER_LAYERS = 3\n self.NUM_DECODER_LAYERS = 3\n\n # インスタンスの作成\n self.transformer = Seq2SeqTransformer(self.NUM_ENCODER_LAYERS, self.NUM_DECODER_LAYERS, \n self.EMB_SIZE, self.NHEAD, self.SRC_VOCAB_SIZE, self.TGT_VOCAB_SIZE,\n self.FFN_HID_DIM)\n\n # TODO: ?\n for p in self.transformer.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n\n # デバイスの設定\n self.transformer = 
self.transformer.to(DEVICE)\n\n # 損失関数の定義 (クロスエントロピー)\n self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=PAD_IDX)\n\n # オプティマイザの定義 (Adam)\n self.optimizer = torch.optim.Adam(self.transformer.parameters(), lr=0.0001, betas=(0.9, 0.98), eps=1e-9)\n\n\n def load(self, filename='all-model.pt'):\n self.transformer.load_state_dict(torch.load(filename, map_location=DEVICE)) \n\n def translate(self, src_sentence: str):\n self.transformer.eval()\n src = self.src_transform(src_sentence).view(-1, 1)\n num_tokens = src.shape[0]\n src_mask = (torch.zeros(num_tokens, num_tokens)).type(torch.bool)\n tgt_tokens = greedy_decode(\n self.transformer, src, src_mask, max_len=num_tokens + 5, beamsize=5, start_symbol=SOS_IDX).flatten()\n return \" \".join(self.tgt_vocab.lookup_tokens(list(tgt_tokens.cpu().numpy()))).replace(\"<sos>\", \"\").replace(\"<eos>\", \"\")\n\nif __name__ == '__main__':\n nmt = NMT()\n nmt.load('./all-model.pt')\n pred = nmt.translate('もし<A>が偶数のとき')\n print('pred:', pred)"
] | [
[
"torch.sin",
"torch.zeros",
"torch.load",
"torch.nn.Embedding",
"torch.cuda.is_available",
"torch.nn.Dropout",
"torch.nn.CrossEntropyLoss",
"torch.ones",
"torch.nn.TransformerDecoderLayer",
"torch.nn.TransformerDecoder",
"torch.tensor",
"torch.nn.TransformerEncoder",
"torch.arange",
"torch.cos",
"torch.nn.TransformerEncoderLayer",
"torch.nn.Linear",
"torch.cuda.manual_seed",
"torch.manual_seed",
"torch.nn.init.xavier_uniform_"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
lkoelman/python-neo | [
"58a207976fb33a50ea8e42b70d7da73b03474f42",
"58a207976fb33a50ea8e42b70d7da73b03474f42",
"58a207976fb33a50ea8e42b70d7da73b03474f42"
] | [
"neo/io/pynnio.py",
"neo/io/nestio.py",
"neo/test/iotest/test_pynnio.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nModule for reading/writing data from/to legacy PyNN formats.\n\nPyNN is available at http://neuralensemble.org/PyNN\n\nClasses:\n PyNNNumpyIO\n PyNNTextIO\n\nSupported: Read/Write\n\nAuthors: Andrew Davison, Pierre Yger\n\"\"\"\n\nfrom itertools import chain\nimport numpy\nimport quantities as pq\nimport warnings\n\nfrom neo.io.baseio import BaseIO\nfrom neo.core import Segment, AnalogSignal, SpikeTrain\n\ntry:\n unicode\n PY2 = True\nexcept NameError:\n PY2 = False\n\nUNITS_MAP = {\n 'spikes': pq.ms,\n 'v': pq.mV,\n 'gsyn': pq.UnitQuantity('microsiemens', 1e-6 * pq.S, 'uS', 'µS'), # checked\n}\n\n\nclass BasePyNNIO(BaseIO):\n \"\"\"\n Base class for PyNN IO classes\n \"\"\"\n is_readable = True\n is_writable = True\n has_header = True\n is_streameable = False # TODO - correct spelling to \"is_streamable\"\n supported_objects = [Segment, AnalogSignal, SpikeTrain]\n readable_objects = supported_objects\n writeable_objects = supported_objects\n mode = 'file'\n\n def __init__(self, filename=None, **kargs):\n BaseIO.__init__(self, filename, *kargs)\n warnings.warn(\"PyNNTextIO and PyNNNumpyIO will be removed in Neo 0.7.0. \" +\n \"Please contact the Neo developers if this will cause you problems.\",\n DeprecationWarning)\n\n def _read_file_contents(self):\n raise NotImplementedError\n\n def _extract_array(self, data, channel_index):\n idx = numpy.where(data[:, 1] == channel_index)[0]\n return data[idx, 0]\n\n def _determine_units(self, metadata):\n if 'units' in metadata:\n return metadata['units']\n elif 'variable' in metadata and metadata['variable'] in UNITS_MAP:\n return UNITS_MAP[metadata['variable']]\n else:\n raise IOError(\"Cannot determine units\")\n\n def _extract_signals(self, data, metadata):\n\n arr = numpy.vstack(self._extract_array(data, channel_index)\n for channel_index in\n range(metadata['first_index'], metadata['last_index'] + 1))\n if len(arr) > 0:\n signal = AnalogSignal(arr.T,\n units=self._determine_units(metadata),\n sampling_period=metadata['dt'] * pq.ms)\n signal.annotate(label=metadata[\"label\"],\n variable=metadata[\"variable\"])\n return signal\n\n def _extract_spikes(self, data, metadata, channel_index):\n spiketrain = None\n spike_times = self._extract_array(data, channel_index)\n if len(spike_times) > 0:\n spiketrain = SpikeTrain(spike_times, units=pq.ms, t_stop=spike_times.max())\n spiketrain.annotate(label=metadata[\"label\"],\n channel_index=channel_index,\n dt=metadata[\"dt\"])\n return spiketrain\n\n def _write_file_contents(self, data, metadata):\n raise NotImplementedError\n\n def read_segment(self, lazy=False):\n assert not lazy, 'Do not support lazy'\n\n data, metadata = self._read_file_contents()\n annotations = dict((k, metadata.get(k, 'unknown'))\n for k in (\"label\", \"variable\", \"first_id\", \"last_id\"))\n seg = Segment(**annotations)\n if metadata['variable'] == 'spikes':\n for i in range(metadata['first_index'], metadata['last_index'] + 1):\n spiketrain = self._extract_spikes(data, metadata, i)\n if spiketrain is not None:\n seg.spiketrains.append(spiketrain)\n # store dt for SpikeTrains only, as can be retrieved from sampling_period for AnalogSignal\n seg.annotate(dt=metadata['dt'])\n else:\n signal = self._extract_signals(data, metadata)\n if signal is not None:\n seg.analogsignals.append(signal)\n seg.create_many_to_one_relationship()\n return seg\n\n def write_segment(self, segment):\n source = segment.analogsignals or segment.spiketrains\n assert len(source) > 0, \"Segment contains neither analog 
signals nor spike trains.\"\n metadata = segment.annotations.copy()\n s0 = source[0]\n if isinstance(s0, AnalogSignal):\n if len(source) > 1:\n warnings.warn(\"Cannot handle multiple analog signals. Writing only the first.\")\n source = s0.T\n metadata['size'] = s0.shape[1]\n n = source.size\n else:\n metadata['size'] = len(source)\n n = sum(s.size for s in source)\n metadata['first_index'] = 0\n metadata['last_index'] = metadata['size'] - 1\n if 'label' not in metadata:\n metadata['label'] = 'unknown'\n if 'dt' not in metadata: # dt not included in annotations if Segment contains only AnalogSignals\n metadata['dt'] = s0.sampling_period.rescale(pq.ms).magnitude\n metadata['n'] = n\n data = numpy.empty((n, 2))\n # if the 'variable' annotation is a standard one from PyNN, we rescale\n # to use standard PyNN units\n # we take the units from the first element of source and scale all\n # the signals to have the same units\n if 'variable' in segment.annotations:\n units = UNITS_MAP.get(segment.annotations['variable'], source[0].dimensionality)\n else:\n units = source[0].dimensionality\n metadata['variable'] = 'unknown'\n try:\n metadata['units'] = units.unicode\n except AttributeError:\n metadata['units'] = units.u_symbol\n\n start = 0\n for i, signal in enumerate(source): # here signal may be AnalogSignal or SpikeTrain\n end = start + signal.size\n data[start:end, 0] = numpy.array(signal.rescale(units))\n data[start:end, 1] = i * numpy.ones((signal.size,), dtype=float)\n start = end\n self._write_file_contents(data, metadata)\n\n def read_analogsignal(self, lazy=False):\n assert not lazy, 'Do not support lazy'\n\n data, metadata = self._read_file_contents()\n if metadata['variable'] == 'spikes':\n raise TypeError(\"File contains spike data, not analog signals\")\n else:\n signal = self._extract_signals(data, metadata)\n if signal is None:\n raise IndexError(\"File does not contain a signal\")\n else:\n return signal\n\n def read_spiketrain(self, lazy=False, channel_index=0):\n assert not lazy, 'Do not support lazy'\n data, metadata = self._read_file_contents()\n if metadata['variable'] != 'spikes':\n raise TypeError(\"File contains analog signals, not spike data\")\n else:\n spiketrain = self._extract_spikes(data, metadata, channel_index)\n if spiketrain is None:\n raise IndexError(\n \"File does not contain any spikes with channel index %d\" % channel_index)\n else:\n return spiketrain\n\n\nclass PyNNNumpyIO(BasePyNNIO):\n \"\"\"\n (DEPRECATED) Reads/writes data from/to PyNN NumpyBinaryFile format\n \"\"\"\n name = \"PyNN NumpyBinaryFile\"\n extensions = ['npz']\n\n def _read_file_contents(self):\n contents = numpy.load(self.filename)\n data = contents[\"data\"]\n metadata = {}\n for name, value in contents['metadata']:\n try:\n metadata[name] = eval(value)\n except Exception:\n metadata[name] = value\n return data, metadata\n\n def _write_file_contents(self, data, metadata):\n # we explicitly set the dtype to ensure roundtrips preserve file contents exactly\n max_metadata_length = max(chain([len(k) for k in metadata.keys()],\n [len(str(v)) for v in metadata.values()]))\n if PY2:\n dtype = \"S%d\" % max_metadata_length\n else:\n dtype = \"U%d\" % max_metadata_length\n metadata_array = numpy.array(sorted(metadata.items()), dtype)\n numpy.savez(self.filename, data=data, metadata=metadata_array)\n\n\nclass PyNNTextIO(BasePyNNIO):\n \"\"\"\n (DEPRECATED) Reads/writes data from/to PyNN StandardTextFile format\n \"\"\"\n name = \"PyNN StandardTextFile\"\n extensions = ['v', 'ras', 'gsyn']\n\n def 
_read_metadata(self):\n metadata = {}\n with open(self.filename) as f:\n for line in f:\n if line[0] == \"#\":\n name, value = line[1:].strip().split(\"=\")\n name = name.strip()\n try:\n metadata[name] = eval(value)\n except Exception:\n metadata[name] = value.strip()\n else:\n break\n return metadata\n\n def _read_file_contents(self):\n data = numpy.loadtxt(self.filename)\n metadata = self._read_metadata()\n return data, metadata\n\n def _write_file_contents(self, data, metadata):\n with open(self.filename, 'wb') as f:\n for item in sorted(metadata.items()):\n f.write((\"# %s = %s\\n\" % item).encode('utf8'))\n numpy.savetxt(f, data)\n",
"# -*- coding: utf-8 -*-\n\"\"\"\nClass for reading output files from NEST simulations\n( http://www.nest-simulator.org/ ).\nTested with NEST2.10.0\n\nDepends on: numpy, quantities\n\nSupported: Read\n\nAuthors: Julia Sprenger, Maximilian Schmidt, Johanna Senk\n\n\"\"\"\n\n# needed for Python3 compatibility\nfrom __future__ import absolute_import\n\nimport os.path\nimport warnings\nfrom datetime import datetime\nimport numpy as np\nimport quantities as pq\n\nfrom neo.io.baseio import BaseIO\nfrom neo.core import Block, Segment, SpikeTrain, AnalogSignal\n\nvalue_type_dict = {'V': pq.mV,\n 'I': pq.pA,\n 'g': pq.CompoundUnit(\"10^-9*S\"),\n 'no type': pq.dimensionless}\n\n\nclass NestIO(BaseIO):\n \"\"\"\n Class for reading NEST output files. GDF files for the spike data and DAT\n files for analog signals are possible.\n\n Usage:\n >>> from neo.io.nestio import NestIO\n\n >>> files = ['membrane_voltages-1261-0.dat',\n 'spikes-1258-0.gdf']\n >>> r = NestIO(filenames=files)\n >>> seg = r.read_segment(gid_list=[], t_start=400 * pq.ms,\n t_stop=600 * pq.ms,\n id_column_gdf=0, time_column_gdf=1,\n id_column_dat=0, time_column_dat=1,\n value_columns_dat=2)\n \"\"\"\n\n is_readable = True # class supports reading, but not writing\n is_writable = False\n\n supported_objects = [SpikeTrain, AnalogSignal, Segment, Block]\n readable_objects = [SpikeTrain, AnalogSignal, Segment, Block]\n\n has_header = False\n is_streameable = False\n\n write_params = None # writing is not supported\n\n name = 'nest'\n extensions = ['gdf', 'dat']\n mode = 'file'\n\n def __init__(self, filenames=None):\n \"\"\"\n Parameters\n ----------\n filenames: string or list of strings, default=None\n The filename or list of filenames to load.\n \"\"\"\n\n if isinstance(filenames, str):\n filenames = [filenames]\n\n self.filenames = filenames\n self.avail_formats = {}\n self.avail_IOs = {}\n\n for filename in filenames:\n path, ext = os.path.splitext(filename)\n ext = ext.strip('.')\n if ext in self.extensions:\n if ext in self.avail_IOs:\n raise ValueError('Received multiple files with \"%s\" '\n 'extention. Can only load single file of '\n 'this type.' % ext)\n self.avail_IOs[ext] = ColumnIO(filename)\n self.avail_formats[ext] = path\n\n def __read_analogsignals(self, gid_list, time_unit, t_start=None,\n t_stop=None, sampling_period=None,\n id_column=0, time_column=1,\n value_columns=2, value_types=None,\n value_units=None):\n \"\"\"\n Internal function called by read_analogsignal() and read_segment().\n \"\"\"\n\n if 'dat' not in self.avail_formats:\n raise ValueError('Can not load analogsignals. 
No DAT file '\n 'provided.')\n\n # checking gid input parameters\n gid_list, id_column = self._check_input_gids(gid_list, id_column)\n # checking time input parameters\n t_start, t_stop = self._check_input_times(t_start, t_stop,\n mandatory=False)\n\n # checking value input parameters\n (value_columns, value_types, value_units) = \\\n self._check_input_values_parameters(value_columns, value_types,\n value_units)\n\n # defining standard column order for internal usage\n # [id_column, time_column, value_column1, value_column2, ...]\n column_ids = [id_column, time_column] + value_columns\n for i, cid in enumerate(column_ids):\n if cid is None:\n column_ids[i] = -1\n\n # assert that no single column is assigned twice\n column_list = [id_column, time_column] + value_columns\n column_list_no_None = [c for c in column_list if c is not None]\n if len(np.unique(column_list_no_None)) < len(column_list_no_None):\n raise ValueError(\n 'One or more columns have been specified to contain '\n 'the same data. Columns were specified to %s.'\n '' % column_list_no_None)\n\n # extracting condition and sorting parameters for raw data loading\n (condition, condition_column,\n sorting_column) = self._get_conditions_and_sorting(id_column,\n time_column,\n gid_list,\n t_start,\n t_stop)\n # loading raw data columns\n data = self.avail_IOs['dat'].get_columns(\n column_ids=column_ids,\n condition=condition,\n condition_column=condition_column,\n sorting_columns=sorting_column)\n\n sampling_period = self._check_input_sampling_period(sampling_period,\n time_column,\n time_unit,\n data)\n analogsignal_list = []\n\n # extracting complete gid list for anasig generation\n if (gid_list == []) and id_column is not None:\n gid_list = np.unique(data[:, id_column])\n\n # generate analogsignals for each neuron ID\n for i in gid_list:\n selected_ids = self._get_selected_ids(\n i, id_column, time_column, t_start, t_stop, time_unit,\n data)\n\n # extract starting time of analogsignal\n if (time_column is not None) and data.size:\n anasig_start_time = data[selected_ids[0], 1] * time_unit\n else:\n # set t_start equal to sampling_period because NEST starts\n # recording only after 1 sampling_period\n anasig_start_time = 1. * sampling_period\n\n # create one analogsignal per value column requested\n for v_id, value_column in enumerate(value_columns):\n signal = data[\n selected_ids[0]:selected_ids[1], value_column]\n\n # create AnalogSignal objects and annotate them with\n # the neuron ID\n analogsignal_list.append(AnalogSignal(\n signal * value_units[v_id],\n sampling_period=sampling_period,\n t_start=anasig_start_time,\n id=i,\n type=value_types[v_id]))\n # check for correct length of analogsignal\n assert (analogsignal_list[-1].t_stop ==\n anasig_start_time + len(signal) * sampling_period)\n return analogsignal_list\n\n def __read_spiketrains(self, gdf_id_list, time_unit,\n t_start, t_stop, id_column,\n time_column, **args):\n \"\"\"\n Internal function for reading multiple spiketrains at once.\n This function is called by read_spiketrain() and read_segment().\n \"\"\"\n\n if 'gdf' not in self.avail_IOs:\n raise ValueError('Can not load spiketrains. No GDF file provided.')\n\n # assert that the file contains spike times\n if time_column is None:\n raise ValueError('Time column is None. 
No spike times to '\n 'be read in.')\n\n gdf_id_list, id_column = self._check_input_gids(gdf_id_list, id_column)\n\n t_start, t_stop = self._check_input_times(t_start, t_stop,\n mandatory=True)\n\n # assert that no single column is assigned twice\n if id_column == time_column:\n raise ValueError('One or more columns have been specified to '\n 'contain the same data.')\n\n # defining standard column order for internal usage\n # [id_column, time_column, value_column1, value_column2, ...]\n column_ids = [id_column, time_column]\n for i, cid in enumerate(column_ids):\n if cid is None:\n column_ids[i] = -1\n\n (condition, condition_column, sorting_column) = \\\n self._get_conditions_and_sorting(id_column, time_column,\n gdf_id_list, t_start, t_stop)\n\n data = self.avail_IOs['gdf'].get_columns(\n column_ids=column_ids,\n condition=condition,\n condition_column=condition_column,\n sorting_columns=sorting_column)\n\n # create a list of SpikeTrains for all neuron IDs in gdf_id_list\n # assign spike times to neuron IDs if id_column is given\n if id_column is not None:\n if (gdf_id_list == []) and id_column is not None:\n gdf_id_list = np.unique(data[:, id_column])\n\n spiketrain_list = []\n for nid in gdf_id_list:\n selected_ids = self._get_selected_ids(nid, id_column,\n time_column, t_start,\n t_stop, time_unit, data)\n times = data[selected_ids[0]:selected_ids[1], time_column]\n spiketrain_list.append(SpikeTrain(\n\n times, units=time_unit,\n t_start=t_start, t_stop=t_stop,\n id=nid, **args))\n\n # if id_column is not given, all spike times are collected in one\n # spike train with id=None\n else:\n train = data[:, time_column]\n spiketrain_list = [SpikeTrain(train, units=time_unit,\n t_start=t_start, t_stop=t_stop,\n id=None, **args)]\n return spiketrain_list\n\n def _check_input_times(self, t_start, t_stop, mandatory=True):\n \"\"\"\n Checks input times for existence and setting default values if\n necessary.\n\n t_start: pq.quantity.Quantity, start time of the time range to load.\n t_stop: pq.quantity.Quantity, stop time of the time range to load.\n mandatory: bool, if True times can not be None and an error will be\n raised. if False, time values of None will be replaced by\n -infinity or infinity, respectively. default: True.\n \"\"\"\n if t_stop is None:\n if mandatory:\n raise ValueError('No t_start specified.')\n else:\n t_stop = np.inf * pq.s\n if t_start is None:\n if mandatory:\n raise ValueError('No t_stop specified.')\n else:\n t_start = -np.inf * pq.s\n\n for time in (t_start, t_stop):\n if not isinstance(time, pq.quantity.Quantity):\n raise TypeError('Time value (%s) is not a quantity.' 
% time)\n return t_start, t_stop\n\n def _check_input_values_parameters(self, value_columns, value_types,\n value_units):\n \"\"\"\n Checks value parameters for consistency.\n\n value_columns: int, column id containing the value to load.\n value_types: list of strings, type of values.\n value_units: list of units of the value columns.\n\n Returns\n adjusted list of [value_columns, value_types, value_units]\n \"\"\"\n if value_columns is None:\n raise ValueError('No value column provided.')\n if isinstance(value_columns, int):\n value_columns = [value_columns]\n if value_types is None:\n value_types = ['no type'] * len(value_columns)\n elif isinstance(value_types, str):\n value_types = [value_types]\n\n # translating value types into units as far as possible\n if value_units is None:\n short_value_types = [vtype.split('_')[0] for vtype in value_types]\n if not all([svt in value_type_dict for svt in short_value_types]):\n raise ValueError('Can not interpret value types '\n '\"%s\"' % value_types)\n value_units = [value_type_dict[svt] for svt in short_value_types]\n\n # checking for same number of value types, units and columns\n if not (len(value_types) == len(value_units) == len(value_columns)):\n raise ValueError('Length of value types, units and columns does '\n 'not match (%i,%i,%i)' % (len(value_types),\n len(value_units),\n len(value_columns)))\n if not all([isinstance(vunit, pq.UnitQuantity) for vunit in\n value_units]):\n raise ValueError('No value unit or standard value type specified.')\n\n return value_columns, value_types, value_units\n\n def _check_input_gids(self, gid_list, id_column):\n \"\"\"\n Checks gid values and column for consistency.\n\n gid_list: list of int or None, gid to load.\n id_column: int, id of the column containing the gids.\n\n Returns\n adjusted list of [gid_list, id_column].\n \"\"\"\n if gid_list is None:\n gid_list = [gid_list]\n\n if None in gid_list and id_column is not None:\n raise ValueError('No neuron IDs specified but file contains '\n 'neuron IDs in column %s. Specify empty list to '\n 'retrieve spiketrains of all neurons.'\n '' % str(id_column))\n\n if gid_list != [None] and id_column is None:\n raise ValueError('Specified neuron IDs to be %s, but no ID column '\n 'specified.' 
% gid_list)\n return gid_list, id_column\n\n def _check_input_sampling_period(self, sampling_period, time_column,\n time_unit, data):\n \"\"\"\n Checks sampling period, times and time unit for consistency.\n\n sampling_period: pq.quantity.Quantity, sampling period of data to load.\n time_column: int, column id of times in data to load.\n time_unit: pq.quantity.Quantity, unit of time used in the data to load.\n data: numpy array, the data to be loaded / interpreted.\n\n Returns\n pq.quantities.Quantity object, the updated sampling period.\n \"\"\"\n if sampling_period is None:\n if time_column is not None:\n data_sampling = np.unique(\n np.diff(sorted(np.unique(data[:, 1]))))\n if len(data_sampling) > 1:\n raise ValueError('Different sampling distances found in '\n 'data set (%s)' % data_sampling)\n else:\n dt = data_sampling[0]\n else:\n raise ValueError('Can not estimate sampling rate without time '\n 'column id provided.')\n sampling_period = pq.CompoundUnit(str(dt) + '*'\n + time_unit.units.u_symbol)\n elif not isinstance(sampling_period, pq.UnitQuantity):\n raise ValueError(\"sampling_period is not specified as a unit.\")\n return sampling_period\n\n def _get_conditions_and_sorting(self, id_column, time_column, gid_list,\n t_start, t_stop):\n \"\"\"\n Calculates the condition, condition_column and sorting_column based on\n other parameters supplied for loading the data.\n\n id_column: int, id of the column containing gids.\n time_column: int, id of the column containing times.\n gid_list: list of int, gid to be loaded.\n t_start: pq.quantity.Quantity, start of the time range to be loaded.\n t_stop: pq.quantity.Quantity, stop of the time range to be loaded.\n\n Returns\n updated [condition, condition_column, sorting_column].\n \"\"\"\n condition, condition_column = None, None\n sorting_column = []\n curr_id = 0\n if ((gid_list != [None]) and (gid_list is not None)):\n if gid_list != []:\n def condition(x): return x in gid_list\n\n condition_column = id_column\n sorting_column.append(curr_id) # Sorting according to gids first\n curr_id += 1\n if time_column is not None:\n sorting_column.append(curr_id) # Sorting according to time\n curr_id += 1\n elif t_start != -np.inf and t_stop != np.inf:\n warnings.warn('Ignoring t_start and t_stop parameters, because no '\n 'time column id is provided.')\n if sorting_column == []:\n sorting_column = None\n else:\n sorting_column = sorting_column[::-1]\n return condition, condition_column, sorting_column\n\n def _get_selected_ids(self, gid, id_column, time_column, t_start, t_stop,\n time_unit, data):\n \"\"\"\n Calculates the data range to load depending on the selected gid\n and the provided time range (t_start, t_stop)\n\n gid: int, gid to be loaded.\n id_column: int, id of the column containing gids.\n time_column: int, id of the column containing times.\n t_start: pq.quantity.Quantity, start of the time range to load.\n t_stop: pq.quantity.Quantity, stop of the time range to load.\n time_unit: pq.quantity.Quantity, time unit of the data to load.\n data: numpy array, data to load.\n\n Returns\n list of selected gids\n \"\"\"\n gid_ids = np.array([0, data.shape[0]])\n if id_column is not None:\n gid_ids = np.array([np.searchsorted(data[:, 0], gid, side='left'),\n np.searchsorted(data[:, 0], gid, side='right')])\n gid_data = data[gid_ids[0]:gid_ids[1], :]\n\n # select only requested time range\n id_shifts = np.array([0, 0])\n if time_column is not None:\n id_shifts[0] = np.searchsorted(gid_data[:, 1],\n t_start.rescale(\n time_unit).magnitude,\n 
side='left')\n id_shifts[1] = (np.searchsorted(gid_data[:, 1],\n t_stop.rescale(\n time_unit).magnitude,\n side='left') - gid_data.shape[0])\n\n selected_ids = gid_ids + id_shifts\n return selected_ids\n\n def read_block(self, gid_list=None, time_unit=pq.ms, t_start=None,\n t_stop=None, sampling_period=None, id_column_dat=0,\n time_column_dat=1, value_columns_dat=2,\n id_column_gdf=0, time_column_gdf=1, value_types=None,\n value_units=None, lazy=False):\n assert not lazy, 'Do not support lazy'\n\n seg = self.read_segment(gid_list, time_unit, t_start,\n t_stop, sampling_period, id_column_dat,\n time_column_dat, value_columns_dat,\n id_column_gdf, time_column_gdf, value_types,\n value_units)\n blk = Block(file_origin=seg.file_origin, file_datetime=seg.file_datetime)\n blk.segments.append(seg)\n seg.block = blk\n return blk\n\n def read_segment(self, gid_list=None, time_unit=pq.ms, t_start=None,\n t_stop=None, sampling_period=None, id_column_dat=0,\n time_column_dat=1, value_columns_dat=2,\n id_column_gdf=0, time_column_gdf=1, value_types=None,\n value_units=None, lazy=False):\n \"\"\"\n Reads a Segment which contains SpikeTrain(s) with specified neuron IDs\n from the GDF data.\n\n Arguments\n ----------\n gid_list : list, default: None\n A list of GDF IDs of which to return SpikeTrain(s). gid_list must\n be specified if the GDF file contains neuron IDs, the default None\n then raises an error. Specify an empty list [] to retrieve the spike\n trains of all neurons.\n time_unit : Quantity (time), optional, default: quantities.ms\n The time unit of recorded time stamps in DAT as well as GDF files.\n t_start : Quantity (time), optional, default: 0 * pq.ms\n Start time of SpikeTrain.\n t_stop : Quantity (time), default: None\n Stop time of SpikeTrain. t_stop must be specified, the default None\n raises an error.\n sampling_period : Quantity (frequency), optional, default: None\n Sampling period of the recorded data.\n id_column_dat : int, optional, default: 0\n Column index of neuron IDs in the DAT file.\n time_column_dat : int, optional, default: 1\n Column index of time stamps in the DAT file.\n value_columns_dat : int, optional, default: 2\n Column index of the analog values recorded in the DAT file.\n id_column_gdf : int, optional, default: 0\n Column index of neuron IDs in the GDF file.\n time_column_gdf : int, optional, default: 1\n Column index of time stamps in the GDF file.\n value_types : str, optional, default: None\n Nest data type of the analog values recorded, eg.'V_m', 'I', 'g_e'\n value_units : Quantity (amplitude), default: None\n The physical unit of the recorded signal values.\n lazy : bool, optional, default: False\n\n Returns\n -------\n seg : Segment\n The Segment contains one SpikeTrain and one AnalogSignal for\n each ID in gid_list.\n \"\"\"\n assert not lazy, 'Do not support lazy'\n\n if isinstance(gid_list, tuple):\n if gid_list[0] > gid_list[1]:\n raise ValueError('The second entry in gid_list must be '\n 'greater or equal to the first entry.')\n gid_list = range(gid_list[0], gid_list[1] + 1)\n\n # __read_xxx() needs a list of IDs\n if gid_list is None:\n gid_list = [None]\n\n # create an empty Segment\n seg = Segment(file_origin=\",\".join(self.filenames))\n seg.file_datetime = datetime.fromtimestamp(os.stat(self.filenames[0]).st_mtime)\n # todo: rather than take the first file for the timestamp, we should take the oldest\n # in practice, there won't be much difference\n\n # Load analogsignals and attach to Segment\n if 'dat' in self.avail_formats:\n seg.analogsignals 
= self.__read_analogsignals(\n gid_list,\n time_unit,\n t_start,\n t_stop,\n sampling_period=sampling_period,\n id_column=id_column_dat,\n time_column=time_column_dat,\n value_columns=value_columns_dat,\n value_types=value_types,\n value_units=value_units)\n if 'gdf' in self.avail_formats:\n seg.spiketrains = self.__read_spiketrains(\n gid_list,\n time_unit,\n t_start,\n t_stop,\n id_column=id_column_gdf,\n time_column=time_column_gdf)\n\n return seg\n\n def read_analogsignal(self, gid=None, time_unit=pq.ms, t_start=None,\n t_stop=None, sampling_period=None, id_column=0,\n time_column=1, value_column=2, value_type=None,\n value_unit=None, lazy=False):\n \"\"\"\n Reads an AnalogSignal with specified neuron ID from the DAT data.\n\n Arguments\n ----------\n gid : int, default: None\n The GDF ID of the returned SpikeTrain. gdf_id must be specified if\n the GDF file contains neuron IDs, the default None then raises an\n error. Specify an empty list [] to retrieve the spike trains of all\n neurons.\n time_unit : Quantity (time), optional, default: quantities.ms\n The time unit of recorded time stamps.\n t_start : Quantity (time), optional, default: 0 * pq.ms\n Start time of SpikeTrain.\n t_stop : Quantity (time), default: None\n Stop time of SpikeTrain. t_stop must be specified, the default None\n raises an error.\n sampling_period : Quantity (frequency), optional, default: None\n Sampling period of the recorded data.\n id_column : int, optional, default: 0\n Column index of neuron IDs.\n time_column : int, optional, default: 1\n Column index of time stamps.\n value_column : int, optional, default: 2\n Column index of the analog values recorded.\n value_type : str, optional, default: None\n Nest data type of the analog values recorded, eg.'V_m', 'I', 'g_e'.\n value_unit : Quantity (amplitude), default: None\n The physical unit of the recorded signal values.\n lazy : bool, optional, default: False\n\n Returns\n -------\n spiketrain : SpikeTrain\n The requested SpikeTrain object with an annotation 'id'\n corresponding to the gdf_id parameter.\n \"\"\"\n assert not lazy, 'Do not support lazy'\n\n # __read_spiketrains() needs a list of IDs\n return self.__read_analogsignals([gid], time_unit,\n t_start, t_stop,\n sampling_period=sampling_period,\n id_column=id_column,\n time_column=time_column,\n value_columns=value_column,\n value_types=value_type,\n value_units=value_unit)[0]\n\n def read_spiketrain(\n self, gdf_id=None, time_unit=pq.ms, t_start=None, t_stop=None,\n id_column=0, time_column=1, lazy=False, **args):\n \"\"\"\n Reads a SpikeTrain with specified neuron ID from the GDF data.\n\n Arguments\n ----------\n gdf_id : int, default: None\n The GDF ID of the returned SpikeTrain. gdf_id must be specified if\n the GDF file contains neuron IDs. Providing [] loads all available\n IDs.\n time_unit : Quantity (time), optional, default: quantities.ms\n The time unit of recorded time stamps.\n t_start : Quantity (time), default: None\n Start time of SpikeTrain. t_start must be specified.\n t_stop : Quantity (time), default: None\n Stop time of SpikeTrain. 
t_stop must be specified.\n id_column : int, optional, default: 0\n Column index of neuron IDs.\n time_column : int, optional, default: 1\n Column index of time stamps.\n lazy : bool, optional, default: False\n\n Returns\n -------\n spiketrain : SpikeTrain\n The requested SpikeTrain object with an annotation 'id'\n corresponding to the gdf_id parameter.\n \"\"\"\n assert not lazy, 'Do not support lazy'\n\n if (not isinstance(gdf_id, int)) and gdf_id is not None:\n raise ValueError('gdf_id has to be of type int or None.')\n\n if gdf_id is None and id_column is not None:\n raise ValueError('No neuron ID specified but file contains '\n 'neuron IDs in column ' + str(id_column) + '.')\n\n return self.__read_spiketrains([gdf_id], time_unit,\n t_start, t_stop,\n id_column, time_column,\n **args)[0]\n\n\nclass ColumnIO:\n '''\n Class for reading an ASCII file containing multiple columns of data.\n '''\n\n def __init__(self, filename):\n \"\"\"\n filename: string, path to ASCII file to read.\n \"\"\"\n\n self.filename = filename\n\n # read the first line to check the data type (int or float) of the data\n f = open(self.filename)\n line = f.readline()\n\n additional_parameters = {}\n if '.' not in line:\n additional_parameters['dtype'] = np.int32\n\n self.data = np.loadtxt(self.filename, **additional_parameters)\n\n if len(self.data.shape) == 1:\n self.data = self.data[:, np.newaxis]\n\n def get_columns(self, column_ids='all', condition=None,\n condition_column=None, sorting_columns=None):\n \"\"\"\n column_ids : 'all' or list of int, the ids of columns to\n extract.\n condition : None or function, which is applied to each row to evaluate\n if it should be included in the result.\n Needs to return a bool value.\n condition_column : int, id of the column on which the condition\n function is applied to\n sorting_columns : int or list of int, column ids to sort by.\n List entries have to be ordered by increasing sorting\n priority!\n\n Returns\n -------\n numpy array containing the requested data.\n \"\"\"\n\n if column_ids == [] or column_ids == 'all':\n column_ids = range(self.data.shape[-1])\n\n if isinstance(column_ids, (int, float)):\n column_ids = [column_ids]\n column_ids = np.array(column_ids)\n\n if column_ids is not None:\n if max(column_ids) >= len(self.data) - 1:\n raise ValueError('Can not load column ID %i. File contains '\n 'only %i columns' % (max(column_ids),\n len(self.data)))\n\n if sorting_columns is not None:\n if isinstance(sorting_columns, int):\n sorting_columns = [sorting_columns]\n if (max(sorting_columns) >= self.data.shape[1]):\n raise ValueError('Can not sort by column ID %i. File contains '\n 'only %i columns' % (max(sorting_columns),\n self.data.shape[1]))\n\n # Starting with whole dataset being selected for return\n selected_data = self.data\n\n # Apply filter condition to rows\n if condition and (condition_column is None):\n raise ValueError('Filter condition provided, but no '\n 'condition_column ID provided')\n elif (condition_column is not None) and (condition is None):\n warnings.warn('Condition column ID provided, but no condition '\n 'given. 
No filtering will be performed.')\n\n elif (condition is not None) and (condition_column is not None):\n condition_function = np.vectorize(condition)\n mask = condition_function(\n selected_data[\n :, condition_column]).astype(bool)\n\n selected_data = selected_data[mask, :]\n\n # Apply sorting if requested\n if sorting_columns is not None:\n values_to_sort = selected_data[:, sorting_columns].T\n ordered_ids = np.lexsort(tuple(values_to_sort[i] for i in\n range(len(values_to_sort))))\n selected_data = selected_data[ordered_ids, :]\n\n # Select only requested columns\n selected_data = selected_data[:, column_ids]\n\n return selected_data\n",
"# -*- coding: utf-8 -*-\n\"\"\"\nTests of the neo.io.pynnio.PyNNNumpyIO and neo.io.pynnio.PyNNTextIO classes\n\"\"\"\n\n# needed for python 3 compatibility\nfrom __future__ import absolute_import, division\n\nimport os\n\nimport unittest\n\nimport numpy as np\nimport quantities as pq\n\nfrom neo.core import Segment, AnalogSignal, SpikeTrain\nfrom neo.io import PyNNNumpyIO, PyNNTextIO\nfrom numpy.testing import assert_array_equal\nfrom neo.test.tools import assert_arrays_equal, assert_file_contents_equal\nfrom neo.test.iotest.common_io_test import BaseTestIO\n\n# class CommonTestPyNNNumpyIO(BaseTestIO, unittest.TestCase):\n# ioclass = PyNNNumpyIO\n\nNCELLS = 5\n\n\nclass CommonTestPyNNTextIO(BaseTestIO, unittest.TestCase):\n ioclass = PyNNTextIO\n read_and_write_is_bijective = False\n\n\ndef read_test_file(filename):\n contents = np.load(filename)\n data = contents[\"data\"]\n metadata = {}\n for name, value in contents['metadata']:\n try:\n metadata[name] = eval(value)\n except Exception:\n metadata[name] = value\n return data, metadata\n\n\nread_test_file.__test__ = False\n\n\nclass BaseTestPyNNIO(object):\n __test__ = False\n\n def tearDown(self):\n if os.path.exists(self.test_file):\n os.remove(self.test_file)\n\n def test_write_segment(self):\n in_ = self.io_cls(self.test_file)\n write_test_file = \"write_test.%s\" % self.file_extension\n out = self.io_cls(write_test_file)\n out.write_segment(in_.read_segment(lazy=False))\n assert_file_contents_equal(self.test_file, write_test_file)\n if os.path.exists(write_test_file):\n os.remove(write_test_file)\n\n def build_test_data(self, variable='v'):\n metadata = {\n 'size': NCELLS,\n 'first_index': 0,\n 'first_id': 0,\n 'n': 505,\n 'variable': variable,\n 'last_id': NCELLS - 1,\n 'last_index': NCELLS - 1,\n 'dt': 0.1,\n 'label': \"population0\",\n }\n if variable == 'v':\n metadata['units'] = 'mV'\n elif variable == 'spikes':\n metadata['units'] = 'ms'\n data = np.empty((505, 2))\n for i in range(NCELLS):\n # signal\n data[i * 101:(i + 1) * 101, 0] = np.arange(i, i + 101, dtype=float)\n # index\n data[i * 101:(i + 1) * 101, 1] = i * np.ones((101,), dtype=float)\n return data, metadata\n\n build_test_data.__test__ = False\n\n\nclass BaseTestPyNNIO_Signals(BaseTestPyNNIO):\n def setUp(self):\n self.test_file = \"test_file_v.%s\" % self.file_extension\n self.write_test_file(\"v\")\n\n def test_read_segment_containing_analogsignals_using_eager_cascade(self):\n # eager == not lazy\n io = self.io_cls(self.test_file)\n segment = io.read_segment(lazy=False)\n self.assertIsInstance(segment, Segment)\n self.assertEqual(len(segment.analogsignals), 1)\n\n as0 = segment.analogsignals[0]\n self.assertIsInstance(as0, AnalogSignal)\n self.assertEqual(as0.shape, (101, NCELLS))\n assert_array_equal(as0[:, 0],\n AnalogSignal(np.arange(0, 101, dtype=float),\n sampling_period=0.1 * pq.ms,\n t_start=0 * pq.s,\n units=pq.mV))\n as4 = as0[:, 4]\n self.assertIsInstance(as4, AnalogSignal)\n assert_array_equal(as4,\n AnalogSignal(np.arange(4, 105, dtype=float),\n sampling_period=0.1 * pq.ms,\n t_start=0 * pq.s,\n units=pq.mV))\n # test annotations (stuff from file metadata)\n\n def test_read_analogsignal_using_eager(self):\n io = self.io_cls(self.test_file)\n sig = io.read_analogsignal(lazy=False)\n self.assertIsInstance(sig, AnalogSignal)\n assert_array_equal(sig[:, 3],\n AnalogSignal(np.arange(3, 104, dtype=float),\n sampling_period=0.1 * pq.ms,\n t_start=0 * pq.s,\n units=pq.mV))\n # should test annotations: 'channel_index', etc.\n\n def 
test_read_spiketrain_should_fail_with_analogsignal_file(self):\n io = self.io_cls(self.test_file)\n self.assertRaises(TypeError, io.read_spiketrain, channel_index=0)\n\n\nclass BaseTestPyNNIO_Spikes(BaseTestPyNNIO):\n def setUp(self):\n self.test_file = \"test_file_spikes.%s\" % self.file_extension\n self.write_test_file(\"spikes\")\n\n def test_read_segment_containing_spiketrains_using_eager_cascade(self):\n io = self.io_cls(self.test_file)\n segment = io.read_segment(lazy=False)\n self.assertIsInstance(segment, Segment)\n self.assertEqual(len(segment.spiketrains), NCELLS)\n st0 = segment.spiketrains[0]\n self.assertIsInstance(st0, SpikeTrain)\n assert_arrays_equal(st0,\n SpikeTrain(np.arange(0, 101, dtype=float),\n t_start=0 * pq.s,\n t_stop=101 * pq.ms,\n units=pq.ms))\n st4 = segment.spiketrains[4]\n self.assertIsInstance(st4, SpikeTrain)\n assert_arrays_equal(st4,\n SpikeTrain(np.arange(4, 105, dtype=float),\n t_start=0 * pq.s,\n t_stop=105 * pq.ms,\n units=pq.ms))\n # test annotations (stuff from file metadata)\n\n def test_read_spiketrain_using_eager(self):\n io = self.io_cls(self.test_file)\n st3 = io.read_spiketrain(lazy=False, channel_index=3)\n self.assertIsInstance(st3, SpikeTrain)\n assert_arrays_equal(st3,\n SpikeTrain(np.arange(3, 104, dtype=float),\n t_start=0 * pq.s,\n t_stop=104 * pq.s,\n units=pq.ms))\n # should test annotations: 'channel_index', etc.\n\n def test_read_analogsignal_should_fail_with_spiketrain_file(self):\n io = self.io_cls(self.test_file)\n self.assertRaises(TypeError, io.read_analogsignal, channel_index=2)\n\n\nclass BaseTestPyNNNumpyIO(object):\n io_cls = PyNNNumpyIO\n file_extension = \"npz\"\n\n def write_test_file(self, variable='v', check=False):\n data, metadata = self.build_test_data(variable)\n metadata_array = np.array(sorted(metadata.items()))\n np.savez(self.test_file, data=data, metadata=metadata_array)\n if check:\n data1, metadata1 = read_test_file(self.test_file)\n assert metadata == metadata1, \"%s != %s\" % (metadata, metadata1)\n assert data.shape == data1.shape == (505, 2), \\\n \"%s, %s, (505, 2)\" % (data.shape, data1.shape)\n assert (data == data1).all()\n assert metadata[\"n\"] == 505\n\n write_test_file.__test__ = False\n\n\nclass BaseTestPyNNTextIO(object):\n io_cls = PyNNTextIO\n file_extension = \"txt\"\n\n def write_test_file(self, variable='v', check=False):\n data, metadata = self.build_test_data(variable)\n with open(self.test_file, 'wb') as f:\n for item in sorted(metadata.items()):\n f.write((\"# %s = %s\\n\" % item).encode('utf8'))\n np.savetxt(f, data)\n if check:\n raise NotImplementedError\n\n write_test_file.__test__ = False\n\n\nclass TestPyNNNumpyIO_Signals(BaseTestPyNNNumpyIO, BaseTestPyNNIO_Signals,\n unittest.TestCase):\n __test__ = True\n\n\nclass TestPyNNNumpyIO_Spikes(BaseTestPyNNNumpyIO, BaseTestPyNNIO_Spikes,\n unittest.TestCase):\n __test__ = True\n\n\nclass TestPyNNTextIO_Signals(BaseTestPyNNTextIO, BaseTestPyNNIO_Signals,\n unittest.TestCase):\n __test__ = True\n\n\nclass TestPyNNTextIO_Spikes(BaseTestPyNNTextIO, BaseTestPyNNIO_Spikes,\n unittest.TestCase):\n __test__ = True\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"numpy.savez",
"numpy.empty",
"numpy.ones",
"numpy.savetxt",
"numpy.load",
"numpy.where",
"numpy.loadtxt"
],
[
"numpy.unique",
"numpy.vectorize",
"numpy.searchsorted",
"numpy.array",
"numpy.loadtxt"
],
[
"numpy.savez",
"numpy.arange",
"numpy.ones",
"numpy.savetxt",
"numpy.load",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ilyasdc/pycro-manager | [
"5f0153e8a90104eb8715348c6eb22c4d8fdee477"
] | [
"pycromanager/zmq.py"
] | [
"import json\nimport re\nimport time\nimport typing\nimport warnings\nimport inspect\nimport numpy as np\nimport zmq\nfrom weakref import WeakSet\nimport threading\nimport copy\nimport sys\nfrom threading import Lock\n\n\nclass DataSocket:\n \"\"\"\n Wrapper for ZMQ socket that sends and recieves dictionaries\n Includes ZMQ client, push, and pull sockets\n \"\"\"\n\n def __init__(self, context, port, type, debug=False, ip_address=\"127.0.0.1\"):\n # request reply socket\n self._socket = context.socket(type)\n self._debug = debug\n # store these as wekrefs so that circular refs dont prevent garbage collection\n self._java_objects = set()\n self._port = port\n self._close_lock = Lock()\n self._closed = False\n if type == zmq.PUSH:\n if debug:\n print(\"binding {}\".format(port))\n self._socket.bind(\"tcp://{}:{}\".format(ip_address, port))\n else:\n if debug:\n print(\"connecting {}\".format(port))\n self._socket.connect(\"tcp://{}:{}\".format(ip_address, port))\n\n def _register_java_object(self, object):\n self._java_objects.add(object)\n\n def _convert_np_to_python(self, d):\n \"\"\"\n recursively search dictionary and convert any values from numpy floats/ints to\n python floats/ints so they can be json serialized\n :return:\n \"\"\"\n if type(d) != dict:\n return\n for k, v in d.items():\n if isinstance(v, dict):\n self._convert_np_to_python(v)\n elif type(v) == list:\n for e in v:\n self._convert_np_to_python(e)\n elif np.issubdtype(type(v), np.floating):\n d[k] = float(v)\n elif np.issubdtype(type(v), np.integer):\n d[k] = int(v)\n\n def _make_array_identifier(self, entry):\n \"\"\"\n make a string to replace bytes data or numpy array in message, which encode data type if numpy\n \"\"\"\n # make up a random 32 bit int as the identifier\n # TODO: change to simple counting\n identifier = np.random.randint(-(2 ** 31), 2 ** 31 - 1, 1, dtype=np.int32)[0]\n # '@{some_number}_{bytes_per_pixel}'\n # if its a numpy array, include bytes per pixel, otherwise just interpret it as raw byts\n # TODO : I thinkg its always raw binary and the argument deserialization types handles conversion to java arrays\n # This definitely could use some cleanup and simplification. 
Probably best to encode the data type here and remove\n # argument deserialization types\n return identifier, \"@\" + str(int(identifier)) + \"_\" + str(\n 0 if isinstance(entry, bytes) else entry.dtype.itemsize\n )\n\n def _remove_bytes(self, bytes_data, structure):\n if isinstance(structure, list):\n for i, entry in enumerate(structure):\n if isinstance(entry, bytes) or isinstance(entry, np.ndarray):\n int_id, str_id = self._make_array_identifier(entry)\n structure[i] = str_id\n bytes_data.append((int_id, entry))\n elif isinstance(entry, list) or isinstance(entry, dict):\n self._remove_bytes(bytes_data, entry)\n elif isinstance(structure, dict):\n for key in structure.keys():\n entry = structure[key]\n if isinstance(entry, bytes) or isinstance(entry, np.ndarray):\n int_id, str_id = self._make_array_identifier(entry)\n structure[key] = str_id\n bytes_data.append((int_id, entry))\n elif isinstance(entry, list) or isinstance(entry, dict):\n self._remove_bytes(bytes_data, structure[key])\n\n def send(self, message, timeout=0):\n if message is None:\n message = {}\n # make sure any np types convert to python types so they can be json serialized\n self._convert_np_to_python(message)\n # Send binary data in seperate messages so it doesnt need to be json serialized\n bytes_data = []\n self._remove_bytes(bytes_data, message)\n message_string = json.dumps(message)\n if self._debug:\n print(\"DEBUG, sending: {}\".format(message))\n # convert keys to byte array\n key_vals = [(identifier.tobytes(), value) for identifier, value in bytes_data]\n message_parts = [bytes(message_string, \"iso-8859-1\")] + [\n item for keyval in key_vals for item in keyval\n ]\n if timeout == 0:\n self._socket.send_multipart(message_parts)\n else:\n start = time.time()\n while 1000 * (time.time() - start) < timeout:\n try:\n self._socket.send_multipart(message_parts, flags=zmq.NOBLOCK)\n return True\n except zmq.ZMQError:\n pass # ignore, keep trying\n return False\n\n def _replace_bytes(self, dict_or_list, hash, value):\n \"\"\"\n Replace placeholders for byte arrays in JSON message with their actual values\n \"\"\"\n if isinstance(dict_or_list, dict):\n for key in dict_or_list:\n if isinstance(dict_or_list[key], str) and \"@\" in dict_or_list[key]:\n hash_in_message = int(\n dict_or_list[key].split(\"@\")[1], 16\n ) # interpret hex hash string\n if hash == hash_in_message:\n dict_or_list[key] = value\n return\n elif isinstance(dict_or_list[key], list) or isinstance(dict_or_list[key], dict):\n self._replace_bytes(dict_or_list[key], hash, value)\n elif isinstance(dict_or_list, list):\n for i, entry in enumerate(dict_or_list):\n if isinstance(entry, str) and \"@\" in dict_or_list[entry]:\n hash_in_message = int(entry.split(\"@\")[1], 16) # interpret hex hash string\n if hash == hash_in_message:\n dict_or_list[i] = value\n return\n elif isinstance(entry, list) or isinstance(entry, dict):\n self._replace_bytes(entry, hash, value)\n\n def receive(self, timeout=0):\n if timeout == 0:\n reply = self._socket.recv_multipart()\n else:\n start = time.time()\n reply = None\n while 1000 * (time.time() - start) < timeout:\n try:\n reply = self._socket.recv_multipart(flags=zmq.NOBLOCK)\n if reply is not None:\n break\n except zmq.ZMQError:\n pass # ignore, keep trying\n if reply is None:\n return reply\n message = json.loads(reply[0].decode(\"iso-8859-1\"))\n # replace any byte data placeholders with the byte data itself\n for i in np.arange(1, len(reply), 2):\n # messages come in pairs: first is hash, second it byte data\n 
identity_hash = int.from_bytes(reply[i], byteorder=sys.byteorder)\n value = reply[i + 1]\n self._replace_bytes(message, identity_hash, value)\n\n if self._debug:\n print(\"DEBUG, recieved: {}\".format(message))\n self._check_exception(message)\n return message\n\n def _check_exception(self, response):\n if \"type\" in response and response[\"type\"] == \"exception\":\n raise Exception(response[\"value\"])\n\n def __del__(self):\n self.close() # make sure it closes properly\n\n def close(self):\n with self._close_lock:\n if not self._closed:\n for java_object in self._java_objects:\n java_object._close()\n del java_object #potentially redundant, trying to fix closing race condition\n self._java_objects = None\n self._socket.close()\n while not self._socket.closed:\n time.sleep(0.01)\n self._socket = None\n if self._debug:\n print('closed socket {}'.format(self._port))\n self._closed = True\n\n\nclass Bridge:\n \"\"\"\n Create an object which acts as a client to a corresponding server (running in a Java process).\n This enables construction and interaction with arbitrary java objects. Each bridge object should\n be run using a context manager (i.e. `with Bridge() as b:`) or bridge.close() should be explicitly\n called when finished\n \"\"\"\n\n DEFAULT_PORT = 4827\n DEFAULT_TIMEOUT = 500\n _EXPECTED_ZMQ_SERVER_VERSION = \"4.2.0\"\n\n thread_local = threading.local()\n\n def __new__(cls, *args, **kwargs):\n \"\"\"\n Only one instance of Bridge per a thread\n \"\"\"\n port = kwargs.get('port', Bridge.DEFAULT_PORT)\n if hasattr(Bridge.thread_local, \"bridge\") and Bridge.thread_local.bridge is not None and port in Bridge.thread_local.bridge:\n Bridge.thread_local.bridge_count[port] += 1\n return Bridge.thread_local.bridge[port]\n else:\n if (not hasattr(Bridge.thread_local, \"bridge_count\")) or Bridge.thread_local.bridge_count is None:\n Bridge.thread_local.bridge_count = {}\n Bridge.thread_local.bridge_count[port] = 1\n return super(Bridge, cls).__new__(cls)\n\n def __init__(\n self, port: int=DEFAULT_PORT, convert_camel_case: bool=True,\n debug: bool=False, ip_address: str=\"127.0.0.1\", timeout: int=DEFAULT_TIMEOUT\n ):\n \"\"\"\n Parameters\n ----------\n port : int\n The port on which the bridge operates\n convert_camel_case : bool\n If True, methods for Java objects that are passed across the bridge\n will have their names converted from camel case to underscores. i.e. class.methodName()\n becomes class.method_name()\n debug : bool\n If True print helpful stuff for debugging\n \"\"\"\n self._ip_address = ip_address\n self._port = port\n self._closed = False\n if not hasattr(self, \"_context\"):\n Bridge._context = zmq.Context()\n # if hasattr(self.thread_local, \"bridge\") and port in self.thread_local.bridge:\n # return ### What was this supposed to do?\n if not hasattr(Bridge.thread_local, \"bridge\") or Bridge.thread_local.bridge is None:\n Bridge.thread_local.bridge = {}\n Bridge.thread_local.bridge[port] = self # cache a thread-local version of the bridge\n\n self._convert_camel_case = convert_camel_case\n self._debug = debug\n self._timeout = timeout\n self._master_socket = DataSocket(\n self._context, port, zmq.REQ, debug=debug, ip_address=self._ip_address\n )\n self._master_socket.send({\"command\": \"connect\", \"debug\": debug})\n self._class_factory = _JavaClassFactory()\n reply_json = self._master_socket.receive(timeout=timeout)\n if reply_json is None:\n raise TimeoutError(\n f\"Socket timed out after {timeout} milliseconds. 
Is Micro-Manager running and is the ZMQ server on {port} option enabled?\"\n )\n if reply_json[\"type\"] == \"exception\":\n raise Exception(reply_json[\"message\"])\n if \"version\" not in reply_json:\n reply_json[\"version\"] = \"2.0.0\" # before version was added\n if reply_json[\"version\"] != self._EXPECTED_ZMQ_SERVER_VERSION:\n warnings.warn(\n \"Version mistmatch between Java ZMQ server and Python client. \"\n \"\\nJava ZMQ server version: {}\\nPython client expected version: {}\"\n \"\\n To fix, update to BOTH latest pycromanager and latest micro-manager nightly build\".format(\n reply_json[\"version\"], self._EXPECTED_ZMQ_SERVER_VERSION\n )\n )\n\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.close()\n\n def close(self):\n Bridge.thread_local.bridge_count[self._port] -= 1\n if Bridge.thread_local.bridge_count[self._port] == 0:\n del Bridge.thread_local.bridge_count[self._port]\n del Bridge.thread_local.bridge[self._port]\n self._master_socket.close()\n self._master_socket = None\n self._closed = True\n\n if len(Bridge.thread_local.bridge) == 0:\n Bridge.thread_local.bridge = None\n Bridge.thread_local.bridge_count = None\n\n\n def get_class(self, serialized_object) -> typing.Type[\"JavaObjectShadow\"]:\n return self._class_factory.create(\n serialized_object, convert_camel_case=self._convert_camel_case\n )\n\n def construct_java_object(self, classpath: str, new_socket: bool=False, args: list=None):\n \"\"\"\n Create a new instance of a an object on the Java side. Returns a Python \"Shadow\" of the object, which behaves\n just like the object on the Java side (i.e. same methods, fields). Methods of the object can be inferred at\n runtime using iPython autocomplete\n\n Parameters\n ----------\n classpath : str\n Full classpath of the java object\n new_socket : bool\n If True, will create new java object on a new port so that blocking calls will not interfere\n with the bridges master port\n args : list\n list of arguments to the constructor, if applicable\n Returns\n -------\n\n Python \"Shadow\" to the Java object\n \"\"\"\n if args is None:\n args = []\n # classpath_minus_class = '.'.join(classpath.split('.')[:-1])\n # query the server for constructors matching this classpath\n message = {\"command\": \"get-constructors\", \"classpath\": classpath}\n self._master_socket.send(message)\n constructors = self._master_socket.receive()[\"api\"]\n\n methods_with_name = [m for m in constructors if m[\"name\"] == classpath]\n if len(methods_with_name) == 0:\n raise Exception(\"No valid java constructor found with classpath {}\".format(classpath))\n valid_method_spec, deserialize_types = _check_method_args(methods_with_name, args)\n\n # Calling a constructor, rather than getting return from method\n message = {\n \"command\": \"constructor\",\n \"classpath\": classpath,\n \"argument-types\": valid_method_spec[\"arguments\"],\n \"argument-deserialization-types\": deserialize_types,\n \"arguments\": _package_arguments(valid_method_spec, args),\n }\n if new_socket:\n message[\"new-port\"] = True\n self._master_socket.send(message)\n serialized_object = self._master_socket.receive()\n if new_socket:\n socket = DataSocket(\n self._context, serialized_object[\"port\"], zmq.REQ, ip_address=self._ip_address\n )\n else:\n socket = self._master_socket\n return self._class_factory.create(\n serialized_object, convert_camel_case=self._convert_camel_case\n )(socket=socket, serialized_object=serialized_object, bridge=self)\n\n def get_java_class(self, 
classpath: str, new_socket: bool=False):\n \"\"\"\n Get an an object corresponding to a java class, for example to be used\n when calling static methods on the class directly\n\n Parameters\n ----------\n classpath : str\n Full classpath of the java object\n new_socket : bool\n If True, will create new java object on a new port so that blocking calls will not interfere\n with the bridges master port\n Returns\n -------\n\n Python \"Shadow\" to the Java class\n \"\"\"\n message = {\"command\": \"get-class\", \"classpath\": classpath}\n if new_socket:\n message[\"new-port\"] = True\n self._master_socket.send(message)\n serialized_object = self._master_socket.receive()\n\n if new_socket:\n socket = DataSocket(\n self._context, serialized_object[\"port\"], zmq.REQ, ip_address=self._ip_address\n )\n else:\n socket = self._master_socket\n return self._class_factory.create(\n serialized_object, convert_camel_case=self._convert_camel_case\n )(socket=socket, serialized_object=serialized_object, bridge=self)\n\n def _connect_push(self, port):\n \"\"\"\n Connect a push socket on the given port\n :param port:\n :return:\n \"\"\"\n return DataSocket(\n self._context, port, zmq.PUSH, debug=self._debug, ip_address=self._ip_address\n )\n\n def _connect_pull(self, port):\n \"\"\"\n Connect to a pull socket on the given port\n :param port:\n :return:\n \"\"\"\n return DataSocket(\n self._context, port, zmq.PULL, debug=self._debug, ip_address=self._ip_address\n )\n\n def get_magellan(self):\n \"\"\"\n return an instance of the Micro-Magellan API\n \"\"\"\n return self.construct_java_object(\"org.micromanager.magellan.api.MagellanAPI\")\n\n def get_core(self):\n \"\"\"\n Connect to CMMCore and return object that has its methods\n\n :return: Python \"shadow\" object for micromanager core\n \"\"\"\n if hasattr(self, \"core\"):\n return getattr(self, \"core\")\n self.core = self.construct_java_object(\"mmcorej.CMMCore\")\n return self.core\n\n def get_studio(self):\n \"\"\"\n return an instance of the Studio object that provides access to micro-manager Java APIs\n \"\"\"\n return self.construct_java_object(\"org.micromanager.Studio\")\n\n\nclass _JavaClassFactory:\n \"\"\"\n This class is responsible for generating subclasses of JavaObjectShadow. 
Each generated class is kept in a `dict`.\n If a given class has already been generate once it will be returns from the cache rather than re-generating it.\n \"\"\"\n\n def __init__(self):\n self.classes = {}\n\n def create(\n self, serialized_obj: dict, convert_camel_case: bool = True\n ) -> typing.Type[\"JavaObjectShadow\"]:\n \"\"\"Create a class (or return a class from the cache) based on the contents of `serialized_object` message.\"\"\"\n if serialized_obj[\"class\"] in self.classes.keys(): # Return a cached class\n return self.classes[serialized_obj[\"class\"]]\n else: # Generate a new class since it wasn't found in the cache.\n _java_class: str = serialized_obj[\"class\"]\n python_class_name_translation = _java_class.replace(\n \".\", \"_\"\n ) # Having periods in the name would be problematic.\n _interfaces = serialized_obj[\"interfaces\"]\n static_attributes = {\"_java_class\": _java_class, \"_interfaces\": _interfaces}\n\n fields = {} # Create a dict of field names with getter and setter funcs.\n for field in serialized_obj[\"fields\"]:\n fields[field] = property(\n fget=lambda instance, Field=field: instance._access_field(Field),\n fset=lambda instance, val, Field=field: instance._set_field(Field, val),\n )\n\n methods = {} # Create a dict of methods for the class by name.\n methodSpecs = serialized_obj[\"api\"]\n method_names = set([m[\"name\"] for m in methodSpecs])\n # parse method descriptions to make python stand ins\n for method_name in method_names:\n params, methods_with_name, method_name_modified = _parse_arg_names(\n methodSpecs, method_name, convert_camel_case\n )\n return_type = methods_with_name[0][\"return-type\"]\n fn = lambda instance, *args, signatures_list=tuple(\n methods_with_name\n ): instance._translate_call(signatures_list, args, static = _java_class == 'java.lang.Class')\n fn.__name__ = method_name_modified\n fn.__doc__ = \"{}.{}: A dynamically generated Java method.\".format(\n _java_class, method_name_modified\n )\n sig = inspect.signature(fn)\n params = [\n inspect.Parameter(\"self\", inspect.Parameter.POSITIONAL_ONLY)\n ] + params # Add `self` as the first argument.\n return_type = (\n _JAVA_TYPE_NAME_TO_PYTHON_TYPE[return_type]\n if return_type in _JAVA_TYPE_NAME_TO_PYTHON_TYPE\n else return_type\n )\n fn.__signature__ = sig.replace(parameters=params, return_annotation=return_type)\n methods[method_name_modified] = fn\n\n newclass = type( # Dynamically create a class to shadow a java class.\n python_class_name_translation, # Name, based on the original java name\n (JavaObjectShadow,), # Inheritance\n {\n \"__init__\": lambda instance, socket, serialized_object, bridge: JavaObjectShadow.__init__(\n instance, socket, serialized_object, bridge\n ),\n **static_attributes,\n **fields,\n **methods,\n },\n )\n\n self.classes[_java_class] = newclass\n return newclass\n\n\nclass JavaObjectShadow:\n \"\"\"\n Generic class for serving as a python interface for a java class using a zmq server backend\n \"\"\"\n\n _interfaces = (\n None # Subclasses should fill these out. 
This class should never be directly instantiated.\n )\n _java_class = None\n\n def __init__(self, socket, serialized_object, bridge: Bridge):\n self._socket = socket\n self._hash_code = serialized_object[\"hash-code\"]\n self._bridge = bridge\n # register objects with bridge so it can tell Java side to release them before socket shuts down\n socket._register_java_object(self)\n self._closed = False\n # atexit.register(self._close)\n self._close_lock = Lock()\n\n def _close(self):\n with self._close_lock:\n if self._closed:\n return\n if not hasattr(self, \"_hash_code\"):\n return # constructor didnt properly finish, nothing to clean up on java side\n message = {\"command\": \"destructor\", \"hash-code\": self._hash_code}\n if self._bridge._debug:\n \"closing: {}\".format(self)\n self._socket.send(message)\n reply_json = self._socket.receive()\n if reply_json[\"type\"] == \"exception\":\n raise Exception(reply_json[\"value\"])\n self._closed = True\n\n def __del__(self):\n \"\"\"\n Tell java side this object is garbage collected so it can do the same if needed\n \"\"\"\n self._close()\n\n def _access_field(self, name):\n \"\"\"\n Return a python version of the field with a given name\n :return:\n \"\"\"\n message = {\"command\": \"get-field\", \"hash-code\": self._hash_code, \"name\": name}\n self._socket.send(message)\n return self._deserialize(self._socket.receive())\n\n def _set_field(self, name, value):\n \"\"\"\n Return a python version of the field with a given name\n :return:\n \"\"\"\n message = {\n \"command\": \"set-field\",\n \"hash-code\": self._hash_code,\n \"name\": name,\n \"value\": _serialize_arg(value),\n }\n self._socket.send(message)\n reply = self._deserialize(self._socket.receive())\n\n def _translate_call(self, method_specs, fn_args: tuple, static: bool):\n \"\"\"\n Translate to appropriate Java method, call it, and return converted python version of its result\n Parameters\n ----------\n args :\n args[0] is list of dictionaries of possible method specifications\n kwargs :\n hold possible polymorphic args, or none\n \"\"\"\n # args that are none are placeholders to allow for polymorphism and not considered part of the spec\n # fn_args = [a for a in fn_args if a is not None]\n valid_method_spec, deserialize_types = _check_method_args(method_specs, fn_args)\n # args are good, make call through socket, casting the correct type if needed (e.g. int to float)\n message = {\n \"command\": \"run-method\",\n \"static\": static,\n \"hash-code\": self._hash_code,\n \"name\": valid_method_spec[\"name\"],\n \"argument-types\": valid_method_spec[\"arguments\"],\n \"argument-deserialization-types\": deserialize_types,\n }\n message[\"arguments\"] = _package_arguments(valid_method_spec, fn_args)\n\n if self._bridge._closed:\n raise Exception('The Bridge used to create this has been closed. 
Are you trying to call it outside of a \"with\" block?')\n self._socket.send(message)\n recieved = self._socket.receive()\n return self._deserialize(recieved)\n\n def _deserialize(self, json_return):\n \"\"\"\n method_spec :\n info about the method that called it\n reply :\n bytes that represents return\n Returns\n -------\n An appropriate python type of the converted value\n \"\"\"\n if json_return[\"type\"] == \"exception\":\n raise Exception(json_return[\"value\"])\n elif json_return[\"type\"] == \"null\":\n return None\n elif json_return[\"type\"] == \"primitive\":\n return json_return[\"value\"]\n elif json_return[\"type\"] == \"string\":\n return json_return[\"value\"]\n elif json_return[\"type\"] == \"list\":\n return [self._deserialize(obj) for obj in json_return[\"value\"]]\n elif json_return[\"type\"] == \"object\":\n if json_return[\"class\"] == \"JSONObject\":\n return json.loads(json_return[\"value\"])\n else:\n raise Exception(\"Unrecognized return class\")\n elif json_return[\"type\"] == \"unserialized-object\":\n # inherit socket from parent object\n return self._bridge.get_class(json_return)(\n socket=self._socket, serialized_object=json_return, bridge=self._bridge\n )\n else:\n return deserialize_array(json_return)\n\n\ndef deserialize_array(json_return):\n \"\"\"\n Convert a serialized java array to the appropriate numpy type\n Parameters\n ----------\n json_return\n \"\"\"\n if json_return[\"type\"] in [\"byte-array\", \"int-array\", \"short-array\", \"float-array\"]:\n decoded = json_return[\"value\"]\n if json_return[\"type\"] == \"byte-array\":\n return np.frombuffer(decoded, dtype=\"=u1\").copy()\n elif json_return[\"type\"] == \"double-array\":\n return np.frombuffer(decoded, dtype=\"=f8\").copy()\n elif json_return[\"type\"] == \"int-array\":\n return np.frombuffer(decoded, dtype=\"=u4\").copy()\n elif json_return[\"type\"] == \"short-array\":\n return np.frombuffer(decoded, dtype=\"=u2\").copy()\n elif json_return[\"type\"] == \"float-array\":\n return np.frombuffer(decoded, dtype=\"=f4\").copy()\n\n\ndef _package_arguments(valid_method_spec, fn_args):\n \"\"\"\n Serialize function arguments and also include description of their Java types\n\n Parameters\n ----------\n valid_method_spec:\n fn_args :\n \"\"\"\n arguments = []\n for arg_type, arg_val in zip(valid_method_spec[\"arguments\"], fn_args):\n if isinstance(arg_val, JavaObjectShadow):\n arguments.append(_serialize_arg(arg_val))\n elif _JAVA_TYPE_NAME_TO_PYTHON_TYPE[arg_type] is object:\n arguments.append(_serialize_arg(arg_val))\n elif arg_val is None:\n arguments.append(_serialize_arg(arg_val))\n elif isinstance(arg_val, np.ndarray):\n arguments.append(_serialize_arg(arg_val))\n else:\n arguments.append(_serialize_arg(_JAVA_TYPE_NAME_TO_PYTHON_TYPE[arg_type](arg_val)))\n return arguments\n\n\ndef _serialize_arg(arg):\n if arg is None:\n return None\n if type(arg) in [bool, str, int, float]:\n return arg # json handles serialization\n elif type(arg) == np.ndarray:\n return arg.tobytes()\n elif isinstance(arg, JavaObjectShadow):\n return {\"hash-code\": arg._hash_code}\n else:\n raise Exception(\"Unknown argumetn type\")\n\n\ndef _check_single_method_spec(method_spec, fn_args):\n \"\"\"\n Check if a single method specificiation is compatible with the arguments the function recieved\n\n Parameters\n ----------\n method_spec :\n fn_args :\n \"\"\"\n if len(method_spec[\"arguments\"]) != len(fn_args):\n return False\n for arg_java_type, arg_val in zip(method_spec[\"arguments\"], fn_args):\n if 
isinstance(arg_val, JavaObjectShadow):\n if arg_java_type not in arg_val._interfaces:\n # check that it shadows object of the correct type\n return False\n elif type(arg_val) == np.ndarray:\n # For ND Arrays, need to make sure data types match\n if (\n arg_java_type != \"java.lang.Object\"\n and arg_val.dtype.type != _JAVA_ARRAY_TYPE_NUMPY_DTYPE[arg_java_type]\n ):\n return False\n elif not any(\n [\n isinstance(arg_val, acceptable_type)\n for acceptable_type in _JAVA_TYPE_NAME_TO_CASTABLE_PYTHON_TYPE[arg_java_type]\n ]\n ) and not (\n arg_val is None and arg_java_type in _JAVA_NON_PRIMITIVES\n ): # could be null if its an object\n # if a type that gets converted\n return False\n return True\n\n\ndef _check_method_args(method_specs, fn_args):\n \"\"\"\n Compare python arguments to java arguments to find correct function to call\n\n Parameters\n ----------\n method_specs :\n fn_args :\n\n Returns\n -------\n one of the method_specs that is valid\n \"\"\"\n valid_method_spec = None\n for method_spec in method_specs:\n if _check_single_method_spec(method_spec, fn_args):\n valid_method_spec = method_spec\n break\n\n if valid_method_spec is None:\n raise Exception(\n \"Incorrect arguments. \\nExpected {} \\nGot {}\".format(\n \" or \".join([\", \".join(method_spec[\"arguments\"]) for method_spec in method_specs]),\n \", \".join([str(type(a)) for a in fn_args]),\n )\n )\n\n # subclass NDArrays to the appropriate data type so they dont get incorrectly reconstructed as objects\n valid_method_spec = copy.deepcopy(valid_method_spec)\n deserialize_types = []\n for java_arg_class, python_arg_val in zip(valid_method_spec[\"arguments\"], fn_args):\n if isinstance(python_arg_val, np.ndarray):\n deserialize_types.append(\n [\n ja\n for ja, npdt in zip(\n _JAVA_ARRAY_TYPE_NUMPY_DTYPE.keys(), _JAVA_ARRAY_TYPE_NUMPY_DTYPE.values()\n )\n if python_arg_val.dtype.type == npdt\n ][0]\n )\n else:\n deserialize_types.append(java_arg_class)\n\n return valid_method_spec, deserialize_types\n\n\ndef _parse_arg_names(methods, method_name, convert_camel_case):\n method_name_modified = (\n _camel_case_2_snake_case(method_name) if convert_camel_case else method_name\n )\n # all methods with this name and different argument lists\n methods_with_name = [m for m in methods if m[\"name\"] == method_name]\n min_required_args = (\n 0\n if len(methods_with_name) == 1 and len(methods_with_name[0][\"arguments\"]) == 0\n else min([len(m[\"arguments\"]) for m in methods_with_name])\n )\n # sort with largest number of args last so lambda at end gets max num args\n methods_with_name.sort(key=lambda val: len(val[\"arguments\"]))\n method = methods_with_name[-1] # We only need to evaluate the overload with the most arguments.\n params = []\n unique_argument_names = []\n for arg_index, typ in enumerate(method[\"arguments\"]):\n hint = _CLASS_NAME_MAPPING[typ] if typ in _CLASS_NAME_MAPPING else \"object\"\n python_type = (\n _JAVA_TYPE_NAME_TO_PYTHON_TYPE[typ] if typ in _JAVA_TYPE_NAME_TO_PYTHON_TYPE else typ\n )\n if hint in unique_argument_names: # append numbers to end so arg hints have unique names\n i = 1\n while hint + str(i) in unique_argument_names:\n i += 1\n arg_name = hint + str(i)\n else:\n arg_name = hint\n unique_argument_names.append(arg_name)\n # this is how overloading is handled for now, by making default arguments as none, but\n # it might be better to explicitly compare argument types\n if arg_index >= min_required_args:\n default_arg_value = None\n else:\n default_arg_value = inspect.Parameter.empty\n params.append(\n 
inspect.Parameter(\n name=arg_name,\n kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,\n default=default_arg_value,\n annotation=python_type,\n )\n )\n return params, methods_with_name, method_name_modified\n\n\ndef _camel_case_2_snake_case(name):\n s1 = re.sub(\"(.)([A-Z][a-z]+)\", r\"\\1_\\2\", name)\n return re.sub(\"([a-z0-9])([A-Z])\", r\"\\1_\\2\", s1).lower()\n\n# Used for generating type hints in arguments\n_CLASS_NAME_MAPPING = {\n \"byte[]\": \"uint8array\",\n \"double[]\": \"float64_array\",\n \"int[]\": \"uint32_array\",\n \"short[]\": \"int16_array\",\n \"char[]\": \"int16_array\",\n \"float[]\": \"int16_array\",\n \"long[]\": \"int16_array\",\n \"java.lang.String\": \"string\",\n \"boolean\": \"boolean\",\n \"double\": \"float\",\n \"float\": \"float\",\n \"int\": \"int\",\n \"long\": \"int\",\n \"short\": \"int\",\n \"void\": \"void\",\n}\n#Used for deserializing java arrarys into numpy arrays\n_JAVA_ARRAY_TYPE_NUMPY_DTYPE = {\n \"boolean[]\": np.bool,\n \"byte[]\": np.uint8,\n \"short[]\": np.int16,\n \"char[]\": np.uint16,\n \"float[]\": np.float32,\n \"double[]\": np.float64,\n \"int[]\": np.int32,\n \"long[]\": np.int64,\n}\n#used for figuring our which java methods to call and if python args match\n_JAVA_TYPE_NAME_TO_PYTHON_TYPE = {\n \"boolean\": bool,\n \"double\": float,\n \"float\": float,\n #maybe could make these more specific to array type?\n \"byte[]\": np.ndarray,\n \"short[]\": np.ndarray,\n \"double[]\": np.ndarray,\n \"int[]\": np.ndarray,\n \"char[]\": np.ndarray,\n \"float[]\": np.ndarray,\n \"long[]\": np.ndarray,\n \"int\": int,\n \"java.lang.String\": str,\n \"long\": int,\n \"short\": int,\n \"char\": int,\n \"byte\": int,\n \"void\": None,\n \"java.lang.Object\": object,\n}\n# type conversions that allow for autocasting\n_JAVA_TYPE_NAME_TO_CASTABLE_PYTHON_TYPE = {\n \"boolean\": {bool},\n \"byte[]\": {np.ndarray},\n \"double\": {float, int},\n \"double[]\": {np.ndarray},\n \"float\": {float},\n \"int\": {int},\n \"int[]\": {np.ndarray},\n \"java.lang.String\": {str},\n \"long\": {int},\n \"short\": {int},\n \"char\": {int},\n \"byte\": {int},\n \"void\": {None},\n \"java.lang.Object\": {object},\n}\n_JAVA_NON_PRIMITIVES = {\"byte[]\", \"double[]\", \"int[]\", \"short[]\", \"char[]\", \"long[]\", \"boolean[]\",\n \"java.lang.String\", \"java.lang.Object\"}\n\nif __name__ == \"__main__\":\n # Test basic bridge operations\n import traceback\n\n b = Bridge()\n try:\n s = b.get_studio()\n except:\n traceback.print_exc()\n try:\n c = b.get_core()\n except:\n traceback.print_exc()\n a = 1\n"
] | [
[
"numpy.frombuffer",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
GFDRR/mobility_app | [
"27285a0691fabcc2cede6772a04bb98d29e636da"
] | [
"app2.py"
] | [
"import streamlit as st\nimport pandas as pd\nimport seaborn as sns\nimport pylab as plt\nimport datetime as dt\n#import geopandas as gpd\n\ndf = pd.read_csv('/Users/nicholasjones/Desktop/code/wbg-location-data/notebooks/nick/df_india_may9.csv')\ndf.ds = pd.to_datetime(df.ds)\ndf = df.set_index('ds')\ndf['datetime'] = df.index.copy()\n\n## Header\n\nst.title('Mobility trends of states in India')\nst.write('This app visualizes mobility trends for states in India, based on the Facebook movement range maps data.')\n\ndefault_states = ['Gujarat','NCT of Delhi','West Bengal','Rajasthan','Tamil Nadu','Maharashtra','Bihar']\nstates = st.multiselect('Select a state',df.polygon_name.unique())\n\n# Line plot\n\ncolors = 'rgbycmkrgbycmkrgbycmkrgbycmk'\n\nf, ax = plt.subplots(figsize = [9,9])\nfor background_state in df.polygon_name.unique():\n sns.lineplot(x=df.index[df.polygon_name == background_state], y=df[\"all_day_bing_tiles_visited_relative_change\"][df.polygon_name == background_state], color = 'grey', alpha = 0.3, linewidth = 1)\nfor n, state in enumerate(list(states)):\n\tcol = colors[n]\n\tax = sns.lineplot(x=df.index[df.polygon_name == state], y=\"all_day_bing_tiles_visited_relative_change\", color = col,data=df[df.polygon_name == state], linewidth = 4)\nplt.axvline(dt.datetime(2020, 3, 22),linestyle='--', alpha = 0.5)\nplt.axvline(dt.datetime(2020, 3, 24),linestyle='--', alpha = 0.5)\nplt.title('Percent users remaining in home grid cell all day', fontsize = 16);\n \nst.write(f)\n\ndf\n\n## Map\n\ngdf = gpd.read_file('/Users/nicholasjones/Desktop/code/data/FB/India/gadm36_IND_shp/gadm36_IND_1.shp')\ngdf = gdf[['NAME_1','geometry']]\n\nincome_data = pd.read_csv('/Users/nicholasjones/Desktop/code/data/FB/India/NSDP_per_capita.csv',names=['state','nsdp_USD'])\nincome_data = income_data.dropna()\nincome_data.nsdp_USD = [x[4:] for x in income_data.nsdp_USD]\nincome_data.nsdp_USD = income_data.nsdp_USD.str.replace(',','')\nincome_data.nsdp_USD = income_data.nsdp_USD.astype(int)\n\ngdf = gpd.GeoDataFrame(df.merge(gdf, left_on='polygon_name', right_on = 'NAME_1'))\ngdf = gdf[['NAME_1','all_day_bing_tiles_visited_relative_change','all_day_ratio_single_tile_users','geometry','datetime']]\ngdf.head(1)\n\nmydate = st.selectbox('Select a date',['2020-03-05','2020-03-22','2020-04-29'])\nf = gdf[gdf.datetime == mydate].plot(column = 'all_day_bing_tiles_visited_relative_change')\nst.pyplot()\n"
] | [
[
"pandas.read_csv",
"pandas.to_datetime"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
ozacas/asxtrade | [
"a3645ae526bfc7a546fdf2a39520feda99e3390a"
] | [
"src/ingest_financials.py"
] | [
"#!/usr/bin/python3\n\"\"\"\nResponsible for ingesting data related to the business performance over time. Data is placed into the asx_company_financial_metric\ncollection, ready for the core viewer app to use. Stocks whose financial details have been retrieved in the past month are skipped.\n\"\"\"\nimport pymongo\nimport argparse\nimport yfinance as yf\nimport time\nfrom utils import read_config\nimport numpy as np\nimport pandas as pd\nfrom datetime import datetime, timedelta\nfrom bson.objectid import ObjectId\n\n\ndef melt_dataframes(dfs: tuple) -> pd.DataFrame:\n result = None\n for df in filter(lambda df: df is not None and len(df) > 0, dfs):\n df[\"metric\"] = df.index\n melted = pd.melt(df, id_vars=(\"metric\"), var_name=\"date\")\n melted = melted.dropna(axis=0, how=\"any\")\n if len(melted) == 0:\n continue\n # print(melted)\n # print(melted.shape)\n if result is None:\n result = melted\n else:\n result = result.append(melted)\n if result is not None and \"date\" in result.columns:\n # print(result)\n result[\"date\"] = pd.to_datetime(\n result[\"date\"], infer_datetime_format=True\n ) # format=\"%Y-%m-%d\")\n # print(result)\n return result\n\n\ndef desired_stocks():\n available_stocks = set(db.asx_company_details.distinct(\"asx_code\"))\n print(f\"Found {len(available_stocks)} available stocks.\")\n gen_time = datetime.today() - timedelta(days=30)\n month_ago = ObjectId.from_datetime(gen_time)\n recently_updated_stocks = set(\n [\n rec[\"asx_code\"]\n for rec in db.asx_company_financial_metrics.find(\n {\"_id\": {\"$gte\": month_ago}}\n )\n ]\n )\n\n ret = available_stocks.difference(recently_updated_stocks)\n print(f\"Found {len(ret)} desired stocks to process.\")\n return ret\n\n\ndef update_all_metrics(df: pd.DataFrame, asx_code: str) -> int:\n \"\"\"\n Add (or update) all financial metrics (ie. 
rows) for the specified asx_code in the specified dataframe\n :rtype: the number of records updated/created is returned\n \"\"\"\n print(f\"Updating {len(df)} financial metrics for {asx_code}\")\n n = 0\n for t in df.itertuples():\n d = {\n \"metric\": t.metric,\n \"date\": t.date,\n \"value\": t.value,\n \"asx_code\": t.asx_code,\n }\n assert t.asx_code == asx_code\n result = db.asx_company_financial_metrics.update_one(\n {\"asx_code\": asx_code, \"date\": t.date, \"metric\": t.metric},\n {\"$set\": d},\n upsert=True,\n )\n assert result is not None\n assert isinstance(result, pymongo.results.UpdateResult)\n assert result.matched_count == 1 or result.upserted_id is not None\n n += 1\n return n\n\n\ndef fetch_metrics(asx_code: str) -> pd.DataFrame:\n \"\"\"\n Using the excellent yfinance, we fetch all possible metrics of business performance for the specified stock code.\n Returns a dataframe (possibly empty or none) representing each metric and its datapoints as separate rows\n \"\"\"\n assert len(asx_code) >= 3\n ticker = yf.Ticker(asx_code + \".AX\")\n cashflow_df = ticker.cashflow\n financial_df = ticker.financials\n earnings_df = ticker.earnings\n if set(earnings_df.columns) == set([\"Earnings\", \"Revenue\"]):\n earnings_df.index = earnings_df.index.map(\n str\n ) # convert years to str (maybe int)\n earnings_df = earnings_df.transpose()\n\n # print(earnings_df)\n balance_sheet_df = ticker.balance_sheet\n melted_df = melt_dataframes(\n (cashflow_df, financial_df, earnings_df, balance_sheet_df)\n )\n return melted_df\n\n\ndef make_asx_prices_dict(new_quote: tuple, asx_code: str) -> dict:\n #print(new_quote)\n\n d = {\n \"asx_code\": asx_code,\n \"fetch_date\": new_quote.Index,\n \"volume\": new_quote.Volume,\n \"last_price\": new_quote.Close,\n \"day_low_price\": new_quote.Low,\n \"day_high_price\": new_quote.High,\n \"open_price\": new_quote.Open,\n \"error_code\": \"\",\n \"error_descr\": \"\",\n # we dont set nan fields so that existing values (if any) are used ie. 
merge with existing data\n # \"annual_dividend_yield\": np.nan, # no available data from yf.Ticker.history() although may be available elsewhere, but for now set to missing\n # \"annual_daily_volume\": np.nan,\n # \"bid_price\": np.nan,\n \"change_price\": new_quote.change_price,\n \"change_in_percent\": new_quote.change_in_percent,\n }\n return d\n\n\ndef fill_stock_quote_gaps(db, stock_to_fetch: str, force=False) -> int:\n assert db is not None\n assert len(stock_to_fetch) >= 3\n ticker = yf.Ticker(stock_to_fetch + \".AX\")\n df = ticker.history(period=\"max\")\n df.index = [d.strftime(\"%Y-%m-%d\") for d in df.index]\n # print(df)\n available_dates = set(df.index)\n available_quotes = list(db.asx_prices.find({\"asx_code\": stock_to_fetch}))\n quoted_dates = set(\n [q[\"fetch_date\"] for q in available_quotes if not np.isnan(q[\"last_price\"])]\n )\n assert set(df.columns) == set(\n [\"Open\", \"High\", \"Low\", \"Close\", \"Volume\", \"Dividends\", \"Stock Splits\"]\n )\n dates_to_fill = (\n available_dates.difference(quoted_dates) if not force else available_dates\n )\n print(\n \"Got {} existing daily quotes for {}, found {} yfinance daily quotes, gap filling for {} dates (force={})\".format(\n len(available_quotes), stock_to_fetch, len(df), len(dates_to_fill), force\n )\n )\n if len(dates_to_fill) < 1:\n return 0\n\n df[\"change_price\"] = df[\"Close\"].diff()\n df[\"change_in_percent\"] = df[\"Close\"].pct_change() * 100.0\n gap_quotes_df = df.filter(dates_to_fill, axis=0)\n # print(df)\n n = 0\n for new_quote in gap_quotes_df.itertuples():\n d = make_asx_prices_dict(new_quote, stock_to_fetch)\n result = db.asx_prices.update_one(\n {\"fetch_date\": d[\"fetch_date\"], \"asx_code\": d[\"asx_code\"]},\n {\"$set\": d},\n upsert=True,\n )\n assert result is not None\n\n # assert result.modified_count == 1 or result.upserted_id is not None\n n += 1\n assert n == len(gap_quotes_df)\n return n\n\n\nif __name__ == \"__main__\":\n args = argparse.ArgumentParser(\n description=\"Update financial performance metrics for ASX stocks using yfinance\"\n )\n args.add_argument(\n \"--config\",\n help=\"Configuration file to use [config.json]\",\n type=str,\n default=\"config.json\",\n )\n args.add_argument(\n \"--fill-gaps\",\n help=\"Fill dates with no existing quotes for each stock (use --debug for a particular stock)\",\n action=\"store_true\",\n )\n args.add_argument(\"--fail-fast\", help=\"Stop on first error\", action=\"store_true\")\n args.add_argument(\n \"--delay\", help=\"Delay between stocks in seconds [30]\", type=int, default=30\n )\n args.add_argument(\"--force\", help=\"Overwrite existing data (if any)\", action=\"store_true\")\n args.add_argument(\n \"--debug\",\n help=\"Try to fetch specified stock (for debugging)\",\n type=str,\n required=False,\n default=None,\n )\n a = args.parse_args()\n config, password = read_config(a.config)\n m = config.get(\"mongo\")\n mongo = pymongo.MongoClient(\n m.get(\"host\"), m.get(\"port\"), username=m.get(\"user\"), password=password\n )\n db = mongo[m.get(\"db\")]\n\n stock_codes = desired_stocks() if not a.debug else set([a.debug])\n print(f\"Updating financial metrics for {len(stock_codes)} stocks\")\n for asx_code in sorted(stock_codes):\n print(f\"Processing stock {asx_code}\")\n try:\n melted_df = fetch_metrics(asx_code)\n if melted_df is None or len(melted_df) < 1:\n raise ValueError(f\"No data available for {asx_code}... 
skipping\")\n melted_df[\"asx_code\"] = asx_code\n ret = update_all_metrics(melted_df, asx_code)\n assert ret == len(melted_df)\n if a.fill_gaps:\n fill_stock_quote_gaps(db, asx_code, force=a.force)\n # FALLTHRU...\n time.sleep(a.delay)\n except Exception as e:\n print(f\"WARNING: unable to download financials for {asx_code}\")\n print(str(e))\n if a.fail_fast:\n raise e\n\n exit(0)\n"
] | [
[
"numpy.isnan",
"pandas.to_datetime",
"pandas.melt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
f0k/scipy | [
"3145a226339b14bbc22f2e984848e05def7659c5",
"3145a226339b14bbc22f2e984848e05def7659c5",
"3145a226339b14bbc22f2e984848e05def7659c5",
"3145a226339b14bbc22f2e984848e05def7659c5"
] | [
"scipy/interpolate/polyint.py",
"scipy/io/idl.py",
"scipy/sparse/linalg/isolve/tests/test_iterative.py",
"scipy/interpolate/tests/test_polyint.py"
] | [
"from __future__ import division, print_function, absolute_import\n\nimport numpy as np\nfrom scipy.misc import factorial\n\nfrom scipy.lib.six.moves import xrange\n\n__all__ = [\"KroghInterpolator\", \"krogh_interpolate\", \"BarycentricInterpolator\", \"barycentric_interpolate\", \"PiecewisePolynomial\", \"piecewise_polynomial_interpolate\",\"approximate_taylor_polynomial\", \"pchip\"]\n\nclass KroghInterpolator(object):\n \"\"\"\n The interpolating polynomial for a set of points\n\n Constructs a polynomial that passes through a given set of points,\n optionally with specified derivatives at those points.\n Allows evaluation of the polynomial and all its derivatives.\n For reasons of numerical stability, this function does not compute\n the coefficients of the polynomial, although they can be obtained\n by evaluating all the derivatives.\n\n Be aware that the algorithms implemented here are not necessarily\n the most numerically stable known. Moreover, even in a world of\n exact computation, unless the x coordinates are chosen very\n carefully - Chebyshev zeros (e.g. cos(i*pi/n)) are a good choice -\n polynomial interpolation itself is a very ill-conditioned process\n due to the Runge phenomenon. In general, even with well-chosen\n x values, degrees higher than about thirty cause problems with\n numerical instability in this code.\n\n Based on [1]_.\n\n Parameters\n ----------\n xi : array_like, length N\n Known x-coordinates\n yi : array_like, N by R\n Known y-coordinates, interpreted as vectors of length R,\n or scalars if R=1. When an xi occurs two or more times in\n a row, the corresponding yi's represent derivative values.\n\n References\n ----------\n .. [1] Krogh, \"Efficient Algorithms for Polynomial Interpolation\n and Numerical Differentiation\", 1970.\n\n \"\"\"\n def __init__(self, xi, yi):\n \"\"\"Construct an interpolator passing through the specified points\n\n The polynomial passes through all the pairs (xi,yi). One may additionally\n specify a number of derivatives at each point xi; this is done by\n repeating the value xi and specifying the derivatives as successive\n yi values.\n\n Parameters\n ----------\n xi : array-like, length N\n known x-coordinates\n yi : array-like, N by R\n known y-coordinates, interpreted as vectors of length R,\n or scalars if R=1. When an xi occurs two or more times in\n a row, the corresponding yi's represent derivative values.\n\n Examples\n --------\n To produce a polynomial that is zero at 0 and 1 and has\n derivative 2 at 0, call\n\n >>> KroghInterpolator([0,0,1],[0,2,0])\n\n This constructs the quadratic 2*X**2-2*X. 
The derivative condition\n is indicated by the repeated zero in the xi array; the corresponding\n yi values are 0, the function value, and 2, the derivative value.\n\n For another example, given xi, yi, and a derivative ypi for each\n point, appropriate arrays can be constructed as:\n\n >>> xi_k, yi_k = np.repeat(xi, 2), np.ravel(np.dstack((yi,ypi)))\n >>> KroghInterpolator(xi_k, yi_k)\n\n To produce a vector-valued polynomial, supply a higher-dimensional\n array for yi:\n\n >>> KroghInterpolator([0,1],[[2,3],[4,5]])\n\n This constructs a linear polynomial giving (2,3) at 0 and (4,5) at 1.\n\n \"\"\"\n self.xi = np.asarray(xi)\n self.yi = np.asarray(yi)\n if len(self.yi.shape)==1:\n self.vector_valued = False\n self.yi = self.yi[:,np.newaxis]\n elif len(self.yi.shape)>2:\n raise ValueError(\"y coordinates must be either scalars or vectors\")\n else:\n self.vector_valued = True\n\n n = len(xi)\n self.n = n\n nn, r = self.yi.shape\n if nn!=n:\n raise ValueError(\"%d x values provided and %d y values; must be equal\" % (n, nn))\n self.r = r\n\n c = np.zeros((n+1,r))\n c[0] = yi[0]\n Vk = np.zeros((n,r))\n for k in xrange(1,n):\n s = 0\n while s<=k and xi[k-s]==xi[k]:\n s += 1\n s -= 1\n Vk[0] = yi[k]/float(factorial(s))\n for i in xrange(k-s):\n if xi[i] == xi[k]:\n raise ValueError(\"Elements if `xi` can't be equal.\")\n if s==0:\n Vk[i+1] = (c[i]-Vk[i])/(xi[i]-xi[k])\n else:\n Vk[i+1] = (Vk[i+1]-Vk[i])/(xi[i]-xi[k])\n c[k] = Vk[k-s]\n self.c = c\n\n def __call__(self,x):\n \"\"\"Evaluate the polynomial at the point x\n\n Parameters\n ----------\n x : scalar or array-like of length N\n\n Returns\n -------\n y : scalar, array of length R, array of length N, or array of length N by R\n If x is a scalar, returns either a vector or a scalar depending on\n whether the interpolator is vector-valued or scalar-valued.\n If x is a vector, returns a vector of values.\n \"\"\"\n if _isscalar(x):\n scalar = True\n m = 1\n else:\n scalar = False\n m = len(x)\n x = np.asarray(x)\n\n n = self.n\n pi = 1\n p = np.zeros((m,self.r))\n p += self.c[0,np.newaxis,:]\n for k in xrange(1,n):\n w = x - self.xi[k-1]\n pi = w*pi\n p = p + np.multiply.outer(pi,self.c[k])\n if not self.vector_valued:\n if scalar:\n return p[0,0]\n else:\n return p[:,0]\n else:\n if scalar:\n return p[0]\n else:\n return p\n\n def derivatives(self,x,der=None):\n \"\"\"\n Evaluate many derivatives of the polynomial at the point x\n\n Produce an array of all derivative values at the point x.\n\n Parameters\n ----------\n x : scalar or array_like of length N\n Point or points at which to evaluate the derivatives\n\n der : None or integer\n How many derivatives to extract; None for all potentially\n nonzero derivatives (that is a number equal to the number\n of points). This number includes the function value as 0th\n derivative.\n\n Returns\n -------\n d : ndarray\n If the interpolator's values are R-dimensional then the\n returned array will be der by N by R. 
If x is a scalar,\n the middle dimension will be dropped; if R is 1 then the\n last dimension will be dropped.\n\n Examples\n --------\n >>> KroghInterpolator([0,0,0],[1,2,3]).derivatives(0)\n array([1.0,2.0,3.0])\n >>> KroghInterpolator([0,0,0],[1,2,3]).derivatives([0,0])\n array([[1.0,1.0],\n [2.0,2.0],\n [3.0,3.0]])\n\n \"\"\"\n if _isscalar(x):\n scalar = True\n m = 1\n else:\n scalar = False\n m = len(x)\n x = np.asarray(x)\n\n n = self.n\n r = self.r\n\n if der is None:\n der = self.n\n dern = min(self.n,der)\n pi = np.zeros((n,m))\n w = np.zeros((n,m))\n pi[0] = 1\n p = np.zeros((m,self.r))\n p += self.c[0,np.newaxis,:]\n\n for k in xrange(1,n):\n w[k-1] = x - self.xi[k-1]\n pi[k] = w[k-1]*pi[k-1]\n p += np.multiply.outer(pi[k],self.c[k])\n\n cn = np.zeros((max(der,n+1),m,r))\n cn[:n+1,...] += self.c[:n+1,np.newaxis,:]\n cn[0] = p\n for k in xrange(1,n):\n for i in xrange(1,n-k+1):\n pi[i] = w[k+i-1]*pi[i-1]+pi[i]\n cn[k] = cn[k]+pi[i,:,np.newaxis]*cn[k+i]\n cn[k]*=factorial(k)\n\n cn[n,...] = 0\n if not self.vector_valued:\n if scalar:\n return cn[:der,0,0]\n else:\n return cn[:der,:,0]\n else:\n if scalar:\n return cn[:der,0]\n else:\n return cn[:der]\n def derivative(self,x,der):\n \"\"\"\n Evaluate one derivative of the polynomial at the point x\n\n Parameters\n ----------\n x : scalar or array_like of length N\n Point or points at which to evaluate the derivatives\n\n der : None or integer\n Which derivative to extract. This number includes the\n function value as 0th derivative.\n\n Returns\n -------\n d : ndarray\n If the interpolator's values are R-dimensional then the\n returned array will be N by R. If x is a scalar,\n the middle dimension will be dropped; if R is 1 then the\n last dimension will be dropped.\n\n Notes\n -----\n This is computed by evaluating all derivatives up to the desired\n one (using self.derivatives()) and then discarding the rest.\n\n \"\"\"\n return self.derivatives(x,der=der+1)[der]\n\ndef krogh_interpolate(xi,yi,x,der=0):\n \"\"\"\n Convenience function for polynomial interpolation.\n\n Constructs a polynomial that passes through a given set of points,\n optionally with specified derivatives at those points.\n Evaluates the polynomial or some of its derivatives.\n For reasons of numerical stability, this function does not compute\n the coefficients of the polynomial, although they can be obtained\n by evaluating all the derivatives.\n\n Be aware that the algorithms implemented here are not necessarily\n the most numerically stable known. Moreover, even in a world of\n exact computation, unless the x coordinates are chosen very\n carefully - Chebyshev zeros (e.g. cos(i*pi/n)) are a good choice -\n polynomial interpolation itself is a very ill-conditioned process\n due to the Runge phenomenon. In general, even with well-chosen\n x values, degrees higher than about thirty cause problems with\n numerical instability in this code.\n\n Based on Krogh 1970, \"Efficient Algorithms for Polynomial Interpolation\n and Numerical Differentiation\"\n\n The polynomial passes through all the pairs (xi,yi). 
One may additionally\n specify a number of derivatives at each point xi; this is done by\n repeating the value xi and specifying the derivatives as successive\n yi values.\n\n Parameters\n ----------\n xi : array_like, length N\n known x-coordinates\n yi : array_like, N by R\n known y-coordinates, interpreted as vectors of length R,\n or scalars if R=1\n x : scalar or array_like of length N\n Point or points at which to evaluate the derivatives\n der : integer or list\n How many derivatives to extract; None for all potentially\n nonzero derivatives (that is a number equal to the number\n of points), or a list of derivatives to extract. This number\n includes the function value as 0th derivative.\n\n Returns\n -------\n d : ndarray\n If the interpolator's values are R-dimensional then the\n returned array will be the number of derivatives by N by R.\n If x is a scalar, the middle dimension will be dropped; if\n the yi are scalars then the last dimension will be dropped.\n\n Notes\n -----\n Construction of the interpolating polynomial is a relatively expensive\n process. If you want to evaluate it repeatedly consider using the class\n KroghInterpolator (which is what this function uses).\n\n \"\"\"\n P = KroghInterpolator(xi, yi)\n if der==0:\n return P(x)\n elif _isscalar(der):\n return P.derivative(x,der=der)\n else:\n return P.derivatives(x,der=np.amax(der)+1)[der]\n\n\n\n\ndef approximate_taylor_polynomial(f,x,degree,scale,order=None):\n \"\"\"\n Estimate the Taylor polynomial of f at x by polynomial fitting.\n\n Parameters\n ----------\n f : callable\n The function whose Taylor polynomial is sought. Should accept\n a vector of x values.\n x : scalar\n The point at which the polynomial is to be evaluated.\n degree : int\n The degree of the Taylor polynomial\n scale : scalar\n The width of the interval to use to evaluate the Taylor polynomial.\n Function values spread over a range this wide are used to fit the\n polynomial. Must be chosen carefully.\n order : int or None\n The order of the polynomial to be used in the fitting; f will be\n evaluated ``order+1`` times. If None, use `degree`.\n\n Returns\n -------\n p : poly1d instance\n The Taylor polynomial (translated to the origin, so that\n for example p(0)=f(x)).\n\n Notes\n -----\n The appropriate choice of \"scale\" is a trade-off; too large and the\n function differs from its Taylor polynomial too much to get a good\n answer, too small and round-off errors overwhelm the higher-order terms.\n The algorithm used becomes numerically unstable around order 30 even\n under ideal circumstances.\n\n Choosing order somewhat larger than degree may improve the higher-order\n terms.\n\n \"\"\"\n if order is None:\n order=degree\n\n n = order+1\n # Choose n points that cluster near the endpoints of the interval in\n # a way that avoids the Runge phenomenon. 
Ensure, by including the\n # endpoint or not as appropriate, that one point always falls at x\n # exactly.\n xs = scale*np.cos(np.linspace(0,np.pi,n,endpoint=n%1)) + x\n\n P = KroghInterpolator(xs, f(xs))\n d = P.derivatives(x,der=degree+1)\n\n return np.poly1d((d/factorial(np.arange(degree+1)))[::-1])\n\n\nclass BarycentricInterpolator(object):\n \"\"\"The interpolating polynomial for a set of points\n\n Constructs a polynomial that passes through a given set of points.\n Allows evaluation of the polynomial, efficient changing of the y\n values to be interpolated, and updating by adding more x values.\n For reasons of numerical stability, this function does not compute\n the coefficients of the polynomial.\n\n This class uses a \"barycentric interpolation\" method that treats\n the problem as a special case of rational function interpolation.\n This algorithm is quite stable, numerically, but even in a world of\n exact computation, unless the x coordinates are chosen very\n carefully - Chebyshev zeros (e.g. cos(i*pi/n)) are a good choice -\n polynomial interpolation itself is a very ill-conditioned process\n due to the Runge phenomenon.\n\n Based on Berrut and Trefethen 2004, \"Barycentric Lagrange Interpolation\".\n \"\"\"\n def __init__(self, xi, yi=None):\n \"\"\"Construct an object capable of interpolating functions sampled at xi\n\n The values yi need to be provided before the function is evaluated,\n but none of the preprocessing depends on them, so rapid updates\n are possible.\n\n Parameters\n ----------\n xi : array-like of length N\n The x coordinates of the points the polynomial should pass through\n yi : array-like N by R or None\n The y coordinates of the points the polynomial should pass through;\n if R>1 the polynomial is vector-valued. If None the y values\n will be supplied later.\n \"\"\"\n self.n = len(xi)\n self.xi = np.asarray(xi)\n if yi is not None and len(yi)!=len(self.xi):\n raise ValueError(\"yi dimensions do not match xi dimensions\")\n self.set_yi(yi)\n self.wi = np.zeros(self.n)\n self.wi[0] = 1\n for j in xrange(1,self.n):\n self.wi[:j]*=(self.xi[j]-self.xi[:j])\n self.wi[j] = np.multiply.reduce(self.xi[:j]-self.xi[j])\n self.wi**=-1\n\n def set_yi(self, yi):\n \"\"\"\n Update the y values to be interpolated\n\n The barycentric interpolation algorithm requires the calculation\n of weights, but these depend only on the xi. The yi can be changed\n at any time.\n\n Parameters\n ----------\n yi : array_like N by R\n The y coordinates of the points the polynomial should pass through;\n if R>1 the polynomial is vector-valued. 
If None the y values\n will be supplied later.\n\n \"\"\"\n if yi is None:\n self.yi = None\n return\n yi = np.asarray(yi)\n if len(yi.shape)==1:\n self.vector_valued = False\n yi = yi[:,np.newaxis]\n elif len(yi.shape)>2:\n raise ValueError(\"y coordinates must be either scalars or vectors\")\n else:\n self.vector_valued = True\n\n n, r = yi.shape\n if n!=len(self.xi):\n raise ValueError(\"yi dimensions do not match xi dimensions\")\n self.yi = yi\n self.r = r\n\n\n def add_xi(self, xi, yi=None):\n \"\"\"\n Add more x values to the set to be interpolated\n\n The barycentric interpolation algorithm allows easy updating by\n adding more points for the polynomial to pass through.\n\n Parameters\n ----------\n xi : array_like of length N1\n The x coordinates of the points the polynomial should pass through\n yi : array_like N1 by R or None\n The y coordinates of the points the polynomial should pass through;\n if R>1 the polynomial is vector-valued. If None the y values\n will be supplied later. The yi should be specified if and only if\n the interpolator has y values specified.\n\n \"\"\"\n if yi is not None:\n if self.yi is None:\n raise ValueError(\"No previous yi value to update!\")\n yi = np.asarray(yi)\n if len(yi.shape)==1:\n if self.vector_valued:\n raise ValueError(\"Cannot extend dimension %d y vectors with scalars\" % self.r)\n yi = yi[:,np.newaxis]\n elif len(yi.shape)>2:\n raise ValueError(\"y coordinates must be either scalars or vectors\")\n else:\n n, r = yi.shape\n if r!=self.r:\n raise ValueError(\"Cannot extend dimension %d y vectors with dimension %d y vectors\" % (self.r, r))\n\n self.yi = np.vstack((self.yi,yi))\n else:\n if self.yi is not None:\n raise ValueError(\"No update to yi provided!\")\n old_n = self.n\n self.xi = np.concatenate((self.xi,xi))\n self.n = len(self.xi)\n self.wi**=-1\n old_wi = self.wi\n self.wi = np.zeros(self.n)\n self.wi[:old_n] = old_wi\n for j in xrange(old_n,self.n):\n self.wi[:j]*=(self.xi[j]-self.xi[:j])\n self.wi[j] = np.multiply.reduce(self.xi[:j]-self.xi[j])\n self.wi**=-1\n\n def __call__(self, x):\n \"\"\"Evaluate the interpolating polynomial at the points x\n\n Parameters\n ----------\n x : scalar or array-like of length M\n\n Returns\n -------\n y : scalar or array-like of length R or length M or M by R\n The shape of y depends on the shape of x and whether the\n interpolator is vector-valued or scalar-valued.\n\n Notes\n -----\n Currently the code computes an outer product between x and the\n weights, that is, it constructs an intermediate array of size\n N by M, where N is the degree of the polynomial.\n \"\"\"\n scalar = _isscalar(x)\n x = np.atleast_1d(x)\n c = np.subtract.outer(x,self.xi)\n z = c==0\n c[z] = 1\n c = self.wi/c\n p = np.dot(c,self.yi)/np.sum(c,axis=-1)[:,np.newaxis]\n i, j = np.nonzero(z)\n p[i] = self.yi[j]\n if not self.vector_valued:\n if scalar:\n return p[0,0]\n else:\n return p[:,0]\n else:\n if scalar:\n return p[0]\n else:\n return p\ndef barycentric_interpolate(xi, yi, x):\n \"\"\"\n Convenience function for polynomial interpolation\n\n Constructs a polynomial that passes through a given set of points,\n then evaluates the polynomial. 
For reasons of numerical stability,\n this function does not compute the coefficients of the polynomial.\n\n This function uses a \"barycentric interpolation\" method that treats\n the problem as a special case of rational function interpolation.\n This algorithm is quite stable, numerically, but even in a world of\n exact computation, unless the x coordinates are chosen very\n carefully - Chebyshev zeros (e.g. cos(i*pi/n)) are a good choice -\n polynomial interpolation itself is a very ill-conditioned process\n due to the Runge phenomenon.\n\n Based on Berrut and Trefethen 2004, \"Barycentric Lagrange Interpolation\".\n\n\n Parameters\n ----------\n xi : array_like of length N\n The x coordinates of the points the polynomial should pass through\n yi : array_like N by R\n The y coordinates of the points the polynomial should pass through;\n if R>1 the polynomial is vector-valued.\n x : scalar or array_like of length M\n\n\n Returns\n -------\n y : scalar or array_like of length R or length M or M by R\n The shape of y depends on the shape of x and whether the\n interpolator is vector-valued or scalar-valued.\n\n\n Notes\n -----\n\n Construction of the interpolation weights is a relatively slow process.\n If you want to call this many times with the same xi (but possibly\n varying yi or x) you should use the class BarycentricInterpolator.\n This is what this function uses internally.\n\n \"\"\"\n return BarycentricInterpolator(xi, yi)(x)\n\n\nclass PiecewisePolynomial(object):\n \"\"\"Piecewise polynomial curve specified by points and derivatives\n\n This class represents a curve that is a piecewise polynomial. It\n passes through a list of points and has specified derivatives at\n each point. The degree of the polynomial may very from segment to\n segment, as may the number of derivatives available. The degree\n should not exceed about thirty.\n\n Appending points to the end of the curve is efficient.\n \"\"\"\n def __init__(self, xi, yi, orders=None, direction=None):\n \"\"\"Construct a piecewise polynomial\n\n Parameters\n ----------\n xi : array-like of length N\n a sorted list of x-coordinates\n yi : list of lists of length N\n yi[i] is the list of derivatives known at xi[i]\n orders : list of integers, or integer\n a list of polynomial orders, or a single universal order\n direction : {None, 1, -1}\n indicates whether the xi are increasing or decreasing\n +1 indicates increasing\n -1 indicates decreasing\n None indicates that it should be deduced from the first two xi\n\n Notes\n -----\n If orders is None, or orders[i] is None, then the degree of the\n polynomial segment is exactly the degree required to match all i\n available derivatives at both endpoints. If orders[i] is not None,\n then some derivatives will be ignored. The code will try to use an\n equal number of derivatives from each end; if the total number of\n derivatives needed is odd, it will prefer the rightmost endpoint. 
If\n not enough derivatives are available, an exception is raised.\n \"\"\"\n yi0 = np.asarray(yi[0])\n if len(yi0.shape)==2:\n self.vector_valued = True\n self.r = yi0.shape[1]\n elif len(yi0.shape)==1:\n self.vector_valued = False\n self.r = 1\n else:\n raise ValueError(\"Each derivative must be a vector, not a higher-rank array\")\n\n self.xi = [xi[0]]\n self.yi = [yi0]\n self.n = 1\n\n self.direction = direction\n self.orders = []\n self.polynomials = []\n self.extend(xi[1:],yi[1:],orders)\n\n def _make_polynomial(self,x1,y1,x2,y2,order,direction):\n \"\"\"Construct the interpolating polynomial object\n\n Deduces the number of derivatives to match at each end\n from order and the number of derivatives available. If\n possible it uses the same number of derivatives from\n each end; if the number is odd it tries to take the\n extra one from y2. In any case if not enough derivatives\n are available at one end or another it draws enough to\n make up the total from the other end.\n \"\"\"\n n = order+1\n n1 = min(n//2,len(y1))\n n2 = min(n-n1,len(y2))\n n1 = min(n-n2,len(y1))\n if n1+n2!=n:\n raise ValueError(\"Point %g has %d derivatives, point %g has %d derivatives, but order %d requested\" % (x1, len(y1), x2, len(y2), order))\n if not (n1 <= len(y1) and n2 <= len(y2)):\n raise ValueError(\"`order` input incompatible with length y1 or y2.\")\n\n xi = np.zeros(n)\n if self.vector_valued:\n yi = np.zeros((n,self.r))\n else:\n yi = np.zeros((n,))\n\n xi[:n1] = x1\n yi[:n1] = y1[:n1]\n xi[n1:] = x2\n yi[n1:] = y2[:n2]\n\n return KroghInterpolator(xi,yi)\n\n def append(self, xi, yi, order=None):\n \"\"\"\n Append a single point with derivatives to the PiecewisePolynomial\n\n Parameters\n ----------\n xi : float\n\n yi : array_like\n yi is the list of derivatives known at xi\n\n order : integer or None\n a polynomial order, or instructions to use the highest\n possible order\n\n \"\"\"\n\n yi = np.asarray(yi)\n if self.vector_valued:\n if (len(yi.shape)!=2 or yi.shape[1]!=self.r):\n raise ValueError(\"Each derivative must be a vector of length %d\" % self.r)\n else:\n if len(yi.shape)!=1:\n raise ValueError(\"Each derivative must be a scalar\")\n\n if self.direction is None:\n self.direction = np.sign(xi-self.xi[-1])\n elif (xi-self.xi[-1])*self.direction < 0:\n raise ValueError(\"x coordinates must be in the %d direction: %s\" % (self.direction, self.xi))\n\n self.xi.append(xi)\n self.yi.append(yi)\n\n\n if order is None:\n n1 = len(self.yi[-2])\n n2 = len(self.yi[-1])\n n = n1+n2\n order = n-1\n\n self.orders.append(order)\n self.polynomials.append(self._make_polynomial(\n self.xi[-2], self.yi[-2],\n self.xi[-1], self.yi[-1],\n order, self.direction))\n self.n += 1\n\n\n def extend(self, xi, yi, orders=None):\n \"\"\"\n Extend the PiecewisePolynomial by a list of points\n\n Parameters\n ----------\n xi : array_like of length N1\n a sorted list of x-coordinates\n yi : list of lists of length N1\n yi[i] is the list of derivatives known at xi[i]\n orders : list of integers, or integer\n a list of polynomial orders, or a single universal order\n direction : {None, 1, -1}\n indicates whether the xi are increasing or decreasing\n +1 indicates increasing\n -1 indicates decreasing\n None indicates that it should be deduced from the first two xi\n\n \"\"\"\n\n for i in xrange(len(xi)):\n if orders is None or _isscalar(orders):\n self.append(xi[i],yi[i],orders)\n else:\n self.append(xi[i],yi[i],orders[i])\n\n def __call__(self, x):\n \"\"\"Evaluate the piecewise polynomial\n\n Parameters\n ----------\n 
x : scalar or array-like of length N\n\n Returns\n -------\n y : scalar or array-like of length R or length N or N by R\n \"\"\"\n if _isscalar(x):\n pos = np.clip(np.searchsorted(self.xi, x) - 1, 0, self.n-2)\n y = self.polynomials[pos](x)\n else:\n x = np.asarray(x)\n m = len(x)\n pos = np.clip(np.searchsorted(self.xi, x) - 1, 0, self.n-2)\n if self.vector_valued:\n y = np.zeros((m,self.r))\n else:\n y = np.zeros(m)\n for i in xrange(self.n-1):\n c = pos==i\n y[c] = self.polynomials[i](x[c])\n return y\n\n def derivative(self, x, der):\n \"\"\"\n Evaluate a derivative of the piecewise polynomial\n\n Parameters\n ----------\n x : scalar or array_like of length N\n\n der : integer\n which single derivative to extract\n\n Returns\n -------\n y : scalar or array_like of length R or length N or N by R\n\n Notes\n -----\n This currently computes (using self.derivatives()) all derivatives\n of the curve segment containing each x but returns only one.\n\n \"\"\"\n return self.derivatives(x,der=der+1)[der]\n\n def derivatives(self, x, der):\n \"\"\"\n Evaluate a derivative of the piecewise polynomial\n\n Parameters\n ----------\n x : scalar or array_like of length N\n\n der : integer\n how many derivatives (including the function value as\n 0th derivative) to extract\n\n Returns\n -------\n y : array_like of shape der by R or der by N or der by N by R\n\n \"\"\"\n if _isscalar(x):\n pos = np.clip(np.searchsorted(self.xi, x) - 1, 0, self.n-2)\n y = self.polynomials[pos].derivatives(x,der=der)\n else:\n x = np.asarray(x)\n m = len(x)\n pos = np.clip(np.searchsorted(self.xi, x) - 1, 0, self.n-2)\n if self.vector_valued:\n y = np.zeros((der,m,self.r))\n else:\n y = np.zeros((der,m))\n for i in xrange(self.n-1):\n c = pos==i\n y[:,c] = self.polynomials[i].derivatives(x[c],der=der)\n return y\n\n\ndef piecewise_polynomial_interpolate(xi,yi,x,orders=None,der=0):\n \"\"\"\n Convenience function for piecewise polynomial interpolation\n\n Parameters\n ----------\n xi : array_like\n A sorted list of x-coordinates, of length N.\n yi : list of lists\n yi[i] is the list of derivatives known at xi[i]. Of length N.\n x : scalar or array_like\n Of length M.\n orders : int or list of ints\n a list of polynomial orders, or a single universal order\n der : int\n Which single derivative to extract.\n\n Returns\n -------\n y : scalar or array_like\n The result, of length R or length M or M by R,\n\n Notes\n -----\n If orders is None, or orders[i] is None, then the degree of the\n polynomial segment is exactly the degree required to match all i\n available derivatives at both endpoints. If orders[i] is not None,\n then some derivatives will be ignored. The code will try to use an\n equal number of derivatives from each end; if the total number of\n derivatives needed is odd, it will prefer the rightmost endpoint. 
If\n not enough derivatives are available, an exception is raised.\n\n Construction of these piecewise polynomials can be an expensive process;\n if you repeatedly evaluate the same polynomial, consider using the class\n PiecewisePolynomial (which is what this function does).\n\n \"\"\"\n\n P = PiecewisePolynomial(xi, yi, orders)\n if der==0:\n return P(x)\n elif _isscalar(der):\n return P.derivative(x,der=der)\n else:\n return P.derivatives(x,der=np.amax(der)+1)[der]\n\ndef _isscalar(x):\n \"\"\"Check whether x is if a scalar type, or 0-dim\"\"\"\n return np.isscalar(x) or hasattr(x, 'shape') and x.shape == ()\n\ndef _edge_case(m0, d1):\n return np.where((d1==0) | (m0==0), 0.0, 1.0/(1.0/m0+1.0/d1))\n\ndef _find_derivatives(x, y):\n # Determine the derivatives at the points y_k, d_k, by using\n # PCHIP algorithm is:\n # We choose the derivatives at the point x_k by\n # Let m_k be the slope of the kth segment (between k and k+1)\n # If m_k=0 or m_{k-1}=0 or sgn(m_k) != sgn(m_{k-1}) then d_k == 0\n # else use weighted harmonic mean:\n # w_1 = 2h_k + h_{k-1}, w_2 = h_k + 2h_{k-1}\n # 1/d_k = 1/(w_1 + w_2)*(w_1 / m_k + w_2 / m_{k-1})\n # where h_k is the spacing between x_k and x_{k+1}\n\n hk = x[1:] - x[:-1]\n mk = (y[1:] - y[:-1]) / hk\n smk = np.sign(mk)\n condition = ((smk[1:] != smk[:-1]) | (mk[1:]==0) | (mk[:-1]==0))\n\n w1 = 2*hk[1:] + hk[:-1]\n w2 = hk[1:] + 2*hk[:-1]\n whmean = 1.0/(w1+w2)*(w1/mk[1:] + w2/mk[:-1])\n\n dk = np.zeros_like(y)\n dk[1:-1][condition] = 0.0\n dk[1:-1][~condition] = 1.0/whmean[~condition]\n\n # For end-points choose d_0 so that 1/d_0 = 1/m_0 + 1/d_1 unless\n # one of d_1 or m_0 is 0, then choose d_0 = 0\n\n dk[0] = _edge_case(mk[0],dk[1])\n dk[-1] = _edge_case(mk[-1],dk[-2])\n return dk\n\n\ndef pchip(x, y):\n \"\"\"PCHIP 1-d monotonic cubic interpolation\n\n x and y are arrays of values used to approximate some function f, with\n ``y = f(x)``. This class factory function returns a callable class whose\n ``__call__`` method uses monotonic cubic, interpolation to find the value\n of new points.\n\n Parameters\n ----------\n x : array\n A 1D array of monotonically increasing real values. x cannot\n include duplicate values (otherwise f is overspecified)\n y : array\n A 1-D array of real values. y's length along the interpolation\n axis must be equal to the length of x.\n\n Assumes x is sorted in monotonic order (e.g. ``x[1] > x[0]``).\n\n Returns\n -------\n pchip : PiecewisePolynomial instance\n The result of the interpolation.\n\n \"\"\"\n derivs = _find_derivatives(x,y)\n return PiecewisePolynomial(x, list(zip(y, derivs)), orders=3, direction=None)\n",
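The polyint.py source above defines KroghInterpolator, BarycentricInterpolator/barycentric_interpolate, PiecewisePolynomial and pchip. As a hedged usage sketch (assuming a SciPy build that exposes these names under scipy.interpolate, as this file does), the example below exercises the repeated-node Hermite behaviour the Krogh docstring describes and the barycentric convenience function.

```python
# Hedged usage sketch for the interpolators defined in polyint.py above;
# assumes a SciPy version exposing these names under scipy.interpolate.
import numpy as np
from scipy.interpolate import KroghInterpolator, barycentric_interpolate

# Repeating x=0 makes the second y value (2.0) the derivative at 0, as the
# KroghInterpolator docstring explains: this is the quadratic 2*x - 2*x**2.
p = KroghInterpolator([0, 0, 1], [0, 2, 0])
print(p(0.5))                      # 0.5, the value of 2*x - 2*x**2 at x = 0.5
print(p.derivatives(0.0, der=2))   # [0., 2.]: function value and slope at 0

# barycentric_interpolate builds the weights once and evaluates at x.
xi = np.linspace(0, np.pi, 7)
yi = np.sin(xi)
print(barycentric_interpolate(xi, yi, np.pi / 4))   # close to sin(pi/4) ~ 0.707
```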
"# IDLSave - a python module to read IDL 'save' files\n# Copyright (c) 2010 Thomas P. Robitaille\n\n# Many thanks to Craig Markwardt for publishing the Unofficial Format\n# Specification for IDL .sav files, without which this Python module would not\n# exist (http://cow.physics.wisc.edu/~craigm/idl/savefmt).\n\n# This code was developed by with permission from ITT Visual Information\n# Systems. IDL(r) is a registered trademark of ITT Visual Information Systems,\n# Inc. for their Interactive Data Language software.\n\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\nfrom __future__ import division, print_function, absolute_import\n\nimport struct\nimport numpy as np\nfrom numpy.compat import asstr\nimport tempfile\nimport zlib\nimport warnings\n\n# Define the different data types that can be found in an IDL save file\nDTYPE_DICT = {}\nDTYPE_DICT[1] = '>u1'\nDTYPE_DICT[2] = '>i2'\nDTYPE_DICT[3] = '>i4'\nDTYPE_DICT[4] = '>f4'\nDTYPE_DICT[5] = '>f8'\nDTYPE_DICT[6] = '>c8'\nDTYPE_DICT[7] = '|O'\nDTYPE_DICT[8] = '|O'\nDTYPE_DICT[9] = '>c16'\nDTYPE_DICT[10] = '|O'\nDTYPE_DICT[11] = '|O'\nDTYPE_DICT[12] = '>u2'\nDTYPE_DICT[13] = '>u4'\nDTYPE_DICT[14] = '>i8'\nDTYPE_DICT[15] = '>u8'\n\n# Define the different record types that can be found in an IDL save file\nRECTYPE_DICT = {}\nRECTYPE_DICT[0] = \"START_MARKER\"\nRECTYPE_DICT[1] = \"COMMON_VARIABLE\"\nRECTYPE_DICT[2] = \"VARIABLE\"\nRECTYPE_DICT[3] = \"SYSTEM_VARIABLE\"\nRECTYPE_DICT[6] = \"END_MARKER\"\nRECTYPE_DICT[10] = \"TIMESTAMP\"\nRECTYPE_DICT[12] = \"COMPILED\"\nRECTYPE_DICT[13] = \"IDENTIFICATION\"\nRECTYPE_DICT[14] = \"VERSION\"\nRECTYPE_DICT[15] = \"HEAP_HEADER\"\nRECTYPE_DICT[16] = \"HEAP_DATA\"\nRECTYPE_DICT[17] = \"PROMOTE64\"\nRECTYPE_DICT[19] = \"NOTICE\"\n\n# Define a dictionary to contain structure definitions\nSTRUCT_DICT = {}\n\n\ndef _align_32(f):\n '''Align to the next 32-bit position in a file'''\n\n pos = f.tell()\n if pos % 4 != 0:\n f.seek(pos + 4 - pos % 4)\n return\n\n\ndef _skip_bytes(f, n):\n '''Skip `n` bytes'''\n f.read(n)\n return\n\n\ndef _read_bytes(f, n):\n '''Read the next `n` bytes'''\n return f.read(n)\n\n\ndef _read_byte(f):\n '''Read a single byte'''\n return np.uint8(struct.unpack('>B', f.read(4)[:1])[0])\n\n\ndef _read_long(f):\n '''Read a signed 32-bit integer'''\n return np.int32(struct.unpack('>l', f.read(4))[0])\n\n\ndef _read_int16(f):\n '''Read a signed 16-bit integer'''\n return np.int16(struct.unpack('>h', f.read(4)[2:4])[0])\n\n\ndef _read_int32(f):\n '''Read a signed 32-bit integer'''\n 
return np.int32(struct.unpack('>i', f.read(4))[0])\n\n\ndef _read_int64(f):\n '''Read a signed 64-bit integer'''\n return np.int64(struct.unpack('>q', f.read(8))[0])\n\n\ndef _read_uint16(f):\n '''Read an unsigned 16-bit integer'''\n return np.uint16(struct.unpack('>H', f.read(4)[2:4])[0])\n\n\ndef _read_uint32(f):\n '''Read an unsigned 32-bit integer'''\n return np.uint32(struct.unpack('>I', f.read(4))[0])\n\n\ndef _read_uint64(f):\n '''Read an unsigned 64-bit integer'''\n return np.uint64(struct.unpack('>Q', f.read(8))[0])\n\n\ndef _read_float32(f):\n '''Read a 32-bit float'''\n return np.float32(struct.unpack('>f', f.read(4))[0])\n\n\ndef _read_float64(f):\n '''Read a 64-bit float'''\n return np.float64(struct.unpack('>d', f.read(8))[0])\n\n\nclass Pointer(object):\n '''Class used to define pointers'''\n\n def __init__(self, index):\n self.index = index\n return\n\n\nclass ObjectPointer(Pointer):\n '''Class used to define object pointers'''\n pass\n\n\ndef _read_string(f):\n '''Read a string'''\n length = _read_long(f)\n if length > 0:\n chars = _read_bytes(f, length)\n _align_32(f)\n chars = asstr(chars)\n else:\n warnings.warn(\"warning: empty strings are now set to '' instead of None\")\n chars = ''\n return chars\n\n\ndef _read_string_data(f):\n '''Read a data string (length is specified twice)'''\n length = _read_long(f)\n if length > 0:\n length = _read_long(f)\n string_data = _read_bytes(f, length)\n _align_32(f)\n else:\n warnings.warn(\"warning: empty strings are now set to '' instead of None\")\n string_data = ''\n return string_data\n\n\ndef _read_data(f, dtype):\n '''Read a variable with a specified data type'''\n if dtype==1:\n if _read_int32(f) != 1:\n raise Exception(\"Error occurred while reading byte variable\")\n return _read_byte(f)\n elif dtype==2:\n return _read_int16(f)\n elif dtype==3:\n return _read_int32(f)\n elif dtype==4:\n return _read_float32(f)\n elif dtype==5:\n return _read_float64(f)\n elif dtype==6:\n real = _read_float32(f)\n imag = _read_float32(f)\n return np.complex64(real + imag * 1j)\n elif dtype==7:\n return _read_string_data(f)\n elif dtype==8:\n raise Exception(\"Should not be here - please report this\")\n elif dtype==9:\n real = _read_float64(f)\n imag = _read_float64(f)\n return np.complex128(real + imag * 1j)\n elif dtype==10:\n return Pointer(_read_int32(f))\n elif dtype==11:\n return ObjectPointer(_read_int32(f))\n elif dtype==12:\n return _read_uint16(f)\n elif dtype==13:\n return _read_uint32(f)\n elif dtype==14:\n return _read_int64(f)\n elif dtype==15:\n return _read_uint64(f)\n else:\n raise Exception(\"Unknown IDL type: %i - please report this\" % dtype)\n\n\ndef _read_structure(f, array_desc, struct_desc):\n '''\n Read a structure, with the array and structure descriptors given as\n `array_desc` and `structure_desc` respectively.\n '''\n\n nrows = array_desc['nelements']\n ncols = struct_desc['ntags']\n columns = struct_desc['tagtable']\n\n dtype = []\n for col in columns:\n if col['structure'] or col['array']:\n dtype.append(((col['name'].lower(), col['name']), np.object_))\n else:\n if col['typecode'] in DTYPE_DICT:\n dtype.append(((col['name'].lower(), col['name']),\n DTYPE_DICT[col['typecode']]))\n else:\n raise Exception(\"Variable type %i not implemented\" %\n col['typecode'])\n\n structure = np.recarray((nrows, ), dtype=dtype)\n\n for i in range(nrows):\n for col in columns:\n dtype = col['typecode']\n if col['structure']:\n structure[col['name']][i] = _read_structure(f, \\\n struct_desc['arrtable'][col['name']], \\\n 
struct_desc['structtable'][col['name']])\n elif col['array']:\n structure[col['name']][i] = _read_array(f, dtype, \\\n struct_desc['arrtable'][col['name']])\n else:\n structure[col['name']][i] = _read_data(f, dtype)\n\n # Reshape structure if needed\n if array_desc['ndims'] > 1:\n warnings.warn(\"warning: multi-dimensional structures are now correctly reshaped\")\n dims = array_desc['dims'][:int(array_desc['ndims'])]\n dims.reverse()\n structure = structure.reshape(dims)\n\n return structure\n\n\ndef _read_array(f, typecode, array_desc):\n '''\n Read an array of type `typecode`, with the array descriptor given as\n `array_desc`.\n '''\n\n if typecode in [1, 3, 4, 5, 6, 9, 13, 14, 15]:\n\n if typecode == 1:\n nbytes = _read_int32(f)\n if nbytes != array_desc['nbytes']:\n raise Exception(\"Error occurred while reading byte array\")\n\n # Read bytes as numpy array\n array = np.fromstring(f.read(array_desc['nbytes']), \\\n dtype=DTYPE_DICT[typecode])\n\n elif typecode in [2, 12]:\n\n # These are 2 byte types, need to skip every two as they are not packed\n\n array = np.fromstring(f.read(array_desc['nbytes']*2), \\\n dtype=DTYPE_DICT[typecode])[1::2]\n\n else:\n\n # Read bytes into list\n array = []\n for i in range(array_desc['nelements']):\n dtype = typecode\n data = _read_data(f, dtype)\n array.append(data)\n\n array = np.array(array, dtype=np.object_)\n\n # Reshape array if needed\n if array_desc['ndims'] > 1:\n dims = array_desc['dims'][:int(array_desc['ndims'])]\n dims.reverse()\n array = array.reshape(dims)\n\n # Go to next alignment position\n _align_32(f)\n\n return array\n\n\ndef _read_record(f):\n '''Function to read in a full record'''\n\n record = {}\n\n recpos = f.tell()\n record['rectype'] = _read_long(f)\n\n nextrec = _read_uint32(f)\n nextrec += _read_uint32(f) * 2**32\n\n _skip_bytes(f, 4)\n\n if not record['rectype'] in RECTYPE_DICT:\n raise Exception(\"Unknown RECTYPE: %i\" % record['rectype'])\n\n record['rectype'] = RECTYPE_DICT[record['rectype']]\n\n if record['rectype'] in [\"VARIABLE\", \"HEAP_DATA\"]:\n\n if record['rectype'] == \"VARIABLE\":\n record['varname'] = _read_string(f)\n else:\n record['heap_index'] = _read_long(f)\n _skip_bytes(f, 4)\n\n rectypedesc = _read_typedesc(f)\n\n varstart = _read_long(f)\n if varstart != 7:\n raise Exception(\"VARSTART is not 7\")\n\n if rectypedesc['structure']:\n record['data'] = _read_structure(f, rectypedesc['array_desc'], \\\n rectypedesc['struct_desc'])\n elif rectypedesc['array']:\n record['data'] = _read_array(f, rectypedesc['typecode'], \\\n rectypedesc['array_desc'])\n else:\n dtype = rectypedesc['typecode']\n record['data'] = _read_data(f, dtype)\n\n elif record['rectype'] == \"TIMESTAMP\":\n\n _skip_bytes(f, 4*256)\n record['date'] = _read_string(f)\n record['user'] = _read_string(f)\n record['host'] = _read_string(f)\n\n elif record['rectype'] == \"VERSION\":\n\n record['format'] = _read_long(f)\n record['arch'] = _read_string(f)\n record['os'] = _read_string(f)\n record['release'] = _read_string(f)\n\n elif record['rectype'] == \"IDENTIFICATON\":\n\n record['author'] = _read_string(f)\n record['title'] = _read_string(f)\n record['idcode'] = _read_string(f)\n\n elif record['rectype'] == \"NOTICE\":\n\n record['notice'] = _read_string(f)\n\n elif record['rectype'] == \"HEAP_HEADER\":\n\n record['nvalues'] = _read_long(f)\n record['indices'] = []\n for i in range(record['nvalues']):\n record['indices'].append(_read_long(f))\n\n elif record['rectype'] == \"COMMONBLOCK\":\n\n record['nvars'] = _read_long(f)\n 
record['name'] = _read_string(f)\n record['varnames'] = []\n for i in range(record['nvars']):\n record['varnames'].append(_read_string(f))\n\n elif record['rectype'] == \"END_MARKER\":\n\n record['end'] = True\n\n elif record['rectype'] == \"UNKNOWN\":\n\n warnings.warn(\"Skipping UNKNOWN record\")\n\n elif record['rectype'] == \"SYSTEM_VARIABLE\":\n\n warnings.warn(\"Skipping SYSTEM_VARIABLE record\")\n\n else:\n\n raise Exception(\"record['rectype']=%s not implemented\" % \\\n record['rectype'])\n\n f.seek(nextrec)\n\n return record\n\n\ndef _read_typedesc(f):\n '''Function to read in a type descriptor'''\n\n typedesc = {}\n\n typedesc['typecode'] = _read_long(f)\n typedesc['varflags'] = _read_long(f)\n\n if typedesc['varflags'] & 2 == 2:\n raise Exception(\"System variables not implemented\")\n\n typedesc['array'] = typedesc['varflags'] & 4 == 4\n typedesc['structure'] = typedesc['varflags'] & 32 == 32\n\n if typedesc['structure']:\n typedesc['array_desc'] = _read_arraydesc(f)\n typedesc['struct_desc'] = _read_structdesc(f)\n elif typedesc['array']:\n typedesc['array_desc'] = _read_arraydesc(f)\n\n return typedesc\n\n\ndef _read_arraydesc(f):\n '''Function to read in an array descriptor'''\n\n arraydesc = {}\n\n arraydesc['arrstart'] = _read_long(f)\n\n if arraydesc['arrstart'] == 8:\n\n _skip_bytes(f, 4)\n\n arraydesc['nbytes'] = _read_long(f)\n arraydesc['nelements'] = _read_long(f)\n arraydesc['ndims'] = _read_long(f)\n\n _skip_bytes(f, 8)\n\n arraydesc['nmax'] = _read_long(f)\n\n arraydesc['dims'] = []\n for d in range(arraydesc['nmax']):\n arraydesc['dims'].append(_read_long(f))\n\n elif arraydesc['arrstart'] == 18:\n\n warnings.warn(\"Using experimental 64-bit array read\")\n\n _skip_bytes(f, 8)\n\n arraydesc['nbytes'] = _read_uint64(f)\n arraydesc['nelements'] = _read_uint64(f)\n arraydesc['ndims'] = _read_long(f)\n\n _skip_bytes(f, 8)\n\n arraydesc['nmax'] = 8\n\n arraydesc['dims'] = []\n for d in range(arraydesc['nmax']):\n v = _read_long(f)\n if v != 0:\n raise Exception(\"Expected a zero in ARRAY_DESC\")\n arraydesc['dims'].append(_read_long(f))\n\n else:\n\n raise Exception(\"Unknown ARRSTART: %i\" % arraydesc['arrstart'])\n\n return arraydesc\n\n\ndef _read_structdesc(f):\n '''Function to read in a structure descriptor'''\n\n structdesc = {}\n\n structstart = _read_long(f)\n if structstart != 9:\n raise Exception(\"STRUCTSTART should be 9\")\n\n structdesc['name'] = _read_string(f)\n predef = _read_long(f)\n structdesc['ntags'] = _read_long(f)\n structdesc['nbytes'] = _read_long(f)\n\n structdesc['predef'] = predef & 1\n structdesc['inherits'] = predef & 2\n structdesc['is_super'] = predef & 4\n\n if not structdesc['predef']:\n\n structdesc['tagtable'] = []\n for t in range(structdesc['ntags']):\n structdesc['tagtable'].append(_read_tagdesc(f))\n\n for tag in structdesc['tagtable']:\n tag['name'] = _read_string(f)\n\n structdesc['arrtable'] = {}\n for tag in structdesc['tagtable']:\n if tag['array']:\n structdesc['arrtable'][tag['name']] = _read_arraydesc(f)\n\n structdesc['structtable'] = {}\n for tag in structdesc['tagtable']:\n if tag['structure']:\n structdesc['structtable'][tag['name']] = _read_structdesc(f)\n\n if structdesc['inherits'] or structdesc['is_super']:\n structdesc['classname'] = _read_string(f)\n structdesc['nsupclasses'] = _read_long(f)\n structdesc['supclassnames'] = []\n for s in range(structdesc['nsupclasses']):\n structdesc['supclassnames'].append(_read_string(f))\n structdesc['supclasstable'] = []\n for s in range(structdesc['nsupclasses']):\n 
structdesc['supclasstable'].append(_read_structdesc(f))\n\n STRUCT_DICT[structdesc['name']] = structdesc\n\n else:\n\n if not structdesc['name'] in STRUCT_DICT:\n raise Exception(\"PREDEF=1 but can't find definition\")\n\n structdesc = STRUCT_DICT[structdesc['name']]\n\n return structdesc\n\n\ndef _read_tagdesc(f):\n '''Function to read in a tag descriptor'''\n\n tagdesc = {}\n\n tagdesc['offset'] = _read_long(f)\n\n if tagdesc['offset'] == -1:\n tagdesc['offset'] = _read_uint64(f)\n\n tagdesc['typecode'] = _read_long(f)\n tagflags = _read_long(f)\n\n tagdesc['array'] = tagflags & 4 == 4\n tagdesc['structure'] = tagflags & 32 == 32\n tagdesc['scalar'] = tagdesc['typecode'] in DTYPE_DICT\n # Assume '10'x is scalar\n\n return tagdesc\n\n\ndef _replace_heap(variable, heap):\n\n if isinstance(variable, Pointer):\n\n while isinstance(variable, Pointer):\n\n if variable.index == 0:\n variable = None\n else:\n variable = heap[variable.index]\n\n replace, new = _replace_heap(variable, heap)\n\n if replace:\n variable = new\n\n return True, variable\n\n elif isinstance(variable, np.core.records.recarray):\n\n # Loop over records\n for ir, record in enumerate(variable):\n\n replace, new = _replace_heap(record, heap)\n\n if replace:\n variable[ir] = new\n\n return False, variable\n\n elif isinstance(variable, np.core.records.record):\n\n # Loop over values\n for iv, value in enumerate(variable):\n\n replace, new = _replace_heap(value, heap)\n\n if replace:\n variable[iv] = new\n\n return False, variable\n\n elif isinstance(variable, np.ndarray):\n\n # Loop over values if type is np.object_\n if variable.dtype.type is np.object_:\n\n for iv in range(variable.size):\n\n replace, new = _replace_heap(variable.item(iv), heap)\n\n if replace:\n variable.itemset(iv, new)\n\n return False, variable\n\n else:\n\n return False, variable\n\n\nclass AttrDict(dict):\n '''\n A case-insensitive dictionary with access via item, attribute, and call\n notations:\n\n >>> d = AttrDict()\n >>> d['Variable'] = 123\n >>> d['Variable']\n 123\n >>> d.Variable\n 123\n >>> d.variable\n 123\n >>> d('VARIABLE')\n 123\n '''\n\n def __init__(self, init={}):\n dict.__init__(self, init)\n\n def __getitem__(self, name):\n return super(AttrDict, self).__getitem__(name.lower())\n\n def __setitem__(self, key, value):\n return super(AttrDict, self).__setitem__(key.lower(), value)\n\n __getattr__ = __getitem__\n __setattr__ = __setitem__\n __call__ = __getitem__\n\n\ndef readsav(file_name, idict=None, python_dict=False,\n uncompressed_file_name=None, verbose=False):\n '''\n Read an IDL .sav file\n\n Parameters\n ----------\n file_name : str\n Name of the IDL save file.\n idict : dict, optional\n Dictionary in which to insert .sav file variables\n python_dict : bool, optional\n By default, the object return is not a Python dictionary, but a\n case-insensitive dictionary with item, attribute, and call access\n to variables. To get a standard Python dictionary, set this option\n to True.\n uncompressed_file_name : str, optional\n This option only has an effect for .sav files written with the\n /compress option. If a file name is specified, compressed .sav\n files are uncompressed to this file. 
Otherwise, readsav will use\n the `tempfile` module to determine a temporary filename\n automatically, and will remove the temporary file upon successfully\n reading it in.\n verbose : bool, optional\n Whether to print out information about the save file, including\n the records read, and available variables.\n\n Returns\n ----------\n idl_dict : AttrDict or dict\n If `python_dict` is set to False (default), this function returns a\n case-insensitive dictionary with item, attribute, and call access\n to variables. If `python_dict` is set to True, this function\n returns a Python dictionary with all variable names in lowercase.\n If `idict` was specified, then variables are written to the\n dictionary specified, and the updated dictionary is returned.\n '''\n\n # Initialize record and variable holders\n records = []\n if python_dict or idict:\n variables = {}\n else:\n variables = AttrDict()\n\n # Open the IDL file\n f = open(file_name, 'rb')\n\n # Read the signature, which should be 'SR'\n signature = _read_bytes(f, 2)\n if signature != b'SR':\n raise Exception(\"Invalid SIGNATURE: %s\" % signature)\n\n # Next, the record format, which is '\\x00\\x04' for normal .sav\n # files, and '\\x00\\x06' for compressed .sav files.\n recfmt = _read_bytes(f, 2)\n\n if recfmt == b'\\x00\\x04':\n pass\n\n elif recfmt == b'\\x00\\x06':\n\n if verbose:\n print(\"IDL Save file is compressed\")\n\n if uncompressed_file_name:\n fout = open(uncompressed_file_name, 'w+b')\n else:\n fout = tempfile.NamedTemporaryFile(suffix='.sav')\n\n if verbose:\n print(\" -> expanding to %s\" % fout.name)\n\n # Write header\n fout.write(b'SR\\x00\\x04')\n\n # Cycle through records\n while True:\n\n # Read record type\n rectype = _read_long(f)\n fout.write(struct.pack('>l', int(rectype)))\n\n # Read position of next record and return as int\n nextrec = _read_uint32(f)\n nextrec += _read_uint32(f) * 2**32\n\n # Read the unknown 4 bytes\n unknown = f.read(4)\n\n # Check if the end of the file has been reached\n if RECTYPE_DICT[rectype] == 'END_MARKER':\n fout.write(struct.pack('>I', int(nextrec) % 2**32))\n fout.write(struct.pack('>I', int((nextrec - (nextrec % 2**32)) / 2**32)))\n fout.write(unknown)\n break\n\n # Find current position\n pos = f.tell()\n\n # Decompress record\n rec_string = zlib.decompress(f.read(nextrec-pos))\n\n # Find new position of next record\n nextrec = fout.tell() + len(rec_string) + 12\n\n # Write out record\n fout.write(struct.pack('>I', int(nextrec % 2**32)))\n fout.write(struct.pack('>I', int((nextrec - (nextrec % 2**32)) / 2**32)))\n fout.write(unknown)\n fout.write(rec_string)\n\n # Close the original compressed file\n f.close()\n\n # Set f to be the decompressed file, and skip the first four bytes\n f = fout\n f.seek(4)\n\n else:\n raise Exception(\"Invalid RECFMT: %s\" % recfmt)\n\n # Loop through records, and add them to the list\n while True:\n r = _read_record(f)\n records.append(r)\n if 'end' in r:\n if r['end']:\n break\n\n # Close the file\n f.close()\n\n # Find heap data variables\n heap = {}\n for r in records:\n if r['rectype'] == \"HEAP_DATA\":\n heap[r['heap_index']] = r['data']\n\n # Find all variables\n for r in records:\n if r['rectype'] == \"VARIABLE\":\n replace, new = _replace_heap(r['data'], heap)\n if replace:\n r['data'] = new\n variables[r['varname'].lower()] = r['data']\n\n if verbose:\n\n # Print out timestamp info about the file\n for record in records:\n if record['rectype'] == \"TIMESTAMP\":\n print(\"-\"*50)\n print(\"Date: %s\" % record['date'])\n print(\"User: %s\" 
% record['user'])\n print(\"Host: %s\" % record['host'])\n break\n\n # Print out version info about the file\n for record in records:\n if record['rectype'] == \"VERSION\":\n print(\"-\"*50)\n print(\"Format: %s\" % record['format'])\n print(\"Architecture: %s\" % record['arch'])\n print(\"Operating System: %s\" % record['os'])\n print(\"IDL Version: %s\" % record['release'])\n break\n\n # Print out identification info about the file\n for record in records:\n if record['rectype'] == \"IDENTIFICATON\":\n print(\"-\"*50)\n print(\"Author: %s\" % record['author'])\n print(\"Title: %s\" % record['title'])\n print(\"ID Code: %s\" % record['idcode'])\n break\n\n print(\"-\"*50)\n print(\"Successfully read %i records of which:\" % \\\n (len(records)))\n\n # Create convenience list of record types\n rectypes = [r['rectype'] for r in records]\n\n for rt in set(rectypes):\n if rt != 'END_MARKER':\n print(\" - %i are of type %s\" % (rectypes.count(rt), rt))\n print(\"-\"*50)\n\n if 'VARIABLE' in rectypes:\n print(\"Available variables:\")\n for var in variables:\n print(\" - %s [%s]\" % (var, type(variables[var])))\n print(\"-\"*50)\n\n if idict:\n for var in variables:\n idict[var] = variables[var]\n return idict\n else:\n return variables\n",
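idl.py above decodes IDL .sav records by reading fixed-width big-endian fields with struct.unpack and then layering record, type, array and structure descriptors on top. A small, self-contained sketch of that primitive-reading pattern follows; it uses an in-memory buffer rather than a real .sav file, which is an assumption of the example.

```python
# Self-contained sketch of the big-endian field readers idl.py is built from:
# scalars occupy 4-byte (or 8-byte) slots and are decoded with '>' format codes.
import io
import struct
import numpy as np


def read_long(f):
    """Signed 32-bit integer, mirroring _read_long above."""
    return np.int32(struct.unpack('>l', f.read(4))[0])


def read_uint16(f):
    """Unsigned 16-bit integer stored in a padded 4-byte slot (_read_uint16)."""
    return np.uint16(struct.unpack('>H', f.read(4)[2:4])[0])


# Round-trip two values through an in-memory buffer instead of a .sav file.
buf = io.BytesIO(struct.pack('>l', -123) + struct.pack('>I', 7))
print(read_long(buf))    # -123
print(read_uint16(buf))  # 7 (value carried in the last two of the four bytes)
```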
"#!/usr/bin/env python\n\"\"\" Test functions for the sparse.linalg.isolve module\n\"\"\"\n\nfrom __future__ import division, print_function, absolute_import\n\nimport numpy as np\n\nfrom numpy.testing import TestCase, assert_equal, assert_array_equal, \\\n assert_, assert_allclose, assert_raises\n\nfrom numpy import zeros, ones, arange, array, abs, max\nfrom numpy.linalg import cond\nfrom scipy.linalg import norm\nfrom scipy.sparse import spdiags, csr_matrix\n\nfrom scipy.sparse.linalg import LinearOperator, aslinearoperator\nfrom scipy.sparse.linalg.isolve import cg, cgs, bicg, bicgstab, gmres, qmr, minres, lgmres\n\n#TODO check that method preserve shape and type\n#TODO test both preconditioner methods\n\nclass Case(object):\n def __init__(self, name, A, skip=None):\n self.name = name\n self.A = A\n if skip is None:\n self.skip = []\n else:\n self.skip = skip\n def __repr__(self):\n return \"<%s>\" % self.name\n\nclass IterativeParams(object):\n def __init__(self):\n # list of tuples (solver, symmetric, positive_definite )\n solvers = [cg, cgs, bicg, bicgstab, gmres, qmr, minres, lgmres]\n sym_solvers = [minres, cg]\n posdef_solvers = [cg]\n real_solvers = [minres]\n\n self.solvers = solvers\n\n # list of tuples (A, symmetric, positive_definite )\n self.cases = []\n\n # Symmetric and Positive Definite\n N = 40\n data = ones((3,N))\n data[0,:] = 2\n data[1,:] = -1\n data[2,:] = -1\n Poisson1D = spdiags(data, [0,-1,1], N, N, format='csr')\n self.Poisson1D = Case(\"poisson1d\", Poisson1D)\n self.cases.append(self.Poisson1D)\n\n # Symmetric and Negative Definite\n self.cases.append(Case(\"neg-poisson1d\", -Poisson1D,\n skip=posdef_solvers))\n\n # Symmetric and Indefinite\n data = array([[6, -5, 2, 7, -1, 10, 4, -3, -8, 9]],dtype='d')\n RandDiag = spdiags( data, [0], 10, 10, format='csr' )\n self.cases.append(Case(\"rand-diag\", RandDiag, skip=posdef_solvers))\n\n # Random real-valued\n np.random.seed(1234)\n data = np.random.rand(4, 4)\n self.cases.append(Case(\"rand\", data, skip=posdef_solvers+sym_solvers))\n\n # Random symmetric real-valued\n np.random.seed(1234)\n data = np.random.rand(4, 4)\n data = data + data.T\n self.cases.append(Case(\"rand-sym\", data, skip=posdef_solvers))\n\n # Random pos-def symmetric real\n np.random.seed(1234)\n data = np.random.rand(9, 9)\n data = np.dot(data.conj(), data.T)\n self.cases.append(Case(\"rand-sym-pd\", data))\n\n # Random complex-valued\n np.random.seed(1234)\n data = np.random.rand(4, 4) + 1j*np.random.rand(4, 4)\n self.cases.append(Case(\"rand-cmplx\", data,\n skip=posdef_solvers+sym_solvers+real_solvers))\n\n # Random hermitian complex-valued\n np.random.seed(1234)\n data = np.random.rand(4, 4) + 1j*np.random.rand(4, 4)\n data = data + data.T.conj()\n self.cases.append(Case(\"rand-cmplx-herm\", data,\n skip=posdef_solvers+real_solvers))\n\n # Random pos-def hermitian complex-valued\n np.random.seed(1234)\n data = np.random.rand(9, 9) + 1j*np.random.rand(9, 9)\n data = np.dot(data.conj(), data.T)\n self.cases.append(Case(\"rand-cmplx-sym-pd\", data, skip=real_solvers))\n\n # Non-symmetric and Positive Definite\n #\n # cgs, qmr, and bicg fail to converge on this one\n # -- algorithmic limitation apparently\n data = ones((2,10))\n data[0,:] = 2\n data[1,:] = -1\n A = spdiags( data, [0,-1], 10, 10, format='csr')\n self.cases.append(Case(\"nonsymposdef\", A,\n skip=sym_solvers+[cgs, qmr, bicg]))\n\ndef setup_module():\n global params\n params = IterativeParams()\n\ndef check_maxiter(solver, case):\n A = case.A\n tol = 1e-12\n\n b = 
arange(A.shape[0], dtype=float)\n x0 = 0*b\n\n residuals = []\n def callback(x):\n residuals.append(norm(b - case.A*x))\n\n x, info = solver(A, b, x0=x0, tol=tol, maxiter=3, callback=callback)\n\n assert_equal(len(residuals), 3)\n assert_equal(info, 3)\n\ndef test_maxiter():\n case = params.Poisson1D\n for solver in params.solvers:\n if solver in case.skip: continue\n yield check_maxiter, solver, case\n\ndef assert_normclose(a, b, tol=1e-8):\n residual = norm(a - b)\n tolerance = tol*norm(b)\n msg = \"residual (%g) not smaller than tolerance %g\" % (residual, tolerance)\n assert_(residual < tolerance, msg=msg)\n\ndef check_convergence(solver, case):\n tol = 1e-8\n\n A = case.A\n\n b = arange(A.shape[0], dtype=float)\n x0 = 0*b\n\n x, info = solver(A, b, x0=x0, tol=tol)\n\n assert_array_equal(x0, 0*b) #ensure that x0 is not overwritten\n assert_equal(info,0)\n assert_normclose(A.dot(x), b, tol=tol)\n\ndef test_convergence():\n for solver in params.solvers:\n for case in params.cases:\n if solver in case.skip: continue\n yield check_convergence, solver, case\n\ndef check_precond_dummy(solver, case):\n tol = 1e-8\n\n def identity(b,which=None):\n \"\"\"trivial preconditioner\"\"\"\n return b\n\n A = case.A\n\n M,N = A.shape\n D = spdiags( [1.0/A.diagonal()], [0], M, N)\n\n b = arange(A.shape[0], dtype=float)\n x0 = 0*b\n\n precond = LinearOperator(A.shape, identity, rmatvec=identity)\n\n if solver is qmr:\n x, info = solver(A, b, M1=precond, M2=precond, x0=x0, tol=tol)\n else:\n x, info = solver(A, b, M=precond, x0=x0, tol=tol)\n assert_equal(info,0)\n assert_normclose(A.dot(x), b, tol)\n\n A = aslinearoperator(A)\n A.psolve = identity\n A.rpsolve = identity\n\n x, info = solver(A, b, x0=x0, tol=tol)\n assert_equal(info,0)\n assert_normclose(A*x, b, tol=tol)\n\ndef test_precond_dummy():\n case = params.Poisson1D\n for solver in params.solvers:\n if solver in case.skip: continue\n yield check_precond_dummy, solver, case\n\ndef test_gmres_basic():\n A = np.vander(np.arange(10) + 1)[:, ::-1]\n b = np.zeros(10)\n b[0] = 1\n x = np.linalg.solve(A, b)\n\n x_gm, err = gmres(A, b, restart=5, maxiter=1)\n\n assert_allclose(x_gm[0], 0.359, rtol=1e-2)\n\ndef test_reentrancy():\n non_reentrant = [cg, cgs, bicg, bicgstab, gmres, qmr]\n reentrant = [lgmres, minres]\n for solver in reentrant + non_reentrant:\n yield _check_reentrancy, solver, solver in reentrant\n\ndef _check_reentrancy(solver, is_reentrant):\n def matvec(x):\n A = np.array([[1.0, 0, 0], [0, 2.0, 0], [0, 0, 3.0]])\n y, info = solver(A, x)\n assert_equal(info, 0)\n return y\n b = np.array([1, 1./2, 1./3])\n op = LinearOperator((3, 3), matvec=matvec, rmatvec=matvec,\n dtype=b.dtype)\n\n if not is_reentrant:\n assert_raises(RuntimeError, solver, op, b)\n else:\n y, info = solver(op, b)\n assert_equal(info, 0)\n assert_allclose(y, [1, 1, 1])\n\n\n#------------------------------------------------------------------------------\n\nclass TestQMR(TestCase):\n def test_leftright_precond(self):\n \"\"\"Check that QMR works with left and right preconditioners\"\"\"\n\n from scipy.sparse.linalg.dsolve import splu\n from scipy.sparse.linalg.interface import LinearOperator\n\n n = 100\n\n dat = ones(n)\n A = spdiags([-2*dat, 4*dat, -dat], [-1,0,1] ,n,n)\n b = arange(n,dtype='d')\n\n L = spdiags([-dat/2, dat], [-1,0], n, n)\n U = spdiags([4*dat, -dat], [ 0,1], n, n)\n\n L_solver = splu(L)\n U_solver = splu(U)\n\n def L_solve(b):\n return L_solver.solve(b)\n def U_solve(b):\n return U_solver.solve(b)\n def LT_solve(b):\n return L_solver.solve(b,'T')\n def 
UT_solve(b):\n return U_solver.solve(b,'T')\n\n M1 = LinearOperator( (n,n), matvec=L_solve, rmatvec=LT_solve )\n M2 = LinearOperator( (n,n), matvec=U_solve, rmatvec=UT_solve )\n\n x,info = qmr(A, b, tol=1e-8, maxiter=15, M1=M1, M2=M2)\n\n assert_equal(info,0)\n assert_normclose(A*x, b, tol=1e-8)\n\nclass TestGMRES(TestCase):\n def test_callback(self):\n\n def store_residual(r, rvec):\n rvec[rvec.nonzero()[0].max()+1] = r\n\n #Define, A,b\n A = csr_matrix(array([[-2,1,0,0,0,0],[1,-2,1,0,0,0],[0,1,-2,1,0,0],[0,0,1,-2,1,0],[0,0,0,1,-2,1],[0,0,0,0,1,-2]]))\n b = ones((A.shape[0],))\n maxiter=1\n rvec = zeros(maxiter+1)\n rvec[0] = 1.0\n callback = lambda r:store_residual(r, rvec)\n x,flag = gmres(A, b, x0=zeros(A.shape[0]), tol=1e-16, maxiter=maxiter, callback=callback)\n diff = max(abs((rvec - array([1.0, 0.81649658092772603]))))\n assert_(diff < 1e-5)\n\n\nif __name__ == \"__main__\":\n import nose\n nose.run(argv=['', __file__])\n",
"from __future__ import division, print_function, absolute_import\n\nfrom numpy.testing import assert_almost_equal, assert_array_equal, \\\n TestCase, run_module_suite\nfrom scipy.interpolate import KroghInterpolator, krogh_interpolate, \\\n BarycentricInterpolator, barycentric_interpolate, \\\n PiecewisePolynomial, piecewise_polynomial_interpolate, \\\n approximate_taylor_polynomial\nfrom scipy.lib.six.moves import xrange\nimport scipy\nimport numpy as np\nfrom scipy.interpolate import splrep, splev\n\nclass CheckKrogh(TestCase):\n def setUp(self):\n self.true_poly = scipy.poly1d([-2,3,1,5,-4])\n self.test_xs = np.linspace(-1,1,100)\n self.xs = np.linspace(-1,1,5)\n self.ys = self.true_poly(self.xs)\n\n def test_lagrange(self):\n P = KroghInterpolator(self.xs,self.ys)\n assert_almost_equal(self.true_poly(self.test_xs),P(self.test_xs))\n\n def test_scalar(self):\n P = KroghInterpolator(self.xs,self.ys)\n assert_almost_equal(self.true_poly(7),P(7))\n assert_almost_equal(self.true_poly(np.array(7)), P(np.array(7)))\n\n def test_derivatives(self):\n P = KroghInterpolator(self.xs,self.ys)\n D = P.derivatives(self.test_xs)\n for i in xrange(D.shape[0]):\n assert_almost_equal(self.true_poly.deriv(i)(self.test_xs),\n D[i])\n\n def test_low_derivatives(self):\n P = KroghInterpolator(self.xs,self.ys)\n D = P.derivatives(self.test_xs,len(self.xs)+2)\n for i in xrange(D.shape[0]):\n assert_almost_equal(self.true_poly.deriv(i)(self.test_xs),\n D[i])\n\n def test_derivative(self):\n P = KroghInterpolator(self.xs,self.ys)\n m = 10\n r = P.derivatives(self.test_xs,m)\n for i in xrange(m):\n assert_almost_equal(P.derivative(self.test_xs,i),r[i])\n\n def test_high_derivative(self):\n P = KroghInterpolator(self.xs,self.ys)\n for i in xrange(len(self.xs),2*len(self.xs)):\n assert_almost_equal(P.derivative(self.test_xs,i),\n np.zeros(len(self.test_xs)))\n\n def test_hermite(self):\n xs = [0,0,0,1,1,1,2]\n ys = [self.true_poly(0),\n self.true_poly.deriv(1)(0),\n self.true_poly.deriv(2)(0),\n self.true_poly(1),\n self.true_poly.deriv(1)(1),\n self.true_poly.deriv(2)(1),\n self.true_poly(2)]\n P = KroghInterpolator(self.xs,self.ys)\n assert_almost_equal(self.true_poly(self.test_xs),P(self.test_xs))\n\n def test_vector(self):\n xs = [0, 1, 2]\n ys = np.array([[0,1],[1,0],[2,1]])\n P = KroghInterpolator(xs,ys)\n Pi = [KroghInterpolator(xs,ys[:,i]) for i in xrange(ys.shape[1])]\n test_xs = np.linspace(-1,3,100)\n assert_almost_equal(P(test_xs),\n np.rollaxis(np.asarray([p(test_xs) for p in Pi]),-1))\n assert_almost_equal(P.derivatives(test_xs),\n np.transpose(np.asarray([p.derivatives(test_xs) for p in Pi]),\n (1,2,0)))\n\n def test_empty(self):\n P = KroghInterpolator(self.xs,self.ys)\n assert_array_equal(P([]), [])\n\n def test_shapes_scalarvalue(self):\n P = KroghInterpolator(self.xs,self.ys)\n assert_array_equal(np.shape(P(0)), ())\n assert_array_equal(np.shape(P(np.array(0))), ())\n assert_array_equal(np.shape(P([0])), (1,))\n assert_array_equal(np.shape(P([0,1])), (2,))\n\n def test_shapes_scalarvalue_derivative(self):\n P = KroghInterpolator(self.xs,self.ys)\n n = P.n\n assert_array_equal(np.shape(P.derivatives(0)), (n,))\n assert_array_equal(np.shape(P.derivatives(np.array(0))), (n,))\n assert_array_equal(np.shape(P.derivatives([0])), (n,1))\n assert_array_equal(np.shape(P.derivatives([0,1])), (n,2))\n\n def test_shapes_vectorvalue(self):\n P = KroghInterpolator(self.xs,np.outer(self.ys,np.arange(3)))\n assert_array_equal(np.shape(P(0)), (3,))\n assert_array_equal(np.shape(P([0])), (1,3))\n 
assert_array_equal(np.shape(P([0,1])), (2,3))\n\n def test_shapes_1d_vectorvalue(self):\n P = KroghInterpolator(self.xs,np.outer(self.ys,[1]))\n assert_array_equal(np.shape(P(0)), (1,))\n assert_array_equal(np.shape(P([0])), (1,1))\n assert_array_equal(np.shape(P([0,1])), (2,1))\n\n def test_shapes_vectorvalue_derivative(self):\n P = KroghInterpolator(self.xs,np.outer(self.ys,np.arange(3)))\n n = P.n\n assert_array_equal(np.shape(P.derivatives(0)), (n,3))\n assert_array_equal(np.shape(P.derivatives([0])), (n,1,3))\n assert_array_equal(np.shape(P.derivatives([0,1])), (n,2,3))\n\n def test_wrapper(self):\n P = KroghInterpolator(self.xs,self.ys)\n assert_almost_equal(P(self.test_xs),krogh_interpolate(self.xs,self.ys,self.test_xs))\n assert_almost_equal(P.derivative(self.test_xs,2),krogh_interpolate(self.xs,self.ys,self.test_xs,der=2))\n assert_almost_equal(P.derivatives(self.test_xs,2),krogh_interpolate(self.xs,self.ys,self.test_xs,der=[0,1]))\n\nclass CheckTaylor(TestCase):\n def test_exponential(self):\n degree = 5\n p = approximate_taylor_polynomial(np.exp, 0, degree, 1, 15)\n for i in xrange(degree+1):\n assert_almost_equal(p(0),1)\n p = p.deriv()\n assert_almost_equal(p(0),0)\n\nclass CheckBarycentric(TestCase):\n def setUp(self):\n self.true_poly = scipy.poly1d([-2,3,1,5,-4])\n self.test_xs = np.linspace(-1,1,100)\n self.xs = np.linspace(-1,1,5)\n self.ys = self.true_poly(self.xs)\n\n def test_lagrange(self):\n P = BarycentricInterpolator(self.xs,self.ys)\n assert_almost_equal(self.true_poly(self.test_xs),P(self.test_xs))\n\n def test_scalar(self):\n P = BarycentricInterpolator(self.xs,self.ys)\n assert_almost_equal(self.true_poly(7),P(7))\n assert_almost_equal(self.true_poly(np.array(7)),P(np.array(7)))\n\n def test_delayed(self):\n P = BarycentricInterpolator(self.xs)\n P.set_yi(self.ys)\n assert_almost_equal(self.true_poly(self.test_xs),P(self.test_xs))\n\n def test_append(self):\n P = BarycentricInterpolator(self.xs[:3],self.ys[:3])\n P.add_xi(self.xs[3:],self.ys[3:])\n assert_almost_equal(self.true_poly(self.test_xs),P(self.test_xs))\n\n def test_vector(self):\n xs = [0, 1, 2]\n ys = np.array([[0,1],[1,0],[2,1]])\n P = BarycentricInterpolator(xs,ys)\n Pi = [BarycentricInterpolator(xs,ys[:,i]) for i in xrange(ys.shape[1])]\n test_xs = np.linspace(-1,3,100)\n assert_almost_equal(P(test_xs),\n np.rollaxis(np.asarray([p(test_xs) for p in Pi]),-1))\n\n def test_shapes_scalarvalue(self):\n P = BarycentricInterpolator(self.xs,self.ys)\n assert_array_equal(np.shape(P(0)), ())\n assert_array_equal(np.shape(P(np.array(0))), ())\n assert_array_equal(np.shape(P([0])), (1,))\n assert_array_equal(np.shape(P([0,1])), (2,))\n\n def test_shapes_vectorvalue(self):\n P = BarycentricInterpolator(self.xs,np.outer(self.ys,np.arange(3)))\n assert_array_equal(np.shape(P(0)), (3,))\n assert_array_equal(np.shape(P([0])), (1,3))\n assert_array_equal(np.shape(P([0,1])), (2,3))\n\n def test_shapes_1d_vectorvalue(self):\n P = BarycentricInterpolator(self.xs,np.outer(self.ys,[1]))\n assert_array_equal(np.shape(P(0)), (1,))\n assert_array_equal(np.shape(P([0])), (1,1))\n assert_array_equal(np.shape(P([0,1])), (2,1))\n\n def test_wrapper(self):\n P = BarycentricInterpolator(self.xs,self.ys)\n assert_almost_equal(P(self.test_xs),barycentric_interpolate(self.xs,self.ys,self.test_xs))\n\nclass CheckPiecewise(TestCase):\n def setUp(self):\n self.tck = splrep([0,1,2,3,4,5], [0,10,-1,3,7,2], s=0)\n self.test_xs = np.linspace(-1,6,100)\n self.spline_ys = splev(self.test_xs, self.tck)\n self.spline_yps = 
splev(self.test_xs, self.tck, der=1)\n self.xi = np.unique(self.tck[0])\n self.yi = [[splev(x, self.tck, der=j) for j in xrange(3)] for x in self.xi]\n\n def test_construction(self):\n P = PiecewisePolynomial(self.xi, self.yi, 3)\n assert_almost_equal(P(self.test_xs), self.spline_ys)\n\n def test_scalar(self):\n P = PiecewisePolynomial(self.xi,self.yi,3)\n assert_almost_equal(P(self.test_xs[0]),self.spline_ys[0])\n assert_almost_equal(P.derivative(self.test_xs[0],1),self.spline_yps[0])\n assert_almost_equal(P(np.array(self.test_xs[0])),self.spline_ys[0])\n assert_almost_equal(P.derivative(np.array(self.test_xs[0]),1),\n self.spline_yps[0])\n\n def test_derivative(self):\n P = PiecewisePolynomial(self.xi,self.yi,3)\n assert_almost_equal(P.derivative(self.test_xs,1),self.spline_yps)\n\n def test_derivatives(self):\n P = PiecewisePolynomial(self.xi,self.yi,3)\n m = 4\n r = P.derivatives(self.test_xs,m)\n #print r.shape, r\n for i in xrange(m):\n assert_almost_equal(P.derivative(self.test_xs,i),r[i])\n\n def test_vector(self):\n xs = [0, 1, 2]\n ys = [[[0,1]],[[1,0],[-1,-1]],[[2,1]]]\n P = PiecewisePolynomial(xs,ys)\n Pi = [PiecewisePolynomial(xs,[[yd[i] for yd in y] for y in ys])\n for i in xrange(len(ys[0][0]))]\n test_xs = np.linspace(-1,3,100)\n assert_almost_equal(P(test_xs),\n np.rollaxis(np.asarray([p(test_xs) for p in Pi]),-1))\n assert_almost_equal(P.derivative(test_xs,1),\n np.transpose(np.asarray([p.derivative(test_xs,1) for p in Pi]),\n (1,0)))\n\n def test_incremental(self):\n P = PiecewisePolynomial([self.xi[0]], [self.yi[0]], 3)\n for i in xrange(1,len(self.xi)):\n P.append(self.xi[i],self.yi[i],3)\n assert_almost_equal(P(self.test_xs),self.spline_ys)\n\n def test_shapes_scalarvalue(self):\n P = PiecewisePolynomial(self.xi,self.yi,4)\n assert_array_equal(np.shape(P(0)), ())\n assert_array_equal(np.shape(P(np.array(0))), ())\n assert_array_equal(np.shape(P([0])), (1,))\n assert_array_equal(np.shape(P([0,1])), (2,))\n\n def test_shapes_scalarvalue_derivative(self):\n P = PiecewisePolynomial(self.xi,self.yi,4)\n n = 4\n assert_array_equal(np.shape(P.derivative(0,1)), ())\n assert_array_equal(np.shape(P.derivative(np.array(0),1)), ())\n assert_array_equal(np.shape(P.derivative([0],1)), (1,))\n assert_array_equal(np.shape(P.derivative([0,1],1)), (2,))\n\n def test_shapes_vectorvalue(self):\n yi = np.multiply.outer(np.asarray(self.yi),np.arange(3))\n P = PiecewisePolynomial(self.xi,yi,4)\n assert_array_equal(np.shape(P(0)), (3,))\n assert_array_equal(np.shape(P([0])), (1,3))\n assert_array_equal(np.shape(P([0,1])), (2,3))\n\n def test_shapes_vectorvalue_1d(self):\n yi = np.multiply.outer(np.asarray(self.yi),np.arange(1))\n P = PiecewisePolynomial(self.xi,yi,4)\n assert_array_equal(np.shape(P(0)), (1,))\n assert_array_equal(np.shape(P([0])), (1,1))\n assert_array_equal(np.shape(P([0,1])), (2,1))\n\n def test_shapes_vectorvalue_derivative(self):\n P = PiecewisePolynomial(self.xi,np.multiply.outer(self.yi,np.arange(3)),4)\n n = 4\n assert_array_equal(np.shape(P.derivative(0,1)), (3,))\n assert_array_equal(np.shape(P.derivative([0],1)), (1,3))\n assert_array_equal(np.shape(P.derivative([0,1],1)), (2,3))\n\n def test_wrapper(self):\n P = PiecewisePolynomial(self.xi,self.yi)\n assert_almost_equal(P(self.test_xs),piecewise_polynomial_interpolate(self.xi,self.yi,self.test_xs))\n assert_almost_equal(P.derivative(self.test_xs,2),piecewise_polynomial_interpolate(self.xi,self.yi,self.test_xs,der=2))\n 
assert_almost_equal(P.derivatives(self.test_xs,2),piecewise_polynomial_interpolate(self.xi,self.yi,self.test_xs,der=[0,1]))\n\n\nif __name__=='__main__':\n run_module_suite()\n"
] | [
[
"numpy.dot",
"numpy.amax",
"numpy.linspace",
"numpy.asarray",
"numpy.concatenate",
"numpy.zeros_like",
"numpy.searchsorted",
"numpy.where",
"scipy.lib.six.moves.xrange",
"numpy.arange",
"numpy.multiply.outer",
"numpy.atleast_1d",
"numpy.zeros",
"numpy.nonzero",
"scipy.misc.factorial",
"numpy.sum",
"numpy.multiply.reduce",
"numpy.sign",
"numpy.subtract.outer",
"numpy.isscalar",
"numpy.vstack"
],
[
"numpy.complex128",
"numpy.complex64",
"numpy.compat.asstr",
"numpy.array",
"numpy.recarray"
],
[
"scipy.sparse.linalg.isolve.qmr",
"numpy.testing.assert_equal",
"numpy.arange",
"scipy.sparse.spdiags",
"scipy.sparse.linalg.isolve.gmres",
"scipy.sparse.linalg.aslinearoperator",
"scipy.linalg.norm",
"numpy.zeros",
"scipy.sparse.linalg.dsolve.splu",
"numpy.testing.assert_raises",
"numpy.random.rand",
"numpy.testing.assert_",
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.linalg.solve",
"numpy.random.seed",
"numpy.ones",
"numpy.testing.assert_array_equal",
"scipy.sparse.linalg.interface.LinearOperator"
],
[
"scipy.interpolate.approximate_taylor_polynomial",
"scipy.interpolate.splrep",
"numpy.testing.run_module_suite",
"numpy.linspace",
"numpy.unique",
"scipy.interpolate.krogh_interpolate",
"scipy.interpolate.KroghInterpolator",
"numpy.asarray",
"numpy.arange",
"scipy.interpolate.barycentric_interpolate",
"scipy.interpolate.piecewise_polynomial_interpolate",
"scipy.interpolate.BarycentricInterpolator",
"scipy.interpolate.splev",
"scipy.interpolate.PiecewisePolynomial",
"scipy.poly1d",
"numpy.outer",
"numpy.array",
"scipy.lib.six.moves.xrange"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.14",
"0.15",
"0.19",
"0.18",
"1.2",
"0.12",
"1.0",
"0.17",
"0.16"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.14",
"0.15",
"0.12",
"0.10"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
hendriksanta/probability | [
"6eedc0f01a539b3bee7be28ccd2a9cce15d92f7f",
"6eedc0f01a539b3bee7be28ccd2a9cce15d92f7f",
"6eedc0f01a539b3bee7be28ccd2a9cce15d92f7f",
"6eedc0f01a539b3bee7be28ccd2a9cce15d92f7f"
] | [
"tensorflow_probability/python/distributions/student_t_process.py",
"tensorflow_probability/python/distributions/joint_distribution_vmap_mixin.py",
"tensorflow_probability/python/distributions/exponentially_modified_gaussian.py",
"tensorflow_probability/python/distributions/mvn_linear_operator.py"
] | [
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"The StudentTProcess distribution class.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nimport warnings\n\n# Dependency imports\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python.bijectors import identity as identity_bijector\nfrom tensorflow_probability.python.distributions import distribution\nfrom tensorflow_probability.python.distributions import multivariate_student_t\nfrom tensorflow_probability.python.distributions import student_t\nfrom tensorflow_probability.python.internal import assert_util\nfrom tensorflow_probability.python.internal import distribution_util\nfrom tensorflow_probability.python.internal import dtype_util\nfrom tensorflow_probability.python.internal import reparameterization\nfrom tensorflow_probability.python.internal import tensor_util\nfrom tensorflow_probability.python.internal import tensorshape_util\nfrom tensorflow.python.util import deprecation # pylint: disable=g-direct-tensorflow-import\n\n__all__ = [\n 'StudentTProcess',\n]\n\n\ndef _add_diagonal_shift(matrix, shift):\n return tf.linalg.set_diag(\n matrix, tf.linalg.diag_part(matrix) + shift, name='add_diagonal_shift')\n\n\ndef make_cholesky_factored_marginal_fn(jitter):\n \"\"\"Construct a `marginal_fn` for use with `tfd.StudentTProcess`.\n\n The returned function computes the Cholesky factorization of the input\n covariance plus a diagonal jitter, and uses that for the `scale` of a\n `tfd.MultivariateNormalLinearOperator`.\n\n Args:\n jitter: `float` scalar `Tensor` added to the diagonal of the covariance\n matrix to ensure positive definiteness of the covariance matrix.\n\n Returns:\n marginal_fn: A Python function that takes a location, covariance matrix,\n optional `validate_args`, `allow_nan_stats` and `name` arguments, and\n returns a `tfd.MultivariateNormalLinearOperator`.\n \"\"\"\n def marginal_fn(\n df,\n loc,\n covariance,\n validate_args=False,\n allow_nan_stats=False,\n name='marginal_distribution'):\n squared_scale = ((df - 2.) / df)[\n ..., tf.newaxis, tf.newaxis] * covariance\n scale = tf.linalg.LinearOperatorLowerTriangular(\n tf.linalg.cholesky(_add_diagonal_shift(squared_scale, jitter)),\n is_non_singular=True,\n name='StudentTProcessScaleLinearOperator')\n return multivariate_student_t.MultivariateStudentTLinearOperator(\n df=df,\n loc=loc,\n scale=scale,\n validate_args=validate_args,\n allow_nan_stats=allow_nan_stats,\n name=name)\n\n return marginal_fn\n\n\nclass StudentTProcess(distribution.Distribution):\n \"\"\"Marginal distribution of a Student's T process at finitely many points.\n\n A Student's T process (TP) is an indexed collection of random variables, any\n finite collection of which are jointly Multivariate Student's T. 
While this\n definition applies to finite index sets, it is typically implicit that the\n index set is infinite; in applications, it is often some finite dimensional\n real or complex vector space. In such cases, the TP may be thought of as a\n distribution over (real- or complex-valued) functions defined over the index\n set.\n\n Just as Student's T distributions are fully specified by their degrees of\n freedom, location and scale, a Student's T process can be completely specified\n by a degrees of freedom parameter, mean function and covariance function.\n Let `S` denote the index set and `K` the space in\n which each indexed random variable takes its values (again, often R or C).\n The mean function is then a map `m: S -> K`, and the covariance function,\n or kernel, is a positive-definite function `k: (S x S) -> K`. The properties\n of functions drawn from a TP are entirely dictated (up to translation) by\n the form of the kernel function.\n\n This `Distribution` represents the marginal joint distribution over function\n values at a given finite collection of points `[x[1], ..., x[N]]` from the\n index set `S`. By definition, this marginal distribution is just a\n multivariate Student's T distribution, whose mean is given by the vector\n `[ m(x[1]), ..., m(x[N]) ]` and whose covariance matrix is constructed from\n pairwise applications of the kernel function to the given inputs:\n\n ```none\n | k(x[1], x[1]) k(x[1], x[2]) ... k(x[1], x[N]) |\n | k(x[2], x[1]) k(x[2], x[2]) ... k(x[2], x[N]) |\n | ... ... ... |\n | k(x[N], x[1]) k(x[N], x[2]) ... k(x[N], x[N]) |\n ```\n\n For this to be a valid covariance matrix, it must be symmetric and positive\n definite; hence the requirement that `k` be a positive definite function\n (which, by definition, says that the above procedure will yield PD matrices).\n\n Note also we use a parameterization as suggested in [1], which requires `df`\n to be greater than 2. This allows for the covariance for any finite\n dimensional marginal of the TP (a multivariate Student's T distribution) to\n just be the PD matrix generated by the kernel.\n\n\n #### Mathematical Details\n\n The probability density function (pdf) is a multivariate Student's T whose\n parameters are derived from the TP's properties:\n\n ```none\n pdf(x; df, index_points, mean_fn, kernel) = MultivariateStudentT(df, loc, K)\n K = (df - 2) / df * (kernel.matrix(index_points, index_points) +\n observation_noise_variance * eye(N))\n loc = (x - mean_fn(index_points))^T @ K @ (x - mean_fn(index_points))\n ```\n\n where:\n\n * `df` is the degrees of freedom parameter for the TP.\n * `index_points` are points in the index set over which the TP is defined,\n * `mean_fn` is a callable mapping the index set to the TP's mean values,\n * `kernel` is `PositiveSemidefiniteKernel`-like and represents the covariance\n function of the TP,\n * `observation_noise_variance` is a term added to the diagonal of the kernel\n matrix. In the limit of `df` to `inf`, this represents the observation noise\n of a gaussian likelihood.\n * `eye(N)` is an N-by-N identity matrix.\n\n #### Examples\n\n ##### Draw joint samples from a TP prior\n\n ```python\n import numpy as np\n import tensorflow.compat.v2 as tf\n import tensorflow_probability as tfp\n\n tf.enable_v2_behavior()\n\n tfd = tfp.distributions\n psd_kernels = tfp.math.psd_kernels\n\n num_points = 100\n # Index points should be a collection (100, here) of feature vectors. 
In this\n # example, we're using 1-d vectors, so we just need to reshape the output from\n # np.linspace, to give a shape of (100, 1).\n index_points = np.expand_dims(np.linspace(-1., 1., num_points), -1)\n\n # Define a kernel with default parameters.\n kernel = psd_kernels.ExponentiatedQuadratic()\n\n tp = tfd.StudentTProcess(3., kernel, index_points)\n\n samples = tp.sample(10)\n # ==> 10 independently drawn, joint samples at `index_points`\n\n noisy_tp = tfd.StudentTProcess(\n df=3.,\n kernel=kernel,\n index_points=index_points)\n noisy_samples = noisy_tp.sample(10)\n # ==> 10 independently drawn, noisy joint samples at `index_points`\n ```\n\n ##### Optimize kernel parameters via maximum marginal likelihood.\n\n ```python\n # Suppose we have some data from a known function. Note the index points in\n # general have shape `[b1, ..., bB, f1, ..., fF]` (here we assume `F == 1`),\n # so we need to explicitly consume the feature dimensions (just the last one\n # here).\n f = lambda x: np.sin(10*x[..., 0]) * np.exp(-x[..., 0]**2)\n observed_index_points = np.expand_dims(np.random.uniform(-1., 1., 50), -1)\n # Squeeze to take the shape from [50, 1] to [50].\n observed_values = f(observed_index_points)\n\n amplitude = tfp.util.TransformedVariable(\n 1., tfp.bijectors.Softplus(), dtype=np.float64, name='amplitude')\n length_scale = tfp.util.TransformedVariable(\n 1., tfp.bijectors.Softplus(), dtype=np.float64, name='length_scale')\n\n # Define a kernel with trainable parameters.\n kernel = psd_kernels.ExponentiatedQuadratic(\n amplitude=amplitude,\n length_scale=length_scale)\n\n tp = tfd.StudentTProcess(3., kernel, observed_index_points)\n\n optimizer = tf.optimizers.Adam()\n\n @tf.function\n def optimize():\n with tf.GradientTape() as tape:\n loss = -tp.log_prob(observed_values)\n grads = tape.gradient(loss, tp.trainable_variables)\n optimizer.apply_gradients(zip(grads, tp.trainable_variables))\n return loss\n\n for i in range(1000):\n nll = optimize()\n if i % 100 == 0:\n print(\"Step {}: NLL = {}\".format(i, nll))\n print(\"Final NLL = {}\".format(nll))\n ```\n\n #### References\n\n [1]: Amar Shah, Andrew Gordon Wilson, and Zoubin Ghahramani. Student-t\n Processes as Alternatives to Gaussian Processes. In _Artificial\n Intelligence and Statistics_, 2014.\n https://www.cs.cmu.edu/~andrewgw/tprocess.pdf\n \"\"\"\n\n @deprecation.deprecated_args(\n '2021-06-26',\n '`jitter` is deprecated; please use `marginal_fn` directly.',\n 'jitter')\n def __init__(self,\n df,\n kernel,\n index_points=None,\n mean_fn=None,\n observation_noise_variance=0.,\n marginal_fn=None,\n jitter=1e-6,\n validate_args=False,\n allow_nan_stats=False,\n name='StudentTProcess'):\n \"\"\"Instantiate a StudentTProcess Distribution.\n\n Args:\n df: Positive Floating-point `Tensor` representing the degrees of freedom.\n Must be greater than 2.\n kernel: `PositiveSemidefiniteKernel`-like instance representing the\n TP's covariance function.\n index_points: `float` `Tensor` representing finite (batch of) vector(s) of\n points in the index set over which the TP is defined. Shape has the form\n `[b1, ..., bB, e, f1, ..., fF]` where `F` is the number of feature\n dimensions and must equal `kernel.feature_ndims` and `e` is the number\n (size) of index points in each batch. Ultimately this distribution\n corresponds to a `e`-dimensional multivariate Student's T. 
The batch\n shape must be broadcastable with `kernel.batch_shape` and any batch dims\n yielded by `mean_fn`.\n mean_fn: Python `callable` that acts on `index_points` to produce a (batch\n of) vector(s) of mean values at `index_points`. Takes a `Tensor` of\n shape `[b1, ..., bB, f1, ..., fF]` and returns a `Tensor` whose shape is\n broadcastable with `[b1, ..., bB]`. Default value: `None` implies\n constant zero function.\n observation_noise_variance: `float` `Tensor` representing (batch of)\n scalar variance(s) of the noise in the Normal likelihood\n distribution of the model. If batched, the batch shape must be\n broadcastable with the shapes of all other batched parameters\n (`kernel.batch_shape`, `index_points`, etc.).\n Default value: `0.`\n marginal_fn: A Python callable that takes a location, covariance matrix,\n optional `validate_args`, `allow_nan_stats` and `name` arguments, and\n returns a multivariate normal subclass of `tfd.Distribution`.\n Default value: `None`, in which case a Cholesky-factorizing function is\n is created using `make_cholesky_factorizing_marginal_fn` and the\n `jitter` argument.\n jitter: `float` scalar `Tensor` added to the diagonal of the covariance\n matrix to ensure positive definiteness of the covariance matrix.\n Default value: `1e-6`.\n validate_args: Python `bool`, default `False`. When `True` distribution\n parameters are checked for validity despite possibly degrading runtime\n performance. When `False` invalid inputs may silently render incorrect\n outputs.\n Default value: `False`.\n allow_nan_stats: Python `bool`, default `True`. When `True`,\n statistics (e.g., mean, mode, variance) use the value \"`NaN`\" to\n indicate the result is undefined. When `False`, an exception is raised\n if one or more of the statistic's batch members are undefined.\n Default value: `False`.\n name: Python `str` name prefixed to Ops created by this class.\n Default value: \"StudentTProcess\".\n\n Raises:\n ValueError: if `mean_fn` is not `None` and is not callable.\n \"\"\"\n parameters = dict(locals())\n with tf.name_scope(name) as name:\n dtype = dtype_util.common_dtype(\n [df, index_points, observation_noise_variance, jitter], tf.float32)\n df = tensor_util.convert_nonref_to_tensor(df, dtype=dtype, name='df')\n observation_noise_variance = tensor_util.convert_nonref_to_tensor(\n observation_noise_variance,\n dtype=dtype,\n name='observation_noise_variance')\n index_points = tensor_util.convert_nonref_to_tensor(\n index_points, dtype=dtype, name='index_points')\n jitter = tensor_util.convert_nonref_to_tensor(\n jitter, dtype=dtype, name='jitter')\n\n self._kernel = kernel\n self._index_points = index_points\n # Default to a constant zero function, borrowing the dtype from\n # index_points to ensure consistency.\n if mean_fn is None:\n mean_fn = lambda x: tf.zeros([1], dtype=dtype)\n else:\n if not callable(mean_fn):\n raise ValueError('`mean_fn` must be a Python callable')\n self._df = df\n self._observation_noise_variance = observation_noise_variance\n self._mean_fn = mean_fn\n self._jitter = jitter\n if marginal_fn is None:\n self._marginal_fn = make_cholesky_factored_marginal_fn(jitter)\n else:\n self._marginal_fn = marginal_fn\n\n with tf.name_scope('init'):\n super(StudentTProcess, self).__init__(\n dtype=dtype,\n reparameterization_type=reparameterization.FULLY_REPARAMETERIZED,\n validate_args=validate_args,\n allow_nan_stats=allow_nan_stats,\n parameters=parameters,\n name=name)\n\n def _is_univariate_marginal(self, index_points):\n \"\"\"True if the given 
index_points would yield a univariate marginal.\n\n Args:\n index_points: the set of index set locations at which to compute the\n marginal Student T distribution. If this set is of size 1, the marginal is\n univariate.\n\n Returns:\n is_univariate: Boolean indicating whether the marginal is univariate or\n multivariate. In the case of dynamic shape in the number of index points,\n defaults to \"multivariate\" since that's the best we can do.\n \"\"\"\n num_index_points = tf.compat.dimension_value(\n index_points.shape[-(self.kernel.feature_ndims + 1)])\n if num_index_points is None:\n warnings.warn(\n 'Unable to detect statically whether the number of index_points is '\n '1. As a result, defaulting to treating the marginal Student T '\n 'Process at `index_points` as a multivariate Student T. This makes '\n 'some methods, like `cdf` unavailable.')\n return num_index_points == 1\n\n def _compute_covariance(self, index_points):\n kernel_matrix = self.kernel.matrix(index_points, index_points)\n if self._is_univariate_marginal(index_points):\n # kernel_matrix thus has shape [..., 1, 1]; squeeze off the last dims and\n # tack on the observation noise variance.\n return (tf.squeeze(kernel_matrix, axis=[-2, -1]) +\n self.observation_noise_variance)\n else:\n observation_noise_variance = tf.convert_to_tensor(\n self.observation_noise_variance)\n # We are compute K + obs_noise_variance * I. The shape of this matrix\n # is going to be a broadcast of the shapes of K and obs_noise_variance *\n # I.\n broadcast_shape = distribution_util.get_broadcast_shape(\n kernel_matrix,\n # We pad with two single dimension since this represents a batch of\n # scaled identity matrices.\n observation_noise_variance[..., tf.newaxis, tf.newaxis])\n\n kernel_matrix = tf.broadcast_to(kernel_matrix, broadcast_shape)\n return _add_diagonal_shift(\n kernel_matrix, observation_noise_variance[..., tf.newaxis])\n\n def get_marginal_distribution(self, index_points=None):\n \"\"\"Compute the marginal over function values at `index_points`.\n\n Args:\n index_points: `float` `Tensor` representing finite (batch of) vector(s) of\n points in the index set over which the TP is defined. Shape has the form\n `[b1, ..., bB, e, f1, ..., fF]` where `F` is the number of feature\n dimensions and must equal `kernel.feature_ndims` and `e` is the number\n (size) of index points in each batch. Ultimately this distribution\n corresponds to a `e`-dimensional multivariate student t. The batch shape\n must be broadcastable with `kernel.batch_shape` and any batch dims\n yielded by `mean_fn`.\n\n Returns:\n marginal: a `StudentT` or `MultivariateStudentT` distribution,\n according to whether `index_points` consists of one or many index\n points, respectively.\n \"\"\"\n with self._name_and_control_scope('get_marginal_distribution'):\n df = tf.convert_to_tensor(self.df)\n index_points = self._get_index_points(index_points)\n covariance = self._compute_covariance(index_points)\n loc = self._mean_fn(index_points)\n\n # If we're sure the number of index points is 1, we can just construct a\n # scalar Normal. This has computational benefits and supports things like\n # CDF that aren't otherwise straightforward to provide.\n if self._is_univariate_marginal(index_points):\n squared_scale = (df - 2.) 
/ df * covariance\n scale = tf.sqrt(squared_scale)\n # `loc` has a trailing 1 in the shape; squeeze it.\n loc = tf.squeeze(loc, axis=-1)\n return student_t.StudentT(\n df=df,\n loc=loc,\n scale=scale,\n validate_args=self.validate_args,\n allow_nan_stats=self.allow_nan_stats,\n name='marginal_distribution')\n else:\n return self._marginal_fn(\n df=df,\n loc=loc,\n covariance=covariance,\n validate_args=self.validate_args,\n allow_nan_stats=self.allow_nan_stats,\n name='marginal_distribution')\n\n @property\n def df(self):\n return self._df\n\n @property\n def observation_noise_variance(self):\n return self._observation_noise_variance\n\n @property\n def mean_fn(self):\n return self._mean_fn\n\n @property\n def kernel(self):\n return self._kernel\n\n @property\n def index_points(self):\n return self._index_points\n\n @property\n def marginal_fn(self):\n return self._marginal_fn\n\n @property\n def jitter(self):\n return self._jitter\n\n def _get_index_points(self, index_points=None):\n \"\"\"Return `index_points` if not None, else `self._index_points`.\n\n Args:\n index_points: if given, this is what is returned; else,\n `self._index_points`\n\n Returns:\n index_points: the given arg, if not None, else the class member\n `self._index_points`.\n\n Rases:\n ValueError: if `index_points` and `self._index_points` are both `None`.\n \"\"\"\n if self._index_points is None and index_points is None:\n raise ValueError(\n 'This StudentTProcess instance was not instantiated with a value for '\n 'index_points. One must therefore be provided when calling sample, '\n 'log_prob, and other such methods.')\n return (index_points if index_points is not None\n else tf.convert_to_tensor(self._index_points))\n\n def _log_prob(self, value, index_points=None):\n return self.get_marginal_distribution(index_points).log_prob(value)\n\n def _batch_shape_tensor(self, index_points=None):\n index_points = self._get_index_points(index_points)\n return functools.reduce(tf.broadcast_dynamic_shape, [\n tf.shape(index_points)[:-(self.kernel.feature_ndims + 1)],\n self.kernel.batch_shape_tensor(),\n tf.shape(self.observation_noise_variance),\n tf.shape(self.df)\n ])\n\n def _batch_shape(self, index_points=None):\n index_points = (\n index_points if index_points is not None else self._index_points)\n return functools.reduce(\n tf.broadcast_static_shape,\n [index_points.shape[:-(self.kernel.feature_ndims + 1)],\n self.kernel.batch_shape,\n self.observation_noise_variance.shape,\n self.df.shape])\n\n def _event_shape_tensor(self, index_points=None):\n index_points = self._get_index_points(index_points)\n if self._is_univariate_marginal(index_points):\n return tf.constant([], dtype=tf.int32)\n else:\n # The examples index is one position to the left of the feature dims.\n examples_index = -(self.kernel.feature_ndims + 1)\n return tf.shape(index_points)[examples_index:examples_index + 1]\n\n def _event_shape(self, index_points=None):\n index_points = (\n index_points if index_points is not None else self._index_points)\n if self._is_univariate_marginal(index_points):\n return tf.TensorShape([])\n else:\n # The examples index is one position to the left of the feature dims.\n examples_index = -(self.kernel.feature_ndims + 1)\n shape = index_points.shape[examples_index:examples_index + 1]\n if tensorshape_util.rank(shape) is None:\n return tf.TensorShape([None])\n return shape\n\n def _sample_n(self, n, seed=None, index_points=None):\n return self.get_marginal_distribution(index_points).sample(n, seed=seed)\n\n def 
_log_survival_function(self, value, index_points=None):\n return self.get_marginal_distribution(\n index_points).log_survival_function(value)\n\n def _survival_function(self, value, index_points=None):\n return self.get_marginal_distribution(index_points).survival_function(value)\n\n def _log_cdf(self, value, index_points=None):\n return self.get_marginal_distribution(index_points).log_cdf(value)\n\n def _entropy(self, index_points=None):\n return self.get_marginal_distribution(index_points).entropy()\n\n def _mean(self, index_points=None):\n return self.get_marginal_distribution(index_points).mean()\n\n def _quantile(self, value, index_points=None):\n return self.get_marginal_distribution(index_points).quantile(value)\n\n def _stddev(self, index_points=None):\n return tf.sqrt(self._variance(index_points=index_points))\n\n def _variance(self, index_points=None):\n index_points = self._get_index_points(index_points)\n\n kernel_diag = self.kernel.apply(index_points, index_points, example_ndims=1)\n if self._is_univariate_marginal(index_points):\n return (tf.squeeze(kernel_diag, axis=[-1]) +\n self.observation_noise_variance)\n else:\n # We are computing diag(K + obs_noise_variance * I) = diag(K) +\n # obs_noise_variance. We pad obs_noise_variance with a dimension in order\n # to broadcast batch shapes of kernel_diag and obs_noise_variance (since\n # kernel_diag has an extra dimension corresponding to the number of index\n # points).\n return kernel_diag + self.observation_noise_variance[..., tf.newaxis]\n\n def _covariance(self, index_points=None):\n # Using the result of get_marginal_distribution would involve an extra\n # matmul, and possibly even an unnecessary cholesky first. We can avoid that\n # by going straight through the kernel function.\n return self._compute_covariance(self._get_index_points(index_points))\n\n def _mode(self, index_points=None):\n return self.get_marginal_distribution(index_points).mode()\n\n def _default_event_space_bijector(self):\n return identity_bijector.Identity(validate_args=self.validate_args)\n\n def _parameter_control_dependencies(self, is_init):\n if not self.validate_args:\n return []\n assertions = []\n if is_init != tensor_util.is_ref(self.df):\n assertions.append(\n assert_util.assert_greater(\n self.df, dtype_util.as_numpy_dtype(self.df.dtype)(2.),\n message='`df` must be greater than 2.'))\n return assertions\n",
"# Copyright 2020 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"`JointDistribution` mixin class implementing automatic vectorization.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python.bijectors import bijector as bijector_lib\nfrom tensorflow_probability.python.distributions import joint_distribution as joint_distribution_lib\nfrom tensorflow_probability.python.internal import prefer_static as ps\nfrom tensorflow_probability.python.internal import samplers\nfrom tensorflow_probability.python.internal import vectorization_util\n\n\nJAX_MODE = False\n\n\ndef _might_have_nonzero_size(sample_shape):\n static_size = tf.get_static_value(tf.size(sample_shape))\n return (static_size is None) or static_size >= 1\n\n\ndef _might_have_excess_ndims(flat_value, flat_core_ndims):\n for v, nd in zip(flat_value, flat_core_ndims):\n static_excess_ndims = (\n 0 if v is None else\n tf.get_static_value(ps.convert_to_shape_tensor(ps.rank(v) - nd)))\n if static_excess_ndims is None or static_excess_ndims > 0:\n return True\n return False\n\n\ndef _pad_value_to_full_length(value, dtype):\n \"\"\"Fills a partial `value` structure with `None`s for any unspecified RVs.\"\"\"\n # If dtype is dict-like, set missing values to `None`.\n if hasattr(dtype, 'keys'):\n return type(dtype)({k: value.get(k, None) for k in dtype.keys()})\n\n # Otherwise, dtype is a sequence, so append `None`s.\n return tf.nest.pack_sequence_as(dtype,\n [value[i] if i < len(value) else None\n for i in range(len(dtype))])\n\n\n# Lint doesn't know that docstrings are defined in the base JD class.\n# pylint: disable=missing-docstring\nclass JointDistributionVmapMixin(object):\n \"\"\"A joint distribution with automatically vectorized sample and log-prob.\n\n Auto-vectorized variants of JointDistribution treat the underlying\n model as describing a single possible world, or equivalently, as\n specifying the process of generating a single sample from the model.\n Drawing multiple samples, and computing batched log-probs, is accomplished\n using `tf.vectorized_map`. In many cases this allows for significant\n simplication of the model. 
For example, the following\n manually-vectorized `tfd.JointDistributionCoroutine` model:\n\n ```python\n def model_fn():\n x = yield tfd.JointDistributionCoroutine.Root(\n tfd.Normal(0., tf.ones([3])))\n y = yield tfd.JointDistributionCoroutine.Root(\n tfd.Normal(0., 1.)))\n z = yield tfd.Normal(x[..., :2] + y[..., tf.newaxis], 1.)\n\n can be written in auto-vectorized form as\n\n ```python\n def model_fn():\n x = yield tfd.Normal(0., tf.ones([3]))\n y = yield tfd.Normal(0., 1.))\n z = yield tfd.Normal(x[:2] + y, 1.)\n ```\n\n in which we were able to drop the specification of `Root` nodes and to\n avoid explicitly accounting for batch dimensions when indexing and slicing\n computed quantities in the third line.\n\n Note: auto-vectorization is still experimental and some TensorFlow ops may\n be unsupported.\n\n A limitation relative to standard `JointDistribution`s is that the\n `sample_distributions()` method does not currently support (nontrivial) sample\n shapes.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self._use_vectorized_map = kwargs.pop('use_vectorized_map', True)\n super(JointDistributionVmapMixin, self).__init__(*args, **kwargs)\n\n # TODO(b/166658748): Drop this (make it always True).\n _stateful_to_stateless = JAX_MODE\n\n @property\n def use_vectorized_map(self):\n return self._use_vectorized_map\n\n @property\n def _single_sample_ndims(self):\n \"\"\"Computes the rank of values produced by executing the base model.\"\"\"\n result = []\n for d in self._get_single_sample_distributions():\n batch_ndims = ps.rank_from_shape(d.batch_shape_tensor, d.batch_shape)\n result.append(tf.nest.map_structure(\n lambda a, b, nd=batch_ndims: nd + ps.rank_from_shape(a, b),\n d.event_shape_tensor(),\n d.event_shape))\n return result\n\n def sample_distributions(self, sample_shape=(), seed=None, value=None,\n name='sample_distributions', **kwargs):\n with self._name_and_control_scope(name):\n\n value_might_have_sample_dims = False\n if (value is None) and kwargs:\n value = self._resolve_value_from_kwargs(**kwargs)\n if value is not None:\n value = _pad_value_to_full_length(value, self.dtype)\n value = tf.nest.map_structure(\n lambda v: v if v is None else tf.convert_to_tensor(v), value)\n value_might_have_sample_dims = _might_have_excess_ndims(\n flat_value=self._model_flatten(value),\n flat_core_ndims=self._single_sample_ndims)\n\n # TODO(b/157953455): Return distributions as CompositeTensors once\n # vectorized_map supports this.\n if self.use_vectorized_map and (\n _might_have_nonzero_size(sample_shape) or\n value_might_have_sample_dims):\n raise NotImplementedError('`sample_distributions` with nontrivial '\n 'sample shape is not yet supported '\n 'for autovectorized JointDistributions.')\n else:\n ds, xs = self._call_flat_sample_distributions(\n sample_shape=sample_shape, seed=seed, value=value)\n return self._model_unflatten(ds), self._model_unflatten(xs)\n\n def _sample_n(self, sample_shape, seed, value=None, **kwargs):\n\n value_might_have_sample_dims = False\n if (value is None) and kwargs:\n value = self._resolve_value_from_kwargs(**kwargs)\n if value is not None:\n value = _pad_value_to_full_length(value, self.dtype)\n value = tf.nest.map_structure(\n lambda v: v if v is None else tf.convert_to_tensor(v), value)\n value_might_have_sample_dims = _might_have_excess_ndims(\n flat_value=self._model_flatten(value),\n flat_core_ndims=self._single_sample_ndims)\n\n if not self.use_vectorized_map or not (\n _might_have_nonzero_size(sample_shape) or\n value_might_have_sample_dims):\n 
# No need to auto-vectorize.\n xs = self._call_flat_sample_distributions(\n sample_shape=sample_shape, seed=seed, value=value)[1]\n return self._model_unflatten(xs)\n\n # Set up for autovectorized sampling. To support the `value` arg, we need to\n # first understand which dims are from the model itself, then wrap\n # `_call_flat_sample_distributions` to batch over all remaining dims.\n value_core_ndims = None\n if value is not None:\n value_core_ndims = tf.nest.map_structure(\n lambda v, nd: None if v is None else nd,\n value, self._model_unflatten(self._single_sample_ndims),\n check_types=False)\n batch_flat_sample = vectorization_util.make_rank_polymorphic(\n lambda v, seed: self._call_flat_sample_distributions( # pylint: disable=g-long-lambda\n sample_shape=(), seed=seed, value=v)[1],\n core_ndims=[value_core_ndims, None],\n validate_args=self.validate_args)\n\n # Draw samples.\n vectorized_flat_sample = vectorization_util.iid_sample(\n # Redefine the polymorphic fn to hack around `make_rank_polymorphic`\n # not currently supporting keyword args.\n lambda v, seed: batch_flat_sample(v, seed), sample_shape) # pylint: disable=unnecessary-lambda\n xs = vectorized_flat_sample(value, seed=seed)\n return self._model_unflatten(xs)\n\n # Redefine `_map_measure_over_dists` to autovectorize the measure if needed.\n def _map_measure_over_dists(self, attr, value):\n if any(x is None for x in self._model_flatten(value)):\n raise ValueError('No `value` part can be `None`; saw: {}.'.format(value))\n if value is not None:\n value = self._model_flatten(value)\n\n def map_measure_fn(value):\n # We always provide a seed, since _flat_sample_distributions will\n # unconditionally split the seed.\n with tf.name_scope('map_measure_fn'):\n constant_seed = samplers.zeros_seed()\n return [getattr(d, attr)(x) for (d, x) in zip(\n *self._flat_sample_distributions(value=value, seed=constant_seed))]\n if self.use_vectorized_map:\n map_measure_fn = vectorization_util.make_rank_polymorphic(\n map_measure_fn,\n core_ndims=[self._single_sample_ndims],\n validate_args=self.validate_args)\n\n return map_measure_fn(value)\n\n def _default_event_space_bijector(self, *args, **kwargs):\n bijector_class = joint_distribution_lib._DefaultJointBijector # pylint: disable=protected-access\n if self.use_vectorized_map:\n bijector_class = _DefaultJointBijectorAutoBatched\n if bool(args) or bool(kwargs):\n return self.experimental_pin(\n *args, **kwargs).experimental_default_event_space_bijector()\n return bijector_class(self)\n\n\nclass _DefaultJointBijectorAutoBatched(bijector_lib.Bijector):\n \"\"\"Automatically vectorized support bijector for autobatched JDs.\"\"\"\n\n def __init__(self, jd, **kwargs):\n parameters = dict(locals())\n self._jd = jd\n self._bijector_kwargs = kwargs\n self._joint_bijector = joint_distribution_lib._DefaultJointBijector(\n jd=self._jd, **self._bijector_kwargs)\n super(_DefaultJointBijectorAutoBatched, self).__init__(\n forward_min_event_ndims=self._joint_bijector.forward_min_event_ndims,\n inverse_min_event_ndims=self._joint_bijector.inverse_min_event_ndims,\n validate_args=self._joint_bijector.validate_args,\n parameters=parameters,\n name=self._joint_bijector.name)\n # Wrap the non-batched `joint_bijector` to take batched args.\n # pylint: disable=protected-access\n self._forward = self._vectorize_member_fn(\n lambda bij, x: bij._forward(x),\n core_ndims=[self._joint_bijector.forward_min_event_ndims])\n self._inverse = self._vectorize_member_fn(\n lambda bij, y: bij._inverse(y),\n 
core_ndims=[self._joint_bijector.inverse_min_event_ndims])\n self._forward_log_det_jacobian = self._vectorize_member_fn(\n lambda bij, x: bij._forward_log_det_jacobian( # pylint: disable=g-long-lambda\n x, event_ndims=bij.forward_min_event_ndims),\n core_ndims=[self._joint_bijector.forward_min_event_ndims])\n self._inverse_log_det_jacobian = self._vectorize_member_fn(\n lambda bij, y: bij._inverse_log_det_jacobian( # pylint: disable=g-long-lambda\n y, event_ndims=bij.inverse_min_event_ndims),\n core_ndims=[self._joint_bijector.inverse_min_event_ndims])\n for attr in ('_forward_event_shape',\n '_forward_event_shape_tensor',\n '_inverse_event_shape',\n '_inverse_event_shape_tensor',\n '_forward_dtype',\n '_inverse_dtype',\n 'forward_event_ndims',\n 'inverse_event_ndims',):\n setattr(self, attr, getattr(self._joint_bijector, attr))\n # pylint: enable=protected-access\n\n def _vectorize_member_fn(self, member_fn, core_ndims):\n return vectorization_util.make_rank_polymorphic(\n lambda x: member_fn(self._joint_bijector, x),\n core_ndims=core_ndims)\n",
"# Copyright 2020 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"The exponentially modified Gaussian distribution class.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\n\nimport tensorflow.compat.v2 as tf\nfrom tensorflow_probability.python.bijectors import identity as identity_bijector\nfrom tensorflow_probability.python.bijectors import softplus as softplus_bijector\nfrom tensorflow_probability.python.distributions import distribution\nfrom tensorflow_probability.python.distributions import exponential as exponential_lib\nfrom tensorflow_probability.python.distributions import normal as normal_lib\nfrom tensorflow_probability.python.internal import assert_util\nfrom tensorflow_probability.python.internal import dtype_util\nfrom tensorflow_probability.python.internal import parameter_properties\nfrom tensorflow_probability.python.internal import prefer_static\nfrom tensorflow_probability.python.internal import reparameterization\nfrom tensorflow_probability.python.internal import samplers\nfrom tensorflow_probability.python.internal import special_math\nfrom tensorflow_probability.python.internal import tensor_util\nfrom tensorflow_probability.python.math import generic as tfp_math\n\n__all__ = [\n 'ExponentiallyModifiedGaussian',\n]\n\n\nclass ExponentiallyModifiedGaussian(distribution.Distribution):\n \"\"\"Exponentially modified Gaussian distribution.\n\n #### Mathematical details\n\n The exponentially modified Gaussian distribution is the sum of a normal\n distribution and an exponential distribution.\n ```none\n X ~ Normal(loc, scale)\n Y ~ Exponential(rate)\n Z = X + Y\n ```\n is equivalent to\n ```none\n Z ~ ExponentiallyModifiedGaussian(loc, scale, rate)\n ```\n\n #### Examples\n ```python\n tfd = tfp.distributions\n\n # Define a single scalar ExponentiallyModifiedGaussian distribution\n dist = tfd.ExponentiallyModifiedGaussian(loc=0., scale=1., rate=3.)\n\n # Evaluate the pdf at 1, returing a scalar.\n dist.prob(1.)\n ```\n\n\n \"\"\"\n\n def __init__(self,\n loc,\n scale,\n rate,\n validate_args=False,\n allow_nan_stats=True,\n name='ExponentiallyModifiedGaussian'):\n \"\"\"Construct an exponentially-modified Gaussian distribution.\n\n The Gaussian distribution has mean `loc` and stddev `scale`,\n and Exponential distribution has rate parameter `rate`.\n\n The parameters `loc`, `scale`, and `rate` must be shaped in a way that\n supports broadcasting (e.g. `loc + scale + rate` is a valid operation).\n Args:\n loc: Floating-point `Tensor`; the means of the distribution(s).\n scale: Floating-point `Tensor`; the stddevs of the distribution(s). Must\n contain only positive values.\n rate: Floating-point `Tensor`; the rate parameter for the exponential\n distribution.\n validate_args: Python `bool`, default `False`. 
When `True` distribution\n parameters are checked for validity despite possibly degrading runtime\n performance. When `False` invalid inputs may silently render incorrect\n outputs.\n allow_nan_stats: Python `bool`, default `True`. When `True`, statistics\n (e.g., mean, mode, variance) use the value \"`NaN`\" to indicate the\n result is undefined. When `False`, an exception is raised if one or more\n of the statistic's batch members are undefined.\n name: Python `str` name prefixed to Ops created by this class.\n\n Raises:\n TypeError: if `loc`, `scale`, and `rate` are not all the same `dtype`.\n \"\"\"\n parameters = dict(locals())\n with tf.name_scope(name) as name:\n dtype = dtype_util.common_dtype([loc, scale, rate], dtype_hint=tf.float32)\n self._loc = tensor_util.convert_nonref_to_tensor(\n loc, dtype=dtype, name='loc')\n self._scale = tensor_util.convert_nonref_to_tensor(\n scale, dtype=dtype, name='scale')\n self._rate = tensor_util.convert_nonref_to_tensor(\n rate, dtype=dtype, name='rate')\n super(ExponentiallyModifiedGaussian, self).__init__(\n dtype=dtype,\n reparameterization_type=reparameterization.FULLY_REPARAMETERIZED,\n validate_args=validate_args,\n allow_nan_stats=allow_nan_stats,\n parameters=parameters,\n name=name)\n\n @staticmethod\n def _param_shapes(sample_shape):\n return dict(\n zip(('loc', 'scale', 'rate'),\n ([tf.convert_to_tensor(sample_shape, dtype=tf.int32)] * 3)))\n\n @classmethod\n def _parameter_properties(cls, dtype, num_classes=None):\n return dict(\n loc=parameter_properties.ParameterProperties(),\n scale=parameter_properties.ParameterProperties(\n default_constraining_bijector_fn=(\n lambda: softplus_bijector.Softplus(low=dtype_util.eps(dtype)))),\n rate=parameter_properties.ParameterProperties(\n default_constraining_bijector_fn=(\n lambda: softplus_bijector.Softplus(low=dtype_util.eps(dtype)))))\n\n @property\n def loc(self):\n \"\"\"Distribution parameter for the mean of the normal distribution.\"\"\"\n return self._loc\n\n @property\n def scale(self):\n \"\"\"Distribution parameter for standard deviation of the normal distribution.\"\"\"\n return self._scale\n\n @property\n def rate(self):\n \"\"\"Distribution parameter for rate parameter of exponential distribution.\"\"\"\n return self._rate\n\n def _batch_shape_tensor(self, loc=None, scale=None, rate=None):\n return prefer_static.broadcast_shape(\n prefer_static.shape(self.loc if loc is None else loc),\n prefer_static.broadcast_shape(\n prefer_static.shape(self.scale if scale is None else scale),\n prefer_static.shape(self.rate if rate is None else rate)))\n\n def _batch_shape(self):\n return tf.broadcast_static_shape(\n self.loc.shape,\n tf.broadcast_static_shape(self.scale.shape, self.rate.shape))\n\n def _event_shape_tensor(self):\n return tf.constant([], dtype=tf.int32)\n\n def _event_shape(self):\n return tf.TensorShape([])\n\n def _sample_n(self, n, seed=None):\n normal_seed, exp_seed = samplers.split_seed(seed, salt='emg_sample')\n # need to make sure component distributions are broadcast appropriately\n # for correct generation of samples\n loc = tf.convert_to_tensor(self.loc)\n rate = tf.convert_to_tensor(self.rate)\n scale = tf.convert_to_tensor(self.scale)\n batch_shape = self._batch_shape_tensor(loc, scale, rate)\n loc_broadcast = tf.broadcast_to(loc, batch_shape)\n rate_broadcast = tf.broadcast_to(rate, batch_shape)\n normal_dist = normal_lib.Normal(loc=loc_broadcast, scale=scale)\n exp_dist = exponential_lib.Exponential(rate_broadcast)\n x = normal_dist.sample(n, normal_seed)\n y = 
exp_dist.sample(n, exp_seed)\n return x + y\n\n def _log_prob(self, x):\n loc = tf.convert_to_tensor(self.loc)\n rate = tf.convert_to_tensor(self.rate)\n scale = tf.convert_to_tensor(self.scale)\n two = dtype_util.as_numpy_dtype(x.dtype)(2.)\n z = (x - loc) / scale\n w = rate * scale\n return (tf.math.log(rate) + w / two * (w - 2 * z) +\n special_math.log_ndtr(z - w))\n\n def _log_cdf(self, x):\n rate = tf.convert_to_tensor(self.rate)\n x_centralized = x - self.loc\n u = rate * x_centralized\n v = rate * self.scale\n vsquared = tf.square(v)\n return tfp_math.log_sub_exp(\n special_math.log_ndtr(x_centralized / self.scale),\n -u + vsquared / 2. + special_math.log_ndtr((u - vsquared) / v))\n\n def _mean(self):\n return self.loc + 1 / self.rate\n\n def _variance(self):\n return tf.square(self.scale) + 1 / tf.square(self.rate)\n\n def _parameter_control_dependencies(self, is_init):\n assertions = []\n\n if is_init:\n try:\n self._batch_shape()\n except ValueError:\n raise ValueError(\n 'Arguments `loc`, `scale`, and `rate` must have compatible shapes; '\n 'loc.shape={}, scale.shape={}, rate.shape={}.'.format(\n self.loc.shape, self.scale.shape, self.rate.shape))\n # We don't bother checking the shapes in the dynamic case because\n # all member functions access both arguments anyway.\n\n if is_init != tensor_util.is_ref(self.scale):\n assertions.append(assert_util.assert_positive(\n self.scale, message='Argument `scale` must be positive.'))\n\n if is_init != tensor_util.is_ref(self.rate):\n assertions.append(assert_util.assert_positive(\n self.rate, message='Argument `rate` must be positive.'))\n\n return assertions\n\n def _default_event_space_bijector(self):\n return identity_bijector.Identity(validate_args=self.validate_args)\n",
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Multivariate Normal distribution classes.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python.bijectors import identity as identity_bijector\nfrom tensorflow_probability.python.bijectors import scale_matvec_linear_operator\nfrom tensorflow_probability.python.bijectors import shift as shift_bijector\nfrom tensorflow_probability.python.distributions import kullback_leibler\nfrom tensorflow_probability.python.distributions import normal\nfrom tensorflow_probability.python.distributions import sample\nfrom tensorflow_probability.python.distributions import transformed_distribution\nfrom tensorflow_probability.python.internal import distribution_util\nfrom tensorflow_probability.python.internal import dtype_util\nfrom tensorflow_probability.python.internal import prefer_static as ps\nfrom tensorflow_probability.python.internal import tensor_util\nfrom tensorflow_probability.python.internal import tensorshape_util\n\n\n__all__ = [\n 'MultivariateNormalLinearOperator',\n]\n\n\n_mvn_sample_note = \"\"\"\n`value` is a batch vector with compatible shape if `value` is a `Tensor` whose\nshape can be broadcast up to either:\n\n```python\nself.batch_shape + self.event_shape\n```\n\nor\n\n```python\n[M1, ..., Mm] + self.batch_shape + self.event_shape\n```\n\n\"\"\"\n\n\nclass MultivariateNormalLinearOperator(\n transformed_distribution.TransformedDistribution):\n \"\"\"The multivariate normal distribution on `R^k`.\n\n The Multivariate Normal distribution is defined over `R^k` and parameterized\n by a (batch of) length-`k` `loc` vector (aka \"mu\") and a (batch of) `k x k`\n `scale` matrix; `covariance = scale @ scale.T`, where `@` denotes\n matrix-multiplication.\n\n #### Mathematical Details\n\n The probability density function (pdf) is,\n\n ```none\n pdf(x; loc, scale) = exp(-0.5 ||y||**2) / Z,\n y = inv(scale) @ (x - loc),\n Z = (2 pi)**(0.5 k) |det(scale)|,\n ```\n\n where:\n\n * `loc` is a vector in `R^k`,\n * `scale` is a linear operator in `R^{k x k}`, `cov = scale @ scale.T`,\n * `Z` denotes the normalization constant, and,\n * `||y||**2` denotes the squared Euclidean norm of `y`.\n\n The MultivariateNormal distribution is a member of the [location-scale\n family](https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be\n constructed as,\n\n ```none\n X ~ MultivariateNormal(loc=0, scale=1) # Identity scale, zero shift.\n Y = scale @ X + loc\n ```\n\n #### Examples\n\n ```python\n tfd = tfp.distributions\n\n # Initialize a single 3-variate Gaussian.\n mu = [1., 2, 3]\n cov = [[ 0.36, 0.12, 0.06],\n [ 0.12, 0.29, -0.13],\n [ 0.06, -0.13, 0.26]]\n scale = tf.linalg.cholesky(cov)\n # ==> [[ 0.6, 0. , 0. ],\n # [ 0.2, 0.5, 0. 
],\n # [ 0.1, -0.3, 0.4]])\n\n mvn = tfd.MultivariateNormalLinearOperator(\n loc=mu,\n scale=tf.linalg.LinearOperatorLowerTriangular(scale))\n\n # Covariance agrees with cholesky(cov) parameterization.\n mvn.covariance()\n # ==> [[ 0.36, 0.12, 0.06],\n # [ 0.12, 0.29, -0.13],\n # [ 0.06, -0.13, 0.26]]\n\n # Compute the pdf of an`R^3` observation; return a scalar.\n mvn.prob([-1., 0, 1]) # shape: []\n\n # Initialize a 2-batch of 3-variate Gaussians.\n mu = [[1., 2, 3],\n [11, 22, 33]] # shape: [2, 3]\n scale_diag = [[1., 2, 3],\n [0.5, 1, 1.5]] # shape: [2, 3]\n\n mvn = tfd.MultivariateNormalLinearOperator(\n loc=mu,\n scale=tf.linalg.LinearOperatorDiag(scale_diag))\n\n # Compute the pdf of two `R^3` observations; return a length-2 vector.\n x = [[-0.9, 0, 0.1],\n [-10, 0, 9]] # shape: [2, 3]\n mvn.prob(x) # shape: [2]\n ```\n\n \"\"\"\n\n def __init__(self,\n loc=None,\n scale=None,\n validate_args=False,\n allow_nan_stats=True,\n experimental_use_kahan_sum=False,\n name='MultivariateNormalLinearOperator'):\n \"\"\"Construct Multivariate Normal distribution on `R^k`.\n\n The `batch_shape` is the broadcast shape between `loc` and `scale`\n arguments.\n\n The `event_shape` is given by last dimension of the matrix implied by\n `scale`. The last dimension of `loc` (if provided) must broadcast with this.\n\n Recall that `covariance = scale @ scale.T`.\n\n Additional leading dimensions (if any) will index batches.\n\n Args:\n loc: Floating-point `Tensor`. If this is set to `None`, `loc` is\n implicitly `0`. When specified, may have shape `[B1, ..., Bb, k]` where\n `b >= 0` and `k` is the event size.\n scale: Instance of `LinearOperator` with same `dtype` as `loc` and shape\n `[B1, ..., Bb, k, k]`.\n validate_args: Python `bool`, default `False`. Whether to validate input\n with asserts. If `validate_args` is `False`, and the inputs are\n invalid, correct behavior is not guaranteed.\n allow_nan_stats: Python `bool`, default `True`. If `False`, raise an\n exception if a statistic (e.g. mean/mode/etc...) is undefined for any\n batch member If `True`, batch members with valid parameters leading to\n undefined statistics will return NaN for this statistic.\n experimental_use_kahan_sum: Python `bool`. When `True`, we use Kahan\n summation to aggregate independent underlying log_prob values. 
For best\n results, Kahan summation should also be applied when computing the\n log-determinant of the `LinearOperator` representing the scale matrix.\n Kahan summation improves against the precision of a naive float32 sum.\n This can be noticeable in particular for large dimensions in float32.\n See CPU caveat on `tfp.math.reduce_kahan_sum`.\n name: The name to give Ops created by the initializer.\n\n Raises:\n ValueError: if `scale` is unspecified.\n TypeError: if not `scale.dtype.is_floating`\n \"\"\"\n parameters = dict(locals())\n self._experimental_use_kahan_sum = experimental_use_kahan_sum\n if scale is None:\n raise ValueError('Missing required `scale` parameter.')\n if not dtype_util.is_floating(scale.dtype):\n raise TypeError('`scale` parameter must have floating-point dtype.')\n\n with tf.name_scope(name) as name:\n dtype = dtype_util.common_dtype([loc, scale], dtype_hint=tf.float32)\n # Since expand_dims doesn't preserve constant-ness, we obtain the\n # non-dynamic value if possible.\n loc = tensor_util.convert_nonref_to_tensor(\n loc, dtype=dtype, name='loc')\n batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale(\n loc, scale)\n self._loc = loc\n self._scale = scale\n\n bijector = scale_matvec_linear_operator.ScaleMatvecLinearOperator(\n scale, validate_args=validate_args)\n if loc is not None:\n bijector = shift_bijector.Shift(\n shift=loc, validate_args=validate_args)(bijector)\n super(MultivariateNormalLinearOperator, self).__init__(\n # TODO(b/137665504): Use batch-adding meta-distribution to set the batch\n # shape instead of tf.zeros.\n # We use `Sample` instead of `Independent` because `Independent`\n # requires concatenating `batch_shape` and `event_shape`, which loses\n # static `batch_shape` information when `event_shape` is not statically\n # known.\n distribution=sample.Sample(\n normal.Normal(\n loc=tf.zeros(batch_shape, dtype=dtype),\n scale=tf.ones([], dtype=dtype)),\n event_shape,\n experimental_use_kahan_sum=experimental_use_kahan_sum),\n bijector=bijector,\n validate_args=validate_args,\n name=name)\n self._parameters = parameters\n\n @property\n def loc(self):\n \"\"\"The `loc` `Tensor` in `Y = scale @ X + loc`.\"\"\"\n return self._loc\n\n @property\n def scale(self):\n \"\"\"The `scale` `LinearOperator` in `Y = scale @ X + loc`.\"\"\"\n return self._scale\n\n experimental_is_sharded = False\n\n @distribution_util.AppendDocstring(_mvn_sample_note)\n def _log_prob(self, x):\n return super(MultivariateNormalLinearOperator, self)._log_prob(x)\n\n @distribution_util.AppendDocstring(_mvn_sample_note)\n def _prob(self, x):\n return super(MultivariateNormalLinearOperator, self)._prob(x)\n\n def _mean(self):\n shape = tensorshape_util.concatenate(self.batch_shape, self.event_shape)\n has_static_shape = tensorshape_util.is_fully_defined(shape)\n if not has_static_shape:\n shape = tf.concat([\n self.batch_shape_tensor(),\n self.event_shape_tensor(),\n ], 0)\n\n if self.loc is None:\n return tf.zeros(shape, self.dtype)\n\n return tf.broadcast_to(self.loc, shape)\n\n def _covariance(self):\n if distribution_util.is_diagonal_scale(self.scale):\n return tf.linalg.diag(tf.square(self.scale.diag_part()))\n else:\n return self.scale.matmul(self.scale.to_dense(), adjoint_arg=True)\n\n def _variance(self):\n if distribution_util.is_diagonal_scale(self.scale):\n variance = tf.square(self.scale.diag_part())\n elif (isinstance(self.scale, tf.linalg.LinearOperatorLowRankUpdate) and\n self.scale.is_self_adjoint):\n variance = 
self.scale.matmul(self.scale.adjoint()).diag_part()\n elif isinstance(self.scale, tf.linalg.LinearOperatorKronecker):\n factors_sq_operators = [\n factor.matmul(factor.adjoint()) for factor in self.scale.operators\n ]\n variance = (tf.linalg.LinearOperatorKronecker(factors_sq_operators)\n .diag_part())\n else:\n variance = self.scale.matmul(self.scale.adjoint()).diag_part()\n\n return tf.broadcast_to(\n variance,\n ps.broadcast_shape(\n ps.shape(variance),\n ps.shape(self.loc)))\n\n def _stddev(self):\n if distribution_util.is_diagonal_scale(self.scale):\n stddev = tf.abs(self.scale.diag_part())\n elif (isinstance(self.scale, tf.linalg.LinearOperatorLowRankUpdate) and\n self.scale.is_self_adjoint):\n stddev = tf.sqrt(\n tf.linalg.diag_part(self.scale.matmul(self.scale.to_dense())))\n else:\n stddev = tf.sqrt(\n tf.linalg.diag_part(\n self.scale.matmul(self.scale.to_dense(), adjoint_arg=True)))\n\n shape = tensorshape_util.concatenate(self.batch_shape, self.event_shape)\n has_static_shape = tensorshape_util.is_fully_defined(shape)\n if not has_static_shape:\n shape = tf.concat([\n self.batch_shape_tensor(),\n self.event_shape_tensor(),\n ], 0)\n\n if has_static_shape and shape == stddev.shape:\n return stddev\n\n # Add dummy tensor of zeros to broadcast. This is only necessary if shape\n # != stddev.shape, but we could not determine if this is the case.\n return stddev + tf.zeros(shape, self.dtype)\n\n def _mode(self):\n return self._mean()\n\n def _default_event_space_bijector(self):\n return identity_bijector.Identity(validate_args=self.validate_args)\n\n def _parameter_control_dependencies(self, is_init):\n # Nothing to do here.\n return []\n\n _composite_tensor_nonshape_params = ('loc', 'scale')\n\n\n@kullback_leibler.RegisterKL(MultivariateNormalLinearOperator,\n MultivariateNormalLinearOperator)\ndef _kl_brute_force(a, b, name=None):\n \"\"\"Batched KL divergence `KL(a || b)` for multivariate Normals.\n\n With `X`, `Y` both multivariate Normals in `R^k` with means `mu_a`, `mu_b` and\n covariance `C_a`, `C_b` respectively,\n\n ```\n KL(a || b) = 0.5 * ( L - k + T + Q ),\n L := Log[Det(C_b)] - Log[Det(C_a)]\n T := trace(C_b^{-1} C_a),\n Q := (mu_b - mu_a)^T C_b^{-1} (mu_b - mu_a),\n ```\n\n This `Op` computes the trace by solving `C_b^{-1} C_a`. Although efficient\n methods for solving systems with `C_b` may be available, a dense version of\n (the square root of) `C_a` is used, so performance is `O(B s k**2)` where `B`\n is the batch size, and `s` is the cost of solving `C_b x = y` for vectors `x`\n and `y`.\n\n Args:\n a: Instance of `MultivariateNormalLinearOperator`.\n b: Instance of `MultivariateNormalLinearOperator`.\n name: (optional) name to use for created ops. Default \"kl_mvn\".\n\n Returns:\n Batchwise `KL(a || b)`.\n \"\"\"\n\n def squared_frobenius_norm(x):\n \"\"\"Helper to make KL calculation slightly more readable.\"\"\"\n # http://mathworld.wolfram.com/FrobeniusNorm.html\n # The gradient of KL[p,q] is not defined when p==q. The culprit is\n # tf.norm, i.e., we cannot use the commented out code.\n # return tf.square(tf.norm(x, ord=\"fro\", axis=[-2, -1]))\n return tf.reduce_sum(tf.square(x), axis=[-2, -1])\n\n # TODO(b/35041439): See also b/35040945. 
Remove this function once LinOp\n # supports something like:\n # A.inverse().solve(B).norm(order='fro', axis=[-1, -2])\n def is_diagonal(x):\n \"\"\"Helper to identify if `LinearOperator` has only a diagonal component.\"\"\"\n return (isinstance(x, tf.linalg.LinearOperatorIdentity) or\n isinstance(x, tf.linalg.LinearOperatorScaledIdentity) or\n isinstance(x, tf.linalg.LinearOperatorDiag))\n\n with tf.name_scope(name or 'kl_mvn'):\n # Calculation is based on:\n # http://stats.stackexchange.com/questions/60680/kl-divergence-between-two-multivariate-gaussians\n # and,\n # https://en.wikipedia.org/wiki/Matrix_norm#Frobenius_norm\n # i.e.,\n # If Ca = AA', Cb = BB', then\n # tr[inv(Cb) Ca] = tr[inv(B)' inv(B) A A']\n # = tr[inv(B) A A' inv(B)']\n # = tr[(inv(B) A) (inv(B) A)']\n # = sum_{ij} (inv(B) A)_{ij}**2\n # = ||inv(B) A||_F**2\n # where ||.||_F is the Frobenius norm and the second equality follows from\n # the cyclic permutation property.\n if is_diagonal(a.scale) and is_diagonal(b.scale):\n # Using `stddev` because it handles expansion of Identity cases.\n b_inv_a = (a.stddev() / b.stddev())[..., tf.newaxis]\n else:\n b_inv_a = b.scale.solve(a.scale.to_dense())\n kl_div = (\n b.scale.log_abs_determinant() - a.scale.log_abs_determinant() +\n 0.5 * (-tf.cast(a.scale.domain_dimension_tensor(), a.dtype) +\n squared_frobenius_norm(b_inv_a) + squared_frobenius_norm(\n b.scale.solve((b.mean() - a.mean())[..., tf.newaxis]))))\n tensorshape_util.set_shape(\n kl_div, tf.broadcast_static_shape(a.batch_shape, b.batch_shape))\n return kl_div\n"
] | [
[
"tensorflow.compat.v2.linalg.diag_part",
"tensorflow.compat.v2.constant",
"tensorflow.compat.v2.sqrt",
"tensorflow.compat.v2.name_scope",
"tensorflow.compat.v2.broadcast_to",
"tensorflow.python.util.deprecation.deprecated_args",
"tensorflow.compat.v2.shape",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.zeros",
"tensorflow.compat.v2.squeeze",
"tensorflow.compat.v2.TensorShape",
"tensorflow.compat.v2.compat.dimension_value"
],
[
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.name_scope",
"tensorflow.compat.v2.size"
],
[
"tensorflow.compat.v2.square",
"tensorflow.compat.v2.broadcast_to",
"tensorflow.compat.v2.name_scope",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.broadcast_static_shape",
"tensorflow.compat.v2.math.log",
"tensorflow.compat.v2.TensorShape",
"tensorflow.compat.v2.constant"
],
[
"tensorflow.compat.v2.linalg.LinearOperatorKronecker",
"tensorflow.compat.v2.square",
"tensorflow.compat.v2.name_scope",
"tensorflow.compat.v2.broadcast_to",
"tensorflow.compat.v2.broadcast_static_shape",
"tensorflow.compat.v2.zeros",
"tensorflow.compat.v2.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
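The `ExponentiallyModifiedGaussian` source in the row above samples by adding an independent Normal draw and an Exponential draw, and exposes closed-form moments (`_mean = loc + 1/rate`, `_variance = scale**2 + 1/rate**2`). As a quick sanity check of that construction, here is a minimal NumPy sketch; the helper name and parameter values are illustrative assumptions, not part of the TFP API shown above.

```python
# Minimal sketch of the EMG sampling scheme from the row above:
# an ExponentiallyModifiedGaussian sample is a Normal(loc, scale) draw
# plus an independent Exponential(rate) draw. The function name and the
# numbers below are hypothetical, chosen only to check the closed forms.
import numpy as np

def emg_samples(loc, scale, rate, n, seed=0):
    rng = np.random.default_rng(seed)
    # NumPy's exponential() takes the scale parameter, i.e. 1/rate.
    return rng.normal(loc, scale, size=n) + rng.exponential(1.0 / rate, size=n)

x = emg_samples(loc=1.0, scale=0.5, rate=2.0, n=200_000)
print(x.mean(), 1.0 + 1.0 / 2.0)        # empirical mean vs. loc + 1/rate
print(x.var(), 0.5**2 + 1.0 / 2.0**2)   # empirical variance vs. scale^2 + 1/rate^2
```

The empirical moments should agree with the closed forms to a few decimal places at this sample size, which is the property the `_mean` and `_variance` methods above encode.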
eberharf/cfl | [
"077b99a05824f1371ac47d76dfed6bb160222668",
"077b99a05824f1371ac47d76dfed6bb160222668"
] | [
"testing/test_cde_io.py",
"visual_bars/generate_visual_bars_data.py"
] | [
"import os\nimport shutil\nfrom shutil import Error\nimport unittest\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom cdes_for_testing import all_cdes \nfrom cfl.dataset import Dataset\n\n''' The following code runs all tests in CondExpInputTests on all implemented\n CondExpXxxx classes.\n'''\n\n\ndef make_cde_io_tests(cond_exp_class):\n\n # generic test class for any CondExpBase descendant \n # (passed in as cond_exp_class)\n class CondExpIOTests(unittest.TestCase):\n def setUp(self): # overriden unittest.TestCase method that will be\n # called in initializaiton\n self.data_info = { 'X_dims' : (10,3), \n 'Y_dims' : (10,2), \n 'Y_type' : 'continuous'}\n self.params = { 'show_plot' : False,\n 'n_epochs' : 2}\n self.ceb = cond_exp_class(self.data_info, self.params)\n\n ## INIT ###############################################################\n def test_init_wrong_input_types(self):\n data_info = 'str is bad'\n params = 'these are not params'\n self.assertRaises(AssertionError, cond_exp_class, data_info, params)\n\n def test_init_wrong_data_info_keys(self):\n data_info = {}\n params = {}\n self.assertRaises(AssertionError, cond_exp_class, data_info, \n params)\n\n def test_init_wrong_data_info_value_types(self):\n data_info = {'X_dims' : None, 'Y_dims' : None, 'Y_type' : None}\n params = {}\n self.assertRaises(AssertionError, cond_exp_class, data_info, \n params)\n\n def test_init_wrong_data_info_values(self):\n data_info = { 'X_dims' : (0,0), \n 'Y_dims' : (0,0), \n 'Y_type' : 'continuous'}\n params = {}\n self.assertRaises(AssertionError, cond_exp_class, data_info, \n params)\n \n data_info = { 'X_dims' : (10,3), \n 'Y_dims' : (12,2), \n 'Y_type' : 'continuous'}\n params = {}\n self.assertRaises(AssertionError, cond_exp_class, data_info, \n params)\n\n def test_init_correct_inputs(self):\n data_info = {'X_dims' : (10,3), \n 'Y_dims' : (10,2), \n 'Y_type' : 'continuous'}\n params = {}\n ceb = cond_exp_class(data_info, params)\n\n ## SAVE_BLOCK #########################################################\n def test_save_block_wrong_input_type(self):\n path = 123\n self.assertRaises(AssertionError, self.ceb.save_block, path)\n\n def test_save_block_correct_input_type(self):\n path = 'not/a/real/path'\n self.ceb.save_block(path)\n shutil.rmtree('not')\n\n ## LOAD_BLOCK #########################################################\n def test_load_block_wrong_input_type(self):\n path = 123\n self.assertRaises(AssertionError, self.ceb.load_block, path)\n\n def test_load_block_correct_input_type(self):\n # should only be run after test_save_block_correct_input_type so \n # there is something to load\n path = 'not/a/real/path'\n self.ceb.save_block(path)\n self.ceb.load_block(path)\n shutil.rmtree('not')\n # check and reset state\n assert self.ceb.trained, 'CDE should be trained after loading'\n self.ceb.trained = False\n\n\n ### TRAIN ############################################################\n def test_train_wrong_input_type(self):\n dataset = 'this is not a Dataset'\n prev_results = 'this is not a dict'\n self.assertRaises(AssertionError, self.ceb.train, dataset, \n prev_results)\n\n def test_train_correct_input_type(self):\n dataset = Dataset(X=np.ones(self.data_info['X_dims']), \n Y=np.zeros(self.data_info['Y_dims']))\n\n # what we expect from train outputs\n tkeys = ['train_loss','val_loss','loss_plot','model_weights','pyx']\n tshapes = {'train_loss' : (self.params['n_epochs'],),\n 'val_loss' : (self.params['n_epochs'],),\n 'pyx' : (self.data_info['Y_dims'])\n }\n\n for prev_results in [None, 
{}]:\n # reset\n self.ceb.trained = False\n\n train_results = self.ceb.train(dataset, prev_results)\n\n # check state\n assert self.ceb.trained, 'CDE should be trained after loading'\n\n # check outputs\n assert set(train_results.keys())==set(tkeys), \\\n f'train should return dict with keys: {tkeys}'\n for k in tshapes.keys():\n assert tshapes[k]==np.array(train_results[k]).shape, \\\n f'expected {k} to have shape {tshapes[k]} but got \\\n {train_results[k].shape}'\n\n def test_train_twice(self):\n dataset = Dataset(X=np.ones(self.data_info['X_dims']), \n Y=np.zeros(self.data_info['Y_dims']))\n prev_results = None\n\n # reset\n self.ceb.trained = False\n\n # what we expect from train outputs first time\n tkeys = ['train_loss','val_loss','loss_plot','model_weights','pyx']\n \n train_results = self.ceb.train(dataset, prev_results)\n\n # check state and outputs\n assert self.ceb.trained, 'CDE should be trained after loading'\n assert set(train_results.keys())==set(tkeys), \\\n f'train should return dict with keys: {tkeys}'\n\n # what we expect from train outputs second time\n tkeys = ['pyx']\n \n train_results = self.ceb.train(dataset, prev_results)\n\n # check state and outputs\n assert self.ceb.trained, 'CDE should be trained after loading'\n assert set(train_results.keys())==set(tkeys), \\\n f'train should return dict with keys: {tkeys}'\n\n\n ### PREDICT ##########################################################\n def test_predict_wrong_input_type(self):\n # artifically set CDE trained = True\n self.ceb.trained = True\n\n dataset = 'this is not a Dataset'\n prev_results = 'this is not a dict'\n self.assertRaises(AssertionError, self.ceb.predict, dataset, \n prev_results)\n\n def test_predict_correct_input_type(self):\n\n dataset = Dataset(X=np.ones(self.data_info['X_dims']), \n Y=np.zeros(self.data_info['Y_dims']))\n prev_results = None\n\n for prev_results in [None, {}]:\n self.ceb.train(dataset, prev_results)\n pred_results = self.ceb.predict(dataset, prev_results)\n\n # check output\n assert set(pred_results.keys())==set(['pyx']), f'pred_results \\\n keys should contain pyx, but contains {pred_results.keys()}'\n assert pred_results['pyx'].shape==self.data_info['Y_dims'], \\\n f\"expected {self.data_info['Y_dims']} but got \\\n {pred_results['pyx'].shape}\"\n \n ### EVALUATE #########################################################\n def test_evaluate_wrong_input_type(self):\n # artifically set CDE trained = True\n self.ceb.trained = True\n \n dataset = 'this is not a Dataset'\n prev_results = 'this is not a dict'\n self.assertRaises(AssertionError, self.ceb.evaluate, dataset)\n\n def test_evaluate_correct_input_type(self):\n\n dataset = Dataset(X=np.ones(self.data_info['X_dims']), \n Y=np.zeros(self.data_info['Y_dims']))\n prev_results = None\n\n self.ceb.train(dataset, prev_results)\n score = self.ceb.evaluate(dataset)\n assert score.shape==()\n assert score.dtype==np.float32\n\n ### BUILD_MODEL ######################################################\n\n def test_build_model(self):\n assert isinstance(self.ceb._build_model(), tf.keras.Sequential)\n\n\n return CondExpIOTests\n\n\nfor cond_exp_class in all_cdes:\n class ConcreteIOTests(make_cde_io_tests(cond_exp_class)):\n pass\n\n",
"import numpy as np # must be numpy 1.17 or higher\r\nimport math\r\nimport matplotlib.pyplot as plt\r\n\r\n'''\r\n# Jenna Kahn\r\n# adapted from dataset_binary_gratings.py (Chalupka 2015)\r\n\r\n\r\nA binary image dataset created from the following probabilities:\r\n(H1 is a \"hidden variable\", VB is \"vertical bars\"\r\n and HB stands for \"horizontal bars\".)\r\n\r\nCausal graph:\r\nH1 is a binary variable that, when on, causes vertical bars and increases\r\nthe probability of T (the target variable). Horizontal bars also increase\r\nthe probability of T directly, but vertical bars do not increase the probability\r\nof T. Thus, H1 is a hidden source of confounding.\r\n\r\n\r\nP(H=0) = 0.5, P(H=1) = 0.5\r\n\r\n\r\nBelow is the 'ground truth' that CFL should attempt to recover:\r\nH2 = the presence of horizontal bars in image\r\nH1 = presence of confounding hidden variable/vertical bar in image\r\n\r\nclass labels and P(T) for each class:\r\n0. p(T|H1=0,H2=0) = 0.1 NO bars\r\n1. p(T|H1=1,H2=0) = 0.4 Vertical bar only\r\n2. P(T|H1=0,H2=1) = 0.7 Horizontal bar only\r\n3. P(T|H1=1,H2=1) = 1. Both bars\r\n\r\n\r\nHere are some example function calls using this class:\r\nvb_data = VisualBarsData(n_samples=20, noise_lvl=0.1)\r\nvb_data.getImages()\r\nvb_data.getGroundTruth()\r\nvb_data.getTarget()\r\nvb_data.viewImages()\r\n'''\r\n\r\nclass VisualBarsData():\r\n\r\n def __init__(self, n_samples=1000, im_shape=(10, 10), noise_lvl=0, set_random_seed=None, hBarFreq=0.5, vBarFreq=0.5):\r\n '''the constructor generates n_samples binary vertical bars images,\r\n generates the ground labels for each image, and generates the target behavior associated\r\n with each image in separate, aligned np arrays\r\n\r\n Parameters:\r\n n_samples (int): number of images to generate\r\n im_shape (2-tuple): size of each image to generate, in pixels\r\n noise_lvl (float [0,1]): the amount of random noise that each image should contain (default is 0)\r\n set_random_seed (int): Optional, if enabled sets the random generator to a specific seed, allowing reproducible random results\r\n hBarFreq, vBarFreq (float between 0 and 1): the frequency with which a horizontal bar and a vertical bar (respectively) appear in the set of images\r\n\r\n Returns: \r\n None\r\n ''' \r\n assert 0 <= noise_lvl <= 1, \"noise_lvl must be between 0 and 1 but is {}\".format(\r\n noise_lvl)\r\n assert len(\r\n im_shape) == 2, \"im_shape should contain the dimensions of a 2D image but instead is {}\".format(im_shape)\r\n assert n_samples > 0, \"n_samples must be a positive integer (the number of images to generate) but instead is {}\".format(\r\n n_samples)\r\n assert 0 <= hBarFreq <= 1, \"hBarFreq must be between 0 and 1 but is {}\".format(\r\n hBarFreq)\r\n assert 0 <= vBarFreq <= 1, \"vBarFreq must be between 0 and 1 but is {}\".format(\r\n vBarFreq)\r\n\r\n self.n_samples = n_samples # number of images to generate\r\n self.im_shape = im_shape\r\n\r\n # create a random number generator (optionally seeded to allow reproducible results)\r\n self.random = np.random.default_rng(set_random_seed)\r\n\r\n # H1 and HBs = arrays of len n containing ground truth about the values of the hidden variables\r\n # causing vertical bars and horizontal bars, respectively\r\n self.X_images, self.H1, self.HBs = self._generate_images(\r\n n_samples, im_shape, noise_lvl, hBarFreq, vBarFreq)\r\n # gt_labels = array of len n with 'correct' class labels for each image\r\n self.gt_labels = self._ground_truth_classes()\r\n\r\n # target_vals = array of len n with value 
of T for each image (generated probabilistically)\r\n self.target_vals = self._generate_target()\r\n\r\n def __repr__(self):\r\n '''prints the binary images as np arrays when the VisualBarsData class is printed'''\r\n return str(self.X_images)\r\n\r\n def _generate_images(self, n_samples, im_shape, noise_lvl, hBarFreq, vBarFreq):\r\n '''\r\n Generates the 'ground truth'\r\n classification labels for each image based on whether hidden variable is\r\n active and/or horizontal bars present\r\n\r\n Parameters:\r\n X_images (np array) : array of binary images \r\n Hs (np array) : aligned with X_images, where Hs[i] indicates whether the hidden variable\r\n is active for X_images[i]\r\n HBs (np array) : aligned with X_images, where HBs[i] indicates whether there is a horizontal bar\r\n in X_images[i] or not \r\n\r\n Note:\r\n modified from the behave() function in ai_gratings.py (Chalupka 2015)\r\n #TODO: proper citation ?\r\n\r\n '''\r\n\r\n # X_images = array containing each image (each val in array represents a pixel)\r\n # start by generating the array with noise pixels\r\n X_images = self.random.random((n_samples, im_shape[0],\r\n im_shape[1])) < noise_lvl\r\n X_images = X_images.astype('float32')\r\n\r\n # possible values for the number of HBs and VBs\r\n # starting with 0 will allow for up to 1 HB and 1 VB in each image\r\n HB_val_poss = [0]\r\n VB_val_poss = [0]\r\n\r\n # Select hidden variable values.\r\n # H1 = array containing presence/absence of hidden var for each image\r\n H1 = self.random.random(n_samples) < vBarFreq\r\n\r\n # Select numbers of VBs and HBs.\r\n # when HB_val_poss and VB_val_poss = [0], then\r\n # VBs and HBs are each an array of length n_samples with values 0 or 1\r\n # the frequency of 1s vs 0s are determined by hBarFreq or vBarFreq\r\n VBs = self.random.choice(VB_val_poss, n_samples)+H1\r\n HBs = self.random.choice(HB_val_poss, n_samples) + \\\r\n (self.random.random(n_samples) < hBarFreq)\r\n\r\n # Make images with randomly placed Gs.\r\n for sample_id in range(n_samples):\r\n # Place the vertical bars.\r\n VB_locs = self.random.choice(range(im_shape[1]),\r\n VBs[sample_id], replace=False)\r\n HB_locs = self.random.choice(range(im_shape[0]),\r\n HBs[sample_id], replace=False)\r\n X_images[sample_id, HB_locs, :] = 1.\r\n X_images[sample_id, :, VB_locs] = 1.\r\n\r\n return X_images, H1, HBs\r\n\r\n def _ground_truth_classes(self):\r\n \"\"\"\r\n Generates the 'ground truth'\r\n classification labels for each image based on whether hidden variable is\r\n active and/or horizontal bars present\r\n\r\n Input\r\n X_images - array of binary images with some combo of horiz/vert bars\r\n H1 - array, aligned with X_images, saying whether hidden var is active for each image\r\n HBs - array, aligned with X_images, saying whether each image contains a horiz bar or not\r\n\r\n modified from behave() in ai_gratings.py (Chalupka 2015)\r\n\r\n \"\"\"\r\n gt_labels = np.zeros(\r\n self.n_samples) # gt_labels = array containing \"ground truth\" class labels for each image in X_images\r\n\r\n for i in range(self.n_samples):\r\n # H1 = indicates whether the vertical bar hidden var is active (1) or not (0)\r\n H1 = self.H1[i]\r\n # H2 = indicates whether the current image contains a horizontal bar (1) or not (0)\r\n H2 = self.HBs[i]\r\n\r\n if H2 == 0 and H1 == 0:\r\n gt_labels[i] = 0 \r\n if H2 == 0 and H1 == 1:\r\n gt_labels[i] = 1 \r\n if H2 == 1 and H1 == 0:\r\n gt_labels[i] = 2 \r\n if H2 == 1 and H1 == 1:\r\n gt_labels[i] = 3 \r\n\r\n return gt_labels.astype(int)\r\n\r\n def 
_generate_target(self):\r\n '''probabilistically generates the target behavior for each image, based on the\r\n ground truth probabilities expressed at the top of this file'''\r\n \r\n # this is the ground truth probability distribution \r\n # key= macrovariable class of image, value= probability that target equals one\r\n P_DICT = {0: 0.1, 1: 0.4, 2: 0.7, 3: 1.}\r\n\r\n target_vals = np.zeros(self.n_samples)\r\n\r\n for i in range(self.n_samples):\r\n currentP = P_DICT[self.gt_labels[i]]\r\n target_vals[i] = (self.random.random() < currentP)\r\n return target_vals\r\n\r\n def getImages(self):\r\n return self.X_images\r\n\r\n def getGroundTruth(self):\r\n return self.gt_labels\r\n\r\n def getTarget(self):\r\n return self.target_vals\r\n\r\n def getSomeImages(self, n_images, which_class=None):\r\n '''returns n visual bars images from the desired class (0=no bars, 1=vertical bars, 2 = horizontal bars, 3=both types of bars)(which_class should be a float).\r\n If no class is specified (or an invalid class label is specified), then images from any class will be returned\r\n If it is not possible to return n_images, then as many images as possible will be returned'''\r\n\r\n # get images from the specified class\r\n whichImages = np.where(self.gt_labels == which_class)[0]\r\n\r\n # if which_class was not a valid class label or no which_class was given\r\n if whichImages.shape == (0,):\r\n whichImages = range(self.n_samples)\r\n\r\n # get all images from the corresponding class label\r\n images = self.X_images[whichImages]\r\n\r\n # return n_images of them\r\n return images[:n_images]\r\n\r\n def saveSingleImage(self, fname):\r\n '''chooses a random image from X_images and saves it with the name fname'''\r\n image = self.X_images[self.random.choice(len(self.X_images))]\r\n fig = plt.figure()\r\n plt.axis(\"off\")\r\n plt.imshow(image)\r\n plt.savefig(fname)\r\n\r\n def saveData(self):\r\n '''saves the images, ground truth, and target effects'''\r\n pass # TODO: implement?\r\n"
] | [
[
"numpy.array",
"numpy.zeros",
"numpy.ones"
],
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.axis",
"numpy.zeros",
"numpy.where",
"numpy.random.default_rng",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
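The visual-bars generator in the row above maps the two hidden bar variables to a ground-truth class label (0: no bars, 1: vertical only, 2: horizontal only, 3: both) and then draws the binary target with the class probabilities 0.1 / 0.4 / 0.7 / 1.0 stated in its docstring. Below is a minimal sketch of that label-and-target sampling scheme, assuming a hypothetical helper and the default 0.5 bar frequencies; it is not the repository's own code.

```python
# Sketch of the ground-truth labelling and target sampling described in the
# visual-bars docstring above. Helper name and frequencies are assumptions.
import numpy as np

P_T_GIVEN_CLASS = {0: 0.1, 1: 0.4, 2: 0.7, 3: 1.0}

def sample_labels_and_target(n, v_bar_freq=0.5, h_bar_freq=0.5, seed=0):
    rng = np.random.default_rng(seed)
    h1 = rng.random(n) < v_bar_freq           # hidden variable -> vertical bar
    h2 = rng.random(n) < h_bar_freq           # horizontal bar present
    labels = (h1 + 2 * h2).astype(int)        # 0 none, 1 V only, 2 H only, 3 both
    p = np.vectorize(P_T_GIVEN_CLASS.get)(labels)
    target = (rng.random(n) < p).astype(int)  # Bernoulli target per class
    return labels, target

labels, target = sample_labels_and_target(10_000)
print(np.bincount(labels), target.mean())
```

With equal 0.5 frequencies the four classes are roughly balanced and the overall target rate lands near the average of the four class probabilities, matching the causal story in the generator's docstring.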
shengxinhu/tvm | [
"06c443e9959452c6da3a911fe0c11e08c5554477",
"06c443e9959452c6da3a911fe0c11e08c5554477",
"06c443e9959452c6da3a911fe0c11e08c5554477",
"06c443e9959452c6da3a911fe0c11e08c5554477",
"06c443e9959452c6da3a911fe0c11e08c5554477",
"06c443e9959452c6da3a911fe0c11e08c5554477"
] | [
"tests/python/unittest/test_tir_ptx_ldmatrix.py",
"tests/python/contrib/test_cudnn.py",
"tests/python/contrib/test_hexagon/test_cache_read_write.py",
"tests/python/unittest/test_tir_structural_equal_hash.py",
"python/tvm/relay/frontend/oneflow.py",
"tests/python/relay/test_op_grad_level10.py"
] | [
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport tvm\nfrom tvm.script import tir as T\nimport numpy as np\nimport tvm.testing\n\n\[email protected]_func\ndef ptx_ldmatrix(\n A: T.Buffer[(16, 16), \"float16\"], B: T.Buffer[(16, 16), \"float16\"], num: T.int32, trans: T.uint8\n) -> None:\n T.func_attr({\"global_symbol\": \"default_function\", \"tir.noalias\": True})\n bx = T.env_thread(\"blockIdx.x\")\n tx = T.env_thread(\"threadIdx.x\")\n T.launch_thread(bx, 1)\n T.launch_thread(tx, 32)\n with T.block():\n A_shared = T.alloc_buffer([16, 16], \"float16\", scope=\"shared\")\n A_local = T.alloc_buffer([8], \"float16\", scope=\"local\")\n\n for i in range(8):\n A_shared[i * 2 + tx // 16, tx % 16] = A[i * 2 + tx // 16, tx % 16]\n\n T.evaluate(\n T.ptx_ldmatrix(\n trans,\n num,\n \".b16\",\n A_local.data,\n 0,\n A_shared.data,\n 16 * (tx % 16) + 8 * (tx // 16),\n dtype=\"float16\",\n )\n )\n\n for k in range(2):\n for j in range(2):\n for i in range(2):\n B[8 * j + tx // 4, 8 * k + (tx % 4) * 2 + i] = A_local[4 * k + 2 * j + i]\n\n\[email protected]_cuda\ndef test_ptx_ldmatrix():\n f = ptx_ldmatrix\n _, _, param_num, param_trans = f.params\n arch = tvm.contrib.nvcc.get_target_compute_version()\n major, minor = tvm.contrib.nvcc.parse_compute_version(arch)\n if major * 10 + minor < 75:\n # Require at least SM75\n return\n for num in [1, 2, 4]:\n for trans in [False, True]:\n mod = tvm.build(f.specialize({param_num: num, param_trans: trans}), target=\"cuda\")\n A_np = np.random.rand(16, 16).astype(\"float16\")\n A_mask_np = np.zeros_like(A_np)\n if num == 1:\n if trans:\n A_mask_np[:8, :8] = A_np[:8, :8].T\n else:\n A_mask_np[:8, :8] = A_np[:8, :8]\n elif num == 2:\n if trans:\n A_mask_np[:8, :8] = A_np[:8, :8].T\n A_mask_np[8:16, :8] = A_np[8:16, :8].T\n else:\n A_mask_np[:16, :8] = A_np[:16, :8]\n else: # num == 4\n if trans:\n A_mask_np[:8, :8] = A_np[:8, :8].T\n A_mask_np[8:16, :8] = A_np[8:16, :8].T\n A_mask_np[:8, 8:16] = A_np[:8, 8:16].T\n A_mask_np[8:16, 8:16] = A_np[8:16, 8:16].T\n else:\n A_mask_np[:16, :16] = A_np[:16, :16]\n B_np = np.zeros((16, 16)).astype(\"float16\")\n dev = tvm.cuda(0)\n A_nd = tvm.nd.array(A_np, device=dev)\n B_nd = tvm.nd.array(B_np, device=dev)\n mod(A_nd, B_nd)\n tvm.testing.assert_allclose(B_nd.numpy(), A_mask_np)\n\n\nif __name__ == \"__main__\":\n test_ptx_ldmatrix()\n",
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport sys\n\nimport pytest\n\nimport tvm\nimport tvm.testing\nfrom tvm import te\nfrom tvm import relay\nfrom tvm.contrib import cudnn\nfrom tvm.contrib.nvcc import have_fp16\nfrom tvm.contrib import graph_executor\nimport numpy as np\nimport tvm.topi.testing\nimport tvm.testing\nfrom tvm.relay.op.contrib.cudnn import partition_for_cudnn\n\n\nrequires_cudnn = pytest.mark.skipif(\n tvm.get_global_func(\"tvm.contrib.cudnn.conv2d.forward\", True) is None,\n reason=\"CuDNN is not enabled\",\n)\n\n\ndef verify_conv2d(data_dtype, conv_dtype, tensor_format=0, groups=1):\n in_channel = 4\n out_channel = 16\n filter_h = 3\n filter_w = 3\n pad_h = 1\n pad_w = 1\n stride_h = 1\n stride_w = 1\n dilation_h = 1\n dilation_w = 1\n batch = 3\n height = 32\n width = 32\n\n if data_dtype == \"float16\" and not have_fp16(tvm.cuda(0).compute_version):\n print(\"Skip because gpu does not have fp16 support\")\n return\n\n # schedule\n if tensor_format == 0:\n xshape = [batch, in_channel, height, width]\n wshape = [out_channel, in_channel // groups, filter_h, filter_w]\n else:\n xshape = [batch, height, width, in_channel]\n wshape = [out_channel, filter_h, filter_w, in_channel // groups]\n\n X = te.placeholder(xshape, name=\"X\", dtype=data_dtype)\n W = te.placeholder(wshape, name=\"W\", dtype=data_dtype)\n Y = cudnn.conv_forward(\n X,\n W,\n [pad_h, pad_w],\n [stride_h, stride_w],\n [dilation_h, dilation_w],\n conv_mode=1,\n tensor_format=tensor_format,\n conv_dtype=conv_dtype,\n algo=-1,\n groups=groups,\n )\n yshape = [x.value for x in Y.shape]\n s = te.create_schedule(Y.op)\n\n # validation\n dev = tvm.cuda(0)\n f = tvm.build(s, [X, W, Y], \"cuda --host=llvm\", name=\"conv2d\")\n x_np = np.random.uniform(-1, 1, xshape).astype(data_dtype)\n w_np = np.random.uniform(-1, 1, wshape).astype(data_dtype)\n y_np = np.zeros(yshape).astype(data_dtype)\n x = tvm.nd.array(x_np, dev)\n w = tvm.nd.array(w_np, dev)\n y = tvm.nd.array(y_np, dev)\n if tensor_format == 0:\n c_np = tvm.topi.testing.conv2d_nchw_python(x_np, w_np, 1, 1, groups=groups)\n elif tensor_format == 1:\n wt = w_np.transpose((1, 2, 3, 0)) # OHWI => HWIO\n c_np = tvm.topi.testing.conv2d_nhwc_python(x_np, wt, 1, 1, groups=groups)\n\n f(x, w, y)\n tvm.testing.assert_allclose(y.numpy(), c_np, atol=1e-2, rtol=1e-2)\n\n\[email protected]_gpu\n@requires_cudnn\ndef test_conv2d():\n verify_conv2d(\"float32\", \"float32\", tensor_format=0)\n verify_conv2d(\"float16\", \"float32\", tensor_format=1)\n verify_conv2d(\"float16\", \"float16\", tensor_format=0)\n verify_conv2d(\"float16\", \"float16\", tensor_format=1)\n verify_conv2d(\"int8\", \"int32\", tensor_format=1)\n\n verify_conv2d(\"float32\", \"float32\", tensor_format=0, groups=2)\n verify_conv2d(\"float16\", \"float32\", 
tensor_format=1, groups=2)\n verify_conv2d(\"float16\", \"float16\", tensor_format=0, groups=2)\n verify_conv2d(\"int8\", \"int32\", tensor_format=1, groups=2)\n\n\ndef verify_conv3d(data_dtype, conv_dtype, tensor_format=0, groups=1):\n in_channel = 4\n out_channel = 16\n filter_d = 3\n filter_h = 3\n filter_w = 3\n pad_d = 1\n pad_h = 1\n pad_w = 1\n stride_d = 1\n stride_h = 1\n stride_w = 1\n dilation_d = 1\n dilation_h = 1\n dilation_w = 1\n batch = 3\n depth = 32\n height = 32\n width = 32\n\n # schedule\n xshape = [batch, in_channel, depth, height, width]\n wshape = [out_channel, in_channel // groups, filter_d, filter_h, filter_w]\n\n X = te.placeholder(xshape, name=\"X\", dtype=data_dtype)\n W = te.placeholder(wshape, name=\"W\", dtype=data_dtype)\n Y = cudnn.conv_forward(\n X,\n W,\n [pad_d, pad_h, pad_w],\n [stride_d, stride_h, stride_w],\n [dilation_d, dilation_h, dilation_w],\n conv_mode=1,\n tensor_format=tensor_format,\n algo=-1,\n conv_dtype=conv_dtype,\n groups=groups,\n )\n yshape = [x.value for x in Y.shape]\n s = te.create_schedule(Y.op)\n\n # validation\n dev = tvm.cuda(0)\n f = tvm.build(s, [X, W, Y], target=\"cuda --host=llvm\", name=\"conv3d\")\n x_np = np.random.uniform(-1, 1, xshape).astype(data_dtype)\n w_np = np.random.uniform(-1, 1, wshape).astype(data_dtype)\n y_np = np.zeros(yshape).astype(data_dtype)\n x = tvm.nd.array(x_np, dev)\n w = tvm.nd.array(w_np, dev)\n y = tvm.nd.array(y_np, dev)\n if tensor_format == 0:\n c_np = tvm.topi.testing.conv3d_ncdhw_python(x_np, w_np, 1, 1, groups)\n else:\n raise AssertionError(\"For now, conv3d tensor format only support: 0(NCHW)\")\n\n f(x, w, y)\n tvm.testing.assert_allclose(y.numpy(), c_np, atol=3e-5, rtol=1e-4)\n\n\[email protected]_gpu\n@requires_cudnn\ndef test_conv3d():\n verify_conv3d(\"float32\", \"float32\", tensor_format=0)\n verify_conv3d(\"float32\", \"float32\", tensor_format=0, groups=2)\n\n\ndef verify_softmax(shape, axis, dtype=\"float32\", log_softmax=False):\n cudnn_op = cudnn.log_softmax if log_softmax else cudnn.softmax\n testing_op = (\n tvm.topi.testing.log_softmax_python if log_softmax else tvm.topi.testing.softmax_python\n )\n\n A = te.placeholder(shape, dtype=dtype, name=\"A\")\n B = cudnn_op(A, axis)\n s = te.create_schedule([B.op])\n\n dev = tvm.cuda(0)\n a_np = np.random.uniform(size=shape).astype(dtype)\n b_np = testing_op(a_np)\n a = tvm.nd.array(a_np, dev)\n b = tvm.nd.array(b_np, dev)\n f = tvm.build(s, [A, B], target=\"cuda --host=llvm\", name=\"softmax\")\n f(a, b)\n tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-3)\n\n\ndef verify_softmax_4d(shape, dtype=\"float32\", log_softmax=False):\n cudnn_op = cudnn.log_softmax if log_softmax else cudnn.softmax\n testing_op = (\n tvm.topi.testing.log_softmax_python if log_softmax else tvm.topi.testing.softmax_python\n )\n\n A = te.placeholder(shape, dtype=dtype, name=\"A\")\n B = cudnn_op(A, axis=1)\n s = te.create_schedule([B.op])\n\n dev = tvm.cuda(0)\n n, c, h, w = shape\n a_np = np.random.uniform(size=shape).astype(dtype)\n b_np = testing_op(a_np.transpose(0, 2, 3, 1).reshape(h * w, c))\n b_np = b_np.reshape(n, h, w, c).transpose(0, 3, 1, 2)\n a = tvm.nd.array(a_np, dev)\n b = tvm.nd.array(b_np, dev)\n f = tvm.build(s, [A, B], target=\"cuda --host=llvm\", name=\"softmax\")\n f(a, b)\n tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-3)\n\n\[email protected]_gpu\n@requires_cudnn\ndef test_softmax():\n verify_softmax((32, 10), -1)\n verify_softmax((3, 4), -1)\n verify_softmax((1, 5), -1, \"float64\")\n verify_softmax_4d((1, 16, 256, 
256))\n verify_softmax_4d((1, 16, 256, 256), \"float64\")\n\n verify_softmax((32, 10), -1, log_softmax=True)\n verify_softmax((3, 4), -1, log_softmax=True)\n verify_softmax((1, 5), -1, \"float64\", log_softmax=True)\n verify_softmax_4d((1, 16, 256, 256), log_softmax=True)\n verify_softmax_4d((1, 16, 256, 256), \"float64\", log_softmax=True)\n\n\ndef verify_conv2d_backward_data(data_dtype, conv_dtype, tensor_format=0, tol=1e-5):\n batch = 3\n in_channel = 4\n out_channel = 16\n filter_h, filter_w = 3, 3\n pad_h, pad_w = 1, 1\n stride_h, stride_w = 1, 1\n height, width = 32, 32\n\n if tensor_format == 0:\n xshape = [batch, in_channel, height, width]\n wshape = [out_channel, in_channel, filter_h, filter_w]\n oshape = xshape\n oshape[1] = out_channel\n ref_func = tvm.topi.testing.conv2d_transpose_nchw_python\n else:\n xshape = [batch, height, width, in_channel]\n wshape = [out_channel, filter_h, filter_w, in_channel]\n oshape = xshape\n oshape[3] = out_channel\n ref_func = lambda dy_np, w_np, strides, padding, out_pad: tvm.topi.testing.conv2d_transpose_nhwc_python(\n dy_np, np.transpose(w_np, [1, 2, 3, 0]), \"HWOI\", strides, padding, out_pad\n )\n\n dy_np = np.random.uniform(-1, 1, oshape).astype(data_dtype)\n w_np = np.random.uniform(-1, 1, wshape).astype(data_dtype)\n\n if data_dtype == \"float16\":\n dx_np = ref_func(\n dy_np.astype(\"float32\"),\n w_np.astype(\"float32\"),\n (stride_h, stride_w),\n (pad_h, pad_w),\n (0, 0),\n )\n dx_np = dx_np.astype(\"float16\")\n else:\n dx_np = ref_func(dy_np, w_np, (stride_h, stride_w), (pad_h, pad_w), (0, 0))\n\n dy = te.placeholder(oshape, name=\"dy\", dtype=data_dtype)\n w = te.placeholder(wshape, name=\"dw\", dtype=data_dtype)\n dx = cudnn.conv_backward_data(\n dy,\n w,\n [pad_h, pad_w],\n [stride_h, stride_w],\n [1, 1],\n conv_mode=1,\n tensor_format=tensor_format,\n conv_dtype=conv_dtype,\n groups=1,\n )\n\n s = te.create_schedule(dx.op)\n\n dev = tvm.cuda(0)\n f = tvm.build(s, [dy, w, dx], \"cuda --host=llvm\", name=\"conv2d_backward_data\")\n\n dy = tvm.nd.array(dy_np, dev)\n w = tvm.nd.array(w_np, dev)\n dx = tvm.nd.array(dx_np, dev)\n\n f(dy, w, dx)\n tvm.testing.assert_allclose(dx.numpy(), dx_np, atol=tol, rtol=tol)\n\n\[email protected]_gpu\n@requires_cudnn\ndef test_conv2d_backward_data():\n verify_conv2d_backward_data(\"float32\", \"float32\", tensor_format=0, tol=1e-5)\n verify_conv2d_backward_data(\"float32\", \"float32\", tensor_format=1, tol=1e-2)\n # The scipy convolve function does not support fp16, so the reference will be computed with\n # fp32. 
Use larger tolerance to be on the safe side (1e-2 also seems mostly ok).\n verify_conv2d_backward_data(\"float16\", \"float16\", tensor_format=1, tol=1e-1)\n\n\ndef verify_conv2d_backward_filter(data_dtype, conv_dtype, tensor_format=0, tol=1e-5):\n batch = 3\n in_channel = 4\n out_channel = 16\n filter_h, filter_w = 3, 3\n pad_h, pad_w = 1, 1\n stride_h, stride_w = 1, 1\n height, width = 32, 32\n\n if tensor_format == 0:\n x_shape = [batch, in_channel, height, width]\n dy_shape = [batch, out_channel, height, width]\n else:\n x_shape = [batch, height, width, in_channel]\n dy_shape = [batch, height, width, out_channel]\n\n x_np = np.random.uniform(-1, 1, x_shape).astype(data_dtype)\n dy_np = np.random.uniform(-1, 1, dy_shape).astype(data_dtype)\n\n dw_np = tvm.topi.testing.conv2d_backward_weight_python(\n dy_np,\n x_np,\n (filter_h, filter_w),\n (stride_h, stride_w),\n (pad_h, pad_w),\n \"NCHW\" if tensor_format == 0 else \"NHWC\",\n )\n\n x = te.placeholder(x_shape, name=\"x\", dtype=data_dtype)\n dy = te.placeholder(dy_shape, name=\"dy\", dtype=data_dtype)\n dw = cudnn.conv_backward_filter(\n dy,\n x,\n (filter_h, filter_w),\n [pad_h, pad_w],\n [stride_h, stride_w],\n [1, 1],\n conv_mode=1,\n tensor_format=tensor_format,\n conv_dtype=conv_dtype,\n )\n\n s = te.create_schedule(dw.op)\n\n dev = tvm.cuda(0)\n f = tvm.build(s, [dy, x, dw], \"cuda --host=llvm\", name=\"conv2d_backward_filter\")\n\n x = tvm.nd.array(x_np, dev)\n dy = tvm.nd.array(dy_np, dev)\n dw = tvm.nd.array(dw_np, dev)\n\n f(dy, x, dw)\n tvm.testing.assert_allclose(dw.numpy(), dw_np, atol=tol, rtol=tol)\n\n\[email protected]_gpu\n@requires_cudnn\ndef test_conv2d_backward_filter():\n verify_conv2d_backward_filter(\"float32\", \"float32\", tensor_format=0, tol=1e-2)\n verify_conv2d_backward_filter(\"float32\", \"float32\", tensor_format=1, tol=1e-2)\n\n\ntest_kwargs_default_2d = {\n \"tensor_format\": 0,\n \"pad\": [1, 1],\n \"stride\": [1, 1],\n \"dilation\": [1, 1],\n \"x_shape\": [16, 4, 32, 32],\n \"w_shape\": [8, 4, 3, 3],\n \"groups\": 1,\n \"conv_dtype\": \"float32\",\n \"data_dtype\": \"float32\",\n}\ntest_kwargs_default_3d = {\n \"tensor_format\": 0,\n \"pad\": [1, 1, 1],\n \"stride\": [1, 1, 1],\n \"dilation\": [1, 1, 1],\n \"x_shape\": [16, 4, 32, 32, 32],\n \"w_shape\": [8, 4, 3, 3, 3],\n \"groups\": 1,\n \"conv_dtype\": \"float32\",\n \"data_dtype\": \"float32\",\n}\nconv_output_shape_conditions = {\n \"2d_small\": test_kwargs_default_2d,\n \"2d_large\": {\n **test_kwargs_default_2d,\n \"x_shape\": [16, 32, 512, 1024],\n \"w_shape\": [8, 32, 5, 5],\n },\n \"2d_pad\": {**test_kwargs_default_2d, \"pad\": [2, 3]},\n \"2d_stride\": {**test_kwargs_default_2d, \"stride\": [2, 3]},\n \"2d_dilation\": {**test_kwargs_default_2d, \"dilation\": [2, 3]},\n \"2d_groups\": {**test_kwargs_default_2d, \"groups\": 4, \"w_shape\": [8, 1, 3, 3]},\n \"2d_NHWC\": {\n **test_kwargs_default_2d,\n \"tensor_format\": 1,\n \"x_shape\": [16, 32, 32, 4],\n \"w_shape\": [8, 3, 3, 4],\n },\n \"2d_NCHW_VECT_C\": {\n **test_kwargs_default_2d,\n \"tensor_format\": 2,\n \"w_shape\": [8, 16, 3, 3],\n \"data_dtype\": \"int8x4\",\n },\n \"3d_small\": test_kwargs_default_3d,\n \"3d_large\": {\n **test_kwargs_default_3d,\n \"x_shape\": [16, 32, 64, 128, 256],\n \"w_shape\": [8, 32, 5, 5, 5],\n },\n \"3d_pad\": {**test_kwargs_default_3d, \"pad\": [2, 3, 4]},\n \"3d_stride\": {**test_kwargs_default_3d, \"stride\": [2, 3, 4]},\n \"3d_dilation\": {**test_kwargs_default_3d, \"dilation\": [2, 3, 4]},\n \"3d_groups\": {**test_kwargs_default_3d, \"groups\": 4, 
\"w_shape\": [8, 1, 3, 3, 3]},\n \"3d_NCHW_VECT_C\": {\n **test_kwargs_default_3d,\n \"tensor_format\": 2,\n \"w_shape\": [8, 16, 3, 3, 3],\n \"data_dtype\": \"int8x4\",\n },\n}\n\n\[email protected](\n params=[pytest.param(kwargs, id=name) for name, kwargs in conv_output_shape_conditions.items()]\n)\ndef conv_output_shape_kwargs(request):\n return request.param\n\n\ndef _verify_cudnn_relay(expr):\n np.random.seed(42)\n\n mod = tvm.IRModule.from_expr(expr)\n mod = relay.transform.InferType()(mod)\n func = mod[\"main\"]\n cudnn_mod = partition_for_cudnn(mod)\n assert len(cudnn_mod.get_global_vars()) == 2\n\n input_data = []\n for param in func.params:\n shape = [int(x) for x in param.checked_type.shape]\n input_data.append(\n (\n param.name_hint,\n np.random.uniform(-32, 32, size=shape).astype(param.checked_type.dtype),\n )\n )\n\n cuda_config = (tvm.target.cuda(), tvm.cuda(), cudnn_mod)\n cpu_config = (tvm.target.Target(\"llvm\"), tvm.cpu(), mod)\n outputs = []\n for target, dev, test_mod in [cuda_config, cpu_config]:\n with tvm.transform.PassContext(opt_level=3):\n lib = relay.build(test_mod, target=target, target_host=cpu_config[0])\n module = graph_executor.GraphModule(lib[\"default\"](dev))\n for name, data in input_data:\n module.set_input(name, tvm.nd.array(data, dev))\n\n module.run()\n out_type = func.body.checked_type\n outputs.append(\n module.get_output(0, tvm.nd.empty(out_type.shape, dtype=out_type.dtype)).numpy()\n )\n\n tvm.testing.assert_allclose(\n outputs[0],\n outputs[1],\n rtol=1e-3,\n atol=30,\n )\n\n\[email protected]_cuda\[email protected](\n \"shape,axis\",\n [\n ((200,), 0),\n ((13, 27), 0),\n ((44, 12, 67), 1),\n ((1, 16, 16, 8), 2),\n ((2, 4, 6, 8, 10), 3),\n ],\n)\[email protected](\n \"dtype\",\n [\n \"float32\",\n \"float16\",\n \"float64\",\n ],\n)\ndef test_relay_cudnn_softmax(shape, axis, dtype):\n x = tvm.relay.var(\"x\", tvm.relay.TensorType(shape, dtype))\n softmax = relay.op.nn.softmax(x, axis=axis)\n _verify_cudnn_relay(softmax)\n\n\[email protected]_cuda\[email protected](\n \"shape,axis\",\n [\n ((32, 16), -1),\n ((13, 27), 1),\n ],\n)\[email protected](\n \"dtype\",\n [\n \"float32\",\n \"float16\",\n \"float64\",\n ],\n)\ndef test_relay_cudnn_log_softmax(shape, axis, dtype):\n x = tvm.relay.var(\"x\", tvm.relay.TensorType(shape, dtype))\n log_softmax = relay.op.nn.log_softmax(x, axis=axis)\n _verify_cudnn_relay(log_softmax)\n\n\[email protected]_cuda\[email protected](\n \"n,h,w,ci,co,groups\",\n [\n (1, 16, 20, 8, 16, 1),\n (10, 17, 19, 16, 8, 4),\n ],\n)\[email protected](\n \"kh,kw,padding\",\n [\n (1, 1, (3, 1, 3, 1)),\n (3, 3, (1, 2)),\n (7, 2, (0, 0)),\n ],\n)\[email protected](\n \"strides,dilation,dtype\",\n [\n ((1, 1), (1, 1), \"float32\"),\n ((2, 1), (2, 2), \"float16\"),\n ((3, 3), (1, 2), \"float64\"),\n ],\n)\ndef test_relay_cudnn_conv2d(n, h, w, ci, co, kh, kw, strides, dilation, padding, groups, dtype):\n data = tvm.relay.var(\"data\", tvm.relay.TensorType((n, ci, h, w), dtype))\n weight = tvm.relay.var(\"weight\", tvm.relay.TensorType((co, ci // groups, kh, kw), dtype))\n conv2d = relay.op.nn.conv2d(\n data,\n weight,\n groups=groups,\n channels=co,\n kernel_size=(kh, kw),\n strides=strides,\n dilation=dilation,\n padding=padding,\n data_layout=\"NCHW\",\n kernel_layout=\"OIHW\",\n )\n _verify_cudnn_relay(conv2d)\n\n\[email protected]_cuda\[email protected](\n \"n,h,w,ci,co,groups\",\n [\n (1, 16, 20, 8, 16, 1),\n (10, 17, 19, 16, 8, 4),\n ],\n)\[email protected](\n \"kh,kw,padding,strides,dilation,dtype\",\n [\n (1, 1, (3, 1, 3, 
1), (1, 1), (1, 1), \"float32\"),\n (3, 3, (1, 2), (2, 1), (2, 2), \"float16\"),\n (7, 2, (0, 0), (3, 3), (1, 2), \"float64\"),\n ],\n)\[email protected](\"activation\", [True, False])\ndef test_relay_cudnn_conv2d_bias_act(\n n, h, w, ci, co, kh, kw, strides, dilation, padding, groups, dtype, activation\n):\n data = tvm.relay.var(\"data\", tvm.relay.TensorType((n, ci, h, w), dtype))\n weight = tvm.relay.var(\"weight\", tvm.relay.TensorType((co, ci // groups, kh, kw), dtype))\n bias = relay.var(\"bias\", relay.TensorType((co,), dtype))\n conv2d = relay.op.nn.conv2d(\n data,\n weight,\n groups=groups,\n channels=co,\n kernel_size=(kh, kw),\n strides=strides,\n dilation=dilation,\n padding=padding,\n data_layout=\"NCHW\",\n kernel_layout=\"OIHW\",\n )\n out = relay.op.nn.bias_add(conv2d, bias)\n if activation:\n out = relay.op.nn.relu(out)\n\n _verify_cudnn_relay(out)\n\n\nif __name__ == \"__main__\":\n tvm.testing.main()\n",
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport pytest\nimport numpy as np\nfrom tvm.contrib.hexagon.session import Session\n\nimport tvm.testing\nfrom tvm import te, tir\nfrom tvm.script import tir as T\nfrom tvm.contrib.hexagon.session import Session\n\n\ndef intrin_mem_copy(shape, dtype, dst_scope, src_scope):\n src = te.placeholder(shape=shape, dtype=dtype, name=\"src\")\n dst = te.compute(shape, lambda i: src[i], name=\"dst\")\n size = shape[0] * np.dtype(dtype).itemsize\n\n src_buffer = tvm.tir.decl_buffer(\n shape,\n dtype,\n scope=src_scope,\n offset_factor=1,\n name=\"mem_copy_src_buffer\",\n )\n\n dst_buffer = tvm.tir.decl_buffer(\n shape,\n dtype,\n scope=dst_scope,\n offset_factor=1,\n name=\"mem_copy_dst_buffer\",\n )\n\n zero_indices = [0 for _ in shape]\n\n def intrin_func(ins, outs):\n ib = tvm.tir.ir_builder.create()\n\n _src = ins[0]\n _dst = outs[0]\n\n dst_handle = ib.buffer_ptr(dst_buffer)\n src_handle = ib.buffer_ptr(src_buffer)\n\n ib.emit(\n tvm.tir.call_intrin(\n \"handle\",\n \"tir.mem_copy\",\n tvm.tir.call_intrin(\"handle\", \"tir.address_of\", dst_handle[zero_indices]),\n tvm.tir.call_intrin(\"handle\", \"tir.address_of\", src_handle[zero_indices]),\n size,\n )\n )\n return ib.get()\n\n return te.decl_tensor_intrin(dst.op, intrin_func, binds={src: src_buffer, dst: dst_buffer})\n\n\ndef verify(hexagon_session: Session, s, x, y, z, size):\n print(tvm.lower(s, [x, y, z]))\n\n target_hexagon = tvm.target.hexagon(\"v68\", link_params=True)\n func = tvm.build(\n s, [x, y, z], tvm.target.Target(target_hexagon, host=target_hexagon), name=\"dmacpy\"\n )\n\n mod = hexagon_session.load_module(func)\n xt = tvm.nd.array(\n np.random.randint(low=-128, high=127, size=size, dtype=x.dtype),\n device=hexagon_session.device,\n )\n yt = tvm.nd.array(\n np.random.randint(low=-128, high=127, size=size, dtype=y.dtype),\n device=hexagon_session.device,\n )\n zt = tvm.nd.array(\n np.random.randint(low=-128, high=127, size=size, dtype=z.dtype),\n device=hexagon_session.device,\n )\n mod[\"dmacpy\"](xt, yt, zt)\n\n ref = xt.numpy() + yt.numpy()\n np.testing.assert_equal(zt.numpy(), ref)\n\n\[email protected]_hexagon\ndef test_cache_read_write(hexagon_session: Session):\n size = 128\n outer_shape = (size,)\n factor = 16\n inner_shape = (factor,)\n dtype = \"int8\"\n\n x = te.placeholder(shape=outer_shape, dtype=dtype, name=\"x\")\n y = te.placeholder(shape=outer_shape, dtype=dtype, name=\"y\")\n z = te.compute(outer_shape, lambda i: x[i] + y[i], name=\"z\")\n s = te.create_schedule(z.op)\n\n x_vtcm = s.cache_read(x, \"global.vtcm\", [z])\n y_vtcm = s.cache_read(y, \"global.vtcm\", [z])\n z_vtcm = s.cache_write(z, \"global.vtcm\")\n\n zouter, zinner = s[z_vtcm].split(z_vtcm.op.axis[0], factor=factor)\n\n s[x_vtcm].compute_at(s[z_vtcm], 
zouter)\n s[y_vtcm].compute_at(s[z_vtcm], zouter)\n\n mem_copy_read = intrin_mem_copy(inner_shape, dtype, \"global.vtcm\", \"global\")\n\n (cache_read_x,) = s[x_vtcm].op.axis\n s[x_vtcm].tensorize(cache_read_x, mem_copy_read)\n\n (cache_read_y,) = s[y_vtcm].op.axis\n s[y_vtcm].tensorize(cache_read_y, mem_copy_read)\n\n mem_copy_write = intrin_mem_copy(outer_shape, dtype, \"global\", \"global.vtcm\")\n\n (cache_write_z,) = s[z].op.axis\n s[z].tensorize(cache_write_z, mem_copy_write)\n\n verify(hexagon_session, s, x, y, z, size)\n\n\ndef layout_transform_2d(n):\n return [n // 16, te.AXIS_SEPARATOR, n % 16]\n\n\[email protected]_hexagon\ndef test_cache_read_write_2d(hexagon_session: Session):\n size = 128\n outer_shape = (size,)\n factor = 16\n inner_shape = (factor,)\n dtype = \"int8\"\n\n x = te.placeholder(shape=outer_shape, dtype=dtype, name=\"x\")\n y = te.placeholder(shape=outer_shape, dtype=dtype, name=\"y\")\n z = te.compute(outer_shape, lambda i: x[i] + y[i], name=\"z\")\n s = te.create_schedule(z.op)\n\n x_vtcm = s.cache_read(x, \"global.vtcm\", [z])\n y_vtcm = s.cache_read(y, \"global.vtcm\", [z])\n z_vtcm = s.cache_write(z, \"global.vtcm\")\n\n layout_x_vtcm = s[x_vtcm].transform_layout(layout_transform_2d)\n layout_y_vtcm = s[y_vtcm].transform_layout(layout_transform_2d)\n layout_z_vtcm = s[z_vtcm].transform_layout(layout_transform_2d)\n\n mem_copy_read = intrin_mem_copy(inner_shape, dtype, \"global.vtcm\", \"global\")\n s[x_vtcm].tensorize(layout_x_vtcm[1], mem_copy_read)\n s[y_vtcm].tensorize(layout_y_vtcm[1], mem_copy_read)\n\n # The loop schedule over `z` is not modified when calling `transform_layout`\n # on `z_vtcm` above therefore we must call `split` to modify the loop schedule\n # over `z` to match the layout of `z_vtcm` such that we can accurately write\n # `z_vtcm` back to `z` using memory copy intrinsic\n zouter, zinner = s[z].split(z.op.axis[0], factor=factor)\n mem_copy_write = intrin_mem_copy(inner_shape, dtype, \"global\", \"global.vtcm\")\n s[z].tensorize(zinner, mem_copy_write)\n\n verify(hexagon_session, s, x, y, z, size)\n\n\[email protected]_func\ndef scale_by_two(A: T.Buffer[(8192,), \"int8\"], C: T.Buffer[(8192,), \"int8\"]):\n for i in T.serial(\n 0,\n 8192,\n ):\n with T.block(\"C\"):\n C[i] = A[i] * T.int8(2)\n\n\ndef test_vtcm_lowering():\n mod = tvm.IRModule.from_expr(scale_by_two.with_attr(\"global_symbol\", \"main\"))\n sch = tir.Schedule(mod, debug_mask=\"all\")\n block_c = sch.get_block(\"C\")\n (flat,) = sch.get_loops(block_c)\n o, i, ii, iii = sch.split(flat, factors=[8, 4, 2, 128])\n cache_block = sch.cache_read(block_c, 0, storage_scope=\"global.vtcm\")\n sch.compute_at(cache_block, o)\n lowered = tvm.lower(sch.mod[\"main\"])\n\n def ir_module_has_allocate_nodes(irmod):\n nallocs = 0\n\n def _visit(stmt):\n nonlocal nallocs\n if isinstance(stmt, tvm.tir.Allocate):\n nallocs += 1\n\n tvm.tir.stmt_functor.post_order_visit(irmod[\"main\"].body, _visit)\n return nallocs\n\n assert not ir_module_has_allocate_nodes(lowered), (\n \"AllocateNode found in lowered IRModule, \"\n \"VTCM allocations should have been lowered to tir.nd_mem_alloc_with_scope\"\n )\n",
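The ir_module_has_allocate_nodes check above is just a post_order_visit walk over the lowered PrimFunc body. A host-only sketch of the same pattern (plain lowering, nothing Hexagon- or VTCM-specific; the node counts in the comments are what I'd expect for this trivial schedule):

import tvm
from tvm import te

n = 64
A = te.placeholder((n,), name="A", dtype="int8")
B = te.compute((n,), lambda i: A[i] + tvm.tir.const(1, "int8"), name="B")
s = te.create_schedule(B.op)
mod = tvm.lower(s, [A, B])  # IRModule containing a single "main" PrimFunc

def count_nodes(prim_func, node_type):
    # Walk every statement/expression node in post order and count matches.
    count = 0
    def _visit(stmt):
        nonlocal count
        if isinstance(stmt, node_type):
            count += 1
    tvm.tir.stmt_functor.post_order_visit(prim_func.body, _visit)
    return count

print(count_nodes(mod["main"], tvm.tir.For))       # expected 1: the single loop over i
print(count_nodes(mod["main"], tvm.tir.Allocate))  # expected 0: A and B are parameters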
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nimport tvm\nimport numpy as np\nimport pytest\nfrom tvm import te\n\n\ndef consistent_equal(x, y, map_free_vars=False):\n struct_equal0 = tvm.ir.structural_equal(x, y, map_free_vars)\n struct_equal1 = tvm.ir.structural_equal(y, x, map_free_vars)\n\n xhash = tvm.ir.structural_hash(x, map_free_vars)\n yhash = tvm.ir.structural_hash(y, map_free_vars)\n\n if struct_equal0 != struct_equal1:\n raise ValueError(\n \"Non-communicative {} vs {}, sequal0={}, sequal1={}\".format(\n x, y, struct_equal0, struct_equal1\n )\n )\n\n # NOTE: hash colision can happen but should be rare.\n # we can confirm that hash colison doesn't happen for our testcases\n if struct_equal0 != (xhash == yhash):\n raise ValueError(\n \"Inconsistent {} vs {}, sequal={}, xhash={}, yhash={}\".format(\n x, y, struct_equal0, xhash, yhash\n )\n )\n return struct_equal0\n\n\ndef test_exprs():\n # save load json\n x = tvm.tir.const(1, \"int32\")\n y = tvm.tir.const(10, \"int32\")\n vx = te.var(\"x\")\n vy = te.var(\"y\")\n vz = te.var(\"z\")\n zx = vx + vx\n zy = vy + vy\n\n assert consistent_equal(zx * zx, (vx + vx) * (vx + vx), map_free_vars=False)\n\n # test assert trigger.\n with pytest.raises(ValueError):\n tvm.ir.assert_structural_equal(x, y)\n\n assert not consistent_equal(vx, vy)\n assert consistent_equal(vx, vy, map_free_vars=True)\n # corner case lhs:vx == rhs:vy, but cannot map it iteslf\n assert not consistent_equal(vx + vx, vy + vx, map_free_vars=True)\n # corner case lhs:vx == rhs:vy, lhs:vy == rhs:vx\n assert consistent_equal(vx + vy, vy + vx, map_free_vars=True)\n # corner case2: rolling remap.\n assert consistent_equal(vx + vy + vz, vy + vz + vx, map_free_vars=True)\n assert not consistent_equal(vx + 1, vy + 1, map_free_vars=False)\n # Defintition remap\n assert consistent_equal(tvm.tir.Let(vx, 1, vx - 1), tvm.tir.Let(vy, 1, vy - 1))\n # Default same address free var remap\n assert consistent_equal(tvm.tir.Let(vx, 1, vx // vz), tvm.tir.Let(vy, 1, vy // vz))\n\n assert consistent_equal(zx * zx, zx * zx)\n assert consistent_equal(zx * zx, zy * zy, map_free_vars=True)\n assert not consistent_equal(zx * zx, zy * zy, map_free_vars=False)\n\n\ndef test_prim_func():\n x = te.var(\"x\")\n y = te.var(\"y\")\n # counter example of same equality\n func0 = tvm.tir.PrimFunc([x, y], tvm.tir.Evaluate(x + y))\n func1 = tvm.tir.PrimFunc([x, y], tvm.tir.Evaluate(y + x))\n assert not consistent_equal(func0, func1)\n\n # new cases\n b = tvm.tir.decl_buffer((x,), \"float32\")\n stmt = tvm.tir.LetStmt(x, 10, tvm.tir.Evaluate(x + 1))\n func0 = tvm.tir.PrimFunc([x, y, b], stmt)\n # easiest way to deep copy is via save/load\n func1 = tvm.ir.load_json(tvm.ir.save_json(func0))\n tvm.ir.assert_structural_equal(func0, func1)\n\n data0 = tvm.nd.array([1, 2, 
3])\n data1 = tvm.nd.array([1, 2, 3])\n # attributes and ndarrays\n func0 = func0.with_attr(\"data\", data0)\n func1 = func1.with_attr(\"data\", data1)\n # IRModules\n mod0 = tvm.IRModule.from_expr(func0)\n mod1 = tvm.IRModule.from_expr(func1)\n tvm.ir.assert_structural_equal(mod0, mod1)\n\n\ndef test_array():\n x = np.arange(10)\n nx = tvm.nd.array(x)\n ny = tvm.nd.array(x)\n nz = tvm.nd.array(x.reshape(2, 5))\n assert consistent_equal(nx, ny)\n assert not consistent_equal(nx, nz)\n\n\ndef test_env_func():\n @tvm.register_func(\"test.sequal.env_func\")\n def test(x):\n return x + 1\n\n x = tvm.ir.EnvFunc.get(\"test.sequal.env_func\")\n y = tvm.ir.EnvFunc.get(\"test.sequal.env_func\")\n assert consistent_equal(y, x)\n\n\ndef test_attrs():\n x = tvm.ir.make_node(\"attrs.TestAttrs\", axis=1, name=\"xx\")\n y = tvm.ir.make_node(\"attrs.TestAttrs\", axis=1, name=\"xx\")\n z = tvm.ir.make_node(\"attrs.TestAttrs\", axis=2, name=\"xx\")\n tvm.ir.assert_structural_equal(y, x)\n assert not consistent_equal(y, z)\n\n x = tvm.runtime.convert({\"x\": [1, 2, 3], \"y\": 2})\n y = tvm.runtime.convert({\"y\": 2, \"x\": [1, 2, 3]})\n z = tvm.runtime.convert({\"y\": 2, \"x\": [1, 2, 3, 4]})\n assert consistent_equal(y, x)\n assert not consistent_equal(y, z)\n\n\ndef test_stmt():\n x = te.var(\"x\")\n y = te.var(\"y\")\n n = 128\n A = te.placeholder((n, n), name=\"A\")\n B = te.placeholder((n, n), name=\"B\")\n ii = te.var(\"i\")\n jj = te.var(\"j\")\n\n Ab = tvm.tir.decl_buffer((n,), name=\"A\")\n n = te.var(\"n\")\n\n def func2():\n ib = tvm.tir.ir_builder.create()\n A = ib.buffer_ptr(Ab)\n with ib.for_range(0, n, name=\"i\") as i:\n A[i] = A[i] + 1\n with ib.for_range(0, 10, name=\"j\") as j:\n A[j] = A[j] + 2\n A[j] = A[j] + 2\n return ib.get()\n\n assert consistent_equal(func2(), func2())\n\n\ndef test_buffer_storage_scope():\n x = te.var(\"x\", dtype=\"handle\")\n\n buffer_local_0 = tvm.tir.decl_buffer((10, 10), \"float32\", scope=\"local\")\n buffer_local_1 = tvm.tir.decl_buffer((10, 10), \"float32\", scope=\"local\")\n buffer_global = tvm.tir.decl_buffer((10, 10), \"float32\", scope=\"global\")\n buffer_empty = tvm.tir.decl_buffer((10, 10), \"float32\", scope=\"\")\n\n func0 = tvm.tir.PrimFunc([x], tvm.tir.Evaluate(x), buffer_map={x: buffer_local_0})\n func1 = tvm.tir.PrimFunc([x], tvm.tir.Evaluate(x), buffer_map={x: buffer_local_1})\n func2 = tvm.tir.PrimFunc([x], tvm.tir.Evaluate(x), buffer_map={x: buffer_global})\n func3 = tvm.tir.PrimFunc([x], tvm.tir.Evaluate(x), buffer_map={x: buffer_empty})\n\n assert consistent_equal(func0, func1)\n assert consistent_equal(func2, func3)\n assert not consistent_equal(func0, func2)\n\n\ndef test_buffer_load_store():\n b = tvm.tir.decl_buffer((10, 10), \"float32\")\n x = tvm.tir.BufferLoad(b, [0, 1])\n y = tvm.tir.BufferLoad(b, [0, 1])\n z = tvm.tir.BufferLoad(b, [1, 2])\n assert consistent_equal(y, x)\n assert not consistent_equal(y, z)\n\n i = tvm.tir.Var(\"x\", \"int32\")\n sx = tvm.tir.BufferStore(b, 0.1, [0, i])\n sy = tvm.tir.BufferStore(b, 0.1, [0, i])\n sz = tvm.tir.BufferStore(b, 0.1, [1, i])\n assert consistent_equal(sy, sx)\n assert not consistent_equal(sy, sz)\n\n\ndef test_while():\n x = tvm.tir.Var(\"x\", \"int32\")\n y = tvm.tir.Var(\"y\", \"int32\")\n wx = tvm.tir.While(x > 0, tvm.tir.Evaluate(x))\n wy = tvm.tir.While(y > 0, tvm.tir.Evaluate(y))\n assert not consistent_equal(wx, wy)\n assert consistent_equal(wx, wy, map_free_vars=True)\n\n\nif __name__ == \"__main__\":\n test_exprs()\n test_prim_func()\n test_attrs()\n test_array()\n 
test_env_func()\n test_stmt()\n test_buffer_storage_scope()\n test_buffer_load_store()\n test_while()\n",
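For quick reference, the behaviour consistent_equal asserts above comes straight from the public tvm.ir APIs; a minimal usage sketch:

import tvm
from tvm import te

vx, vy = te.var("x"), te.var("y")

# Distinct free variables are not equal unless map_free_vars=True maps them.
assert not tvm.ir.structural_equal(vx + 1, vy + 1)
assert tvm.ir.structural_equal(vx + 1, vy + 1, map_free_vars=True)

# structural_hash is consistent with structural_equal under the same mapping mode.
assert tvm.ir.structural_hash(vx + 1, map_free_vars=True) == tvm.ir.structural_hash(
    vy + 1, map_free_vars=True
)

# assert_structural_equal raises ValueError on mismatch, passes silently otherwise.
tvm.ir.assert_structural_equal(vx * 2, vx * 2)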
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=invalid-name, import-self, len-as-condition, unused-argument, too-many-lines\n# pylint: disable=import-outside-toplevel\n\"\"\"OneFlow: OneFlow is a performance-centered and open-source deep learning framework.\"\"\"\n\nimport os\nimport re\nimport copy\nfrom collections import OrderedDict\n\nimport numpy as np\nimport tvm\nfrom tvm.ir import IRModule\nfrom tvm.topi.utils import get_const_tuple\n\nfrom .. import analysis\nfrom .. import expr as _expr\nfrom .. import function as _function\nfrom .. import op as _op\nfrom .. import ty as _ty\nfrom .common import (\n AttrCvt,\n Renamer,\n fold_constant,\n get_relay_op,\n infer_shape,\n infer_type,\n new_var,\n)\n\n__all__ = [\"from_oneflow\"]\n\nFLOW_2_STR_DTYPE = {\n 2: \"float32\",\n 3: \"float64\",\n 6: \"int64\",\n 5: \"int32\",\n 4: \"int8\",\n 7: \"uint8\",\n 9: \"float16\",\n}\n\n\ndef is_input_op(node):\n \"\"\"Return true when the node is the input of the graph.\"\"\"\n return node.WhichOneof(\"op_type\") == \"input_conf\"\n\n\ndef is_user_op(node):\n \"\"\"Return true when the node is the intermediate variables of graph.\"\"\"\n return node.WhichOneof(\"op_type\") == \"user_conf\"\n\n\ndef is_output_op(node):\n \"\"\"Return true when the node is the output of the graph.\"\"\"\n return node.WhichOneof(\"op_type\") == \"output_conf\"\n\n\ndef is_param_op(node):\n \"\"\"Return true when the node is the intermediate variables of model(saved).\"\"\"\n return node.WhichOneof(\"op_type\") == \"variable_conf\"\n\n\ndef get_node_info(node):\n \"\"\"\n Get basic information about nodes: shape, data_type\n \"\"\"\n # list->tuple\n shape = tuple(node.input_conf.blob_conf.shape.dim)\n # get data type\n dtype = node.input_conf.blob_conf.data_type\n if dtype in list(FLOW_2_NP_DTYPE.keys()):\n data_type = FLOW_2_NP_DTYPE[dtype]\n else:\n raise IndexError(\"Please check the data type of your node: %s\" % node.name)\n\n return shape, data_type\n\n\ndef _dtype_shape_promotion(inputs):\n \"\"\"Promote data type and shape for list of tensors.\"\"\"\n\n dtype_order = [\"bool\", \"int8\", \"int16\", \"int32\", \"int64\", \"float32\", \"float64\"]\n ranks = [len(infer_shape(x)) for x in inputs]\n if set(ranks) == set([1, 0]):\n for i, r in enumerate(ranks):\n if r == 0:\n inputs[i] = _op.expand_dims(inputs[i], axis=0)\n\n dtypes = set(dtype_order.index(infer_type(x).checked_type.dtype) for x in inputs)\n if len(dtypes) == 1:\n return inputs\n max_dtype = dtype_order[max(dtypes)]\n for i, input_op in enumerate(inputs):\n if infer_type(input_op).checked_type.dtype != max_dtype:\n inputs[i] = input_op.astype(max_dtype)\n return inputs\n\n\ndef parse_attr(attr):\n \"\"\"Parse attribute of user op in oneflow.\"\"\"\n attrs = {}\n for a in attr:\n attr_str = 
str(attr[a])\n\n if attr_str[0:7] == \"at_list\":\n attr_str_ = attr_str.split(\" \")[0]\n\n if attr_str_ == \"at_list_float\":\n attrs[a] = tuple(attr[a].at_list_float.val)\n elif attr_str_ == \"at_list_int32\":\n attrs[a] = tuple(attr[a].at_list_int32.val)\n elif attr_str_ == \"at_list_int64\":\n attrs[a] = tuple(attr[a].at_list_int64.val)\n\n elif attr_str.split(\":\")[0] == \"at_string\":\n attrs[a] = attr[a].at_string\n\n elif attr_str.split(\" \")[0] == \"at_shape\":\n attrs[a] = tuple(list(attr[a].at_shape.dim))\n\n else:\n attr_str_ = attr_str.split(\":\")[0]\n if attr_str_ == \"at_bool\":\n attrs[a] = attr[a].at_bool\n elif attr_str_ == \"at_double\":\n attrs[a] = attr[a].at_double\n elif attr_str_ == \"at_float\":\n attrs[a] = attr[a].at_float\n elif attr_str_ == \"at_int32\":\n attrs[a] = attr[a].at_int32\n elif attr_str_ == \"at_int64\":\n attrs[a] = attr[a].at_int64\n\n return attrs\n\n\ndef shape_of(x, dtype=\"int64\"):\n ttype = infer_type(x).checked_type\n if not _ty.is_dynamic(ttype):\n shape = list(ttype.shape)\n return _expr.const(shape, dtype)\n\n return _op.shape_of(x, dtype)\n\n\ndef dimension_constraint():\n def _dim_check(attrs):\n if len(attrs[\"kernel_size\"]) in [1, 2, 3]:\n return True\n return False\n\n return _dim_check, \"Only 1d, 2d and 3d kernel supported.\"\n\n\nclass OneFlowOpConverter(object):\n \"\"\"A helper class for holding oneflow op converters.\"\"\"\n\n @classmethod\n def get_converter(cls):\n \"\"\"\n Get converter matches given opset.\n Parameters\n ----------\n None\n\n Returns\n -------\n converter, which should be `_impl_vx`.\n \"\"\"\n version = 1\n if hasattr(cls, \"_impl_v{}\".format(version)):\n return getattr(cls, \"_impl_v{}\".format(version))\n raise NotImplementedError(\"version {} of {} not implemented\".format(version, cls.__name__))\n\n\nclass Pool(OneFlowOpConverter):\n \"\"\"A helper class for pool op converters.\"\"\"\n\n name = \"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n data = inputs[0]\n attrs.pop(\"data_format\")\n out = AttrCvt(\n op_name=cls.name,\n transforms={\n \"kernel_size\": \"pool_size\",\n \"stride\": \"strides\",\n \"dilations\": (\"dilation\", 1),\n },\n ignores=[\"return_indices\", \"divisor_override\"],\n custom_check=dimension_constraint(),\n )([data], attrs, params)\n\n return out\n\n\nclass AdaptiveAvgPool2d(OneFlowOpConverter):\n \"\"\"Operator converter for AdaptiveAvgPool2d\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n return _op.nn.adaptive_avg_pool2d(inputs[0], output_size=attrs[\"output_size\"])\n\n\nclass AdaptiveMaxPool2d(OneFlowOpConverter):\n \"\"\"Operator converter for AdaptiveMaxPool2d\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n return _op.nn.adaptive_max_pool2d(inputs[0], output_size=attrs[\"output_size\"])\n\n\nclass GlobalAveragePool(OneFlowOpConverter):\n \"\"\"Operator converter for GlobalAveragePool\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n rank = len(infer_shape(inputs[0]))\n if rank == 3:\n return _op.nn.global_avg_pool1d(inputs[0])\n if rank == 4:\n return _op.nn.global_avg_pool2d(inputs[0])\n if rank == 5:\n return _op.nn.global_avg_pool3d(inputs[0])\n raise NotImplementedError(\n \"Global average pooling is only implemented for 1D, 2D, and 3D kernels, got %dD.\"\n % (rank - 2),\n )\n\n\nclass GlobalMaxPool(OneFlowOpConverter):\n \"\"\"Operator converter for GlobalMaxPool\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n rank = len(infer_shape(inputs[0]))\n if rank == 
3:\n return _op.nn.global_max_pool1d(inputs[0])\n if rank == 4:\n return _op.nn.global_max_pool2d(inputs[0])\n if rank == 5:\n return _op.nn.global_max_pool3d(inputs[0])\n raise NotImplementedError(\n \"Global max pooling is only implemented for 1D, 2D, and 3D kernels, got %dD.\"\n % (rank - 2),\n )\n\n\nclass Conv(OneFlowOpConverter):\n \"\"\"A helper class for conv op converters.\"\"\"\n\n name = \"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n # The kernel is imported from model_dir_path, without the \".weight\" logo, etc.\n # The data is obtained through the graph, its op contains \"_input.\"\n in_names = [\"_input.\"]\n kernel_names = [\".weight\"]\n for i in inputs:\n IN_NAMES = any(x in str(i) for x in in_names)\n KERNEL_NAMES = any(x in str(i) for x in kernel_names)\n if IN_NAMES:\n data = i\n elif KERNEL_NAMES:\n kernel = i\n else:\n data = i\n\n # Use shape of input to determine convolution type.\n kernel_type = infer_type(kernel)\n kernel_shapes = [get_const_tuple(kernel_type.checked_type.shape)]\n\n if \"kernel_size\" not in attrs:\n attrs[\"kernel_size\"] = kernel_shapes[0][2:]\n if \"dilation_rate\" in attrs:\n attrs[\"dilation\"] = list(attrs[\"dilation_rate\"])\n attrs.pop(\"dilation_rate\")\n\n pad_v = attrs.get(\"padding_before\", [0, 0])\n attrs[\"padding\"] = [pad_v[0], pad_v[1], pad_v[0], pad_v[1]]\n\n group_conv1d = False\n if cls.name == \"conv1d\" and attrs.get(\"groups\") != 1:\n group_conv1d = True\n # Expand input from NCW to NCHW\n data = _op.expand_dims(data, axis=2)\n # Expand kernel from OIW to OIHW\n kernel = _op.expand_dims(kernel, axis=2)\n # Add new value to kernel_shape, strices, dilation, pads, if needed\n attrs[\"kernel_size\"] = [1] + list(attrs[\"kernel_size\"])\n if \"strides\" in attrs:\n attrs[\"strides\"] = [1] + list(attrs[\"strides\"])\n if \"dilations\" in attrs:\n attrs[\"dilation\"] = [1] + list(attrs[\"dilations\"])\n\n out = AttrCvt(\n op_name=cls.name,\n transforms={\n \"group\": (\"groups\", 1),\n },\n ignores=[\"data_format\", \"filters\", \"padding_after\", \"padding_before\"],\n custom_check=dimension_constraint(),\n )([data, kernel], attrs, params)\n\n # If this was a group_conv1d, squish output back to NCW.\n if group_conv1d:\n out = _op.squeeze(out, axis=[2])\n\n return out\n\n\nclass ConvTranspose(OneFlowOpConverter):\n \"\"\"Operator converter for ConvTranspose.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n in_names = [\"_input.\"]\n kernel_names = [\".weight\"]\n for i in inputs:\n IN_NAMES = any(x in str(i) for x in in_names)\n KERNEL_NAMES = any(x in str(i) for x in kernel_names)\n if IN_NAMES:\n data = i\n elif KERNEL_NAMES:\n kernel = i\n else:\n data = i\n\n # get number of channels\n attrs[\"channels\"] = attrs.get(\"filters\", 1)\n attrs[\"groups\"] = attrs.get(\"group\", 1)\n\n kernel_type = infer_type(kernel)\n kernel_shapes = [get_const_tuple(kernel_type.checked_type.shape)]\n\n if \"kernel_size\" not in attrs:\n attrs[\"kernel_size\"] = kernel_shapes[0][2:]\n\n if \"dilation_rate\" in attrs:\n attrs[\"dilation\"] = list(attrs[\"dilation_rate\"])\n attrs.pop(\"dilation_rate\")\n\n pad_v = attrs.get(\"padding_before\", [0, 0])\n attrs[\"padding\"] = [pad_v[0], pad_v[1], pad_v[0], pad_v[1]]\n\n out = AttrCvt(\n op_name=cls.name,\n transforms={\n \"group\": (\"groups\", 1),\n },\n disables=[\"filters\", \"data_format\", \"padding_before\"],\n custom_check=dimension_constraint(),\n )([data, kernel], attrs, params)\n\n return out\n\n\nclass Upsample(OneFlowOpConverter):\n \"\"\"A 
helper class for upsample op converters\"\"\"\n\n name = \"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n data = inputs[0]\n input_shape = infer_shape(data)\n dims = len(input_shape)\n\n width_scale = attrs.get(\"width_scale\", 1.0)\n height_scale = attrs.get(\"height_scale\", 1.0)\n align_corners = attrs.get(\"align_corners\", False)\n\n if \"nearest\" in cls.name:\n method = \"nearest_neighbor\"\n elif \"trilinear\" in cls.name:\n method = \"trilinear\"\n elif \"bilinear\" in cls.name:\n method = \"bilinear\"\n\n # in 3d case, we use the purely static op\n if dims == 5:\n if isinstance(scales, _expr.Expr):\n scale_h = _op.take(scales, _op.const(3))\n scale_w = _op.take(scales, _op.const(4))\n scale_d = _op.take(scales, _op.const(1))\n else:\n assert len(scales) == 5\n scale_h = scales[-2]\n scale_w = scales[-1]\n scale_d = scales[-3]\n\n layout = \"NCDHW\"\n out = _op.nn.upsampling3d(\n data,\n scale_d,\n scale_h,\n scale_w,\n layout=layout,\n method=method,\n coordinate_transformation_mode=\"asymmetric\",\n )\n # in 2d case, use dynamic op\n else:\n if isinstance(height_scale, _expr.Expr):\n height_scale = _op.take(height_scale, _op.const(3))\n width_scale = _op.take(width_scale, _op.const(4))\n layout = \"NCHW\"\n\n out = _op.nn.upsampling(\n inputs[0],\n height_scale,\n width_scale,\n layout=layout,\n method=method,\n align_corners=align_corners,\n )\n return out\n\n\nclass UpsampleNearest(Upsample):\n \"\"\"Operator converter for Upsample Nearest\"\"\"\n\n name = \"upsample_nearest\"\n\n\nclass UpsampleBiLinear(Upsample):\n \"\"\"Operator converter for Upsample Bilinear\"\"\"\n\n name = \"upsample_bilinear\"\n\n\nclass Conv2d(Conv):\n \"\"\"Operator converter for Conv2d\"\"\"\n\n name = \"conv2d\"\n\n\nclass ConvTranspose2d(ConvTranspose):\n \"\"\"Operator converter for ConvTranspose2d\"\"\"\n\n name = \"conv2d_transpose\"\n\n\nclass BatchNorm(OneFlowOpConverter):\n \"\"\"Operator converter for BatchNorm\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n # sort the inputs\n sorted_inputs = copy.deepcopy(inputs)\n for i in inputs:\n IN_NAMES = \"_input.\" in str(i)\n if IN_NAMES:\n sorted_inputs[0] = i\n elif \"weight\" in str(i) and not IN_NAMES:\n sorted_inputs[1] = i\n elif \"bias\" in str(i) and not IN_NAMES:\n sorted_inputs[2] = i\n elif \"mean\" in str(i) and not IN_NAMES:\n sorted_inputs[3] = i\n elif \"var\" in str(i) and not IN_NAMES:\n sorted_inputs[4] = i\n\n if \"data_format\" in attrs:\n if attrs[\"data_format\"] == \"channel_first\":\n attrs[\"axis\"] = 1\n\n out = AttrCvt(op_name=\"batch_norm\", ignores=[\"training\"], disables=[\"momentum\"])(\n sorted_inputs, attrs, params\n )\n return out[0]\n\n\nclass Flatten(OneFlowOpConverter):\n \"\"\"Operator converter for Flatten\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n x = inputs[0]\n input_shape = list(infer_shape(x))\n\n start = attrs[\"start_dim\"]\n end = attrs[\"end_dim\"]\n ndim = len(input_shape)\n if end < 0:\n end += ndim\n new_shape = [0] * start\n\n new_shape.append(-1)\n squeeze_axes = []\n for i in range(start + 1, end + 1):\n new_shape.append(1)\n squeeze_axes.append(i)\n for _ in range(end + 1, ndim):\n new_shape.append(0)\n out = _op.reshape(x, new_shape)\n if squeeze_axes:\n out = _op.squeeze(out, axis=squeeze_axes)\n return out\n\n\nclass MatMul(OneFlowOpConverter):\n \"\"\"Operator converter for MatMul\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n assert len(inputs) == 2, \"MatMul op take 2 inputs, {} 
given\".format(len(inputs))\n\n dtype = infer_type(inputs[0]).checked_type.dtype\n # Y = alpha * A * B\n alpha = float(attrs.get(\"alpha\", 1.0))\n transA = bool(attrs.get(\"transpose_a\", False))\n transB = bool(attrs.get(\"transpose_b\", False))\n\n a_shape = infer_shape(inputs[0])\n b_shape = infer_shape(inputs[1])\n if (\n (transA and transB and a_shape[-2] != b_shape[-1])\n or (transA and not transB and a_shape[-2] != b_shape[-2])\n or (transB and not transA and a_shape[-1] != b_shape[-1])\n or (not transB and not transA and a_shape[-1] != b_shape[-2])\n ):\n matmul_a = inputs[1]\n matmul_b = inputs[0]\n else:\n matmul_a = inputs[0]\n matmul_b = inputs[1]\n\n if transA:\n perm = list(range(len(a_shape)))\n perm[-2] = len(a_shape) - 1\n perm[-1] = len(a_shape) - 2\n matmul_a = _op.transpose(matmul_a, axes=perm)\n if transB:\n perm = list(range(len(b_shape)))\n perm[-2] = len(b_shape) - 1\n perm[-1] = len(b_shape) - 2\n matmul_b = _op.transpose(matmul_b, axes=perm)\n\n # This implemention almost keeps same with ONNX\n # Need to check input shape as batch matmul must be supported.\n a_shape = shape_of(matmul_a, dtype=\"int32\")\n a_rank = infer_shape(a_shape)[0]\n b_shape = shape_of(matmul_b, dtype=\"int32\")\n b_rank = infer_shape(b_shape)[0]\n # When performing a batch matmul, we need to properly handle N-dim shapes.\n if a_rank > 2 or b_rank > 2:\n\n def flatten_to_nd(x, x_shape, nd=3):\n ndims = infer_shape(x_shape)[0]\n if ndims == nd:\n return x\n newshape = _op.concatenate(\n [\n _expr.const([-1], dtype=infer_type(x_shape).checked_type.dtype),\n _op.strided_slice(x_shape, [ndims - nd + 1], [ndims]),\n ],\n 0,\n )\n out = _op.reshape(x, fold_constant(newshape))\n return out\n\n b_type = infer_type(matmul_b)\n # Convert to dense if the second matrix is 2d and non-dynamic\n if b_rank == 2 and not _ty.is_dynamic(b_type.checked_type):\n a = flatten_to_nd(matmul_a, a_shape, 2)\n b = _op.transpose(matmul_b)\n output = _op.nn.dense(a, b)\n else:\n # Convert a and b into 3 dimensional tensors.\n a = flatten_to_nd(matmul_a, a_shape, 3)\n b = flatten_to_nd(matmul_b, b_shape, 3)\n # Transpose matrix dimensions of b.\n b = _op.transpose(b, [0, 2, 1])\n # Perform a batch matmul.\n output = _op.nn.batch_matmul(a, b)\n # Determine the output batch dimension.\n if a_rank > b_rank:\n out_batch = _op.strided_slice(a_shape, [0], [a_rank - 2])\n elif a_rank < b_rank:\n out_batch = _op.strided_slice(b_shape, [0], [b_rank - 2])\n # If its unclear how broadcasting should be applied, the output\n # shape is determined by choosing the maximum value from each input.\n else:\n out_batch = _op.concatenate(\n [\n _op.maximum(\n _op.strided_slice(a_shape, [i], [i + 1]),\n _op.strided_slice(b_shape, [i], [i + 1]),\n )\n for i in range(a_rank - 2)\n ],\n 0,\n )\n # Reshape output to original dimensions.\n final_shape = _op.concatenate(\n [\n out_batch,\n _op.strided_slice(\n a_shape, [infer_shape(a_shape)[0] - 2], [infer_shape(a_shape)[0] - 1]\n ),\n _op.strided_slice(\n b_shape, [infer_shape(b_shape)[0] - 1], [infer_shape(b_shape)[0]]\n ),\n ],\n 0,\n )\n out = _op.reshape(output, fold_constant(final_shape))\n else:\n if b_rank == 1:\n matmul_b = _op.expand_dims(matmul_b, 1, 1)\n # Otherwise a simple dense op will get the job done.\n input_1_t = _op.transpose(matmul_b, axes=(1, 0))\n out = _op.nn.dense(matmul_a, input_1_t)\n if b_rank == 1:\n out = _op.squeeze(out, axis=[-1])\n if not np.isclose(alpha, 1.0):\n out = out * _expr.const(alpha, dtype=dtype)\n return out\n\n\nclass Reduce(OneFlowOpConverter):\n 
\"\"\"Operator converter for reduce ops\"\"\"\n\n name = \"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n attr = {\"axis\": attrs.get(\"axis\", 0), \"keepdims\": attrs.get(\"keepdims\", True)}\n return AttrCvt(cls.name)(inputs, attr)\n\n\nclass ReduceMax(Reduce):\n \"\"\"Operator converter for ReduceMax\"\"\"\n\n name = \"max\"\n\n\nclass ReduceMin(Reduce):\n \"\"\"Operator converter for ReduceMin\"\"\"\n\n name = \"min\"\n\n\nclass ReduceSum(Reduce):\n \"\"\"Operator converter for ReduceSum\"\"\"\n\n name = \"sum\"\n\n\nclass ReduceMean(Reduce):\n \"\"\"Operator converter for ReduceMean\"\"\"\n\n name = \"mean\"\n\n\nclass Square(OneFlowOpConverter):\n \"\"\"Operator converter for square\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n assert len(inputs) == 1, \"Square op {} take 1 inputs, {} given\".format(\n cls.name, len(inputs)\n )\n return _op.multiply(inputs[0], inputs[0])\n\n\nclass Add(OneFlowOpConverter):\n \"\"\"Operator converter for Add\"\"\"\n\n name = \"add\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n assert len(inputs) == 2, \"Math op {} take 2 inputs, {} given\".format(cls.name, len(inputs))\n axis = int(attrs.get(\"axis\", 0))\n\n true_names = [\"weight\", \"bias\"]\n false_names = [\"_input.\"]\n\n for i in inputs:\n T_NAMES = any(x in str(i) for x in true_names)\n F_NAMES = any(x in str(i) for x in false_names)\n if T_NAMES and not F_NAMES:\n add_b = i\n else:\n add_a = i\n\n # fix the shape\n add_shape = infer_shape(add_a)\n if len(add_shape) > 2:\n add_b = _op.expand_dims(add_b, axis=axis, num_newaxis=len(add_shape) - 2)\n add_b_shape = list(infer_shape(add_b))\n add_b_shape.insert(0, add_shape[0])\n\n add_b = _op.reshape(add_b, tuple(add_b_shape))\n out = get_relay_op(cls.name)(add_a, add_b)\n\n return out\n\n\nclass Expand(OneFlowOpConverter):\n \"\"\"Operator converter for Expand\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n data_in = inputs[0]\n shape = list(infer_shape(data_in))\n\n ndims = len(shape)\n sizes = attrs[\"logical_expand_shape\"]\n out = data_in\n out_dims = len(sizes)\n if ndims < out_dims:\n num_newaxis = out_dims - ndims\n out = _op.expand_dims(out, axis=0, num_newaxis=num_newaxis)\n shape = [1] * num_newaxis + shape\n\n for i in range(out_dims):\n if sizes[i] != -1 and shape[i] == 1:\n out = _op.repeat(out, sizes[i], axis=i)\n\n return out\n\n\nclass Transpose(OneFlowOpConverter):\n \"\"\"Operator converter for transpose.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n perm = attrs[\"perm\"]\n return _op.transpose(inputs[0], axes=perm)\n\n\nclass ExpandDim(OneFlowOpConverter):\n \"\"\"Operator converter for ExpandDim\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n\n return _op.expand_dims(inputs[0], axis=attrs.get(\"axis\", 0))\n\n\nclass BroadcastMath(OneFlowOpConverter):\n \"\"\"Operator converter for broadcast math ops\"\"\"\n\n name = \"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n assert len(inputs) == 2, \"Math op {} take 2 inputs, {} given\".format(cls.name, len(inputs))\n beta_names = [\"weight\", \"bias\", \"mean\", \"var\", \"Constant\"]\n\n for i in inputs:\n T_NAMES = any([x in str(i) for x in beta_names])\n if T_NAMES and \"_input.\" not in str(i):\n input_b = i\n else:\n input_a = i\n\n if cls.name == \"divide\":\n length = []\n for i in inputs:\n length.append(len(str(i)))\n for i in inputs:\n if len(str(i)) == max(length):\n input_a = i\n else:\n input_b = i\n if cls.name == 
\"subtract\":\n length = []\n for i in inputs:\n length.append(len(str(i)))\n for i in inputs:\n if len(str(i)) == max(length):\n input_b = i\n else:\n input_a = i\n try:\n return get_relay_op(cls.name)(input_a, input_b)\n except UnboundLocalError:\n return get_relay_op(cls.name)(*inputs)\n\n\nclass BroadcastMul(BroadcastMath):\n \"\"\"Operator converter for Mul broadcast\"\"\"\n\n name = \"multiply\"\n\n\nclass BroadcastAdd(BroadcastMath):\n \"\"\"Operator converter for Add broadcast\"\"\"\n\n name = \"add\"\n\n\nclass BroadcastSub(BroadcastMath):\n \"\"\"Operator converter for Sub broadcast\"\"\"\n\n name = \"subtract\"\n\n\nclass BroadcastDiv(BroadcastMath):\n \"\"\"Operator converter for Div broadcast\"\"\"\n\n name = \"divide\"\n\n\nclass LogicalGreater(OneFlowOpConverter):\n \"\"\"Operator converter for greater\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n res = None\n if attrs.get(\"has_int_operand\", True):\n value = attrs.get(\"int_operand\", 0.0)\n res = _op.greater(inputs[0], _op.full_like(inputs[0], fill_value=_expr.const(value)))\n elif attrs.get(\"has_float_operand\", True):\n value = float(attrs.get(\"float_operand\", 0.0))\n res = _op.greater(\n inputs[0], _op.full_like(inputs[0], fill_value=_expr.const(value)).astype(\"float32\")\n )\n else:\n raise AttributeError(\n \"please check if has_int_operand or has_float_operand in your attrs\"\n )\n return res\n\n\nclass Log1p(OneFlowOpConverter):\n \"\"\"Operator converter for Log1p\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n return _op.log(inputs[0] + _expr.const(1.0))\n\n\nclass Pow(OneFlowOpConverter):\n \"\"\"Operator converter for Power\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n inputs = _dtype_shape_promotion(inputs)\n return get_relay_op(cls.name)(inputs[0], inputs[1])\n\n\nclass Expm1(OneFlowOpConverter):\n \"\"\"Operator converter for Expm1\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n return _op.exp(inputs[0]) - _expr.const(1.0)\n\n\nclass Unary(OneFlowOpConverter):\n \"\"\"A helper class for unary op converters\"\"\"\n\n name = \"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n assert len(inputs) == 1, \"Unary math op {} takes 1 input, {} given\".format(\n cls.name, len(inputs)\n )\n return get_relay_op(cls.name)(*inputs)\n\n\nclass Absolute(Unary):\n \"\"\"Operator converter for Absolute.\"\"\"\n\n name = \"abs\"\n\n\nclass AddN(OneFlowOpConverter):\n \"\"\"Operator converter for Add_n\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n assert len(inputs) > 0, \"add_n take >=1 inputs, but 0 given.\"\n\n res = inputs[0]\n for each in inputs[1:]:\n res = _op.add(res, each)\n return res\n\n\nclass ScalarAdd(OneFlowOpConverter):\n \"\"\"Operator convert for Add_scalar\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n assert len(inputs) == 1, \"add_scalar take == 1 inputs, but {} given.\".format(len(inputs))\n\n if attrs.get(\"has_int_operand\", True):\n res = inputs[0] + _expr.const(attrs[\"int_operand\"])\n elif attrs.get(\"has_float_operand\", True):\n res = inputs[0] + _expr.const(attrs[\"float_operand\"])\n else:\n raise AttributeError(\n \"please check if has_int_operand or has_float_operand in your attrs\"\n )\n\n return res\n\n\nclass ScalarMul(OneFlowOpConverter):\n \"\"\"Operator convert for Mul_scalar\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n assert len(inputs) == 1, \"add_scalar take == 1 inputs, but {} given.\".format(len(inputs))\n\n 
if attrs.get(\"has_int_operand\", True):\n res = inputs[0] * _expr.const(attrs[\"int_operand\"], dtype=\"float32\")\n elif attrs.get(\"has_float_operand\", True):\n res = inputs[0] * _expr.const(attrs[\"float_operand\"])\n else:\n raise AttributeError(\n \"please check if has_int_operand or has_float_operand in your attrs\"\n )\n\n return res\n\n\nclass ScalarDiv(OneFlowOpConverter):\n \"\"\"Operator convert for Div_scalar\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n assert len(inputs) == 1, \"div_scalar take == 1 inputs, but {} given.\".format(len(inputs))\n\n if attrs.get(\"has_int_operand\", True):\n res = inputs[0] / _expr.const(attrs[\"int_operand\"], dtype=\"float32\")\n elif attrs.get(\"has_float_operand\", True):\n res = inputs[0] / _expr.const(attrs[\"float_operand\"])\n else:\n raise AttributeError(\n \"please check if has_int_operand or has_float_operand in your attrs\"\n )\n\n return res\n\n\nclass ScalarPow(OneFlowOpConverter):\n \"\"\"Operator convert for Pow_scalar\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n if attrs.get(\"has_int_operand\", True):\n coeff = _expr.const(attrs[\"int_operand\"])\n elif attrs.get(\"has_float_operand\", True):\n coeff = _expr.const(attrs[\"float_operand\"])\n return _op.power(inputs[0], coeff)\n\n\nclass MaxPool2d(Pool):\n \"\"\"Operator converter for MaxPool\"\"\"\n\n name = \"max_pool2d\"\n\n\nclass AveragePool2d(Pool):\n \"\"\"Operator converter for AveragePool.\"\"\"\n\n name = \"avg_pool2d\"\n\n\nclass Affine(OneFlowOpConverter):\n \"\"\"Operator converter for Affine transformation.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n alpha = _expr.const(attrs.get(\"alpha\", 1.0))\n beta = _expr.const(attrs.get(\"beta\", 0.0))\n return (alpha * inputs[0]) + beta\n\n\nclass Reshape(OneFlowOpConverter):\n \"\"\"Operator converter for Reshape.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n return _op.reshape(inputs[0], attrs[\"shape\"])\n\n\nclass Softmax(OneFlowOpConverter):\n \"\"\"Operator converter for Softmax.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n axis = attrs.get(\"axis\", -1)\n data = inputs[0]\n if isinstance(axis, str):\n axis = int(axis)\n\n return _op.nn.softmax(data, axis=axis)\n\n\nclass LogSoftmax(OneFlowOpConverter):\n \"\"\"Operator converter for LogSoftmax.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n axis = attrs.get(\"axis\", 1)\n ndim = len(infer_shape(inputs[0]))\n if axis < 0:\n axis += ndim\n axes = list(range(axis, ndim))\n x = inputs[0]\n m = _op.max(x, axes, keepdims=True)\n e = _op.exp(x - m)\n s = _op.sum(e, axes, keepdims=True)\n return x - m - _op.log(s)\n\n\nclass Dropout(OneFlowOpConverter):\n \"\"\"Operator converter for Dropout.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n out = AttrCvt(\"dropout\", {\"ratio\": \"rate\"}, ignores=[\"is_test\"])\n return out\n\n\nclass ThresholdedRelu(OneFlowOpConverter):\n \"\"\"Operator converter for ThresholdedRelu.\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n alpha = float(attrs.get(\"alpha\", 1.0))\n alpha_tensor = _op.full_like(inputs[0], fill_value=_expr.const(alpha))\n mask = _op.greater(inputs[0], alpha_tensor).astype(\"float32\")\n return inputs[0] * mask\n\n\nclass Elu(OneFlowOpConverter):\n \"\"\"Operator converter for Elu\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n alpha = float(attrs.get(\"alpha\", 1.0))\n return _expr.const(-alpha) * _op.nn.relu(\n 
_expr.const(1.0) - _op.exp(inputs[0])\n ) + _op.nn.relu(inputs[0])\n\n\nclass PReLU(OneFlowOpConverter):\n \"\"\"Operator converter for PReLU\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n assert len(inputs) == 2, \"PReLU need 2 inputs, but {} given\".format(len(inputs))\n for i in inputs:\n if \"_input.\" in str(i):\n prelu_a = i\n else:\n prelu_b = i\n\n input_shape = shape_of(prelu_a)\n alpha = _op.broadcast_to_like(prelu_b, prelu_a)\n alpha = _op.reshape(alpha, [-1])\n\n output = _op.nn.prelu(_op.reshape(prelu_a, [-1]), alpha, axis=0)\n out = _op.reshape(output, input_shape)\n return out\n\n\nclass Selu(OneFlowOpConverter):\n \"\"\"Operator converter for Selu\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n alpha = float(attrs.get(\"alpha\", 1.67326319217681884765625))\n gamma = float(attrs.get(\"gamma\", 1.05070102214813232421875))\n return _expr.const(gamma) * (\n _expr.const(-alpha) * _op.nn.relu(_expr.const(1.0) - _op.exp(inputs[0]))\n + _op.nn.relu(inputs[0])\n )\n\n\nclass Silu(OneFlowOpConverter):\n \"\"\"Operator converter for Silu\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n a = inputs[0]\n b = _op.sigmoid(inputs[0])\n return _op.multiply(a, b)\n\n\nclass Gelu(OneFlowOpConverter):\n \"\"\"Operator converter for Gelu\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n data = inputs[0]\n return data * (\n _expr.const(0.5) + _op.erf(data * _expr.const(0.5**0.5)) * _expr.const(0.5)\n )\n\n\nclass HardTanh(OneFlowOpConverter):\n \"\"\"Operator converter for HardTanh\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n tanh_min = attrs.get(\"min_val\", 0.0)\n tanh_max = attrs.get(\"max_val\", 0.0)\n return _op.tensor.clip(inputs[0], tanh_min, tanh_max)\n\n\nclass Softplus(OneFlowOpConverter):\n \"\"\"Operator converter for Softplus\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n data = inputs[0]\n data_dtype = infer_type(data).checked_type.dtype\n data = _op.exp(data) + _expr.const(1, dtype=data_dtype)\n return _op.log(data)\n\n\nclass Softsign(OneFlowOpConverter):\n \"\"\"Operator converter for Softsign\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n return inputs[0] / (_expr.const(1.0) + Absolute.get_converter()(inputs, attrs, params))\n\n\nclass Variance(OneFlowOpConverter):\n \"\"\"Operator converter for Variance\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n axis = attrs[\"dim\"]\n keepdims = attrs[\"keepdim\"]\n unbiased = bool(attrs[\"unbiased\"])\n return _op.reduce.variance(inputs[0], axis=axis, keepdims=keepdims, unbiased=unbiased)\n\n\nclass Concat(OneFlowOpConverter):\n \"\"\"Operator converter for Concat\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n attrs.pop(\"max_dim_size\")\n inputs = _dtype_shape_promotion(inputs)\n return _op.concatenate(inputs, axis=attrs[\"axis\"])\n\n\nclass Clip(OneFlowOpConverter):\n \"\"\"Operator converter for Clip\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n attr = {}\n dtype = infer_type(inputs[0])\n\n if \"float\" in str(dtype):\n attr[\"a_min\"] = attrs[\"floating_min\"]\n attr[\"a_max\"] = attrs[\"floating_max\"]\n elif \"int\" in str(dtype):\n attr[\"a_min\"] = attrs[\"integral_min\"]\n attr[\"a_max\"] = attrs[\"integral_max\"]\n else:\n attr[\"a_min\"] = -np.inf\n attr[\"a_max\"] = np.inf\n\n out = AttrCvt(\"clip\")(inputs, attr, params)\n return out\n\n\nclass Slice(OneFlowOpConverter):\n \"\"\"Operator converter for 
Slice\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n starts = list(attrs[\"start\"])\n ends = list(attrs[\"stop\"])\n steps = list(attrs[\"step\"])\n return _op.strided_slice(inputs[0], starts, ends, steps)\n\n\nclass Split(OneFlowOpConverter):\n \"\"\"Operator converter for Split\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n splits = attrs.get(\"split\", None)\n if splits is not None:\n indices = []\n attrs[\"indices_or_sections\"] = []\n index = 0\n for i in splits[:-1]:\n index += i\n indices.append(index)\n output = _op.split(inputs[0], indices, attrs.get(\"axis\", 0))\n # If the output of split is a single value, unpack if from the TupleWrapper\n if len(output) == 1:\n output = output[0]\n return output\n\n\nclass Scatter(OneFlowOpConverter):\n \"\"\"Operator converter for Scatter\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n axis = attrs.get(\"axis\", 0)\n return _op.scatter(inputs[0], inputs[1], inputs[2], axis)\n\n\nclass Unsqueeze(OneFlowOpConverter):\n \"\"\"Operator converter for Unsqueeze\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n axes = sorted(attrs[\"axes\"])\n for axis in axes:\n inputs[0] = _op.expand_dims(inputs[0], axis=axis, num_newaxis=1)\n return inputs[0]\n\n\nclass Sign(OneFlowOpConverter):\n \"\"\"Operator converter for Sign\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n return _op.sign(inputs[0])\n\n\nclass Reciprocal(OneFlowOpConverter):\n \"\"\"Operator converter for Reciprocal\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n dtype = infer_type(inputs[0]).checked_type.dtype\n return _expr.const(1.0, dtype=dtype) / inputs[0]\n\n\nclass Erf(OneFlowOpConverter):\n \"\"\"Operator converter for Erf\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n return _op.erf(inputs[0])\n\n\nclass Erfc(OneFlowOpConverter):\n \"\"\"Operator converter for Erfs\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n return _expr.const(1.0) - _op.erf(inputs[0])\n\n\nclass HardSigmoid(OneFlowOpConverter):\n \"\"\"Operator converter for HardSigmoid\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n alpha = attrs.get(\"alpha\", 0.2)\n beta = attrs.get(\"beta\", 0.5)\n transformX = (inputs[0] * _expr.const(alpha)) + _expr.const(beta)\n attr = {\"a_min\": 0, \"a_max\": 1}\n return AttrCvt(\"clip\")([transformX], attr)\n\n\nclass OneHot(OneFlowOpConverter):\n \"\"\"Operator converter for OneHot\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n # Extract relay one_hot inputs.\n indices, depth, values = inputs\n ndim = len(infer_shape(indices))\n # Split onnx on off values into two separate expressions.\n off_value, on_value = _op.take(values, _op.const(0)), _op.take(values, _op.const(1))\n # Extract the datatype of the output from on_value.\n dtype = infer_type(on_value).checked_type.dtype\n ind_dtype = infer_type(indices).checked_type.dtype\n # Normalize the indices to a positive range\n indices = _op.where(\n indices < _op.const(0, ind_dtype), indices + _op.cast(depth, ind_dtype), indices\n )\n # set default value when axis is not set in the model\n axis = attrs.get(\"axis\", -1)\n if axis < 0:\n axis += ndim + 1\n\n return _op.one_hot(indices, on_value, off_value, depth, axis, dtype=dtype)\n\n\nclass Where(OneFlowOpConverter):\n \"\"\"Operator converter for Where\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n condition_rank = len(infer_shape(inputs[0]))\n x_rank = 
len(infer_shape(inputs[1]))\n y_rank = len(infer_shape(inputs[2]))\n ranks = [condition_rank, x_rank, y_rank]\n\n # If one rank is longer than others, then we can broadcast\n # to that shape.\n max_rank = max(ranks)\n max_rank_idxs = [i for i, x in enumerate(ranks) if x == max_rank]\n broadcast_shape = shape_of(inputs[max_rank_idxs[0]])\n # If two or more inputs have the same rank, compute the broadcast\n # shape by taking the maximum value of each dimensions.\n if len(max_rank_idxs) > 1:\n for idx in max_rank_idxs:\n broadcast_shape = _op.maximum(broadcast_shape, shape_of(inputs[idx]))\n\n broadcast_shape = fold_constant(broadcast_shape)\n\n condition = _op.broadcast_to(inputs[0], broadcast_shape)\n x = _op.broadcast_to(inputs[1], broadcast_shape)\n y = _op.broadcast_to(inputs[2], broadcast_shape)\n return _op.where(condition, x, y)\n\n\nclass Constant(OneFlowOpConverter):\n \"\"\"Operator converter for Constant\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n is_float = attrs.get(\"is_floating_value\", True)\n shape = attrs.get(\"shape\", (1,))\n if is_float:\n dtype = \"float32\"\n value = attrs.pop(\"floating_value\")\n else:\n dtype = \"int8\"\n value = attrs.pop(\"integer_value\")\n np_array = np.zeros(shape)\n np_array.fill(value)\n value = _expr.const(np_array, dtype)\n return value\n\n\nclass Range(OneFlowOpConverter):\n \"\"\"Operator converter for Range\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n if len(inputs) != 0:\n raise ValueError(\"Expect no inputs but get {}\".format(len(inputs)))\n start = attrs.get(\"start\", 0.0)\n limit = attrs.get(\"limit\", 1.0)\n delta = attrs.get(\"delta\", 1.0)\n return _op.arange(\n _expr.const(start, dtype=\"float32\"),\n _expr.const(limit, dtype=\"float32\"),\n _expr.const(delta, dtype=\"float32\"),\n )\n\n\nclass Cast(OneFlowOpConverter):\n \"\"\"Operator converter for Cast\"\"\"\n\n @classmethod\n def _impl_v1(cls, inputs, attrs, params):\n attrs[\"dtype\"] = infer_type(inputs[0]).checked_type.dtype\n return AttrCvt(op_name=\"cast\")(inputs, attrs)\n\n\ndef get_convert_map():\n # supported oneflow2relay op\n return {\n # defs/math\n \"bias_add\": Add.get_converter(),\n \"scalar_add\": ScalarAdd.get_converter(),\n \"scalar_mul\": ScalarMul.get_converter(),\n \"scalar_div\": ScalarDiv.get_converter(),\n \"scalar_pow\": ScalarPow.get_converter(),\n \"reduce_sum\": ReduceSum.get_converter(),\n \"reduce_max\": ReduceMax.get_converter(),\n \"reduce_min\": ReduceMin.get_converter(),\n \"reduce_mean\": ReduceMean.get_converter(),\n \"broadcast_add\": BroadcastAdd.get_converter(),\n \"broadcast_mul\": BroadcastMul.get_converter(),\n \"broadcast_sub\": BroadcastSub.get_converter(),\n \"broadcast_div\": BroadcastDiv.get_converter(),\n \"scalar_logical_greater\": LogicalGreater.get_converter(),\n \"log\": Renamer(\"log\"),\n \"log1p\": Log1p.get_converter(),\n \"acos\": Renamer(\"acos\"),\n \"acosh\": Renamer(\"acosh\"),\n \"asin\": Renamer(\"asin\"),\n \"asinh\": Renamer(\"asinh\"),\n \"atan\": Renamer(\"atan\"),\n \"atanh\": Renamer(\"atanh\"),\n \"cos\": Renamer(\"cos\"),\n \"cosh\": Renamer(\"cosh\"),\n \"sin\": Renamer(\"sin\"),\n \"sinh\": Renamer(\"sinh\"),\n \"tan\": Renamer(\"tan\"),\n \"tanh\": Renamer(\"tanh\"),\n \"pow\": Pow.get_converter(),\n \"exp\": Renamer(\"exp\"),\n \"expm1\": Expm1.get_converter(),\n \"floor\": Renamer(\"floor\"),\n \"ceil\": Renamer(\"ceil\"),\n \"round\": Renamer(\"round\"),\n \"add_n\": AddN.get_converter(),\n \"sqrt\": Renamer(\"sqrt\"),\n \"rsqrt\": 
Renamer(\"rsqrt\"),\n \"square\": Square.get_converter(),\n \"sign\": Sign.get_converter(),\n \"erf\": Erf.get_converter(),\n \"erfc\": Erfc.get_converter(),\n \"reciprocal\": Reciprocal.get_converter(),\n # defs/activation\n \"softmax\": Softmax.get_converter(),\n \"softsign\": Softsign.get_converter(),\n \"hardtanh\": HardTanh.get_converter(),\n \"relu\": Renamer(\"relu\"),\n \"leaky_relu\": Renamer(\"leaky_relu\"),\n \"prelu\": PReLU.get_converter(),\n \"selu\": Selu.get_converter(),\n \"silu\": Silu.get_converter(),\n \"gelu\": Gelu.get_converter(),\n # defs/nn\n \"conv2d\": Conv2d.get_converter(),\n \"deconv2d\": ConvTranspose2d.get_converter(),\n \"maxpool_2d\": MaxPool2d.get_converter(),\n \"avgpool_2d\": AveragePool2d.get_converter(),\n \"adaptive_avg_pool2d\": AdaptiveAvgPool2d.get_converter(),\n \"adaptive_max_pool2d\": AdaptiveMaxPool2d.get_converter(),\n \"dropout\": Dropout.get_converter(),\n \"normalization\": BatchNorm.get_converter(),\n \"upsample_nearest_2d\": UpsampleNearest.get_converter(),\n \"upsample_bilinear_2d\": UpsampleBiLinear.get_converter(),\n # defs/tensor\n \"matmul\": MatMul.get_converter(),\n \"batch_matmul\": MatMul.get_converter(),\n \"broadcast_matmul\": MatMul.get_converter(),\n \"concat\": Concat.get_converter(),\n \"clip_by_scalar\": Clip.get_converter(),\n \"slice\": Slice.get_converter(),\n \"expand\": Expand.get_converter(),\n \"transpose\": Transpose.get_converter(),\n \"expand_dims\": ExpandDim.get_converter(),\n \"range\": Range.get_converter(),\n \"cast\": Cast.get_converter(),\n # defs/others\n \"reshape\": Reshape.get_converter(),\n \"constant\": Constant.get_converter(),\n \"where\": Where.get_converter(),\n \"flatten\": Flatten.get_converter(),\n \"sigmoid\": Renamer(\"sigmoid\"),\n \"sigmoid_v2\": Renamer(\"sigmoid\"),\n \"hardsigmoid\": HardSigmoid.get_converter(),\n \"softplus\": Softplus.get_converter(),\n \"squeeze\": AttrCvt(\"squeeze\", {\"axes\": \"axis\"}),\n \"unsqueeze\": Unsqueeze.get_converter(),\n \"identity\": Renamer(\"copy\"),\n \"var\": Variance.get_converter(),\n }\n\n\nclass oneflow_input(object):\n \"\"\"\n Dual purpose list or dictionary access object\n \"\"\"\n\n def __init__(self):\n self.input_keys = []\n self.input_dict = {}\n self.n = 0\n\n def __getitem__(self, item):\n if isinstance(item, int):\n if item > (len(self.input_keys) - 1):\n return None\n return self.input_dict[self.input_keys[item]]\n if isinstance(item, str):\n if item not in self.input_keys:\n return None\n return self.input_dict[item]\n if isinstance(item, slice):\n keys = self.input_keys[item]\n return [self.input_dict[key] for key in keys]\n\n raise ValueError(\"Only integer, string, and slice accesses allowed.\")\n\n def __setitem__(self, item, value):\n if isinstance(item, int):\n self.input_dict[self.input_keys[item]] = value\n elif isinstance(item, str):\n self.input_keys.append(item)\n self.input_dict[item] = value\n else:\n raise ValueError(\"Only integer and string indexed writes allowed.\")\n\n def keys(self):\n return self.input_keys\n\n def __len__(self):\n return len(self.input_keys)\n\n def __iter__(self):\n self.n = 0\n return self\n\n def __next__(self):\n if self.n < len(self.input_keys):\n output = self.input_dict[self.input_keys[self.n]]\n self.n += 1\n return output\n\n raise StopIteration\n\n\ndef deal_with_input_convert(\n node_input, node_input_shape, node_input_dtype, node_path, _nodes, _input_path_2_name\n):\n \"\"\"deal with input convert in oneflow.\"\"\"\n if node_input not in _nodes:\n if (\n node_path not in 
_input_path_2_name\n or \"_input.\" in node_input\n or \"FreeEagerTensor\" in node_input\n ):\n _nodes[node_input] = new_var(\n node_input,\n shape=node_input_shape,\n dtype=node_input_dtype,\n )\n else:\n names = _input_path_2_name[node_path]\n node_replace = None\n for k in names:\n if k in _nodes:\n node_replace = k\n if node_replace is not None:\n op_replace = copy.deepcopy(_nodes[node_replace])\n _nodes[node_input] = op_replace\n else:\n print(\"{} will not be in _nodes\".format(node_input))\n\n\ndef deal_parameter_convert(\n node_input_paths, model_dir_path, _input_path_2_name, _model_array, _params, _nodes\n):\n \"\"\"deal with parameter(weight) convert in oneflow.\"\"\"\n for node_input_path in node_input_paths:\n node_path = os.path.join(model_dir_path, node_input_path.replace(\"m.\", \"\", 1))\n node_input_name = node_input_path.split(\"/\")[0]\n _input_path_2_name[node_path] = node_input_name\n for param_name in _model_array:\n node_p = _model_array[param_name]\n if node_path == node_p[\"path\"]:\n node_array = node_p[\"params\"]\n _params[node_input_name] = node_array\n _nodes[node_input_name] = new_var(\n node_input_name, shape=node_array.shape, dtype=str(node_array.dtype)\n )\n break\n\n\nclass OneflowGraph(object):\n \"\"\"\n A helper class for handling Relay expression\n\n Parameters\n ----------\n shape : dict of str to tuple, optional\n The input shape to the graph\n dtype : dict of str to str\n The input types to the graph\n\n node name:\n 1. param: m.layer4.1.bn1.weight / ...\n 2. buffer: m.layer4.1.bn1.running_mean / ...\n 3. node inputs: m.layer4.1.bn1_input.0\n 4. node outputs: m.layer4.1.bn1_output.0\n \"\"\"\n\n def __init__(self, shape, dtype, nodes, model_dir_path):\n self._nodes = {}\n self._params = {}\n self._inputs = {}\n self._num_input = 0\n self._num_param = 0\n self._input_names = []\n self._model_array = {}\n self._input_path_2_name = {}\n self._output_path_2_name = {}\n self._init_variable_node = []\n self._shape = shape\n self._dtype = dtype\n self._identity_list = []\n self._sort_inputs = {}\n\n import oneflow\n\n model = oneflow.load(model_dir_path)\n # model_array: keys: layer_name, values: dict('path', 'params')\n for layer_name in model:\n layer = model[layer_name]\n layer_node = {}\n layer_node[\"path\"] = os.path.join(model_dir_path, layer_name, \"out\") # get path\n if \"System-Train\" in layer_name:\n continue\n node_name = \"m.\" + layer_name\n shape = self._shape[node_name]\n dtype = self._dtype[node_name]\n array = layer.detach().cpu().numpy()\n layer_node[\"params\"] = array.reshape(shape)\n self._model_array[layer_name] = layer_node\n\n for node_name in nodes:\n node = nodes[node_name]\n if is_user_op(node):\n for input_name in node.user_conf.input:\n node_input_paths = getattr(node.user_conf.input[input_name], \"s\")\n deal_parameter_convert(\n node_input_paths,\n model_dir_path,\n self._input_path_2_name,\n self._model_array,\n self._params,\n self._nodes,\n )\n for output_name in node.user_conf.output:\n node_output_paths = getattr(node.user_conf.output[output_name], \"s\")\n for node_output_path in node_output_paths:\n node_path = os.path.join(model_dir_path, node_output_path.replace(\"m.\", \"\"))\n node_output_name = node_output_path.split(\"/\")[0]\n self._output_path_2_name[node_path] = node_output_name\n elif is_output_op(node):\n node_output_path = getattr(node.output_conf, \"in\")\n output_path = os.path.join(\n model_dir_path, getattr(node.output_conf, \"in\").replace(\"m.\", \"\")\n )\n self._output_path_2_name[output_path] = 
node_name\n elif is_param_op(node):\n if \"FreeEagerTensor\" in node.name:\n shape = tuple(node.variable_conf.shape.dim)\n dtype = FLOW_2_STR_DTYPE[node.variable_conf.data_type]\n self._shape[node.name] = shape\n self._dtype[node.name] = dtype\n self._init_variable_node.append(node.name)\n if self._init_variable_node != []:\n print(\"{} should be defined by user\".format(self._init_variable_node))\n\n def _parse_input(self, node, model_dir_path):\n input_user_conf_list = []\n for input_name in node.user_conf.input:\n input_user_conf_list.append(input_name)\n input_user_conf_list.sort()\n for input_name in input_user_conf_list:\n node_input_paths = getattr(node.user_conf.input[input_name], \"s\")\n for i in node_input_paths:\n node_input = i.split(\"/\")[0]\n node_input_shape = self._shape[node_input]\n node_input_dtype = self._dtype[node_input]\n node_path = os.path.join(model_dir_path, i.replace(\"m.\", \"\"))\n deal_with_input_convert(\n node_input,\n node_input_shape,\n node_input_dtype,\n node_path,\n self._nodes,\n self._input_path_2_name,\n )\n\n def _parse_output(self, op_name, outputs, cnt_init=0):\n \"\"\"\n o: m.classifier.1_output.xxx\n new_o: m.classifier.1-conv2d_0\n \"_\"+new_o_xxx is in self._shape\n \"\"\"\n for o in outputs:\n if \"_output.\" not in o:\n new_o = o.replace(\"-\" + op_name, \"_output\")\n new_o = new_o.replace(\"-\" + new_o.split(\"-\")[-1], \".0\")\n for k in self._shape.keys():\n if new_o in k:\n self._shape[o] = self._shape[k]\n self._dtype[o] = self._dtype[k]\n break\n elif len(outputs) > 1:\n outputs.remove(o)\n if op_name.lower() == \"dropout\":\n if len(outputs) == 1:\n return outputs\n outputs = outputs[:-1]\n elif op_name.lower() == \"constant\":\n outputs = [self._init_variable_node[cnt_init]]\n\n if len(outputs) > 1:\n outputs = list(set(outputs))\n\n return outputs\n\n def from_oneflow(self, nodes, model_dir_path):\n \"\"\"\n Implementation of convert the OneFlow model into an equivalent Relay Function.\n \"\"\"\n # step 1: find out if unsupported ops are used\n convert_map = get_convert_map()\n unsupported_ops = set()\n for node_name in nodes:\n node = nodes[node_name]\n if is_user_op(node):\n # op names, not the layer names\n op_name = node.user_conf.op_type_name\n if (\n op_name not in convert_map\n and \"constant\" not in op_name\n and op_name not in self._identity_list\n ):\n unsupported_ops.add(op_name)\n # find out the unsupported op\n if unsupported_ops:\n msg = \"The following operators are not supported for frontend OneFlow: \"\n msg += \", \".join(unsupported_ops)\n raise tvm.error.OpNotImplemented(msg)\n\n # step 2: convert op\n for node_name in nodes:\n node = nodes[node_name]\n if is_user_op(node):\n # If there is a user-defined node, skip the following steps\n if node_name in self._inputs:\n continue\n\n op_name = node.user_conf.op_type_name\n op_attr = parse_attr(node.user_conf.attr)\n\n self._parse_input(node, model_dir_path=model_dir_path)\n\n node_inputs = oneflow_input()\n input_user_conf_list = []\n for input_name in node.user_conf.input:\n input_user_conf_list.append(input_name)\n input_user_conf_list.sort()\n for input_name in input_user_conf_list:\n node_input_paths = getattr(node.user_conf.input[input_name], \"s\")\n for i in node_input_paths:\n node_input = i.split(\"/\")[0]\n node_inputs[node_input] = self._nodes[node_input]\n\n node_outputs = []\n for output_name in node.user_conf.output:\n node_output_paths = getattr(node.user_conf.output[output_name], \"s\")\n for i in node_output_paths:\n node_output_path = 
os.path.join(model_dir_path, i.replace(\"m.\", \"\"))\n if node_output_path in self._input_path_2_name:\n node_outputs.append(self._input_path_2_name[node_output_path])\n elif node_output_path in self._output_path_2_name:\n node_outputs.append(self._output_path_2_name[node_output_path])\n node_outputs = self._parse_output(op_name, node_outputs)\n\n # convert\n op = self._convert_operator(op_name, node_inputs, op_attr)\n\n if not isinstance(op, _expr.TupleWrapper):\n outputs_num = 1\n else:\n outputs_num = len(op)\n\n assert (\n len(node_outputs) == outputs_num\n ), \"Number of output mismatch {} vs {} in {}.\".format(\n len(node_outputs), outputs_num, op_name\n )\n if outputs_num == 1:\n op = fold_constant(op)\n else:\n op = _expr.TupleWrapper(fold_constant(op.astuple()), len(op))\n\n op_temp = []\n op_temp.append(op)\n for i, _ in enumerate(node_outputs):\n if isinstance(node_outputs[i], list):\n for k in node_outputs[i]:\n self._nodes[k] = op_temp[i]\n else:\n self._nodes[node_outputs[i]] = op_temp[i]\n\n # step 3: get the outputs\n outputs = []\n for node_name, node in nodes.items():\n if is_output_op(node):\n node_name_v2 = getattr(node.output_conf, \"in\").split(\"/\")[0]\n if node_name in self._nodes:\n outputs.append(self._nodes[node_name])\n elif node_name_v2 in self._nodes:\n outputs.append(self._nodes[node_name_v2])\n outputs = outputs[0] if len(outputs) == 1 else _expr.Tuple(outputs)\n\n # step 4: get the relay IR\n free_vars = analysis.free_vars(outputs)\n\n nodes = {v: k for k, v in self._nodes.items()}\n free_vars = [nodes[var] for var in free_vars]\n free_vars_inputs = []\n free_vars_parameters = []\n for x in free_vars:\n if \"_input.0\" in x:\n free_vars_inputs.append(x)\n else:\n free_vars_parameters.append(x)\n free_vars = free_vars_inputs + free_vars_parameters\n\n # step 5: make sure the '_input.0' is the first in self._inputs\n for free_var in free_vars:\n if free_var not in self._inputs:\n self._inputs[free_var] = self._nodes[free_var]\n\n input_names = list(self._inputs.keys())\n for input_name in input_names:\n if input_name in self._inputs:\n self._sort_inputs[input_name] = self._inputs[input_name]\n else:\n raise IndexError(\"{} is not in self._inputs\".format(input_name))\n\n # step 6: create a function from our output expression and all input variables.\n func = _function.Function([v for _, v in self._sort_inputs.items()], outputs)\n\n return IRModule.from_expr(func), self._params\n\n def _convert_operator(self, op_name, node_inputs, op_attr):\n \"\"\"\n Parameters\n ----------\n op_name : str\n Operator name, such as conv2d and relu\n node_inputs : list of tvm.relay.function.Function\n List of inputs.\n op_attr : dict\n Dict of operator attributes\n\n Returns\n -------\n sym : tvm.relay.function.Function\n Converted relay function\n \"\"\"\n convert_map = get_convert_map()\n if op_name in self._identity_list:\n sym = get_relay_op(op_name)(*node_inputs, **op_attr)\n elif op_name in convert_map:\n sym = convert_map[op_name](node_inputs, op_attr, self._params)\n else:\n raise NotImplementedError(\"Operator {} not implemented.\".format(op_name))\n\n return sym\n\n\ndef from_oneflow(graph, model_dir_path):\n \"\"\"Convert a OneFlow model into an equivalent Relay Function.\n\n At present, there are two ways to run models in deep learning framework\n Dynamic Graph and Static Graph, which are also called Eager Mode and Graph\n Mode in OneFlow.\n\n In general, dynamic graphs are easier to use and static graphs have better performance.\n OneFlow offers nn.Graph, so that 
users can use the eager-like programming style to build\n static graphs and train the models.\n\n We utilize the intermediate representation of nn.Graph to convert the OneFlow model to Reley.\n\n Parameters\n ----------\n nodes : dict, keys: node.name, value: node\n contain the graph\n model_dir_path: str\n The path of weight\n\n Returns\n -------\n mod : tvm.IRModule\n The returned relay module\n params : dict\n A dict of name: tvm.nd.array pairs, used as pretrained weights\n \"\"\"\n try:\n import oneflow as flow\n except ImportError:\n raise ImportError(\"please check that OneFlow is installed\")\n\n # get info of nodes\n shape = {}\n dtype = {}\n graph_str = repr(graph)\n size_where = 2\n if \"cuda\" in graph_str:\n size_where = 3\n\n p_size = re.compile(r\"size=\\(.*?\\)\", re.S)\n p_type = re.compile(r\"dtype=.*?\\)\", re.S)\n types = [\"INPUT\", \"PARAMETER\", \"BUFFER\", \"OUTPUT\"]\n for t in types:\n data = re.finditer(t + \":.*\", graph_str)\n for i in data:\n attrs = i.group().split(\":\")\n size_str = re.findall(p_size, attrs[size_where])\n type_str = re.findall(p_type, attrs[size_where])\n assert size_str != [], \"size should not be None, please check your repr(graph)\"\n\n size_attr = size_str[0].replace(\"size=\", \"\")\n if size_attr[-2] == \",\":\n size_attr = size_attr.replace(\",\", \"\")\n data_size = tuple(map(int, size_attr[1:-1].split(\", \")))\n node_name = attrs[1]\n shape[node_name] = data_size\n dtype[node_name] = \"float32\"\n\n if type_str != []:\n type_attr = type_str[0].replace(\"dtype=\", \"\").replace(\")\", \"\")\n if type_attr[-1] == \",\":\n type_attr = type_attr.replace(\",\", \"\")\n dtype[node_name] = type_attr.replace(\"oneflow.\", \"\")\n\n # get graph proto, if you don't _compile the graph, the _graph_proto will be None\n graph_input = re.search(r\"INPUT:.*\", graph_str).group().split(\":\")\n shape_input = tuple(\n map(\n int,\n re.findall(p_size, graph_input[size_where])[0].replace(\"size=\", \"\")[1:-1].split(\", \"),\n )\n )\n if not graph._is_compiled:\n graph._compile(flow.rand(shape_input))\n graph_proto = graph._graph_proto\n\n # get all nodes\n nodes = OrderedDict()\n for op in graph_proto.net.op:\n nodes[op.name] = op\n\n g = OneflowGraph(shape, dtype, nodes, model_dir_path)\n\n # Use the graph proto as a scope so that ops can access other nodes if needed.\n mod, params = g.from_oneflow(nodes=nodes, model_dir_path=model_dir_path)\n\n return mod, params\n",
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nimport sys\n\nimport numpy as np\nimport pytest\n\nimport tvm\nimport tvm.testing\n\nfrom tvm import relay\nfrom tvm.relay.testing import check_grad\n\n\nindex_dtype = tvm.testing.parameter(\"int32\", \"int64\")\nval_dtype = tvm.testing.parameter(\"float32\", \"float64\")\nexecutor_kind = tvm.testing.parameter(\"debug\")\n\n\ndef test_cross_entropy_grad(executor_kind, target, dev, val_dtype):\n target = tvm.target.Target(target)\n if target.kind.name == \"vulkan\" and val_dtype == \"float64\":\n # GLSL.std.450's Log implementation only takes 16/32-bit floats.\n pytest.xfail(\"Known failing test case for vulkan runtime\")\n\n x = relay.var(\"x\", shape=(2, 5), dtype=val_dtype)\n y = relay.var(\"y\", shape=(2, 5), dtype=val_dtype)\n check_grad(\n relay.Function([x, y], relay.op.nn.cross_entropy(x, y)),\n eps=0.01,\n scale=0.1,\n mean=1,\n target_devices=[(target, dev)],\n executor_kind=executor_kind,\n )\n\n\ndef test_cross_entropy_with_logits_grad(executor_kind, target, dev, val_dtype):\n x = relay.var(\"x\", shape=(2, 5), dtype=val_dtype)\n y = relay.var(\"y\", shape=(2, 5), dtype=val_dtype)\n check_grad(\n relay.Function([x, y], relay.op.nn.cross_entropy_with_logits(x, y)),\n eps=0.01,\n scale=0.1,\n mean=1,\n target_devices=[(target, dev)],\n executor_kind=executor_kind,\n )\n\n\ndef test_checkpoint(executor_kind, target, dev):\n inputs = [relay.var(\"x{}\".format(i), shape=(1,)) for i in range(4)]\n output = relay.multiply(relay.add(inputs[0], inputs[1]), relay.add(inputs[2], inputs[3]))\n check_grad(\n relay.Function(inputs, relay.annotation.checkpoint(output)), executor_kind=executor_kind\n )\n\n scope = relay.ScopeBuilder()\n out_tuple = scope.let(\n \"out_tuple\",\n relay.Tuple([relay.add(inputs[0], inputs[1]), relay.multiply(inputs[2], inputs[3])]),\n )\n scope.ret(\n relay.subtract(\n relay.annotation.checkpoint(relay.TupleGetItem(out_tuple, 0)),\n relay.TupleGetItem(out_tuple, 1),\n )\n )\n out_single = scope.get()\n check_grad(\n relay.Function(inputs, out_single),\n target_devices=[(target, dev)],\n executor_kind=executor_kind,\n )\n\n\nclass TestBatchMatmulGrad:\n a_shape, b_shape, transpose_a, transpose_b = tvm.testing.parameters(\n ((2, 3, 5), (2, 5, 4), False, False),\n ((2, 3, 5), (2, 4, 5), False, True),\n ((2, 5, 3), (2, 5, 4), True, False),\n ((2, 5, 3), (2, 4, 5), True, True),\n )\n\n def test_batch_matmul_grad(\n self, executor_kind, target, dev, a_shape, b_shape, transpose_a, transpose_b\n ):\n tensor_a = relay.var(\"tensor_a\", relay.TensorType(a_shape, \"float32\"))\n tensor_b = relay.var(\"tensor_b\", relay.TensorType(b_shape, \"float32\"))\n check_grad(\n relay.Function(\n [tensor_a, tensor_b],\n relay.op.nn.batch_matmul(\n tensor_a, tensor_b, transpose_a=transpose_a, 
transpose_b=transpose_b\n ),\n ),\n target_devices=[(target, dev)],\n executor_kind=executor_kind,\n )\n\n\ndef test_reverse_reshape_grad(executor_kind, target, dev):\n x = relay.var(\"x\", shape=(3, 4, 5), dtype=\"float64\")\n check_grad(\n relay.Function([x], relay.op.reverse_reshape(x, (-1, 0))),\n target_devices=[(target, dev)],\n executor_kind=executor_kind,\n )\n\n\ndef test_one_hot_grad(executor_kind, target, dev, index_dtype, val_dtype):\n indices_shape = (3, 4)\n depth = 5\n axis = -1\n\n inputs = [\n np.random.randint(depth, size=indices_shape, dtype=index_dtype),\n np.array(np.random.randn() * 1e-5).astype(val_dtype),\n np.array(np.random.randn() * 1e-5).astype(val_dtype),\n ]\n test_inputs = inputs[1:]\n\n indices = relay.var(\"indices\", shape=indices_shape, dtype=index_dtype)\n on_val = relay.var(\"on_val\", shape=tuple(), dtype=val_dtype)\n off_val = relay.var(\"off_val\", shape=tuple(), dtype=val_dtype)\n y = relay.one_hot(indices, on_val, off_val, depth, axis, val_dtype)\n f = relay.Function([indices, on_val, off_val], y)\n\n check_grad(\n f,\n inputs=inputs,\n test_inputs=test_inputs,\n target_devices=[(target, dev)],\n executor_kind=executor_kind,\n )\n\n\nif __name__ == \"__main__\":\n tvm.testing.main()\n"
] | [
[
"numpy.zeros_like",
"numpy.random.rand",
"numpy.zeros"
],
[
"numpy.random.uniform",
"numpy.zeros",
"numpy.random.seed",
"numpy.transpose"
],
[
"numpy.dtype",
"numpy.random.randint"
],
[
"numpy.arange"
],
[
"numpy.zeros",
"numpy.isclose"
],
[
"numpy.random.randn",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
brandontrabucco/playground | [
"069be961aaecb45d75f12f4a71cfa65d7152ea8a",
"069be961aaecb45d75f12f4a71cfa65d7152ea8a"
] | [
"playground/algorithms/ddpg.py",
"playground/algorithms/policy_gradient.py"
] | [
"\"\"\"Author: Brandon Trabucco, Copyright 2019, MIT License\"\"\"\n\n\nfrom playground.algorithms.algorithm import Algorithm\nimport tensorflow as tf\n\n\nclass DDPG(Algorithm):\n\n def __init__(\n self,\n policy,\n target_policy,\n qf,\n target_qf,\n replay_buffer,\n reward_scale=1.0,\n discount=0.99,\n observation_key=\"observation\",\n batch_size=32,\n update_every=1,\n update_after=0,\n logger=None,\n logging_prefix=\"ddpg/\"\n ):\n # train a policy using the deep deterministic policy gradient\n Algorithm.__init__(\n self,\n replay_buffer,\n batch_size=batch_size,\n update_every=update_every,\n update_after=update_after,\n logger=logger,\n logging_prefix=logging_prefix)\n\n # each neural network is probabilistic\n self.policy = policy\n self.target_policy = target_policy\n self.qf = qf\n self.target_qf = target_qf\n\n # select into the observation dictionary\n self.observation_key = observation_key\n\n # control some parameters that are important for ddpg\n self.reward_scale = reward_scale\n self.discount = discount\n\n def update_algorithm(\n self,\n observations,\n actions,\n rewards,\n next_observations,\n terminals\n ):\n # select from the observation dictionary\n observations = observations[self.observation_key]\n next_observations = next_observations[self.observation_key]\n\n # build a tape to collect gradients from the policy and critics\n with tf.GradientTape(persistent=True) as tape:\n mean_actions, log_pi = self.policy.expected_value(observations)\n next_mean_actions, next_log_pi = self.target_policy.expected_value(\n next_observations)\n\n # build the q function target value\n inputs = tf.concat([next_observations, next_mean_actions], -1)\n target_qf_value = self.target_qf(inputs)[..., 0]\n self.record(\"target_qf_value\", tf.reduce_mean(target_qf_value).numpy())\n qf_targets = tf.stop_gradient(\n self.reward_scale * rewards + terminals * self.discount * (\n target_qf_value))\n self.record(\"qf_targets\", tf.reduce_mean(qf_targets).numpy())\n\n # build the q function loss\n inputs = tf.concat([observations, actions], -1)\n qf_value = self.qf(inputs)[..., 0]\n self.record(\"qf_value\", tf.reduce_mean(qf_value).numpy())\n qf_loss = tf.reduce_mean(tf.keras.losses.logcosh(qf_targets, qf_value))\n self.record(\"qf_loss\", qf_loss.numpy())\n\n # build the policy loss\n inputs = tf.concat([observations, mean_actions], -1)\n policy_qf_value = self.qf(inputs)[..., 0]\n self.record(\"policy_qf_value\", tf.reduce_mean(policy_qf_value).numpy())\n policy_loss = -tf.reduce_mean(policy_qf_value)\n self.record(\"policy_loss\", policy_loss.numpy())\n\n # back prop gradients\n self.policy.apply_gradients(\n self.policy.compute_gradients(policy_loss, tape))\n self.qf.apply_gradients(\n self.qf.compute_gradients(qf_loss, tape))\n\n # soft update target parameters\n self.target_policy.soft_update(self.policy.get_weights())\n self.target_qf.soft_update(self.qf.get_weights())\n",
"\"\"\"Author: Brandon Trabucco, Copyright 2019, MIT License\"\"\"\n\n\nfrom playground.algorithms.algorithm import Algorithm\nfrom playground import discounted_sum\nimport tensorflow as tf\n\n\nclass PolicyGradient(Algorithm):\n\n def __init__(\n self,\n policy,\n replay_buffer,\n reward_scale=1.0,\n discount=0.99,\n observation_key=\"observation\",\n batch_size=32,\n update_every=1,\n update_after=0,\n logger=None,\n logging_prefix=\"policy_gradient/\"\n ):\n # train a policy using the vanilla policy gradient\n Algorithm.__init__(\n self,\n replay_buffer,\n batch_size=batch_size,\n update_every=update_every,\n update_after=update_after,\n logger=logger,\n logging_prefix=logging_prefix)\n\n # the policy is a probabilistic neural network\n self.policy = policy\n\n # select into the observation dictionary\n self.observation_key = observation_key\n\n # control the scale and decay of the reward\n self.reward_scale = reward_scale\n self.discount = discount\n\n def update_algorithm(\n self,\n observations,\n actions,\n rewards,\n terminals\n ):\n # select elements from the observation dictionary\n observations = observations[self.observation_key]\n\n # update the policy gradient algorithm\n with tf.GradientTape() as tape:\n\n # compute advantages using the sampled rewards\n discounted_returns = discounted_sum(rewards, self.discount)\n self.record(\"discounted_returns\", tf.reduce_mean(discounted_returns))\n advantages = discounted_returns - tf.reduce_mean(discounted_returns)\n self.record(\"advantages\", tf.reduce_mean(advantages))\n\n # compute the surrogate policy loss\n policy_log_prob = self.policy.log_prob(actions, observations)\n self.record(\"policy_log_prob\", tf.reduce_mean(policy_log_prob))\n policy_loss = -tf.reduce_mean(policy_log_prob * advantages)\n self.record(\"policy_loss\", policy_loss)\n\n # back prop gradients into the policy\n self.policy.apply_gradients(\n self.policy.compute_gradients(policy_loss, tape))\n"
] | [
[
"tensorflow.concat",
"tensorflow.reduce_mean",
"tensorflow.keras.losses.logcosh",
"tensorflow.stop_gradient",
"tensorflow.GradientTape"
],
[
"tensorflow.reduce_mean",
"tensorflow.GradientTape"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"2.3",
"2.4",
"2.9",
"2.5",
"2.2",
"2.10"
]
}
] |
folk85/gen_turb | [
"4390938c4cefae334e95414f83b9c484991bff67"
] | [
"tests/plot_time_space.py"
] | [
"# -*- coding: utf-8 -*-\nimport os\nimport numpy as np\nimport matplotlib as m\nimport matplotlib.pyplot as plt\nfrom scipy.fftpack import *\n\nfrom plot_spectr import *\n\ndef main_routine():\n print(os.getcwd())\n nfile = './store.dat'\n #Read the file by blocks to reduce required memory\n with open(nfile,'r') as f:\n nel = sum(1 for _ in f)\n f.close()\n #repeat for each timesteps\n nk = 64*64 *64\n ntimes = nel / nk\n\ndef get_nel(nfile):\n with open(nfile,'r') as f:\n nel = sum(1 for _ in f)\n f.close()\n return nel\n \ndef plot_spectr(uin,vin,win):\n\n alpha = 1.339e0\n L = 1.0e-1\n sigma = 1.0e+1\n\n # x,y,z = np.genfromtxt('tests/spectr.dat',unpack=True)\n # x,y,z = np.genfromtxt('../hita/spectrum.dat',unpack=True)\n # x1,y1,z1 = np.genfromtxt('../hita/spectrum_32.dat',unpack=True)\n \n uvel,vvel,wvel = np.genfromtxt('./store.dat',unpack=True)\n nk = int(round(np.size(uvel)**(1./3.)))\n nel = nk\n ufft = fftn(uvel.reshape(nk,nk,nk))\n vfft = fftn(vvel.reshape(nk,nk,nk))\n wfft = fftn(wvel.reshape(nk,nk,nk))\n muu = ufft*np.conj(ufft) / nel**6\n mvv = vfft*np.conj(vfft) / nel**6\n mww = wfft*np.conj(wfft) / nel**6\n\n # calc std\n umean = np.array([np.mean(uvel),np.mean(vvel),np.mean(wvel)])\n std_i = np.array([np.std(uvel),np.std(vvel),np.std(wvel)])\n sigma = np.sqrt(np.sum(std_i[:]**2))\n print(std_i[0],np.sqrt(np.mean((uvel[:]-umean[0])**2)), sigma)\n dx = 10.\n k = np.arange(-nk//2,nk//2)*dx\n k = np.roll(k,nk//2)\n spectrum = np.zeros(nk)\n count = np.zeros(nk)\n # ?np.meshgrid(k,k,k)\n X,Y,Z = np.meshgrid(k,k,k)\n r = np.sqrt(X**2+Y**2+Z**2) #*dx\n # print(np.shape(r),r.min(),r.max(),k.max(),r[:,0,0])\n for i,ki in enumerate(k[:nk//2]):\n t = np.where((r<=ki+dx/2)&(r>ki-dx/2))\n spectrum[i] = np.sum(muu[t].real) + np.sum(mvv[t].real) + np.sum(mww[t].real)\n count[i] = np.size(t[0]) \n spectrum[i] *= 2.*np.pi*k[i]**2/dx**3/(count[i]+1.0e-30)\n\n font = {'family': 'Droid Sans',\n 'weight': 'normal',\n 'size': 12}\n m.rc('axes',linewidth=2)\n m.rc('font',**font)\n m.rc('lines',markeredgewidth=1.0)\n f,ax = plt.subplots()\n xf = np.linspace(np.log(k[1]/2),np.log(k[nk//2-1]*2.),100)\n xf = np.exp(xf)\n ax.loglog(xf,Ek(xf,alpha,L,sigma),c='g',lw=2)\n ax.loglog(k[:nk//2],spectrum[:nk//2],'bx-',lw=0.5,ms=8)\n # ax.loglog(x,y,'bx')\n # ax.loglog(x1,y1,'ro')\n ax.set_xlabel(u'$k, 1/м$',size='large')\n ax.set_ylabel(u'$E(k), м^3/с^2$',size='large')\n plt.grid()\n plt.tight_layout()\n plt.show()\n del(f)\n del(ax)\n plt.clf()\n\n Rij_x=(ufft*np.conj(ufft)) # compute velo. 
correlation tensor\n Rij_y=(vfft*np.conj(vfft))\n Rij_z=(wfft*np.conj(wfft))\n\n R1=ifftn(Rij_x)/np.std((uvel))**2/nel**3;\n R2=ifftn(Rij_y)/np.std((vvel))**2/nel**3;\n R3=ifftn(Rij_z)/np.std((wvel))**2/nel**3;\n \n NFFT=np.size(ufft,1)\n R11 = (R3[0,0,:]+R2[0,:,0]+R1[:,0,0])/3.\n # R11 = R11[:np.size(ufft)//2+1]\n R1_22 = (R1[0,:,0]+R3[0,:,0])/2.0e0\n R2_22 = (R2[:,0,0]+R3[:,0,0])/2.0e0\n R3_22 = (R1[0,0,:]+R2[0,0,:])/2.0e0\n\n R22 = (R1_22+R2_22+R3_22)/3.0e0\n # R22 = R22(1:size(u_fft)/2+1);\n Lx = 2.0*np.pi*1.0e-1\n r = np.linspace(0,Lx,NFFT)/(Lx/2);\n\n l11 = np.trapz(np.real(R11[:NFFT//2+1]),dx=r[1]-r[0])\n l22 = np.trapz(np.real(R22[:NFFT//2+1]),dx=r[1]-r[0])\n print(\"Integral Length Scale Longitudal: %g\"%(l11))\n print(\"Integral Length Scale Tangent: %g\"%(l22))\n\n f,ax = plt.subplots(1)\n ax.plot(r[:NFFT//2+1],R11[:NFFT//2+1],marker='>',mfc='w',lw=2,label=u'$R_{11}$')\n ax.plot(r[:NFFT//2+1],R22[:NFFT//2+1],marker='s',markerfacecolor='w',lw=2,label=u'$R_{22}$')\n ax.plot(r[:NFFT//2],np.exp(-r[:NFFT//2]/l11))\n ax.plot(r[:NFFT//2],1.e0+(1.0e0-R22[NFFT//2])*(np.exp(-r[:NFFT//2]/(l22-R22[NFFT//2]))-1.0e0))\n plt.legend()\n plt.tight_layout()\n ax.set_xlabel(u'$r$')\n ax.set_ylabel(u'$R_{11}, R_{22}$')\n plt.grid()\n plt.show()\n return [k[:nk//2],spectrum[:nk//2],r[:NFFT//2+1],R11[:NFFT//2+1],R22[:NFFT//2+1]]\n\ndef Ek(k,alpha=1.339,L=0.01,sigma=10.):\n tmp = (alpha * L * k) **2\n tmp = sigma*sigma*L * tmp * tmp * 5.5e+1/ (27.0 * np.pi * (1.0 + tmp)**(1.7e+1/6.0e0))\n return tmp\n\nif __name__ == '__main__':\n main_routine()\n\n"
] | [
[
"matplotlib.pyplot.legend",
"numpy.sqrt",
"numpy.linspace",
"numpy.mean",
"numpy.exp",
"numpy.where",
"numpy.roll",
"matplotlib.pyplot.tight_layout",
"numpy.arange",
"numpy.size",
"numpy.real",
"numpy.std",
"numpy.zeros",
"numpy.log",
"numpy.genfromtxt",
"matplotlib.pyplot.show",
"numpy.meshgrid",
"numpy.sum",
"matplotlib.rc",
"numpy.conj",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.grid"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sagnik1511/U-Net-Lowered-with-keras | [
"364336b244ece288a52cf76df451501a665e745a"
] | [
"code/UNET_lowered.py"
] | [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nUNET LOwered Model :\r\n \r\n This customized UNet Model has been generated lowering the filters to their 25% .\r\n \r\n\"\"\"\r\n\r\nimport tensorflow as tf\r\nfrom tensorflow import keras\r\nfrom tensorflow.keras import layers \r\nfrom tensorflow.keras.layers import Input , Conv2D , MaxPooling2D , Dropout , concatenate , UpSampling2D\r\nfrom tensorflow.keras import models\r\nfrom tensorflow.keras import losses\r\nfrom tensorflow.keras import optimizers\r\nimport numpy as np\r\n\r\n\r\ndef UNet(input_shape):\r\n keras.backend.clear_session()\r\n inputs = Input(input_shape)\r\n conv1 = Conv2D(16, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(inputs)\r\n conv1 = Conv2D(16, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv1)\r\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\r\n\r\n conv2 = Conv2D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool1)\r\n conv2 = Conv2D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv2)\r\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\r\n\r\n conv3 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool2)\r\n conv3 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv3)\r\n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\r\n \r\n conv4 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool3)\r\n conv4 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv4)\r\n drop4 = Dropout(0.5)(conv4)\r\n pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)\r\n\r\n conv5 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool4)\r\n conv5 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv5)\r\n drop5 = Dropout(0.5)(conv5)\r\n\r\n up6 = Conv2D(128, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(drop5))\r\n merge6 = concatenate([drop4,up6], axis = 3)\r\n conv6 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge6)\r\n conv6 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv6)\r\n\r\n up7 = Conv2D(64, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv6))\r\n merge7 = concatenate([conv3,up7], axis = 3)\r\n conv7 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge7)\r\n conv7 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv7)\r\n\r\n up8 = Conv2D(32, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv7))\r\n merge8 = concatenate([conv2,up8], axis = 3)\r\n conv8 = Conv2D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge8)\r\n conv8 = Conv2D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv8)\r\n\r\n up9 = Conv2D(16, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv8))\r\n merge9 = concatenate([conv1,up9], axis = 3)\r\n conv9 = Conv2D(16, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge9)\r\n conv9 = Conv2D(16, 3, activation = 'relu', padding = 'same', kernel_initializer = 
'he_normal')(conv9)\r\n conv9 = Conv2D(2, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)\r\n\r\n outputs = layers.Conv2D(1, 1, activation = 'sigmoid')(conv9)\r\n\r\n model = keras.Model(inputs = inputs , outputs = outputs,name = 'UNet')\r\n\r\n return model"
] | [
[
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.UpSampling2D",
"tensorflow.keras.layers.concatenate",
"tensorflow.keras.Model",
"tensorflow.keras.backend.clear_session",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.layers.Input"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
Rippling/modin | [
"b2cf1d5fc704803a1ce6699e9a373dc7abeb409e",
"d0c84590798f33358dc896eef9d7cd9c519b6289",
"b2cf1d5fc704803a1ce6699e9a373dc7abeb409e"
] | [
"modin/experimental/engines/omnisci_on_ray/frame/calcite_builder.py",
"modin/pandas/test/dataframe/test_window.py",
"modin/pandas/utils.py"
] | [
"# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\nfrom .expr import (\n InputRefExpr,\n LiteralExpr,\n OpExpr,\n AggregateExpr,\n build_if_then_else,\n build_row_idx_filter_expr,\n)\nfrom .calcite_algebra import (\n CalciteBaseNode,\n CalciteInputRefExpr,\n CalciteInputIdxExpr,\n CalciteScanNode,\n CalciteProjectionNode,\n CalciteFilterNode,\n CalciteAggregateNode,\n CalciteCollation,\n CalciteSortNode,\n CalciteJoinNode,\n CalciteUnionNode,\n)\nfrom .df_algebra import (\n FrameNode,\n MaskNode,\n GroupbyAggNode,\n TransformNode,\n JoinNode,\n UnionNode,\n SortNode,\n FilterNode,\n)\n\nfrom collections import abc\nfrom pandas.core.dtypes.common import _get_dtype\n\n\nclass CalciteBuilder:\n class CompoundAggregate:\n def __init__(self, builder, arg):\n self._builder = builder\n self._arg = arg\n\n def gen_proj_exprs(self):\n return []\n\n def gen_agg_exprs(self):\n pass\n\n def gen_reduce_expr(self):\n pass\n\n class StdAggregate(CompoundAggregate):\n def __init__(self, builder, arg):\n assert isinstance(arg, InputRefExpr)\n super().__init__(builder, arg)\n\n self._quad_name = self._arg.column + \"__quad__\"\n self._sum_name = self._arg.column + \"__sum__\"\n self._quad_sum_name = self._arg.column + \"__quad_sum__\"\n self._count_name = self._arg.column + \"__count__\"\n\n def gen_proj_exprs(self):\n expr = self._builder._translate(self._arg.mul(self._arg))\n return {self._quad_name: expr}\n\n def gen_agg_exprs(self):\n count_expr = self._builder._translate(AggregateExpr(\"count\", self._arg))\n sum_expr = self._builder._translate(AggregateExpr(\"sum\", self._arg))\n self._sum_dtype = sum_expr._dtype\n qsum_expr = AggregateExpr(\n \"SUM\",\n self._builder._ref_idx(self._arg.modin_frame, self._quad_name),\n dtype=sum_expr._dtype,\n )\n\n return {\n self._sum_name: sum_expr,\n self._quad_sum_name: qsum_expr,\n self._count_name: count_expr,\n }\n\n def gen_reduce_expr(self):\n count_expr = self._builder._ref(self._arg.modin_frame, self._count_name)\n count_expr._dtype = _get_dtype(int)\n sum_expr = self._builder._ref(self._arg.modin_frame, self._sum_name)\n sum_expr._dtype = self._sum_dtype\n qsum_expr = self._builder._ref(self._arg.modin_frame, self._quad_sum_name)\n qsum_expr._dtype = self._sum_dtype\n\n null_expr = LiteralExpr(None)\n count_or_null = build_if_then_else(\n count_expr.eq(LiteralExpr(0)), null_expr, count_expr, count_expr._dtype\n )\n count_m_1_or_null = build_if_then_else(\n count_expr.eq(LiteralExpr(1)),\n null_expr,\n count_expr.sub(LiteralExpr(1)),\n count_expr._dtype,\n )\n\n # sqrt((sum(x * x) - sum(x) * sum(x) / n) / (n - 1))\n return (\n qsum_expr.sub(sum_expr.mul(sum_expr).truediv(count_or_null))\n .truediv(count_m_1_or_null)\n .pow(LiteralExpr(0.5))\n )\n\n class SkewAggregate(CompoundAggregate):\n def __init__(self, builder, arg):\n 
assert isinstance(arg, InputRefExpr)\n super().__init__(builder, arg)\n\n self._quad_name = self._arg.column + \"__quad__\"\n self._cube_name = self._arg.column + \"__cube__\"\n self._sum_name = self._arg.column + \"__sum__\"\n self._quad_sum_name = self._arg.column + \"__quad_sum__\"\n self._cube_sum_name = self._arg.column + \"__cube_sum__\"\n self._count_name = self._arg.column + \"__count__\"\n\n def gen_proj_exprs(self):\n quad_expr = self._builder._translate(self._arg.mul(self._arg))\n cube_expr = self._builder._translate(\n self._arg.mul(self._arg).mul(self._arg)\n )\n return {self._quad_name: quad_expr, self._cube_name: cube_expr}\n\n def gen_agg_exprs(self):\n count_expr = self._builder._translate(AggregateExpr(\"count\", self._arg))\n sum_expr = self._builder._translate(AggregateExpr(\"sum\", self._arg))\n self._sum_dtype = sum_expr._dtype\n qsum_expr = AggregateExpr(\n \"SUM\",\n self._builder._ref_idx(self._arg.modin_frame, self._quad_name),\n dtype=sum_expr._dtype,\n )\n csum_expr = AggregateExpr(\n \"SUM\",\n self._builder._ref_idx(self._arg.modin_frame, self._cube_name),\n dtype=sum_expr._dtype,\n )\n\n return {\n self._sum_name: sum_expr,\n self._quad_sum_name: qsum_expr,\n self._cube_sum_name: csum_expr,\n self._count_name: count_expr,\n }\n\n def gen_reduce_expr(self):\n count_expr = self._builder._ref(self._arg.modin_frame, self._count_name)\n count_expr._dtype = _get_dtype(int)\n sum_expr = self._builder._ref(self._arg.modin_frame, self._sum_name)\n sum_expr._dtype = self._sum_dtype\n qsum_expr = self._builder._ref(self._arg.modin_frame, self._quad_sum_name)\n qsum_expr._dtype = self._sum_dtype\n csum_expr = self._builder._ref(self._arg.modin_frame, self._cube_sum_name)\n csum_expr._dtype = self._sum_dtype\n\n mean_expr = sum_expr.truediv(count_expr)\n\n # n * sqrt(n - 1) / (n - 2)\n # * (sum(x ** 3) - 3 * mean * sum(x * x) + 2 * mean * mean * sum(x))\n # / (sum(x * x) - mean * sum(x)) ** 1.5\n part1 = count_expr.mul(\n count_expr.sub(LiteralExpr(1)).pow(LiteralExpr(0.5))\n ).truediv(count_expr.sub(LiteralExpr(2)))\n part2 = csum_expr.sub(mean_expr.mul(qsum_expr).mul(LiteralExpr(3.0))).add(\n mean_expr.mul(mean_expr).mul(sum_expr).mul(LiteralExpr(2.0))\n )\n part3 = qsum_expr.sub(mean_expr.mul(sum_expr)).pow(LiteralExpr(1.5))\n skew_expr = part1.mul(part2).truediv(part3)\n\n # The result is NULL if n <= 2\n return build_if_then_else(\n count_expr.le(LiteralExpr(2)),\n LiteralExpr(None),\n skew_expr,\n skew_expr._dtype,\n )\n\n _compound_aggregates = {\"std\": StdAggregate, \"skew\": SkewAggregate}\n\n class InputContext:\n _simple_aggregates = {\n \"sum\": \"SUM\",\n \"mean\": \"AVG\",\n \"max\": \"MAX\",\n \"min\": \"MIN\",\n \"size\": \"COUNT\",\n \"count\": \"COUNT\",\n }\n _no_arg_aggregates = {\"size\"}\n\n def __init__(self, input_frames, input_nodes):\n self.input_nodes = input_nodes\n self.frame_to_node = {x: y for x, y in zip(input_frames, input_nodes)}\n self.input_offsets = {}\n self.replacements = {}\n offs = 0\n for frame in input_frames:\n self.input_offsets[frame] = offs\n offs += len(frame._table_cols)\n # Materialized frames have additional 'rowid' column\n if isinstance(frame._op, FrameNode):\n offs += 1\n\n def replace_input_node(self, frame, node, new_cols):\n self.replacements[frame] = new_cols\n\n def _idx(self, frame, col):\n assert (\n frame in self.input_offsets\n ), f\"unexpected reference to {frame.id_str()}\"\n\n offs = self.input_offsets[frame]\n\n if frame in self.replacements:\n return self.replacements[frame].index(col) + offs\n\n if col 
== \"__rowid__\":\n if not isinstance(self.frame_to_node[frame], CalciteScanNode):\n raise NotImplementedError(\n \"rowid can be accessed in materialized frames only\"\n )\n return len(frame._table_cols) + offs\n\n assert (\n col in frame._table_cols\n ), f\"unexpected reference to '{col}' in {frame.id_str()}\"\n return frame._table_cols.index(col) + offs\n\n def ref(self, frame, col):\n return CalciteInputRefExpr(self._idx(frame, col))\n\n def ref_idx(self, frame, col):\n return CalciteInputIdxExpr(self._idx(frame, col))\n\n def input_ids(self):\n return [x.id for x in self.input_nodes]\n\n def translate(self, expr):\n \"\"\"Copy those parts of expr tree that have input references\n and translate all references into CalciteInputRefExr\"\"\"\n return self._maybe_copy_and_translate_expr(expr)\n\n def _maybe_copy_and_translate_expr(self, expr, ref_idx=False):\n if isinstance(expr, InputRefExpr):\n if ref_idx:\n return self.ref_idx(expr.modin_frame, expr.column)\n else:\n return self.ref(expr.modin_frame, expr.column)\n\n if isinstance(expr, AggregateExpr):\n expr = expr.copy()\n if expr.agg in self._no_arg_aggregates:\n expr.operands = []\n else:\n expr.operands[0] = self._maybe_copy_and_translate_expr(\n expr.operands[0], True\n )\n expr.agg = self._simple_aggregates[expr.agg]\n return expr\n\n copied = False\n for i, op in enumerate(getattr(expr, \"operands\", [])):\n new_op = self._maybe_copy_and_translate_expr(op)\n if new_op != op:\n if not copied:\n expr = expr.copy()\n expr.operands[i] = new_op\n return expr\n\n class InputContextMgr:\n def __init__(self, builder, input_frames, input_nodes):\n self.builder = builder\n self.input_frames = input_frames\n self.input_nodes = input_nodes\n\n def __enter__(self):\n self.builder._input_ctx_stack.append(\n self.builder.InputContext(self.input_frames, self.input_nodes)\n )\n return self.builder._input_ctx_stack[-1]\n\n def __exit__(self, type, value, traceback):\n self.builder._input_ctx_stack.pop()\n\n type_strings = {\n int: \"INTEGER\",\n bool: \"BOOLEAN\",\n }\n\n def __init__(self):\n self._input_ctx_stack = []\n\n def build(self, op):\n CalciteBaseNode.reset_id()\n self.res = []\n self._to_calcite(op)\n return self.res\n\n def _input_ctx(self):\n return self._input_ctx_stack[-1]\n\n def _set_input_ctx(self, op):\n input_frames = getattr(op, \"input\", [])\n input_nodes = [self._to_calcite(x._op) for x in input_frames]\n return self.InputContextMgr(self, input_frames, input_nodes)\n\n def _set_tmp_ctx(self, input_frames, input_nodes):\n return self.InputContextMgr(self, input_frames, input_nodes)\n\n def _ref(self, frame, col):\n return self._input_ctx().ref(frame, col)\n\n def _ref_idx(self, frame, col):\n return self._input_ctx().ref_idx(frame, col)\n\n def _translate(self, exprs):\n if isinstance(exprs, abc.Iterable):\n return [self._input_ctx().translate(x) for x in exprs]\n return self._input_ctx().translate(exprs)\n\n def _push(self, node):\n self.res.append(node)\n\n def _last(self):\n return self.res[-1]\n\n def _input_nodes(self):\n return self._input_ctx().input_nodes\n\n def _input_node(self, idx):\n return self._input_nodes()[idx]\n\n def _input_ids(self):\n return self._input_ctx().input_ids()\n\n def _to_calcite(self, op):\n # This context translates input operands and setup current\n # input context to translate input references (recursion\n # over tree happens here).\n with self._set_input_ctx(op):\n if isinstance(op, FrameNode):\n self._process_frame(op)\n elif isinstance(op, MaskNode):\n self._process_mask(op)\n elif 
isinstance(op, GroupbyAggNode):\n self._process_groupby(op)\n elif isinstance(op, TransformNode):\n self._process_transform(op)\n elif isinstance(op, JoinNode):\n self._process_join(op)\n elif isinstance(op, UnionNode):\n self._process_union(op)\n elif isinstance(op, SortNode):\n self._process_sort(op)\n elif isinstance(op, FilterNode):\n self._process_filter(op)\n else:\n raise NotImplementedError(\n f\"CalciteBuilder doesn't support {type(op).__name__}\"\n )\n return self.res[-1]\n\n def _process_frame(self, op):\n self._push(CalciteScanNode(op.modin_frame))\n\n def _process_mask(self, op):\n if op.row_indices is not None:\n raise NotImplementedError(\"row indices masking is not yet supported\")\n\n frame = op.input[0]\n\n # select rows by rowid\n rowid_col = self._ref(frame, \"__rowid__\")\n condition = build_row_idx_filter_expr(op.row_numeric_idx, rowid_col)\n self._push(CalciteFilterNode(condition))\n\n # mask is currently always applied over scan, it means\n # we need additional projection to remove rowid column\n fields = frame._table_cols\n exprs = [self._ref(frame, col) for col in frame._table_cols]\n self._push(CalciteProjectionNode(fields, exprs))\n\n def _process_groupby(self, op):\n frame = op.input[0]\n\n # Aggregation's input should always be a projection and\n # group key columns should always go first\n proj_cols = op.by.copy()\n for col in frame._table_cols:\n if col not in op.by:\n proj_cols.append(col)\n proj_exprs = [self._ref(frame, col) for col in proj_cols]\n # Add expressions required for compound aggregates\n compound_aggs = {}\n for agg, expr in op.agg_exprs.items():\n if expr.agg in self._compound_aggregates:\n compound_aggs[agg] = self._compound_aggregates[expr.agg](\n self, expr.operands[0]\n )\n extra_exprs = compound_aggs[agg].gen_proj_exprs()\n proj_cols.extend(extra_exprs.keys())\n proj_exprs.extend(extra_exprs.values())\n proj = CalciteProjectionNode(proj_cols, proj_exprs)\n self._push(proj)\n\n self._input_ctx().replace_input_node(frame, proj, proj_cols)\n\n group = [self._ref_idx(frame, col) for col in op.by]\n fields = op.by.copy()\n aggs = []\n for agg, expr in op.agg_exprs.items():\n if agg in compound_aggs:\n extra_aggs = compound_aggs[agg].gen_agg_exprs()\n fields.extend(extra_aggs.keys())\n aggs.extend(extra_aggs.values())\n else:\n fields.append(agg)\n aggs.append(self._translate(expr))\n node = CalciteAggregateNode(fields, group, aggs)\n self._push(node)\n\n if compound_aggs:\n self._input_ctx().replace_input_node(frame, node, fields)\n proj_cols = op.by.copy()\n proj_exprs = [self._ref(frame, col) for col in proj_cols]\n proj_cols.extend(op.agg_exprs.keys())\n for agg in op.agg_exprs:\n if agg in compound_aggs:\n proj_exprs.append(compound_aggs[agg].gen_reduce_expr())\n else:\n proj_exprs.append(self._ref(frame, agg))\n proj = CalciteProjectionNode(proj_cols, proj_exprs)\n self._push(proj)\n\n if op.groupby_opts[\"sort\"]:\n collation = [CalciteCollation(col) for col in group]\n self._push(CalciteSortNode(collation))\n\n def _process_transform(self, op):\n fields = list(op.exprs.keys())\n exprs = self._translate(op.exprs.values())\n self._push(CalciteProjectionNode(fields, exprs))\n\n def _process_join(self, op):\n left = op.input[0]\n right = op.input[1]\n\n assert (\n op.on is not None\n ), \"Merge with unspecified 'on' parameter is not supported in the engine\"\n\n for col in op.on:\n assert (\n col in left._table_cols and col in right._table_cols\n ), f\"Column '{col}'' is missing in one of merge operands\"\n\n \"\"\" Join, only equal-join 
supported \"\"\"\n cmps = [self._ref(left, c).eq(self._ref(right, c)) for c in op.on]\n if len(cmps) > 1:\n condition = OpExpr(\"AND\", cmps, _get_dtype(bool))\n else:\n condition = cmps[0]\n node = CalciteJoinNode(\n left_id=self._input_node(0).id,\n right_id=self._input_node(1).id,\n how=op.how,\n condition=condition,\n )\n self._push(node)\n\n \"\"\"Projection for both frames\"\"\"\n fields = []\n exprs = []\n conflicting_cols = set(left.columns) & set(right.columns) - set(op.on)\n \"\"\"First goes 'on' column then all left columns(+suffix for conflicting names)\n but 'on' then all right columns(+suffix for conflicting names) but 'on'\"\"\"\n on_idx = [-1] * len(op.on)\n for c in left.columns:\n if c in op.on:\n on_idx[op.on.index(c)] = len(fields)\n suffix = op.suffixes[0] if c in conflicting_cols else \"\"\n fields.append(c + suffix)\n exprs.append(self._ref(left, c))\n\n for c in right.columns:\n if c not in op.on:\n suffix = op.suffixes[1] if c in conflicting_cols else \"\"\n fields.append(c + suffix)\n exprs.append(self._ref(right, c))\n\n self._push(CalciteProjectionNode(fields, exprs))\n\n # TODO: current input translation system doesn't work here\n # because there is no frame to reference for index computation.\n # We should build calcite tree to keep references to input\n # nodes and keep scheme in calcite nodes. For now just use\n # known index on_idx.\n if op.sort is True:\n \"\"\"Sort by key column\"\"\"\n collation = [CalciteCollation(CalciteInputIdxExpr(x)) for x in on_idx]\n self._push(CalciteSortNode(collation))\n\n def _process_union(self, op):\n self._push(CalciteUnionNode(self._input_ids(), True))\n\n def _process_sort(self, op):\n frame = op.input[0]\n\n # Sort should be applied to projections.\n if not isinstance(self._input_node(0), CalciteProjectionNode):\n proj = CalciteProjectionNode(\n frame._table_cols, [self._ref(frame, col) for col in frame._table_cols]\n )\n self._push(proj)\n self._input_ctx().replace_input_node(frame, proj, frame._table_cols)\n\n nulls = op.na_position.upper()\n collations = []\n for col, asc in zip(op.columns, op.ascending):\n ascending = \"ASCENDING\" if asc else \"DESCENDING\"\n collations.append(\n CalciteCollation(self._ref_idx(frame, col), ascending, nulls)\n )\n self._push(CalciteSortNode(collations))\n\n def _process_filter(self, op):\n condition = self._translate(op.condition)\n self._push(CalciteFilterNode(condition))\n",
"# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\nimport pytest\nimport numpy as np\nimport pandas\nimport matplotlib\nimport modin.pandas as pd\n\nfrom modin.pandas.test.utils import (\n random_state,\n df_equals,\n arg_keys,\n name_contains,\n test_data_values,\n test_data_keys,\n test_data_with_duplicates_values,\n test_data_with_duplicates_keys,\n no_numeric_dfs,\n quantiles_keys,\n quantiles_values,\n axis_keys,\n axis_values,\n bool_arg_keys,\n bool_arg_values,\n int_arg_keys,\n int_arg_values,\n test_data,\n eval_general,\n create_test_dfs,\n test_data_diff_dtype,\n)\n\npd.DEFAULT_NPARTITIONS = 4\n\n# Force matplotlib to not use any Xwindows backend.\nmatplotlib.use(\"Agg\")\n\n\[email protected](\"axis\", [0, 1])\[email protected](\n \"skipna\", bool_arg_values, ids=arg_keys(\"skipna\", bool_arg_keys)\n)\[email protected](\"method\", [\"cumprod\", \"cummin\", \"cummax\", \"cumsum\"])\ndef test_cumprod_cummin_cummax_cumsum(axis, skipna, method):\n eval_general(\n *create_test_dfs(test_data[\"float_nan_data\"]),\n lambda df: getattr(df, method)(axis=axis, skipna=skipna),\n )\n\n\[email protected](\"axis\", [\"rows\", \"columns\"])\[email protected](\"method\", [\"cumprod\", \"cummin\", \"cummax\", \"cumsum\"])\ndef test_cumprod_cummin_cummax_cumsum_transposed(axis, method):\n eval_general(\n *create_test_dfs(test_data[\"int_data\"]),\n lambda df: getattr(df.T, method)(axis=axis),\n )\n\n\[email protected](\"axis\", [0, 1])\[email protected](\"method\", [\"cummin\", \"cummax\"])\ndef test_cummin_cummax_int_and_float(axis, method):\n data = {\"col1\": list(range(1000)), \"col2\": [i * 0.1 for i in range(1000)]}\n eval_general(*create_test_dfs(data), lambda df: getattr(df, method)(axis=axis))\n\n\[email protected](\"axis\", [0, 1])\[email protected](\n \"periods\", int_arg_values, ids=arg_keys(\"periods\", int_arg_keys)\n)\ndef test_diff(axis, periods):\n eval_general(\n *create_test_dfs(test_data[\"float_nan_data\"]),\n lambda df: df.diff(axis=axis, periods=periods),\n )\n\n\[email protected](\"axis\", [\"rows\", \"columns\"])\ndef test_diff_transposed(axis):\n eval_general(\n *create_test_dfs(test_data[\"int_data\"]),\n lambda df: df.T.diff(axis=axis),\n )\n\n\[email protected](\n \"data\", test_data_with_duplicates_values, ids=test_data_with_duplicates_keys\n)\[email protected](\n \"keep\", [\"last\", \"first\", False], ids=[\"last\", \"first\", \"False\"]\n)\ndef test_duplicated(data, keep):\n modin_df = pd.DataFrame(data)\n pandas_df = pandas.DataFrame(data)\n\n pandas_result = pandas_df.duplicated(keep=keep)\n modin_result = modin_df.duplicated(keep=keep)\n df_equals(modin_result, pandas_result)\n\n import random\n\n subset = random.sample(\n list(pandas_df.columns), random.randint(1, len(pandas_df.columns))\n )\n pandas_result = 
pandas_df.duplicated(keep=keep, subset=subset)\n modin_result = modin_df.duplicated(keep=keep, subset=subset)\n\n df_equals(modin_result, pandas_result)\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test_ffill(data):\n modin_df = pd.DataFrame(data)\n pandas_df = pandas.DataFrame(data)\n df_equals(modin_df.ffill(), pandas_df.ffill())\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\[email protected](\n \"method\",\n [\"backfill\", \"bfill\", \"pad\", \"ffill\", None],\n ids=[\"backfill\", \"bfill\", \"pad\", \"ffill\", \"None\"],\n)\[email protected](\"axis\", axis_values, ids=axis_keys)\[email protected](\"limit\", int_arg_values, ids=int_arg_keys)\ndef test_fillna(data, method, axis, limit):\n # We are not testing when limit is not positive until pandas-27042 gets fixed.\n # We are not testing when axis is over rows until pandas-17399 gets fixed.\n if limit > 0 and axis != 1 and axis != \"columns\":\n modin_df = pd.DataFrame(data)\n pandas_df = pandas.DataFrame(data)\n\n try:\n pandas_result = pandas_df.fillna(0, method=method, axis=axis, limit=limit)\n except Exception as e:\n with pytest.raises(type(e)):\n modin_df.fillna(0, method=method, axis=axis, limit=limit)\n else:\n modin_result = modin_df.fillna(0, method=method, axis=axis, limit=limit)\n df_equals(modin_result, pandas_result)\n\n\ndef test_fillna_sanity():\n # with different dtype\n frame_data = [\n [\"a\", \"a\", np.nan, \"a\"],\n [\"b\", \"b\", np.nan, \"b\"],\n [\"c\", \"c\", np.nan, \"c\"],\n ]\n df = pandas.DataFrame(frame_data)\n\n result = df.fillna({2: \"foo\"})\n modin_df = pd.DataFrame(frame_data).fillna({2: \"foo\"})\n\n df_equals(modin_df, result)\n\n modin_df = pd.DataFrame(df)\n df.fillna({2: \"foo\"}, inplace=True)\n modin_df.fillna({2: \"foo\"}, inplace=True)\n df_equals(modin_df, result)\n\n frame_data = {\n \"Date\": [pandas.NaT, pandas.Timestamp(\"2014-1-1\")],\n \"Date2\": [pandas.Timestamp(\"2013-1-1\"), pandas.NaT],\n }\n df = pandas.DataFrame(frame_data)\n result = df.fillna(value={\"Date\": df[\"Date2\"]})\n modin_df = pd.DataFrame(frame_data).fillna(value={\"Date\": df[\"Date2\"]})\n df_equals(modin_df, result)\n\n frame_data = {\"A\": [pandas.Timestamp(\"2012-11-11 00:00:00+01:00\"), pandas.NaT]}\n df = pandas.DataFrame(frame_data)\n modin_df = pd.DataFrame(frame_data)\n df_equals(modin_df.fillna(method=\"pad\"), df.fillna(method=\"pad\"))\n\n frame_data = {\"A\": [pandas.NaT, pandas.Timestamp(\"2012-11-11 00:00:00+01:00\")]}\n df = pandas.DataFrame(frame_data)\n modin_df = pd.DataFrame(frame_data).fillna(method=\"bfill\")\n df_equals(modin_df, df.fillna(method=\"bfill\"))\n\n\ndef test_fillna_downcast():\n # infer int64 from float64\n frame_data = {\"a\": [1.0, np.nan]}\n df = pandas.DataFrame(frame_data)\n result = df.fillna(0, downcast=\"infer\")\n modin_df = pd.DataFrame(frame_data).fillna(0, downcast=\"infer\")\n df_equals(modin_df, result)\n\n # infer int64 from float64 when fillna value is a dict\n df = pandas.DataFrame(frame_data)\n result = df.fillna({\"a\": 0}, downcast=\"infer\")\n modin_df = pd.DataFrame(frame_data).fillna({\"a\": 0}, downcast=\"infer\")\n df_equals(modin_df, result)\n\n\ndef test_fillna_inplace():\n frame_data = random_state.randn(10, 4)\n df = pandas.DataFrame(frame_data)\n df[1][:4] = np.nan\n df[3][-4:] = np.nan\n\n modin_df = pd.DataFrame(df)\n df.fillna(value=0, inplace=True)\n try:\n df_equals(modin_df, df)\n except AssertionError:\n pass\n else:\n assert False\n\n modin_df.fillna(value=0, inplace=True)\n 
df_equals(modin_df, df)\n\n modin_df = pd.DataFrame(df).fillna(value={0: 0}, inplace=True)\n assert modin_df is None\n\n df[1][:4] = np.nan\n df[3][-4:] = np.nan\n modin_df = pd.DataFrame(df)\n df.fillna(method=\"ffill\", inplace=True)\n try:\n df_equals(modin_df, df)\n except AssertionError:\n pass\n else:\n assert False\n\n modin_df.fillna(method=\"ffill\", inplace=True)\n df_equals(modin_df, df)\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test_frame_fillna_limit(data):\n pandas_df = pandas.DataFrame(data)\n\n index = pandas_df.index\n\n result = pandas_df[:2].reindex(index)\n modin_df = pd.DataFrame(result)\n df_equals(\n modin_df.fillna(method=\"pad\", limit=2), result.fillna(method=\"pad\", limit=2)\n )\n\n result = pandas_df[-2:].reindex(index)\n modin_df = pd.DataFrame(result)\n df_equals(\n modin_df.fillna(method=\"backfill\", limit=2),\n result.fillna(method=\"backfill\", limit=2),\n )\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test_frame_pad_backfill_limit(data):\n pandas_df = pandas.DataFrame(data)\n\n index = pandas_df.index\n\n result = pandas_df[:2].reindex(index)\n modin_df = pd.DataFrame(result)\n df_equals(\n modin_df.fillna(method=\"pad\", limit=2), result.fillna(method=\"pad\", limit=2)\n )\n\n result = pandas_df[-2:].reindex(index)\n modin_df = pd.DataFrame(result)\n df_equals(\n modin_df.fillna(method=\"backfill\", limit=2),\n result.fillna(method=\"backfill\", limit=2),\n )\n\n\ndef test_fillna_dtype_conversion():\n # make sure that fillna on an empty frame works\n df = pandas.DataFrame(index=range(3), columns=[\"A\", \"B\"], dtype=\"float64\")\n modin_df = pd.DataFrame(index=range(3), columns=[\"A\", \"B\"], dtype=\"float64\")\n df_equals(modin_df.fillna(\"nan\"), df.fillna(\"nan\"))\n\n frame_data = {\"A\": [1, np.nan], \"B\": [1.0, 2.0]}\n df = pandas.DataFrame(frame_data)\n modin_df = pd.DataFrame(frame_data)\n for v in [\"\", 1, np.nan, 1.0]:\n df_equals(modin_df.fillna(v), df.fillna(v))\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test_fillna_skip_certain_blocks(data):\n modin_df = pd.DataFrame(data)\n pandas_df = pandas.DataFrame(data)\n\n # don't try to fill boolean, int blocks\n df_equals(modin_df.fillna(np.nan), pandas_df.fillna(np.nan))\n\n\ndef test_fillna_dict_series():\n frame_data = {\n \"a\": [np.nan, 1, 2, np.nan, np.nan],\n \"b\": [1, 2, 3, np.nan, np.nan],\n \"c\": [np.nan, 1, 2, 3, 4],\n }\n df = pandas.DataFrame(frame_data)\n modin_df = pd.DataFrame(frame_data)\n\n df_equals(modin_df.fillna({\"a\": 0, \"b\": 5}), df.fillna({\"a\": 0, \"b\": 5}))\n\n df_equals(\n modin_df.fillna({\"a\": 0, \"b\": 5, \"d\": 7}),\n df.fillna({\"a\": 0, \"b\": 5, \"d\": 7}),\n )\n\n # Series treated same as dict\n df_equals(modin_df.fillna(modin_df.max()), df.fillna(df.max()))\n\n\ndef test_fillna_dataframe():\n frame_data = {\n \"a\": [np.nan, 1, 2, np.nan, np.nan],\n \"b\": [1, 2, 3, np.nan, np.nan],\n \"c\": [np.nan, 1, 2, 3, 4],\n }\n df = pandas.DataFrame(frame_data, index=list(\"VWXYZ\"))\n modin_df = pd.DataFrame(frame_data, index=list(\"VWXYZ\"))\n\n # df2 may have different index and columns\n df2 = pandas.DataFrame(\n {\n \"a\": [np.nan, 10, 20, 30, 40],\n \"b\": [50, 60, 70, 80, 90],\n \"foo\": [\"bar\"] * 5,\n },\n index=list(\"VWXuZ\"),\n )\n modin_df2 = pd.DataFrame(df2)\n\n # only those columns and indices which are shared get filled\n df_equals(modin_df.fillna(modin_df2), df.fillna(df2))\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef 
test_fillna_columns(data):\n modin_df = pd.DataFrame(data)\n pandas_df = pandas.DataFrame(data)\n\n df_equals(\n modin_df.fillna(method=\"ffill\", axis=1),\n pandas_df.fillna(method=\"ffill\", axis=1),\n )\n\n df_equals(\n modin_df.fillna(method=\"ffill\", axis=1),\n pandas_df.fillna(method=\"ffill\", axis=1),\n )\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test_fillna_invalid_method(data):\n modin_df = pd.DataFrame(data)\n pandas_df = pandas.DataFrame(data) # noqa F841\n\n with pytest.raises(ValueError):\n modin_df.fillna(method=\"ffil\")\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test_fillna_invalid_value(data):\n modin_df = pd.DataFrame(data)\n # list\n pytest.raises(TypeError, modin_df.fillna, [1, 2])\n # tuple\n pytest.raises(TypeError, modin_df.fillna, (1, 2))\n # frame with series\n pytest.raises(TypeError, modin_df.iloc[:, 0].fillna, modin_df)\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test_fillna_col_reordering(data):\n modin_df = pd.DataFrame(data)\n pandas_df = pandas.DataFrame(data)\n\n df_equals(modin_df.fillna(method=\"ffill\"), pandas_df.fillna(method=\"ffill\"))\n\n\ndef test_fillna_datetime_columns():\n frame_data = {\n \"A\": [-1, -2, np.nan],\n \"B\": pd.date_range(\"20130101\", periods=3),\n \"C\": [\"foo\", \"bar\", None],\n \"D\": [\"foo2\", \"bar2\", None],\n }\n df = pandas.DataFrame(frame_data, index=pd.date_range(\"20130110\", periods=3))\n modin_df = pd.DataFrame(frame_data, index=pd.date_range(\"20130110\", periods=3))\n df_equals(modin_df.fillna(\"?\"), df.fillna(\"?\"))\n\n frame_data = {\n \"A\": [-1, -2, np.nan],\n \"B\": [\n pandas.Timestamp(\"2013-01-01\"),\n pandas.Timestamp(\"2013-01-02\"),\n pandas.NaT,\n ],\n \"C\": [\"foo\", \"bar\", None],\n \"D\": [\"foo2\", \"bar2\", None],\n }\n df = pandas.DataFrame(frame_data, index=pd.date_range(\"20130110\", periods=3))\n modin_df = pd.DataFrame(frame_data, index=pd.date_range(\"20130110\", periods=3))\n df_equals(modin_df.fillna(\"?\"), df.fillna(\"?\"))\n\n\[email protected](\"axis\", [0, 1])\[email protected](\n \"skipna\", bool_arg_values, ids=arg_keys(\"skipna\", bool_arg_keys)\n)\[email protected](\"method\", [\"median\", \"skew\"])\ndef test_median_skew(axis, skipna, method):\n eval_general(\n *create_test_dfs(test_data[\"float_nan_data\"]),\n lambda df: getattr(df, method)(axis=axis, skipna=skipna),\n )\n\n\[email protected](\"axis\", [\"rows\", \"columns\"])\[email protected](\"method\", [\"median\", \"skew\"])\ndef test_median_skew_transposed(axis, method):\n eval_general(\n *create_test_dfs(test_data[\"int_data\"]),\n lambda df: getattr(df.T, method)(axis=axis),\n )\n\n\[email protected](\n \"numeric_only\",\n [\n pytest.param(\n True,\n marks=pytest.mark.xfail(\n reason=\"Internal and external indices do not match.\"\n ),\n ),\n False,\n pytest.param(\n None,\n marks=pytest.mark.xfail(\n reason=\"Internal and external indices do not match.\"\n ),\n ),\n ],\n)\[email protected](\"method\", [\"median\", \"skew\", \"std\", \"var\", \"rank\", \"sem\"])\ndef test_median_skew_std_var_rank_sem_specific(numeric_only, method):\n eval_general(\n *create_test_dfs(test_data_diff_dtype),\n lambda df: getattr(df, method)(numeric_only=numeric_only),\n )\n\n\[email protected](\"method\", [\"median\", \"skew\", \"std\", \"var\", \"sem\"])\ndef test_median_skew_std_var_sem_1953(method):\n # See #1953 for details\n arrays = [[\"1\", \"1\", \"2\", \"2\"], [\"1\", \"2\", \"3\", \"4\"]]\n data = [[1, 2, 3, 4], [1, 2, 3, 4], 
[1, 2, 3, 4], [1, 2, 3, 4]]\n modin_df = pd.DataFrame(data, index=arrays)\n pandas_df = pandas.DataFrame(data, index=arrays)\n\n eval_general(modin_df, pandas_df, lambda df: getattr(df, method)(level=0))\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\[email protected](\"axis\", axis_values, ids=axis_keys)\[email protected](\n \"numeric_only\", bool_arg_values, ids=arg_keys(\"numeric_only\", bool_arg_keys)\n)\ndef test_mode(request, data, axis, numeric_only):\n modin_df = pd.DataFrame(data)\n pandas_df = pandas.DataFrame(data)\n\n try:\n pandas_result = pandas_df.mode(axis=axis, numeric_only=numeric_only)\n except Exception:\n with pytest.raises(TypeError):\n modin_df.mode(axis=axis, numeric_only=numeric_only)\n else:\n modin_result = modin_df.mode(axis=axis, numeric_only=numeric_only)\n df_equals(modin_result, pandas_result)\n\n\ndef test_nlargest():\n data = {\n \"population\": [\n 59000000,\n 65000000,\n 434000,\n 434000,\n 434000,\n 337000,\n 11300,\n 11300,\n 11300,\n ],\n \"GDP\": [1937894, 2583560, 12011, 4520, 12128, 17036, 182, 38, 311],\n \"alpha-2\": [\"IT\", \"FR\", \"MT\", \"MV\", \"BN\", \"IS\", \"NR\", \"TV\", \"AI\"],\n }\n index = [\n \"Italy\",\n \"France\",\n \"Malta\",\n \"Maldives\",\n \"Brunei\",\n \"Iceland\",\n \"Nauru\",\n \"Tuvalu\",\n \"Anguilla\",\n ]\n modin_df = pd.DataFrame(data=data, index=index)\n pandas_df = pandas.DataFrame(data=data, index=index)\n df_equals(modin_df.nlargest(3, \"population\"), pandas_df.nlargest(3, \"population\"))\n\n\ndef test_nsmallest():\n data = {\n \"population\": [\n 59000000,\n 65000000,\n 434000,\n 434000,\n 434000,\n 337000,\n 11300,\n 11300,\n 11300,\n ],\n \"GDP\": [1937894, 2583560, 12011, 4520, 12128, 17036, 182, 38, 311],\n \"alpha-2\": [\"IT\", \"FR\", \"MT\", \"MV\", \"BN\", \"IS\", \"NR\", \"TV\", \"AI\"],\n }\n index = [\n \"Italy\",\n \"France\",\n \"Malta\",\n \"Maldives\",\n \"Brunei\",\n \"Iceland\",\n \"Nauru\",\n \"Tuvalu\",\n \"Anguilla\",\n ]\n modin_df = pd.DataFrame(data=data, index=index)\n pandas_df = pandas.DataFrame(data=data, index=index)\n df_equals(\n modin_df.nsmallest(n=3, columns=\"population\"),\n pandas_df.nsmallest(n=3, columns=\"population\"),\n )\n df_equals(\n modin_df.nsmallest(n=2, columns=[\"population\", \"GDP\"], keep=\"all\"),\n pandas_df.nsmallest(n=2, columns=[\"population\", \"GDP\"], keep=\"all\"),\n )\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\[email protected](\"axis\", axis_values, ids=axis_keys)\[email protected](\n \"dropna\", bool_arg_values, ids=arg_keys(\"dropna\", bool_arg_keys)\n)\ndef test_nunique(data, axis, dropna):\n modin_df = pd.DataFrame(data)\n pandas_df = pandas.DataFrame(data)\n\n modin_result = modin_df.nunique(axis=axis, dropna=dropna)\n pandas_result = pandas_df.nunique(axis=axis, dropna=dropna)\n df_equals(modin_result, pandas_result)\n\n modin_result = modin_df.T.nunique(axis=axis, dropna=dropna)\n pandas_result = pandas_df.T.nunique(axis=axis, dropna=dropna)\n df_equals(modin_result, pandas_result)\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\[email protected](\"q\", quantiles_values, ids=quantiles_keys)\ndef test_quantile(request, data, q):\n modin_df = pd.DataFrame(data)\n pandas_df = pandas.DataFrame(data)\n\n if not name_contains(request.node.name, no_numeric_dfs):\n df_equals(modin_df.quantile(q), pandas_df.quantile(q))\n df_equals(modin_df.quantile(q, axis=1), pandas_df.quantile(q, axis=1))\n\n try:\n pandas_result = pandas_df.quantile(q, axis=1, numeric_only=False)\n except 
Exception as e:\n with pytest.raises(type(e)):\n modin_df.quantile(q, axis=1, numeric_only=False)\n else:\n modin_result = modin_df.quantile(q, axis=1, numeric_only=False)\n df_equals(modin_result, pandas_result)\n else:\n with pytest.raises(ValueError):\n modin_df.quantile(q)\n\n if not name_contains(request.node.name, no_numeric_dfs):\n df_equals(modin_df.T.quantile(q), pandas_df.T.quantile(q))\n df_equals(modin_df.T.quantile(q, axis=1), pandas_df.T.quantile(q, axis=1))\n\n try:\n pandas_result = pandas_df.T.quantile(q, axis=1, numeric_only=False)\n except Exception as e:\n with pytest.raises(type(e)):\n modin_df.T.quantile(q, axis=1, numeric_only=False)\n else:\n modin_result = modin_df.T.quantile(q, axis=1, numeric_only=False)\n df_equals(modin_result, pandas_result)\n else:\n with pytest.raises(ValueError):\n modin_df.T.quantile(q)\n\n\[email protected](\"axis\", [\"rows\", \"columns\"])\[email protected](\n \"na_option\", [\"keep\", \"top\", \"bottom\"], ids=[\"keep\", \"top\", \"bottom\"]\n)\ndef test_rank_transposed(axis, na_option):\n eval_general(\n *create_test_dfs(test_data[\"int_data\"]),\n lambda df: df.rank(axis=axis, na_option=na_option),\n )\n\n\[email protected](\n \"skipna\", bool_arg_values, ids=arg_keys(\"skipna\", bool_arg_keys)\n)\[email protected](\"ddof\", int_arg_values, ids=arg_keys(\"ddof\", int_arg_keys))\ndef test_sem_float_nan_only(skipna, ddof):\n eval_general(\n *create_test_dfs(test_data[\"float_nan_data\"]),\n lambda df: df.sem(skipna=skipna, ddof=ddof),\n )\n\n\[email protected](\"axis\", [\"rows\", \"columns\"])\[email protected](\"ddof\", int_arg_values, ids=arg_keys(\"ddof\", int_arg_keys))\ndef test_sem_int_only(axis, ddof):\n eval_general(\n *create_test_dfs(test_data[\"int_data\"]),\n lambda df: df.sem(axis=axis, ddof=ddof),\n )\n\n\[email protected](\"axis\", [0, 1])\[email protected](\n \"skipna\", bool_arg_values, ids=arg_keys(\"skipna\", bool_arg_keys)\n)\[email protected](\"method\", [\"std\", \"var\", \"rank\"])\ndef test_std_var_rank(axis, skipna, method):\n eval_general(\n *create_test_dfs(test_data[\"float_nan_data\"]),\n lambda df: getattr(df, method)(axis=axis, skipna=skipna),\n )\n\n\[email protected](\"axis\", [\"rows\", \"columns\"])\[email protected](\"ddof\", int_arg_values, ids=arg_keys(\"ddof\", int_arg_keys))\[email protected](\"method\", [\"std\", \"var\"])\ndef test_std_var_transposed(axis, ddof, method):\n eval_general(\n *create_test_dfs(test_data[\"int_data\"]),\n lambda df: getattr(df.T, method)(axis=axis, ddof=ddof),\n )\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test_values(data):\n modin_df = pd.DataFrame(data)\n pandas_df = pandas.DataFrame(data)\n\n np.testing.assert_equal(modin_df.values, pandas_df.values)\n",
"# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\n\"\"\"Implement utils for pandas component.\"\"\"\n\n\ndef from_non_pandas(df, index, columns, dtype):\n \"\"\"\n Implement [METHOD_NAME].\n\n TODO: Add more details for this docstring template.\n\n Parameters\n ----------\n What arguments does this function have.\n [\n PARAMETER_NAME: PARAMETERS TYPES\n Description.\n ]\n\n Returns\n -------\n What this returns (if anything)\n \"\"\"\n from modin.data_management.factories.dispatcher import EngineDispatcher\n\n new_qc = EngineDispatcher.from_non_pandas(df, index, columns, dtype)\n if new_qc is not None:\n from .dataframe import DataFrame\n\n return DataFrame(query_compiler=new_qc)\n return new_qc\n\n\ndef from_pandas(df):\n \"\"\"\n Convert a pandas DataFrame to a Modin DataFrame.\n\n Parameters\n ----------\n df: pandas.DataFrame\n The pandas DataFrame to convert.\n\n Returns\n -------\n A new Modin DataFrame object.\n \"\"\"\n from modin.data_management.factories.dispatcher import EngineDispatcher\n from .dataframe import DataFrame\n\n return DataFrame(query_compiler=EngineDispatcher.from_pandas(df))\n\n\ndef from_arrow(at):\n \"\"\"\n Convert an Arrow Table to a Modin DataFrame.\n\n Parameters\n ----------\n at: Arrow Table\n The Arrow Table to convert from.\n\n Returns\n -------\n DataFrame\n A new Modin DataFrame object.\n \"\"\"\n from modin.data_management.factories.dispatcher import EngineDispatcher\n from .dataframe import DataFrame\n\n return DataFrame(query_compiler=EngineDispatcher.from_arrow(at))\n\n\ndef is_scalar(obj):\n \"\"\"\n Return True if given object is scalar.\n\n This method wrks the same as is_scalar method from Pandas but\n it is optimized for Modin frames. For BasePandasDataset objects\n Pandas version of is_scalar tries to access missing attribute\n causing index scan. This tiggers execution for lazy frames and\n we avoid it by handling BasePandasDataset objects separately.\n\n Parameters\n ----------\n val: object\n Object to check.\n\n Returns\n -------\n bool\n True if given object is scalar and False otherwise.\n \"\"\"\n from pandas.api.types import is_scalar as pandas_is_scalar\n from .base import BasePandasDataset\n\n return not isinstance(obj, BasePandasDataset) and pandas_is_scalar(obj)\n"
] | [
[
"pandas.core.dtypes.common._get_dtype"
],
[
"matplotlib.use",
"pandas.Timestamp",
"pandas.DataFrame",
"numpy.testing.assert_equal"
],
[
"pandas.api.types.is_scalar"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
lucadiliello/metrics | [
"e98fbafd2af5d217596958f9cfe6152543a00b7f",
"e98fbafd2af5d217596958f9cfe6152543a00b7f"
] | [
"torchmetrics/regression/pearson.py",
"torchmetrics/functional/text/wip.py"
] | [
"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, List, Optional, Tuple\n\nimport torch\nfrom torch import Tensor\n\nfrom torchmetrics.functional.regression.pearson import _pearson_corrcoef_compute, _pearson_corrcoef_update\nfrom torchmetrics.metric import Metric\n\n\ndef _final_aggregation(\n means_x: Tensor,\n means_y: Tensor,\n vars_x: Tensor,\n vars_y: Tensor,\n corrs_xy: Tensor,\n nbs: Tensor,\n) -> Tuple[Tensor, Tensor, Tensor, Tensor]:\n \"\"\"Aggregate the statistics from multiple devices.\n\n Formula taken from here: `Aggregate the statistics from multiple devices`_\n \"\"\"\n # assert len(means_x) > 1 and len(means_y) > 1 and len(vars_x) > 1 and len(vars_y) > 1 and len(corrs_xy) > 1\n mx1, my1, vx1, vy1, cxy1, n1 = means_x[0], means_y[0], vars_x[0], vars_y[0], corrs_xy[0], nbs[0]\n for i in range(1, len(means_x)):\n mx2, my2, vx2, vy2, cxy2, n2 = means_x[i], means_y[i], vars_x[i], vars_y[i], corrs_xy[i], nbs[i]\n\n nb = n1 + n2\n mean_x = (n1 * mx1 + n2 * mx2) / nb\n mean_y = (n1 * my1 + n2 * my2) / nb\n var_x = 1 / (n1 + n2 - 1) * ((n1 - 1) * vx1 + (n2 - 1) * vx2 + ((n1 * n2) / (n1 + n2)) * (mx1 - mx2) ** 2)\n var_y = 1 / (n1 + n2 - 1) * ((n1 - 1) * vy1 + (n2 - 1) * vy2 + ((n1 * n2) / (n1 + n2)) * (my1 - my2) ** 2)\n\n corr1 = n1 * cxy1 + n1 * (mx1 - mean_x) * (my1 - mean_y)\n corr2 = n2 * cxy2 + n2 * (mx2 - mean_x) * (my2 - mean_y)\n corr_xy = (corr1 + corr2) / (n1 + n2)\n\n mx1, my1, vx1, vy1, cxy1, n1 = mean_x, mean_y, var_x, var_y, corr_xy, nb\n\n return var_x, var_y, corr_xy, nb\n\n\nclass PearsonCorrcoef(Metric):\n r\"\"\"\n Computes `Pearson Correlation Coefficient`_:\n\n .. math::\n P_{corr}(x,y) = \\frac{cov(x,y)}{\\sigma_x \\sigma_y}\n\n Where :math:`y` is a tensor of target values, and :math:`x` is a\n tensor of predictions.\n\n Forward accepts\n\n - ``preds`` (float tensor): ``(N,)``\n - ``target``(float tensor): ``(N,)``\n\n Args:\n compute_on_step:\n Forward only calls ``update()`` and return None if this is set to False. default: True\n dist_sync_on_step:\n Synchronize metric state across processes at each ``forward()``\n before returning the value at the step. default: False\n process_group:\n Specify the process group on which synchronization is called. 
default: None (which selects the entire world)\n\n Example:\n >>> from torchmetrics import PearsonCorrcoef\n >>> target = torch.tensor([3, -0.5, 2, 7])\n >>> preds = torch.tensor([2.5, 0.0, 2, 8])\n >>> pearson = PearsonCorrcoef()\n >>> pearson(preds, target)\n tensor(0.9849)\n\n \"\"\"\n is_differentiable = True\n higher_is_better = None # both -1 and 1 are optimal\n preds: List[Tensor]\n target: List[Tensor]\n mean_x: Tensor\n mean_y: Tensor\n var_x: Tensor\n var_y: Tensor\n corr_xy: Tensor\n n_total: Tensor\n\n def __init__(\n self,\n compute_on_step: bool = True,\n dist_sync_on_step: bool = False,\n process_group: Optional[Any] = None,\n ) -> None:\n super().__init__(\n compute_on_step=compute_on_step,\n dist_sync_on_step=dist_sync_on_step,\n process_group=process_group,\n )\n\n self.add_state(\"mean_x\", default=torch.tensor(0.0), dist_reduce_fx=None)\n self.add_state(\"mean_y\", default=torch.tensor(0.0), dist_reduce_fx=None)\n self.add_state(\"var_x\", default=torch.tensor(0.0), dist_reduce_fx=None)\n self.add_state(\"var_y\", default=torch.tensor(0.0), dist_reduce_fx=None)\n self.add_state(\"corr_xy\", default=torch.tensor(0.0), dist_reduce_fx=None)\n self.add_state(\"n_total\", default=torch.tensor(0.0), dist_reduce_fx=None)\n\n def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore\n \"\"\"Update state with predictions and targets.\n\n Args:\n preds: Predictions from model\n target: Ground truth values\n \"\"\"\n self.mean_x, self.mean_y, self.var_x, self.var_y, self.corr_xy, self.n_total = _pearson_corrcoef_update(\n preds, target, self.mean_x, self.mean_y, self.var_x, self.var_y, self.corr_xy, self.n_total\n )\n\n def compute(self) -> Tensor:\n \"\"\"Computes pearson correlation coefficient over state.\"\"\"\n if self.mean_x.numel() > 1: # multiple devices, need further reduction\n var_x, var_y, corr_xy, n_total = _final_aggregation(\n self.mean_x, self.mean_y, self.var_x, self.var_y, self.corr_xy, self.n_total\n )\n else:\n var_x = self.var_x\n var_y = self.var_y\n corr_xy = self.corr_xy\n n_total = self.n_total\n\n return _pearson_corrcoef_compute(var_x, var_y, corr_xy, n_total)\n",
"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import List, Tuple, Union\n\nimport torch\nfrom torch import Tensor, tensor\n\nfrom torchmetrics.functional.text.helper import _edit_distance\n\n\ndef _wip_update(\n predictions: Union[str, List[str]],\n references: Union[str, List[str]],\n) -> Tuple[Tensor, Tensor, Tensor]:\n \"\"\"Update the wip score with the current set of references and predictions.\n\n Args:\n predictions: Transcription(s) to score as a string or list of strings\n references: Reference(s) for each speech input as a string or list of strings\n Returns:\n Number of edit operations to get from the reference to the prediction, summed over all samples\n Number of words overall references\n Number of words overall prediction\n \"\"\"\n if isinstance(predictions, str):\n predictions = [predictions]\n if isinstance(references, str):\n references = [references]\n total = tensor(0, dtype=torch.float)\n errors = tensor(0, dtype=torch.float)\n reference_total = tensor(0, dtype=torch.float)\n prediction_total = tensor(0, dtype=torch.float)\n for prediction, reference in zip(predictions, references):\n prediction_tokens = prediction.split()\n reference_tokens = reference.split()\n errors += _edit_distance(prediction_tokens, reference_tokens)\n reference_total += len(reference_tokens)\n prediction_total += len(prediction_tokens)\n total += max(len(reference_tokens), len(prediction_tokens))\n\n return errors - total, reference_total, prediction_total\n\n\ndef _wip_compute(errors: Tensor, reference_total: Tensor, prediction_total: Tensor) -> Tensor:\n \"\"\"Compute the Word Information Perserved.\n\n Args:\n errors: Number of edit operations to get from the reference to the prediction, summed over all samples\n reference_total: Number of words overall references\n prediction_total: Number of words overall prediction\n Returns:\n Word Information Perserved score\n \"\"\"\n return (errors / reference_total) * (errors / prediction_total)\n\n\ndef word_information_preserved(\n predictions: Union[str, List[str]],\n references: Union[str, List[str]],\n) -> Tensor:\n \"\"\"Word Information Preserved rate is a metric of the performance of an automatic speech recognition system. This\n value indicates the percentage of characters that were incorrectly predicted. 
The lower the value, the better the\n performance of the ASR system with a Word Information preserved rate of 0 being a perfect score.\n Args:\n predictions: Transcription(s) to score as a string or list of strings\n references: Reference(s) for each speech input as a string or list of strings\n Returns:\n Word Information preserved rate\n Examples:\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> word_information_preserved(predictions=predictions, references=references)\n tensor(0.3472)\n \"\"\"\n errors, reference_total, prediction_total = _wip_update(predictions, references)\n return _wip_compute(errors, reference_total, prediction_total)\n"
] | [
[
"torch.tensor"
],
[
"torch.tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Lizhi-sjtu/DRL-code-pytorch | [
"2ca05f4ed64d2d032e161fc3a2d2a68c818c4337"
] | [
"8.SAC/SAC-continuous.py"
] | [
"import gym\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport numpy as np\r\nimport copy\r\nfrom torch.utils.tensorboard import SummaryWriter\r\nfrom torch.distributions import Normal\r\n\r\n\r\nclass Actor(nn.Module):\r\n def __init__(self, state_dim, action_dim, hidden_width, max_action):\r\n super(Actor, self).__init__()\r\n self.max_action = max_action\r\n self.l1 = nn.Linear(state_dim, hidden_width)\r\n self.l2 = nn.Linear(hidden_width, hidden_width)\r\n self.mean_layer = nn.Linear(hidden_width, action_dim)\r\n self.log_std_layer = nn.Linear(hidden_width, action_dim)\r\n\r\n def forward(self, x, deterministic=False, with_logprob=True):\r\n x = F.relu(self.l1(x))\r\n x = F.relu(self.l2(x))\r\n mean = self.mean_layer(x)\r\n log_std = self.log_std_layer(x) # We output the log_std to ensure that std=exp(log_std)>0\r\n log_std = torch.clamp(log_std, -20, 2)\r\n std = torch.exp(log_std)\r\n\r\n dist = Normal(mean, std) # Generate a Gaussian distribution\r\n if deterministic: # When evaluating,we use the deterministic policy\r\n a = mean\r\n else:\r\n a = dist.rsample() # reparameterization trick: mean+std*N(0,1)\r\n\r\n if with_logprob: # The method refers to Open AI Spinning up, which is more stable.\r\n log_pi = dist.log_prob(a).sum(dim=1, keepdim=True)\r\n log_pi -= (2 * (np.log(2) - a - F.softplus(-2 * a))).sum(dim=1, keepdim=True)\r\n else:\r\n log_pi = None\r\n\r\n a = self.max_action * torch.tanh(a) # Use tanh to compress the unbounded Gaussian distribution into a bounded action interval.\r\n\r\n return a, log_pi\r\n\r\n\r\nclass Critic(nn.Module): # According to (s,a), directly calculate Q(s,a)\r\n def __init__(self, state_dim, action_dim, hidden_width):\r\n super(Critic, self).__init__()\r\n # Q1\r\n self.l1 = nn.Linear(state_dim + action_dim, hidden_width)\r\n self.l2 = nn.Linear(hidden_width, hidden_width)\r\n self.l3 = nn.Linear(hidden_width, 1)\r\n # Q2\r\n self.l4 = nn.Linear(state_dim + action_dim, hidden_width)\r\n self.l5 = nn.Linear(hidden_width, hidden_width)\r\n self.l6 = nn.Linear(hidden_width, 1)\r\n\r\n def forward(self, s, a):\r\n s_a = torch.cat([s, a], 1)\r\n q1 = F.relu(self.l1(s_a))\r\n q1 = F.relu(self.l2(q1))\r\n q1 = self.l3(q1)\r\n\r\n q2 = F.relu(self.l4(s_a))\r\n q2 = F.relu(self.l5(q2))\r\n q2 = self.l6(q2)\r\n\r\n return q1, q2\r\n\r\n\r\nclass ReplayBuffer(object):\r\n def __init__(self, state_dim, action_dim):\r\n self.max_size = int(1e6)\r\n self.count = 0\r\n self.size = 0\r\n self.s = np.zeros((self.max_size, state_dim))\r\n self.a = np.zeros((self.max_size, action_dim))\r\n self.r = np.zeros((self.max_size, 1))\r\n self.s_ = np.zeros((self.max_size, state_dim))\r\n self.dw = np.zeros((self.max_size, 1))\r\n\r\n def store(self, s, a, r, s_, dw):\r\n self.s[self.count] = s\r\n self.a[self.count] = a\r\n self.r[self.count] = r\r\n self.s_[self.count] = s_\r\n self.dw[self.count] = dw\r\n self.count = (self.count + 1) % self.max_size # When the 'count' reaches max_size, it will be reset to 0.\r\n self.size = min(self.size + 1, self.max_size) # Record the number of transitions\r\n\r\n def sample(self, batch_size):\r\n index = np.random.choice(self.size, size=batch_size) # Randomly sampling\r\n batch_s = torch.tensor(self.s[index], dtype=torch.float)\r\n batch_a = torch.tensor(self.a[index], dtype=torch.float)\r\n batch_r = torch.tensor(self.r[index], dtype=torch.float)\r\n batch_s_ = torch.tensor(self.s_[index], dtype=torch.float)\r\n batch_dw = torch.tensor(self.dw[index], dtype=torch.float)\r\n\r\n return batch_s, 
batch_a, batch_r, batch_s_, batch_dw\r\n\r\n\r\nclass SAC(object):\r\n def __init__(self, state_dim, action_dim, max_action):\r\n self.max_action = max_action\r\n self.hidden_width = 256 # The number of neurons in hidden layers of the neural network\r\n self.batch_size = 256 # batch size\r\n self.GAMMA = 0.99 # discount factor\r\n self.TAU = 0.005 # Softly update the target network\r\n self.lr = 3e-4 # learning rate\r\n self.adaptive_alpha = True # Whether to automatically learn the temperature alpha\r\n if self.adaptive_alpha:\r\n # Target Entropy = −dim(A) (e.g. , -6 for HalfCheetah-v2) as given in the paper\r\n self.target_entropy = -action_dim\r\n # We learn log_alpha instead of alpha to ensure that alpha=exp(log_alpha)>0\r\n self.log_alpha = torch.zeros(1, requires_grad=True)\r\n self.alpha = self.log_alpha.exp()\r\n self.alpha_optimizer = torch.optim.Adam([self.log_alpha], lr=self.lr)\r\n else:\r\n self.alpha = 0.2\r\n\r\n self.actor = Actor(state_dim, action_dim, self.hidden_width, max_action)\r\n self.critic = Critic(state_dim, action_dim, self.hidden_width)\r\n self.critic_target = copy.deepcopy(self.critic)\r\n\r\n self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=self.lr)\r\n self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=self.lr)\r\n\r\n def choose_action(self, s, deterministic=False):\r\n s = torch.unsqueeze(torch.tensor(s, dtype=torch.float), 0)\r\n a, _ = self.actor(s, deterministic, False) # When choosing actions, we do not need to compute log_pi\r\n return a.data.numpy().flatten()\r\n\r\n def learn(self, relay_buffer):\r\n batch_s, batch_a, batch_r, batch_s_, batch_dw = relay_buffer.sample(self.batch_size) # Sample a batch\r\n\r\n with torch.no_grad():\r\n batch_a_, log_pi_ = self.actor(batch_s_) # a' from the current policy\r\n # Compute target Q\r\n target_Q1, target_Q2 = self.critic_target(batch_s_, batch_a_)\r\n target_Q = batch_r + self.GAMMA * (1 - batch_dw) * (torch.min(target_Q1, target_Q2) - self.alpha * log_pi_)\r\n\r\n # Compute current Q\r\n current_Q1, current_Q2 = self.critic(batch_s, batch_a)\r\n # Compute critic loss\r\n critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(current_Q2, target_Q)\r\n # Optimize the critic\r\n self.critic_optimizer.zero_grad()\r\n critic_loss.backward()\r\n self.critic_optimizer.step()\r\n\r\n # Freeze critic networks so you don't waste computational effort\r\n for params in self.critic.parameters():\r\n params.requires_grad = False\r\n\r\n # Compute actor loss\r\n a, log_pi = self.actor(batch_s)\r\n Q1, Q2 = self.critic(batch_s, a)\r\n Q = torch.min(Q1, Q2)\r\n actor_loss = (self.alpha * log_pi - Q).mean()\r\n\r\n # Optimize the actor\r\n self.actor_optimizer.zero_grad()\r\n actor_loss.backward()\r\n self.actor_optimizer.step()\r\n\r\n # Unfreeze critic networks\r\n for params in self.critic.parameters():\r\n params.requires_grad = True\r\n\r\n # Update alpha\r\n if self.adaptive_alpha:\r\n # We learn log_alpha instead of alpha to ensure that alpha=exp(log_alpha)>0\r\n alpha_loss = -(self.log_alpha.exp() * (log_pi + self.target_entropy).detach()).mean()\r\n self.alpha_optimizer.zero_grad()\r\n alpha_loss.backward()\r\n self.alpha_optimizer.step()\r\n self.alpha = self.log_alpha.exp()\r\n\r\n # Softly update target networks\r\n for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):\r\n target_param.data.copy_(self.TAU * param.data + (1 - self.TAU) * target_param.data)\r\n\r\n\r\ndef evaluate_policy(env, agent):\r\n times = 3 # Perform three 
evaluations and calculate the average\r\n evaluate_reward = 0\r\n for _ in range(times):\r\n s = env.reset()\r\n done = False\r\n episode_reward = 0\r\n while not done:\r\n a = agent.choose_action(s, deterministic=True) # We use the deterministic policy during the evaluating\r\n s_, r, done, _ = env.step(a)\r\n episode_reward += r\r\n s = s_\r\n evaluate_reward += episode_reward\r\n\r\n return int(evaluate_reward / times)\r\n\r\n\r\ndef reward_adapter(r, env_index):\r\n if env_index == 0: # Pendulum-v1\r\n r = (r + 8) / 8\r\n elif env_index == 1: # BipedalWalker-v3\r\n if r <= -100:\r\n r = -1\r\n return r\r\n\r\n\r\nif __name__ == '__main__':\r\n env_name = ['Pendulum-v1', 'BipedalWalker-v3', 'HalfCheetah-v2', 'Hopper-v2', 'Walker2d-v2']\r\n env_index = 0\r\n env = gym.make(env_name[env_index])\r\n env_evaluate = gym.make(env_name[env_index]) # When evaluating the policy, we need to rebuild an environment\r\n number = 1\r\n seed = 0\r\n # Set random seed\r\n env.seed(seed)\r\n env.action_space.seed(seed)\r\n env_evaluate.seed(seed)\r\n env_evaluate.action_space.seed(seed)\r\n np.random.seed(seed)\r\n torch.manual_seed(seed)\r\n\r\n state_dim = env.observation_space.shape[0]\r\n action_dim = env.action_space.shape[0]\r\n max_action = float(env.action_space.high[0])\r\n max_episode_steps = env._max_episode_steps # Maximum number of steps per episode\r\n print(\"env={}\".format(env_name[env_index]))\r\n print(\"state_dim={}\".format(state_dim))\r\n print(\"action_dim={}\".format(action_dim))\r\n print(\"max_action={}\".format(max_action))\r\n print(\"max_episode_steps={}\".format(max_episode_steps))\r\n\r\n agent = SAC(state_dim, action_dim, max_action)\r\n replay_buffer = ReplayBuffer(state_dim, action_dim)\r\n # Build a tensorboard\r\n writer = SummaryWriter(log_dir='runs/SAC/SAC_env_{}_number_{}_seed_{}'.format(env_name[env_index], number, seed))\r\n\r\n max_train_steps = 3e6 # Maximum number of training steps\r\n random_steps = 25e3 # Take the random actions in the beginning for the better exploration\r\n evaluate_freq = 5e3 # Evaluate the policy every 'evaluate_freq' steps\r\n evaluate_num = 0 # Record the number of evaluations\r\n evaluate_rewards = [] # Record the rewards during the evaluating\r\n total_steps = 0 # Record the total steps during the training\r\n\r\n while total_steps < max_train_steps:\r\n s = env.reset()\r\n episode_steps = 0\r\n done = False\r\n while not done:\r\n episode_steps += 1\r\n if total_steps < random_steps: # Take the random actions in the beginning for the better exploration\r\n a = env.action_space.sample()\r\n else:\r\n a = agent.choose_action(s)\r\n s_, r, done, _ = env.step(a)\r\n r = reward_adapter(r, env_index) # Adjust rewards for better performance\r\n # When dead or win or reaching the max_episode_steps, done will be Ture, we need to distinguish them;\r\n # dw means dead or win,there is no next state s';\r\n # but when reaching the max_episode_steps,there is a next state s' actually.\r\n if done and episode_steps != max_episode_steps:\r\n dw = True\r\n else:\r\n dw = False\r\n replay_buffer.store(s, a, r, s_, dw) # Store the transition\r\n s = s_\r\n\r\n if total_steps >= random_steps:\r\n agent.learn(replay_buffer)\r\n\r\n # Evaluate the policy every 'evaluate_freq' steps\r\n if (total_steps + 1) % evaluate_freq == 0:\r\n evaluate_num += 1\r\n evaluate_reward = evaluate_policy(env_evaluate, agent)\r\n evaluate_rewards.append(evaluate_reward)\r\n print(\"evaluate_num:{} \\t evaluate_reward:{}\".format(evaluate_num, evaluate_reward))\r\n 
writer.add_scalar('step_rewards_{}'.format(env_name[env_index]), evaluate_reward, global_step=total_steps)\r\n # Save the rewards\r\n if evaluate_num % 10 == 0:\r\n np.save('./data_train/SAC_env_{}_number_{}_seed_{}.npy'.format(env_name[env_index], number, seed), np.array(evaluate_rewards))\r\n\r\n total_steps += 1\r\n"
] | [
[
"torch.cat",
"torch.zeros",
"torch.tanh",
"torch.no_grad",
"torch.tensor",
"numpy.zeros",
"torch.nn.functional.softplus",
"torch.optim.Adam",
"numpy.log",
"numpy.random.choice",
"torch.min",
"torch.exp",
"torch.nn.Linear",
"torch.nn.functional.mse_loss",
"torch.distributions.Normal",
"numpy.array",
"numpy.random.seed",
"torch.manual_seed",
"torch.clamp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
tliu68/graspologic | [
"d1cf7678bc63ab9769828a82a90f66bf1dfa0eff"
] | [
"graspologic/layouts/render.py"
] | [
"# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\nimport networkx as nx\nfrom typing import Any, Dict, List, Optional, Tuple\nfrom graspologic.layouts.classes import NodePosition\nimport matplotlib.pyplot as plt\n\n\ndef _calculate_x_y_domain(\n positions: List[NodePosition],\n) -> Tuple[Tuple[float, float], Tuple[float, float]]:\n \"\"\"calculate the overall x/y domain, converting to a square\n so we can have a consistent scale\n \"\"\"\n min_x = min_y = float(\"inf\")\n max_x = max_y = float(\"-inf\")\n for node_position in positions:\n min_x = min(min_x, node_position.x - node_position.size)\n max_x = max(max_x, node_position.x + node_position.size)\n min_y = min(min_y, node_position.y - node_position.size)\n max_y = max(max_y, node_position.y + node_position.size)\n\n x_delta = max_x - min_x\n y_delta = max_y - min_y\n max_delta = max(x_delta, y_delta)\n\n if max_delta == x_delta:\n difference = (max_delta - y_delta) / 2\n min_y = min_y - difference\n max_y = max_y + difference\n elif max_delta == y_delta:\n difference = (max_delta - x_delta) / 2\n min_x = min_x - difference\n max_x = max_x + difference\n\n return (min_x, max_x), (min_y, max_y)\n\n\ndef _scale_value(\n domain: Tuple[float, float], data_range: Tuple[float, float], value: float\n) -> float:\n return data_range[0] + (data_range[1] - data_range[0]) * (\n (value - domain[0]) / (domain[1] - domain[0])\n )\n\n\ndef _scale_node_sizes_for_rendering(\n sizes: List[float],\n spatial_domain: Tuple[float, float],\n spatial_range: Tuple[float, float],\n dpi: float,\n):\n \"\"\"scale the size again to match the rendered pixel range\n we would expect this to be handled by the underlying viz framework, but it isn't, size is specified\n as the bounding box in points of the rendered output, so we need to transform our size to match.\n\n There are 72 points per inch. 
Multiplying by 72 / dpi converts from pixels to points.\n \"\"\"\n spatial_domain = (0, spatial_domain[1] - spatial_domain[0])\n return list(\n map(\n lambda s: _scale_value(spatial_domain, spatial_range, s * 2 * 72.0 / dpi)\n ** 2,\n sizes,\n )\n )\n\n\ndef _draw_graph(\n graph: nx.Graph,\n positions: List[NodePosition],\n node_colors: Dict[Any, str],\n vertex_alpha: float,\n edge_line_width: float,\n edge_alpha: float,\n figure_width: float,\n figure_height: float,\n vertex_line_width: float = 0.01,\n vertex_shape: str = \"o\",\n arrows: bool = False,\n dpi: int = 100,\n):\n if len(positions) != len(graph.nodes()):\n raise ValueError(\n f\"The number of positions provided {len(positions)} is not the same as the \"\n f\"number of nodes in the graph {len(graph.nodes())}\"\n )\n for position in positions:\n if position.node_id not in graph:\n raise ValueError(\n f\"The node position provided for {position.node_id} references a node \"\n f\"not found in our graph\"\n )\n\n plt.rcParams[\"figure.dpi\"] = dpi # TODO, test at different dpi\n\n plt.clf()\n figure = plt.gcf()\n ax = plt.gca()\n ax.set_axis_off()\n figure.set_size_inches(figure_width, figure_height)\n window_extent_width = ax.get_window_extent().width\n\n x_domain, y_domain = _calculate_x_y_domain(positions)\n\n position_map = {position.node_id: position for position in positions}\n node_positions = {\n position.node_id: (position.x, position.y) for position in positions\n }\n\n vertices = []\n vertex_sizes = []\n node_color_list = []\n edge_color_list = []\n\n for node in graph.nodes():\n vertices.append(node)\n vertex_sizes.append(position_map[node].size)\n node_color_list.append(node_colors[node])\n\n vertex_sizes = _scale_node_sizes_for_rendering(\n vertex_sizes, x_domain, (0, window_extent_width), dpi\n )\n\n for source, target in graph.edges():\n edge_color_list.append(node_colors[source])\n\n ax.set_xbound(x_domain)\n ax.set_xlim(x_domain)\n ax.set_ybound(y_domain)\n ax.set_ylim(y_domain)\n\n nx.draw_networkx_edges(\n graph,\n pos=node_positions,\n alpha=edge_alpha,\n width=edge_line_width,\n edge_color=edge_color_list,\n arrows=arrows,\n ax=ax,\n )\n\n nx.draw_networkx_nodes(\n graph,\n pos=node_positions,\n nodelist=vertices,\n node_color=node_color_list,\n alpha=vertex_alpha,\n linewidths=vertex_line_width,\n node_size=vertex_sizes,\n node_shape=vertex_shape,\n ax=ax,\n )\n\n\ndef show_graph(\n graph: nx.Graph,\n positions: List[NodePosition],\n node_colors: Dict[Any, str],\n vertex_line_width: float = 0.01,\n vertex_alpha: float = 0.55,\n edge_line_width: float = 0.5,\n edge_alpha: float = 0.02,\n figure_width: float = 15.0,\n figure_height: float = 15.0,\n light_background: bool = True,\n vertex_shape: str = \"o\",\n arrows: bool = False,\n dpi: int = 500,\n):\n \"\"\"\n Renders and displays a graph.\n\n Attempts to display it via the platform-specific display library such as TkInter\n\n Edges will be displayed with the same color as the source node.\n\n Parameters\n ----------\n graph : nx.Graph\n The graph to be displayed. If the networkx Graph contains only nodes, no\n edges will be displayed.\n positions : List[:class:`graspologic.layouts.NodePosition`]\n The positionsfor every node in the graph.\n node_colors : Dict[Any, str]\n A mapping of node id to colors. Must contain an entry for every node in the\n graph.\n vertex_line_width : float\n Line width of vertex outline. Default is``0.01``.\n vertex_alpha : float\n Alpha (transparency) of vertices in visualization. 
Default is``0.55``.\n edge_line_width : float\n Line width of edge. Default is``0.5``.\n edge_alpha : float\n Alpha (transparency) of edges in visualization. Default is``0.02``.\n figure_width : float\n Width of figure. Default is ``15.0``.\n figure_height : float\n eight of figure. Default is``15.0``.\n light_background : bool\n Light background or dark background. Default is``True``.\n vertex_shape : str\n Matplotlib Marker for the vertex shape. See\n `https://matplotlib.org/api/markers_api.html <https://matplotlib.org/api/markers_api.html>`_\n for a list of allowed values . Default is ``o`` (i.e: a circle)\n arrows : bool\n For directed graphs, if ``True``, draw arrow heads. Default is ``False``\n dpi : float\n Dots per inch of the figure. Default is ``500``.\n \"\"\"\n ax = plt.gca()\n if light_background:\n facecolor = ax.get_facecolor()\n else:\n facecolor = \"#030303\"\n\n _draw_graph(\n graph=graph,\n positions=positions,\n node_colors=node_colors,\n vertex_line_width=vertex_line_width,\n vertex_alpha=vertex_alpha,\n edge_line_width=edge_line_width,\n edge_alpha=edge_alpha,\n figure_width=figure_width,\n figure_height=figure_height,\n vertex_shape=vertex_shape,\n arrows=arrows,\n dpi=dpi,\n )\n plt.gcf().set_facecolor(facecolor)\n plt.show()\n plt.close(\"all\")\n\n\ndef save_graph(\n output_path: str,\n graph: nx.Graph,\n positions: List[NodePosition],\n node_colors: Dict[Any, str],\n vertex_line_width: float = 0.01,\n vertex_alpha: float = 0.55,\n edge_line_width: float = 0.5,\n edge_alpha: float = 0.02,\n figure_width: float = 15.0,\n figure_height: float = 15.0,\n light_background: bool = True,\n vertex_shape: str = \"o\",\n arrows: bool = False,\n dpi: int = 100,\n):\n \"\"\"\n Renders a graph to file.\n\n Edges will be displayed with the same color as the source node.\n\n Parameters\n ----------\n output_path : str\n The output path to write the rendered graph to. Suggested file extension is\n ``.png``.\n graph : nx.Graph\n The graph to be displayed. If the networkx Graph contains only nodes, no\n edges will be displayed.\n positions : List[:class:`graspologic.layouts.NodePosition`]\n The positionsfor every node in the graph.\n node_colors : Dict[Any, str]\n A mapping of node id to colors. Must contain an entry for every node in the\n graph.\n vertex_line_width : float\n Line width of vertex outline. Default is``0.01``.\n vertex_alpha : float\n Alpha (transparency) of vertices in visualization. Default is``0.55``.\n edge_line_width : float\n Line width of edge. Default is``0.5``.\n edge_alpha : float\n Alpha (transparency) of edges in visualization. Default is``0.02``.\n figure_width : float\n Width of figure. Default is ``15.0``.\n figure_height : float\n eight of figure. Default is``15.0``.\n light_background : bool\n Light background or dark background. Default is``True``.\n vertex_shape : str\n Matplotlib Marker for the vertex shape. See\n `https://matplotlib.org/api/markers_api.html <https://matplotlib.org/api/markers_api.html>`_\n for a list of allowed values . Default is ``o`` (i.e: a circle)\n arrows : bool\n For directed graphs, if ``True``, draw arrow heads. Default is ``False``\n dpi : float\n Dots per inch of the figure. 
Default is ``100``.\n\n Returns\n -------\n\n \"\"\"\n _draw_graph(\n graph=graph,\n positions=positions,\n node_colors=node_colors,\n vertex_line_width=vertex_line_width,\n vertex_alpha=vertex_alpha,\n edge_line_width=edge_line_width,\n edge_alpha=edge_alpha,\n figure_width=figure_width,\n figure_height=figure_height,\n vertex_shape=vertex_shape,\n arrows=arrows,\n dpi=dpi,\n )\n ax = plt.gca()\n if light_background:\n facecolor = ax.get_facecolor()\n else:\n facecolor = \"#030303\"\n plt.savefig(output_path, facecolor=facecolor)\n plt.close(\"all\")\n"
] | [
[
"matplotlib.pyplot.gca",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.close",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
playma/stockAI-trading_calendars | [
"97aa9451961b000ef38e791c394c450015f4724d"
] | [
"trading_calendars/exchange_calendar_twse.py"
] | [
"from datetime import time\nimport pandas as pd\nfrom pytz import timezone\nfrom .precomputed_trading_calendar import PrecomputedTradingCalendar\n\nprecomputed_taiwan_holidays = pd.to_datetime([\n \"1999-01-01\",\n \"1999-02-10\",\n \"1999-02-11\",\n \"1999-02-12\",\n \"1999-02-15\",\n \"1999-02-16\"\n # TODO\n])\n\n\nclass TWSEExchangeCalendar(PrecomputedTradingCalendar):\n \"\"\"\n Exchange calendar for the Taiwan Stock Exchange (TWSE).\n\n Open time: 9:00 Asia/Taipei\n Close time: 13:30 Asia/Taipei\n\n Due to the complexity around the Taiwan exchange holidays, we are\n hardcoding a list of holidays covering 1999-2025, inclusive. There are\n no known early closes or late opens.\n \"\"\"\n\n name = \"TWSE\"\n tz = timezone(\"Asia/Taipei\")\n open_times = (\n (None, time(9, 1)),\n )\n close_times = (\n (None, time(13, 30)),\n )\n\n @property\n def precomputed_holidays(self):\n return precomputed_taiwan_holidays\n"
] | [
[
"pandas.to_datetime"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
lucgiffon/psm-nets | [
"dec43c26281febf6e5c8b8f42bfb78098ae7101d",
"dec43c26281febf6e5c8b8f42bfb78098ae7101d",
"dec43c26281febf6e5c8b8f42bfb78098ae7101d",
"dec43c26281febf6e5c8b8f42bfb78098ae7101d",
"dec43c26281febf6e5c8b8f42bfb78098ae7101d"
] | [
"code/scripts/2020/04/11_12_fine_tune_palminized.py",
"code/visualization/2020/04/0_0_compression_tucker_sparse_facto_select_lr.py",
"code/palmnet/layers/tt_layer_dense.py",
"code/scripts/sandbox/test_bug_faust2.py",
"code/scripts/2020/02/0_1_random_sparse_facto.py"
] | [
"\"\"\"\nThis script finds a palminized model with given arguments then finetune it.\n\nUsage:\n script.py --input-dir path [-h] [-v|-vv] [--seed int] [--train-val-split float] [--keep-last-layer] [--lr float] [--use-clr policy] [--min-lr float --max-lr float] [--epoch-step-size int] [--nb-epoch int] [--only-mask] [--tb] (--mnist|--svhn|--cifar10|--cifar100|--test-data) [--cifar100-resnet50|--cifar100-resnet20|--mnist-500|--mnist-lenet|--test-model|--cifar10-vgg19|--cifar100-vgg19|--svhn-vgg19] --sparsity-factor=int [--nb-iteration-palm=int] [--delta-threshold=float] [--hierarchical] [--nb-factor=int]\n\nOptions:\n -h --help Show this screen.\n -vv Set verbosity to debug.\n -v Set verbosity to info.\n --seed int The seed for the experiments\n --input-dir path Path to input directory where to find previously generated results.\n --tb Tell if tensorboard should be printed.\n --lr float Flat lr to be used (Overidable)\n --min-lr float Tells the min reasonable lr (Overide everything else).\n --max-lr float Tells the max reasonable lr (Overide everything else).\n --nb-epoch int Number of epochs of training (Overide everything else).\n --epoch-step-size int Number of epochs for an half cycle of CLR.\n --use-clr policy Tell to use clr. Policy can be \"triangular\" or \"triangular2\" (see Cyclical learning rate)\n --keep-last-layer Do not compress classification layer.\n --train-val-split float Tells the proportion of validation data. If not specified, validation data is test data.\n\n\nDataset:\n --mnist Use Mnist dataset.\n --svhn Use svhn dataset.\n --cifar10 Use cifar10 dataset.\n --cifar100 Use cifar100 dataset.\n --test-data Use test datasset (that is actually mnist).\n\nModel:\n --mnist-lenet Use model lenet pretrained for mnist.\n --test-model Use test, small, model.\n --cifar10-vgg19 Use model vgg19 pretrained on cifar10.\n --cifar100-vgg19 Use model vgg19 pretrained on cifar100.\n --svhn-vgg19 Use model vgg19 pretrained on svhn.\n --mnist-500 Use model fc 500 hidden units pretrained on mnist.\n --cifar100-resnet50 Use model resnet50 pretrained on cifar100.\n --cifar100-resnet20 Use model resnet20 pretrained on cifar100.\n\nPalm-Specifc options:\n --sparsity-factor=int Integer coefficient from which is computed the number of value in each factor.\n --nb-iteration-palm=int Number of iterations in the inner palm4msa calls. [default: 300]\n --delta-threshold=float Threshold value before stopping palm iterations. [default: 1e-6]\n --hierarchical Tells if palm should use the hierarchical euristic or not. 
Muhc longer but better approximation results.\n --nb-factor=int Tells the number of sparse factor for palm\n --only-mask Use only sparsity mask given by palm but re-initialize weights.\n\"\"\"\nimport logging\nimport os\nimport pickle\nimport pandas as pd\nimport sys\nfrom collections import defaultdict\nfrom sklearn.model_selection import train_test_split\nimport time\nfrom copy import deepcopy\nimport keras\nfrom keras.engine import Model, InputLayer\nimport signal\nimport docopt\nfrom scipy.sparse import coo_matrix\nfrom palmnet.utils import CyclicLR\n\nfrom palmnet.core.palminizer import Palminizer\nfrom palmnet.core.palminizable import Palminizable\nfrom palmnet.data import Mnist, Test, Svhn, Cifar100, Cifar10\n# from palmnet.layers.sparse_tensor import SparseFactorisationDense#, SparseFactorisationConv2DDensify\nfrom palmnet.layers.sparse_facto_conv2D_masked import SparseFactorisationConv2D\nfrom palmnet.layers.sparse_facto_dense_masked import SparseFactorisationDense\nfrom palmnet.utils import get_sparsity_pattern, insert_layer_nonseq, timeout_signal_handler, get_lr_metric, CSVLoggerByBatch\nfrom palmnet.experiments.utils import ParameterManagerPalminize, ParameterManagerPalminizeFinetune, ResultPrinter\nfrom skluc.utils import logger, log_memory_usage\nfrom keras.layers import Dense, Conv2D\nimport numpy as np\nimport keras.backend as K\nfrom palmnet.core import palminizable\nfrom palmnet.core.palminizer import Palminizer\npalminizable.Palminizer = Palminizer\nimport sys\nsys.modules[\"palmnet.core.palminize\"] = palminizable\nlst_results_header = [\n \"test_accuracy_finetuned_model\"\n]\n\ndef get_idx_last_dense_layer(model):\n idx_last_dense_layer = -1\n for i, layer in enumerate(model.layers):\n if isinstance(layer, Dense):\n idx_last_dense_layer = i\n if idx_last_dense_layer == -1:\n logger.warning(\"No dense layer found\")\n return idx_last_dense_layer\n\ndef replace_layers_with_sparse_facto(model, dct_name_facto):\n new_model = deepcopy(model)\n log_memory_usage(\"After copy model\")\n lst_tpl_str_bool_new_model_layers = []\n dct_new_layer_attr = defaultdict(lambda: {})\n\n idx_last_dense_layer = get_idx_last_dense_layer(new_model) if paraman[\"--keep-last-layer\"] else -1\n\n for i, layer in enumerate(new_model.layers):\n layer_name = layer.name\n sparse_factorization = dct_name_facto[layer_name]\n logger.info('Prepare layer {}'.format(layer.name))\n # if sparse_factorization != (None, None) and (i != idx_last_dense_layer and paraman[\"--keep-last-layer\"]):\n if sparse_factorization != (None, None) and not (i == idx_last_dense_layer and paraman[\"--keep-last-layer\"]):\n # scaling = 1.\n if paraman[\"--only-mask\"]:\n scaling = []\n else:\n scaling = [np.array(sparse_factorization[0])[None]]\n # factors_sparse = [coo_matrix(fac.toarray()) for fac in sparse_factorization[1].get_list_of_factors()]\n factors = [fac.toarray() for fac in sparse_factorization[1].get_list_of_factors()]\n # sparsity_patterns = [get_sparsity_pattern(w.toarray()) for w in factors]\n sparsity_patterns = [get_sparsity_pattern(w) for w in factors]\n nb_val_sparse_factors = np.sum([np.sum(fac) for fac in sparsity_patterns])\n # factor_data_sparse = [f.data for f in factors_sparse]\n factor_data = factors\n reconstructed_matrix = np.linalg.multi_dot(factors) * scaling[0]\n nb_val_full_matrix = np.prod(reconstructed_matrix.shape)\n\n if nb_val_full_matrix <= nb_val_sparse_factors:\n logger.info(\"Less values in full matrix than factorization. Keep full matrix. 
{} <= {}\".format(nb_val_full_matrix, nb_val_sparse_factors))\n dct_new_layer_attr[layer_name][\"modified\"] = False\n lst_tpl_str_bool_new_model_layers.append((layer_name, False))\n dct_new_layer_attr[layer_name][\"layer_obj\"] = layer\n continue\n\n base_palminized_matrix = np.reshape(layer.get_weights()[0], reconstructed_matrix.shape)\n diff = np.linalg.norm(base_palminized_matrix - reconstructed_matrix) / np.linalg.norm(base_palminized_matrix)\n # assert np.allclose(diff, 0, atol=1e-5), \"Reconstructed is different than base\"\n\n # create new layer\n if isinstance(layer, Dense):\n logger.debug(\"Dense layer treatment\")\n hidden_layer_dim = layer.units\n activation = layer.activation\n regularizer = layer.kernel_regularizer\n replacing_layer = SparseFactorisationDense(use_scaling=not paraman[\"--only-mask\"], units=hidden_layer_dim, sparsity_patterns=sparsity_patterns, use_bias=layer.use_bias, activation=activation, kernel_regularizer=regularizer)\n replacing_weights = scaling + factor_data + [layer.get_weights()[-1]] if layer.use_bias else []\n # new_model = insert_layer_nonseq(new_model, layer_name, lambda: replacing_layer, position=\"replace\")\n # replacing_layer.set_weights(replacing_weights)\n\n elif isinstance(layer, Conv2D):\n logger.debug(\"Conv2D layer treatment\")\n nb_filters = layer.filters\n strides = layer.strides\n kernel_size = layer.kernel_size\n activation = layer.activation\n padding = layer.padding\n regularizer = layer.kernel_regularizer\n replacing_layer = SparseFactorisationConv2D(use_scaling=not paraman[\"--only-mask\"], strides=strides, filters=nb_filters, kernel_size=kernel_size, sparsity_patterns=sparsity_patterns, use_bias=layer.use_bias, activation=activation, padding=padding, kernel_regularizer=regularizer)\n replacing_weights = scaling + factor_data + [layer.get_weights()[-1]] if layer.use_bias else []\n # new_model = insert_layer_nonseq(new_model, layer_name, lambda: replacing_layer, position=\"replace\")\n # replacing_layer.set_weights(replacing_weights)\n\n else:\n raise ValueError(\"unknown layer class\")\n\n dct_new_layer_attr[layer_name][\"layer_weights\"] = replacing_weights\n dct_new_layer_attr[layer_name][\"sparsity_pattern\"] = sparsity_patterns\n dct_new_layer_attr[layer_name][\"layer_obj\"] = replacing_layer\n dct_new_layer_attr[layer_name][\"modified\"] = True\n\n lst_tpl_str_bool_new_model_layers.append((layer_name, True))\n else:\n dct_new_layer_attr[layer_name][\"modified\"] = False\n lst_tpl_str_bool_new_model_layers.append((layer_name, False))\n dct_new_layer_attr[layer_name][\"layer_obj\"] = layer\n\n log_memory_usage(\"After prepare all sparse layers \")\n\n network_dict = {'input_layers_of': defaultdict(lambda: []), 'new_output_tensor_of': defaultdict(lambda: [])}\n\n if not isinstance(new_model.layers[0], InputLayer):\n new_model = Model(input=new_model.input, output=new_model.output)\n\n # Set the input layers of each layer\n for layer in new_model.layers:\n # each layer is set as `input` layer of all its outbound layers\n for node in layer._outbound_nodes:\n outbound_layer_name = node.outbound_layer.name\n # if outbound_layer_name not in network_dict\n # network_dict['input_layers_of'].update({outbound_layer_name: [layer.name]})\n network_dict['input_layers_of'][outbound_layer_name].append(layer.name)\n\n # Set the output tensor of the input layer\n network_dict['new_output_tensor_of'].update(\n {new_model.layers[0].name: new_model.input})\n\n for layer in new_model.layers[1:]:\n log_memory_usage(\"Before layer 
{}\".format(layer.name))\n layer_name = layer.name\n\n layer_input = [network_dict['new_output_tensor_of'][layer_aux] for layer_aux in network_dict['input_layers_of'][layer.name]]\n\n if len(layer_input) == 1:\n layer_input = layer_input[0]\n\n proxy_new_layer_attr = dct_new_layer_attr[layer_name]\n\n if proxy_new_layer_attr[\"modified\"]:\n x = layer_input\n\n new_layer = proxy_new_layer_attr[\"layer_obj\"] # type: keras.layers.Layer\n new_layer.name = '{}_{}'.format(layer.name,\n new_layer.name)\n x = new_layer(x)\n\n if not paraman[\"--only-mask\"]:\n if layer.use_bias:\n reconstructed_matrix = np.linalg.multi_dot(proxy_new_layer_attr[\"layer_weights\"][1:-1]) * proxy_new_layer_attr[\"layer_weights\"][0]\n else:\n reconstructed_matrix = np.linalg.multi_dot(proxy_new_layer_attr[\"layer_weights\"][1:]) * proxy_new_layer_attr[\"layer_weights\"][0]\n\n base_palminized_matrix = np.reshape(layer.get_weights()[0], reconstructed_matrix.shape)\n diff = np.linalg.norm(base_palminized_matrix - reconstructed_matrix) / np.linalg.norm(base_palminized_matrix)\n # assert np.allclose(diff, 0, atol=1e-5), \"Reconstructed is different than base\"\n del base_palminized_matrix\n\n new_layer.set_weights(proxy_new_layer_attr[\"layer_weights\"])\n\n else:\n masked_weights = []\n i = 0\n for w in new_layer.get_weights():\n if len(w.shape) > 1:\n new_weight = w * proxy_new_layer_attr[\"sparsity_pattern\"][i]\n i += 1\n else:\n new_weight = w\n masked_weights.append(new_weight)\n new_layer.set_weights(masked_weights)\n\n logger.info('Layer {} modified into {}'.format(layer.name, new_layer.name))\n else:\n x = layer(layer_input)\n logger.info('Layer {} unmodified'.format(layer.name))\n\n network_dict['new_output_tensor_of'].update({layer.name: x})\n\n del dct_new_layer_attr[layer_name]\n\n new_model = Model(inputs=new_model.inputs, outputs=x)\n\n return new_model\n\ndef main():\n\n if paraman[\"--mnist-lenet\"]:\n param_train_dataset = Mnist.get_model_param_training()\n elif paraman[\"--mnist-500\"]:\n param_train_dataset = Mnist.get_model_param_training(\"mnist_500\")\n elif paraman[\"--cifar10-vgg19\"]:\n param_train_dataset = Cifar10.get_model_param_training()\n elif paraman[\"--cifar100-vgg19\"]:\n param_train_dataset = Cifar100.get_model_param_training()\n elif paraman[\"--cifar100-resnet20\"] or paraman[\"--cifar100-resnet50\"]:\n param_train_dataset = Cifar100.get_model_param_training(\"cifar100_resnet\")\n elif paraman[\"--svhn-vgg19\"]:\n param_train_dataset = Svhn.get_model_param_training()\n elif paraman[\"--test-model\"]:\n param_train_dataset = Test.get_model_param_training()\n else:\n raise NotImplementedError(\"No dataset specified.\")\n\n (x_train, y_train), (x_test, y_test) = paraman.get_dataset().load_data()\n\n if paraman[\"--mnist-500\"]:\n x_test = np.reshape(x_test, (-1, 784))\n x_train = np.reshape(x_train, (-1, 784))\n\n if paraman[\"--train-val-split\"] is not None:\n x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=paraman[\"--train-val-split\"], random_state=paraman[\"--seed\"])\n\n else:\n x_val, y_val = x_test, y_test\n\n # noinspection PyUnreachableCode\n if os.path.exists(paraman[\"output_file_notfinishedprinter\"]):\n df = pd.read_csv(paraman[\"output_file_resprinter\"])\n init_nb_epoch = pd.read_csv(paraman[\"output_file_csvcbprinter\"])[\"epoch\"].max() -1\n logger.debug(\"Loaded results \" + str(df))\n base_score = float(df[\"base_score\"])\n before_finetuned_score = float(df[\"before_finetuned_score\"])\n palminized_score = 
float(df[\"palminized_score\"])\n actual_learning_rate = float(df[\"actual-lr\"])\n fine_tuned_model = keras.models.load_model(paraman[\"output_file_modelprinter\"],custom_objects={'SparseFactorisationConv2D':SparseFactorisationConv2D,\n \"SparseFactorisationDense\": SparseFactorisationDense})\n else:\n init_nb_epoch = 0\n\n mypalminizedmodel = pickle.load(open(paraman[\"input_model_path\"], \"rb\"))\n log_memory_usage(\"After load mypalminized model\")\n base_model = mypalminizedmodel.base_model\n dct_name_facto = mypalminizedmodel.sparsely_factorized_layers\n base_score = base_model.evaluate(x_test, y_test, verbose=0)[1]\n print(base_score)\n palminized_model = mypalminizedmodel.compressed_model\n palminized_score = palminized_model.evaluate(x_test, y_test, verbose=1)[1]\n print(palminized_score)\n fine_tuned_model = replace_layers_with_sparse_facto(palminized_model, dct_name_facto)\n log_memory_usage(\"After get_finetuned_model\")\n # fine_tuned_model = palminized_model\n\n input_by_shape = {(32,32,3): x_test[:3]}\n\n # for i, layer in enumerate(palminized_model.layers[1:]):\n # i = i+1\n # print(\"Start with layer {}\".format(layer.name))\n # dense_palm_layer = layer\n # sparsefacto_palm_layer = fine_tuned_model.layers[i]\n #\n # dense_layer_output_function = K.function([dense_palm_layer.input],\n # [dense_palm_layer.output])\n #\n # sparsefacto_layer_outut_function = K.function([sparsefacto_palm_layer.get_input_at(-1)],\n # [sparsefacto_palm_layer.get_output_at(-1)])\n #\n # necessary_input_shapes = [tuple(inpt.shape.as_list()[1:]) for inpt in dense_layer_output_function.inputs]\n # input_data_layer = [input_by_shape[shap] for shap in necessary_input_shapes]\n #\n # dense_layer_output = dense_layer_output_function(input_data_layer)[0]\n # sparsefacto_layer_output = sparsefacto_layer_outut_function(input_data_layer)[0]\n #\n # # try:\n # assert np.allclose(np.linalg.norm(dense_layer_output - sparsefacto_layer_output) / np.linalg.norm(dense_layer_output), 0, atol=1e-5)\n # # except:\n # # print(\"error\")\n # input_by_shape[dense_layer_output.shape[1:]] = dense_layer_output\n\n params_optimizer = param_train_dataset.params_optimizer\n\n params_optimizer[\"lr\"] = paraman[\"--lr\"] if paraman[\"--lr\"] is not None else params_optimizer[\"lr\"]\n\n fine_tuned_model.compile(loss=param_train_dataset.loss,\n optimizer=param_train_dataset.optimizer(**params_optimizer),\n metrics=['categorical_accuracy'])\n # metrics=['categorical_accuracy', get_lr_metric(param_train_dataset.optimizer)])\n\n before_finetuned_score = fine_tuned_model.evaluate(x_test, y_test, verbose=1)[1]\n print(before_finetuned_score)\n actual_learning_rate = K.eval(fine_tuned_model.optimizer.lr)\n\n # results must be already printed once in case process is killed afterward\n dct_results = {\n \"actual-lr\": actual_learning_rate,\n \"finetuned_score\": None,\n \"before_finetuned_score\": before_finetuned_score,\n \"base_score\": base_score,\n \"palminized_score\": palminized_score,\n }\n resprinter.add(dct_results)\n resprinter.print()\n\n # if paraman[\"--hierarchical\"]:\n # if not paraman[\"--only-mask\"]:\n # assert before_finetuned_score == palminized_score, \\\n # \"the reconstructed model with sparse facto should equal in perf to the reconstructed model with dense product. 
{} != {}\".format(before_finetuned_score, palminized_score)\n # else: # small fix for a bug where when I wasn't using hierarchical palm returned a matrix that wasn't multiplied by lambda\n # # this should pass until results are generated without bug..\n # assert before_finetuned_score != palminized_score, \\\n # \"the reconstructed model with sparse facto should equal in perf to the reconstructed model with dense product. {} != {}\".format(before_finetuned_score, palminized_score)\n fine_tuned_model.summary()\n\n call_backs = []\n\n model_checkpoint_callback = keras.callbacks.ModelCheckpoint(str(paraman[\"output_file_modelprinter\"]),\n monitor='val_loss',\n verbose=0, save_best_only=False,\n save_weights_only=False, mode='auto', period=1)\n call_backs.append(model_checkpoint_callback)\n if paraman[\"--tb\"]:\n tbCallBack = keras.callbacks.TensorBoard(log_dir=str(paraman[\"output_file_tensorboardprinter\"]), histogram_freq=20, write_graph=False, write_images=False, batch_size=param_train_dataset.batch_size, write_grads=True, update_freq=\"epoch\")\n call_backs.append(tbCallBack)\n\n actual_min_lr = param_train_dataset.min_lr if paraman[\"--min-lr\"] is None else paraman[\"--min-lr\"]\n actual_max_lr = param_train_dataset.max_lr if paraman[\"--max-lr\"] is None else paraman[\"--max-lr\"]\n if paraman[\"--use-clr\"] is not None:\n clr_cb = CyclicLR(base_lr=actual_min_lr,\n max_lr=actual_max_lr,\n step_size=(paraman[\"--epoch-step-size\"]*(x_train.shape[0] // param_train_dataset.batch_size)),\n logrange=True,\n mode=paraman[\"--use-clr\"])\n call_backs.append(clr_cb)\n\n csvcallback = CSVLoggerByBatch(str(paraman[\"output_file_csvcbprinter\"]), n_batch_between_display=100, separator=',', append=True)\n call_backs.append(csvcallback)\n\n finetuned_score = None\n\n open(paraman[\"output_file_notfinishedprinter\"], 'w').close()\n actual_number_of_epochs = (param_train_dataset.epochs if paraman[\"--nb-epoch\"] is None else paraman[\"--nb-epoch\"])\n actual_batch_size = param_train_dataset.batch_size\n history = fine_tuned_model.fit(param_train_dataset.image_data_generator.flow(x_train, y_train, batch_size=param_train_dataset.batch_size),\n epochs= actual_number_of_epochs - init_nb_epoch,\n # epochs=2 - init_nb_epoch,\n verbose=2,\n validation_data=(x_val, y_val),\n callbacks=param_train_dataset.callbacks + call_backs)\n\n finetuned_score = fine_tuned_model.evaluate(x_test, y_test, verbose=1)[1]\n print(finetuned_score)\n\n if os.path.exists(paraman[\"output_file_notfinishedprinter\"]):\n os.remove(paraman[\"output_file_notfinishedprinter\"])\n\n\n dct_results = {\n \"actual-batch-size\": actual_batch_size,\n \"actual-nb-epochs\": actual_number_of_epochs,\n \"actual-min-lr\":actual_min_lr,\n \"actual-max-lr\":actual_max_lr,\n \"actual-lr\": actual_learning_rate,\n \"finetuned_score\": finetuned_score,\n \"before_finetuned_score\": before_finetuned_score,\n \"base_score\": base_score,\n \"palminized_score\": palminized_score,\n }\n fine_tuned_model.save(str(paraman[\"output_file_modelprinter\"]))\n resprinter.add(dct_results)\n\n\nif __name__ == \"__main__\":\n logger.info(\"Command line: \" + \" \".join(sys.argv))\n log_memory_usage(\"Memory at startup\")\n arguments = docopt.docopt(__doc__)\n paraman = ParameterManagerPalminizeFinetune(arguments)\n initialized_results = dict((v, None) for v in lst_results_header)\n resprinter = ResultPrinter(output_file=paraman[\"output_file_resprinter\"])\n resprinter.add(initialized_results)\n resprinter.add(paraman)\n if paraman[\"-v\"] >= 2:\n 
logger.setLevel(level=logging.DEBUG)\n elif paraman[\"-v\"] >= 1:\n logger.setLevel(level=logging.INFO)\n else:\n logger.setLevel(level=logging.WARNING)\n\n logger.warning(\"Verbosity set to warning\")\n logger.info(\"Verbosity set to info\")\n logger.debug(\"Verbosity set to debug\")\n\n if not os.path.exists(paraman[\"output_file_notfinishedprinter\"]) and \\\n os.path.exists(paraman[\"output_file_resprinter\"]) and \\\n os.path.exists(paraman[\"output_file_modelprinter\"]):\n sys.exit(\"Expe {} already executed. Exit\".format(paraman[\"hash\"]))\n\n has_failed = False\n try:\n main()\n except Exception as e:\n has_failed = True\n raise e\n\n finally:\n failure_dict = {\n \"failure\": has_failed\n }\n\n resprinter.add(failure_dict)\n resprinter.print()",
"import pathlib\nimport pandas as pd\nfrom palmnet.visualization.utils import get_palminized_model_and_df, get_df\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport logging\nimport plotly.graph_objects as go\nimport plotly.express as px\nfrom pprint import pprint as pprint\n\n\nmpl_logger = logging.getLogger('matplotlib')\nmpl_logger.setLevel(logging.ERROR)\n\ndataset = {\n \"Cifar10\": \"--cifar10\",\n \"Cifar100\": \"--cifar100\",\n \"SVHN\": \"--svhn\",\n \"MNIST\": \"--mnist\"\n}\n\nbasemodels = {\n \"Cifar100\": [\"--cifar100-vgg19\", \"--cifar100-resnet20\", \"--cifar100-resnet50\"],\n \"Cifar10\": [\"--cifar10-vgg19\"],\n \"SVHN\": [\"--svhn-vgg19\"],\n \"MNIST\": [\"--mnist-lenet\"]\n}\n\n\ndef show_for_tucker():\n # compression_method = [\"tucker\", \"tensortrain\"]\n # df = df.apply(pd.to_numeric, errors='coerce')\n dct_config_lr = dict()\n lst_name_trace_low = list()\n\n for dataname in dataset:\n df_data = df[df[dataset[dataname]] == 1]\n for base_model_name in basemodels[dataname]:\n df_model = df_data[df_data[base_model_name] == 1]\n\n for index, row in df_model.iterrows():\n fig = go.Figure()\n\n csv_file = pathlib.Path(row[\"results_dir\"]) / row[\"output_file_csvcbprinter\"]\n df_csv = pd.read_csv(csv_file)\n win_size = 5\n lr_values = df_csv[\"lr\"].values\n lr_values_log = np.log10(lr_values)\n lr_rolling_mean = pd.Series(lr_values_log).rolling(window=win_size).mean().iloc[win_size - 1:].values\n loss_rolling_mean = df_csv[\"loss\"].rolling(window=win_size).mean().iloc[win_size - 1:].values\n\n if all(np.isnan(loss_rolling_mean)):\n continue\n\n delta_loss = (np.hstack([loss_rolling_mean, [0]]) - np.hstack([[0], loss_rolling_mean]))[1:-1]\n\n delta_loss_rolling_mean = pd.Series(delta_loss).rolling(window=win_size).mean().iloc[win_size - 1:].values\n lr_rolling_mean_2x = pd.Series(lr_rolling_mean).rolling(window=win_size).mean().iloc[win_size - 1:].values\n lr_rolling_mean_2x_exp = 10 ** lr_rolling_mean_2x\n\n # fig.add_trace(go.Scatter(x=lr_rolling_mean_exp, y=loss_rolling_mean, name=\"sp_fac {} - hiearchical {}\".format(row[\"--sparsity-factor\"], row[\"--hierarchical\"])))\n fig.add_trace(go.Scatter(x=lr_rolling_mean_2x_exp[:-1], y=delta_loss_rolling_mean, name=\"\"))\n\n argmin_loss = np.argmin(delta_loss_rolling_mean)\n val = lr_rolling_mean_2x_exp[:-1][argmin_loss]\n log_val = np.log10(val)\n approx = 10 ** np.around(log_val, decimals=0)\n\n sparsity = int(row[\"--sparsity-factor\"])\n hierarchical = bool(row[\"--hierarchical\"])\n str_hierarchical = \" H\" if hierarchical else \"\"\n try:\n nb_fac = int(row[\"--nb-factor\"])\n except ValueError:\n nb_fac = None\n\n name_trace = f\"tucker_sparse_facto-{dataset[dataname]}-{base_model_name}-Q={nb_fac}-K={sparsity}{str_hierarchical}\"\n print(len(delta_loss_rolling_mean), name_trace)\n if len(delta_loss_rolling_mean) < 10:\n lst_name_trace_low.append(name_trace)\n continue\n\n\n dct_config_lr[name_trace] = approx\n\n # title_str = \"{}:{} - {} - keep first :{}\".format(dataname, base_model_name, \"tucker\", keep_first)\n fig.update_layout(barmode='group',\n title=name_trace,\n xaxis_title=\"lr\",\n yaxis_title=\"loss\",\n xaxis_type=\"log\",\n xaxis={'type': 'category'},\n )\n # fig.show()\n pprint(dct_config_lr)\n pprint(lst_name_trace_low)\n\nif __name__ == \"__main__\":\n root_source_dir = pathlib.Path(\"/home/luc/PycharmProjects/palmnet/results/\")\n\n expe_path = \"2020/04/0_0_compression_tucker_sparse_facto_select_lr\"\n expe_path_errors = 
\"2020/04/0_0_compression_tucker_sparse_facto_select_lr_errors\"\n\n src_results_dir = root_source_dir / expe_path\n src_results_dir_errors = root_source_dir / expe_path_errors\n\n get_df_and_assign = lambda x: get_df(x).assign(results_dir=str(x))\n df = get_df_and_assign(src_results_dir)\n df_errors = get_df_and_assign(src_results_dir_errors)\n\n df = pd.concat([df, df_errors])\n\n df = df.dropna(subset=[\"failure\"])\n df = df[df[\"failure\"] == 0]\n df = df.drop(columns=\"oar_id\").drop_duplicates()\n\n root_output_dir = pathlib.Path(\"/home/luc/PycharmProjects/palmnet/reports/figures/\")\n output_dir = root_output_dir / expe_path / \"line_plots\"\n output_dir.mkdir(parents=True, exist_ok=True)\n\n\n show_for_tucker()",
"'''\nImplementation of the paper 'Tensorizing Neural Networks', Alexander Novikov, Dmitry Podoprikhin, Anton Osokin, Dmitry P. Vetrov, NIPS, 2015\nto compress a dense layer using Tensor Train factorization.\nTTLayer compute y = Wx + b in the compressed form.\n'''\nfrom keras import backend as K, activations, initializers\nfrom keras.engine.topology import Layer\nimport numpy as np\nimport tensorflow as tf\n\nfrom palmnet.utils import get_facto_for_channel_and_order, DCT_CHANNEL_PREDEFINED_FACTORIZATIONS\n\nclass TTLayerDense(Layer):\n \"\"\" Given x\\in\\mathbb{R}^{N}, b\\in\\mathbb{R}^{M}, W\\in\\mathbb{R}^{M\\times N}, y\\in\\mathbb{R}^{M}, compute y = Wx + b in the TT-format.\n\n Parameters:\n inp_modes: [n_1, n_2, ..., n_k] such that n_1*n_2*...*n_k=N\n out_modes: [m_1, m_2, ..., m_k] such that m_1*m_2*...m_k = M\n mat_ranks: [1, r_1, r_2, ..., r_k]\n\n \"\"\"\n\n def __init__(self, nb_units, mat_ranks, inp_modes=None, out_modes=None, mode=\"auto\", bias_initializer='zeros', kernel_initializer='glorot_normal', use_bias=True, activation=None, **kwargs):\n self.activation = activations.get(activation)\n self.use_bias = use_bias\n self.kernel_initializer = initializers.get(kernel_initializer)\n self.bias_initializer = initializers.get(bias_initializer)\n\n self.mode = mode\n\n self.mat_ranks = np.array(mat_ranks).astype(int)\n self.order = len(self.mat_ranks) - 1\n self.nb_units = nb_units\n\n if self.mode == \"auto\":\n self.inp_modes = inp_modes\n self.out_modes = out_modes\n elif self.mode == \"manual\":\n if inp_modes is None or out_modes is None:\n raise ValueError(\"inp_modes and out_modes should be specified in mode manual.\")\n self.inp_modes = np.array(inp_modes).astype(int)\n self.out_modes = np.array(out_modes).astype(int)\n self.num_dim = self.inp_modes.shape[0]\n\n if np.prod(self.out_modes) != self.nb_units:\n raise ValueError(\"out_modes product should equal to nb units: {} != {}\".format(np.prod(self.out_modes), self.nb_units))\n if self.inp_modes.shape[0] != self.out_modes.shape[0]:\n raise ValueError(\"The number of input and output dimensions should be the same.\")\n if self.order != self.out_modes.shape[0]:\n raise ValueError(\"Rank should have one more element than input/output shape\")\n for r in self.mat_ranks:\n if isinstance(r, np.integer) != True:\n raise ValueError(\"The rank should be an array of integer.\")\n else:\n raise ValueError(\"Unknown mode {}\".format(self.mode))\n\n super(TTLayerDense, self).__init__(**kwargs)\n\n self.image_max_size = -1\n\n def build(self, input_shape):\n inp_ch = input_shape[-1]\n if self.mode == \"auto\":\n self.inp_modes = get_facto_for_channel_and_order(inp_ch, self.order, dct_predefined_facto=DCT_CHANNEL_PREDEFINED_FACTORIZATIONS) if self.inp_modes is None else self.inp_modes\n self.out_modes = get_facto_for_channel_and_order(self.nb_units, self.order, dct_predefined_facto=DCT_CHANNEL_PREDEFINED_FACTORIZATIONS) if self.out_modes is None else self.out_modes\n\n assert np.prod(self.out_modes) == self.nb_units, \"The product of out_modes should equal to the number of output units.\"\n assert np.prod(self.inp_modes) == inp_ch, \"The product of inp_modes should equal to the input dimension.\"\n\n dim = self.order\n self.mat_cores = []\n for i in range(dim):\n self.mat_cores.append(\n self.add_weight(name='mat_core_%d' % (i + 1), shape=[self.out_modes[i] * self.mat_ranks[i + 1], self.mat_ranks[i] * self.inp_modes[i]], initializer=self.kernel_initializer, trainable=True))\n\n if self.use_bias:\n self.bias = 
self.add_weight(name=\"bias\", shape=(np.prod(self.out_modes),), initializer=self.bias_initializer, trainable=True)\n\n super(TTLayerDense, self).build(input_shape)\n\n def call(self, input_):\n dim = self.order\n\n out = tf.reshape(input_, [-1, np.prod(self.inp_modes)])\n self.image_max_size = max(self.image_max_size, np.prod(self.inp_modes))\n out = tf.transpose(out, [1, 0])\n for i in range(dim):\n out = tf.reshape(out, [self.mat_ranks[i] * self.inp_modes[i], -1])\n out = tf.matmul(self.mat_cores[i], out)\n out = tf.reshape(out, [self.out_modes[i], -1])\n out = tf.transpose(out, [1, 0])\n\n out = tf.reshape(out, [-1, np.prod(self.out_modes)])\n # self.image_max_size = max(self.image_max_size, np.prod([val.value for val in out.get_shape()[1:]]))\n if self.use_bias:\n out = tf.add(out, self.bias, name='out')\n\n if self.activation is not None:\n out = self.activation(out)\n return out\n\n def compute_output_shape(self, input_shape):\n return (input_shape[0], np.prod(self.out_modes))\n\n def get_config(self):\n super_config = super().get_config()\n super_config.update({\n \"nb_units\": self.nb_units,\n \"inp_modes\": self.inp_modes,\n \"out_modes\": self.out_modes,\n \"mat_ranks\": self.mat_ranks,\n \"mode\": self.mode,\n 'bias_initializer': initializers.serialize(self.bias_initializer),\n 'kernel_initializer': initializers.serialize(self.kernel_initializer),\n 'use_bias': self.use_bias,\n 'activation': activations.serialize(self.activation),\n })\n return super_config\n",
"import numpy as np\nfrom pyfaust.fact import palm4msa\nfrom pyfaust.proj import splincol, ConstraintList, ParamsPalm4MSA, StoppingCriterion\nfrom qkmeans.utils import logger\nimport logging\nlogger.setLevel(logging.ERROR)\nlittle_dims = (64, 27)\nbig_dims = (576, 64)\n\nN_fac = 2\nsparsity = 2\n\nrepet = 5\n\nerrors = np.empty((4, repet))\n\nfor i in range(repet):\n U, S, V = np.linalg.svd(np.random.rand(64, 64))\n target3 = U[:27, :] # 27 x 64 colones orthogonales\n target4 = np.random.rand(27, 64)\n\n for j, target in enumerate([target3, target3.T, target4, target4.T]):\n target = target.astype(float)\n left_dim, right_dim = target.shape\n\n # parameters faust\n stop = StoppingCriterion(tol=1e-10, maxiter=300)\n lst_constraints = [splincol((left_dim, right_dim), sparsity).constraint] + [splincol((right_dim, right_dim), sparsity).constraint for _ in range(N_fac - 1)]\n cons = ConstraintList(*lst_constraints)\n param = ParamsPalm4MSA(cons, stop, is_update_way_R2L=True)\n param.init_facts = [np.eye(left_dim, left_dim) for _ in range(N_fac-1)] + [np.zeros((left_dim, right_dim))]\n\n # call faust\n faust, final_lambda = palm4msa(target, param, ret_lambda=True)\n\n # approximation error\n final_X = np.array(faust.todense())\n error = np.linalg.norm(final_X - target) / np.linalg.norm(target)\n errors[j][i] = error\n print(f\"Error Faust: {error}\")\n\n\nprint(\"col ortho\", \"lin ortho\", \"rand\", \"rand.T\")\nprint(np.mean(errors, axis=1))\nprint(errors)",
"\"\"\"\nThis script finds a palminized model with given arguments then finetune it.\n\nUsage:\n script.py [-h] [-v|-vv] [--seed int] --walltime int --sparsity-factor=int --nb-factor=intorstr [--no-permutation] [--tb] (--mnist|--svhn|--cifar10|--cifar100|--test-data) [--dense-layers --nb-units-dense-layer int|--mnist-lenet|--test-model|--cifar10-vgg19|--cifar100-vgg19|--svhn-vgg19]\n\nOptions:\n -h --help Show this screen.\n -vv Set verbosity to debug.\n -v Set verbosity to info.\n --walltime int The number of hour before training is stopped.\n --tb Tell if tensorboard should be printed.\n --seed int The seed for the experiment.\n\nDataset:\n --mnist Use Mnist dataset.\n --svhn Use svhn dataset.\n --cifar10 Use cifar10 dataset.\n --cifar100 Use cifar100 dataset.\n --test-data Use test datasset (that is actually mnist).\n\nModel:\n --mnist-lenet Use model lenet pretrained for mnist.\n --test-model Use test, small, model.\n --cifar10-vgg19 Use model vgg19 pretrained on cifar10.\n --cifar100-vgg19 Use model vgg19 pretrained on cifar100.\n --svhn-vgg19 Use model vgg19 pretrained on svhn.\n --dense-layers Tells to use simple dense model.\n --nb-units-dense-layer=int Tells the number of hidden units in dense layers.\n\nSparsity options:\n --sparsity-factor=int Integer coefficient from which is computed the number of value in each factor.\n --nb-factor=intorstr Integer telling how many factors should be used or list of int telling for each layer the number of factor (\"int,int,int\").\n --no-permutation Bool tells to not apply permutation to factors so that there is only block diag matrix\n\"\"\"\nimport logging\nimport os\nimport pandas as pd\nimport sys\n\nimport keras\nimport signal\nimport docopt\n\nfrom palmnet.core.palminizable import Palminizable\nfrom palmnet.data import Mnist, Test, Svhn, Cifar100, Cifar10\nfrom palmnet.layers.random_sparse_facto import RandomSparseFactorisationDense, RandomSparseFactorisationConv2D\nfrom palmnet.models import sparse_random_vgg19_model, sparse_random_lenet_model, create_pbp_model, create_random_sparse_model\nfrom palmnet.utils import timeout_signal_handler\nfrom palmnet.experiments.utils import ResultPrinter, ParameterManagerRandomSparseFacto\nfrom skluc.utils import logger, log_memory_usage\nimport numpy as np\n\nlst_results_header = [\n \"test_accuracy_finetuned_model\",\n \"base_score\",\n \"finetuned_score\",\n \"nb_param\",\n \"nb_flop\"\n]\n\ndef main():\n data_obj = paraman.get_dataset()\n (x_train, y_train), (x_test, y_test) = data_obj.load_data()\n if paraman[\"--dense-layers\"] or paraman[\"--pbp-dense-layers\"]:\n x_train = x_train.reshape(x_train.shape[0], np.prod(data_obj.shape))\n x_test = x_test.reshape(x_test.shape[0], np.prod(data_obj.shape))\n\n if paraman[\"--mnist-lenet\"]:\n param_train_dataset = Mnist.get_model_param_training()\n base_model = sparse_random_lenet_model(x_train[0].shape, 10, sparsity_factor=paraman[\"--sparsity-factor\"], nb_sparse_factors=paraman[\"--nb-factor\"])\n elif paraman[\"--cifar10-vgg19\"]:\n param_train_dataset = Cifar10.get_model_param_training()\n base_model = sparse_random_vgg19_model(x_train[0].shape, 10, permutation=not paraman[\"--no-permutation\"], sparsity_factor=paraman[\"--sparsity-factor\"], nb_sparse_factors=paraman[\"--nb-factor\"])\n elif paraman[\"--cifar100-vgg19\"]:\n param_train_dataset = Cifar100.get_model_param_training()\n base_model = sparse_random_vgg19_model(x_train[0].shape, 100, permutation=not paraman[\"--no-permutation\"], sparsity_factor=paraman[\"--sparsity-factor\"], 
nb_sparse_factors=paraman[\"--nb-factor\"])\n elif paraman[\"--svhn-vgg19\"]:\n param_train_dataset = Svhn.get_model_param_training()\n base_model = sparse_random_vgg19_model(x_train[0].shape, 10, permutation=not paraman[\"--no-permutation\"], sparsity_factor=paraman[\"--sparsity-factor\"], nb_sparse_factors=paraman[\"--nb-factor\"])\n elif paraman[\"--test-model\"]:\n param_train_dataset = Test.get_model_param_training()\n base_model = sparse_random_lenet_model(x_train[0].shape, 10, sparsity_factor=paraman[\"--sparsity-factor\"], nb_sparse_factors=paraman[\"--nb-factor\"])\n elif paraman[\"--dense-layers\"]:\n lst_units = [int(elm) for elm in paraman[\"--nb-units-dense-layer\"].split(\"-\")]\n base_model = create_random_sparse_model(x_train[0].shape, y_test[0].shape[0],\n sparsity_factor=paraman[\"--sparsity-factor\"], nb_sparse_factors=paraman[\"--nb-factor\"],\n units=lst_units)\n else:\n raise NotImplementedError(\"No dataset specified.\")\n\n if paraman[\"--mnist\"]:\n if paraman[\"--dense-layers\"]:\n param_train_dataset = Mnist.get_model_param_training(\"mnist_500\")\n else:\n param_train_dataset = Mnist.get_model_param_training()\n else:\n raise NotImplementedError\n\n\n if os.path.exists(paraman[\"output_file_notfinishedprinter\"]) and os.path.exists(paraman[\"output_file_modelprinter\"]):\n df = pd.read_csv(paraman[\"output_file_resprinter\"])\n try:\n init_nb_epoch = len(pd.read_csv(paraman[\"output_file_csvcbprinter\"]))\n except Exception as e:\n logger.error(\"Caught exception while reading csv history: {}\".format(str(e)))\n init_nb_epoch = 0\n base_score = float(df[\"base_score\"])\n base_model = keras.models.load_model(paraman[\"output_file_modelprinter\"],custom_objects={'RandomSparseFactorisationConv2D': RandomSparseFactorisationConv2D,\n \"RandomSparseFactorisationDense\": RandomSparseFactorisationDense})\n nb_param_model = int(df[\"nb_param\"])\n nb_flop_model = int(df[\"nb_flop\"])\n\n else:\n init_nb_epoch = 0\n\n base_model.compile(loss=param_train_dataset.loss,\n optimizer=param_train_dataset.optimizer,\n metrics=['categorical_accuracy'])\n base_score = base_model.evaluate(x_test, y_test, verbose=1)[1]\n print(base_score)\n nb_param_model, _, nb_flop_model, _, param_by_layer, flop_by_layer = Palminizable.count_model_param_and_flops_(base_model)\n print(nb_param_model, nb_flop_model)\n\n # results must be already printed once in case process is killed afterward\n dct_results = {\n \"finetuned_score\": None,\n \"base_score\": base_score,\n \"nb_flop\": nb_flop_model,\n \"nb_param\": nb_param_model,\n }\n resprinter.add(dct_results)\n resprinter.print()\n\n base_model.summary()\n\n call_backs = []\n\n model_checkpoint_callback = keras.callbacks.ModelCheckpoint(str(paraman[\"output_file_modelprinter\"]), monitor='val_loss', verbose=0, save_best_only=False, save_weights_only=False, mode='auto', period=1)\n call_backs.append(model_checkpoint_callback)\n if paraman[\"--tb\"]:\n tbCallBack = keras.callbacks.TensorBoard(log_dir=str(paraman[\"output_file_tensorboardprinter\"]), histogram_freq=20, write_graph=False, write_images=False, batch_size=param_train_dataset.batch_size, write_grads=True, update_freq=\"epoch\")\n call_backs.append(tbCallBack)\n csvcallback = keras.callbacks.callbacks.CSVLogger(str(paraman[\"output_file_csvcbprinter\"]), separator=',', append=True)\n call_backs.append(csvcallback)\n\n\n signal.signal(signal.SIGALRM, timeout_signal_handler)\n signal.alarm(int(paraman[\"--walltime\"] * 3600)) # start alarm\n finetuned_score = None\n try:\n 
open(paraman[\"output_file_notfinishedprinter\"], 'w').close()\n\n history = base_model.fit(param_train_dataset.image_data_generator.flow(x_train, y_train, batch_size=param_train_dataset.batch_size),\n epochs=param_train_dataset.epochs - init_nb_epoch,\n # epochs=2 - init_nb_epoch,\n verbose=2,\n validation_data=(x_test, y_test),\n callbacks=param_train_dataset.callbacks + call_backs)\n signal.alarm(0) # stop alarm for next evaluation\n finetuned_score = base_model.evaluate(x_test, y_test, verbose=1)[1]\n print(finetuned_score)\n\n if os.path.exists(paraman[\"output_file_notfinishedprinter\"]):\n os.remove(paraman[\"output_file_notfinishedprinter\"])\n # except TimeoutError as te:\n except Exception as e:\n logging.error(\"Caught exception: {}\".format(e))\n finetuned_score = None\n finally:\n dct_results = {\n \"finetuned_score\": finetuned_score,\n \"base_score\": base_score,\n \"nb_flop\": nb_flop_model,\n \"nb_param\": nb_param_model,\n }\n base_model.save(str(paraman[\"output_file_modelprinter\"]))\n resprinter.add(dct_results)\n\n\nif __name__ == \"__main__\":\n logger.info(\"Command line: \" + \" \".join(sys.argv))\n log_memory_usage(\"Memory at startup\")\n arguments = docopt.docopt(__doc__)\n paraman = ParameterManagerRandomSparseFacto(arguments)\n initialized_results = dict((v, None) for v in lst_results_header)\n resprinter = ResultPrinter(output_file=paraman[\"output_file_resprinter\"])\n resprinter.add(initialized_results)\n resprinter.add(paraman)\n if paraman[\"-v\"] >= 2:\n logger.setLevel(level=logging.DEBUG)\n elif paraman[\"-v\"] >= 1:\n logger.setLevel(level=logging.INFO)\n else:\n logger.setLevel(level=logging.WARNING)\n\n logger.warning(\"Verbosity set to warning\")\n logger.info(\"Verbosity set to info\")\n logger.debug(\"Verbosity set to debug\")\n\n if not os.path.exists(paraman[\"output_file_notfinishedprinter\"]) and \\\n os.path.exists(paraman[\"output_file_resprinter\"]) and \\\n os.path.exists(paraman[\"output_file_modelprinter\"]):\n sys.exit(\"Expe {} already executed. Exit\".format(paraman[\"hash\"]))\n\n has_failed = False\n try:\n main()\n except Exception as e:\n has_failed = True\n raise e\n\n finally:\n failure_dict = {\n \"failure\": has_failed\n }\n\n resprinter.add(failure_dict)\n resprinter.print()"
] | [
[
"pandas.read_csv",
"numpy.reshape",
"numpy.linalg.multi_dot",
"sklearn.model_selection.train_test_split",
"numpy.linalg.norm",
"numpy.prod",
"numpy.array",
"numpy.sum"
],
[
"numpy.hstack",
"pandas.concat",
"pandas.read_csv",
"pandas.Series",
"numpy.isnan",
"numpy.around",
"numpy.log10",
"numpy.argmin"
],
[
"tensorflow.matmul",
"tensorflow.transpose",
"tensorflow.reshape",
"tensorflow.add",
"numpy.prod",
"numpy.array"
],
[
"numpy.eye",
"numpy.linalg.norm",
"numpy.mean",
"numpy.random.rand",
"numpy.zeros",
"numpy.empty"
],
[
"pandas.read_csv",
"numpy.prod"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
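The `TTLayerDense.call` method stored in the record above contracts the input with one TT-core per mode, alternating reshape, matmul, and transpose steps. Below is a minimal NumPy sketch of that same shape flow, using small hypothetical modes and ranks (none of these numbers come from the dataset); it only demonstrates that the per-core contraction ends with a `(batch, prod(out_modes))` output, and makes no claim of equivalence to any particular dense weight matrix.

```python
import numpy as np

# Hypothetical toy configuration (assumption, not taken from the record above):
inp_modes = [4, 7]        # prod = 28  -> input dimension N
out_modes = [5, 6]        # prod = 30  -> output dimension M
ranks = [1, 3, 1]         # TT-ranks: one more entry than the number of modes
batch = 2

rng = np.random.default_rng(0)
# Core i has shape (out_modes[i] * ranks[i+1], ranks[i] * inp_modes[i]),
# matching the add_weight shapes in TTLayerDense.build.
cores = [rng.standard_normal((out_modes[i] * ranks[i + 1], ranks[i] * inp_modes[i]))
         for i in range(len(inp_modes))]

x = rng.standard_normal((batch, int(np.prod(inp_modes))))

# Same contraction order as TTLayerDense.call, written with NumPy:
out = x.reshape(-1, int(np.prod(inp_modes))).T          # (N, batch)
for i in range(len(inp_modes)):
    out = out.reshape(ranks[i] * inp_modes[i], -1)      # expose (r_i * n_i) rows
    out = cores[i] @ out                                # contract with core i
    out = out.reshape(out_modes[i], -1).T               # push m_i to the trailing axis
out = out.reshape(-1, int(np.prod(out_modes)))          # (batch, M)

print(out.shape)  # (2, 30)
```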
dgwakeman/mne-python | [
"3cc7a3f8456d78c828355f1860dd7e0297e59c73",
"3cc7a3f8456d78c828355f1860dd7e0297e59c73",
"3cc7a3f8456d78c828355f1860dd7e0297e59c73",
"3cc7a3f8456d78c828355f1860dd7e0297e59c73"
] | [
"mne/forward/tests/test_forward.py",
"mne/viz/tests/test_decoding.py",
"mne/inverse_sparse/mxne_inverse.py",
"examples/visualization/plot_topography.py"
] | [
"import os\nimport os.path as op\nimport warnings\nimport gc\n\nfrom nose.tools import assert_true, assert_raises\nimport numpy as np\nfrom numpy.testing import (assert_array_almost_equal, assert_equal,\n assert_array_equal, assert_allclose)\n\nfrom mne.datasets import testing\nfrom mne.io import Raw\nfrom mne import (read_forward_solution, apply_forward, apply_forward_raw,\n average_forward_solutions, write_forward_solution,\n convert_forward_solution)\nfrom mne import SourceEstimate, pick_types_forward, read_evokeds\nfrom mne.label import read_label\nfrom mne.utils import (requires_mne, run_subprocess, _TempDir,\n run_tests_if_main, slow_test)\nfrom mne.forward import (restrict_forward_to_stc, restrict_forward_to_label,\n Forward)\n\ndata_path = testing.data_path(download=False)\nfname_meeg = op.join(data_path, 'MEG', 'sample',\n 'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')\nfname_meeg_grad = op.join(data_path, 'MEG', 'sample',\n 'sample_audvis_trunc-meg-eeg-oct-2-grad-fwd.fif')\n\nfname_raw = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data',\n 'test_raw.fif')\n\nfname_evoked = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',\n 'data', 'test-ave.fif')\nfname_mri = op.join(data_path, 'MEG', 'sample',\n 'sample_audvis_trunc-trans.fif')\nsubjects_dir = os.path.join(data_path, 'subjects')\nfname_src = op.join(subjects_dir, 'sample', 'bem', 'sample-oct-4-src.fif')\n\n\ndef compare_forwards(f1, f2):\n \"\"\"Helper to compare two potentially converted forward solutions\"\"\"\n assert_allclose(f1['sol']['data'], f2['sol']['data'])\n assert_equal(f1['sol']['ncol'], f2['sol']['ncol'])\n assert_allclose(f1['source_nn'], f2['source_nn'])\n if f1['sol_grad'] is not None:\n assert_true(f2['sol_grad'] is not None)\n assert_allclose(f1['sol_grad']['data'], f2['sol_grad']['data'])\n assert_equal(f1['sol_grad']['ncol'], f2['sol_grad']['ncol'])\n else:\n assert_true(f2['sol_grad'] is None)\n assert_equal(f1['source_ori'], f2['source_ori'])\n assert_equal(f1['surf_ori'], f2['surf_ori'])\n\n\[email protected]_testing_data\ndef test_convert_forward():\n \"\"\"Test converting forward solution between different representations\n \"\"\"\n fwd = read_forward_solution(fname_meeg_grad)\n assert_true(repr(fwd))\n assert_true(isinstance(fwd, Forward))\n # look at surface orientation\n fwd_surf = convert_forward_solution(fwd, surf_ori=True)\n fwd_surf_io = read_forward_solution(fname_meeg_grad, surf_ori=True)\n compare_forwards(fwd_surf, fwd_surf_io)\n del fwd_surf_io\n gc.collect()\n # go back\n fwd_new = convert_forward_solution(fwd_surf, surf_ori=False)\n assert_true(repr(fwd_new))\n assert_true(isinstance(fwd_new, Forward))\n compare_forwards(fwd, fwd_new)\n # now go to fixed\n fwd_fixed = convert_forward_solution(fwd_surf, surf_ori=False,\n force_fixed=True)\n del fwd_surf\n gc.collect()\n assert_true(repr(fwd_fixed))\n assert_true(isinstance(fwd_fixed, Forward))\n fwd_fixed_io = read_forward_solution(fname_meeg_grad, surf_ori=False,\n force_fixed=True)\n compare_forwards(fwd_fixed, fwd_fixed_io)\n del fwd_fixed_io\n gc.collect()\n # now go back to cartesian (original condition)\n fwd_new = convert_forward_solution(fwd_fixed)\n assert_true(repr(fwd_new))\n assert_true(isinstance(fwd_new, Forward))\n compare_forwards(fwd, fwd_new)\n del fwd, fwd_new, fwd_fixed\n gc.collect()\n\n\n@slow_test\[email protected]_testing_data\ndef test_io_forward():\n \"\"\"Test IO for forward solutions\n \"\"\"\n temp_dir = _TempDir()\n # do extensive tests with MEEG + grad\n n_channels, n_src = 366, 108\n fwd = 
read_forward_solution(fname_meeg_grad)\n assert_true(isinstance(fwd, Forward))\n fwd = read_forward_solution(fname_meeg_grad, surf_ori=True)\n leadfield = fwd['sol']['data']\n assert_equal(leadfield.shape, (n_channels, n_src))\n assert_equal(len(fwd['sol']['row_names']), n_channels)\n fname_temp = op.join(temp_dir, 'test-fwd.fif')\n write_forward_solution(fname_temp, fwd, overwrite=True)\n\n fwd = read_forward_solution(fname_meeg_grad, surf_ori=True)\n fwd_read = read_forward_solution(fname_temp, surf_ori=True)\n leadfield = fwd_read['sol']['data']\n assert_equal(leadfield.shape, (n_channels, n_src))\n assert_equal(len(fwd_read['sol']['row_names']), n_channels)\n assert_equal(len(fwd_read['info']['chs']), n_channels)\n assert_true('dev_head_t' in fwd_read['info'])\n assert_true('mri_head_t' in fwd_read)\n assert_array_almost_equal(fwd['sol']['data'], fwd_read['sol']['data'])\n\n fwd = read_forward_solution(fname_meeg_grad, force_fixed=True)\n leadfield = fwd['sol']['data']\n assert_equal(leadfield.shape, (n_channels, n_src / 3))\n assert_equal(len(fwd['sol']['row_names']), n_channels)\n assert_equal(len(fwd['info']['chs']), n_channels)\n assert_true('dev_head_t' in fwd['info'])\n assert_true('mri_head_t' in fwd)\n assert_true(fwd['surf_ori'])\n\n # test warnings on bad filenames\n fwd = read_forward_solution(fname_meeg_grad)\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter('always')\n fwd_badname = op.join(temp_dir, 'test-bad-name.fif.gz')\n write_forward_solution(fwd_badname, fwd)\n read_forward_solution(fwd_badname)\n assert_true(len(w) == 2)\n\n fwd = read_forward_solution(fname_meeg)\n write_forward_solution(fname_temp, fwd, overwrite=True)\n fwd_read = read_forward_solution(fname_temp)\n compare_forwards(fwd, fwd_read)\n\n\[email protected]_testing_data\ndef test_apply_forward():\n \"\"\"Test projection of source space data to sensor space\n \"\"\"\n start = 0\n stop = 5\n n_times = stop - start - 1\n sfreq = 10.0\n t_start = 0.123\n\n fwd = read_forward_solution(fname_meeg, force_fixed=True)\n fwd = pick_types_forward(fwd, meg=True)\n assert_true(isinstance(fwd, Forward))\n\n vertno = [fwd['src'][0]['vertno'], fwd['src'][1]['vertno']]\n stc_data = np.ones((len(vertno[0]) + len(vertno[1]), n_times))\n stc = SourceEstimate(stc_data, vertno, tmin=t_start, tstep=1.0 / sfreq)\n\n gain_sum = np.sum(fwd['sol']['data'], axis=1)\n\n # Evoked\n with warnings.catch_warnings(record=True) as w:\n evoked = read_evokeds(fname_evoked, condition=0)\n evoked = apply_forward(fwd, stc, evoked, start=start, stop=stop)\n assert_equal(len(w), 2)\n data = evoked.data\n times = evoked.times\n\n # do some tests\n assert_array_almost_equal(evoked.info['sfreq'], sfreq)\n assert_array_almost_equal(np.sum(data, axis=1), n_times * gain_sum)\n assert_array_almost_equal(times[0], t_start)\n assert_array_almost_equal(times[-1], t_start + (n_times - 1) / sfreq)\n\n # Raw\n raw = Raw(fname_raw)\n raw_proj = apply_forward_raw(fwd, stc, raw, start=start, stop=stop)\n data, times = raw_proj[:, :]\n\n # do some tests\n assert_array_almost_equal(raw_proj.info['sfreq'], sfreq)\n assert_array_almost_equal(np.sum(data, axis=1), n_times * gain_sum)\n atol = 1. 
/ sfreq\n assert_allclose(raw_proj.first_samp / sfreq, t_start, atol=atol)\n assert_allclose(raw_proj.last_samp / sfreq,\n t_start + (n_times - 1) / sfreq, atol=atol)\n\n\[email protected]_testing_data\ndef test_restrict_forward_to_stc():\n \"\"\"Test restriction of source space to source SourceEstimate\n \"\"\"\n start = 0\n stop = 5\n n_times = stop - start - 1\n sfreq = 10.0\n t_start = 0.123\n\n fwd = read_forward_solution(fname_meeg, force_fixed=True)\n fwd = pick_types_forward(fwd, meg=True)\n\n vertno = [fwd['src'][0]['vertno'][0:15], fwd['src'][1]['vertno'][0:5]]\n stc_data = np.ones((len(vertno[0]) + len(vertno[1]), n_times))\n stc = SourceEstimate(stc_data, vertno, tmin=t_start, tstep=1.0 / sfreq)\n\n fwd_out = restrict_forward_to_stc(fwd, stc)\n assert_true(isinstance(fwd_out, Forward))\n\n assert_equal(fwd_out['sol']['ncol'], 20)\n assert_equal(fwd_out['src'][0]['nuse'], 15)\n assert_equal(fwd_out['src'][1]['nuse'], 5)\n assert_equal(fwd_out['src'][0]['vertno'], fwd['src'][0]['vertno'][0:15])\n assert_equal(fwd_out['src'][1]['vertno'], fwd['src'][1]['vertno'][0:5])\n\n fwd = read_forward_solution(fname_meeg, force_fixed=False)\n fwd = pick_types_forward(fwd, meg=True)\n\n vertno = [fwd['src'][0]['vertno'][0:15], fwd['src'][1]['vertno'][0:5]]\n stc_data = np.ones((len(vertno[0]) + len(vertno[1]), n_times))\n stc = SourceEstimate(stc_data, vertno, tmin=t_start, tstep=1.0 / sfreq)\n\n fwd_out = restrict_forward_to_stc(fwd, stc)\n\n assert_equal(fwd_out['sol']['ncol'], 60)\n assert_equal(fwd_out['src'][0]['nuse'], 15)\n assert_equal(fwd_out['src'][1]['nuse'], 5)\n assert_equal(fwd_out['src'][0]['vertno'], fwd['src'][0]['vertno'][0:15])\n assert_equal(fwd_out['src'][1]['vertno'], fwd['src'][1]['vertno'][0:5])\n\n\[email protected]_testing_data\ndef test_restrict_forward_to_label():\n \"\"\"Test restriction of source space to label\n \"\"\"\n fwd = read_forward_solution(fname_meeg, force_fixed=True)\n fwd = pick_types_forward(fwd, meg=True)\n\n label_path = op.join(data_path, 'MEG', 'sample', 'labels')\n labels = ['Aud-lh', 'Vis-rh']\n label_lh = read_label(op.join(label_path, labels[0] + '.label'))\n label_rh = read_label(op.join(label_path, labels[1] + '.label'))\n\n fwd_out = restrict_forward_to_label(fwd, [label_lh, label_rh])\n\n src_sel_lh = np.intersect1d(fwd['src'][0]['vertno'], label_lh.vertices)\n src_sel_lh = np.searchsorted(fwd['src'][0]['vertno'], src_sel_lh)\n\n src_sel_rh = np.intersect1d(fwd['src'][1]['vertno'], label_rh.vertices)\n src_sel_rh = (np.searchsorted(fwd['src'][1]['vertno'], src_sel_rh) +\n len(fwd['src'][0]['vertno']))\n\n assert_equal(fwd_out['sol']['ncol'], len(src_sel_lh) + len(src_sel_rh))\n assert_equal(fwd_out['src'][0]['nuse'], len(src_sel_lh))\n assert_equal(fwd_out['src'][1]['nuse'], len(src_sel_rh))\n assert_equal(fwd_out['src'][0]['vertno'], src_sel_lh)\n assert_equal(fwd_out['src'][1]['vertno'], src_sel_rh)\n\n fwd = read_forward_solution(fname_meeg, force_fixed=False)\n fwd = pick_types_forward(fwd, meg=True)\n\n label_path = op.join(data_path, 'MEG', 'sample', 'labels')\n labels = ['Aud-lh', 'Vis-rh']\n label_lh = read_label(op.join(label_path, labels[0] + '.label'))\n label_rh = read_label(op.join(label_path, labels[1] + '.label'))\n\n fwd_out = restrict_forward_to_label(fwd, [label_lh, label_rh])\n\n src_sel_lh = np.intersect1d(fwd['src'][0]['vertno'], label_lh.vertices)\n src_sel_lh = np.searchsorted(fwd['src'][0]['vertno'], src_sel_lh)\n\n src_sel_rh = np.intersect1d(fwd['src'][1]['vertno'], label_rh.vertices)\n src_sel_rh = 
(np.searchsorted(fwd['src'][1]['vertno'], src_sel_rh) +\n len(fwd['src'][0]['vertno']))\n\n assert_equal(fwd_out['sol']['ncol'],\n 3 * (len(src_sel_lh) + len(src_sel_rh)))\n assert_equal(fwd_out['src'][0]['nuse'], len(src_sel_lh))\n assert_equal(fwd_out['src'][1]['nuse'], len(src_sel_rh))\n assert_equal(fwd_out['src'][0]['vertno'], src_sel_lh)\n assert_equal(fwd_out['src'][1]['vertno'], src_sel_rh)\n\n\[email protected]_testing_data\n@requires_mne\ndef test_average_forward_solution():\n \"\"\"Test averaging forward solutions\n \"\"\"\n temp_dir = _TempDir()\n fwd = read_forward_solution(fname_meeg)\n # input not a list\n assert_raises(TypeError, average_forward_solutions, 1)\n # list is too short\n assert_raises(ValueError, average_forward_solutions, [])\n # negative weights\n assert_raises(ValueError, average_forward_solutions, [fwd, fwd], [-1, 0])\n # all zero weights\n assert_raises(ValueError, average_forward_solutions, [fwd, fwd], [0, 0])\n # weights not same length\n assert_raises(ValueError, average_forward_solutions, [fwd, fwd], [0, 0, 0])\n # list does not only have all dict()\n assert_raises(TypeError, average_forward_solutions, [1, fwd])\n\n # try an easy case\n fwd_copy = average_forward_solutions([fwd])\n assert_true(isinstance(fwd_copy, Forward))\n assert_array_equal(fwd['sol']['data'], fwd_copy['sol']['data'])\n\n # modify a fwd solution, save it, use MNE to average with old one\n fwd_copy['sol']['data'] *= 0.5\n fname_copy = op.join(temp_dir, 'copy-fwd.fif')\n write_forward_solution(fname_copy, fwd_copy, overwrite=True)\n cmd = ('mne_average_forward_solutions', '--fwd', fname_meeg, '--fwd',\n fname_copy, '--out', fname_copy)\n run_subprocess(cmd)\n\n # now let's actually do it, with one filename and one fwd\n fwd_ave = average_forward_solutions([fwd, fwd_copy])\n assert_array_equal(0.75 * fwd['sol']['data'], fwd_ave['sol']['data'])\n # fwd_ave_mne = read_forward_solution(fname_copy)\n # assert_array_equal(fwd_ave_mne['sol']['data'], fwd_ave['sol']['data'])\n\n # with gradient\n fwd = read_forward_solution(fname_meeg_grad)\n fwd_ave = average_forward_solutions([fwd, fwd])\n compare_forwards(fwd, fwd_ave)\n\nrun_tests_if_main()\n",
"# Authors: Denis Engemann <[email protected]>\n# Jean-Remi King <[email protected]>\n#\n# License: Simplified BSD\n\nimport os.path as op\nimport warnings\n\nfrom nose.tools import assert_raises, assert_equals\n\nimport numpy as np\n\nfrom mne.epochs import equalize_epoch_counts, concatenate_epochs\nfrom mne.decoding import GeneralizationAcrossTime\nfrom mne import io, Epochs, read_events, pick_types\nfrom mne.utils import requires_sklearn, run_tests_if_main\nimport matplotlib\nmatplotlib.use('Agg') # for testing don't use X server\n\n\ndata_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')\nraw_fname = op.join(data_dir, 'test_raw.fif')\nevent_name = op.join(data_dir, 'test-eve.fif')\n\n\nwarnings.simplefilter('always') # enable b/c these tests throw warnings\n\n\ndef _get_data(tmin=-0.2, tmax=0.5, event_id=dict(aud_l=1, vis_l=3),\n event_id_gen=dict(aud_l=2, vis_l=4), test_times=None):\n \"\"\"Aux function for testing GAT viz\"\"\"\n gat = GeneralizationAcrossTime()\n raw = io.Raw(raw_fname, preload=False)\n events = read_events(event_name)\n picks = pick_types(raw.info, meg='mag', stim=False, ecg=False,\n eog=False, exclude='bads')\n picks = picks[1:13:3]\n decim = 30\n # Test on time generalization within one condition\n with warnings.catch_warnings(record=True):\n epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,\n baseline=(None, 0), preload=True, decim=decim)\n epochs_list = [epochs[k] for k in event_id]\n equalize_epoch_counts(epochs_list)\n epochs = concatenate_epochs(epochs_list)\n\n # Test default running\n gat = GeneralizationAcrossTime(test_times=test_times)\n gat.fit(epochs)\n gat.score(epochs)\n return gat\n\n\n@requires_sklearn\ndef test_gat_plot_matrix():\n \"\"\"Test GAT matrix plot\"\"\"\n gat = _get_data()\n gat.plot()\n del gat.scores_\n assert_raises(RuntimeError, gat.plot)\n\n\n@requires_sklearn\ndef test_gat_plot_diagonal():\n \"\"\"Test GAT diagonal plot\"\"\"\n gat = _get_data()\n gat.plot_diagonal()\n del gat.scores_\n assert_raises(RuntimeError, gat.plot)\n\n\n@requires_sklearn\ndef test_gat_plot_times():\n \"\"\"Test GAT times plot\"\"\"\n gat = _get_data()\n # test one line\n gat.plot_times(gat.train_times_['times'][0])\n # test multiple lines\n gat.plot_times(gat.train_times_['times'])\n # test multiple colors\n n_times = len(gat.train_times_['times'])\n colors = np.tile(['r', 'g', 'b'], np.ceil(n_times / 3))[:n_times]\n gat.plot_times(gat.train_times_['times'], color=colors)\n # test invalid time point\n assert_raises(ValueError, gat.plot_times, -1.)\n # test float type\n assert_raises(ValueError, gat.plot_times, 1)\n assert_raises(ValueError, gat.plot_times, 'diagonal')\n del gat.scores_\n assert_raises(RuntimeError, gat.plot)\n\n\ndef chance(ax):\n return ax.get_children()[1].get_lines()[0].get_ydata()[0]\n\n\n@requires_sklearn\ndef test_gat_chance_level():\n \"\"\"Test GAT plot_times chance level\"\"\"\n gat = _get_data()\n ax = gat.plot_diagonal(chance=False)\n ax = gat.plot_diagonal()\n assert_equals(chance(ax), .5)\n gat = _get_data(event_id=dict(aud_l=1, vis_l=3, aud_r=2, vis_r=4))\n ax = gat.plot_diagonal()\n assert_equals(chance(ax), .25)\n ax = gat.plot_diagonal(chance=1.234)\n assert_equals(chance(ax), 1.234)\n assert_raises(ValueError, gat.plot_diagonal, chance='foo')\n del gat.scores_\n assert_raises(RuntimeError, gat.plot)\n\n\n@requires_sklearn\ndef test_gat_plot_nonsquared():\n \"\"\"Test GAT diagonal plot\"\"\"\n gat = _get_data(test_times=dict(start=0.))\n gat.plot()\n ax = gat.plot_diagonal()\n scores = 
ax.get_children()[1].get_lines()[2].get_ydata()\n assert_equals(len(scores), len(gat.estimators_))\n\nrun_tests_if_main()\n",
"# Author: Alexandre Gramfort <[email protected]>\n# Daniel Strohmeier <[email protected]>\n#\n# License: Simplified BSD\n\nfrom copy import deepcopy\nimport numpy as np\nfrom scipy import linalg, signal\n\nfrom ..source_estimate import SourceEstimate\nfrom ..minimum_norm.inverse import combine_xyz, _prepare_forward\nfrom ..minimum_norm.inverse import _check_reference\nfrom ..forward import compute_orient_prior, is_fixed_orient, _to_fixed_ori\nfrom ..io.pick import pick_channels_evoked\nfrom ..io.proj import deactivate_proj\nfrom ..utils import logger, verbose\nfrom ..externals.six.moves import xrange as range\n\nfrom .mxne_optim import (mixed_norm_solver, iterative_mixed_norm_solver,\n norm_l2inf, tf_mixed_norm_solver)\n\n\n@verbose\ndef _prepare_weights(forward, gain, source_weighting, weights, weights_min):\n mask = None\n if isinstance(weights, SourceEstimate):\n # weights = np.sqrt(np.sum(weights.data ** 2, axis=1))\n weights = np.max(np.abs(weights.data), axis=1)\n weights_max = np.max(weights)\n if weights_min > weights_max:\n raise ValueError('weights_min > weights_max (%s > %s)' %\n (weights_min, weights_max))\n weights_min = weights_min / weights_max\n weights = weights / weights_max\n n_dip_per_pos = 1 if is_fixed_orient(forward) else 3\n weights = np.ravel(np.tile(weights, [n_dip_per_pos, 1]).T)\n if len(weights) != gain.shape[1]:\n raise ValueError('weights do not have the correct dimension '\n ' (%d != %d)' % (len(weights), gain.shape[1]))\n if len(source_weighting.shape) == 1:\n source_weighting *= weights\n else:\n source_weighting *= weights[:, None]\n gain *= weights[None, :]\n\n if weights_min is not None:\n mask = (weights > weights_min)\n gain = gain[:, mask]\n n_sources = np.sum(mask) // n_dip_per_pos\n logger.info(\"Reducing source space to %d sources\" % n_sources)\n\n return gain, source_weighting, mask\n\n\n@verbose\ndef _prepare_gain_column(forward, info, noise_cov, pca, depth, loose, weights,\n weights_min, verbose=None):\n gain_info, gain, _, whitener, _ = _prepare_forward(forward, info,\n noise_cov, pca)\n\n logger.info('Whitening lead field matrix.')\n gain = np.dot(whitener, gain)\n\n if depth is not None:\n depth_prior = np.sum(gain ** 2, axis=0) ** depth\n source_weighting = np.sqrt(depth_prior ** -1.)\n else:\n source_weighting = np.ones(gain.shape[1], dtype=gain.dtype)\n\n if loose is not None and loose != 1.0:\n source_weighting *= np.sqrt(compute_orient_prior(forward, loose))\n\n gain *= source_weighting[None, :]\n\n if weights is None:\n mask = None\n else:\n gain, source_weighting, mask = _prepare_weights(forward, gain,\n source_weighting,\n weights, weights_min)\n\n return gain, gain_info, whitener, source_weighting, mask\n\n\ndef _prepare_gain(forward, info, noise_cov, pca, depth, loose, weights,\n weights_min, verbose=None):\n if not isinstance(depth, float):\n raise ValueError('Invalid depth parameter. 
'\n 'A float is required (got %s).'\n % type(depth))\n elif depth < 0.0:\n raise ValueError('Depth parameter must be positive (got %s).'\n % depth)\n\n gain, gain_info, whitener, source_weighting, mask = \\\n _prepare_gain_column(forward, info, noise_cov, pca, depth,\n loose, weights, weights_min)\n\n return gain, gain_info, whitener, source_weighting, mask\n\n\ndef _reapply_source_weighting(X, source_weighting, active_set,\n n_dip_per_pos):\n X *= source_weighting[active_set][:, None]\n return X\n\n\ndef _compute_residual(forward, evoked, X, active_set, info):\n sel = [forward['sol']['row_names'].index(c) for c in info['ch_names']]\n residual = evoked.copy()\n residual = pick_channels_evoked(residual, include=info['ch_names'])\n r_tmp = residual.copy()\n r_tmp.data = np.dot(forward['sol']['data'][sel, :][:, active_set], X)\n\n # Take care of proj\n active_projs = list()\n non_active_projs = list()\n for p in evoked.info['projs']:\n if p['active']:\n active_projs.append(p)\n else:\n non_active_projs.append(p)\n\n if len(active_projs) > 0:\n r_tmp.info['projs'] = deactivate_proj(active_projs, copy=True)\n r_tmp.apply_proj()\n r_tmp.add_proj(non_active_projs, remove_existing=False)\n\n residual.data -= r_tmp.data\n\n return residual\n\n\n@verbose\ndef _make_sparse_stc(X, active_set, forward, tmin, tstep,\n active_is_idx=False, verbose=None):\n if not is_fixed_orient(forward):\n logger.info('combining the current components...')\n X = combine_xyz(X)\n\n if not active_is_idx:\n active_idx = np.where(active_set)[0]\n else:\n active_idx = active_set\n\n n_dip_per_pos = 1 if is_fixed_orient(forward) else 3\n if n_dip_per_pos > 1:\n active_idx = np.unique(active_idx // n_dip_per_pos)\n\n src = forward['src']\n\n n_lh_points = len(src[0]['vertno'])\n lh_vertno = src[0]['vertno'][active_idx[active_idx < n_lh_points]]\n rh_vertno = src[1]['vertno'][active_idx[active_idx >= n_lh_points] -\n n_lh_points]\n vertices = [lh_vertno, rh_vertno]\n stc = SourceEstimate(X, vertices=vertices, tmin=tmin, tstep=tstep)\n return stc\n\n\n@verbose\ndef mixed_norm(evoked, forward, noise_cov, alpha, loose=0.2, depth=0.8,\n maxit=3000, tol=1e-4, active_set_size=10, pca=True,\n debias=True, time_pca=True, weights=None, weights_min=None,\n solver='auto', n_mxne_iter=1, return_residual=False,\n verbose=None):\n \"\"\"Mixed-norm estimate (MxNE) and iterative reweighted MxNE (irMxNE)\n\n Compute L1/L2 mixed-norm solution or L0.5/L2 mixed-norm solution\n on evoked data.\n\n References:\n Gramfort A., Kowalski M. and Hamalainen, M.,\n Mixed-norm estimates for the M/EEG inverse problem using accelerated\n gradient methods, Physics in Medicine and Biology, 2012\n http://dx.doi.org/10.1088/0031-9155/57/7/1937\n\n Strohmeier D., Haueisen J., and Gramfort A.,\n Improved MEG/EEG source localization with reweighted mixed-norms,\n 4th International Workshop on Pattern Recognition in Neuroimaging,\n Tuebingen, 2014\n\n Parameters\n ----------\n evoked : instance of Evoked or list of instances of Evoked\n Evoked data to invert.\n forward : dict\n Forward operator.\n noise_cov : instance of Covariance\n Noise covariance to compute whitener.\n alpha : float\n Regularization parameter.\n loose : float in [0, 1]\n Value that weights the source variances of the dipole components\n that are parallel (tangential) to the cortical surface. If loose\n is 0 or None then the solution is computed with fixed orientation.\n If loose is 1, it corresponds to free orientations.\n depth: None | float in [0, 1]\n Depth weighting coefficients. 
If None, no depth weighting is performed.\n maxit : int\n Maximum number of iterations.\n tol : float\n Tolerance parameter.\n active_set_size : int | None\n Size of active set increment. If None, no active set strategy is used.\n pca : bool\n If True the rank of the data is reduced to true dimension.\n debias : bool\n Remove coefficient amplitude bias due to L1 penalty.\n time_pca : bool or int\n If True the rank of the concatenated epochs is reduced to\n its true dimension. If is 'int' the rank is limited to this value.\n weights : None | array | SourceEstimate\n Weight for penalty in mixed_norm. Can be None or\n 1d array of length n_sources or a SourceEstimate e.g. obtained\n with wMNE or dSPM or fMRI.\n weights_min : float\n Do not consider in the estimation sources for which weights\n is less than weights_min.\n solver : 'prox' | 'cd' | 'bcd' | 'auto'\n The algorithm to use for the optimization. 'prox' stands for\n proximal interations using the FISTA algorithm, 'cd' uses\n coordinate descent, and 'bcd' applies block coordinate descent.\n 'cd' is only available for fixed orientation.\n n_mxne_iter : int\n The number of MxNE iterations. If > 1, iterative reweighting\n is applied.\n return_residual : bool\n If True, the residual is returned as an Evoked instance.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see mne.verbose).\n\n Returns\n -------\n stc : SourceEstimate | list of SourceEstimate\n Source time courses for each evoked data passed as input.\n residual : instance of Evoked\n The residual a.k.a. data not explained by the sources.\n Only returned if return_residual is True.\n \"\"\"\n if n_mxne_iter < 1:\n raise ValueError('MxNE has to be computed at least 1 time. '\n 'Requires n_mxne_iter > 0. '\n 'Got n_mxne_iter = %d.' 
% n_mxne_iter)\n\n if not isinstance(evoked, list):\n evoked = [evoked]\n\n _check_reference(evoked[0])\n\n all_ch_names = evoked[0].ch_names\n if not all(all_ch_names == evoked[i].ch_names\n for i in range(1, len(evoked))):\n raise Exception('All the datasets must have the same good channels.')\n\n # put the forward solution in fixed orientation if it's not already\n if loose is None and not is_fixed_orient(forward):\n forward = deepcopy(forward)\n _to_fixed_ori(forward)\n\n gain, gain_info, whitener, source_weighting, mask = _prepare_gain(\n forward, evoked[0].info, noise_cov, pca, depth, loose, weights,\n weights_min)\n\n sel = [all_ch_names.index(name) for name in gain_info['ch_names']]\n M = np.concatenate([e.data[sel] for e in evoked], axis=1)\n\n # Whiten data\n logger.info('Whitening data matrix.')\n M = np.dot(whitener, M)\n\n if time_pca:\n U, s, Vh = linalg.svd(M, full_matrices=False)\n if not isinstance(time_pca, bool) and isinstance(time_pca, int):\n U = U[:, :time_pca]\n s = s[:time_pca]\n Vh = Vh[:time_pca]\n M = U * s\n\n # Scaling to make setting of alpha easy\n n_dip_per_pos = 1 if is_fixed_orient(forward) else 3\n alpha_max = norm_l2inf(np.dot(gain.T, M), n_dip_per_pos, copy=False)\n alpha_max *= 0.01\n gain /= alpha_max\n source_weighting /= alpha_max\n\n if n_mxne_iter == 1:\n X, active_set, E = mixed_norm_solver(\n M, gain, alpha, maxit=maxit, tol=tol,\n active_set_size=active_set_size, n_orient=n_dip_per_pos,\n debias=debias, solver=solver, verbose=verbose)\n else:\n X, active_set, E = iterative_mixed_norm_solver(\n M, gain, alpha, n_mxne_iter, maxit=maxit, tol=tol,\n n_orient=n_dip_per_pos, active_set_size=active_set_size,\n debias=debias, solver=solver, verbose=verbose)\n\n if mask is not None:\n active_set_tmp = np.zeros(len(mask), dtype=np.bool)\n active_set_tmp[mask] = active_set\n active_set = active_set_tmp\n del active_set_tmp\n\n if time_pca:\n X = np.dot(X, Vh)\n\n if active_set.sum() == 0:\n raise Exception(\"No active dipoles found. 
alpha is too big.\")\n\n # Reapply weights to have correct unit\n X = _reapply_source_weighting(X, source_weighting,\n active_set, n_dip_per_pos)\n\n stcs = list()\n residual = list()\n cnt = 0\n for e in evoked:\n tmin = e.times[0]\n tstep = 1.0 / e.info['sfreq']\n Xe = X[:, cnt:(cnt + len(e.times))]\n stc = _make_sparse_stc(Xe, active_set, forward, tmin, tstep)\n stcs.append(stc)\n cnt += len(e.times)\n\n if return_residual:\n residual.append(_compute_residual(forward, e, Xe, active_set,\n gain_info))\n\n logger.info('[done]')\n\n if len(stcs) == 1:\n out = stcs[0]\n if return_residual:\n residual = residual[0]\n else:\n out = stcs\n\n if return_residual:\n out = out, residual\n\n return out\n\n\ndef _window_evoked(evoked, size):\n \"\"\"Window evoked (size in seconds)\"\"\"\n if isinstance(size, (float, int)):\n lsize = rsize = float(size)\n else:\n lsize, rsize = size\n evoked = evoked.copy()\n sfreq = float(evoked.info['sfreq'])\n lsize = int(lsize * sfreq)\n rsize = int(rsize * sfreq)\n lhann = signal.hann(lsize * 2)\n rhann = signal.hann(rsize * 2)\n window = np.r_[lhann[:lsize],\n np.ones(len(evoked.times) - lsize - rsize),\n rhann[-rsize:]]\n evoked.data *= window[None, :]\n return evoked\n\n\n@verbose\ndef tf_mixed_norm(evoked, forward, noise_cov, alpha_space, alpha_time,\n loose=0.2, depth=0.8, maxit=3000, tol=1e-4,\n weights=None, weights_min=None, pca=True, debias=True,\n wsize=64, tstep=4, window=0.02,\n return_residual=False, verbose=None):\n \"\"\"Time-Frequency Mixed-norm estimate (TF-MxNE)\n\n Compute L1/L2 + L1 mixed-norm solution on time frequency\n dictionary. Works with evoked data.\n\n References:\n\n A. Gramfort, D. Strohmeier, J. Haueisen, M. Hamalainen, M. Kowalski,\n Time-Frequency Mixed-Norm Estimates: Sparse M/EEG imaging with\n non-stationary source activations\n NeuroImage, Volume 70, 15 April 2013, Pages 410-422, ISSN 1053-8119,\n DOI: 10.1016/j.neuroimage.2012.12.051.\n\n A. Gramfort, D. Strohmeier, J. Haueisen, M. Hamalainen, M. Kowalski\n Functional Brain Imaging with M/EEG Using Structured Sparsity in\n Time-Frequency Dictionaries\n Proceedings Information Processing in Medical Imaging\n Lecture Notes in Computer Science, 2011, Volume 6801/2011,\n 600-611, DOI: 10.1007/978-3-642-22092-0_49\n http://dx.doi.org/10.1007/978-3-642-22092-0_49\n\n Parameters\n ----------\n evoked : instance of Evoked\n Evoked data to invert.\n forward : dict\n Forward operator.\n noise_cov : instance of Covariance\n Noise covariance to compute whitener.\n alpha_space : float\n Regularization parameter for spatial sparsity. If larger than 100,\n then no source will be active.\n alpha_time : float\n Regularization parameter for temporal sparsity. It set to 0,\n no temporal regularization is applied. It this case, TF-MxNE is\n equivalent to MxNE with L21 norm.\n loose : float in [0, 1]\n Value that weights the source variances of the dipole components\n that are parallel (tangential) to the cortical surface. If loose\n is 0 or None then the solution is computed with fixed orientation.\n If loose is 1, it corresponds to free orientations.\n depth: None | float in [0, 1]\n Depth weighting coefficients. If None, no depth weighting is performed.\n maxit : int\n Maximum number of iterations.\n tol : float\n Tolerance parameter.\n weights: None | array | SourceEstimate\n Weight for penalty in mixed_norm. Can be None or\n 1d array of length n_sources or a SourceEstimate e.g. 
obtained\n with wMNE or dSPM or fMRI.\n weights_min: float\n Do not consider in the estimation sources for which weights\n is less than weights_min.\n pca: bool\n If True the rank of the data is reduced to true dimension.\n debias: bool\n Remove coefficient amplitude bias due to L1 penalty.\n wsize: int\n Length of the STFT window in samples (must be a multiple of 4).\n tstep: int\n Step between successive windows in samples (must be a multiple of 2,\n a divider of wsize and smaller than wsize/2) (default: wsize/2).\n window : float or (float, float)\n Length of time window used to take care of edge artifacts in seconds.\n It can be one float or float if the values are different for left\n and right window length.\n return_residual : bool\n If True, the residual is returned as an Evoked instance.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see mne.verbose).\n\n Returns\n -------\n stc : instance of SourceEstimate\n Source time courses.\n residual : instance of Evoked\n The residual a.k.a. data not explained by the sources.\n Only returned if return_residual is True.\n \"\"\"\n _check_reference(evoked)\n\n all_ch_names = evoked.ch_names\n info = evoked.info\n\n # put the forward solution in fixed orientation if it's not already\n if loose is None and not is_fixed_orient(forward):\n forward = deepcopy(forward)\n _to_fixed_ori(forward)\n\n gain, gain_info, whitener, source_weighting, mask = _prepare_gain(\n forward, evoked.info, noise_cov, pca, depth, loose, weights,\n weights_min)\n\n if window is not None:\n evoked = _window_evoked(evoked, window)\n\n sel = [all_ch_names.index(name) for name in gain_info[\"ch_names\"]]\n M = evoked.data[sel]\n\n # Whiten data\n logger.info('Whitening data matrix.')\n M = np.dot(whitener, M)\n\n # Scaling to make setting of alpha easy\n n_dip_per_pos = 1 if is_fixed_orient(forward) else 3\n alpha_max = norm_l2inf(np.dot(gain.T, M), n_dip_per_pos, copy=False)\n alpha_max *= 0.01\n gain /= alpha_max\n source_weighting /= alpha_max\n\n X, active_set, E = tf_mixed_norm_solver(M, gain,\n alpha_space, alpha_time,\n wsize=wsize, tstep=tstep,\n maxit=maxit, tol=tol,\n verbose=verbose,\n n_orient=n_dip_per_pos,\n debias=debias)\n\n if active_set.sum() == 0:\n raise Exception(\"No active dipoles found. alpha is too big.\")\n\n if mask is not None:\n active_set_tmp = np.zeros(len(mask), dtype=np.bool)\n active_set_tmp[mask] = active_set\n active_set = active_set_tmp\n del active_set_tmp\n\n # Reapply weights to have correct unit\n X = _reapply_source_weighting(X, source_weighting,\n active_set, n_dip_per_pos)\n\n if return_residual:\n residual = _compute_residual(forward, evoked, X, active_set,\n gain_info)\n\n tmin = evoked.times[0]\n tstep = 1.0 / info['sfreq']\n out = _make_sparse_stc(X, active_set, forward, tmin, tstep)\n logger.info('[done]')\n\n if return_residual:\n out = out, residual\n\n return out\n",
"\"\"\"\n=================================\nPlot topographies for MEG sensors\n=================================\n\n\"\"\"\n# Author: Alexandre Gramfort <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport matplotlib.pyplot as plt\n\nfrom mne import read_evokeds\nfrom mne.viz import plot_topo\nfrom mne.datasets import sample\n\nprint(__doc__)\n\ndata_path = sample.data_path()\n\nfname = data_path + '/MEG/sample/sample_audvis-ave.fif'\n\n# Reading\ncondition = 'Left Auditory'\nevoked = read_evokeds(fname, condition=condition, baseline=(None, 0))\n\n###############################################################################\n# Show topography\ntitle = 'MNE sample data (condition : %s)' % evoked.comment\nplot_topo(evoked, title=title)\nplt.show()\n"
] | [
[
"numpy.testing.assert_equal",
"numpy.testing.assert_array_equal",
"numpy.intersect1d",
"numpy.searchsorted",
"numpy.testing.assert_allclose",
"numpy.sum",
"numpy.testing.assert_array_almost_equal"
],
[
"matplotlib.use",
"numpy.ceil"
],
[
"numpy.dot",
"scipy.linalg.svd",
"numpy.sqrt",
"numpy.abs",
"numpy.unique",
"numpy.tile",
"numpy.ones",
"numpy.concatenate",
"numpy.max",
"scipy.signal.hann",
"numpy.where",
"numpy.sum"
],
[
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.14",
"0.15",
"0.12",
"0.10"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
christophbrgr/ood_detection_framework | [
"c3b7e3064ed8ee4aeb112cd2ab946ee41636f79f"
] | [
"models/wide_resnet.py"
] | [
"import sys\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.nn.init as init\nfrom torch.autograd import Variable\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=True)\n\n\ndef conv_init(m):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n init.xavier_uniform_(m.weight, gain=np.sqrt(2))\n init.constant_(m.bias, 0)\n elif classname.find('BatchNorm') != -1:\n init.constant_(m.weight, 1)\n init.constant_(m.bias, 0)\n\n\nclass wide_basic(nn.Module):\n def __init__(self, in_planes, planes, dropout_rate, stride=1):\n super(wide_basic, self).__init__()\n self.bn1 = nn.BatchNorm2d(in_planes)\n self.conv1 = nn.Conv2d(\n in_planes, planes, kernel_size=3, padding=1, bias=True)\n self.dropout = nn.Dropout(p=dropout_rate)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,\n stride=stride, padding=1, bias=True)\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_planes, planes, kernel_size=1,\n stride=stride, bias=True),\n )\n\n def forward(self, x):\n out = self.dropout(self.conv1(F.relu(self.bn1(x))))\n out = self.conv2(F.relu(self.bn2(out)))\n out += self.shortcut(x)\n\n return out\n\n\nclass Wide_ResNet(nn.Module):\n def __init__(self, depth, widen_factor, dropout_rate, num_classes):\n super(Wide_ResNet, self).__init__()\n self.in_planes = 16\n\n assert ((depth-4) % 6 == 0), 'Wide-resnet depth should be 6n+4'\n n = (depth-4)/6\n k = widen_factor\n\n print('Wide-Resnet %dx%d' % (depth, k))\n nStages = [16, 16*k, 32*k, 64*k]\n\n self.conv1 = conv3x3(3, nStages[0])\n self.layer1 = self._wide_layer(\n wide_basic, nStages[1], n, dropout_rate, stride=1)\n self.layer2 = self._wide_layer(\n wide_basic, nStages[2], n, dropout_rate, stride=2)\n self.layer3 = self._wide_layer(\n wide_basic, nStages[3], n, dropout_rate, stride=2)\n self.bn1 = nn.BatchNorm2d(nStages[3], momentum=0.9)\n self.linear = nn.Linear(nStages[3], num_classes)\n\n def _wide_layer(self, block, planes, num_blocks, dropout_rate, stride):\n strides = [stride] + [1]*(int(num_blocks)-1)\n layers = []\n\n for stride in strides:\n layers.append(block(self.in_planes, planes, dropout_rate, stride))\n self.in_planes = planes\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = F.relu(self.bn1(out))\n # print(f'Shape before avg pooling: {out.shape}')\n out = F.avg_pool2d(out, int(out.shape[3]))\n # print(f'Shape after avg pooling: {out.shape}')\n out = out.view(out.size(0), -1)\n penultimate = out\n out = self.linear(out)\n\n return out, penultimate\n\n # feature extraction for Mahalanobis\n def feature_list(self, x):\n out_list = []\n out = self.conv1(x)\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = F.relu(self.bn1(out))\n # print shape\n # print(f'Shape: {out.shape}')\n # out2 = F.max_pool3d(out, (4,4,4))\n out2 = F.max_pool2d(out, (8,8))\n out_list.append(out2)\n print(f'Shape: {out2.shape}')\n out = F.avg_pool2d(out, int(out.shape[3]))\n out = out.view(out.size(0), -1)\n\n return self.linear(out), out_list\n\n def intermediate_forward(self, x, layer_index):\n out = self.conv1(x)\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = F.relu(self.bn1(out))\n return F.max_pool2d(out, (8,8))# 
F.max_pool3d(out, (4,4,4))\n\n # function to extract the penultimate features\n def penultimate_forward(self, x):\n out = self.conv1(x)\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n penultimate = F.relu(self.bn1(out))\n penultimate = F.max_pool2d(penultimate, (8,8))\n # penultimate = F.max_pool3d(penultimate, (4,4,4))\n out = F.avg_pool2d(penultimate, int(out.shape[3]))\n out = out.view(out.size(0), -1)\n\n return self.linear(out), penultimate\n"
] | [
[
"torch.nn.Sequential",
"torch.nn.Dropout",
"numpy.sqrt",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.BatchNorm2d",
"torch.nn.functional.max_pool2d"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
esoha-nvidia/cudf | [
"663457b186bbf27ea2926e08438b8c01b5c7633e",
"663457b186bbf27ea2926e08438b8c01b5c7633e"
] | [
"python/cudf/cudf/tests/test_binops.py",
"python/cudf/cudf/core/indexing.py"
] | [
"# Copyright (c) 2018-2021, NVIDIA CORPORATION.\n\nfrom __future__ import division\n\nimport decimal\nimport operator\nimport random\nfrom itertools import product\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nimport cudf\nfrom cudf.core import Series\nfrom cudf.core.index import as_index\nfrom cudf.tests import utils\nfrom cudf.utils.dtypes import (\n BOOL_TYPES,\n DATETIME_TYPES,\n FLOAT_TYPES,\n INTEGER_TYPES,\n NUMERIC_TYPES,\n TIMEDELTA_TYPES,\n)\n\nSTRING_TYPES = {\"str\"}\n\n_binops = [\n operator.add,\n operator.sub,\n operator.mul,\n operator.floordiv,\n operator.truediv,\n operator.mod,\n operator.pow,\n]\n\n\[email protected](\"obj_class\", [\"Series\", \"Index\"])\[email protected](\"binop\", _binops)\ndef test_series_binop(binop, obj_class):\n nelem = 1000\n arr1 = utils.gen_rand(\"float64\", nelem) * 10000\n # Keeping a low value because CUDA 'pow' has 2 full range error\n arr2 = utils.gen_rand(\"float64\", nelem) * 10\n\n sr1 = Series(arr1)\n sr2 = Series(arr2)\n\n if obj_class == \"Index\":\n sr1 = as_index(sr1)\n sr2 = as_index(sr2)\n\n result = binop(sr1, sr2)\n expect = binop(pd.Series(arr1), pd.Series(arr2))\n\n if obj_class == \"Index\":\n result = Series(result)\n\n utils.assert_eq(result, expect)\n\n\[email protected](\"binop\", _binops)\ndef test_series_binop_concurrent(binop):\n def func(index):\n arr = np.random.random(100) * 10\n sr = Series(arr)\n\n result = binop(sr.astype(\"int32\"), sr)\n expect = binop(arr.astype(\"int32\"), arr)\n\n np.testing.assert_almost_equal(result.to_array(), expect, decimal=5)\n\n from concurrent.futures import ThreadPoolExecutor\n\n indices = range(10)\n with ThreadPoolExecutor(4) as e: # four processes\n list(e.map(func, indices))\n\n\[email protected](\"use_cudf_scalar\", [False, True])\[email protected](\"obj_class\", [\"Series\", \"Index\"])\[email protected](\"nelem,binop\", list(product([1, 2, 100], _binops)))\ndef test_series_binop_scalar(nelem, binop, obj_class, use_cudf_scalar):\n arr = np.random.random(nelem)\n rhs = random.choice(arr).item()\n\n sr = Series(arr)\n if obj_class == \"Index\":\n sr = as_index(sr)\n\n if use_cudf_scalar:\n result = binop(sr, rhs)\n else:\n result = binop(sr, cudf.Scalar(rhs))\n\n if obj_class == \"Index\":\n result = Series(result)\n\n np.testing.assert_almost_equal(result.to_array(), binop(arr, rhs))\n\n\n_bitwise_binops = [operator.and_, operator.or_, operator.xor]\n\n\n_int_types = [\n \"int8\",\n \"int16\",\n \"int32\",\n \"int64\",\n \"uint8\",\n \"uint16\",\n \"uint32\",\n]\n\n\[email protected](\"obj_class\", [\"Series\", \"Index\"])\[email protected](\"binop\", _bitwise_binops)\[email protected](\n \"lhs_dtype,rhs_dtype\", list(product(_int_types, _int_types))\n)\ndef test_series_bitwise_binop(binop, obj_class, lhs_dtype, rhs_dtype):\n arr1 = (np.random.random(100) * 100).astype(lhs_dtype)\n sr1 = Series(arr1)\n\n arr2 = (np.random.random(100) * 100).astype(rhs_dtype)\n sr2 = Series(arr2)\n\n if obj_class == \"Index\":\n sr1 = as_index(sr1)\n sr2 = as_index(sr2)\n\n result = binop(sr1, sr2)\n\n if obj_class == \"Index\":\n result = Series(result)\n\n np.testing.assert_almost_equal(result.to_array(), binop(arr1, arr2))\n\n\n_logical_binops = [\n (operator.and_, operator.and_),\n (operator.or_, operator.or_),\n (np.logical_and, cudf.logical_and),\n (np.logical_or, cudf.logical_or),\n]\n\n\[email protected](\"lhstype\", _int_types + [np.bool_])\[email protected](\"rhstype\", _int_types + [np.bool_])\[email protected](\"binop,cubinop\", _logical_binops)\ndef 
test_series_logical_binop(lhstype, rhstype, binop, cubinop):\n arr1 = pd.Series(np.random.choice([True, False], 10))\n if lhstype is not np.bool_:\n arr1 = arr1 * (np.random.random(10) * 100).astype(lhstype)\n sr1 = Series(arr1)\n\n arr2 = pd.Series(np.random.choice([True, False], 10))\n if rhstype is not np.bool_:\n arr2 = arr2 * (np.random.random(10) * 100).astype(rhstype)\n sr2 = Series(arr2)\n\n result = cubinop(sr1, sr2)\n expect = binop(arr1, arr2)\n\n utils.assert_eq(result, expect)\n\n\n_cmpops = [\n operator.lt,\n operator.gt,\n operator.le,\n operator.ge,\n operator.eq,\n operator.ne,\n]\n\n\[email protected](\"obj_class\", [\"Series\", \"Index\"])\[email protected](\"cmpop\", _cmpops)\[email protected](\n \"dtype\", [\"int8\", \"int32\", \"int64\", \"float32\", \"float64\", \"datetime64[ms]\"]\n)\ndef test_series_compare(cmpop, obj_class, dtype):\n arr1 = np.random.randint(0, 100, 100).astype(dtype)\n arr2 = np.random.randint(0, 100, 100).astype(dtype)\n sr1 = Series(arr1)\n sr2 = Series(arr2)\n\n if obj_class == \"Index\":\n sr1 = as_index(sr1)\n sr2 = as_index(sr2)\n\n result1 = cmpop(sr1, sr1)\n result2 = cmpop(sr2, sr2)\n result3 = cmpop(sr1, sr2)\n\n if obj_class == \"Index\":\n result1 = Series(result1)\n result2 = Series(result2)\n result3 = Series(result3)\n\n np.testing.assert_equal(result1.to_array(), cmpop(arr1, arr1))\n np.testing.assert_equal(result2.to_array(), cmpop(arr2, arr2))\n np.testing.assert_equal(result3.to_array(), cmpop(arr1, arr2))\n\n\ndef _series_compare_nulls_typegen():\n tests = []\n tests += list(product(DATETIME_TYPES, DATETIME_TYPES))\n tests += list(product(TIMEDELTA_TYPES, TIMEDELTA_TYPES))\n tests += list(product(NUMERIC_TYPES, NUMERIC_TYPES))\n tests += list(product(STRING_TYPES, STRING_TYPES))\n\n return tests\n\n\[email protected](\"cmpop\", _cmpops)\[email protected](\"dtypes\", _series_compare_nulls_typegen())\ndef test_series_compare_nulls(cmpop, dtypes):\n ltype, rtype = dtypes\n\n ldata = [1, 2, None, None, 5]\n rdata = [2, 1, None, 4, None]\n\n lser = Series(ldata, dtype=ltype)\n rser = Series(rdata, dtype=rtype)\n\n lmask = ~lser.isnull()\n rmask = ~rser.isnull()\n\n expect_mask = np.logical_and(lmask, rmask)\n expect = cudf.Series([None] * 5, dtype=\"bool\")\n expect[expect_mask] = cmpop(lser[expect_mask], rser[expect_mask])\n\n got = cmpop(lser, rser)\n utils.assert_eq(expect, got)\n\n\[email protected](\n \"obj\", [pd.Series([\"a\", \"b\", None, \"d\", \"e\", None], dtype=\"string\"), \"a\"]\n)\[email protected](\"cmpop\", _cmpops)\[email protected](\n \"cmp_obj\",\n [pd.Series([\"b\", \"a\", None, \"d\", \"f\", None], dtype=\"string\"), \"a\"],\n)\ndef test_string_series_compare(obj, cmpop, cmp_obj):\n\n g_obj = obj\n if isinstance(g_obj, pd.Series):\n g_obj = Series.from_pandas(g_obj)\n g_cmp_obj = cmp_obj\n if isinstance(g_cmp_obj, pd.Series):\n g_cmp_obj = Series.from_pandas(g_cmp_obj)\n got = cmpop(g_obj, g_cmp_obj)\n expected = cmpop(obj, cmp_obj)\n\n if isinstance(expected, pd.Series):\n expected = cudf.from_pandas(expected)\n\n utils.assert_eq(expected, got)\n\n\[email protected](\"obj_class\", [\"Series\", \"Index\"])\[email protected](\"nelem\", [1, 2, 100])\[email protected](\"cmpop\", _cmpops)\[email protected](\"dtype\", utils.NUMERIC_TYPES + [\"datetime64[ms]\"])\[email protected](\"use_cudf_scalar\", [True, False])\ndef test_series_compare_scalar(\n nelem, cmpop, obj_class, dtype, use_cudf_scalar\n):\n arr1 = np.random.randint(0, 100, 100).astype(dtype)\n sr1 = Series(arr1)\n rhs = random.choice(arr1).item()\n\n if 
use_cudf_scalar:\n rhs = cudf.Scalar(rhs)\n\n if obj_class == \"Index\":\n sr1 = as_index(sr1)\n\n result1 = cmpop(sr1, rhs)\n result2 = cmpop(rhs, sr1)\n\n if obj_class == \"Index\":\n result1 = Series(result1)\n result2 = Series(result2)\n\n np.testing.assert_equal(result1.to_array(), cmpop(arr1, rhs))\n np.testing.assert_equal(result2.to_array(), cmpop(rhs, arr1))\n\n\n_nulls = [\"none\", \"some\"]\n\n\[email protected](\"nelem\", [1, 7, 8, 9, 32, 64, 128])\[email protected](\"lhs_nulls,rhs_nulls\", list(product(_nulls, _nulls)))\ndef test_validity_add(nelem, lhs_nulls, rhs_nulls):\n np.random.seed(0)\n # LHS\n lhs_data = np.random.random(nelem)\n if lhs_nulls == \"some\":\n lhs_mask = utils.random_bitmask(nelem)\n lhs_bitmask = utils.expand_bits_to_bytes(lhs_mask)[:nelem]\n lhs_null_count = utils.count_zero(lhs_bitmask)\n assert lhs_null_count >= 0\n lhs = Series.from_masked_array(lhs_data, lhs_mask)\n assert lhs.null_count == lhs_null_count\n else:\n lhs = Series(lhs_data)\n # RHS\n rhs_data = np.random.random(nelem)\n if rhs_nulls == \"some\":\n rhs_mask = utils.random_bitmask(nelem)\n rhs_bitmask = utils.expand_bits_to_bytes(rhs_mask)[:nelem]\n rhs_null_count = utils.count_zero(rhs_bitmask)\n assert rhs_null_count >= 0\n rhs = Series.from_masked_array(rhs_data, rhs_mask)\n assert rhs.null_count == rhs_null_count\n else:\n rhs = Series(rhs_data)\n # Result\n res = lhs + rhs\n if lhs_nulls == \"some\" and rhs_nulls == \"some\":\n res_mask = np.asarray(\n utils.expand_bits_to_bytes(lhs_mask & rhs_mask), dtype=np.bool_\n )[:nelem]\n if lhs_nulls == \"some\" and rhs_nulls == \"none\":\n res_mask = np.asarray(\n utils.expand_bits_to_bytes(lhs_mask), dtype=np.bool_\n )[:nelem]\n if lhs_nulls == \"none\" and rhs_nulls == \"some\":\n res_mask = np.asarray(\n utils.expand_bits_to_bytes(rhs_mask), dtype=np.bool_\n )[:nelem]\n # Fill NA values\n na_value = -10000\n got = res.fillna(na_value).to_array()\n expect = lhs_data + rhs_data\n if lhs_nulls == \"some\" or rhs_nulls == \"some\":\n expect[~res_mask] = na_value\n\n np.testing.assert_array_equal(expect, got)\n\n\[email protected](\"obj_class\", [\"Series\", \"Index\"])\[email protected](\n \"binop,lhs_dtype,rhs_dtype\",\n list(\n product(\n [operator.add, operator.mul],\n utils.NUMERIC_TYPES,\n utils.NUMERIC_TYPES,\n )\n ),\n)\ndef test_series_binop_mixed_dtype(binop, lhs_dtype, rhs_dtype, obj_class):\n nelem = 10\n lhs = (np.random.random(nelem) * nelem).astype(lhs_dtype)\n rhs = (np.random.random(nelem) * nelem).astype(rhs_dtype)\n\n sr1 = Series(lhs)\n sr2 = Series(rhs)\n\n if obj_class == \"Index\":\n sr1 = as_index(sr1)\n sr2 = as_index(sr2)\n\n result = binop(Series(sr1), Series(sr2))\n\n if obj_class == \"Index\":\n result = Series(result)\n\n np.testing.assert_almost_equal(result.to_array(), binop(lhs, rhs))\n\n\[email protected](\"obj_class\", [\"Series\", \"Index\"])\[email protected](\n \"cmpop,lhs_dtype,rhs_dtype\",\n list(product(_cmpops, utils.NUMERIC_TYPES, utils.NUMERIC_TYPES)),\n)\ndef test_series_cmpop_mixed_dtype(cmpop, lhs_dtype, rhs_dtype, obj_class):\n nelem = 5\n lhs = (np.random.random(nelem) * nelem).astype(lhs_dtype)\n rhs = (np.random.random(nelem) * nelem).astype(rhs_dtype)\n\n sr1 = Series(lhs)\n sr2 = Series(rhs)\n\n if obj_class == \"Index\":\n sr1 = as_index(sr1)\n sr2 = as_index(sr2)\n\n result = cmpop(Series(sr1), Series(sr2))\n\n if obj_class == \"Index\":\n result = Series(result)\n\n np.testing.assert_array_equal(result.to_array(), cmpop(lhs, rhs))\n\n\n_reflected_ops = [\n lambda x: 1 + x,\n lambda x: 2 
* x,\n lambda x: 2 - x,\n lambda x: 2 // x,\n lambda x: 2 / x,\n lambda x: 3 + x,\n lambda x: 3 * x,\n lambda x: 3 - x,\n lambda x: 3 // x,\n lambda x: 3 / x,\n lambda x: 3 % x,\n lambda x: -1 + x,\n lambda x: -2 * x,\n lambda x: -2 - x,\n lambda x: -2 // x,\n lambda x: -2 / x,\n lambda x: -3 + x,\n lambda x: -3 * x,\n lambda x: -3 - x,\n lambda x: -3 // x,\n lambda x: -3 / x,\n lambda x: -3 % x,\n lambda x: 0 + x,\n lambda x: 0 * x,\n lambda x: 0 - x,\n lambda x: 0 // x,\n lambda x: 0 / x,\n]\n\n\[email protected](\"obj_class\", [\"Series\", \"Index\"])\[email protected](\n \"func, dtype\", list(product(_reflected_ops, utils.NUMERIC_TYPES))\n)\ndef test_reflected_ops_scalar(func, dtype, obj_class):\n # create random series\n np.random.seed(12)\n random_series = utils.gen_rand(dtype, 100, low=10)\n\n # gpu series\n gs = Series(random_series)\n\n # class typing\n if obj_class == \"Index\":\n gs = as_index(gs)\n\n gs_result = func(gs)\n\n # class typing\n if obj_class == \"Index\":\n gs = Series(gs)\n\n # pandas\n ps_result = func(random_series)\n\n # verify\n np.testing.assert_allclose(ps_result, gs_result.to_array())\n\n\n_cudf_scalar_reflected_ops = [\n lambda x: cudf.Scalar(1) + x,\n lambda x: cudf.Scalar(2) * x,\n lambda x: cudf.Scalar(2) - x,\n lambda x: cudf.Scalar(2) // x,\n lambda x: cudf.Scalar(2) / x,\n lambda x: cudf.Scalar(3) + x,\n lambda x: cudf.Scalar(3) * x,\n lambda x: cudf.Scalar(3) - x,\n lambda x: cudf.Scalar(3) // x,\n lambda x: cudf.Scalar(3) / x,\n lambda x: cudf.Scalar(3) % x,\n lambda x: cudf.Scalar(-1) + x,\n lambda x: cudf.Scalar(-2) * x,\n lambda x: cudf.Scalar(-2) - x,\n lambda x: cudf.Scalar(-2) // x,\n lambda x: cudf.Scalar(-2) / x,\n lambda x: cudf.Scalar(-3) + x,\n lambda x: cudf.Scalar(-3) * x,\n lambda x: cudf.Scalar(-3) - x,\n lambda x: cudf.Scalar(-3) // x,\n lambda x: cudf.Scalar(-3) / x,\n lambda x: cudf.Scalar(-3) % x,\n lambda x: cudf.Scalar(0) + x,\n lambda x: cudf.Scalar(0) * x,\n lambda x: cudf.Scalar(0) - x,\n lambda x: cudf.Scalar(0) // x,\n lambda x: cudf.Scalar(0) / x,\n]\n\n\[email protected](\"obj_class\", [\"Series\", \"Index\"])\[email protected](\n \"funcs, dtype\",\n list(\n product(\n list(zip(_reflected_ops, _cudf_scalar_reflected_ops)),\n utils.NUMERIC_TYPES,\n )\n ),\n)\ndef test_reflected_ops_cudf_scalar(funcs, dtype, obj_class):\n cpu_func, gpu_func = funcs\n\n # create random series\n np.random.seed(12)\n random_series = utils.gen_rand(dtype, 100, low=10)\n\n # gpu series\n gs = Series(random_series)\n\n # class typing\n if obj_class == \"Index\":\n gs = as_index(gs)\n\n gs_result = gpu_func(gs)\n\n # class typing\n if obj_class == \"Index\":\n gs = Series(gs)\n\n # pandas\n ps_result = cpu_func(random_series)\n\n # verify\n np.testing.assert_allclose(ps_result, gs_result.to_array())\n\n\[email protected](\"binop\", _binops)\ndef test_different_shapes_and_columns(binop):\n\n # TODO: support `pow()` on NaN values. 
Particularly, the cases:\n # `pow(1, NaN) == 1` and `pow(NaN, 0) == 1`\n if binop is operator.pow:\n return\n\n # Empty frame on the right side\n pd_frame = binop(pd.DataFrame({\"x\": [1, 2]}), pd.DataFrame({}))\n cd_frame = binop(cudf.DataFrame({\"x\": [1, 2]}), cudf.DataFrame({}))\n utils.assert_eq(cd_frame, pd_frame)\n\n # Empty frame on the left side\n pd_frame = pd.DataFrame({}) + pd.DataFrame({\"x\": [1, 2]})\n cd_frame = cudf.DataFrame({}) + cudf.DataFrame({\"x\": [1, 2]})\n utils.assert_eq(cd_frame, pd_frame)\n\n # Note: the below rely on a discrepancy between cudf and pandas\n # While pandas inserts columns in alphabetical order, cudf inserts in the\n # order of whichever column comes first. So the following code will not\n # work if the names of columns are reversed i.e. ('y', 'x') != ('x', 'y')\n\n # More rows on the left side\n pd_frame = pd.DataFrame({\"x\": [1, 2, 3]}) + pd.DataFrame({\"y\": [1, 2]})\n cd_frame = cudf.DataFrame({\"x\": [1, 2, 3]}) + cudf.DataFrame({\"y\": [1, 2]})\n utils.assert_eq(cd_frame, pd_frame)\n\n # More rows on the right side\n pd_frame = pd.DataFrame({\"x\": [1, 2]}) + pd.DataFrame({\"y\": [1, 2, 3]})\n cd_frame = cudf.DataFrame({\"x\": [1, 2]}) + cudf.DataFrame({\"y\": [1, 2, 3]})\n utils.assert_eq(cd_frame, pd_frame)\n\n\[email protected](\"binop\", _binops)\ndef test_different_shapes_and_same_columns(binop):\n\n # TODO: support `pow()` on NaN values. Particularly, the cases:\n # `pow(1, NaN) == 1` and `pow(NaN, 0) == 1`\n if binop is operator.pow:\n return\n\n pd_frame = binop(\n pd.DataFrame({\"x\": [1, 2]}), pd.DataFrame({\"x\": [1, 2, 3]})\n )\n cd_frame = binop(\n cudf.DataFrame({\"x\": [1, 2]}), cudf.DataFrame({\"x\": [1, 2, 3]})\n )\n # cast x as float64 so it matches pandas dtype\n cd_frame[\"x\"] = cd_frame[\"x\"].astype(np.float64)\n utils.assert_eq(cd_frame, pd_frame)\n\n\[email protected](\"binop\", _binops)\ndef test_different_shapes_and_columns_with_unaligned_indices(binop):\n\n # TODO: support `pow()` on NaN values. 
Particularly, the cases:\n # `pow(1, NaN) == 1` and `pow(NaN, 0) == 1`\n if binop is operator.pow:\n return\n\n # Test with a RangeIndex\n pdf1 = pd.DataFrame({\"x\": [4, 3, 2, 1], \"y\": [7, 3, 8, 6]})\n # Test with a GenericIndex\n pdf2 = pd.DataFrame(\n {\"x\": [1, 2, 3, 7], \"y\": [4, 5, 6, 7]}, index=[0, 1, 3, 4]\n )\n # Test with a GenericIndex in a different order\n pdf3 = pd.DataFrame(\n {\"x\": [4, 5, 6, 7], \"y\": [1, 2, 3, 7], \"z\": [0, 5, 3, 7]},\n index=[0, 3, 5, 3],\n )\n gdf1 = cudf.DataFrame.from_pandas(pdf1)\n gdf2 = cudf.DataFrame.from_pandas(pdf2)\n gdf3 = cudf.DataFrame.from_pandas(pdf3)\n\n pd_frame = binop(binop(pdf1, pdf2), pdf3)\n cd_frame = binop(binop(gdf1, gdf2), gdf3)\n # cast x and y as float64 so it matches pandas dtype\n cd_frame[\"x\"] = cd_frame[\"x\"].astype(np.float64)\n cd_frame[\"y\"] = cd_frame[\"y\"].astype(np.float64)\n utils.assert_eq(cd_frame, pd_frame)\n\n\[email protected](\n \"df2\",\n [\n cudf.DataFrame({\"a\": [3, 2, 1]}, index=[3, 2, 1]),\n cudf.DataFrame([3, 2]),\n ],\n)\[email protected](\"binop\", [operator.eq, operator.ne])\ndef test_df_different_index_shape(df2, binop):\n df1 = cudf.DataFrame([1, 2, 3], index=[1, 2, 3])\n\n pdf1 = df1.to_pandas()\n pdf2 = df2.to_pandas()\n\n utils.assert_exceptions_equal(\n lfunc=binop,\n rfunc=binop,\n lfunc_args_and_kwargs=([pdf1, pdf2],),\n rfunc_args_and_kwargs=([df1, df2],),\n )\n\n\[email protected](\"op\", [operator.eq, operator.ne])\ndef test_boolean_scalar_binop(op):\n psr = pd.Series(np.random.choice([True, False], 10))\n gsr = cudf.from_pandas(psr)\n utils.assert_eq(op(psr, True), op(gsr, True))\n utils.assert_eq(op(psr, False), op(gsr, False))\n\n # cuDF scalar\n utils.assert_eq(op(psr, True), op(gsr, cudf.Scalar(True)))\n utils.assert_eq(op(psr, False), op(gsr, cudf.Scalar(False)))\n\n\n_operators_arithmetic = [\n \"add\",\n \"radd\",\n \"sub\",\n \"rsub\",\n \"mul\",\n \"rmul\",\n \"mod\",\n \"rmod\",\n \"pow\",\n \"rpow\",\n \"floordiv\",\n \"rfloordiv\",\n \"truediv\",\n \"rtruediv\",\n]\n\n_operators_comparison = [\"eq\", \"ne\", \"lt\", \"le\", \"gt\", \"ge\"]\n\n\[email protected](\"func\", _operators_arithmetic)\[email protected](\"has_nulls\", [True, False])\[email protected](\"fill_value\", [None, 27])\[email protected](\"dtype\", [\"float32\", \"float64\"])\ndef test_operator_func_between_series(dtype, func, has_nulls, fill_value):\n count = 1000\n gdf_series_a = utils.gen_rand_series(\n dtype, count, has_nulls=has_nulls, stride=10000\n )\n gdf_series_b = utils.gen_rand_series(\n dtype, count, has_nulls=has_nulls, stride=100\n )\n pdf_series_a = gdf_series_a.to_pandas()\n pdf_series_b = gdf_series_b.to_pandas()\n\n gdf_result = getattr(gdf_series_a, func)(\n gdf_series_b, fill_value=fill_value\n )\n pdf_result = getattr(pdf_series_a, func)(\n pdf_series_b, fill_value=fill_value\n )\n\n utils.assert_eq(pdf_result, gdf_result)\n\n\[email protected](\"func\", _operators_arithmetic)\[email protected](\"has_nulls\", [True, False])\[email protected](\"fill_value\", [None, 27])\[email protected](\"dtype\", [\"float32\", \"float64\"])\[email protected](\"use_cudf_scalar\", [False, True])\ndef test_operator_func_series_and_scalar(\n dtype, func, has_nulls, fill_value, use_cudf_scalar\n):\n count = 1000\n scalar = 59\n gdf_series = utils.gen_rand_series(\n dtype, count, has_nulls=has_nulls, stride=10000\n )\n pdf_series = gdf_series.to_pandas()\n\n gdf_series_result = getattr(gdf_series, func)(\n cudf.Scalar(scalar) if use_cudf_scalar else scalar,\n fill_value=fill_value,\n )\n 
pdf_series_result = getattr(pdf_series, func)(\n scalar, fill_value=fill_value\n )\n\n utils.assert_eq(pdf_series_result, gdf_series_result)\n\n\n_permu_values = [0, 1, None, np.nan]\n\n\[email protected](\"fill_value\", _permu_values)\[email protected](\"scalar_a\", _permu_values)\[email protected](\"scalar_b\", _permu_values)\[email protected](\"func\", _operators_comparison)\[email protected](\"dtype\", [\"float32\", \"float64\"])\ndef test_operator_func_between_series_logical(\n dtype, func, scalar_a, scalar_b, fill_value\n):\n\n gdf_series_a = Series([scalar_a], nan_as_null=False).astype(dtype)\n gdf_series_b = Series([scalar_b], nan_as_null=False).astype(dtype)\n\n pdf_series_a = gdf_series_a.to_pandas(nullable=True)\n pdf_series_b = gdf_series_b.to_pandas(nullable=True)\n\n gdf_series_result = getattr(gdf_series_a, func)(\n gdf_series_b, fill_value=fill_value\n )\n pdf_series_result = getattr(pdf_series_a, func)(\n pdf_series_b, fill_value=fill_value\n )\n expect = pdf_series_result\n got = gdf_series_result.to_pandas(nullable=True)\n\n # If fill_value is np.nan, things break down a bit,\n # because setting a NaN into a pandas nullable float\n # array still gets transformed to <NA>. As such,\n # pd_series_with_nulls.fillna(np.nan) has no effect.\n if (\n (pdf_series_a.isnull().sum() != pdf_series_b.isnull().sum())\n and np.isscalar(fill_value)\n and np.isnan(fill_value)\n ):\n with pytest.raises(AssertionError):\n utils.assert_eq(expect, got)\n return\n utils.assert_eq(expect, got)\n\n\[email protected](\"dtype\", [\"float32\", \"float64\"])\[email protected](\"func\", _operators_comparison)\[email protected](\"has_nulls\", [True, False])\[email protected](\"scalar\", [-59.0, np.nan, 0, 59.0])\[email protected](\"fill_value\", [None, True, False, 1.0])\[email protected](\"use_cudf_scalar\", [False, True])\ndef test_operator_func_series_and_scalar_logical(\n dtype, func, has_nulls, scalar, fill_value, use_cudf_scalar\n):\n gdf_series = utils.gen_rand_series(\n dtype, 1000, has_nulls=has_nulls, stride=10000\n )\n pdf_series = gdf_series.to_pandas(nullable=True)\n gdf_series_result = getattr(gdf_series, func)(\n cudf.Scalar(scalar) if use_cudf_scalar else scalar,\n fill_value=fill_value,\n )\n pdf_series_result = getattr(pdf_series, func)(\n scalar, fill_value=fill_value\n )\n\n expect = pdf_series_result\n got = gdf_series_result.to_pandas(nullable=True)\n\n utils.assert_eq(expect, got)\n\n\[email protected](\"func\", _operators_arithmetic)\[email protected](\"nulls\", _nulls)\[email protected](\"fill_value\", [None, 27])\[email protected](\"other\", [\"df\", \"scalar\"])\ndef test_operator_func_dataframe(func, nulls, fill_value, other):\n num_rows = 100\n num_cols = 3\n\n def gen_df():\n pdf = pd.DataFrame()\n from string import ascii_lowercase\n\n cols = np.random.choice(num_cols + 5, num_cols, replace=False)\n\n for i in range(num_cols):\n colname = ascii_lowercase[cols[i]]\n data = utils.gen_rand(\"float64\", num_rows) * 10000\n if nulls == \"some\":\n idx = np.random.choice(\n num_rows, size=int(num_rows / 2), replace=False\n )\n data[idx] = np.nan\n pdf[colname] = data\n return pdf\n\n pdf1 = gen_df()\n pdf2 = gen_df() if other == \"df\" else 59.0\n gdf1 = cudf.DataFrame.from_pandas(pdf1)\n gdf2 = cudf.DataFrame.from_pandas(pdf2) if other == \"df\" else 59.0\n\n got = getattr(gdf1, func)(gdf2, fill_value=fill_value)\n expect = getattr(pdf1, func)(pdf2, fill_value=fill_value)[list(got._data)]\n\n utils.assert_eq(expect, got)\n\n\[email protected](\"func\", _operators_arithmetic + 
_operators_comparison)\[email protected](\"rhs\", [0, 1, 2, 128])\ndef test_binop_bool_uint(func, rhs):\n # TODO: remove this once issue #2172 is resolved\n if func == \"rmod\" or func == \"rfloordiv\":\n return\n psr = pd.Series([True, False, False])\n gsr = cudf.from_pandas(psr)\n utils.assert_eq(\n getattr(psr, func)(rhs), getattr(gsr, func)(rhs), check_dtype=False\n )\n\n\ndef test_series_misc_binop():\n pds = pd.Series([1, 2, 4], name=\"abc xyz\")\n gds = cudf.Series([1, 2, 4], name=\"abc xyz\")\n\n utils.assert_eq(pds + 1, gds + 1)\n utils.assert_eq(1 + pds, 1 + gds)\n\n utils.assert_eq(pds + pds, gds + gds)\n\n pds1 = pd.Series([1, 2, 4], name=\"hello world\")\n gds1 = cudf.Series([1, 2, 4], name=\"hello world\")\n\n utils.assert_eq(pds + pds1, gds + gds1)\n utils.assert_eq(pds1 + pds, gds1 + gds)\n\n utils.assert_eq(pds1 + pds + 5, gds1 + gds + 5)\n\n\ndef test_int8_float16_binop():\n a = cudf.Series([1], dtype=\"int8\")\n b = np.float16(2)\n expect = cudf.Series([0.5])\n got = a / b\n utils.assert_eq(expect, got, check_dtype=False)\n\n\[email protected](\"dtype\", [\"int64\", \"float64\", \"str\"])\ndef test_vector_to_none_binops(dtype):\n data = Series([1, 2, 3, None], dtype=dtype)\n\n expect = Series([None] * 4).astype(dtype)\n got = data + None\n\n utils.assert_eq(expect, got)\n\n\[email protected](\n \"lhs\",\n [\n 1,\n 3,\n 4,\n pd.Series([5, 6, 2]),\n pd.Series([0, 10, 20, 30, 3, 4, 5, 6, 2]),\n 6,\n ],\n)\[email protected](\"rhs\", [1, 3, 4, pd.Series([5, 6, 2])])\[email protected](\n \"ops\",\n [\n (np.remainder, cudf.remainder),\n (np.floor_divide, cudf.floor_divide),\n (np.subtract, cudf.subtract),\n (np.add, cudf.add),\n (np.true_divide, cudf.true_divide),\n (np.multiply, cudf.multiply),\n ],\n)\ndef test_ufunc_ops(lhs, rhs, ops):\n np_op, cu_op = ops\n\n if isinstance(lhs, pd.Series):\n culhs = cudf.from_pandas(lhs)\n else:\n culhs = lhs\n\n if isinstance(rhs, pd.Series):\n curhs = cudf.from_pandas(rhs)\n else:\n curhs = rhs\n\n expect = np_op(lhs, rhs)\n got = cu_op(culhs, curhs)\n if np.isscalar(expect):\n assert got == expect\n else:\n utils.assert_eq(\n expect, got,\n )\n\n\ndef dtype_scalar(val, dtype):\n if dtype == \"str\":\n return str(val)\n dtype = np.dtype(dtype)\n if dtype.type in {np.datetime64, np.timedelta64}:\n res, _ = np.datetime_data(dtype)\n return dtype.type(val, res)\n else:\n return dtype.type(val)\n\n\ndef make_valid_scalar_add_data():\n valid = set()\n\n # to any int, we may add any kind of\n # other int, float, datetime timedelta, or bool\n valid |= set(\n product(\n INTEGER_TYPES,\n FLOAT_TYPES | DATETIME_TYPES | TIMEDELTA_TYPES | BOOL_TYPES,\n )\n )\n\n # to any float, we may add any int, float, or bool\n valid |= set(\n product(FLOAT_TYPES, INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES)\n )\n\n # to any datetime, we may add any int, timedelta, or bool\n valid |= set(\n product(DATETIME_TYPES, INTEGER_TYPES | TIMEDELTA_TYPES | BOOL_TYPES)\n )\n\n # to any timedelta, we may add any int, datetime, other timedelta, or bool\n valid |= set(\n product(TIMEDELTA_TYPES, INTEGER_TYPES | DATETIME_TYPES | BOOL_TYPES)\n )\n\n # to any bool, we may add any int, float, datetime, timedelta, or bool\n valid |= set(\n product(\n BOOL_TYPES,\n INTEGER_TYPES\n | FLOAT_TYPES\n | DATETIME_TYPES\n | TIMEDELTA_TYPES\n | BOOL_TYPES,\n )\n )\n\n # to any string, we may add any other string\n valid |= {(\"str\", \"str\")}\n\n return sorted(list(valid))\n\n\ndef make_invalid_scalar_add_data():\n invalid = set()\n\n # we can not add a datetime to a float\n invalid |= 
set(product(FLOAT_TYPES, DATETIME_TYPES))\n\n # We can not add a timedelta to a float\n invalid |= set(product(FLOAT_TYPES, TIMEDELTA_TYPES))\n\n # we can not add a float to any datetime\n invalid |= set(product(DATETIME_TYPES, FLOAT_TYPES))\n\n # can can not add a datetime to a datetime\n invalid |= set(product(DATETIME_TYPES, DATETIME_TYPES))\n\n # can not add a timedelta to a float\n invalid |= set(product(FLOAT_TYPES, TIMEDELTA_TYPES))\n\n return sorted(list(invalid))\n\n\[email protected](\"dtype_l,dtype_r\", make_valid_scalar_add_data())\ndef test_scalar_add(dtype_l, dtype_r):\n test_value = 1\n\n lval_host = dtype_scalar(test_value, dtype=dtype_l)\n rval_host = dtype_scalar(test_value, dtype=dtype_r)\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n # expect = np.add(lval_host, rval_host)\n expect = lval_host + rval_host\n got = lval_gpu + rval_gpu\n\n assert expect == got.value\n if not dtype_l == dtype_r == \"str\":\n assert expect.dtype == got.dtype\n\n\[email protected](\"dtype_l,dtype_r\", make_invalid_scalar_add_data())\ndef test_scalar_add_invalid(dtype_l, dtype_r):\n test_value = 1\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n with pytest.raises(TypeError):\n lval_gpu + rval_gpu\n\n\ndef make_scalar_difference_data():\n valid = set()\n\n # from an int, we may subtract any int, float, timedelta,\n # or boolean value\n valid |= set(\n product(\n INTEGER_TYPES,\n INTEGER_TYPES | FLOAT_TYPES | TIMEDELTA_TYPES | BOOL_TYPES,\n )\n )\n\n # from any float, we may subtract any int, float, or bool\n valid |= set(\n product(FLOAT_TYPES, INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES)\n )\n\n # from any datetime we may subtract any int, datetime, timedelta, or bool\n valid |= set(\n product(\n DATETIME_TYPES,\n INTEGER_TYPES | DATETIME_TYPES | TIMEDELTA_TYPES | BOOL_TYPES,\n )\n )\n\n # from any timedelta we may subtract any int, timedelta, or bool\n valid |= set(\n product(TIMEDELTA_TYPES, INTEGER_TYPES | TIMEDELTA_TYPES | BOOL_TYPES)\n )\n\n # from any bool we may subtract any int, float or timedelta\n valid |= set(\n product(BOOL_TYPES, INTEGER_TYPES | FLOAT_TYPES | TIMEDELTA_TYPES)\n )\n\n return sorted(list(valid))\n\n\ndef make_scalar_difference_data_invalid():\n invalid = set()\n\n # we can't subtract a datetime from an int\n invalid |= set(product(INTEGER_TYPES, DATETIME_TYPES))\n\n # we can't subtract a datetime or timedelta from a float\n invalid |= set(product(FLOAT_TYPES, DATETIME_TYPES | TIMEDELTA_TYPES))\n\n # we can't subtract a float from a datetime or timedelta\n invalid |= set(product(DATETIME_TYPES | TIMEDELTA_TYPES, FLOAT_TYPES))\n\n # We can't subtract a datetime from a timedelta\n invalid |= set(product(TIMEDELTA_TYPES, DATETIME_TYPES))\n\n # we can't subtract a datetime or bool from a bool\n invalid |= set(product(BOOL_TYPES, BOOL_TYPES | DATETIME_TYPES))\n\n return sorted(list(invalid))\n\n\[email protected](\"dtype_l,dtype_r\", make_scalar_difference_data())\ndef test_scalar_difference(dtype_l, dtype_r):\n test_value = 1\n\n lval_host = dtype_scalar(test_value, dtype=dtype_l)\n rval_host = dtype_scalar(test_value, dtype=dtype_r)\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n expect = lval_host - rval_host\n got = lval_gpu - rval_gpu\n\n assert expect == got.value\n assert expect.dtype == got.dtype\n\n\[email protected](\n \"dtype_l,dtype_r\", 
make_scalar_difference_data_invalid()\n)\ndef test_scalar_difference_invalid(dtype_l, dtype_r):\n test_value = 1\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n with pytest.raises(TypeError):\n lval_gpu - rval_gpu\n\n\ndef make_scalar_product_data():\n valid = set()\n\n # we can multiply an int, or bool by any int, float, timedelta, or bool\n valid |= set(\n product(\n INTEGER_TYPES | BOOL_TYPES,\n INTEGER_TYPES | FLOAT_TYPES | TIMEDELTA_TYPES | BOOL_TYPES,\n )\n )\n\n # we can muliply any timedelta by any int, or bool\n valid |= set(product(TIMEDELTA_TYPES, INTEGER_TYPES | BOOL_TYPES))\n\n # we can multiply a float by any int, float, or bool\n valid |= set(\n product(FLOAT_TYPES, INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES)\n )\n\n return sorted(list(valid))\n\n\ndef make_scalar_product_data_invalid():\n invalid = set()\n\n # can't multiply a ints, floats, datetimes, timedeltas,\n # or bools by datetimes\n invalid |= set(\n product(\n INTEGER_TYPES\n | FLOAT_TYPES\n | DATETIME_TYPES\n | TIMEDELTA_TYPES\n | BOOL_TYPES,\n DATETIME_TYPES,\n )\n )\n\n # can't multiply datetimes with anything really\n invalid |= set(\n product(\n DATETIME_TYPES,\n INTEGER_TYPES\n | FLOAT_TYPES\n | DATETIME_TYPES\n | TIMEDELTA_TYPES\n | BOOL_TYPES,\n )\n )\n\n # can't multiply timedeltas by timedeltas\n invalid |= set(product(TIMEDELTA_TYPES, TIMEDELTA_TYPES))\n\n return sorted(list(invalid))\n\n\[email protected](\"dtype_l,dtype_r\", make_scalar_product_data())\ndef test_scalar_product(dtype_l, dtype_r):\n test_value = 1\n\n lval_host = dtype_scalar(test_value, dtype=dtype_l)\n rval_host = dtype_scalar(test_value, dtype=dtype_r)\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n expect = lval_host * rval_host\n got = lval_gpu * rval_gpu\n\n assert expect == got.value\n assert expect.dtype == got.dtype\n\n\[email protected](\"dtype_l,dtype_r\", make_scalar_product_data_invalid())\ndef test_scalar_product_invalid(dtype_l, dtype_r):\n test_value = 1\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n with pytest.raises(TypeError):\n lval_gpu * rval_gpu\n\n\ndef make_scalar_floordiv_data():\n valid = set()\n\n # we can divide ints and floats by other ints, floats, or bools\n valid |= set(\n product(\n INTEGER_TYPES | FLOAT_TYPES,\n INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,\n )\n )\n\n # we can divide timedeltas by ints, floats or other timedeltas\n valid |= set(\n product(TIMEDELTA_TYPES, INTEGER_TYPES | FLOAT_TYPES | TIMEDELTA_TYPES)\n )\n\n # we can divide bools by ints, floats or bools\n valid |= set(product(BOOL_TYPES, INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES))\n\n return sorted(list(valid))\n\n\ndef make_scalar_floordiv_data_invalid():\n invalid = set()\n\n # we can't numeric types into datelike types\n invalid |= set(\n product(\n INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,\n DATETIME_TYPES | TIMEDELTA_TYPES,\n )\n )\n\n # we can't divide datetime types into anything\n invalid |= set(\n product(\n DATETIME_TYPES,\n INTEGER_TYPES\n | FLOAT_TYPES\n | DATETIME_TYPES\n | TIMEDELTA_TYPES\n | BOOL_TYPES,\n )\n )\n\n # we can't divide timedeltas into bools, or datetimes\n invalid |= set(product(TIMEDELTA_TYPES, BOOL_TYPES | DATETIME_TYPES))\n\n return sorted(list(invalid))\n\n\[email protected](\"dtype_l,dtype_r\", make_scalar_floordiv_data())\ndef test_scalar_floordiv(dtype_l, dtype_r):\n test_value = 1\n\n lval_host = dtype_scalar(test_value, 
dtype=dtype_l)\n rval_host = dtype_scalar(test_value, dtype=dtype_r)\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n expect = lval_host // rval_host\n got = lval_gpu // rval_gpu\n\n assert expect == got.value\n assert expect.dtype == got.dtype\n\n\[email protected](\n \"dtype_l,dtype_r\", make_scalar_floordiv_data_invalid()\n)\ndef test_scalar_floordiv_invalid(dtype_l, dtype_r):\n test_value = 1\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n with pytest.raises(TypeError):\n lval_gpu // rval_gpu\n\n\ndef make_scalar_truediv_data():\n valid = set()\n\n # we can true divide ints, floats, or bools by other\n # ints, floats or bools\n valid |= set(\n product(\n INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,\n INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,\n )\n )\n\n # we can true divide timedeltas by ints floats or timedeltas\n valid |= set(product(TIMEDELTA_TYPES, INTEGER_TYPES | TIMEDELTA_TYPES))\n\n return sorted(list(valid))\n\n\ndef make_scalar_truediv_data_invalid():\n invalid = set()\n\n # we can't divide ints, floats or bools by datetimes\n # or timedeltas\n invalid |= set(\n product(\n INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,\n DATETIME_TYPES | TIMEDELTA_TYPES,\n )\n )\n\n # we cant true divide datetime types by anything\n invalid |= set(\n product(\n DATETIME_TYPES,\n INTEGER_TYPES\n | FLOAT_TYPES\n | DATETIME_TYPES\n | TIMEDELTA_TYPES\n | BOOL_TYPES,\n )\n )\n\n # we cant true divide timedeltas by datetimes or bools or floats\n invalid |= set(\n product(TIMEDELTA_TYPES, DATETIME_TYPES | BOOL_TYPES | FLOAT_TYPES)\n )\n\n return sorted(list(invalid))\n\n\[email protected](\"dtype_l,dtype_r\", make_scalar_truediv_data())\ndef test_scalar_truediv(dtype_l, dtype_r):\n test_value = 1\n\n lval_host = dtype_scalar(test_value, dtype=dtype_l)\n rval_host = dtype_scalar(test_value, dtype=dtype_r)\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n expect = np.true_divide(lval_host, rval_host)\n got = lval_gpu / rval_gpu\n\n assert expect == got.value\n\n # numpy bug\n\n if np.dtype(dtype_l).itemsize <= 2 and np.dtype(dtype_r).itemsize <= 2:\n assert expect.dtype == \"float64\" and got.dtype == \"float32\"\n else:\n assert expect.dtype == got.dtype\n # assert expect.dtype == got.dtype\n\n\[email protected](\"dtype_l,dtype_r\", make_scalar_truediv_data_invalid())\ndef test_scalar_truediv_invalid(dtype_l, dtype_r):\n test_value = 1\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n with pytest.raises(TypeError):\n lval_gpu / rval_gpu\n\n\ndef make_scalar_remainder_data():\n valid = set()\n\n # can mod numeric types with each other\n valid |= set(\n product(\n INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,\n INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,\n )\n )\n\n # can mod timedeltas by other timedeltas\n valid |= set(product(TIMEDELTA_TYPES, TIMEDELTA_TYPES))\n\n return sorted(list(valid))\n\n\ndef make_scalar_remainder_data_invalid():\n invalid = set()\n\n # numeric types cant be modded against timedeltas\n # or datetimes. 
Also, datetimes can't be modded\n # against datetimes or timedeltas\n invalid |= set(\n product(\n INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES | DATETIME_TYPES,\n DATETIME_TYPES | TIMEDELTA_TYPES,\n )\n )\n\n # datetime and timedelta types cant be modded against\n # any numeric types\n invalid |= set(\n product(\n DATETIME_TYPES | TIMEDELTA_TYPES,\n INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,\n )\n )\n\n # timedeltas cant mod with datetimes\n invalid |= set(product(TIMEDELTA_TYPES, DATETIME_TYPES))\n\n return sorted(list(invalid))\n\n\[email protected](\"dtype_l,dtype_r\", make_scalar_remainder_data())\ndef test_scalar_remainder(dtype_l, dtype_r):\n test_value = 1\n\n lval_host = dtype_scalar(test_value, dtype=dtype_l)\n rval_host = dtype_scalar(test_value, dtype=dtype_r)\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n expect = lval_host % rval_host\n got = lval_gpu % rval_gpu\n\n assert expect == got.value\n assert expect.dtype == got.dtype\n\n\[email protected](\n \"dtype_l,dtype_r\", make_scalar_remainder_data_invalid()\n)\ndef test_scalar_remainder_invalid(dtype_l, dtype_r):\n test_value = 1\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n with pytest.raises(TypeError):\n lval_gpu % rval_gpu\n\n\ndef make_scalar_power_data():\n # only numeric values form valid operands for power\n return sorted(\n product(\n INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,\n INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,\n )\n )\n\n\ndef make_scalar_power_data_invalid():\n invalid = set()\n\n # datetimes and timedeltas cant go in exponents\n invalid |= set(\n product(\n INTEGER_TYPES\n | FLOAT_TYPES\n | TIMEDELTA_TYPES\n | DATETIME_TYPES\n | BOOL_TYPES,\n DATETIME_TYPES | TIMEDELTA_TYPES,\n )\n )\n\n # datetimes and timedeltas may not be raised to\n # any exponent of any dtype\n invalid |= set(\n product(\n DATETIME_TYPES | TIMEDELTA_TYPES,\n DATETIME_TYPES\n | TIMEDELTA_TYPES\n | INTEGER_TYPES\n | FLOAT_TYPES\n | BOOL_TYPES,\n )\n )\n\n return sorted(list(invalid))\n\n\[email protected](\"dtype_l,dtype_r\", make_scalar_power_data())\ndef test_scalar_power(dtype_l, dtype_r):\n test_value = 1\n\n lval_host = dtype_scalar(test_value, dtype=dtype_l)\n rval_host = dtype_scalar(test_value, dtype=dtype_r)\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n expect = lval_host ** rval_host\n got = lval_gpu ** rval_gpu\n\n assert expect == got.value\n assert expect.dtype == got.dtype\n\n\[email protected](\"dtype_l,dtype_r\", make_scalar_power_data_invalid())\ndef test_scalar_power_invalid(dtype_l, dtype_r):\n test_value = 1\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n with pytest.raises(TypeError):\n lval_gpu ** rval_gpu\n\n\[email protected](\n \"date_col\",\n [\n [\n \"2000-01-01 00:00:00.012345678\",\n \"2000-01-31 00:00:00.012345678\",\n \"2000-02-29 00:00:00.012345678\",\n ]\n ],\n)\[email protected](\"n_periods\", [0, 1, -1, 12, -12])\[email protected](\n \"frequency\",\n [\n \"months\",\n \"years\",\n \"days\",\n \"hours\",\n \"minutes\",\n \"seconds\",\n \"microseconds\",\n pytest.param(\n \"nanoseconds\",\n marks=pytest.mark.xfail(\n reason=\"https://github.com/pandas-dev/pandas/issues/36589\"\n ),\n ),\n ],\n)\[email protected](\n \"dtype\",\n [\"datetime64[ns]\", \"datetime64[us]\", \"datetime64[ms]\", \"datetime64[s]\"],\n)\[email protected](\"op\", [operator.add, 
operator.sub])\ndef test_datetime_dateoffset_binaryop(\n date_col, n_periods, frequency, dtype, op\n):\n gsr = cudf.Series(date_col, dtype=dtype)\n psr = gsr.to_pandas() # converts to nanos\n\n kwargs = {frequency: n_periods}\n\n goffset = cudf.DateOffset(**kwargs)\n poffset = pd.DateOffset(**kwargs)\n\n expect = op(psr, poffset)\n got = op(gsr, goffset)\n\n utils.assert_eq(expect, got)\n\n expect = op(psr, -poffset)\n got = op(gsr, -goffset)\n\n utils.assert_eq(expect, got)\n\n\[email protected](\n \"date_col\",\n [\n [\n \"2000-01-01 00:00:00.012345678\",\n \"2000-01-31 00:00:00.012345678\",\n \"2000-02-29 00:00:00.012345678\",\n ]\n ],\n)\[email protected](\n \"kwargs\",\n [\n {\"months\": 2, \"years\": 5},\n {\"microseconds\": 1, \"seconds\": 1},\n {\"months\": 2, \"years\": 5, \"seconds\": 923, \"microseconds\": 481},\n pytest.param(\n {\"milliseconds\": 4},\n marks=pytest.mark.xfail(\n reason=\"Pandas gets the wrong answer for milliseconds\"\n ),\n ),\n pytest.param(\n {\"milliseconds\": 4, \"years\": 2},\n marks=pytest.mark.xfail(\n reason=\"Pandas construction fails with these keywords\"\n ),\n ),\n pytest.param(\n {\"nanoseconds\": 12},\n marks=pytest.mark.xfail(\n reason=\"Pandas gets the wrong answer for nanoseconds\"\n ),\n ),\n ],\n)\[email protected](\"op\", [operator.add, operator.sub])\ndef test_datetime_dateoffset_binaryop_multiple(date_col, kwargs, op):\n\n gsr = cudf.Series(date_col, dtype=\"datetime64[ns]\")\n psr = gsr.to_pandas()\n\n poffset = pd.DateOffset(**kwargs)\n goffset = cudf.DateOffset(**kwargs)\n\n expect = op(psr, poffset)\n got = op(gsr, goffset)\n\n utils.assert_eq(expect, got)\n\n\[email protected](\n \"date_col\",\n [\n [\n \"2000-01-01 00:00:00.012345678\",\n \"2000-01-31 00:00:00.012345678\",\n \"2000-02-29 00:00:00.012345678\",\n ]\n ],\n)\[email protected](\"n_periods\", [0, 1, -1, 12, -12])\[email protected](\n \"frequency\",\n [\n \"months\",\n \"years\",\n \"days\",\n \"hours\",\n \"minutes\",\n \"seconds\",\n \"microseconds\",\n pytest.param(\n \"nanoseconds\",\n marks=pytest.mark.xfail(\n reason=\"https://github.com/pandas-dev/pandas/issues/36589\"\n ),\n ),\n ],\n)\[email protected](\n \"dtype\",\n [\"datetime64[ns]\", \"datetime64[us]\", \"datetime64[ms]\", \"datetime64[s]\"],\n)\ndef test_datetime_dateoffset_binaryop_reflected(\n date_col, n_periods, frequency, dtype\n):\n gsr = cudf.Series(date_col, dtype=dtype)\n psr = gsr.to_pandas() # converts to nanos\n\n kwargs = {frequency: n_periods}\n\n goffset = cudf.DateOffset(**kwargs)\n poffset = pd.DateOffset(**kwargs)\n\n expect = poffset + psr\n got = goffset + gsr\n\n utils.assert_eq(expect, got)\n\n with pytest.raises(TypeError):\n poffset - psr\n\n with pytest.raises(TypeError):\n goffset - gsr\n\n\[email protected](\"frame\", [cudf.Series, cudf.Index, cudf.DataFrame])\[email protected](\n \"dtype\", [\"int\", \"str\", \"datetime64[s]\", \"timedelta64[s]\", \"category\"]\n)\ndef test_binops_with_lhs_numpy_scalar(frame, dtype):\n data = [1, 2, 3, 4, 5]\n\n data = (\n frame({\"a\": data}, dtype=dtype)\n if isinstance(frame, cudf.DataFrame)\n else frame(data, dtype=dtype)\n )\n\n if dtype == \"datetime64[s]\":\n val = np.dtype(dtype).type(4, \"s\")\n elif dtype == \"timedelta64[s]\":\n val = np.dtype(dtype).type(4, \"s\")\n elif dtype == \"category\":\n val = np.int64(4)\n else:\n val = np.dtype(dtype).type(4)\n\n expected = val == data.to_pandas()\n got = val == data\n\n # In case of index, expected would be a numpy array\n if isinstance(data, cudf.Index):\n expected = pd.Index(expected)\n\n 
utils.assert_eq(expected, got)\n\n\[email protected](\n \"dtype\",\n [\n \"int8\",\n \"int16\",\n \"int32\",\n \"int64\",\n \"uint8\",\n \"uint16\",\n \"uint32\",\n \"uint64\",\n \"float32\",\n \"float64\",\n \"datetime64[ns]\",\n \"datetime64[us]\",\n \"datetime64[ms]\",\n \"datetime64[s]\",\n \"timedelta64[ns]\",\n \"timedelta64[us]\",\n \"timedelta64[ms]\",\n \"timedelta64[s]\",\n ],\n)\[email protected](\"op\", _operators_comparison)\ndef test_binops_with_NA_consistent(dtype, op):\n data = [1, 2, 3]\n sr = cudf.Series(data, dtype=dtype)\n\n result = getattr(sr, op)(cudf.NA)\n if dtype in NUMERIC_TYPES:\n if op == \"ne\":\n expect_all = True\n else:\n expect_all = False\n assert (result == expect_all).all()\n elif dtype in DATETIME_TYPES & TIMEDELTA_TYPES:\n assert result._column.null_count == len(data)\n\n\ndef _decimal_series(input, dtype):\n return cudf.Series(\n [x if x is None else decimal.Decimal(x) for x in input], dtype=dtype,\n )\n\n\[email protected](\n \"args\",\n [\n (\n operator.add,\n [\"1.5\", \"2.0\"],\n cudf.Decimal64Dtype(scale=2, precision=2),\n [\"1.5\", \"2.0\"],\n cudf.Decimal64Dtype(scale=2, precision=2),\n [\"3.0\", \"4.0\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n ),\n (\n operator.add,\n [\"1.5\", \"2.0\"],\n cudf.Decimal64Dtype(scale=2, precision=2),\n [\"2.25\", \"1.005\"],\n cudf.Decimal64Dtype(scale=3, precision=4),\n [\"3.75\", \"3.005\"],\n cudf.Decimal64Dtype(scale=3, precision=5),\n ),\n (\n operator.add,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n [\"0.1\", \"0.2\"],\n cudf.Decimal64Dtype(scale=3, precision=4),\n [\"100.1\", \"200.2\"],\n cudf.Decimal64Dtype(scale=3, precision=9),\n ),\n (\n operator.sub,\n [\"1.5\", \"2.0\"],\n cudf.Decimal64Dtype(scale=2, precision=2),\n [\"2.25\", \"1.005\"],\n cudf.Decimal64Dtype(scale=3, precision=4),\n [\"-0.75\", \"0.995\"],\n cudf.Decimal64Dtype(scale=3, precision=5),\n ),\n (\n operator.sub,\n [\"1.5\", \"2.0\"],\n cudf.Decimal64Dtype(scale=2, precision=2),\n [\"2.25\", \"1.005\"],\n cudf.Decimal64Dtype(scale=3, precision=4),\n [\"-0.75\", \"0.995\"],\n cudf.Decimal64Dtype(scale=3, precision=5),\n ),\n (\n operator.sub,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n [\"0.1\", \"0.2\"],\n cudf.Decimal64Dtype(scale=3, precision=4),\n [\"99.9\", \"199.8\"],\n cudf.Decimal64Dtype(scale=3, precision=9),\n ),\n (\n operator.mul,\n [\"1.5\", \"2.0\"],\n cudf.Decimal64Dtype(scale=2, precision=2),\n [\"1.5\", \"3.0\"],\n cudf.Decimal64Dtype(scale=3, precision=4),\n [\"2.25\", \"6.0\"],\n cudf.Decimal64Dtype(scale=5, precision=7),\n ),\n (\n operator.mul,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n [\"0.1\", \"0.2\"],\n cudf.Decimal64Dtype(scale=3, precision=4),\n [\"10.0\", \"40.0\"],\n cudf.Decimal64Dtype(scale=1, precision=8),\n ),\n (\n operator.mul,\n [\"1000\", \"2000\"],\n cudf.Decimal64Dtype(scale=-3, precision=4),\n [\"0.343\", \"0.500\"],\n cudf.Decimal64Dtype(scale=3, precision=3),\n [\"343.0\", \"1000.0\"],\n cudf.Decimal64Dtype(scale=0, precision=8),\n ),\n (\n operator.add,\n [\"1.5\", None, \"2.0\"],\n cudf.Decimal64Dtype(scale=2, precision=2),\n [\"1.5\", None, \"2.0\"],\n cudf.Decimal64Dtype(scale=2, precision=2),\n [\"3.0\", None, \"4.0\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n ),\n (\n operator.add,\n [\"1.5\", None],\n cudf.Decimal64Dtype(scale=2, precision=2),\n [\"2.25\", \"1.005\"],\n cudf.Decimal64Dtype(scale=3, precision=4),\n [\"3.75\", None],\n cudf.Decimal64Dtype(scale=3, precision=5),\n ),\n (\n 
operator.sub,\n [\"1.5\", None],\n cudf.Decimal64Dtype(scale=2, precision=2),\n [\"2.25\", None],\n cudf.Decimal64Dtype(scale=3, precision=4),\n [\"-0.75\", None],\n cudf.Decimal64Dtype(scale=3, precision=5),\n ),\n (\n operator.sub,\n [\"1.5\", \"2.0\"],\n cudf.Decimal64Dtype(scale=2, precision=2),\n [\"2.25\", None],\n cudf.Decimal64Dtype(scale=3, precision=4),\n [\"-0.75\", None],\n cudf.Decimal64Dtype(scale=3, precision=5),\n ),\n (\n operator.mul,\n [\"1.5\", None],\n cudf.Decimal64Dtype(scale=2, precision=2),\n [\"1.5\", None],\n cudf.Decimal64Dtype(scale=3, precision=4),\n [\"2.25\", None],\n cudf.Decimal64Dtype(scale=5, precision=7),\n ),\n (\n operator.mul,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n [\"0.1\", None],\n cudf.Decimal64Dtype(scale=3, precision=4),\n [\"10.0\", None],\n cudf.Decimal64Dtype(scale=1, precision=8),\n ),\n (\n operator.eq,\n [\"0.18\", \"0.42\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [\"0.18\", \"0.21\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [True, False],\n bool,\n ),\n (\n operator.eq,\n [\"0.18\", \"0.42\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [\"0.1800\", \"0.2100\"],\n cudf.Decimal64Dtype(scale=4, precision=5),\n [True, False],\n bool,\n ),\n (\n operator.eq,\n [\"100\", None],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-1, precision=4),\n [True, None],\n bool,\n ),\n (\n operator.lt,\n [\"0.18\", \"0.42\", \"1.00\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [\"0.10\", \"0.87\", \"1.00\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [False, True, False],\n bool,\n ),\n (\n operator.lt,\n [\"0.18\", \"0.42\", \"1.00\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [\"0.1000\", \"0.8700\", \"1.0000\"],\n cudf.Decimal64Dtype(scale=4, precision=5),\n [False, True, False],\n bool,\n ),\n (\n operator.lt,\n [\"200\", None, \"100\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n [\"100\", \"200\", \"100\"],\n cudf.Decimal64Dtype(scale=-1, precision=4),\n [False, None, False],\n bool,\n ),\n (\n operator.gt,\n [\"0.18\", \"0.42\", \"1.00\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [\"0.10\", \"0.87\", \"1.00\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [True, False, False],\n bool,\n ),\n (\n operator.gt,\n [\"0.18\", \"0.42\", \"1.00\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [\"0.1000\", \"0.8700\", \"1.0000\"],\n cudf.Decimal64Dtype(scale=4, precision=5),\n [True, False, False],\n bool,\n ),\n (\n operator.gt,\n [\"300\", None, \"100\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n [\"100\", \"200\", \"100\"],\n cudf.Decimal64Dtype(scale=-1, precision=4),\n [True, None, False],\n bool,\n ),\n (\n operator.le,\n [\"0.18\", \"0.42\", \"1.00\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [\"0.10\", \"0.87\", \"1.00\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [False, True, True],\n bool,\n ),\n (\n operator.le,\n [\"0.18\", \"0.42\", \"1.00\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [\"0.1000\", \"0.8700\", \"1.0000\"],\n cudf.Decimal64Dtype(scale=4, precision=5),\n [False, True, True],\n bool,\n ),\n (\n operator.le,\n [\"300\", None, \"100\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n [\"100\", \"200\", \"100\"],\n cudf.Decimal64Dtype(scale=-1, precision=4),\n [False, None, True],\n bool,\n ),\n (\n operator.ge,\n [\"0.18\", \"0.42\", \"1.00\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [\"0.10\", \"0.87\", \"1.00\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [True, False, 
True],\n bool,\n ),\n (\n operator.ge,\n [\"0.18\", \"0.42\", \"1.00\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [\"0.1000\", \"0.8700\", \"1.0000\"],\n cudf.Decimal64Dtype(scale=4, precision=5),\n [True, False, True],\n bool,\n ),\n (\n operator.ge,\n [\"300\", None, \"100\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n [\"100\", \"200\", \"100\"],\n cudf.Decimal64Dtype(scale=-1, precision=4),\n [True, None, True],\n bool,\n ),\n ],\n)\ndef test_binops_decimal(args):\n op, lhs, l_dtype, rhs, r_dtype, expect, expect_dtype = args\n\n a = _decimal_series(lhs, l_dtype)\n b = _decimal_series(rhs, r_dtype)\n expect = (\n _decimal_series(expect, expect_dtype)\n if isinstance(expect_dtype, cudf.Decimal64Dtype)\n else cudf.Series(expect, dtype=expect_dtype)\n )\n\n got = op(a, b)\n assert expect.dtype == got.dtype\n utils.assert_eq(expect, got)\n\n\[email protected](\n \"args\",\n [\n (\n operator.eq,\n [\"100\", \"41\", None],\n cudf.Decimal64Dtype(scale=0, precision=5),\n [100, 42, 12],\n cudf.Series([True, False, None], dtype=bool),\n cudf.Series([True, False, None], dtype=bool),\n ),\n (\n operator.eq,\n [\"100.000\", \"42.001\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n [100, 42, 12],\n cudf.Series([True, False, None], dtype=bool),\n cudf.Series([True, False, None], dtype=bool),\n ),\n (\n operator.eq,\n [\"100\", \"40\", None],\n cudf.Decimal64Dtype(scale=-1, precision=3),\n [100, 42, 12],\n cudf.Series([True, False, None], dtype=bool),\n cudf.Series([True, False, None], dtype=bool),\n ),\n (\n operator.lt,\n [\"100\", \"40\", \"28\", None],\n cudf.Decimal64Dtype(scale=0, precision=3),\n [100, 42, 24, 12],\n cudf.Series([False, True, False, None], dtype=bool),\n cudf.Series([False, False, True, None], dtype=bool),\n ),\n (\n operator.lt,\n [\"100.000\", \"42.002\", \"23.999\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n [100, 42, 24, 12],\n cudf.Series([False, False, True, None], dtype=bool),\n cudf.Series([False, True, False, None], dtype=bool),\n ),\n (\n operator.lt,\n [\"100\", \"40\", \"10\", None],\n cudf.Decimal64Dtype(scale=-1, precision=3),\n [100, 42, 8, 12],\n cudf.Series([False, True, False, None], dtype=bool),\n cudf.Series([False, False, True, None], dtype=bool),\n ),\n (\n operator.gt,\n [\"100\", \"42\", \"20\", None],\n cudf.Decimal64Dtype(scale=0, precision=3),\n [100, 40, 24, 12],\n cudf.Series([False, True, False, None], dtype=bool),\n cudf.Series([False, False, True, None], dtype=bool),\n ),\n (\n operator.gt,\n [\"100.000\", \"42.002\", \"23.999\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n [100, 42, 24, 12],\n cudf.Series([False, True, False, None], dtype=bool),\n cudf.Series([False, False, True, None], dtype=bool),\n ),\n (\n operator.gt,\n [\"100\", \"40\", \"10\", None],\n cudf.Decimal64Dtype(scale=-1, precision=3),\n [100, 42, 8, 12],\n cudf.Series([False, False, True, None], dtype=bool),\n cudf.Series([False, True, False, None], dtype=bool),\n ),\n (\n operator.le,\n [\"100\", \"40\", \"28\", None],\n cudf.Decimal64Dtype(scale=0, precision=3),\n [100, 42, 24, 12],\n cudf.Series([True, True, False, None], dtype=bool),\n cudf.Series([True, False, True, None], dtype=bool),\n ),\n (\n operator.le,\n [\"100.000\", \"42.002\", \"23.999\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n [100, 42, 24, 12],\n cudf.Series([True, False, True, None], dtype=bool),\n cudf.Series([True, True, False, None], dtype=bool),\n ),\n (\n operator.le,\n [\"100\", \"40\", \"10\", None],\n cudf.Decimal64Dtype(scale=-1, precision=3),\n [100, 
42, 8, 12],\n cudf.Series([True, True, False, None], dtype=bool),\n cudf.Series([True, False, True, None], dtype=bool),\n ),\n (\n operator.ge,\n [\"100\", \"42\", \"20\", None],\n cudf.Decimal64Dtype(scale=0, precision=3),\n [100, 40, 24, 12],\n cudf.Series([True, True, False, None], dtype=bool),\n cudf.Series([True, False, True, None], dtype=bool),\n ),\n (\n operator.ge,\n [\"100.000\", \"42.002\", \"23.999\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n [100, 42, 24, 12],\n cudf.Series([True, True, False, None], dtype=bool),\n cudf.Series([True, False, True, None], dtype=bool),\n ),\n (\n operator.ge,\n [\"100\", \"40\", \"10\", None],\n cudf.Decimal64Dtype(scale=-1, precision=3),\n [100, 42, 8, 12],\n cudf.Series([True, False, True, None], dtype=bool),\n cudf.Series([True, True, False, None], dtype=bool),\n ),\n ],\n)\[email protected](\"integer_dtype\", cudf.tests.utils.INTEGER_TYPES)\[email protected](\"reflected\", [True, False])\ndef test_binops_decimal_comp_mixed_integer(args, integer_dtype, reflected):\n \"\"\"\n Tested compare operations:\n eq, lt, gt, le, ge\n Each operation has 3 decimal data setups, with scale from {==0, >0, <0}.\n Decimal precisions are sufficient to hold the digits.\n For each decimal data setup, there is at least one row that lead to one\n of the following compare results: {True, False, None}.\n \"\"\"\n if not reflected:\n op, ldata, ldtype, rdata, expected, _ = args\n else:\n op, ldata, ldtype, rdata, _, expected = args\n\n lhs = _decimal_series(ldata, ldtype)\n rhs = cudf.Series(rdata, dtype=integer_dtype)\n\n if reflected:\n rhs, lhs = lhs, rhs\n\n actual = op(lhs, rhs)\n\n utils.assert_eq(expected, actual)\n\n\[email protected](\n \"args\",\n [\n (\n operator.add,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n decimal.Decimal(1),\n [\"101\", \"201\"],\n cudf.Decimal64Dtype(scale=0, precision=6),\n False,\n ),\n (\n operator.add,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n 1,\n [\"101\", \"201\"],\n cudf.Decimal64Dtype(scale=0, precision=6),\n False,\n ),\n (\n operator.add,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n decimal.Decimal(\"1.5\"),\n [\"101.5\", \"201.5\"],\n cudf.Decimal64Dtype(scale=1, precision=7),\n False,\n ),\n (\n operator.add,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n cudf.Scalar(decimal.Decimal(\"1.5\")),\n [\"101.5\", \"201.5\"],\n cudf.Decimal64Dtype(scale=1, precision=7),\n False,\n ),\n (\n operator.add,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n decimal.Decimal(1),\n [\"101\", \"201\"],\n cudf.Decimal64Dtype(scale=0, precision=6),\n True,\n ),\n (\n operator.add,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n 1,\n [\"101\", \"201\"],\n cudf.Decimal64Dtype(scale=0, precision=6),\n True,\n ),\n (\n operator.add,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n decimal.Decimal(\"1.5\"),\n [\"101.5\", \"201.5\"],\n cudf.Decimal64Dtype(scale=1, precision=7),\n True,\n ),\n (\n operator.add,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n cudf.Scalar(decimal.Decimal(\"1.5\")),\n [\"101.5\", \"201.5\"],\n cudf.Decimal64Dtype(scale=1, precision=7),\n True,\n ),\n (\n operator.mul,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n 1,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=5),\n False,\n ),\n (\n operator.mul,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n 
decimal.Decimal(2),\n [\"200\", \"400\"],\n cudf.Decimal64Dtype(scale=-2, precision=5),\n False,\n ),\n (\n operator.mul,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n decimal.Decimal(\"1.5\"),\n [\"150\", \"300\"],\n cudf.Decimal64Dtype(scale=-1, precision=6),\n False,\n ),\n (\n operator.mul,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n cudf.Scalar(decimal.Decimal(\"1.5\")),\n [\"150\", \"300\"],\n cudf.Decimal64Dtype(scale=-1, precision=6),\n False,\n ),\n (\n operator.mul,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n 1,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=5),\n True,\n ),\n (\n operator.mul,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n decimal.Decimal(2),\n [\"200\", \"400\"],\n cudf.Decimal64Dtype(scale=-2, precision=5),\n True,\n ),\n (\n operator.mul,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n decimal.Decimal(\"1.5\"),\n [\"150\", \"300\"],\n cudf.Decimal64Dtype(scale=-1, precision=6),\n True,\n ),\n (\n operator.mul,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n cudf.Scalar(decimal.Decimal(\"1.5\")),\n [\"150\", \"300\"],\n cudf.Decimal64Dtype(scale=-1, precision=6),\n True,\n ),\n (\n operator.sub,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n decimal.Decimal(2),\n [\"98\", \"198\"],\n cudf.Decimal64Dtype(scale=0, precision=6),\n False,\n ),\n (\n operator.sub,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n decimal.Decimal(\"2.5\"),\n [\"97.5\", \"197.5\"],\n cudf.Decimal64Dtype(scale=1, precision=7),\n False,\n ),\n (\n operator.sub,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n 4,\n [\"96\", \"196\"],\n cudf.Decimal64Dtype(scale=0, precision=6),\n False,\n ),\n (\n operator.sub,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n cudf.Scalar(decimal.Decimal(\"2.5\")),\n [\"97.5\", \"197.5\"],\n cudf.Decimal64Dtype(scale=1, precision=7),\n False,\n ),\n (\n operator.sub,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n decimal.Decimal(2),\n [\"-98\", \"-198\"],\n cudf.Decimal64Dtype(scale=0, precision=6),\n True,\n ),\n (\n operator.sub,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n 4,\n [\"-96\", \"-196\"],\n cudf.Decimal64Dtype(scale=0, precision=6),\n True,\n ),\n (\n operator.sub,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n decimal.Decimal(\"2.5\"),\n [\"-97.5\", \"-197.5\"],\n cudf.Decimal64Dtype(scale=1, precision=7),\n True,\n ),\n (\n operator.sub,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n cudf.Scalar(decimal.Decimal(\"2.5\")),\n [\"-97.5\", \"-197.5\"],\n cudf.Decimal64Dtype(scale=1, precision=7),\n True,\n ),\n ],\n)\ndef test_binops_decimal_scalar(args):\n op, lhs, l_dtype, rhs, expect, expect_dtype, reflect = args\n\n def decimal_series(input, dtype):\n return cudf.Series(\n [x if x is None else decimal.Decimal(x) for x in input],\n dtype=dtype,\n )\n\n lhs = decimal_series(lhs, l_dtype)\n expect = decimal_series(expect, expect_dtype)\n\n if reflect:\n lhs, rhs = rhs, lhs\n\n got = op(lhs, rhs)\n assert expect.dtype == got.dtype\n utils.assert_eq(expect, got)\n\n\[email protected](\n \"args\",\n [\n (\n operator.eq,\n [\"100.00\", \"41\", None],\n cudf.Decimal64Dtype(scale=0, precision=5),\n 100,\n cudf.Series([True, False, None], dtype=bool),\n cudf.Series([True, False, None], dtype=bool),\n ),\n (\n 
operator.eq,\n [\"100.123\", \"41\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n decimal.Decimal(\"100.123\"),\n cudf.Series([True, False, None], dtype=bool),\n cudf.Series([True, False, None], dtype=bool),\n ),\n (\n operator.eq,\n [\"100.123\", \"41\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n cudf.Scalar(decimal.Decimal(\"100.123\")),\n cudf.Series([True, False, None], dtype=bool),\n cudf.Series([True, False, None], dtype=bool),\n ),\n (\n operator.gt,\n [\"100.00\", \"41\", \"120.21\", None],\n cudf.Decimal64Dtype(scale=2, precision=5),\n 100,\n cudf.Series([False, False, True, None], dtype=bool),\n cudf.Series([False, True, False, None], dtype=bool),\n ),\n (\n operator.gt,\n [\"100.123\", \"41\", \"120.21\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n decimal.Decimal(\"100.123\"),\n cudf.Series([False, False, True, None], dtype=bool),\n cudf.Series([False, True, False, None], dtype=bool),\n ),\n (\n operator.gt,\n [\"100.123\", \"41\", \"120.21\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n cudf.Scalar(decimal.Decimal(\"100.123\")),\n cudf.Series([False, False, True, None], dtype=bool),\n cudf.Series([False, True, False, None], dtype=bool),\n ),\n (\n operator.ge,\n [\"100.00\", \"41\", \"120.21\", None],\n cudf.Decimal64Dtype(scale=2, precision=5),\n 100,\n cudf.Series([True, False, True, None], dtype=bool),\n cudf.Series([True, True, False, None], dtype=bool),\n ),\n (\n operator.ge,\n [\"100.123\", \"41\", \"120.21\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n decimal.Decimal(\"100.123\"),\n cudf.Series([True, False, True, None], dtype=bool),\n cudf.Series([True, True, False, None], dtype=bool),\n ),\n (\n operator.ge,\n [\"100.123\", \"41\", \"120.21\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n cudf.Scalar(decimal.Decimal(\"100.123\")),\n cudf.Series([True, False, True, None], dtype=bool),\n cudf.Series([True, True, False, None], dtype=bool),\n ),\n (\n operator.lt,\n [\"100.00\", \"41\", \"120.21\", None],\n cudf.Decimal64Dtype(scale=2, precision=5),\n 100,\n cudf.Series([False, True, False, None], dtype=bool),\n cudf.Series([False, False, True, None], dtype=bool),\n ),\n (\n operator.lt,\n [\"100.123\", \"41\", \"120.21\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n decimal.Decimal(\"100.123\"),\n cudf.Series([False, True, False, None], dtype=bool),\n cudf.Series([False, False, True, None], dtype=bool),\n ),\n (\n operator.lt,\n [\"100.123\", \"41\", \"120.21\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n cudf.Scalar(decimal.Decimal(\"100.123\")),\n cudf.Series([False, True, False, None], dtype=bool),\n cudf.Series([False, False, True, None], dtype=bool),\n ),\n (\n operator.le,\n [\"100.00\", \"41\", \"120.21\", None],\n cudf.Decimal64Dtype(scale=2, precision=5),\n 100,\n cudf.Series([True, True, False, None], dtype=bool),\n cudf.Series([True, False, True, None], dtype=bool),\n ),\n (\n operator.le,\n [\"100.123\", \"41\", \"120.21\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n decimal.Decimal(\"100.123\"),\n cudf.Series([True, True, False, None], dtype=bool),\n cudf.Series([True, False, True, None], dtype=bool),\n ),\n (\n operator.le,\n [\"100.123\", \"41\", \"120.21\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n cudf.Scalar(decimal.Decimal(\"100.123\")),\n cudf.Series([True, True, False, None], dtype=bool),\n cudf.Series([True, False, True, None], dtype=bool),\n ),\n ],\n)\[email protected](\"reflected\", [True, False])\ndef test_binops_decimal_scalar_compare(args, 
reflected):\n \"\"\"\n Tested compare operations:\n eq, lt, gt, le, ge\n Each operation has 3 data setups: pyints, Decimal, and\n decimal cudf.Scalar\n For each data setup, there is at least one row that lead to one of the\n following compare results: {True, False, None}.\n \"\"\"\n if not reflected:\n op, ldata, ldtype, rdata, expected, _ = args\n else:\n op, ldata, ldtype, rdata, _, expected = args\n\n lhs = _decimal_series(ldata, ldtype)\n rhs = rdata\n\n if reflected:\n rhs, lhs = lhs, rhs\n\n actual = op(lhs, rhs)\n\n utils.assert_eq(expected, actual)\n\n\[email protected](\n \"dtype\",\n [\n \"uint8\",\n \"uint16\",\n \"uint32\",\n \"uint64\",\n \"int8\",\n \"int16\",\n \"int32\",\n \"int64\",\n \"float32\",\n \"float64\",\n \"str\",\n \"datetime64[ns]\",\n \"datetime64[us]\",\n \"datetime64[ms]\",\n \"datetime64[s]\",\n \"timedelta64[ns]\",\n \"timedelta64[us]\",\n \"timedelta64[ms]\",\n \"timedelta64[s]\",\n ],\n)\[email protected](\"null_scalar\", [None, cudf.NA, np.datetime64(\"NaT\")])\[email protected](\"cmpop\", _cmpops)\ndef test_column_null_scalar_comparison(dtype, null_scalar, cmpop):\n # This test is meant to validate that comparing\n # a series of any dtype with a null scalar produces\n # a new series where all the elements are <NA>.\n\n if isinstance(null_scalar, np.datetime64):\n if np.dtype(dtype).kind not in \"mM\":\n pytest.skip()\n null_scalar = null_scalar.astype(dtype)\n\n dtype = np.dtype(dtype)\n\n data = [1, 2, 3, 4, 5]\n sr = cudf.Series(data, dtype=dtype)\n result = cmpop(sr, null_scalar)\n\n assert result.isnull().all()\n\n\[email protected](\"fn\", [\"eq\", \"ne\", \"lt\", \"gt\", \"le\", \"ge\"])\ndef test_equality_ops_index_mismatch(fn):\n a = cudf.Series(\n [1, 2, 3, None, None, 4], index=[\"a\", \"b\", \"c\", \"d\", \"e\", \"f\"]\n )\n b = cudf.Series(\n [-5, 4, 3, 2, 1, 0, 19, 11],\n index=[\"aa\", \"b\", \"c\", \"d\", \"e\", \"f\", \"y\", \"z\"],\n )\n\n pa = a.to_pandas(nullable=True)\n pb = b.to_pandas(nullable=True)\n expected = getattr(pa, fn)(pb)\n actual = getattr(a, fn)(b).to_pandas(nullable=True)\n\n utils.assert_eq(expected, actual)\n\n\ndef generate_test_null_equals_columnops_data():\n # Generate tuples of:\n # (left_data, right_data, compare_bool\n # where compare_bool is the correct answer to\n # if the columns should compare as null equals\n\n def set_null_cases(column_l, column_r, case):\n if case == \"neither\":\n return column_l, column_r\n elif case == \"left\":\n column_l[1] = None\n elif case == \"right\":\n column_r[1] = None\n elif case == \"both\":\n column_l[1] = None\n column_r[1] = None\n else:\n raise ValueError(\"Unknown null case\")\n return column_l, column_r\n\n null_cases = [\"neither\", \"left\", \"right\", \"both\"]\n data = [1, 2, 3]\n\n results = []\n # TODO: Numeric types can be cross compared as null equal\n for dtype in (\n list(NUMERIC_TYPES)\n + list(DATETIME_TYPES)\n + list(TIMEDELTA_TYPES)\n + list(STRING_TYPES)\n + [\"category\"]\n ):\n for case in null_cases:\n left = cudf.Series(data, dtype=dtype)\n right = cudf.Series(data, dtype=dtype)\n if case in {\"left\", \"right\"}:\n answer = False\n else:\n answer = True\n left, right = set_null_cases(left, right, case)\n results.append((left._column, right._column, answer, case))\n\n return results\n\n\[email protected](\n \"lcol,rcol,ans,case\", generate_test_null_equals_columnops_data()\n)\ndef test_null_equals_columnops(lcol, rcol, ans, case):\n assert lcol._null_equals(rcol).all() == ans\n",
"# Copyright (c) 2020-2021, NVIDIA CORPORATION.\n\nfrom typing import Any, Union\n\nimport numpy as np\nimport pandas as pd\nfrom nvtx import annotate\n\nimport cudf\nfrom cudf._lib.concat import concat_columns\nfrom cudf._lib.scalar import _is_null_host_scalar\nfrom cudf._typing import ColumnLike, DataFrameOrSeries, ScalarLike\nfrom cudf.core.column.column import as_column\nfrom cudf.utils.dtypes import (\n find_common_type,\n is_categorical_dtype,\n is_column_like,\n is_list_like,\n is_numerical_dtype,\n is_scalar,\n to_cudf_compatible_scalar,\n)\n\n\ndef indices_from_labels(obj, labels):\n from cudf.core.column import column\n\n if not isinstance(labels, cudf.MultiIndex):\n labels = column.as_column(labels)\n\n if is_categorical_dtype(obj.index):\n labels = labels.astype(\"category\")\n codes = labels.codes.astype(obj.index._values.codes.dtype)\n labels = column.build_categorical_column(\n categories=labels.dtype.categories,\n codes=codes,\n ordered=labels.dtype.ordered,\n )\n else:\n labels = labels.astype(obj.index.dtype)\n\n # join is not guaranteed to maintain the index ordering\n # so we will sort it with its initial ordering which is stored\n # in column \"__\"\n lhs = cudf.DataFrame({\"__\": column.arange(len(labels))}, index=labels)\n rhs = cudf.DataFrame({\"_\": column.arange(len(obj))}, index=obj.index)\n return lhs.join(rhs).sort_values(\"__\")[\"_\"]\n\n\ndef get_label_range_or_mask(index, start, stop, step):\n if (\n not (start is None and stop is None)\n and type(index) is cudf.core.index.DatetimeIndex\n and index.is_monotonic is False\n ):\n start = pd.to_datetime(start)\n stop = pd.to_datetime(stop)\n if start is not None and stop is not None:\n if start > stop:\n return slice(0, 0, None)\n boolean_mask = (index >= start) and (index <= stop)\n elif start is not None:\n boolean_mask = index >= start\n else:\n boolean_mask = index <= stop\n return boolean_mask\n else:\n start, stop = index.find_label_range(start, stop)\n return slice(start, stop, step)\n\n\nclass _SeriesIlocIndexer(object):\n \"\"\"\n For integer-location based selection.\n \"\"\"\n\n def __init__(self, sr):\n self._sr = sr\n\n def __getitem__(self, arg):\n if isinstance(arg, tuple):\n arg = list(arg)\n data = self._sr._column[arg]\n\n if is_scalar(data) or _is_null_host_scalar(data):\n return data\n index = self._sr.index.take(arg)\n return self._sr._copy_construct(data=data, index=index)\n\n def __setitem__(self, key, value):\n from cudf.core.column import column\n\n if isinstance(key, tuple):\n key = list(key)\n\n # coerce value into a scalar or column\n if is_scalar(value):\n value = to_cudf_compatible_scalar(value)\n else:\n value = column.as_column(value)\n\n if (\n not is_categorical_dtype(self._sr._column.dtype)\n and hasattr(value, \"dtype\")\n and pd.api.types.is_numeric_dtype(value.dtype)\n ):\n # normalize types if necessary:\n if not pd.api.types.is_integer(key):\n to_dtype = np.result_type(value.dtype, self._sr._column.dtype)\n value = value.astype(to_dtype)\n self._sr._column._mimic_inplace(\n self._sr._column.astype(to_dtype), inplace=True\n )\n\n self._sr._column[key] = value\n\n\nclass _SeriesLocIndexer(object):\n \"\"\"\n Label-based selection\n \"\"\"\n\n def __init__(self, sr):\n self._sr = sr\n\n def __getitem__(self, arg: Any) -> Union[ScalarLike, DataFrameOrSeries]:\n if isinstance(arg, pd.MultiIndex):\n arg = cudf.from_pandas(arg)\n\n if isinstance(self._sr.index, cudf.MultiIndex) and not isinstance(\n arg, cudf.MultiIndex\n ):\n result = self._sr.index._get_row_major(self._sr, 
arg)\n if (\n isinstance(arg, tuple)\n and len(arg) == self._sr._index.nlevels\n and not any((isinstance(x, slice) for x in arg))\n ):\n result = result.iloc[0]\n return result\n try:\n arg = self._loc_to_iloc(arg)\n except (TypeError, KeyError, IndexError, ValueError):\n raise KeyError(arg)\n\n return self._sr.iloc[arg]\n\n def __setitem__(self, key, value):\n try:\n key = self._loc_to_iloc(key)\n except KeyError as e:\n if (\n is_scalar(key)\n and not isinstance(self._sr.index, cudf.MultiIndex)\n and is_scalar(value)\n ):\n _append_new_row_inplace(self._sr.index._values, key)\n _append_new_row_inplace(self._sr._column, value)\n return\n else:\n raise e\n if isinstance(value, (pd.Series, cudf.Series)):\n value = cudf.Series(value)\n value = value._align_to_index(self._sr.index, how=\"right\")\n self._sr.iloc[key] = value\n\n def _loc_to_iloc(self, arg):\n if is_scalar(arg):\n if not is_numerical_dtype(self._sr.index.dtype):\n # TODO: switch to cudf.utils.dtypes.is_integer(arg)\n if isinstance(\n arg, cudf.Scalar\n ) and pd.api.types.is_integer_dtype(arg.dtype):\n found_index = arg.value\n return found_index\n elif pd.api.types.is_integer(arg):\n found_index = arg\n return found_index\n try:\n found_index = self._sr.index._values.find_first_value(\n arg, closest=False\n )\n return found_index\n except (TypeError, KeyError, IndexError, ValueError):\n raise KeyError(\"label scalar is out of bound\")\n\n elif isinstance(arg, slice):\n return get_label_range_or_mask(\n self._sr.index, arg.start, arg.stop, arg.step\n )\n elif isinstance(arg, (cudf.MultiIndex, pd.MultiIndex)):\n if isinstance(arg, pd.MultiIndex):\n arg = cudf.MultiIndex.from_pandas(arg)\n\n return indices_from_labels(self._sr, arg)\n\n else:\n arg = cudf.core.series.Series(cudf.core.column.as_column(arg))\n if arg.dtype in (bool, np.bool_):\n return arg\n else:\n indices = indices_from_labels(self._sr, arg)\n if indices.null_count > 0:\n raise KeyError(\"label scalar is out of bound\")\n return indices\n\n\nclass _DataFrameIndexer(object):\n def __getitem__(self, arg):\n from cudf import MultiIndex\n\n if isinstance(self._df.index, MultiIndex) or isinstance(\n self._df.columns, MultiIndex\n ):\n # This try/except block allows the use of pandas-like\n # tuple arguments into MultiIndex dataframes.\n try:\n return self._getitem_tuple_arg(arg)\n except (TypeError, KeyError, IndexError, ValueError):\n return self._getitem_tuple_arg((arg, slice(None)))\n else:\n if not isinstance(arg, tuple):\n arg = (arg, slice(None))\n return self._getitem_tuple_arg(arg)\n\n def __setitem__(self, key, value):\n if not isinstance(key, tuple):\n key = (key, slice(None))\n return self._setitem_tuple_arg(key, value)\n\n def _can_downcast_to_series(self, df, arg):\n \"\"\"\n This method encapsulates the logic used\n to determine whether or not the result of a loc/iloc\n operation should be \"downcasted\" from a DataFrame to a\n Series\n \"\"\"\n from cudf.core.column import as_column\n\n if isinstance(df, cudf.Series):\n return False\n nrows, ncols = df.shape\n if nrows == 1:\n if type(arg[0]) is slice:\n if not is_scalar(arg[1]):\n return False\n elif (is_list_like(arg[0]) or is_column_like(arg[0])) and (\n is_list_like(arg[1])\n or is_column_like(arg[0])\n or type(arg[1]) is slice\n ):\n return False\n else:\n if pd.api.types.is_bool_dtype(\n as_column(arg[0]).dtype\n ) and not isinstance(arg[1], slice):\n return True\n dtypes = df.dtypes.values.tolist()\n all_numeric = all(\n [pd.api.types.is_numeric_dtype(t) for t in dtypes]\n )\n if all_numeric:\n 
return True\n if ncols == 1:\n if type(arg[1]) is slice:\n return False\n if isinstance(arg[1], tuple):\n # Multiindex indexing with a slice\n if any(isinstance(v, slice) for v in arg):\n return False\n if not (is_list_like(arg[1]) or is_column_like(arg[1])):\n return True\n return False\n\n def _downcast_to_series(self, df, arg):\n \"\"\"\n \"Downcast\" from a DataFrame to a Series\n based on Pandas indexing rules\n \"\"\"\n nrows, ncols = df.shape\n # determine the axis along which the Series is taken:\n if nrows == 1 and ncols == 1:\n if is_scalar(arg[0]) and is_scalar(arg[1]):\n return df[df.columns[0]].iloc[0]\n elif not is_scalar(arg[0]):\n axis = 1\n else:\n axis = 0\n\n elif nrows == 1:\n axis = 0\n elif ncols == 1:\n axis = 1\n else:\n raise ValueError(\"Cannot downcast DataFrame selection to Series\")\n\n # take series along the axis:\n if axis == 1:\n return df[df._data.names[0]]\n else:\n df = _normalize_dtypes(df)\n sr = df.T\n return sr[sr._data.names[0]]\n\n\nclass _DataFrameLocIndexer(_DataFrameIndexer):\n \"\"\"\n For selection by label.\n \"\"\"\n\n def __init__(self, df):\n self._df = df\n\n def _getitem_scalar(self, arg):\n return self._df[arg[1]].loc[arg[0]]\n\n @annotate(\"LOC_GETITEM\", color=\"blue\", domain=\"cudf_python\")\n def _getitem_tuple_arg(self, arg):\n from uuid import uuid4\n\n from cudf import MultiIndex\n from cudf.core.column import column\n from cudf.core.dataframe import DataFrame\n from cudf.core.index import as_index\n\n # Step 1: Gather columns\n if isinstance(arg, tuple):\n columns_df = self._get_column_selection(arg[1])\n columns_df._index = self._df._index\n else:\n columns_df = self._df\n\n # Step 2: Gather rows\n if isinstance(columns_df.index, MultiIndex):\n if isinstance(arg, (MultiIndex, pd.MultiIndex)):\n if isinstance(arg, pd.MultiIndex):\n arg = MultiIndex.from_pandas(arg)\n\n indices = indices_from_labels(columns_df, arg)\n return columns_df.take(indices)\n\n else:\n if isinstance(arg, tuple):\n return columns_df.index._get_row_major(columns_df, arg[0])\n else:\n return columns_df.index._get_row_major(columns_df, arg)\n else:\n if isinstance(arg[0], slice):\n out = get_label_range_or_mask(\n columns_df.index, arg[0].start, arg[0].stop, arg[0].step\n )\n if isinstance(out, slice):\n df = columns_df._slice(out)\n else:\n df = columns_df._apply_boolean_mask(out)\n else:\n tmp_arg = arg\n if is_scalar(arg[0]):\n # If a scalar, there is possibility of having duplicates.\n # Join would get all the duplicates. 
So, coverting it to\n # an array kind.\n tmp_arg = ([tmp_arg[0]], tmp_arg[1])\n if len(tmp_arg[0]) == 0:\n return columns_df._empty_like(keep_index=True)\n tmp_arg = (column.as_column(tmp_arg[0]), tmp_arg[1])\n\n if pd.api.types.is_bool_dtype(tmp_arg[0]):\n df = columns_df._apply_boolean_mask(tmp_arg[0])\n else:\n tmp_col_name = str(uuid4())\n other_df = DataFrame(\n {tmp_col_name: column.arange(len(tmp_arg[0]))},\n index=as_index(tmp_arg[0]),\n )\n df = other_df.join(columns_df, how=\"inner\")\n # as join is not assigning any names to index,\n # update it over here\n df.index.name = columns_df.index.name\n df = df.sort_values(tmp_col_name)\n df.drop(columns=[tmp_col_name], inplace=True)\n # There were no indices found\n if len(df) == 0:\n raise KeyError(arg)\n\n # Step 3: Gather index\n if df.shape[0] == 1: # we have a single row\n if isinstance(arg[0], slice):\n start = arg[0].start\n if start is None:\n start = self._df.index[0]\n df.index = as_index(start)\n else:\n row_selection = column.as_column(arg[0])\n if pd.api.types.is_bool_dtype(row_selection.dtype):\n df.index = self._df.index.take(row_selection)\n else:\n df.index = as_index(row_selection)\n # Step 4: Downcast\n if self._can_downcast_to_series(df, arg):\n return self._downcast_to_series(df, arg)\n return df\n\n @annotate(\"LOC_SETITEM\", color=\"blue\", domain=\"cudf_python\")\n def _setitem_tuple_arg(self, key, value):\n if isinstance(self._df.index, cudf.MultiIndex) or isinstance(\n self._df.columns, pd.MultiIndex\n ):\n raise NotImplementedError(\n \"Setting values using df.loc[] not supported on \"\n \"DataFrames with a MultiIndex\"\n )\n\n try:\n columns = self._get_column_selection(key[1])\n except KeyError:\n if not self._df.empty and isinstance(key[0], slice):\n pos_range = get_label_range_or_mask(\n self._df.index, key[0].start, key[0].stop, key[0].step\n )\n idx = self._df.index[pos_range]\n elif self._df.empty and isinstance(key[0], slice):\n idx = None\n else:\n idx = cudf.Index(key[0])\n if is_scalar(value):\n length = len(idx) if idx is not None else 1\n value = as_column(value, length=length)\n\n new_col = cudf.Series(value, index=idx)\n if not self._df.empty:\n new_col = new_col._align_to_index(self._df.index, how=\"right\")\n\n if self._df.empty:\n self._df.index = (\n idx if idx is not None else cudf.RangeIndex(len(new_col))\n )\n self._df._data.insert(key[1], new_col)\n else:\n for col in columns:\n self._df[col].loc[key[0]] = value\n\n def _get_column_selection(self, arg):\n return self._df._get_columns_by_label(arg)\n\n\nclass _DataFrameIlocIndexer(_DataFrameIndexer):\n \"\"\"\n For selection by index.\n \"\"\"\n\n def __init__(self, df):\n self._df = df\n\n @annotate(\"ILOC_GETITEM\", color=\"blue\", domain=\"cudf_python\")\n def _getitem_tuple_arg(self, arg):\n from cudf import MultiIndex\n from cudf.core.column import column\n from cudf.core.index import as_index\n\n # Iloc Step 1:\n # Gather the columns specified by the second tuple arg\n columns_df = self._get_column_selection(arg[1])\n columns_df._index = self._df._index\n\n # Iloc Step 2:\n # Gather the rows specified by the first tuple arg\n if isinstance(columns_df.index, MultiIndex):\n if isinstance(arg[0], slice):\n df = columns_df[arg[0]]\n else:\n df = columns_df.index._get_row_major(columns_df, arg[0])\n if (len(df) == 1 and len(columns_df) >= 1) and not (\n isinstance(arg[0], slice) or isinstance(arg[1], slice)\n ):\n # Pandas returns a numpy scalar in this case\n return df.iloc[0]\n if self._can_downcast_to_series(df, arg):\n return 
self._downcast_to_series(df, arg)\n return df\n else:\n if isinstance(arg[0], slice):\n df = columns_df._slice(arg[0])\n elif is_scalar(arg[0]):\n index = arg[0]\n if index < 0:\n index += len(columns_df)\n df = columns_df._slice(slice(index, index + 1, 1))\n else:\n arg = (column.as_column(arg[0]), arg[1])\n if pd.api.types.is_bool_dtype(arg[0]):\n df = columns_df._apply_boolean_mask(arg[0])\n else:\n df = columns_df._gather(arg[0])\n\n # Iloc Step 3:\n # Reindex\n if df.shape[0] == 1: # we have a single row without an index\n df.index = as_index(self._df.index[arg[0]])\n\n # Iloc Step 4:\n # Downcast\n if self._can_downcast_to_series(df, arg):\n return self._downcast_to_series(df, arg)\n\n if df.shape[0] == 0 and df.shape[1] == 0 and isinstance(arg[0], slice):\n df._index = as_index(self._df.index[arg[0]])\n return df\n\n @annotate(\"ILOC_SETITEM\", color=\"blue\", domain=\"cudf_python\")\n def _setitem_tuple_arg(self, key, value):\n columns = self._get_column_selection(key[1])\n\n for col in columns:\n self._df[col].iloc[key[0]] = value\n\n def _getitem_scalar(self, arg):\n col = self._df.columns[arg[1]]\n return self._df[col].iloc[arg[0]]\n\n def _get_column_selection(self, arg):\n return cudf.DataFrame(self._df._get_columns_by_index(arg))\n\n\ndef _normalize_dtypes(df):\n if len(df.columns) > 0:\n dtypes = df.dtypes.values.tolist()\n normalized_dtype = np.result_type(*dtypes)\n for name, col in df._data.items():\n df[name] = col.astype(normalized_dtype)\n return df\n\n\ndef _append_new_row_inplace(col: ColumnLike, value: ScalarLike):\n \"\"\"Append a scalar `value` to the end of `col` inplace.\n Cast to common type if possible\n \"\"\"\n to_type = find_common_type([type(value), col.dtype])\n val_col = as_column(value, dtype=to_type)\n old_col = col.astype(to_type)\n\n col._mimic_inplace(concat_columns([old_col, val_col]), inplace=True)\n"
] | [
[
"numpy.true_divide",
"numpy.datetime_data",
"numpy.random.random",
"pandas.Series",
"numpy.random.seed",
"pandas.DateOffset",
"numpy.random.choice",
"numpy.isnan",
"numpy.float16",
"pandas.Index",
"pandas.DataFrame",
"numpy.dtype",
"numpy.testing.assert_array_equal",
"numpy.datetime64",
"numpy.int64",
"numpy.isscalar",
"numpy.logical_and",
"numpy.random.randint"
],
[
"pandas.api.types.is_integer",
"pandas.to_datetime",
"pandas.api.types.is_numeric_dtype",
"numpy.result_type",
"pandas.api.types.is_integer_dtype",
"pandas.api.types.is_bool_dtype"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"0.19",
"0.24",
"0.20",
"1.0",
"0.25"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
pedbrgs/anomaly-detection-tool | [
"1b5d89eb1287eb13849d87851a8c3c4cc708a93e"
] | [
"utils.py"
] | [
"import cv2\nimport numpy as np\nfrom PIL import Image\nfrom matplotlib import pyplot as plt\n\nimport torch\nimport torchvision.models as models\nfrom torch.autograd import Variable\nimport torchvision.transforms as transforms\n\ndef plot_image(image, figsize):\n\n \"\"\" Display an image \"\"\"\n\n fig = plt.figure(figsize = figsize)\n plt.imshow(image, cmap = 'gray')\n plt.title(''), plt.xticks([]), plt.yticks([])\n plt.show()\n\ndef pattern_detection(img, figsize):\n \n \"\"\" Performs object segmentation by morphological filtering \"\"\"\n\n # BGR to grayscale\n imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n \n img_backup = img.copy()\n\n # Get image size\n height, width, _ = np.array(img).shape\n\n # Erosion morphological filter\n kernel = np.ones((3,3), np.uint8)\n erosion = cv2.erode(imgGray, kernel, iterations = 2)\n th = cv2.threshold(erosion, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)\n\n # Image binarization \n th = erosion.mean()\n imBin = erosion > th\n \n # Finding contours\n ret, thresh = cv2.threshold(erosion, 127, 255, 0)\n contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n \n # Compute contour areas for noise filtering\n areas = [cv2.contourArea(cnt) for cnt in contours]\n\n patterns, objects = [], []\n \n # Drawing bounding boxes around the contours\n for cnt in contours:\n # Filtering large and small bounding boxes\n if (cv2.contourArea(cnt) > 50 and cv2.contourArea(cnt) < np.max(areas)):\n # Get bounding box coordinates\n x, y, w, h = cv2.boundingRect(cnt)\n patterns.append([x, y, w, h])\n objects.append(cv2.cvtColor(img_backup[y:(y + h), x:(x+w)], cv2.COLOR_BGR2RGB))\n # Draw bounding box\n img_backup = cv2.rectangle(img_backup, (x, y),(x+w, y+h),(255, 0, 0), 1)\n\n return patterns, objects\n\ndef image_loader(image):\n \n \"\"\" Load image and returns pytorch tensor \"\"\"\n\n imsize = 256\n loader = transforms.Compose([transforms.Resize(imsize), transforms.ToTensor()])\n\n image = Image.fromarray(image)\n image = loader(image).float()\n image = Variable(image, requires_grad = True)\n image = image.unsqueeze(0)\n # .cuda() assumes that you are using GPU\n return image\n\ndef build_model():\n\n \"\"\" Build feature extractor based on ResNet-34 \"\"\"\n\n # If True, returns a model pre-trained on ImageNet\n convnet = models.resnet34(pretrained = True)\n convnet = list(convnet.children())[:-2]\n convnet = torch.nn.Sequential(*convnet, torch.nn.AdaptiveAvgPool2d(output_size = (4, 4)))\n \n return convnet\n\ndef feature_extraction(model, objects, patterns):\n\n \"\"\" Feature extraction from all detected patterns \"\"\"\n\n feature_vectors = []\n\n for i in range(len(patterns)):\n\n x_min, y_min, width, height = patterns[i][0], patterns[i][1], patterns[i][2], patterns[i][3]\n image = image_loader(objects[i])\n # Forward pass in each pattern\n features = model.forward(image)\n features = features.flatten().detach().numpy()\n feature_vectors.append(features)\n\n return feature_vectors\n\ndef pairwise_matrix(feature_vectors):\n\n \"\"\" Compute cosine similarity between feature vectors \"\"\"\n\n cosine_similarity = np.ones((len(feature_vectors[0]), len(feature_vectors[0])))\n\n for i in range(len(feature_vectors)-1):\n for j in range(len(feature_vectors)-1):\n cosine_similarity[i,j] = np.dot(feature_vectors[i], feature_vectors[j]) / (np.linalg.norm(feature_vectors[i]) * np.linalg.norm(feature_vectors[j]))\n\n return cosine_similarity"
] | [
[
"numpy.array",
"numpy.dot",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.title",
"matplotlib.pyplot.figure",
"numpy.linalg.norm",
"numpy.ones",
"numpy.max",
"torch.nn.AdaptiveAvgPool2d",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.show",
"torch.autograd.Variable"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
leonavery/KSFD | [
"090e388df13a2674676cbaa53171f2a87291ba9b"
] | [
"KSFD/ksfdtimeseries.py"
] | [
"\"\"\"\nMPI-aware read and write PETSc Vec to HDF5\n\nThe goal of this module is to save snapshots of a PETSc Vec to HDF5\nfiles, and obviously to read them again later. The obvious way to do\nthis is parallel HDF5. Unfortunately, distributions of HDF5 and h5py\nmay be built without support for parallel operation. (In particular,\nthe conda-forge version doesn't have it.) This is accomplished through\nthe following kludge:\n\nWhen a KSFD.TimeSeries is created with name tsname and argument mpiok\nTrue, the runtime envirnoment is checked to find out if parallel HDF5\nis enabled (using h5py.getconfig().mpi). If so, the data are stored in\nan HDF5 file named\n\n'{name}MPI.h5'.format(name=tsname). \n\nNote: there is a serious problem with parallel HDF5: variable length\nrecords can't be written. If you try, you get this exception:\n\nOSError: Can't write data (Parallel IO does not support writing VL\ndatatypes yet)\n\nSince that makes parallel HDF5 a nonstarter for my purposes, mpiok\ndefaults to False. You won't get parallel MPI unless you specifically\nask for it, and then dealing with the lack of VL records is your\nproblem.\n\nIf not, each process stores the data it owns in a file named\n\n'{name}s{size}r{rank}.h5'.format(name=tsname, size=comm.size, rank=comm.rank)\n\nwhere comm is the MPI communicator. If run sequentially the data will\nall be stored in a file called '{name}s1r0.h5'. It is intended that\nthe *MPI.h5 file created using parallele HDF5 and the *s1r0.h5 file\ncreated when running sequentially and parallel HDF5 is not available\nwill be the same. \n\nThe same procedure is used for finding the filename when opening in\nread/write mode ('r+' or 'a'). \n\nWhen opening a TimeSeries for read (mode 'r') TimeSeries checks (in\norder) for the *s<size>r<rank>.h5 file, then the *MPI.h5 file ,and\nfinally a *s1r0.h5 file, and opens the first it finds. In this case\nthe retrieve methods will only return the components of the vector\nowned by the local process. \n\nFinally, I will write a simple script to merge all the files of\n*s<size>r<rank>.h5 series into a single *MPI.h5 file. In this way an\nMPi process group of any size will be able to retrieve data written by\na process group of any size. \n\"\"\"\nimport h5py, os, re, gc, time\nimport traceback as tb\nimport numpy as np\nimport petsc4py\nfrom mpi4py import MPI\n#\n# These imports are placed inside a try/except so that this script can\n# be executed standalone to check for syntax errors.\n#\ntry:\n from .ksfddebug import log\n from .ksfdgrid import Grid\nexcept ImportError:\n from ksfddebug import log\n from ksfdgrid import Grid\n\ndef logSERIES(*args, **kwargs):\n log(*args, system='SERIES', **kwargs)\n\n\nclass KSFDTimeSeries:\n \"\"\"\n Base class for TimeSeries\n\n KSFDTimeSeries is intended as an abstract base class for reading and\n writing time series from KSFD solutions to HDF5 files. It is not\n formally defined as an ABC: you can instantiate it if you really\n wish, but it is not designed to make that a useful thing to do.\n \"\"\"\n def __init__(\n self,\n basename,\n size=1,\n rank=0,\n mpiok=False,\n mode='r+',\n retries=0,\n retry_interval=60\n ):\n \"\"\"\n Required parameter:\n\n basename: the prefix of the filename.\n\n Optional keyword parameters:\n size=1: Number of MPI processes. This typically corresponds to\n comm.size for an MPI communicator comm.\n rank=0: Number of the MPI process that created this\n file. 
Typically comm.rank.\n mpiok=True: Whether parallel HDF5 should be used to store to\n store all the data from all MPI processes in a single\n file.\n mode='r+': The file mode for opening the h5py.File.\n retries=0. If nonzero, retry faile dopens this many times.\n retry_interval=60: time (in secodns) between successive\n retries. Note: the open will block while waiting for a\n successful retry.\n\n size, rank, and mpiok are used mostly to figure out what\n filename to use. They need not correspond to the actual\n current MPU configuration. For instance, they may correspond\n to the config when the time series was created.\n \"\"\"\n self.get_filename(basename, size, rank, mpiok, mode)\n self.retries = retries\n self.retry_interval = retry_interval\n self._size = size\n self._rank = rank\n self._mode = mode\n self._tsf = self.open_with_retry()\n _ = self.info # make sure '/info' exists\n self.try_to_set('size', self.size)\n self.try_to_set('rank', self.rank)\n if 'times' in self.tsf:\n self.ts = np.array(self.tsf['times'][()])\n try:\n self.ks = np.array(self.tsf['ks'][()])\n except KeyError:\n self.ks = np.arange(len(self.ts))\n self.order = np.array(self.tsf['order'][()])\n else:\n self.ts = np.array([], dtype=float)\n self.ks = np.array([], dtype=int)\n self.order = np.array([], dtype=int)\n self.lastk = self.ks.size - 1\n self.sorted = False\n self.tsf.flush()\n\n def parse_filename(filename):\n \"\"\"\n filename is a name like 'bases2r1.h5'. parse_filename returns\n (basename, size, rank, mpi) (('base', 2, 1, False) for the\n example). For a filename like 'tests/test1mpi.h5', returns\n ('base', 1, 0, True). \n \"\"\"\n mpipat = '(.*)MPI\\.h5'\n nompi_pat = '(.*)s(\\d+)r(\\d+)\\.h5'\n res = re.fullmatch(mpipat, filename)\n if res:\n return (res[1], 1, 0, True)\n res = re.fullmatch(nompi_pat, filename)\n if res:\n return (res[1], res[2], res[3], False)\n raise ValueError(\n \"Couldn't parse filename {fname}\".format(fname=filename)\n )\n\n def set_grid(self, grid):\n self._grid = grid\n self._dim = grid.dim\n self._dof = grid.dof\n if self.rank_owns_file:\n self._ranges = grid.ranges\n # if (\n # 'ranges' in self.tsf and\n # not np.all(self.tsf['ranges'][()] == self.ranges)\n # ):\n # raise ValueError(\n # \"data ranges {filerange} in {file} doesn't \" +\n # \"match grid range {gridrange}\".format(\n # filerange=str(self.tsf['ranges'][()]),\n # file=self.filename,\n # gridrange=str(grid.ranges)\n # )\n # )\n self.myslice = (slice(0, None),)*(self.dim + 1)\n else:\n self._ranges = tuple((0, np) for np in grid.nps)\n #\n # Slice of the global array belonging to this process:\n self.myslice = (slice(0, None),) + tuple(\n slice(*r) for r in grid.ranges\n )\n self.try_to_set('ranges', self.ranges)\n \n def get_filename(self, basename, size=1, rank=0, mpiok=True,\n mode='r+'):\n \"\"\"\n Get name of file to be opened by this process\n\n self.filename is set to the name of the HDF5 file to be\n opened. This is also returned as the function value. 
In\n addition, the following flags are set:\n self.creating: True if creating a new file.\n self.rank_owns_file: True if the file will be exclusively\n owned by this process.\n \"\"\"\n self.usempi = mpiok and h5py.get_config().mpi\n name_nompi = '{name}s{size}r{rank}.h5'.format(\n name=basename,\n size=size,\n rank=rank\n )\n name_mpi = '{name}MPI.h5'.format(name=basename)\n name_seq = '{name}s1r0.h5'.format(name=basename)\n self.driver = None\n if self.usempi and os.path.isfile(name_mpi):\n self.creating = mode[0] == 'w' or mode[0] == 'x'\n self.rank_owns_file = size == 1\n self.filename = name_mpi\n elif self.usempi and (mode[0] == 'w' or mode[0] == 'x'):\n self.creating = True\n self.rank_owns_file = size == 1\n self.filename = name_mpi\n elif os.path.isfile(name_nompi):\n self.creating = mode[0] == 'w' or mode[0] == 'x'\n self.rank_owns_file = True\n self.filename = name_nompi\n elif (mode == 'r' or mode == 'a') and os.path.isfile(name_seq):\n self.creating = False\n self.rank_owns_file = size == 1\n self.filename = name_seq\n # Allow reading from MPi file even if we're not using MPI:\n elif (mode == 'r' or mode == 'a') and os.path.isfile(name_mpi):\n self.creating = False\n self.rank_owns_file = size == 1\n self.filename = name_mpi\n else:\n self.creating = mode != 'r'\n self.rank_owns_file = not self.usempi\n self.filename = name_mpi if self.usempi else name_nompi\n if self.creating and not self.rank_owns_file and self.usempi:\n self.driver = 'mpio'\n if self.creating:\n os.makedirs(os.path.dirname(self.filename), exist_ok=True)\n logSERIES('self.filename', self.filename)\n logSERIES('self.creating', self.creating)\n logSERIES('self.rank_owns_file', self.rank_owns_file)\n logSERIES('self.driver', self.driver)\n logSERIES('self.usempi', self.usempi)\n return self.filename\n\n def open(self, filename, usempi, mode):\n if mode in ['w', 'w-', 'x', 'a']:\n dirname = os.path.dirname(os.path.abspath(filename))\n try:\n os.makedirs(dirname, exist_ok=True)\n except FileExistsError:\n pass\n\n def grid_save(self):\n grid = self.grid\n attrs = ['dim', 'dof', 'nps', 'bounds', 'spacing', 'order',\n 'stencil_width', 'stencil_type', 'boundary_type',\n 'globalSshape', 'globalVshape', 'globalCshape', 'Slshape',\n 'Vlshape', 'ranges', 'Clshape', 'Cashape',\n 'coordsNoGhosts', 'coordsWithGhosts',\n ]\n for a in attrs:\n self.try_to_set('/grid/' + a, getattr(grid, a))\n\n def grid_read(self):\n \"\"\"Reads grid params from open file, returns dict\"\"\"\n ggroup = self.tsf['grid']\n gd = {}\n attrs = ['dim', 'dof', 'nps', 'bounds', 'spacing', 'order',\n 'stencil_width', 'stencil_type', 'boundary_type',\n 'globalSshape', 'globalVshape', 'globalCshape', 'Slshape',\n 'Vlshape', 'ranges', 'Clshape', 'Cashape',\n 'coordsNoGhosts', 'coordsWithGhosts',\n ]\n for a in attrs:\n try:\n val = ggroup[a][()]\n if a.endswith('shape'):\n gd[a] = tuple(val)\n elif np.isscalar(val):\n gd[a] = val.item()\n else:\n gd[a] = val\n except KeyError:\n gd[a] = None\n gd['width'] = gd['bounds'][0]\n gd['height'] = gd['bounds'][1] if gd['dim'] > 1 else 1.0\n gd['depth'] = gd['bounds'][2] if gd['dim'] > 2 else 1.0\n gd['nx'] = gd['nps'][0]\n gd['ny'] = gd['nps'][1] if gd['dim'] > 1 else 8\n gd['nz'] = gd['nps'][2] if gd['dim'] > 2 else 8\n return gd\n\n def grid_load(self, gd=None):\n \"\"\"Reads grid params from open file and creates new Grid.\"\"\"\n if gd is None:\n gd = self.grid_read()\n grid = Grid(\n dim=gd['dim'],\n width=gd['width'],\n height=gd['height'],\n depth=gd['depth'],\n nx=gd['nx'],\n ny=gd['ny'],\n 
nz=gd['nz'],\n dof=gd['dof'],\n order=gd['order'],\n stencil_width=gd['stencil_width'],\n stencil_type=gd['stencil_type'],\n boundary_type=gd['boundary_type']\n )\n self.set_grid(grid)\n\n #\n # info is a place for caller to store stuff\n @property\n def info(self):\n \"\"\"Place for caller to store extra stuff\"\"\"\n if not hasattr(self, '_info') or not self._info:\n self._info = self.tsf.require_group('/info')\n return self._info\n\n @property\n def tsFile(self):\n \"\"\"The open h5File object\"\"\"\n return self._tsf\n\n @property\n def tsf(self):\n return self._tsf\n\n @property\n def size(self):\n return self._size\n\n @property\n def rank(self):\n return self._rank\n\n @property\n def mode(self):\n return self._mode\n\n @property\n def ranges(self):\n return self._ranges\n\n @property\n def comm(self):\n return self._comm\n\n @property\n def grid(self):\n return self._grid\n\n @property\n def dim(self):\n return self._dim\n\n @property\n def dof(self):\n return self._dof\n\n def try_to_set(self, key, val):\n \"\"\"Try to set self.tsf[key] to val, but ignore exceptions\"\"\"\n if (self.mode == 'r'): return\n try:\n del self.tsf[key]\n except KeyError:\n pass\n try:\n self.tsf[key] = val\n except ValueError:\n pass\n \n def _sort(self):\n if getattr(self, 'sorted', False): return\n ts = getattr(self, 'ts', np.array([]))\n self.try_to_set('times', ts)\n self.order = ts.argsort()\n self.try_to_set('order', self.order)\n self.sts = ts\n self.sts.sort()\n ks = getattr(self, 'ks', [])\n lastk = getattr(self, 'lastk', -1)\n self.try_to_set('ks', ks)\n self.try_to_set('lastk', lastk)\n self.sorted = True\n\n def flush(self):\n self._sort()\n self.tsf.flush()\n\n def temp_close(self):\n \"\"\"\n temp_close closes the HDF5 file in which the TimeSeries is\n stored without destroying associated information. The file\n can be reopened with little loss of time. temp_close and\n reopen are intended for use during long solutions. 
If there is\n a crash during solution, a temp-closed TimeSeries will be left\n in a valid state for later use.\n \"\"\"\n self._sort()\n self.tsf.close()\n\n def open_with_retry(\n self,\n fname=None,\n mode=None,\n driver=None,\n comm=None\n ):\n if fname is None:\n fname = self.filename\n if mode is None:\n mode = self.mode\n if driver is None:\n driver = self.driver\n if comm is None:\n comm = self.comm\n if isinstance(comm, petsc4py.PETSc.Comm):\n comm = comm.tompi4py()\n logSERIES('fname, mode, driver, comm', fname, mode, driver, comm)\n try:\n if driver == 'mpio':\n logSERIES('trying 4-argument open')\n comm.Barrier()\n logSERIES('comm.rank, comm.size', comm.rank, comm.size)\n tsf = h5py.File(fname, mode=mode,\n driver=driver, comm=comm)\n else:\n logSERIES('trying 3-argument open')\n tsf = h5py.File(fname, mode=mode,\n driver=driver)\n except OSError:\n retries_left = self.retries\n if retries_left <= 0:\n logSERIES('open failed: re-raising exception')\n raise\n while retries_left > 0:\n logSERIES('reopen failed with OSError: {n} retries left'.format(\n n=retries_left\n ))\n logSERIES('tb.format_exc()', tb.format_exc())\n time.sleep(self.retry_interval)\n try: \n if driver == 'mpio':\n logSERIES('trying 4-argument open')\n comm.Barrier()\n logSERIES('comm.rank, comm.size', comm.rank, comm.size)\n tsf = h5py.File(fname, mode=mode,\n driver=driver, comm=comm)\n else:\n logSERIES('trying 3-argument open')\n tsf = h5py.File(fname, mode=mode,\n driver=driver)\n failed = False\n except OSError:\n failed = True\n if retries_left <= 1:\n raise\n if not failed:\n break\n retries_left -= 1\n return tsf\n \n def reopen(self):\n \"\"\"\n Reopen a temp_closed TimeSeries\n \"\"\"\n mode = self.mode if self.mode == 'r' else 'r+'\n self._tsf = self.open_with_retry(mode=mode)\n\n def close(self):\n if not hasattr(self, '_tsf') or not self._tsf:\n self.reopen()\n self._sort()\n self.tsf.close()\n del self._tsf\n gc.collect()\n \n # def __del__(self):\n # self.close()\n\n def store(self, data, t, k=None):\n if isinstance(data, petsc4py.PETSc.Vec):\n vals = data.array.reshape(self.grid.Vlshape, order='F')\n else:\n vals = data.reshape(self.grid.Vlshape, order='F')\n logSERIES('k, t', k, t)\n if k is None:\n k = self.lastk + 1\n self.lastk = k\n self.ks = np.append(self.ks, k)\n self.ts = np.append(self.ts, t)\n key = 'data' + str(k)\n try:\n dset = self.tsf.create_dataset(key, self.grid.Vlshape,\n dtype=vals.dtype)\n except OSError:\n dset = self.tsf[key] # dset already exists\n Cvals = vals.copy(order='C') # h5py requires C order\n if self.rank_owns_file:\n dset.write_direct(Cvals)\n else:\n dset[self.myslice] = Cvals \n dset.attrs['k'] = k\n dset.attrs['t'] = t\n self.sorted = False\n self.tsf.flush()\n\n def store_slice(self, ranges, data, t, tol=1e-7):\n shape = (self.grid.dof,) + tuple(\n r[1] - r[0] for r in ranges\n )\n slc = (slice(0, None),) + tuple(\n slice(*r) for r in ranges\n )\n vals = data.reshape(shape, order='F')\n na, nb, ta, tb = self.find_time(t)\n logSERIES('na, nb, ta, tb', na, nb, ta, tb)\n if abs(t-ta) <= abs(tb-t):\n n, tn = na, ta\n else:\n n, tn = nb, tb\n if (\n (not (t == 0.0 and tn == 0.0)) and\n ((self.sts.size <= n) or\n (abs(t-tn)/max(abs(t), abs(tn)) > tol))\n ):\n #\n # New time point: append it to the lists\n #\n k = self.lastk + 1\n self.lastk = k\n self.ks = np.append(self.ks, k)\n self.ts = np.append(self.ts, t)\n key = 'data' + str(k)\n dset = self.tsf.create_dataset(key, self.grid.Vlshape,\n dtype=vals.dtype)\n logSERIES('k, t', k, t)\n dset.attrs['k'] = k\n 
dset.attrs['t'] = t\n self.sorted = False\n else:\n k = n\n key = 'data' + str(k)\n dset = self.tsf[key]\n dset[slc] = vals \n self.tsf.flush()\n\n def times(self):\n self._sort()\n return self.ts\n\n def steps(self):\n self._sort()\n return self.ks\n\n def sorted_times(self):\n self._sort()\n return self.sts\n\n def sorted_steps(self):\n self._sort()\n return self.order\n\n def retrieve_by_number(self, k):\n key = 'data' + str(k)\n dset = self.tsf[key]\n if self.rank_owns_file:\n return np.array(dset)\n else:\n return np.array(dset)[self.myslice]\n\n def find_time(self, t):\n \"\"\"\n Find the time points closest to t\n\n Returns tuple (a, b, ta, tb)\n a and b are the numbers (ints) of the points flanking t. ta\n and tb (floats) are the corresponding times. If there is a\n time point exactly matchig nt, than a == b, ta == tb == t.\n \"\"\"\n self._sort()\n if self.sts.size == 0:\n return (0, 0, t - 1.0, t - 1.0)\n if (t <= self.sts[0]):\n a = 0\n return (self.ks[a], self.ks[a], self.sts[a], self.sts[a])\n elif (t >= self.sts[-1]):\n a = len(self.sts) - 1\n return (self.ks[a], self.ks[a], self.sts[a], self.sts[a])\n else:\n b = self.sts.searchsorted(t)\n nb = self.order[b]\n tb = self.sts[b]\n if (b >= len(self.order) - 1):\n return(b, b, self.sts[b], self.sts[b])\n elif tb == t:\n return(b, b, tb, tb)\n a = b - 1\n na = self.order[a]\n ta = self.sts[a]\n return (a, b, ta, tb)\n\n def retrieve_by_time(self, t):\n \"\"\"\n Retrieve a time point.\n \n Arguments:\n t: the time to be retrieved.\n \"\"\"\n na, nb, ta, tb = self.find_time(t)\n adata = self.retrieve_by_number(na)\n if na == nb:\n return adata\n bdata = self.retrieve_by_number(nb)\n data = ((t-ta)*bdata + (tb-t)*adata)/(tb-ta)\n return(data)\n\nclass TimeSeries(KSFDTimeSeries):\n\n def __init__(\n self,\n basename,\n grid=None,\n comm=None,\n mpiok=False,\n mode='r+',\n retries=0,\n retry_interval=60\n ):\n \"\"\"\n Open a KSFD.TimeSeries\n\n Required parameters:\n basename: the name of the TimeSeries. (This is a prefix of the\n names of the HDF5 files in which data are stored.)\n\n Optional parameters:\n grid: The KSFD.Grid on which the PETSc Vecs to be saved are\n defined. This must be supplied when creating a new\n TimeSeries. When opening an existig nseries, it will be\n read from the file if not supplied.\n comm: the MPI communicator. (If not supplied, grid.comm is\n used.)\n mpiok=False: whether it is Ok to use parallel HDF5.\n mode: the file mode (See h5py.h5File.)\n retries=0. If nonzero, retry faile dopens this many times.\n retry_interval=60: time (in secodns) between successive\n retries. Note: the open will block while waiting for a\n successful retry.\n \"\"\"\n if comm:\n self._comm = comm\n elif grid:\n self._comm = grid.comm\n else:\n self._comm = MPI.COMM_SELF\n self._mode = mode\n self._size = self.comm.size\n self._rank = self.comm.rank\n self.mpiok = mpiok\n super().__init__(basename, size=self.size, rank=self.rank,\n mpiok=mpiok, mode=mode, retries=retries,\n retry_interval=retry_interval)\n if (grid):\n self.set_grid(grid)\n self.grid_save()\n else:\n self.grid_load()\n\n\nclass Gatherer(KSFDTimeSeries):\n \"\"\"\n Gatherer is a special-purpose iterator to allow a single\n sequential process to read the separate files written by a\n TimeSeries run under MPI. 
For instance, to reconstruct the global\n vector at the last time (assuming it fits in memory in a single\n process):\n\n gather = Gatherer(basename='base', size=4)\n grid = gather.grid\n lastk = gather.sorted_steps()[-1]\n vec = grid.Vdmda.createGlobalVec()\n vecarray = vec.array.reshape(grid.globalVshape, order='F')\n for series in gather:\n vec = grid.Vdmda.createGlobalVec()\n rank = series.rank\n vecarray[series.slice] = series.retrieve_by_number(lastk)\n \n <do something with vec...>\n\n This gatherer would iterate through files bases4r0.h5,\n bases4r1.h5, bases4r2.h5, and bases4r3.h5. Note that with every\n iteration it closes the last file and opens the next. Thus, if you\n want to iterate over all times, it is more efficient to nest the\n loops like this:\n\n for series in gather:\n for t in series.times():\n <do something for this file at this time)\n\n than the other way. (The other way would be more intuitive, but my\n expectation is that this class will be used mostly to gather all\n TimeSeries files into a single file, which then can be processed\n efficiently as a TimeSeries.)\n \"\"\"\n \n def __init__(\n self,\n basename,\n size=None,\n retries=0,\n retry_interval=60\n ):\n \"\"\"\n Required positional parameter\n \n basename: the prefix of the filenames for the TimeSeries being\n read. As a convenience, this can be a special filename\n that matches the regular expression '(.+)s(\\d+)@.*' (That\n is a literal '@'. Then the basename is the (.+) and the\n size is the (\\d+) following the 's' and preceding\n '@'. For example, \"bases4@' or '[email protected]' would both\n serve for a series with basename 'base' and size 4.\n\n Optional keyword parameter:\n size=None: This argument can be omitted only if the basename\n has the special @ filename format. Otherwise, it must be\n supplied.\n\n Gatherer is read-only (mode 'r'). \n \"\"\"\n self._comm = MPI.COMM_SELF\n self.retries = retries\n self.retry_interval = retry_interval\n gatherre = '(.+)s(\\d+)@.*'\n fname_match = re.fullmatch(gatherre, basename)\n if fname_match:\n base = fname_match[1]\n size = int(fname_match[2])\n else:\n base = basename\n size = size\n self.basename = base\n if not isinstance(size, int) or size <= 0:\n raise ValueError(\n 'size {size} is not a positive int'\n )\n #\n # This opens the first file. We have to do that so as to read\n # and initialize things like grid, times, etc.\n #\n super().__init__(\n basename=base,\n size=size,\n rank=0,\n mpiok=False,\n mode='r',\n retries=retries,\n retry_interval=retry_interval\n )\n self.set_ranges()\n #\n # Since we have to open the rank 0 file before startig\n # iteration, the following flag is used to determine whether\n # to open a new file when __iter__ is called\n #\n self.iter_started = False\n self.iter_stopped = False\n\n def set_ranges(self):\n self.rank_owns_file = True\n gd = self.grid_read()\n self.grid_load(gd)\n self._ranges = gd['ranges']\n self._shape = (self.dof,) + tuple(\n r[1] - r[0] for r in self.ranges\n )\n self._slice = (slice(0, None),) + tuple(\n slice(*r) for r in self.ranges\n )\n \n @property\n def slice(self):\n return self._slice\n\n @property\n def shape(self):\n return self._shape\n\n def __iter__(self):\n return self\n \n def __next__(self):\n if self.iter_stopped:\n #\n # We previously exhausted the iteration. 
Restart it\n #\n self.tsf.close()\n self.__init__(self.basename,\n self.size,\n retries=self.retries,\n retry_interval=self.retry_interval\n )\n elif self.iter_started:\n #\n # We're not just starting: move on to next file\n #\n self.tsf.close()\n self._rank = self.rank + 1\n if self.rank >= self.size:\n self.iter_stopped = True\n raise StopIteration\n super().__init__(\n basename=self.basename,\n size=self.size,\n rank=self.rank,\n mpiok=False,\n mode='r',\n retries=self.retries,\n retry_interval=self.retry_interval\n )\n self.set_ranges()\n self.iter_started = True\n self.iter_stopped = False\n return self\n\n"
] | [
[
"numpy.append",
"numpy.array",
"numpy.isscalar"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
didichuxing/delta | [
"31dfebc8f20b7cb282b62f291ff25a87e403cc86"
] | [
"utils/avg_checkpoints.py"
] | [
"#!/usr/bin/env python3\n# Copyright 2019 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Script to average values of variables in a list of checkpoint files.\"\"\"\nimport os\nimport six\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\nfrom six.moves import zip # pylint: disable=redefined-builtin\nimport numpy as np\nimport delta.compat as tf\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string(\"checkpoints\", \"\",\n \"Comma-separated list of checkpoints to average.\")\nflags.DEFINE_integer(\n \"num_last_checkpoints\", 0, \"Averages the last N saved checkpoints.\"\n \" If the checkpoints flag is set, this is ignored.\")\nflags.DEFINE_string(\"prefix\", \"\",\n \"Prefix (e.g., directory) to append to each checkpoint.\")\nflags.DEFINE_string(\"output_path\", \"/tmp/averaged.ckpt\",\n \"Path to output the averaged checkpoint to.\")\n\n\ndef checkpoint_exists(path):\n return (tf.io.gfile.exists(path) or tf.io.gfile.exists(path + \".meta\") or\n tf.io.gfile.exists(path + \".index\"))\n\n\ndef main(_):\n if FLAGS.checkpoints:\n # Get the checkpoints list from flags and run some basic checks.\n checkpoints = [c.strip() for c in FLAGS.checkpoints.split(\",\")]\n checkpoints = [c for c in checkpoints if c]\n if not checkpoints:\n raise ValueError(\"No checkpoints provided for averaging.\")\n if FLAGS.prefix:\n checkpoints = [FLAGS.prefix + c for c in checkpoints]\n else:\n assert FLAGS.num_last_checkpoints >= 1, \"Must average at least one model\"\n assert FLAGS.prefix, (\"Prefix must be provided when averaging last\"\n \" N checkpoints\")\n checkpoint_state = tf.train.get_checkpoint_state(\n os.path.dirname(FLAGS.prefix))\n # Checkpoints are ordered from oldest to newest.\n checkpoints = checkpoint_state.all_model_checkpoint_paths[\n -FLAGS.num_last_checkpoints:]\n\n checkpoints = [c for c in checkpoints if checkpoint_exists(c)]\n if not checkpoints:\n if FLAGS.checkpoints:\n raise ValueError(\"None of the provided checkpoints exist. 
%s\" %\n FLAGS.checkpoints)\n else:\n raise ValueError(\"Could not find checkpoints at %s\" %\n os.path.dirname(FLAGS.prefix))\n\n # Read variables from all checkpoints and average them.\n logging.info(\"Reading variables and averaging checkpoints:\")\n for c in checkpoints:\n logging.info(\"%s \", c)\n var_list = tf.train.list_variables(checkpoints[0])\n var_values, var_dtypes = {}, {}\n for (name, shape) in var_list:\n if not name.startswith(\"global_step\"):\n var_values[name] = np.zeros(shape)\n for checkpoint in checkpoints:\n reader = tf.train.load_checkpoint(checkpoint)\n for name in var_values:\n tensor = reader.get_tensor(name)\n var_dtypes[name] = tensor.dtype\n var_values[name] += tensor\n logging.info(\"Read from checkpoint %s\", checkpoint)\n for name in var_values: # Average.\n var_values[name] /= len(checkpoints)\n\n with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):\n tf_vars = [\n tf.get_variable(v, shape=var_values[v].shape, dtype=var_dtypes[v])\n for v in var_values\n ]\n placeholders = [tf.placeholder(v.dtype, shape=v.shape) for v in tf_vars]\n assign_ops = [tf.assign(v, p) for (v, p) in zip(tf_vars, placeholders)]\n global_step = tf.Variable(\n 0, name=\"global_step\", trainable=False, dtype=tf.int64)\n saver = tf.train.Saver(tf.all_variables())\n\n # Build a model consisting only of variables, set them to the average values.\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for p, assign_op, (name, value) in zip(placeholders, assign_ops,\n six.iteritems(var_values)):\n sess.run(assign_op, {p: value})\n # Use the built saver to save the averaged checkpoint.\n saver.save(sess, FLAGS.output_path, global_step=global_step)\n\n logging.info(\"Averaged checkpoints saved in %s\", FLAGS.output_path)\n\n\nif __name__ == \"__main__\":\n app.run(main)\n"
] | [
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
TemsyChen/Spotifinder | [
"b069ffcd63bd7654e1afd51cde3288c9678d121a"
] | [
"app/app_3rdtry.py"
] | [
"import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport plotly.express as px\nfrom dash.dependencies import Input, Output\nimport pandas as pd\nimport pickle\n# from os.path import dirname\n\n# DIR = dirname(__file__)\n# MODELS_DIR = DIR + '/../models/'\n# DATA_DIR = DIR + '/../data/'\n\n# data_filename = DATA_DIR + 'NLP_songs_data.zip'\n# model_filename = MODELS_DIR + 'nlp_model.pkl'\n# dtm_filename = MODELS_DIR + 'nlp_dtm.pkl'\n\n# df = None\n# loaded_model = None\n# dtm = None\n\n# def load_files():\n# global df, loaded_model, dtm\n\n# df = pd.read_csv(data_filename)\n# loaded_model = pickle.load(open(model_filename, 'rb'))\n# dtm = pickle.load(open(dtm_filename, 'rb'))\n\n# load_files()\n\ndata_filename = r'C:\\Users\\temsy\\Documents\\GitHub\\Spotifinder\\data\\NLP_songs_data.zip'\n\ndf = pd.read_csv(data_filename)\nloaded_model = pickle.load(open(r'C:\\Users\\temsy\\Documents\\GitHub\\Spotifinder\\models\\nlp_model.pkl', 'rb'))\ndtm = pickle.load(open(r'C:\\Users\\temsy\\Documents\\GitHub\\Spotifinder\\models\\nlp_dtm.pkl', 'rb'))\n\n#Plotly Dash\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets, requests_pathname_prefix = '/dash/')\n\napp.layout = html.Div([\n html.Label(\"Artist:\", style={'fontSize':30, 'textAlign':'center'}),\n dcc.Dropdown(\n id='Artist',\n options=[{\n 'label': c,\n 'value': c}\n for c in df['track_artist']],\n value = df['track_artist'][0]\n ),\n html.Label(\"Songs:\", style={'fontSize':30, 'textAlign':'center'}),\n dcc.Dropdown(id='Songs',\n multi=False),\n html.Label(\"Recommendations:\", style={'fontSize':30, 'textAlign':'center'}),\n html.Div(id='Recommendations')\n])\n\[email protected](\n Output('Songs', 'options'),\n [Input('Artist', 'value')]\n)\ndef set_options(artist):\n dff = df[df.track_artist == artist]\n dicosongs = [{'label': c, 'value': c} for c in sorted(dff.track_name.unique())]\n return dicosongs\n\[email protected](\n Output('Recommendations', 'dicorecs')\n [Input('Songs', 'value')],\n [Input('Artist', 'value')]\n)\ndef predict(artist, song):\n # if dtm is None:\n # load_files()\n #translate artist, song into doc dtm.iloc[x].values\n artist_songs = df.loc[df['track_artist'] == artist]\n selected_song = artist_songs.loc[artist_songs['track_name'] == song]\n x = selected_song.index\n x = x[0]\n x = x.item()\n \n doc = dtm.loc[x].values\n result = loaded_model.kneighbors([doc], n_neighbors=6)\n\n songs = []\n # rec_songs = {\"artist\": [], \"song\": []};\n\n for i in range(5):\n song = result[1][0][1 + i]\n\n # translate the loc into an artist and song title\n artist = df.loc[song]['track_artist']\n song = df.loc[song]['track_name']\n\n # rec_songs['artist'].append(artist)\n # rec_songs['song'].append(song)\n songs.append(song)\n\n return result[1][0]\n\nif __name__ == '__main__':\n app.run_server(debug=True)"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
alifianmahardhika/galaxy_simpy | [
"799d11b00a3b14991d89ddac0aabf0bcd447b800",
"799d11b00a3b14991d89ddac0aabf0bcd447b800"
] | [
"two-body-mond.py",
"new-code/simple-code-program.py"
] | [
"import matplotlib.pyplot as plt\nfrom numpy import sin,cos,pi,sqrt,exp,floor,zeros,copy,array\nfrom numpy.random import normal\nfrom numpy.linalg import norm\nfrom random import uniform\nfrom time import time\n\nstart = time()\ndef euler(x,v):\n for i in range(n_particles):\n sigmaF = zeros(2)\n for j in range(n_particles):\n if(i!=j): \n sigmaF += f(x[i],x[j])\n x[i] += v[i]*dt\n v[i] += a_0*phi_inv(norm(sigmaF)/a_0)*(sigmaF/norm(sigmaF))*dt\ndef symplectic(x,v):\n for i in range(n_particles):\n sigmaF = zeros(2)\n for j in range(n_particles):\n if(i!=j): \n sigmaF += f(x[i],x[j])\n v[i] += G*sigmaF*dt\n x[i] += v[i]*dt\ndef f(xi,xj):\n rij = xj-xi\n return (G*m*rij)/(norm(rij)+epsilon)**3\ndef init_two():\n x1 = ([R*cos(omega*0),R*sin(omega*0)])\n x2 = -copy(x1)\n v1 = ([omega*x1[1],omega*x1[0]])\n v2 = -copy(v1)\n x = array([x1,x2])\n v = array([v1,v2])\n return x,v\ndef kinetic_energy():\n sigmaN = 0.0\n for i in range(n_particles):\n sigmaN += 0.5*m*norm(v[i])**2\n return sigmaN\ndef phi_inv(q):\n return sqrt(q)*sqrt((1.0+sqrt(1.0+(4.0/r**2)))/2.0)\n#Global parameter\nn_particles = 2 #particles\nd = 2 #dimension\nm = 10e11/n_particles #[MO]\nR = 2.9 #[kpc]\nG = 13.34*10e-11 #[kpc^3 MO^-1 gy^-2]\nomega = sqrt((G*m)/(4*R**3)) #velocities\nepsilon = 1e-3\nT = 100\ndt = 0.001\nN = int(floor(T/dt))\nscale = 30.0\na_0 = 10e-1\n#initial condition\nx,v = init_two()\n#x = get_init_coordinates()\n#v = get_init_velocities()\nprint(x)\n#main loop\nplt.plot(x[:,0],x[:,1], 'ro')\nfor k in range(N):\n euler(x,v)\n #print(kinetic_energy())\n #plt.plot(xe[:,0],xe[:,1], 'b.')\n #plt.xlim(right=scale,left=-scale)\n #plt.ylim(top=scale,bottom=-scale)\n #plt.axes(aspect='equal')\n if(k%100==0):\n plt.plot(x[:,0],x[:,1], 'b.')\n#filename='./figures/plot.png'\n#plt.savefig(filename)\nprint(\"Time for running \", N, \"iteration :\", time()-start, \"seconds\")\nprint(x)\nplt.show()",
"import numpy as np\nimport matplotlib.pyplot as plt\n\n#initil-parameter-symple-model\nR = 2\nN = 150\nm = 10\nm_0 = 10*m\ndt = 0.002\nG = 13.37*10**(-11)\neps =0.3\nalpha_0 = 1.2e-8\nomega = np.random.normal(0,2*np.pi)\n\n#inital_array\nx = np.zeros((N,2))\nx_center = np.ones((N,2))\nv = np.zeros((N,2))\nf = np.zeros((N,2))\n\n#initialization_particle-system\nfor i in range(N):\n t = i*dt\n x[i] = [np.random.uniform(-R,R)*np.cos(2*np.pi*(i/N)),np.random.uniform(-R,R)*np.sin(2*np.pi*(i/N))]\n v[i] = [-omega*x[i,1],omega*x[i,0]]\n\n#force\ndef force(f,x,x_center,N):\n ff = np.zeros((N,2))\n for i in range(N):\n x_2nd = m_0*G*(x_center[i])\n for j in range(N):\n if i!=j:\n r_ij = 2*R*np.sin(np.pi*np.abs(x[i]-x[j])/N)\n ff = m*G*(x[i]-x[j])/(r_ij+eps)**3 - x_2nd\n f+=ff\n return f\n\n#euler-ND\nfor k in range(100):\n no = np.str(k)\n x+=v*dt\n v+=force(f,x,x_center,N)*dt\n plt.xlim(-6,6)\n plt.ylim(-6,6)\n plt.scatter(x[:,0],x[:,1],s=10,c='b')\n plt.savefig('./output_euler/plot2d-euler'+no+'.png')\n plt.close()\n\n#symplectic-ND\nfor k in range(100):\n no = np.str(k)\n v+=force(f,x,x_center,N)*dt\n x+=v*dt\n plt.xlim(-6,6)\n plt.ylim(-6,6)\n plt.scatter(x[:,0],x[:,1],s=10,c='r')\n plt.savefig('./output_symplectic/plot2d-symplectic'+no+'.png')\n plt.close()\n \ndef beta(q): #for-MOND-model\n return np.sqrt(1+np.sqrt(1+(4/q**2))/2)\n \n# symplectic_mond\na = np.zeros((N,2))\nfor k in range(100):\n no= np.str(k)\n qq = (np.abs(force(f,x,x_center,N))/alpha_0)\n a = force(f,x,x_center,N)*beta(qq)\n v += a*dt\n x += v*dt\n plt.xlim(-6,6)\n plt.ylim(-6,6)\n plt.scatter(x[:,0],x[:,1],s=10,c='m')\n plt.savefig('./output_mond_symplectic/plot2dmond-symplectic'+no+'.png')\n plt.close()\n \n# euler_mond\na = np.zeros((N,2))\nfor k in range(100):\n no= np.str(k)\n qq = (np.abs(force(f,x,x_center,N))/alpha_0)\n a= force(f,x,x_center,N)*beta(qq)\n x += v*dt\n v += a*dt\n plt.xlim(-6,6)\n plt.ylim(-6,6)\n plt.scatter(x[:,0],x[:,1],s=10,c='k')\n plt.savefig('./output_mond_euler/plot2dmond-euler'+no+'.png')\n plt.close()"
] | [
[
"numpy.sqrt",
"numpy.cos",
"numpy.linalg.norm",
"numpy.sin",
"matplotlib.pyplot.plot",
"numpy.copy",
"numpy.floor",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.show"
],
[
"numpy.sqrt",
"matplotlib.pyplot.scatter",
"numpy.str",
"numpy.abs",
"matplotlib.pyplot.ylim",
"numpy.cos",
"matplotlib.pyplot.savefig",
"numpy.ones",
"numpy.sin",
"numpy.random.normal",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.close",
"numpy.random.uniform",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sagartomar/aesara | [
"477f4e5dd757b1ccd3deaf59bf75fc27d7ab9cf6",
"477f4e5dd757b1ccd3deaf59bf75fc27d7ab9cf6",
"17594a05d4dee2574f07771b8f176e7dd60d134a"
] | [
"aesara/scan/op.py",
"aesara/tensor/math.py",
"tests/tensor/test_math.py"
] | [
"\"\"\"This module provides the `Scan` `Op`.\n\nMemory reuse in scan\n--------------------\n\nTo reduce the number of memory allocations and copies associated with calling\nthe inner function and recovering the outputs at every iteration, Scan uses a\nmemory pre-allocation mechanism for some of its outputs. Instead of repeatedly\ncalling the inner function and copying the outputs to designated locations,\nit tries to make the inner function write the outputs directly to the\ndesignated locations.\n\nThis is achieved by initializing, at every iteration, the output storage\nof the inner function with references to previously allocated memory. Other\nthan the code in the Python and Cython backends to do this and to ensure that\nthe pre-allocated memory has been used, the memory pre-allocation mechanism\nrelies on the following elements to work properly :\n- In make_thunk(), when compiling the inner function, the borrow flag must\n be set to False for the inputs. This will prevent aliasing between the\n inputs and the outputs of the inner function which could lead to invalid\n results.\n- In make_thunk(), again, the borrow flag must be set to True for the outputs.\n This will make Aesara consider the output storages as persistent and make\n Aesara provide them as pre-allocated storage to the ops that compute the\n outputs of the inner function instead of letting these ops allocate their\n own output storage.\n- The ops that produce the outputs of the inner function must be prevented\n from working inplace because if they do, they're not using the pre-allocated\n storage. This is achieved by including the optimization\n 'add_no_output_from_inplace' to the compilation mode used by scan. It\n prevents other optimizations from altering the graph such that outputs are\n produced by inplace operations.\n- The ScanSaveMem optimization, whose goal is to limit the amount of memory\n used by scan, needs to allocate buffers large enough to be able, at every\n iteration, to simultaneously read the needed previous states and storing\n the new states. Before the memory reuse feature, the buffers could be\n smaller because, often, Scan only needed buffers large enough to read the\n needed previous states. This is because all the outputs of the inner\n function were computed before any of them was stored in the buffers. 
Now,\n the outputs are stored as they are computed which means that, if the buffer\n is too small, computing an output can overwrite an input that is still\n needed to compute another output.\n\n\"\"\"\n\n\nimport copy\nimport itertools\nimport logging\nimport time\nfrom collections import OrderedDict\n\nimport numpy as np\n\nimport aesara\nfrom aesara import tensor as aet\nfrom aesara.compile.builders import infer_shape\nfrom aesara.compile.function import function\nfrom aesara.compile.io import In, Out\nfrom aesara.compile.mode import AddFeatureOptimizer, get_mode\nfrom aesara.compile.profiling import ScanProfileStats, register_profiler_printer\nfrom aesara.configdefaults import config\nfrom aesara.gradient import DisconnectedType, NullType, Rop, grad, grad_undefined\nfrom aesara.graph.basic import (\n Apply,\n Constant,\n Variable,\n clone_replace,\n equal_computations,\n graph_inputs,\n io_connection_pattern,\n)\nfrom aesara.graph.features import NoOutputFromInplace\nfrom aesara.graph.fg import MissingInputError\nfrom aesara.graph.op import Op, ops_with_inner_function\nfrom aesara.link.c.basic import CLinker\nfrom aesara.link.c.exceptions import MissingGXX\nfrom aesara.link.utils import raise_with_op\nfrom aesara.scan.utils import Validator, forced_replace, hash_listsDictsTuples, safe_new\nfrom aesara.tensor.basic import as_tensor_variable\nfrom aesara.tensor.math import minimum\nfrom aesara.tensor.shape import Shape_i\nfrom aesara.tensor.type import TensorType, integer_dtypes\nfrom aesara.tensor.var import TensorVariable\n\n\n__docformat__ = \"restructedtext en\"\n__authors__ = (\n \"Razvan Pascanu \"\n \"Frederic Bastien \"\n \"James Bergstra \"\n \"Pascal Lamblin \"\n \"PyMC Developers \"\n \"Aesara Developers \"\n)\n__copyright__ = \"(c) 2010, Universite de Montreal\"\n\n# Logging function for sending warning or info\n_logger = logging.getLogger(\"aesara.scan.op\")\n\n\nclass Scan(Op):\n \"\"\"\n\n Parameters\n ----------\n inputs\n Inputs of the inner function of scan.\n outputs\n Outputs of the inner function of scan.\n info\n Dictionary containing different properties of the scan op (like number\n of different types of arguments, name, mode, if it should run on GPU or\n not, etc.).\n typeConstructor\n Function that constructs an equivalent to Aesara TensorType.\n\n Notes\n -----\n ``typeConstructor`` had been added to refactor how\n Aesara deals with the GPU. If it runs on the GPU, scan needs\n to construct certain outputs (those who reside in the GPU\n memory) as the GPU-specific type. However we can not import\n gpu code in this file (as it is in sandbox, and not available\n on each machine) so the workaround is that the GPU\n optimization passes to the constructor of this class a\n function that is able to construct a GPU type. 
This way the\n class Scan does not need to be aware of the details for the\n GPU, it just constructs any tensor using this function (which\n by default constructs normal tensors).\n\n \"\"\"\n\n def __init__(\n self,\n inputs,\n outputs,\n info,\n typeConstructor=None,\n ):\n # adding properties into self\n self.inputs = inputs\n self.outputs = outputs\n self.__dict__.update(info)\n # I keep a version of info in self, to use in __eq__ and __hash__,\n # since info contains all tunable parameters of the op, so for two\n # scan to be equal this tunable parameters should be the same\n self.info = info\n # build a list of output types for any Apply node using this op.\n self.output_types = []\n idx = 0\n jdx = 0\n\n def tensorConstructor(broadcastable, dtype):\n return TensorType(broadcastable=broadcastable, dtype=dtype)\n\n if typeConstructor is None:\n typeConstructor = tensorConstructor\n\n while idx < self.n_mit_mot_outs:\n # Not that for mit_mot there are several output slices per\n # output sequence\n o = outputs[idx]\n self.output_types.append(\n typeConstructor(\n broadcastable=(False,) + o.type.broadcastable, dtype=o.type.dtype\n )\n )\n\n idx += len(self.mit_mot_out_slices[jdx])\n jdx += 1\n\n # mit_sot / sit_sot / nit_sot\n end = idx + self.n_mit_sot + self.n_sit_sot + self.n_nit_sot\n\n for o in outputs[idx:end]:\n self.output_types.append(\n typeConstructor(\n broadcastable=(False,) + o.type.broadcastable, dtype=o.type.dtype\n )\n )\n\n # shared outputs + possibly the ending condition\n for o in outputs[end:]:\n self.output_types.append(o.type)\n\n if self.as_while:\n self.output_types = self.output_types[:-1]\n\n mode_instance = get_mode(self.mode)\n # Clone mode_instance, altering \"allow_gc\" for the linker,\n # and adding a message if we profile\n if self.name:\n message = self.name + \" sub profile\"\n else:\n message = \"Scan sub profile\"\n\n self.mode_instance = mode_instance.clone(\n link_kwargs=dict(allow_gc=self.allow_gc), message=message\n )\n\n if not hasattr(self, \"name\") or self.name is None:\n self.name = \"scan_fn\"\n # to have a fair __eq__ comparison later on, we update the info with\n # the actual mode used to compile the function and the name of the\n # function that we set in case none was given\n self.info[\"name\"] = self.name\n\n # Pre-computing some values to speed up perform\n self.mintaps = [np.min(x) for x in self.tap_array]\n self.mintaps += [0 for x in range(self.n_nit_sot)]\n self.seqs_arg_offset = 1 + self.n_seqs\n self.shared_arg_offset = (\n self.seqs_arg_offset + self.n_mit_mot + self.n_mit_sot + self.n_sit_sot\n )\n self.nit_sot_arg_offset = self.shared_arg_offset + self.n_shared_outs\n self.n_outs = self.n_mit_mot + self.n_mit_sot + self.n_sit_sot\n self.n_tap_outs = self.n_mit_mot + self.n_mit_sot\n if self.info[\"gpua\"]:\n self._hash_inner_graph = self.info[\"gpu_hash\"]\n else:\n # Do the missing inputs check here to have the error early.\n for var in graph_inputs(self.outputs, self.inputs):\n if var not in self.inputs and not isinstance(var, Constant):\n raise MissingInputError(f\"ScanOp is missing an input: {repr(var)}\")\n self._cmodule_key = CLinker().cmodule_key_variables(\n self.inputs, self.outputs, []\n )\n self._hash_inner_graph = hash(self._cmodule_key)\n\n # Compute mappings between outer inputs, outer outputs, inner\n # inputs and inner outputs to determine with variables are associated\n # with the same states.\n self.var_mappings = self.get_oinp_iinp_iout_oout_mappings()\n\n def validate_inner_graph(self):\n \"\"\"\n Perform 
some elementary validations on the inner graph to ensure\n that it is coherent.\n\n \"\"\"\n\n # For every recurrent output, iterate over the associated inner\n # inputs and output and ensure that they have the same dtype\n nb_recurr_outputs = self.n_mit_mot + self.n_mit_sot + self.n_sit_sot\n\n for outer_oidx in range(nb_recurr_outputs):\n\n inner_iidxs = self.var_mappings[\"inner_inp_from_outer_out\"][outer_oidx]\n inner_oidxs = self.var_mappings[\"inner_out_from_outer_out\"][outer_oidx]\n\n for (inner_iidx, inner_oidx) in itertools.product(inner_iidxs, inner_oidxs):\n\n type_input = self.inputs[inner_iidx].type\n type_output = self.outputs[inner_oidx].type\n if type_input != type_output:\n raise TypeError(\n \"Inconsistency in the inner graph of \"\n f\"scan '{self.name}' : an input and an output are \"\n \"associated with the same recurrent state \"\n \"and should have the same type but have \"\n f\"type '{type_input}' and '{type_output}' respectively.\"\n )\n\n # If scan has the flag 'gpua' set to false (meaning that is shouldn't\n # use the gpuarray gpu backend ), ensure that is has no input and no\n # output with type GpuArrayType\n from aesara.gpuarray import GpuArrayType\n\n if not self.info.get(\"gpua\", False):\n for inp in self.inputs:\n if isinstance(inp.type, GpuArrayType):\n raise TypeError(\n \"Inconsistency in the inner graph of \"\n f\"scan '{self.name}' : one of the inputs to the \"\n \"inner graph is of type GpuArrayType but \"\n \"the attributes of the scan op indicate \"\n \"that it shouldn't be the case\"\n )\n\n for out in self.outputs:\n if isinstance(out.type, GpuArrayType):\n raise TypeError(\n \"Inconsistency in the inner graph of \"\n f\"scan '{self.name}' : one of the outputs to the \"\n \"inner graph is of type GpuArrayType but \"\n \"the attributes of the scan op indicate \"\n \"that it shouldn't be the case\"\n )\n\n def __setstate__(self, d):\n self.__dict__.update(d)\n if \"allow_gc\" not in self.__dict__:\n self.allow_gc = True\n self.info[\"allow_gc\"] = True\n if not hasattr(self, \"var_mappings\"):\n # Generate the mappings between inner and outer inputs and outputs\n # if they haven't already been generated.\n self.var_mappings = self.get_oinp_iinp_iout_oout_mappings()\n if hasattr(self, \"fn\"):\n if not hasattr(self, \"thunk_mit_mot_out_slices\"):\n # The thunk has been compiled before mit_mot preallocation\n # feature was implemented. Mark every mit_mot output tap as\n # not having been preallocated\n self.mitmots_preallocated = [False] * self.n_mit_mot_outs\n\n if not hasattr(self, \"outs_is_tensor\"):\n # The thunk has been compiled before the analysis, at\n # compilation time, of the location of the inputs and outputs.\n # Perform this analysis here.\n self.inps_is_tensor = [\n isinstance(out, TensorVariable)\n for out in self.fn.maker.fgraph.inputs\n ]\n self.outs_is_tensor = [\n isinstance(out, TensorVariable)\n for out in self.fn.maker.fgraph.outputs\n ]\n\n # Ensure that the graph associated with the inner function is valid.\n self.validate_inner_graph()\n\n def make_node(self, *inputs):\n \"\"\"\n Conventions:\n inner_X - the variable corresponding to X in the inner function\n of scan (the lambda function executed at every time\n step)\n outer_X - the variable corresponding to X in the outer graph,\n i.e. the main graph (where the scan op lives)\n inner_X_out - the variable representing the new value of X after\n executing one step of scan (i.e. 
outputs given by\n the inner function)\n\n \"\"\"\n assert np.all(isinstance(i, Variable) for i in inputs)\n # Check that the number of inputs to the Scan node corresponds to\n # the number of inputs of the inner function of scan\n n_outer_ins = len(inputs) - len(self.outer_nitsot(inputs)) - 1\n n_inner_ins = (\n len(self.inner_seqs(self.inputs))\n + len(self.mitmot_taps())\n + len(self.mitsot_taps())\n + len(self.inner_sitsot(self.inputs))\n + len(self.inner_shared(self.inputs))\n + len(self.inner_non_seqs(self.inputs))\n )\n assert n_outer_ins == n_inner_ins, (\n \"The number of inputs given to the inner function of scan\"\n \" does not match the number of inputs given to scan.\"\n )\n # Force the inputs to be on the CPU\n new_inputs = [as_tensor_variable(inputs[0])]\n # assert dtype is consistent\n err_msg1 = (\n \"When compiling the inner function of scan (the \"\n \"function called by scan in each of its iterations) \"\n \"the following error has been encountered: The \"\n \"%s %s (argument number %d) has dtype \"\n \"%s and %d dimension(s). The corresponding variable \"\n \"in the inner function of scan %s \"\n \"however has dtype %s and %d dimension(s). This \"\n \"variable in the inner function of scan should \"\n \"have the same dtype and one fewer dimension \"\n \"compared to its corresponding variable in the initial \"\n \"state (outputs_info in scan nomenclature). For example, \"\n \"if the inner function of scan returns a vector \"\n \"of size d and scan uses the values of \"\n \"the previous time-step, then the initial state in scan \"\n \"should be a matrix of shape (1, d). \"\n \"The first dimension of this \"\n \"matrix corresponds to the number of previous time-steps \"\n \"that scan uses in each of its iterations. \"\n \"In order to solve this issue if the two variable currently \"\n \"have the same dimensionality, you can increase the \"\n \"dimensionality of the varialbe in the initial state of scan \"\n \"by using dimshuffle or shape_padleft. \"\n )\n err_msg2 = (\n \"When compiling the inner function of scan the \"\n \"following error has been encountered: The \"\n \"initial state (`outputs_info` in scan nomenclature) \"\n \"of variable %s (argument number %d) \"\n \"has dtype %s, while the result of the inner function \"\n \"(`fn`) has dtype %s. This can happen if the inner \"\n \"function of scan results in an upcast or downcast.\"\n )\n err_msg3 = (\n \"When compiling the inner function of scan (the \"\n \"function called by scan in each of its iterations) \"\n \"the following error has been encountered: The \"\n \"initial state (`outputs_info` in scan nomenclature) \"\n \"of variable %s (argument number %d) has %d dimension(s), \"\n \"while the corresponding variable in the result of the inner \"\n \"function of scan (`fn`) has %d dimension(s) (it should \"\n \"be one less than the initial state). For example, \"\n \"if the inner function of scan returns a vector \"\n \"of size d and scan uses the values of \"\n \"the previous time-step, then the initial state in scan \"\n \"should be a matrix of shape (1, d). \"\n \"The first dimension of this \"\n \"matrix corresponds to the number of previous time-steps \"\n \"that scan uses in each of its iterations. \"\n \"In order to solve this issue if the two varialbe currently \"\n \"have the same dimensionality, you can increase the \"\n \"dimensionality of the variable in the initial state of scan \"\n \"by using dimshuffle or shape_padleft. 
\"\n )\n\n def check_broadcast(v1, v2):\n \"\"\"Checks that the broadcast pattern of v1 and v2.\n\n Controls that the broadcast pattern of the variable provided as\n input to `scan` matches the broadcast pattern provided in\n `output_info`. It raises an error when they don't match. The\n typical case is when the user provides either the input or the\n `output_info` (but not both) with a dimension fixed to 1,\n which may wrongly be interpreted as broadcastable.\n\n \"\"\"\n if not hasattr(v1, \"broadcastable\") and not hasattr(v2, \"broadcastable\"):\n return\n msg = (\n \"The broadcast pattern of the output of scan (%s) is \"\n \"inconsistent with the one provided in `output_info` \"\n \"(%s). The output on axis %d is `%r`, but it is `%r` on \"\n \"axis %d in `output_info`. This can happen if one of the \"\n \"dimension is fixed to 1 in the input, while it is still \"\n \"variable in the output, or vice-verca. You have to make \"\n \"them consistent, e.g. using aesara.tensor.\"\n \"{patternbroadcast,unbroadcast,addbroadcast}.\"\n )\n size = min(len(v1.broadcastable), len(v2.broadcastable))\n for n, (b1, b2) in enumerate(\n zip(v1.broadcastable[-size:], v2.broadcastable[-size:])\n ):\n if b1 != b2:\n a1 = n + size - len(v1.broadcastable) + 1\n a2 = n + size - len(v2.broadcastable) + 1\n raise TypeError(msg % (v1.type, v2.type, a1, b1, b2, a2))\n\n def format(var, as_var):\n \"\"\"\n This functions ensures that ``out`` has the same dtype as\n ``inp`` as well as calling filter_variable to make sure\n they are both TensorType or GpuArrayType. It internally\n deals with the corner case where inp.ndim + 1 = out.ndim\n\n \"\"\"\n if not hasattr(var, \"dtype\"):\n return var\n rval = var\n if rval.type.dtype != as_var.type.dtype:\n rval = rval.astype(as_var.type.dtype)\n if rval.ndim == as_var.ndim:\n rval = as_var.type.filter_variable(rval)\n else:\n tmp = as_var.type.clone(\n broadcastable=(\n tuple(var.broadcastable[:1]) + tuple(as_var.broadcastable)\n )\n )\n rval = tmp.filter_variable(rval)\n return rval\n\n # Check if input sequences and variables representing a slice of\n # them have the same dtype\n argoffset = 0\n for inner_seq, outer_seq in zip(\n self.inner_seqs(self.inputs), self.outer_seqs(inputs)\n ):\n check_broadcast(outer_seq, inner_seq)\n new_inputs.append(format(outer_seq, as_var=inner_seq))\n\n argoffset += len(self.outer_seqs(inputs))\n # Check that this 3 things have the same dtype for mit_mot:\n # - initial state of the output\n # - variable representing an input slice of the output\n # - variable representing an output slice of the output\n ipos = 0\n opos = 0\n inner_mitmot = self.inner_mitmot(self.inputs)\n inner_mitmot_outs = self.inner_mitmot_outs(self.outputs)\n for idx, (itaps, otaps, _outer_mitmot) in enumerate(\n zip(self.mitmot_taps(), self.mitmot_out_taps(), self.outer_mitmot(inputs))\n ):\n outer_mitmot = format(_outer_mitmot, as_var=inner_mitmot[ipos])\n new_inputs.append(outer_mitmot)\n for k in range(len(itaps)):\n if (\n inner_mitmot[ipos + k].type.dtype != outer_mitmot.type.dtype\n or inner_mitmot[ipos + k].ndim != outer_mitmot.ndim - 1\n ):\n raise ValueError(\n err_msg1\n % (\n \"initial state (outputs_info\" \" in scan nomenclature) \",\n str(outer_mitmot),\n argoffset + idx,\n outer_mitmot.type.dtype,\n outer_mitmot.type.ndim,\n str(inner_mitmot[ipos + k]),\n inner_mitmot[ipos + k].type.dtype,\n inner_mitmot[ipos + k].type.ndim,\n )\n )\n ipos += len(itaps)\n for k in range(len(otaps)):\n if inner_mitmot_outs[opos + k].type.dtype != 
outer_mitmot.type.dtype:\n raise ValueError(\n err_msg2\n % (\n str(outer_mitmot),\n argoffset + idx,\n outer_mitmot.type.dtype,\n inner_mitmot_outs[opos + k].type.dtype,\n )\n )\n if inner_mitmot_outs[opos + k].ndim != outer_mitmot.ndim - 1:\n raise ValueError(\n err_msg3\n % (\n str(outer_mitmot),\n argoffset + idx,\n outer_mitmot.ndim,\n inner_mitmot_outs[opos + k].ndim,\n )\n )\n opos += len(otaps)\n argoffset += len(self.outer_mitmot(inputs))\n # Same checks as above but for outputs of type mit_sot\n ipos = 0\n inner_mitsots = self.inner_mitsot(self.inputs)\n for idx, (itaps, _outer_mitsot, inner_mitsot_out) in enumerate(\n zip(\n self.mitsot_taps(),\n self.outer_mitsot(inputs),\n self.inner_mitsot_outs(self.outputs),\n )\n ):\n outer_mitsot = format(_outer_mitsot, as_var=inner_mitsots[ipos])\n new_inputs.append(outer_mitsot)\n\n for k in range(len(itaps)):\n if (\n inner_mitsots[ipos + k].type.dtype != outer_mitsot.type.dtype\n or inner_mitsots[ipos + k].ndim != outer_mitsot.ndim - 1\n ):\n raise ValueError(\n err_msg1\n % (\n \"initial state (outputs_info\" \" in scan nomenclature) \",\n str(outer_mitsot),\n argoffset + idx,\n outer_mitsot.type.dtype,\n outer_mitsot.type.ndim,\n str(inner_mitsots[ipos + k]),\n inner_mitsots[ipos + k].type.dtype,\n inner_mitsots[ipos + k].type.ndim,\n )\n )\n ipos += len(itaps)\n if inner_mitsot_out.type.dtype != outer_mitsot.type.dtype:\n raise ValueError(\n err_msg2\n % (\n str(outer_mitsot),\n argoffset + idx,\n outer_mitsot.type.dtype,\n inner_mitsot_out.type.dtype,\n )\n )\n if inner_mitsot_out.ndim != outer_mitsot.ndim - 1:\n raise ValueError(\n err_msg3\n % (\n str(outer_mitsot),\n argoffset + idx,\n outer_mitsot.ndim,\n inner_mitsot_out.ndim,\n )\n )\n\n argoffset += len(self.outer_mitsot(inputs))\n # Same checks as above but for outputs of type sit_sot\n for idx, (inner_sitsot, _outer_sitsot, inner_sitsot_out) in enumerate(\n zip(\n self.inner_sitsot(self.inputs),\n self.outer_sitsot(inputs),\n self.inner_sitsot_outs(self.outputs),\n )\n ):\n outer_sitsot = format(_outer_sitsot, as_var=inner_sitsot)\n new_inputs.append(outer_sitsot)\n if inner_sitsot.ndim != outer_sitsot.ndim - 1:\n raise ValueError(\n err_msg1\n % (\n \"initial state (outputs_info\" \" in scan nomenclature) \",\n str(outer_sitsot),\n argoffset + idx,\n outer_sitsot.type.dtype,\n outer_sitsot.type.ndim,\n str(inner_sitsot),\n inner_sitsot.type.dtype,\n inner_sitsot.type.ndim,\n )\n )\n if inner_sitsot_out.type.dtype != outer_sitsot.type.dtype:\n raise ValueError(\n err_msg2\n % (\n str(outer_sitsot),\n argoffset + idx,\n outer_sitsot.type.dtype,\n inner_sitsot_out.type.dtype,\n )\n )\n if inner_sitsot_out.ndim != outer_sitsot.ndim - 1:\n raise ValueError(\n err_msg3\n % (\n str(outer_sitsot),\n argoffset + idx,\n outer_sitsot.type.ndim,\n inner_sitsot_out.type.ndim,\n )\n )\n\n argoffset += len(self.outer_sitsot(inputs))\n # Check that the shared variable and their update rule have the same\n # dtype. 
Maybe even same type ?!\n for idx, (inner_shared, inner_shared_out, _outer_shared) in enumerate(\n zip(\n self.inner_shared(self.inputs),\n self.inner_shared_outs(self.outputs),\n self.outer_shared(inputs),\n )\n ):\n outer_shared = format(_outer_shared, as_var=inner_shared)\n new_inputs.append(outer_shared)\n if (\n hasattr(outer_shared, \"dtype\")\n and outer_shared.dtype != inner_shared_out.dtype\n ):\n raise ValueError(\n err_msg2\n % (\n str(outer_shared),\n idx + argoffset,\n outer_shared.dtype,\n inner_shared_out.dtype,\n )\n )\n if (\n hasattr(outer_shared, \"dtype\")\n and outer_shared.ndim != inner_shared_out.ndim\n ):\n raise ValueError(\n err_msg3\n % (\n str(outer_shared),\n idx + argoffset,\n outer_shared.ndim,\n inner_shared_out.ndim,\n )\n )\n\n if hasattr(outer_shared, \"dtype\") and (\n outer_shared.dtype != inner_shared.dtype\n or outer_shared.ndim != inner_shared.ndim\n ):\n raise ValueError(\n err_msg1\n % (\n \"initial state (outputs_info\" \" in scan nomenclature) \",\n str(outer_shared),\n argoffset + idx,\n outer_shared.dtype,\n outer_shared.ndim,\n str(inner_shared),\n inner_shared.dtype,\n inner_shared.ndim,\n )\n )\n # We do not need to call `format` on outer_nisot arguments.\n # outer_nitsot stands for no input tap single output tap. This means\n # these are states that do not feed anything back in the recurrent\n # computation, and hence they do not have an initial state. The scan\n # node however receives an input for each such argument, the input\n # in this case is just a int saying how many steps of this output we\n # need to store. This input does not have the same dtype, nor is it the same\n # type of tensor as the output, it is always a scalar int.\n new_inputs += [as_tensor_variable(ons) for ons in self.outer_nitsot(inputs)]\n for inner_nonseq, _outer_nonseq in zip(\n self.inner_non_seqs(self.inputs), self.outer_non_seqs(inputs)\n ):\n outer_nonseq = format(_outer_nonseq, as_var=inner_nonseq)\n new_inputs.append(outer_nonseq)\n if inner_nonseq.type != outer_nonseq.type:\n raise ValueError(\n (\n \"Argument %s given to scan node does not\"\n \" match its correspondence %s\"\n )\n % (str(outer_nonseq), str(inner_nonseq))\n )\n\n for outer_nitsot in self.outer_nitsot(inputs):\n # For every nit_sot input we get as input a int/uint that\n # depicts the size in memory for that sequence. 
This feature is\n # used by truncated BPTT and by scan space optimization\n if (\n str(outer_nitsot.type.dtype) not in integer_dtypes\n or outer_nitsot.ndim != 0\n ):\n raise ValueError(\n \"For output %s you need to provide a \" \"scalar int !\",\n str(outer_nitsot),\n )\n assert len(new_inputs) == len(inputs)\n\n # The vector_seqs and vector_outs are just a workaround\n # strange NumPy behavior: vector_ndarray[int] return a NumPy\n # scalar and not a NumPy ndarray of 0 dimensions.\n def is_cpu_vector(s):\n return isinstance(s.type, TensorType) and s.ndim == 1\n\n self.vector_seqs = [\n is_cpu_vector(seq) for seq in new_inputs[1 : 1 + self.n_seqs]\n ]\n self.vector_outs = [\n is_cpu_vector(arg)\n for arg in new_inputs[1 + self.n_seqs : (1 + self.n_seqs + self.n_outs)]\n ]\n self.vector_outs += [\n isinstance(t.type, TensorType) and t.ndim == 0\n for t in self.outer_nitsot_outs(self.outputs)\n ]\n\n apply_node = Apply(self, new_inputs, [t() for t in self.output_types])\n return apply_node\n\n def __eq__(self, other):\n # Check if we are dealing with same type of objects\n if not type(self) == type(other):\n return False\n if \"destroy_map\" not in self.info:\n self.info[\"destroy_map\"] = OrderedDict()\n if \"destroy_map\" not in other.info:\n other.info[\"destroy_map\"] = OrderedDict()\n keys_to_check = [\n \"truncate_gradient\",\n \"profile\",\n \"n_seqs\",\n \"tap_array\",\n \"as_while\",\n \"n_mit_sot\",\n \"destroy_map\",\n \"n_nit_sot\",\n \"n_shared_outs\",\n \"n_sit_sot\",\n \"gpua\",\n \"n_mit_mot_outs\",\n \"n_mit_mot\",\n \"mit_mot_out_slices\",\n ]\n # This are some safety checks ( namely that the inner graph has the\n # same number of inputs and same number of outputs )\n if not len(self.inputs) == len(other.inputs):\n return False\n elif not len(self.outputs) == len(other.outputs):\n return False\n for key in keys_to_check:\n if self.info[key] != other.info[key]:\n return False\n # If everything went OK up to here, there is still one thing to\n # check. Namely, do the internal graph represent same\n # computations\n for self_in, other_in in zip(self.inputs, other.inputs):\n if self_in.type != other_in.type:\n return False\n\n return equal_computations(\n self.outputs, other.outputs, self.inputs, other.inputs\n )\n\n def __str__(self):\n if self.gpua:\n gpu_str = \"gpu\"\n else:\n gpu_str = \"cpu\"\n if self.as_while:\n name = \"do_while\"\n else:\n name = \"for\"\n aux_txt = \"%s\"\n if len(self.destroy_map.keys()) > 0:\n # Check if all outputs are inplace\n if sorted(self.destroy_map.keys()) == sorted(\n range(self.n_mit_mot + self.n_mit_sot + self.n_sit_sot)\n ):\n aux_txt += \"all_inplace,%s,%s}\"\n else:\n aux_txt += \"{inplace{\"\n for k in self.destroy_map.keys():\n aux_txt += str(k) + \",\"\n aux_txt += \"},%s,%s}\"\n else:\n aux_txt += \"{%s,%s}\"\n aux_txt = aux_txt % (name, gpu_str, str(self.name))\n return aux_txt\n\n def __hash__(self):\n return hash(\n (\n type(self),\n # and a hash representing the inner graph using the\n # CLinker.cmodule_key_\n self._hash_inner_graph,\n hash_listsDictsTuples(self.info),\n )\n )\n\n def make_thunk(self, node, storage_map, compute_map, no_recycling, impl=None):\n \"\"\"\n\n Parameters\n ----------\n node\n Something previously returned by self.make_node.\n storage_map\n dict variable -> one-element-list where a computed\n value for this variable may be found.\n compute_map\n dict variable -> one-element-list where a boolean\n value will be found. 
The boolean indicates whether the\n variable's storage_map container contains a valid value (True)\n or if it has not been computed yet (False).\n no_recycling\n List of variables for which it is forbidden to reuse memory\n allocated by a previous call.\n impl\n Use 'py' if we want python execution.\n Notes\n -----\n If the thunk consults the storage_map on every call, it is safe\n for it to ignore the no_recycling argument, because elements of the\n no_recycling list will have a value of None in the storage map. If\n the thunk can potentially cache return values (like CLinker does),\n then it must not do so for variables in the no_recycling list.\n\n \"\"\"\n\n # Before building the thunk, validate that the inner graph is\n # coherent\n self.validate_inner_graph()\n\n # Setting up all my variables in what I believe is a more Cython\n # friendly form\n\n node_input_storage = [storage_map[r] for r in node.inputs]\n node_output_storage = [storage_map[r] for r in node.outputs]\n # If a shared variable is the result of a ViewOp it is a clear\n # indication that we need to copy that value after the perform of\n # scan is done\n slices = self.n_mit_mot_outs + self.n_mit_sot + self.n_sit_sot + self.n_nit_sot\n\n if config.scan__allow_output_prealloc:\n\n # Go through the mitmots. Whenever a mitmot has a tap both as an\n # input and an output, wrap the input such that the corresponding\n # output variable becomes an update to be performed on it, possibly\n # inplace at the end of the functions's execution.\n wrapped_inputs = [In(x, borrow=False) for x in self.inputs[: self.n_seqs]]\n new_outputs = [x for x in self.outputs]\n preallocated_mitmot_outs = []\n new_mit_mot_out_slices = copy.deepcopy(self.mit_mot_out_slices)\n\n input_idx = self.n_seqs\n for mitmot_idx in range(self.n_mit_mot):\n for inp_tap in self.tap_array[mitmot_idx]:\n if inp_tap in self.mit_mot_out_slices[mitmot_idx]:\n inp = self.inputs[input_idx]\n\n # Figure out the index of the corresponding output\n output_idx = sum(\n [len(m) for m in self.mit_mot_out_slices[:mitmot_idx]]\n )\n output_idx += self.mit_mot_out_slices[mitmot_idx].index(inp_tap)\n\n # Make it so the input is automatically updated to the\n # output value, possibly inplace, at the end of the\n # function execution. Also, since an update is\n # defined, a default value must also be (this is\n # verified by DebugMode). Use an array of size 0 but\n # the right ndim and dtype (use a shape of 1 on\n # broadcastable dimensions, 0 on the others).\n default_shape = [1 if _b else 0 for _b in inp.broadcastable]\n default_val = inp.type.value_zeros(default_shape)\n wrapped_inp = In(\n variable=inp,\n value=default_val,\n update=self.outputs[output_idx],\n )\n wrapped_inputs.append(wrapped_inp)\n preallocated_mitmot_outs.append(output_idx)\n new_mit_mot_out_slices[mitmot_idx].remove(inp_tap)\n else:\n # Wrap the corresponding input as usual. 
Leave the\n # output as-is.\n wrapped_inputs.append(In(self.inputs[input_idx], borrow=False))\n input_idx += 1\n\n # Wrap the inputs not associated to mitmots and wrap the remaining\n # outputs\n wrapped_inputs += [In(x, borrow=False) for x in self.inputs[input_idx:]]\n wrapped_outputs = [Out(x, borrow=True) for x in new_outputs[:slices]]\n wrapped_outputs += new_outputs[slices:]\n\n # Remove now useless outputs from the output list (start from the\n # end to avoid altering the indices of the other outputs to be\n # deleted.\n preallocated_mitmot_outs.sort()\n for p in preallocated_mitmot_outs[::-1]:\n del wrapped_outputs[p]\n\n # Store the list of mitmot output taps that have been altered\n # so they can be preallocated\n self.mitmots_preallocated = [\n i in preallocated_mitmot_outs for i in range(self.n_mit_mot_outs)\n ]\n\n # Add an optimization to the compilation mode to attach a feature\n # to the function graph just before the inplace optimizations are\n # applied (inplace optimizations start at position 50 so the\n # optimization to attach the feature is registered at position 49.9\n # so that it runs before them). This feature will prevent mitsot,\n # sitsot and nitsot outputs from being computed inplace (to allow\n # their preallocation).\n mitsot_start = self.n_mit_mot_outs - len(preallocated_mitmot_outs)\n nitsot_end = mitsot_start + self.n_mit_sot + self.n_sit_sot + self.n_nit_sot\n feature = NoOutputFromInplace(mitsot_start, nitsot_end)\n opt = AddFeatureOptimizer(feature)\n compilation_mode = self.mode_instance.register((opt, 49.9))\n\n else:\n # Output preallocation is not activated. Mark every mitmot output\n # tap as not being preallocated\n self.mitmots_preallocated = [False] * self.n_mit_mot_outs\n\n wrapped_inputs = [In(x, borrow=True) for x in self.inputs]\n wrapped_outputs = [Out(x, borrow=False) for x in self.outputs[:slices]]\n wrapped_outputs += self.outputs[slices:]\n\n compilation_mode = self.mode_instance\n\n profile = None\n if config.profile or (\n isinstance(self.profile, (str, bool, (int,))) and self.profile\n ):\n if isinstance(self.profile, str):\n profile = ScanProfileStats(name=self.profile)\n else:\n profile = ScanProfileStats(name=self.name)\n elif self.profile:\n profile = self.profile\n # make_thunk can be called many times on the same op\n # we do not want to recompile the inner fct every time.\n if not getattr(self, \"fn\", None):\n self.fn = function(\n wrapped_inputs,\n wrapped_outputs,\n mode=compilation_mode,\n name=self.name,\n profile=profile,\n on_unused_input=\"ignore\",\n )\n\n # Analyse the compile inner function to determine which inputs and\n # outputs are on the gpu and speed up some checks during the execution\n self.inps_is_tensor = [\n isinstance(out, TensorVariable) for out in self.fn.maker.fgraph.inputs\n ]\n self.outs_is_tensor = [\n isinstance(out, TensorVariable) for out in self.fn.maker.fgraph.outputs\n ]\n\n try:\n if impl == \"py\":\n raise MissingGXX\n cython_mintaps = np.asarray(self.mintaps, dtype=\"int32\")\n cython_tap_array_len = np.asarray(\n [len(x) for x in self.tap_array], dtype=\"int32\"\n )\n if len(self.tap_array) == 0:\n d1 = 0\n else:\n d1 = np.max(cython_tap_array_len)\n d0 = len(self.tap_array)\n cython_tap_array = np.zeros((d0, d1), dtype=\"int32\")\n for _d0 in range(d0):\n for _d1 in range(cython_tap_array_len[_d0]):\n cython_tap_array[_d0, _d1] = self.tap_array[_d0][_d1]\n cython_mit_mot_out_nslices = np.asarray(\n [len(x) for x in self.mit_mot_out_slices], dtype=\"int32\"\n )\n if 
len(self.mit_mot_out_slices) == 0:\n d1 = 0\n else:\n d1 = np.max(cython_mit_mot_out_nslices)\n d0 = len(self.mit_mot_out_slices)\n cython_mit_mot_out_slices = np.zeros((d0, d1), dtype=\"int32\")\n for _d0 in range(d0):\n for _d1 in range(cython_mit_mot_out_nslices[_d0]):\n cython_mit_mot_out_slices[_d0, _d1] = self.mit_mot_out_slices[_d0][\n _d1\n ]\n\n cython_vector_seqs = np.asarray(self.vector_seqs, dtype=\"int32\")\n cython_vector_outs = np.asarray(self.vector_outs, dtype=\"int32\")\n cython_mitmots_preallocated = np.asarray(\n self.mitmots_preallocated, dtype=\"int32\"\n )\n\n cython_inps_is_tensor = np.asarray(self.inps_is_tensor, dtype=\"int32\")\n cython_outs_is_tensor = np.asarray(self.outs_is_tensor, dtype=\"int32\")\n\n if self.destroy_map:\n cython_destroy_map = [\n x in self.destroy_map for x in range(len(node.outputs))\n ]\n else:\n cython_destroy_map = [0 for x in range(len(node.outputs))]\n cython_destroy_map = np.asarray(cython_destroy_map, dtype=\"int32\")\n from . import scan_perform_ext\n\n def p(node, args, outs):\n return scan_perform_ext.perform(\n self.n_shared_outs,\n self.n_mit_mot_outs,\n self.n_seqs,\n self.n_mit_mot,\n self.n_mit_sot,\n self.n_sit_sot,\n self.n_nit_sot,\n args[0],\n self.as_while,\n cython_mintaps,\n cython_tap_array,\n cython_tap_array_len,\n cython_vector_seqs,\n cython_vector_outs,\n cython_mit_mot_out_slices,\n cython_mit_mot_out_nslices,\n cython_mitmots_preallocated,\n cython_inps_is_tensor,\n cython_outs_is_tensor,\n self.fn.fn,\n self.fn,\n cython_destroy_map,\n args,\n outs,\n self,\n node,\n )\n\n except (ImportError, MissingGXX):\n p = self.perform\n\n # default arguments are stored in the closure of `rval`\n\n # Big ugly hack since we can't get the real value of allow_gc\n # for the englobing function.\n allow_gc = config.allow_gc and not self.allow_gc\n\n def rval(\n p=p, i=node_input_storage, o=node_output_storage, n=node, allow_gc=allow_gc\n ):\n r = p(n, [x[0] for x in i], o)\n for o in node.outputs:\n compute_map[o][0] = True\n if allow_gc:\n self.fn.free()\n return r\n\n rval.inputs = node_input_storage\n rval.outputs = node_output_storage\n rval.perform = p\n rval.lazy = False\n return rval\n\n def inner_seqs(self, list_inputs):\n # Given the list of inner inputs this function grabs those\n # corresponding to sequences\n return list_inputs[: self.n_seqs]\n\n def outer_seqs(self, list_inputs):\n if isinstance(list_inputs, Apply):\n list_inputs = list_inputs.inputs\n # Given the list of outer inputs this function grabs those\n # corresponding to sequences\n return list_inputs[1 : 1 + self.n_seqs]\n\n def inner_mitmot(self, list_inputs):\n n_taps = sum(len(x) for x in self.tap_array[: self.n_mit_mot])\n return list_inputs[self.n_seqs : self.n_seqs + n_taps]\n\n def outer_mitmot(self, list_inputs):\n if isinstance(list_inputs, Apply):\n list_inputs = list_inputs.inputs\n return list_inputs[1 + self.n_seqs : 1 + self.n_seqs + self.n_mit_mot]\n\n def inner_mitmot_outs(self, list_outputs):\n n_taps = sum(len(x) for x in self.mit_mot_out_slices)\n return list_outputs[:n_taps]\n\n def outer_mitmot_outs(self, list_outputs):\n if isinstance(list_outputs, Apply):\n list_outputs = list_outputs.outputs\n return list_outputs[: self.n_mit_mot]\n\n def mitmot_taps(self):\n return self.tap_array[: self.n_mit_mot]\n\n def mitmot_out_taps(self):\n return self.mit_mot_out_slices[: self.n_mit_mot]\n\n def inner_mitsot(self, list_inputs):\n n_mitmot_taps = sum(len(x) for x in self.tap_array[: self.n_mit_mot])\n ntaps_upto_sit_sot = sum(\n len(x) 
for x in self.tap_array[: (self.n_mit_mot + self.n_mit_sot)]\n )\n return list_inputs[\n self.n_seqs + n_mitmot_taps : self.n_seqs + ntaps_upto_sit_sot\n ]\n\n def outer_mitsot(self, list_inputs):\n if isinstance(list_inputs, Apply):\n list_inputs = list_inputs.inputs\n offset = 1 + self.n_seqs + self.n_mit_mot\n return list_inputs[offset : offset + self.n_mit_sot]\n\n def inner_mitsot_outs(self, list_outputs):\n n_taps = sum(len(x) for x in self.mit_mot_out_slices)\n return list_outputs[n_taps : n_taps + self.n_mit_sot]\n\n def outer_mitsot_outs(self, list_outputs):\n if isinstance(list_outputs, Apply):\n list_outputs = list_outputs.outputs\n return list_outputs[self.n_mit_mot : self.n_mit_mot + self.n_mit_sot]\n\n def mitsot_taps(self):\n return self.tap_array[self.n_mit_mot : self.n_mit_mot + self.n_mit_sot]\n\n def inner_sitsot(self, list_inputs):\n n_taps_upto_sit_sot = sum(\n len(x) for x in self.tap_array[: (self.n_mit_mot + self.n_mit_sot)]\n )\n offset = self.n_seqs + n_taps_upto_sit_sot\n return list_inputs[offset : offset + self.n_sit_sot]\n\n def outer_sitsot(self, list_inputs):\n if isinstance(list_inputs, Apply):\n list_inputs = list_inputs.inputs\n offset = 1 + self.n_seqs + self.n_mit_mot + self.n_mit_sot\n return list_inputs[offset : offset + self.n_sit_sot]\n\n def inner_sitsot_outs(self, list_outputs):\n n_taps = sum(len(x) for x in self.mit_mot_out_slices)\n offset = self.n_mit_sot + n_taps\n return list_outputs[offset : offset + self.n_sit_sot]\n\n def outer_sitsot_outs(self, list_outputs):\n if isinstance(list_outputs, Apply):\n list_outputs = list_outputs.outputs\n offset = self.n_mit_mot + self.n_mit_sot\n return list_outputs[offset : offset + self.n_sit_sot]\n\n def outer_nitsot(self, list_inputs):\n if isinstance(list_inputs, Apply):\n list_inputs = list_inputs.inputs\n offset = (\n 1\n + self.n_seqs\n + self.n_mit_mot\n + self.n_mit_sot\n + self.n_sit_sot\n + self.n_shared_outs\n )\n return list_inputs[offset : offset + self.n_nit_sot]\n\n def inner_nitsot_outs(self, list_outputs):\n n_taps = sum(len(x) for x in self.mit_mot_out_slices)\n offset = self.n_mit_sot + n_taps + self.n_sit_sot\n return list_outputs[offset : offset + self.n_nit_sot]\n\n def outer_nitsot_outs(self, list_outputs):\n if isinstance(list_outputs, Apply):\n list_outputs = list_outputs.outputs\n offset = self.n_mit_mot + self.n_mit_sot + self.n_sit_sot\n return list_outputs[offset : offset + self.n_nit_sot]\n\n def inner_shared(self, list_inputs):\n n_taps_upto_sit_sot = sum(\n len(x) for x in self.tap_array[: (self.n_mit_mot + self.n_mit_sot)]\n )\n offset = self.n_seqs + n_taps_upto_sit_sot + self.n_sit_sot\n return list_inputs[offset : offset + self.n_shared_outs]\n\n def outer_shared(self, list_inputs):\n if isinstance(list_inputs, Apply):\n list_inputs = list_inputs.inputs\n offset = 1 + self.n_seqs + self.n_mit_mot + self.n_mit_sot + self.n_sit_sot\n return list_inputs[offset : offset + self.n_shared_outs]\n\n def inner_shared_outs(self, list_outputs):\n n_taps = sum(len(x) for x in self.mit_mot_out_slices)\n offset = self.n_mit_sot + n_taps + self.n_sit_sot + self.n_nit_sot\n return list_outputs[offset : offset + self.n_shared_outs]\n\n def outer_shared_outs(self, list_outputs):\n if isinstance(list_outputs, Apply):\n list_outputs = list_outputs.outputs\n offset = self.n_mit_mot + self.n_mit_sot + self.n_sit_sot + self.n_nit_sot\n return list_outputs[offset : offset + self.n_shared_outs]\n\n def inner_non_seqs(self, list_inputs):\n n_taps_upto_sit_sot = sum(\n len(x) for x in 
self.tap_array[: (self.n_mit_mot + self.n_mit_sot)]\n )\n offset = self.n_seqs + n_taps_upto_sit_sot + self.n_sit_sot + self.n_shared_outs\n return list_inputs[offset:]\n\n def outer_non_seqs(self, list_inputs):\n if isinstance(list_inputs, Apply):\n list_inputs = list_inputs.inputs\n offset = (\n 1\n + self.n_seqs\n + self.n_mit_mot\n + self.n_mit_sot\n + self.n_sit_sot\n + self.n_nit_sot\n + self.n_shared_outs\n )\n return list_inputs[offset:]\n\n def perform(self, node, inputs, output_storage, params=None):\n \"\"\"Compute the scan operation in Python.\n\n The `inputs` are packed like this:\n\n n_steps\n\n X sequence inputs x_1, x_2, ... x_<self.n_seqs>\n\n Y initial states (u_1, u_2, ... u_<self.n_outs>) for our\n outputs. Each must have appropriate length (T_1, T_2, ..., T_Y).\n\n W other inputs w_1, w_2, ... w_W\n\n There are at least ``1 + self.n_seqs + self.n_outs`` inputs, and the\n ones above this number are passed to the scanned function as\n non-sequential inputs.\n\n The outputs are more straightforward:\n\n Y sequence outputs y_1, y_2, ... y_<self.n_outs>\n\n \"\"\"\n # 1. Unzip the number of steps and sequences. If number of steps is\n # negative flip sequences around, and make n_steps positive\n t0_call = time.time()\n t_fn = 0\n n_steps = inputs[0]\n seqs = []\n if n_steps < 0:\n # History, in the past, this was used for backward\n # scan. Now we reverse the inputs outside of scan.\n raise IndexError(\n f\"Scan was asked to run for negative number of step {int(n_steps)}\"\n )\n elif n_steps == 0:\n raise NotImplementedError(\n \"We didn't implemented yet the case where scan do 0 iteration\"\n )\n else:\n for idx, seq in enumerate(inputs[1 : self.seqs_arg_offset]):\n if seq.shape[0] < n_steps:\n raise ValueError(\n (\n \"Sequence is shorter then the required \"\n \"number of steps : (n_steps, seq, \"\n \"seq.shape):\"\n ),\n n_steps,\n node.inputs[1 + idx],\n seq.shape,\n )\n seqs.append(seq)\n\n # 2. Allocate memory for the outputs. Construct the list:\n # store_steps -- map containing the length of each output\n # pos -- map containing the current position of each\n # output\n\n store_steps = [\n arg.shape[0]\n for arg in inputs[self.seqs_arg_offset : self.shared_arg_offset]\n ]\n store_steps += [\n arg\n for arg in inputs[\n self.nit_sot_arg_offset : self.nit_sot_arg_offset + self.n_nit_sot\n ]\n ]\n\n pos = [\n (-self.mintaps[idx]) % store_steps[idx]\n for idx in range(self.n_outs + self.n_nit_sot)\n ]\n # 2.1 Create storage space for outputs\n for idx in range(self.n_outs):\n if idx in self.destroy_map:\n # ^ Case 1. 
Outputs should be computed inplace of their\n # initial state\n output_storage[idx][0] = inputs[self.seqs_arg_offset + idx]\n elif (\n output_storage[idx][0] is not None\n and output_storage[idx][0].shape[1:]\n == inputs[self.seqs_arg_offset + idx].shape[1:]\n and output_storage[idx][0].shape[0] >= store_steps[idx]\n ):\n # Put in the values of the initial state\n output_storage[idx][0] = output_storage[idx][0][: store_steps[idx]]\n if idx > self.n_mit_mot:\n l = -self.mintaps[idx]\n output_storage[idx][0][:l] = inputs[self.seqs_arg_offset + idx][:l]\n else:\n output_storage[idx][0][:] = inputs[self.seqs_arg_offset + idx]\n else:\n output_storage[idx][0] = inputs[self.seqs_arg_offset + idx].copy()\n\n offset = self.nit_sot_arg_offset + self.n_nit_sot\n other_args = inputs[offset:]\n inner_input_storage = self.fn.input_storage\n nb_mitmot_in = sum(map(len, self.tap_array[: self.n_mit_mot]))\n old_mitmot_input_storage = [None] * nb_mitmot_in\n old_mitmot_input_data = [None] * nb_mitmot_in\n inner_output_storage = self.fn.output_storage\n old_inner_output_storage = [None] * len(inner_output_storage)\n old_inner_output_data = [None] * len(inner_output_storage)\n fn = self.fn.fn\n offset = (\n self.n_seqs\n + sum(map(len, self.tap_array[: self.n_outs]))\n + self.n_shared_outs\n )\n for idx in range(len(other_args)):\n inner_input_storage[idx + offset].storage[0] = other_args[idx]\n\n i = 0\n cond = True\n # ############# THE MAIN LOOP ##############\n # for i in range(n_steps):\n while (i < n_steps) and cond:\n # sequences over which scan iterates\n # 3. collect input slices\n for idx in range(self.n_seqs):\n if self.vector_seqs[idx]:\n inner_input_storage[idx].storage[0] = seqs[idx][i : i + 1].reshape(\n ()\n )\n else:\n inner_input_storage[idx].storage[0] = seqs[idx][i]\n\n offset = self.n_seqs\n for idx in range(self.n_outs):\n if self.vector_outs[idx]:\n for tap in self.tap_array[idx]:\n _idx = (pos[idx] + tap) % store_steps[idx]\n inner_input_storage[offset].storage[0] = output_storage[idx][0][\n _idx : _idx + 1\n ].reshape(())\n offset += 1\n else:\n for tap in self.tap_array[idx]:\n _idx = (pos[idx] + tap) % store_steps[idx]\n inner_input_storage[offset].storage[0] = output_storage[idx][0][\n _idx\n ]\n offset += 1\n\n a_offset = self.shared_arg_offset\n o_offset = self.n_outs + self.n_nit_sot\n if i == 0:\n for j in range(self.n_shared_outs):\n inner_input_storage[offset].storage[0] = inputs[a_offset + j]\n offset += 1\n else:\n for j in range(self.n_shared_outs):\n inner_input_storage[offset].storage[0] = output_storage[\n o_offset + j\n ][0]\n offset += 1\n\n # 4. collecting slices where the output should be stored\n\n # 4.1. Collect slices for mitmots\n offset = 0\n for idx in range(self.n_mit_mot_outs):\n if not self.mitmots_preallocated[idx]:\n inner_output_storage[offset].storage[0] = None\n offset += 1\n\n # 4.2. Collect slices for mitsots, sitsots and nitsots\n if i != 0:\n for idx in range(self.n_outs + self.n_nit_sot - self.n_mit_mot):\n if (\n store_steps[idx + self.n_mit_mot] == 1\n or self.vector_outs[idx + self.n_mit_mot]\n ):\n inner_output_storage[idx + offset].storage[0] = None\n else:\n _pos0 = idx + self.n_mit_mot\n inner_output_storage[idx + offset].storage[0] = output_storage[\n _pos0\n ][0][pos[_pos0]]\n else:\n for idx in range(self.n_outs + self.n_nit_sot - self.n_mit_mot):\n inner_output_storage[idx + offset].storage[0] = None\n\n # 4.3. 
Collect slices for shared outputs\n offset += self.n_outs + self.n_nit_sot - self.n_mit_mot\n for idx in range(self.n_shared_outs):\n inner_output_storage[idx + offset].storage[0] = None\n\n # 4.4. If there is a condition add it to the mix\n if self.as_while:\n pdx = offset + self.n_shared_outs\n inner_output_storage[pdx].storage[0] = None\n\n # 4.5. Keep a reference to the variables (ndarrays, GpuArrays,\n # etc) currently in the output_storage to be able to compare them\n # with the actual outputs of the inner function after its\n # execution. Also keep pointers to their data to be able to detect\n # cases where outputs reused the allocated object but alter the\n # memory region they refer to.\n for idx in range(len(inner_output_storage)):\n\n var = inner_output_storage[idx].storage[0]\n old_inner_output_storage[idx] = var\n\n if var is None:\n old_inner_output_data[idx] = None\n elif self.outs_is_tensor[idx]:\n old_inner_output_data[idx] = var.data\n else:\n old_inner_output_data[idx] = var.gpudata\n\n # 4.6. Keep a reference to the variables (ndarrays, GpuArrays,\n # etc) associated with mitmot inputs currently in the\n # input_storage to be able to compare them with the content of the\n # input_storage after the execution of the function. Also keep\n # pointers to their data to be able to detect cases where outputs\n # reused the allocated object but alter the memory region they\n # refer to.\n for idx in range(nb_mitmot_in):\n var = inner_input_storage[idx + self.n_seqs].storage[0]\n old_mitmot_input_storage[idx] = var\n\n if var is None:\n old_mitmot_input_data[idx] = None\n elif self.inps_is_tensor[idx + self.n_seqs]:\n old_mitmot_input_data[idx] = var.data\n else:\n old_mitmot_input_data[idx] = var.gpudata\n\n # 5.1 compute outputs\n t0_fn = time.time()\n\n try:\n fn()\n except Exception:\n if hasattr(fn, \"position_of_error\"):\n # this is a new vm-provided function or c linker\n # they need this because the exception manipulation\n # done by raise_with_op is not implemented in C.\n if hasattr(fn, \"thunks\"):\n # For the CVM\n raise_with_op(\n self.fn.maker.fgraph,\n fn.nodes[fn.position_of_error],\n fn.thunks[fn.position_of_error],\n )\n else:\n # For the c linker\n # We don't have access from python to all the\n # temps values So for now, we just don't print\n # the extra shapes/strides info\n raise_with_op(\n self.fn.maker.fgraph, fn.nodes[fn.position_of_error]\n )\n else:\n # old-style linkers raise their own exceptions\n raise\n\n dt_fn = time.time() - t0_fn\n if self.as_while:\n pdx = offset + self.n_shared_outs\n cond = inner_output_storage[pdx].storage[0] == 0\n\n # 5.2. By calling fn() directly instead of calling the aesara\n # function, it is possible that the updates have not been\n # performed. 
Perform the updates if needed.\n offset_out = len(inner_output_storage) - 1\n if getattr(fn, \"need_update_inputs\", True):\n # Update the inputs that have an update function\n for inp, storage in zip(\n self.fn.maker.expanded_inputs[::-1], self.fn.input_storage[::-1]\n ):\n if inp.update is not None:\n storage.data = inner_output_storage[offset_out].data\n offset_out -= 1\n\n t_fn += dt_fn\n offset_out = 0\n\n # 5.3 Copy over the values for mit_mot outputs\n mitmot_inp_offset = 0\n mitmot_out_idx = 0\n for j in range(self.n_mit_mot):\n for k in self.mit_mot_out_slices[j]:\n if self.mitmots_preallocated[mitmot_out_idx]:\n # This output tap has been preallocated.\n inp_idx = mitmot_inp_offset + self.tap_array[j].index(k)\n\n # Verify whether the input points to the same data as\n # it did before the execution of the inner function.\n old_var = old_mitmot_input_storage[inp_idx]\n new_var = inner_input_storage[self.n_seqs + inp_idx].storage[0]\n if old_var is new_var:\n old_data = old_mitmot_input_data[inp_idx]\n if self.inps_is_tensor[self.n_seqs + inp_idx]:\n same_data = new_var.data == old_data\n else:\n same_data = new_var.gpudata == old_data\n else:\n same_data = False\n\n # If the corresponding input storage still points to\n # the same data, it has been modified inplace and\n # nothing needs to be done. Otherwise, recover the\n # and store it in `outs` as usual\n if not same_data:\n output_storage[j][0][k + pos[j]] = inner_input_storage[\n self.n_seqs + inp_idx\n ].storage[0]\n\n else:\n # This output tap has not been preallocated, recover\n # its value as usual\n output_storage[j][0][k + pos[j]] = inner_output_storage[\n offset_out\n ].storage[0]\n offset_out += 1\n\n mitmot_out_idx += 1\n\n mitmot_inp_offset += len(self.tap_array[j])\n\n # 5.4 Copy over the values for mit_sot/sit_sot outputs\n begin = self.n_mit_mot\n end = self.n_outs\n offset_out -= self.n_mit_mot\n\n for j in range(begin, end):\n\n # Copy the output value to `outs`, if necessary\n if store_steps[j] == 1 or self.vector_outs[j]:\n output_storage[j][0][pos[j]] = inner_output_storage[\n offset_out + j\n ].storage[0]\n else:\n # Check whether the initialization of the output storage\n # map for this output has been reused.\n old_var = old_inner_output_storage[offset_out + j]\n new_var = inner_output_storage[offset_out + j].storage[0]\n if old_var is new_var:\n old_data = old_inner_output_data[offset_out + j]\n if old_data is None:\n output_reused = False\n elif self.outs_is_tensor[offset_out + j]:\n output_reused = new_var.data == old_data\n else:\n output_reused = new_var.gpudata == old_data\n else:\n output_reused = False\n\n if not output_reused:\n try:\n output_storage[j][0][pos[j]] = inner_output_storage[\n offset_out + j\n ].storage[0]\n except ValueError as e:\n if i == 0:\n # First iteration, so don't change the\n # error message as it can't be the\n # case we write about.\n raise\n ne = ValueError(\n \"An output of the scan has changed shape. 
\"\n \"This may be caused by a pushout optimization.\"\n \" Try adding \"\n \"'optimizer_excluding=scanOp_pushout_output' \"\n \"to your Aesara flags.\"\n )\n raise ne from e\n\n # 5.5 Copy over the values for nit_sot outputs\n begin = end\n end += self.n_nit_sot\n for j in range(begin, end):\n\n if i == 0:\n jout = j + offset_out\n shape = (store_steps[j],) + inner_output_storage[jout].storage[\n 0\n ].shape\n dtype = inner_output_storage[jout].storage[0].dtype\n if (\n output_storage[j][0] is None\n or output_storage[j][0].shape[0] < store_steps[j]\n or output_storage[j][0].shape[1:] != shape[1:]\n or output_storage[j][0].dtype != dtype\n ):\n output_storage[j][0] = node.outputs[j].type.value_zeros(shape)\n elif output_storage[j][0].shape[0] != store_steps[j]:\n output_storage[j][0] = output_storage[j][0][: store_steps[j]]\n output_storage[j][0][pos[j]] = inner_output_storage[jout].storage[0]\n elif store_steps[j] == 1 or self.vector_outs[j]:\n output_storage[j][0][pos[j]] = inner_output_storage[\n j + offset_out\n ].storage[0]\n else:\n # Check whether the initialization of the output storage map\n # for this output has been reused.\n old_var = old_inner_output_storage[offset_out + j]\n old_data = old_inner_output_data[offset_out + j]\n new_var = inner_output_storage[offset_out + j].storage[0]\n if old_var is new_var:\n if old_data is None:\n output_reused = False\n elif self.outs_is_tensor[offset_out + j]:\n output_reused = new_var.data == old_data\n else:\n output_reused = new_var.gpudata == old_data\n else:\n output_reused = False\n\n if not output_reused:\n output_storage[j][0][pos[j]] = inner_output_storage[\n j + offset_out\n ].storage[0]\n\n # 5.6 Copy over the values for outputs corresponding to shared\n # variables\n begin = end\n end += self.n_shared_outs\n for j in range(begin, end):\n jout = j + offset_out\n output_storage[j][0] = inner_output_storage[jout].storage[0]\n\n pos = [(idx + 1) % store for idx, store in zip(pos, store_steps)]\n i = i + 1\n\n # 6. Check if you need to re-order output buffers\n begin = self.n_mit_mot\n end = self.n_outs + self.n_nit_sot\n for idx in range(begin, end):\n if store_steps[idx] < i - self.mintaps[idx] and pos[idx] < store_steps[idx]:\n\n pdx = pos[idx]\n if pdx >= store_steps[idx] // 2:\n # It seems inefficient to copy the bigger part of the\n # array over, and back, but it is the only way that\n # there is no overlap in the areas of out[idx][0] that\n # are read and written.\n # This way, there will be no information overwritten\n # before it is read (as it used to happen).\n shape = (pdx,) + output_storage[idx][0].shape[1:]\n tmp = node.outputs[idx].type.value_zeros(shape)\n tmp[:] = output_storage[idx][0][:pdx]\n output_storage[idx][0][: store_steps[idx] - pdx] = output_storage[\n idx\n ][0][pdx:]\n output_storage[idx][0][store_steps[idx] - pdx :] = tmp\n del tmp\n else:\n shape = (store_steps[idx] - pdx,) + output_storage[idx][0].shape[1:]\n tmp = node.outputs[idx].type.value_zeros(shape)\n tmp[:] = output_storage[idx][0][pdx:]\n output_storage[idx][0][store_steps[idx] - pdx :] = output_storage[\n idx\n ][0][:pdx]\n output_storage[idx][0][: store_steps[idx] - pdx] = tmp\n del tmp\n # This would normally happen only when doing truncated\n # backpropagation through time. 
In such a scenario Scan is\n # expected to return 0 for all entries for which the gradient is\n # not actually computed\n elif store_steps[idx] > i - self.mintaps[idx]:\n output_storage[idx][0][i - self.mintaps[idx] :] = 0\n # This is a fix for a bug introduced by while. If you say\n # you want to loop up to a condition, you expect the output\n # to have that length ( and not the maximal length possible)\n #\n # Without this the behaviour of a scan op is not consistent\n # if optimization gets applied compared to when optimization\n # do not get applied\n if i < n_steps:\n # The reason I don't use out[idx][0][:i] is because for\n # certain outputs (those with multiple taps),\n # outs[idx][0] has more than n_steps entries, with the\n # initial state at the beginning. When indexing in it I\n # usually have to do something like\n # outs[idx][0][i+offset]. To do something similar here,\n # I would have first to compute the maximal tap for\n # every output and then do outs[0][:i+maximal_tap],\n # which implies I think more computations then this\n # little trick that I used\n output_storage[idx][0] = output_storage[idx][0][: -(n_steps - i)]\n\n # We never reuse the input or output storage of the\n # inner function so we clear it.\n for i_s in inner_input_storage:\n i_s.storage[0] = None\n for o_s in inner_output_storage:\n o_s.storage[0] = None\n\n t_call = time.time() - t0_call\n # NOTE: make this match what's in function.types.Function\n # and this little string helps us to find this spot:\n # \"PROFILE_CODE\"\n\n if hasattr(self.fn.maker, \"profile\") and self.fn.maker.profile:\n profile = self.fn.maker.profile\n profile.callcount += 1\n profile.nbsteps += n_steps\n profile.call_time += t_call\n profile.vm_call_time += t_fn\n if hasattr(self.fn.fn, \"update_profile\"):\n self.fn.fn.update_profile(profile)\n\n self.t_call = t_call\n self.t_fn = t_fn\n\n def infer_shape(self, fgraph, node, input_shapes):\n # input_shapes correspond to the shapes of node.inputs\n for inp, inp_shp in zip(node.inputs, input_shapes):\n assert inp_shp is None or len(inp_shp) == inp.type.ndim\n\n # Here we build 2 variables;\n # - A list `inner_ins_shapes`, such that inner_ins_shapes[i] is the\n # shape of self.inputs[i]\n # - A dictionary `out_equivalent` containing, for every inner input,\n # an equivalent variable computed from the outer inputs.\n # NOTE : For non-sequences, this equivalence is trivial. For\n # sequences and recurrent states, there is no direct equivalence\n # between outer and inner inputs. However, because every iteration\n # of the Scan needs to give the same output shapes, we can give an\n # equivalence between these inner inputs and the subelements of the\n # corresponding outer inputs that the Scan would use as input for\n # any given iteration. For simplicity, we use iteration 0.\n inner_ins_shapes = []\n out_equivalent = OrderedDict()\n\n # The two following blocks are commented as it cause in some\n # cases extra scans in the graph. See gh-XXX for the\n # investigation.\n\n # We skip the first outer input as it is the total or current number\n # of iterations.\n # sequences\n seqs_shape = [x[1:] for x in input_shapes[1 : 1 + self.n_seqs]]\n # We disable extra infer_shape for now. 
See gh-3765.\n extra_infer_shape = False\n\n if extra_infer_shape:\n inner_seqs = self.inputs[: self.n_seqs]\n outer_seqs = node.inputs[1 : 1 + self.n_seqs]\n for in_s, out_s in zip(inner_seqs, outer_seqs):\n out_equivalent[in_s] = out_s[0]\n\n # mit_mot, mit_sot, sit_sot\n outer_inp_idx = 1 + self.n_seqs\n inner_inp_idx = self.n_seqs\n else:\n outer_inp_idx = 0\n n_outs = self.n_mit_mot + self.n_mit_sot + self.n_sit_sot\n outs_shape = []\n for idx in range(n_outs):\n mintap = abs(min(self.tap_array[idx]))\n for k in self.tap_array[idx]:\n outs_shape += [input_shapes[idx + self.n_seqs + 1][1:]]\n if extra_infer_shape:\n corresponding_tap = node.inputs[outer_inp_idx][mintap + k]\n out_equivalent[self.inputs[inner_inp_idx]] = corresponding_tap\n inner_inp_idx += 1\n outer_inp_idx += 1\n\n # shared_outs\n offset = 1 + self.n_seqs + n_outs\n for idx in range(self.n_shared_outs):\n outs_shape += [input_shapes[idx + offset]]\n\n # non_sequences\n offset += self.n_nit_sot + self.n_shared_outs\n inner_ins_shapes = seqs_shape + outs_shape + input_shapes[offset:]\n assert len(inner_ins_shapes) == len(self.inputs)\n\n # Non-sequences have a direct equivalent from self.inputs in\n # node.inputs\n inner_non_sequences = self.inputs[len(seqs_shape) + len(outs_shape) :]\n for in_ns, out_ns in zip(inner_non_sequences, node.inputs[offset:]):\n out_equivalent[in_ns] = out_ns\n\n if self.as_while:\n self_outs = self.outputs[:-1]\n else:\n self_outs = self.outputs\n outs_shape = infer_shape(\n outs=self_outs, inputs=self.inputs, input_shapes=inner_ins_shapes\n )\n # Will be used to check if outs_shape can be expressed without using\n # variables in self.inputs.\n # The shapes of node.inputs are valid.\n validator = Validator(\n valid=input_shapes, invalid=self.inputs, valid_equivalent=out_equivalent\n )\n\n offset = 1 + self.n_seqs\n scan_outs = [x for x in input_shapes[offset : offset + n_outs]]\n offset += n_outs\n outs_shape_n = self.n_mit_mot_outs + self.n_mit_sot + self.n_sit_sot\n for x in range(self.n_nit_sot):\n out_shape_x = outs_shape[outs_shape_n + x]\n if out_shape_x is None:\n # This output is not a tensor, and has no shape\n scan_outs.append(None)\n else:\n # We need to make sure that we can compute the shapes from\n # node.inputs, and constants, without using the variables\n # in the inner function.\n r = node.outputs[n_outs + x]\n assert r.ndim == 1 + len(out_shape_x)\n shp = [node.inputs[offset + self.n_shared_outs + x]]\n for i, shp_i in zip(range(1, r.ndim), out_shape_x):\n # Validate shp_i. v_shape_i is either None (if invalid),\n # or a (variable, Boolean) tuple. The Boolean indicates\n # whether variable is shp_i (if True), or an valid\n # equivalent (if False). 
Here, we only need the variable.\n v_shp_i = validator.check(shp_i)\n if v_shp_i is None:\n if hasattr(r, \"broadcastable\") and r.broadcastable[i]:\n shp.append(1)\n else:\n shp.append(Shape_i(i)(r))\n else:\n # It can (or at least, an equivalent variable can)\n shp.append(v_shp_i[0])\n scan_outs.append(tuple(shp))\n\n scan_outs += [x for x in input_shapes[offset : offset + self.n_shared_outs]]\n # if we are dealing with a repeat-until, then we do not know the\n # leading dimension so we replace it for every entry with Shape_i\n if self.as_while:\n scan_outs_init = scan_outs\n scan_outs = []\n for o, x in zip(node.outputs, scan_outs_init):\n if x is None:\n scan_outs.append(None)\n else:\n scan_outs.append((Shape_i(0)(o),) + x[1:])\n return scan_outs\n\n def connection_pattern(self, node):\n\n # We cache the result of this function because, with a previous\n # implementation that repeatedly called grad, there were cases\n # where calls to aesara.grad() took as much as 4h for functions\n # containing many nested scans.\n if hasattr(node.tag, \"connection_pattern\"):\n return node.tag.connection_pattern\n\n # Obtain the connection pattern of the inner function.\n inner_connect_pattern = io_connection_pattern(self.inputs, self.outputs)\n\n # Initially assume no outer input is connected to any outer output\n connection_pattern = [[False for output in node.outputs] for x in node.inputs]\n\n # For every possible pair of outer input and outer output, iterate\n # over every possible pairing of their corresponding inner inputs\n # and inner outputs and, if one such pair of inner variables is\n # connected than the pair of outer variables is connected.\n for outer_oidx in range(len(node.outputs)):\n inner_oidxs = self.var_mappings[\"inner_out_from_outer_out\"][outer_oidx]\n\n for outer_iidx in range(len(node.inputs)):\n inner_iidxs = self.var_mappings[\"inner_inp_from_outer_inp\"][outer_iidx]\n\n for inner_oidx in inner_oidxs:\n for inner_iidx in inner_iidxs:\n\n if inner_connect_pattern[inner_iidx][inner_oidx]:\n connection_pattern[outer_iidx][outer_oidx] = True\n break\n\n if connection_pattern[outer_iidx][outer_oidx]:\n break\n\n # Applying Floyd-Warshall to find all paths connecting inputs to\n # outputs. Note that if `x` is an input to `y_t` and `y_tm1` is an\n # input to `z_t` then `x` is an input to `z_t`.\n\n n_outs = len(node.outputs)\n\n for steps in range(n_outs):\n for iidx in range(n_outs):\n for jidx in range(n_outs):\n\n # Get the idx of the outer input corresponding to that\n # outer output\n j_inp_idx = self.var_mappings[\"outer_inp_from_outer_out\"][jidx]\n\n if j_inp_idx != -1:\n if connection_pattern[j_inp_idx][iidx] is True:\n for k in range(len(connection_pattern)):\n if connection_pattern[k][jidx]:\n connection_pattern[k][iidx] = True\n\n node.tag.connection_pattern = connection_pattern\n return connection_pattern\n\n def get_oinp_iinp_iout_oout_mappings(self):\n \"\"\"\n Compute and return dictionary mappings between the inputs and\n outputs of the inner function and the inputs and outputs of the Scan\n node in the outer graph.\n\n The return value is a dictionary in which the keys are the names of\n the individual mappings and the values are the mapping dictionaries\n themselves. In dictionaries representing mappings to outer variables,\n the values are individual integer indices. 
In dictionaries\n representing mappings to inner variables, the values are sequences of\n indices because multiple inner variables can be associated with the\n same state.\n\n \"\"\"\n # Lists for outer variables contain individual indices, lists for\n # inner variables contain sequences of indices because many inner\n # variables can be associated with the same outer variable. The list\n # and indices are initialized already containing the data associated\n # with the timestep index, the first outer input.\n outer_input_indices = [0]\n inner_input_indices = [[]]\n inner_output_indices = [[]]\n outer_output_indices = [-1]\n\n outer_iidx = 1\n inner_iidx = 0\n inner_oidx = 0\n outer_oidx = 0\n\n # Handle sequences inputs\n for i in range(self.info[\"n_seqs\"]):\n outer_input_indices.append(outer_iidx)\n inner_input_indices.append([inner_iidx])\n inner_output_indices.append([])\n outer_output_indices.append(-1)\n\n outer_iidx += 1\n inner_iidx += 1\n inner_oidx += 0\n outer_oidx += 0\n\n # Handle mitmots, mitsots and sitsots variables\n for i in range(len(self.info[\"tap_array\"])):\n nb_input_taps = len(self.info[\"tap_array\"][i])\n\n if i < self.n_mit_mot:\n nb_output_taps = len(self.mit_mot_out_slices[i])\n else:\n nb_output_taps = 1\n\n outer_input_indices.append(outer_iidx)\n inner_input_indices.append(\n list(range(inner_iidx, inner_iidx + nb_input_taps))\n )\n inner_output_indices.append(\n list(range(inner_oidx, inner_oidx + nb_output_taps))\n )\n outer_output_indices.append(outer_oidx)\n\n outer_iidx += 1\n inner_iidx += nb_input_taps\n inner_oidx += nb_output_taps\n outer_oidx += 1\n\n # This is needed because, for outer inputs (and for outer inputs only)\n # nitsots come *after* shared variables.\n outer_iidx += self.info[\"n_shared_outs\"]\n\n # Handle nitsots variables\n for i in range(self.n_nit_sot):\n outer_input_indices.append(outer_iidx)\n inner_input_indices.append([])\n inner_output_indices.append([inner_oidx])\n outer_output_indices.append(outer_oidx)\n\n outer_iidx += 1\n inner_iidx += 0\n inner_oidx += 1\n outer_oidx += 1\n\n # This is needed because, for outer inputs (and for outer inputs only)\n # nitsots come *after* shared variables.\n outer_iidx -= self.info[\"n_shared_outs\"] + self.n_nit_sot\n\n # Handle shared states\n for i in range(self.info[\"n_shared_outs\"]):\n outer_input_indices.append(outer_iidx)\n inner_input_indices.append([inner_iidx])\n inner_output_indices.append([inner_oidx])\n outer_output_indices.append(outer_oidx)\n\n outer_iidx += 1\n inner_iidx += 1\n inner_oidx += 1\n outer_oidx += 1\n\n # This is needed because, for outer inputs (and for outer inputs only)\n # nitsots come *after* shared variables.\n outer_iidx += self.n_nit_sot\n\n # Handle non-sequence inputs\n # Note : the number of non-sequence inputs is not stored in self.info\n # so it has to be inferred from the number of inner inputs that remain\n # to be handled\n for i in range(len(self.inputs) - inner_iidx):\n outer_input_indices.append(outer_iidx)\n inner_input_indices.append([inner_iidx])\n inner_output_indices.append([])\n outer_output_indices.append(-1)\n\n outer_iidx += 1\n inner_iidx += 1\n inner_oidx += 0\n outer_oidx += 0\n\n # With the global mapping inferred, the individual mappings\n # can be produced\n mappings = {\n \"outer_inp_from_outer_out\": {},\n \"inner_inp_from_outer_out\": {},\n \"inner_out_from_outer_out\": {},\n \"inner_inp_from_outer_inp\": {},\n \"inner_out_from_outer_inp\": {},\n \"outer_out_from_outer_inp\": {},\n \"outer_inp_from_inner_inp\": {},\n 
\"inner_out_from_inner_inp\": {},\n \"outer_out_from_inner_inp\": {},\n \"outer_inp_from_inner_out\": {},\n \"inner_inp_from_inner_out\": {},\n \"outer_out_from_inner_out\": {},\n }\n\n for (oinp, iinp, iout, oout) in zip(\n outer_input_indices,\n inner_input_indices,\n inner_output_indices,\n outer_output_indices,\n ):\n\n if oout != -1:\n mappings[\"outer_inp_from_outer_out\"][oout] = oinp\n mappings[\"inner_inp_from_outer_out\"][oout] = iinp\n mappings[\"inner_out_from_outer_out\"][oout] = iout\n\n if oinp != -1:\n mappings[\"inner_inp_from_outer_inp\"][oinp] = iinp\n mappings[\"inner_out_from_outer_inp\"][oinp] = iout\n mappings[\"outer_out_from_outer_inp\"][oinp] = oout\n\n for idx in iinp:\n mappings[\"outer_inp_from_inner_inp\"][idx] = oinp\n mappings[\"inner_out_from_inner_inp\"][idx] = iout\n mappings[\"outer_out_from_inner_inp\"][idx] = oout\n\n for idx in iout:\n mappings[\"outer_inp_from_inner_out\"][idx] = oinp\n mappings[\"inner_inp_from_inner_out\"][idx] = iinp\n mappings[\"outer_out_from_inner_out\"][idx] = oout\n\n return mappings\n\n def L_op(self, inputs, outs, dC_douts):\n if not isinstance(outs, (list, tuple)):\n outs = [outs]\n # `grad_step` equals the number of steps the original scan node has\n # done (if the original scan is a while loop than this number is the\n # length of the output sequence)\n # We do not know what kind of outputs the original scan has, so we\n # try first to see if it has a nit_sot output, then a sit_sot and\n # then a mit_sot\n if self.n_nit_sot > 0:\n grad_steps = self.outer_nitsot_outs(outs)[0].shape[0]\n elif self.n_sit_sot > 0:\n grad_steps = self.outer_sitsot_outs(outs)[0].shape[0] - 1\n elif self.n_mit_sot > 0:\n grad_steps = (\n self.outer_mitsot_outs(outs)[0].shape[0] + self.mintaps[self.n_mit_mot]\n )\n else:\n grad_steps = inputs[0]\n if self.as_while:\n n_steps = outs[0].shape[0]\n\n # Restrict the number of grad steps according to\n # self.truncate_gradient\n if self.truncate_gradient != -1:\n grad_steps = minimum(grad_steps, self.truncate_gradient)\n\n self_inputs = self.inputs\n self_outputs = self.outputs\n # differentiable inputs\n diff_inputs = (\n self.inner_seqs(self_inputs)\n + self.inner_mitmot(self_inputs)\n + self.inner_mitsot(self_inputs)\n + self.inner_sitsot(self_inputs)\n + self.inner_non_seqs(self_inputs)\n )\n diff_outputs = (\n self.inner_mitmot_outs(self_outputs)\n + self.inner_mitsot_outs(self_outputs)\n + self.inner_sitsot_outs(self_outputs)\n + self.inner_nitsot_outs(self_outputs)\n )\n scan_node = outs[0].owner\n connection_pattern = self.connection_pattern(scan_node)\n\n def get_inp_idx(iidx):\n if iidx < self.n_seqs:\n return 1 + iidx\n oidx = 1 + self.n_seqs\n iidx = iidx - self.n_seqs\n for taps in self.mitmot_taps():\n if len(taps) > iidx:\n return oidx\n else:\n oidx += 1\n iidx -= len(taps)\n for taps in self.mitsot_taps():\n if len(taps) > iidx:\n return oidx\n else:\n oidx += 1\n iidx -= len(taps)\n\n if iidx < self.info[\"n_sit_sot\"]:\n return oidx + iidx\n else:\n return oidx + iidx + self.info[\"n_nit_sot\"]\n\n def get_out_idx(iidx):\n oidx = 0\n for taps in self.mitmot_out_taps():\n if len(taps) > iidx:\n return oidx\n else:\n oidx += 1\n iidx -= len(taps)\n return oidx + iidx\n\n def compute_all_gradients(known_grads):\n y_s = known_grads.keys()\n g_y_s = known_grads.values()\n\n for g_y in g_y_s:\n if str(g_y.dtype) in integer_dtypes:\n raise TypeError(\n \"Gradients may never be integers but g_y \"\n \"has type \" + str(g_y.type)\n )\n\n out_indices = [get_out_idx(self_outputs.index(y)) for 
y in y_s]\n\n connected_inputs = [\n i\n for i in range(len(scan_node.inputs))\n if any([connection_pattern[i][odx] for odx in out_indices])\n ]\n\n wrt = [\n x\n for x in graph_inputs(y_s)\n if (x in diff_inputs)\n and get_inp_idx(self_inputs.index(x)) in connected_inputs\n ]\n gmp = OrderedDict()\n\n # Required in case there is a pair of variables X and Y, with X\n # used to compute Y, for both of which there is an external\n # gradient signal. Without this, the total gradient signal on X\n # will be the external gradient signalknown_grads[X]. With this,\n # it will be the sum of the external gradient signal and the\n # gradient obtained by propagating Y's external gradient signal\n # to X.\n known_grads = OrderedDict([(k.copy(), v) for (k, v) in known_grads.items()])\n\n grads = grad(\n cost=None,\n known_grads=known_grads,\n wrt=wrt,\n consider_constant=wrt,\n disconnected_inputs=\"ignore\",\n return_disconnected=\"None\",\n null_gradients=\"return\",\n )\n\n for i in range(len(wrt)):\n gmp[wrt[i]] = grads[i]\n\n rval = [gmp.get(p, None) for p in diff_inputs]\n return rval\n\n dC_dinps_t = [None for inp in diff_inputs]\n disconnected_dC_dinps_t = [True for inp in diff_inputs]\n dC_dXts = []\n Xts = []\n for idx, Xt in enumerate(diff_outputs):\n\n # We are looking for x[t-1] for a given x[t]\n if idx >= self.n_mit_mot_outs:\n Xt_placeholder = safe_new(Xt)\n Xts.append(Xt_placeholder)\n\n # Different processing based on whether Xt is a nitsot output\n # or not. NOTE : This cannot be done by using\n # \"if Xt not in self.inner_nitsot_outs(self_outputs)\" because\n # the exact same variable can be used as multiple outputs.\n idx_nitsot_start = (\n self.info[\"n_mit_mot\"] + self.info[\"n_mit_sot\"] + self.info[\"n_sit_sot\"]\n )\n idx_nitsot_end = idx_nitsot_start + self.info[\"n_nit_sot\"]\n if idx < idx_nitsot_start or idx >= idx_nitsot_end:\n # What we do here is loop through dC_douts and collect all\n # those that are connected to the specific one and do an\n # upcast on all of their dtypes to get the dtype for this\n # specific output. 
Deciding if the gradient with this\n # specific previous step is defined or not is done somewhere\n # else.\n dtypes = []\n states = (\n self.inner_mitmot(self_inputs)\n + self.inner_mitsot(self_inputs)\n + self.inner_sitsot(self_inputs)\n )\n\n for pos, inp in enumerate(states):\n if inp in graph_inputs([Xt]):\n # Get the index of the outer output that to which\n # the state variable 'inp' corresponds.\n outer_oidx = self.var_mappings[\"outer_out_from_inner_inp\"][\n self.n_seqs + pos\n ]\n\n if not isinstance(dC_douts[outer_oidx].type, DisconnectedType):\n dtypes.append(dC_douts[outer_oidx].dtype)\n if dtypes:\n new_dtype = aesara.scalar.upcast(*dtypes)\n else:\n new_dtype = config.floatX\n dC_dXt = safe_new(Xt, dtype=new_dtype)\n else:\n if isinstance(dC_douts[idx].type, DisconnectedType):\n continue\n dC_dXt = safe_new(dC_douts[idx][0])\n dC_dXts.append(dC_dXt)\n\n known_grads = OrderedDict()\n dc_dxts_idx = 0\n for i in range(len(diff_outputs)):\n if i < idx_nitsot_start or i >= idx_nitsot_end:\n if diff_outputs[i] in known_grads:\n known_grads[diff_outputs[i]] += dC_dXts[dc_dxts_idx]\n else:\n known_grads[diff_outputs[i]] = dC_dXts[dc_dxts_idx]\n dc_dxts_idx += 1\n else:\n if isinstance(dC_douts[i].type, DisconnectedType):\n continue\n else:\n if diff_outputs[i] in known_grads:\n known_grads[diff_outputs[i]] += dC_dXts[dc_dxts_idx]\n else:\n known_grads[diff_outputs[i]] = dC_dXts[dc_dxts_idx]\n dc_dxts_idx += 1\n dC_dinps_t = compute_all_gradients(known_grads)\n\n # mask inputs that get no gradients\n for dx in range(len(dC_dinps_t)):\n if not dC_dinps_t[dx]:\n dC_dinps_t[dx] = aet.zeros_like(diff_inputs[dx])\n else:\n disconnected_dC_dinps_t[dx] = False\n for Xt, Xt_placeholder in zip(diff_outputs[self.n_mit_mot_outs :], Xts):\n tmp = forced_replace(dC_dinps_t[dx], Xt, Xt_placeholder)\n dC_dinps_t[dx] = tmp\n\n # construct dX_dtm1\n dC_dXtm1s = []\n for pos, x in enumerate(dC_dinps_t[self.n_seqs :]):\n\n # Get the index of the first inner input corresponding to the\n # pos-ieth inner input state\n idxs = self.var_mappings[\"inner_out_from_inner_inp\"][self.n_seqs + pos]\n\n # Check if the pos-th input is associated with one of the\n # recurrent states\n x_is_state = pos < sum([len(t) for t in self.tap_array])\n\n if x_is_state and len(idxs) > 0:\n opos = idxs[0]\n dC_dXtm1s.append(safe_new(dC_dXts[opos]))\n if hasattr(x, \"dtype\") and x.dtype != dC_dXts[opos].dtype:\n dC_dinps_t[pos + self.n_seqs] = x.astype(dC_dXts[opos].dtype)\n else:\n dC_dXtm1s.append(safe_new(x))\n\n for dx, dC_dXtm1 in enumerate(dC_dXtm1s):\n if isinstance(dC_dinps_t[dx + self.n_seqs].type, NullType):\n # The accumulated gradient is undefined\n pass\n elif isinstance(dC_dXtm1.type, NullType):\n # The new gradient is undefined, this makes the accumulated\n # gradient undefined as weell\n dC_dinps_t[dx + self.n_seqs] = dC_dXtm1\n else:\n dC_dinps_t[dx + self.n_seqs] += dC_dXtm1\n # Construct scan op\n # Seqs\n if self.as_while:\n # equivalent to x[:n_steps][::-1]\n outer_inp_seqs = [x[n_steps - 1 :: -1] for x in inputs[1 : 1 + self.n_seqs]]\n else:\n outer_inp_seqs = [x[::-1] for x in inputs[1 : 1 + self.n_seqs]]\n for idx in range(self.n_mit_mot + self.n_mit_sot):\n mintap = np.min(self.tap_array[idx])\n if idx < self.n_mit_mot:\n outmaxtap = np.max(self.mitmot_out_taps()[idx])\n else:\n outmaxtap = 0\n seq = outs[idx]\n for k in self.tap_array[idx]:\n if outmaxtap - k != 0:\n nw_seq = seq[k - mintap : -(outmaxtap - k)][::-1]\n else:\n nw_seq = seq[k - mintap :][::-1]\n outer_inp_seqs.append(nw_seq)\n 
outer_inp_seqs += [x[:-1][::-1] for x in self.outer_sitsot_outs(outs)]\n for x in self.outer_nitsot_outs(dC_douts):\n if not isinstance(x.type, DisconnectedType):\n if self.as_while:\n # equivalent to x[:n_steps][::-1]\n outer_inp_seqs.append(x[n_steps - 1 :: -1])\n else:\n outer_inp_seqs.append(x[::-1])\n\n if hasattr(inputs[0].tag, \"test_value\"):\n # Here we tests that the new scan input sequence all have\n # the same shape[0]. This is a properties that the scan()\n # fct add and we want to keep it for all Scan op. This is\n # used in T_Scan.test_grad_multiple_outs_taps to test\n # that.\n if self.as_while:\n n = n_steps.tag.test_value\n else:\n n = inputs[0].tag.test_value\n for taps, x in zip(self.mitsot_taps(), self.outer_mitsot_outs(outs)):\n mintap = np.min(taps)\n if hasattr(x[::-1][:mintap], \"test_value\"):\n assert x[::-1][:mintap].tag.test_value.shape[0] == n\n for x in self.outer_sitsot_outs(outs):\n if hasattr(x[::-1][:-1].tag, \"test_value\"):\n assert x[::-1][:-1].tag.test_value.shape[0] == n\n for x in self.outer_nitsot_outs(outs):\n if hasattr(x[::-1].tag, \"test_value\"):\n if self.as_while:\n assert x[n_steps - 1 :: -1].tag.test_value.shape[0] == n\n else:\n assert x[::-1].tag.test_value.shape[0] == n\n outer_inp_seqs += [\n x[::-1][: np.min(taps)]\n for taps, x in zip(self.mitsot_taps(), self.outer_mitsot_outs(outs))\n ]\n outer_inp_seqs += [x[::-1][:-1] for x in self.outer_sitsot_outs(outs)]\n outer_inp_seqs += [x[::-1] for x in self.outer_nitsot_outs(outs)]\n\n # Restrict the length of the outer sequences to the number of grad\n # steps\n outer_inp_seqs = [s_[:grad_steps] for s_ in outer_inp_seqs]\n\n inner_inp_seqs = self.inner_seqs(self_inputs)\n inner_inp_seqs += self.inner_mitmot(self_inputs)\n inner_inp_seqs += self.inner_mitsot(self_inputs)\n inner_inp_seqs += self.inner_sitsot(self_inputs)\n inner_inp_seqs += self.inner_nitsot_outs(dC_dXts)\n inner_inp_seqs += Xts\n # mitmot\n outer_inp_mitmot = []\n inner_inp_mitmot = []\n inner_out_mitmot = []\n mitmot_inp_taps = []\n mitmot_out_taps = []\n type_outs = []\n out_pos = 0\n ins_pos = self.n_seqs\n n_mitmot_outs = 0\n n_mitmot_inps = 0\n\n for idx in range(self.n_mit_mot):\n if isinstance(dC_douts[idx].type, DisconnectedType):\n out = outs[idx]\n outer_inp_mitmot.append(aet.zeros_like(out))\n else:\n outer_inp_mitmot.append(dC_douts[idx][::-1])\n mitmot_inp_taps.append([])\n mitmot_out_taps.append([])\n undefined_msg = None\n through_shared = False\n disconnected = True\n\n for jdx in range(len(self.mit_mot_out_slices[idx])):\n inner_inp_mitmot.append(dC_dXts[out_pos])\n mitmot_inp_taps[idx].append(-self.mit_mot_out_slices[idx][jdx])\n n_mitmot_inps += 1\n out_pos += 1\n\n for jdx in range(len(self.tap_array[idx])):\n tap = -self.tap_array[idx][jdx]\n\n # Only create a new inner input if there is not already one\n # associated with this input tap\n if tap not in mitmot_inp_taps[idx]:\n inner_inp_mitmot.append(dC_dXtm1s[ins_pos - self.n_seqs])\n\n if isinstance(dC_dinps_t[ins_pos].type, NullType):\n # We cannot use Null in the inner graph, so we\n # use a zero tensor of the appropriate shape instead.\n inner_out_mitmot.append(\n aet.zeros(diff_inputs[ins_pos].shape, dtype=config.floatX)\n )\n undefined_msg = dC_dinps_t[ins_pos].type.why_null\n else:\n new_inner_out_mitmot = dC_dinps_t[ins_pos]\n\n # If there is already an inner input associated with that\n # input tap, make sure the computation of the new output\n # uses it instead of the input it's currently using\n if tap in mitmot_inp_taps[idx]:\n to_replace 
= dC_dXtm1s[ins_pos - self.n_seqs]\n replacement_idx = len(mitmot_inp_taps[idx]) - mitmot_inp_taps[\n idx\n ].index(tap)\n replacement = inner_inp_mitmot[-replacement_idx]\n\n self.tap_array[idx]\n new_inner_out_mitmot = clone_replace(\n new_inner_out_mitmot, replace=[(to_replace, replacement)]\n )\n\n inner_out_mitmot.append(new_inner_out_mitmot)\n\n if not disconnected_dC_dinps_t[ins_pos]:\n disconnected = False\n\n for _sh in self.inner_shared(self_inputs):\n if _sh in graph_inputs([dC_dinps_t[ins_pos]]):\n through_shared = True\n\n ins_pos += 1\n n_mitmot_outs += 1\n mitmot_out_taps[idx].append(-self.tap_array[idx][jdx])\n\n # Only add the tap as a new input tap if needed\n if tap not in mitmot_inp_taps[idx]:\n n_mitmot_inps += 1\n mitmot_inp_taps[idx].append(-self.tap_array[idx][jdx])\n\n if undefined_msg:\n type_outs.append(undefined_msg)\n elif through_shared:\n type_outs.append(\"through_shared\")\n elif disconnected:\n type_outs.append(\"disconnected\")\n else:\n type_outs.append(\"connected\")\n\n offset = self.n_mit_mot\n for idx in range(self.n_mit_sot):\n if isinstance(dC_douts[idx + offset].type, DisconnectedType):\n outer_inp_mitmot.append(outs[idx + offset].zeros_like())\n else:\n outer_inp_mitmot.append(dC_douts[idx + offset][::-1])\n mitmot_inp_taps.append([])\n mitmot_out_taps.append([])\n idx_tap = idx + self.n_mit_mot\n inner_inp_mitmot.append(dC_dXts[out_pos])\n out_pos += 1\n n_mitmot_inps += 1\n undefined_msg = None\n through_shared = False\n disconnected = True\n mitmot_inp_taps[idx + offset].append(0)\n for jdx in range(len(self.tap_array[idx_tap])):\n inner_inp_mitmot.append(dC_dXtm1s[ins_pos - self.n_seqs])\n\n if isinstance(dC_dinps_t[ins_pos].type, NullType):\n # We cannot use Null in the inner graph, so we\n # use a zero tensor of the appropriate shape instead.\n inner_out_mitmot.append(\n aet.zeros(diff_inputs[ins_pos].shape, dtype=config.floatX)\n )\n undefined_msg = dC_dinps_t[ins_pos].type.why_null\n else:\n inner_out_mitmot.append(dC_dinps_t[ins_pos])\n\n mitmot_inp_taps[idx + offset].append(-self.tap_array[idx_tap][jdx])\n mitmot_out_taps[idx].append(-self.tap_array[idx_tap][jdx])\n if not disconnected_dC_dinps_t[ins_pos]:\n disconnected = False\n for _sh in self.inner_shared(self_inputs):\n if _sh in graph_inputs([dC_dinps_t[ins_pos]]):\n through_shared = True\n\n n_mitmot_inps += 1\n ins_pos += 1\n n_mitmot_outs += 1\n\n if undefined_msg:\n type_outs.append(undefined_msg)\n elif through_shared:\n type_outs.append(\"through_shared\")\n elif disconnected:\n type_outs.append(\"disconnected\")\n else:\n type_outs.append(\"connected\")\n\n offset += self.n_mit_sot\n for idx in range(self.n_sit_sot):\n mitmot_inp_taps.append([0, 1])\n mitmot_out_taps.append([1])\n through_shared = False\n if not isinstance(dC_douts[idx + offset].type, DisconnectedType):\n outer_inp_mitmot.append(dC_douts[idx + offset][::-1])\n else:\n if isinstance(dC_dinps_t[ins_pos].type, NullType):\n # Cannot use dC_dinps_t[ins_pos].dtype, so we use\n # floatX instead, as it is a dummy value that will not\n # be used anyway.\n outer_inp_mitmot.append(\n aet.zeros(outs[idx + offset].shape, dtype=config.floatX)\n )\n else:\n outer_inp_mitmot.append(\n aet.zeros(\n outs[idx + offset].shape, dtype=dC_dinps_t[ins_pos].dtype\n )\n )\n\n if isinstance(dC_dinps_t[ins_pos].type, NullType):\n # We cannot use Null in the inner graph, so we\n # use a zero tensor of the appropriate shape instead.\n inner_out_mitmot.append(\n aet.zeros(diff_inputs[ins_pos].shape, dtype=config.floatX)\n )\n else:\n 
inner_out_mitmot.append(dC_dinps_t[ins_pos])\n\n for _sh in self.inner_shared(self_inputs):\n if _sh in graph_inputs([dC_dinps_t[ins_pos]]):\n through_shared = True\n\n if isinstance(dC_dinps_t[ins_pos].type, NullType):\n type_outs.append(dC_dinps_t[ins_pos].type.why_null)\n elif through_shared:\n type_outs.append(\"through_shared\")\n elif disconnected_dC_dinps_t[ins_pos]:\n type_outs.append(\"disconnected\")\n else:\n type_outs.append(\"connected\")\n\n inner_inp_mitmot += [dC_dXts[out_pos], dC_dXtm1s[ins_pos - self.n_seqs]]\n n_mitmot_outs += 1\n out_pos += 1\n ins_pos += 1\n n_mitmot_inps += 2\n\n n_nit_sot = self.n_seqs\n inner_out_nitsot = dC_dinps_t[: self.n_seqs]\n inner_out_sitsot = dC_dinps_t[ins_pos:]\n for _p, vl in enumerate(inner_out_sitsot):\n through_shared = False\n for _sh in self.inner_shared(self_inputs):\n if _sh in graph_inputs([vl]):\n through_shared = True\n if isinstance(vl.type, NullType):\n type_outs.append(vl.type.why_null)\n # Replace the inner output with a zero tensor of\n # the right shape\n inner_out_sitsot[_p] = aet.zeros(\n diff_inputs[ins_pos + _p].shape, dtype=config.floatX\n )\n elif through_shared:\n type_outs.append(\"through_shared\")\n elif disconnected_dC_dinps_t[_p + ins_pos]:\n type_outs.append(\"disconnected\")\n else:\n type_outs.append(\"connected\")\n\n for _p, vl in enumerate(inner_out_nitsot):\n through_shared = False\n for _sh in self.inner_shared(self_inputs):\n if _sh in graph_inputs([vl]):\n through_shared = True\n if isinstance(vl.type, NullType):\n type_outs.append(vl.type.why_null)\n # Replace the inner output with a zero tensor of\n # the right shape\n inner_out_nitsot[_p] = aet.zeros(\n diff_inputs[_p].shape, dtype=config.floatX\n )\n\n if through_shared:\n type_outs.append(\"through_shared\")\n elif disconnected_dC_dinps_t[_p]:\n type_outs.append(\"disconnected\")\n else:\n type_outs.append(\"connected\")\n\n inner_inp_sitsot = dC_dXtm1s[ins_pos - self.n_seqs :]\n outer_inp_sitsot = []\n for _idx, y in enumerate(inner_inp_sitsot):\n x = self.outer_non_seqs(inputs)[_idx]\n if isinstance(y.type, NullType):\n # Cannot use dC_dXtm1s.dtype, so we use floatX instead.\n outer_inp_sitsot.append(\n aet.zeros(\n [grad_steps + 1] + [x.shape[i] for i in range(x.ndim)],\n dtype=config.floatX,\n )\n )\n # replace y by a zero tensor of the right shape\n inner_inp_sitsot[_idx] = aet.zeros(\n diff_inputs[ins_pos + _idx].shape, dtype=config.floatX\n )\n\n else:\n outer_inp_sitsot.append(\n aet.zeros(\n [grad_steps + 1] + [x.shape[i] for i in range(x.ndim)],\n dtype=y.dtype,\n )\n )\n\n n_sitsot_outs = len(outer_inp_sitsot)\n new_tap_array = mitmot_inp_taps + [[-1] for k in range(n_sitsot_outs)]\n\n info = OrderedDict()\n info[\"n_seqs\"] = len(outer_inp_seqs)\n info[\"n_mit_sot\"] = 0\n info[\"tap_array\"] = new_tap_array\n info[\"gpua\"] = False\n info[\"n_mit_mot\"] = len(outer_inp_mitmot)\n info[\"n_mit_mot_outs\"] = n_mitmot_outs\n info[\"mit_mot_out_slices\"] = mitmot_out_taps\n info[\"truncate_gradient\"] = self.truncate_gradient\n info[\"n_sit_sot\"] = n_sitsot_outs\n info[\"n_shared_outs\"] = 0\n info[\"n_nit_sot\"] = n_nit_sot\n info[\"as_while\"] = False\n info[\"profile\"] = self.profile\n info[\"destroy_map\"] = OrderedDict()\n if self.name:\n info[\"name\"] = \"grad_of_\" + self.name\n else:\n info[\"name\"] = None\n info[\"mode\"] = self.mode\n info[\"allow_gc\"] = self.allow_gc\n\n outer_inputs = (\n [grad_steps]\n + outer_inp_seqs\n + outer_inp_mitmot\n + outer_inp_sitsot\n + [n_steps if self.as_while else inputs[0] for _ in 
range(n_nit_sot)]\n + self.outer_shared(inputs)\n + self.outer_non_seqs(inputs)\n )\n\n inner_gfn_ins = (\n inner_inp_seqs\n + inner_inp_mitmot\n + inner_inp_sitsot\n + self.inner_shared(self_inputs)\n + self.inner_non_seqs(self_inputs)\n )\n inner_gfn_outs = inner_out_mitmot + inner_out_sitsot + inner_out_nitsot\n\n local_op = Scan(inner_gfn_ins, inner_gfn_outs, info)\n outputs = local_op(*outer_inputs)\n if type(outputs) not in (list, tuple):\n outputs = [outputs]\n # Re-order the gradients correctly\n gradients = [DisconnectedType()()]\n\n offset = self.n_mit_mot + self.n_mit_sot + self.n_sit_sot + n_sitsot_outs\n for p, (x, t) in enumerate(\n zip(\n outputs[offset : offset + self.n_seqs],\n type_outs[offset : offset + self.n_seqs],\n )\n ):\n if t == \"connected\":\n # If the forward scan is in as_while mode, we need to pad\n # the gradients, so that they match the size of the input\n # sequences.\n if self.as_while:\n n_zeros = inputs[0] - n_steps\n shp = (n_zeros,)\n if x.ndim > 1:\n shp = shp + tuple(x.shape[i] for i in range(1, x.ndim))\n z = aet.zeros(shp, dtype=x.dtype)\n x = aet.concatenate([x[::-1], z], axis=0)\n gradients.append(x)\n else:\n gradients.append(x[::-1])\n elif t == \"disconnected\":\n gradients.append(DisconnectedType()())\n elif t == \"through_shared\":\n gradients.append(\n grad_undefined(\n self, p + 1, inputs[p + 1], \"Depends on a shared variable\"\n )\n )\n else:\n # t contains the \"why_null\" string of a NullType\n gradients.append(NullType(t)())\n\n end = self.n_mit_mot + self.n_mit_sot + self.n_sit_sot\n for p, (x, t) in enumerate(zip(outputs[:end], type_outs[:end])):\n if t == \"connected\":\n # If the forward scan is in as_while mode, we need to pad\n # the gradients, so that they match the size of the input\n # sequences.\n if self.as_while:\n n_zeros = inputs[0] - grad_steps\n shp = (n_zeros,)\n if x.ndim > 1:\n shp = shp + tuple(x.shape[i] for i in range(1, x.ndim))\n z = aet.zeros(shp, dtype=x.dtype)\n x = aet.concatenate([x[::-1], z], axis=0)\n gradients.append(x)\n else:\n gradients.append(x[::-1])\n elif t == \"disconnected\":\n gradients.append(DisconnectedType()())\n elif t == \"through_shared\":\n gradients.append(\n grad_undefined(\n self,\n p + 1 + self.n_seqs,\n inputs[p + 1 + self.n_seqs],\n \"Depends on a shared variable\",\n )\n )\n else:\n # t contains the \"why_null\" string of a NullType\n gradients.append(NullType(t)())\n\n start = len(gradients)\n node = outs[0].owner\n for idx in range(self.n_shared_outs):\n disconnected = True\n connected_flags = self.connection_pattern(node)[idx + start]\n for dC_dout, connected in zip(dC_douts, connected_flags):\n if not isinstance(dC_dout.type, DisconnectedType) and connected:\n disconnected = False\n if disconnected:\n gradients.append(DisconnectedType()())\n else:\n gradients.append(\n grad_undefined(\n self, idx, inputs[idx], \"Shared Variable with update\"\n )\n )\n\n start = len(gradients)\n gradients += [DisconnectedType()() for _ in range(self.n_nit_sot)]\n begin = end\n\n end = begin + n_sitsot_outs\n for p, (x, t) in enumerate(zip(outputs[begin:end], type_outs[begin:end])):\n if t == \"connected\":\n gradients.append(x[-1])\n elif t == \"disconnected\":\n gradients.append(DisconnectedType()())\n elif t == \"through_shared\":\n gradients.append(\n grad_undefined(\n self,\n p + begin + 1,\n inputs[p + begin + 1],\n \"Depends on a shared variable\",\n )\n )\n else:\n # t contains the \"why_null\" string of a NullType\n gradients.append(NullType(t)())\n\n # Mask disconnected gradients\n 
# Ideally we would want to assert that the gradients we are\n # replacing do indeed evaluate to 0, though that is not practical\n # from a computational point of view\n # The gradients of scan are computed replacing Disconnected with 0,\n # because through the recurrence they can become nonzero\n for idx in range(len(gradients)):\n disconnected = True\n for kdx in range(len(node.outputs)):\n if connection_pattern[idx][kdx] and not isinstance(\n dC_douts[kdx].type, DisconnectedType\n ):\n disconnected = False\n if disconnected:\n gradients[idx] = DisconnectedType()()\n return gradients\n\n def R_op(self, inputs, eval_points):\n # Step 0. Prepare some shortcut variable\n self_inputs = self.inputs\n rop_of_inputs = (\n self_inputs[: self.n_seqs + self.n_outs]\n + self_inputs[self.n_seqs + self.n_outs + self.n_shared_outs :]\n )\n self_outputs = self.outputs\n\n # Step 1. Compute the R_op of the inner function\n inner_eval_points = [safe_new(x, \"_evalpoint\") for x in rop_of_inputs]\n if self.as_while:\n rop_self_outputs = self_outputs[:-1]\n else:\n rop_self_outputs = self_outputs\n if self.info[\"n_shared_outs\"] > 0:\n rop_self_outputs = rop_self_outputs[: -self.info[\"n_shared_outs\"]]\n rop_outs = Rop(rop_self_outputs, rop_of_inputs, inner_eval_points)\n if type(rop_outs) not in (list, tuple):\n rop_outs = [rop_outs]\n # Step 2. Figure out what corresponds to what in the scan\n\n # When doing the R-op of scan, you end up having double of each type of\n # input, because for each sequence you need also its eval point, for\n # each mit_mot, mit_sot, sit_sot or other type of inputs the same.\n # Interestingly enough, all these types of eval points behave the same\n # way as the input to which they correspond\n # The only exception is the eval point for the number of sequences, and\n # evan point for the number of nit_sot which I think should just be\n # ignored (?)\n info = OrderedDict()\n info[\"n_seqs\"] = self.n_seqs * 2\n info[\"n_mit_sot\"] = self.n_mit_sot * 2\n info[\"n_sit_sot\"] = self.n_sit_sot * 2\n info[\"n_mit_mot\"] = self.n_mit_mot * 2\n info[\"n_nit_sot\"] = self.n_nit_sot * 2\n info[\"n_shared_outs\"] = self.n_shared_outs\n info[\"gpua\"] = False\n info[\"as_while\"] = self.as_while\n info[\"profile\"] = self.profile\n info[\"truncate_gradient\"] = self.truncate_gradient\n if self.name:\n info[\"name\"] = \"rop_of_\" + self.name\n else:\n info[\"name\"] = None\n info[\"mode\"] = self.mode\n info[\"allow_gc\"] = self.allow_gc\n info[\"mit_mot_out_slices\"] = self.mit_mot_out_slices * 2\n info[\"destroy_map\"] = OrderedDict()\n new_tap_array = []\n b = 0\n e = self.n_mit_mot\n new_tap_array += self.tap_array[b:e] * 2\n b = e\n e += self.n_mit_sot\n new_tap_array += self.tap_array[b:e] * 2\n b = e\n e += self.n_sit_sot\n new_tap_array += self.tap_array[b:e] * 2\n info[\"tap_array\"] = new_tap_array\n\n # Sequences ...\n b = 1\n ib = 0\n e = 1 + self.n_seqs\n ie = self.n_seqs\n clean_eval_points = []\n for inp, evp in zip(inputs[b:e], eval_points[b:e]):\n if evp is not None:\n clean_eval_points.append(evp)\n else:\n clean_eval_points.append(inp.zeros_like())\n\n scan_seqs = inputs[b:e] + clean_eval_points\n inner_seqs = self_inputs[ib:ie] + inner_eval_points[ib:ie]\n\n # MIT_MOT sequences ...\n b = e\n e = e + self.n_mit_mot\n ib = ie\n ie = ie + int(np.sum([len(x) for x in self.tap_array[: self.n_mit_mot]]))\n clean_eval_points = []\n for inp, evp in zip(inputs[b:e], eval_points[b:e]):\n if evp is not None:\n clean_eval_points.append(evp)\n else:\n 
clean_eval_points.append(inp.zeros_like())\n\n scan_mit_mot = inputs[b:e] + clean_eval_points\n inner_mit_mot = self_inputs[ib:ie] + inner_eval_points[ib:ie]\n\n # MIT_SOT sequences ...\n b = e\n e = e + self.n_mit_sot\n ib = ie\n ie = ie + int(\n np.sum(\n [\n len(x)\n for x in self.tap_array[\n self.n_mit_mot : self.n_mit_mot + self.n_mit_sot\n ]\n ]\n )\n )\n clean_eval_points = []\n for inp, evp in zip(inputs[b:e], eval_points[b:e]):\n if evp is not None:\n clean_eval_points.append(evp)\n else:\n clean_eval_points.append(inp.zeros_like())\n\n scan_mit_sot = inputs[b:e] + eval_points[b:e]\n inner_mit_sot = self_inputs[ib:ie] + inner_eval_points[ib:ie]\n\n # SIT_SOT sequences ...\n b = e\n e = e + self.n_sit_sot\n ib = ie\n ie = ie + self.n_sit_sot\n clean_eval_points = []\n for inp, evp in zip(inputs[b:e], eval_points[b:e]):\n if evp is not None:\n clean_eval_points.append(evp)\n else:\n clean_eval_points.append(inp.zeros_like())\n\n scan_sit_sot = inputs[b:e] + clean_eval_points\n inner_sit_sot = self_inputs[ib:ie] + inner_eval_points[ib:ie]\n\n # Shared outs ...\n b = e\n e = e + self.n_shared_outs\n ib = ie\n ie = ie + self.n_shared_outs\n scan_shared = inputs[b:e]\n inner_shared = self_inputs[ib:ie]\n\n # NIT_SOT sequences\n b = e\n e = e + self.n_nit_sot\n scan_nit_sot = inputs[b:e] * 2\n\n # All other arguments\n clean_eval_points = []\n for inp, evp in zip(inputs[e:], eval_points[e:]):\n if evp is not None:\n clean_eval_points.append(evp)\n else:\n clean_eval_points.append(inp.zeros_like())\n scan_other = inputs[e:] + clean_eval_points\n # inner_eval_points do not have entries for shared variables\n inner_other = self_inputs[ie:] + inner_eval_points[ib:]\n\n # Outputs\n n_mit_mot_outs = int(np.sum([len(x) for x in self.mit_mot_out_slices]))\n info[\"n_mit_mot_outs\"] = n_mit_mot_outs * 2\n b = 0\n e = n_mit_mot_outs\n inner_out_mit_mot = self_outputs[b:e] + rop_outs[b:e]\n b = e\n e = e + self.n_mit_sot\n inner_out_mit_sot = self_outputs[b:e] + rop_outs[b:e]\n b = e\n e = e + self.n_sit_sot\n inner_out_sit_sot = self_outputs[b:e] + rop_outs[b:e]\n b = e\n e = e + self.n_nit_sot\n inner_out_nit_sot = self_outputs[b:e] + rop_outs[b:e]\n b = e\n e = e + self.n_shared_outs\n inner_out_shared = self_outputs[b:e]\n\n inner_ins = (\n inner_seqs\n + inner_mit_mot\n + inner_mit_sot\n + inner_sit_sot\n + inner_shared\n + inner_other\n )\n inner_outs = (\n inner_out_mit_mot\n + inner_out_mit_sot\n + inner_out_sit_sot\n + inner_out_nit_sot\n + inner_out_shared\n )\n\n if self.as_while:\n inner_outs += [self_outputs[-1]]\n scan_inputs = (\n [inputs[0]]\n + scan_seqs\n + scan_mit_mot\n + scan_mit_sot\n + scan_sit_sot\n + scan_shared\n + scan_nit_sot\n + scan_other\n )\n\n local_op = Scan(inner_ins, inner_outs, info)\n outputs = local_op(*scan_inputs)\n if type(outputs) not in (list, tuple):\n outputs = [outputs]\n # Select only the result of the R_op results\n final_outs = []\n b = self.n_mit_mot\n e = self.n_mit_mot * 2\n final_outs += outputs[b:e]\n b = e + self.n_mit_sot\n e = e + self.n_mit_sot * 2\n final_outs += outputs[b:e]\n b = e + self.n_sit_sot\n e = e + self.n_sit_sot * 2\n final_outs += outputs[b:e]\n b = e + self.n_nit_sot\n e = e + self.n_nit_sot * 2\n final_outs += outputs[b:e]\n final_outs += [None] * self.n_shared_outs\n\n return final_outs\n\n\n# Since Scan is an op that contains an Aesara compiled function, it is\n# useful to let DebugMode know about it.\nops_with_inner_function[Scan] = \"fn\"\n\n\n@register_profiler_printer\ndef profile_printer(\n message, compile_time, 
fct_call_time, apply_time, apply_cimpl, outputs_size, file\n):\n # Scan overhead profile\n if any(\n [\n isinstance(node.op, Scan) and v > 0\n for (fgraph, node), v in apply_time.items()\n ]\n ):\n print(\"\", file=file)\n print(\"Scan overhead:\", file=file)\n print(\n \"<Scan op time(s)> <sub scan fct time(s)> <sub scan op \"\n \"time(s)> <sub scan fct time(% scan op time)> <sub scan \"\n \"op time(% scan op time)> <node>\",\n file=file,\n )\n\n total_super_scan_time = 0\n total_scan_fct_time = 0\n total_scan_op_time = 0\n for (fgraph, node), v in apply_time.items():\n if isinstance(node.op, Scan) and not node.op.fn.profile:\n print(\n \" One scan node do not have its inner profile enabled. \"\n \"If you enable Aesara profiler with \"\n \"'aesara.function(..., profile=True)', you must manually\"\n \" enable the profiling for each scan too: \"\n \"'aesara.scan(...,profile=True)'.\"\n \" Or use Aesara flag 'profile=True'.\",\n file=file,\n )\n elif isinstance(node.op, Scan) and node.op.fn.profile:\n if v > 0:\n scan_fct_time = node.op.fn.profile.call_time\n scan_op_time = sum(node.op.fn.profile.apply_time.values())\n total_super_scan_time += v\n total_scan_fct_time += scan_fct_time\n total_scan_op_time += scan_op_time\n print(\n \" %5.1fs %5.1fs %5.1fs %5.1f%% %5.1f%%\"\n % (\n v,\n scan_fct_time,\n scan_op_time,\n scan_fct_time / v * 100,\n scan_op_time / v * 100,\n ),\n node,\n file=file,\n )\n else:\n print(\n (\" The node took 0s, so we can not \" \"compute the overhead\"),\n node,\n file=file,\n )\n if total_super_scan_time == 0:\n print(\" No scan have its inner profile enabled.\", file=file)\n else:\n print(\n \"total %5.1fs %5.1fs %5.1fs %5.1f%% %5.1f%%\"\n % (\n total_super_scan_time,\n total_scan_fct_time,\n total_scan_op_time,\n total_scan_fct_time / total_super_scan_time * 100,\n total_scan_op_time / total_super_scan_time * 100,\n ),\n file=file,\n )\n",
"import builtins\nimport warnings\n\nimport numpy as np\n\nfrom aesara import config, printing\nfrom aesara import scalar as aes\nfrom aesara.gradient import DisconnectedType\nfrom aesara.graph.basic import Apply, Variable\nfrom aesara.graph.op import COp, Op\nfrom aesara.graph.params_type import ParamsType\nfrom aesara.graph.type import Generic\nfrom aesara.misc.safe_asarray import _asarray\nfrom aesara.printing import pprint\nfrom aesara.scalar.basic import BinaryScalarOp\nfrom aesara.tensor.basic import (\n alloc,\n arange,\n as_tensor_variable,\n cast,\n concatenate,\n constant,\n patternbroadcast,\n stack,\n switch,\n)\nfrom aesara.tensor.elemwise import (\n CAReduce,\n CAReduceDtype,\n DimShuffle,\n Elemwise,\n scalar_elemwise,\n)\nfrom aesara.tensor.shape import shape\nfrom aesara.tensor.type import (\n complex_dtypes,\n continuous_dtypes,\n discrete_dtypes,\n int_dtypes,\n integer_dtypes,\n tensor,\n uint_dtypes,\n)\nfrom aesara.tensor.type_other import NoneConst\nfrom aesara.tensor.utils import as_list\nfrom aesara.tensor.var import TensorConstant, _tensor_py_operators\n\n\n# We capture the builtins that we are going to replace to follow the numpy API\n_abs = builtins.abs\n\n\nif int(config.tensor__cmp_sloppy) > 1:\n # This config variable is a quick-and-dirty way to get low-precision\n # comparisons. For a more precise setting of these tolerances set\n # them explicitly in your user code by assigning, for example,\n # \"aesara.tensor.math.float32_atol = ...\"\n\n # When config.tensor__cmp_sloppy>1 we are even more sloppy. This is\n # useful to test the GPU as they don't use extended precision and\n # this cause some difference bigger then the normal sloppy.\n float16_atol = 1e-2\n float16_rtol = 5e-2\n\n float32_atol = 5e-4\n float32_rtol = 1e-3\n\n float64_rtol = 1e-4\n float64_atol = 1e-3\nelif int(config.tensor__cmp_sloppy):\n float16_atol = 5e-3\n float16_rtol = 1e-2\n\n float32_atol = 1e-4\n float32_rtol = 1e-3\n\n float64_rtol = 1e-4\n float64_atol = 1e-3\nelse:\n # If you change those value in test don't forget to put them back\n # when the test end. 
Don't forget the case when the test fail.\n float16_atol = 1e-3\n float16_rtol = 1e-3\n\n float32_atol = 1e-5\n float32_rtol = 1e-5\n\n # defaults in numpy.allclose\n # Don't be more strict then numpy rtol\n # It cause useless error.\n float64_rtol = 1.0000000000000001e-05\n float64_atol = 1e-8\n\n\ndef _get_atol_rtol(a, b):\n tiny = (\"float16\",)\n narrow = (\"float32\", \"complex64\")\n if (str(a.dtype) in tiny) or (str(b.dtype) in tiny):\n atol = float16_atol\n rtol = float16_rtol\n elif (str(a.dtype) in narrow) or (str(b.dtype) in narrow):\n atol = float32_atol\n rtol = float32_rtol\n else:\n atol = float64_atol\n rtol = float64_rtol\n return atol, rtol\n\n\ndef _allclose(a, b, rtol=None, atol=None):\n a = np.asarray(a)\n b = np.asarray(b)\n atol_, rtol_ = _get_atol_rtol(a, b)\n if rtol is not None:\n rtol_ = rtol\n if atol is not None:\n atol_ = atol\n\n return np.allclose(a, b, atol=atol_, rtol=rtol_)\n\n\nclass MaxAndArgmax(COp):\n \"\"\"\n Calculate the max and argmax over a given axis or over all axes.\n\n \"\"\"\n\n nin = 2 # tensor, axis\n nout = 2 # max val, max idx\n E_axis = \"invalid axis\"\n params_type = Generic()\n __props__ = (\"axis\",)\n _f16_ok = True\n\n def __init__(self, axis):\n assert isinstance(axis, list)\n self.axis = tuple(axis)\n\n def get_params(self, node):\n return self.axis\n\n def make_node(self, x):\n x = as_tensor_variable(x)\n\n # We keep the original broadcastable flags for dimensions on which\n # we do not perform the max / argmax.\n all_axes = set(self.axis)\n broadcastable = [\n b for i, b in enumerate(x.type.broadcastable) if i not in all_axes\n ]\n inputs = [x]\n outputs = [\n tensor(x.type.dtype, broadcastable, name=\"max\"),\n tensor(\"int64\", broadcastable, name=\"argmax\"),\n ]\n return Apply(self, inputs, outputs)\n\n def perform(self, node, inp, outs, params):\n x = inp[0]\n axes = params\n max, max_idx = outs\n if axes is None:\n axes = tuple(range(x.ndim))\n else:\n axes = tuple(int(ax) for ax in axes)\n max[0] = _asarray(np.max(x, axes), dtype=node.outputs[0].dtype)\n # Numpy does not support multiple axes for argmax\n # Work around\n keep_axes = np.array([i for i in range(x.ndim) if i not in axes], dtype=\"int64\")\n # Not-reduced axes in front\n transposed_x = np.transpose(x, np.concatenate((keep_axes, axes)))\n kept_shape = transposed_x.shape[: len(keep_axes)]\n reduced_shape = transposed_x.shape[len(keep_axes) :]\n\n # Numpy.prod returns 1.0 when arg is empty, so we cast it to int64\n # Otherwise reshape would complain citing float arg\n new_shape = kept_shape + (np.prod(reduced_shape, dtype=\"int64\"),)\n reshaped_x = transposed_x.reshape(new_shape)\n\n max_idx[0] = _asarray(np.argmax(reshaped_x, axis=-1), dtype=\"int64\")\n\n def c_code(self, node, name, inp, out, sub):\n if len(self.axis) != 1 and len(self.axis) != node.inputs[0].ndim:\n raise NotImplementedError(\n \"NumPy C-API can compute max and argmax only for 1 axis or for all axes.\"\n )\n x = inp[0]\n axis = sub[\"params\"]\n max, argmax = out\n fail = sub[\"fail\"]\n ret = \"\"\"\n #if PY_MAJOR_VERSION >= 3\n #ifndef PyInt_AS_LONG\n #define PyInt_AS_LONG PyLong_AS_LONG\n #endif\n #endif\n\n int axis;\n\n if (PyTuple_GET_SIZE(%(axis)s) == PyArray_NDIM(%(x)s)) {\n axis = NPY_MAXDIMS;\n } else if(PyTuple_GET_SIZE(%(axis)s) == 1) {\n PyObject* axis_object = PyTuple_GET_ITEM(%(axis)s, 0);\n axis = (int)PyInt_AS_LONG(axis_object);\n if (axis > PyArray_NDIM(%(x)s)-1 || axis < -PyArray_NDIM(%(x)s)) {\n PyErr_SetString(PyExc_ValueError,\n \"MaxAndArgmax: bad axis 
argument\");\n %(fail)s\n }\n } else {\n PyErr_SetString(PyExc_NotImplementedError,\n \"MaxAndArgmax: NumPy C-API can compute max and argmax only for 1 axis or for all axes.\");\n %(fail)s\n }\n\n Py_CLEAR(%(max)s);\n Py_CLEAR(%(argmax)s);//todo pass them as out parameter.\n\n %(max)s = (PyArrayObject*)PyArray_Max(%(x)s, axis, NULL);\n if (%(max)s == NULL) {\n %(fail)s;\n }\n if (!PyArray_CheckExact(%(max)s)) {\n %(max)s = (PyArrayObject*)PyArray_FromAny((PyObject*)%(max)s, NULL, 0, 0, NPY_ARRAY_ENSUREARRAY, NULL);\n if(%(max)s == NULL){\n %(fail)s;\n }\n }\n\n %(argmax)s = (PyArrayObject*)PyArray_ArgMax(%(x)s, axis, NULL);\n if (%(argmax)s == NULL) {\n Py_CLEAR(%(max)s);\n %(fail)s;\n }\n if (!PyArray_CheckExact(%(argmax)s)) {\n %(argmax)s = (PyArrayObject*)PyArray_FromAny((PyObject*)%(argmax)s, NULL, 0, 0, NPY_ARRAY_ENSUREARRAY, NULL);\n if(%(argmax)s == NULL){\n %(fail)s;\n }\n }\n if (PyArray_TYPE(%(argmax)s) != NPY_INT64) {\n PyObject * tmp = PyArray_Cast(%(argmax)s, NPY_INT64);\n if (NULL == tmp){\n %(fail)s;\n }\n Py_DECREF(%(argmax)s);\n %(argmax)s = (PyArrayObject*)tmp;\n }\n \"\"\"\n return ret % locals()\n\n def c_code_cache_version(self):\n return (5,)\n\n def infer_shape(self, fgraph, node, shapes):\n ishape = shapes[0]\n rval = tuple(\n ishape[i]\n for (i, b) in enumerate(node.inputs[0].type.broadcastable)\n if i not in self.axis\n )\n return [rval, rval]\n\n def R_op(self, inputs, eval_points):\n if eval_points[0] is None:\n return [None, None]\n if len(self.axis) != 1:\n raise ValueError(\"R_op supported for arg_max only for \" \"one axis!\")\n if self.axis[0] > 1:\n raise ValueError(\"R_op supported for arg_max only when \" \" axis is 0 or 1\")\n if inputs[0].ndim != 2:\n raise ValueError(\n \"R_op supported for arg_max only when \" \" input is a matrix\"\n )\n max_vals, max_pos = self.make_node(*inputs).outputs\n if self.axis[0] == 0:\n return [eval_points[0][max_pos, arange(eval_points[0].shape[1])], None]\n else:\n return [eval_points[0][arange(eval_points[0].shape[0]), max_pos], None]\n\n def grad(self, inp, grads):\n # The strict sense mathematical gradient of the maximum function is\n # not calculated here for it is not defined at every point where some\n # coordinates are identical. 
However, since the latter set has null\n # Lebesgue measure, the result may be interpreted as weak gradient.\n\n # @note: This function should work correctly for L{vector}s.\n # (x, y), (gz, gw)\n # gz*dz/dx + gw*dw/dx, gz*dz/dy + gw*dw/dy\n # gMax * dMax/dx + gArgMax * dArgMax/dx,\n # gMax * dMax/daxis + gArgMax * dArgMax/daxis\n # g_max has one less dimension than x, so you need to complete\n # g_max to x's shape when axis=0 the broadcasting mechanism\n # does it automatically\n x = inp[0]\n axis = as_tensor_variable(self.axis)\n g_max, g_max_idx = grads\n\n g_max_disconnected = isinstance(g_max.type, DisconnectedType)\n g_max_idx_disconnected = isinstance(g_max_idx.type, DisconnectedType)\n\n # if the op is totally disconnected, so are its inputs\n if g_max_disconnected and g_max_idx_disconnected:\n return [DisconnectedType()(), DisconnectedType()()]\n\n # if the max is disconnected but the argmax is not,\n # the gradient on its inputs is zero\n if g_max_disconnected:\n return [x.zeros_like()]\n if NoneConst.equals(axis):\n axis_ = list(range(x.ndim))\n else:\n axis_ = axis\n xmax = max(x, axis_)\n\n # Raise the g_max and xmax to the same number of dim as the input.\n pattern = []\n out_dim = 0\n if NoneConst.equals(axis):\n # We are taking the max/argmax over all dimensions.\n axis = None\n for i in range(x.ndim):\n if axis is None or i in axis.data:\n pattern.append(\"x\")\n else:\n pattern.append(out_dim)\n out_dim += 1\n g_max_pad = DimShuffle(g_max.broadcastable, pattern)(g_max)\n xmax_pad = DimShuffle(xmax.broadcastable, pattern)(xmax)\n\n # Set the grad to the correct position.\n g_x = eq(xmax_pad, x) * g_max_pad\n return (g_x,)\n\n\nclass Argmax(COp):\n \"\"\"\n Calculate the argmax over a given axis or over all axes.\n \"\"\"\n\n nin = 2 # tensor, axis\n nout = 1\n E_axis = \"invalid axis\"\n __props__ = (\"axis\",)\n _f16_ok = True\n\n params_type = ParamsType(c_axis=aes.int64)\n\n def __init__(self, axis):\n if axis is not None:\n axis = tuple(axis)\n self.axis = tuple(axis)\n\n def get_params(self, node):\n if self.axis is not None and len(self.axis) == 1:\n c_axis = np.int64(self.axis[0])\n else:\n # The value here doesn't matter, it won't be used\n c_axis = np.int64(-1)\n return self.params_type.get_params(c_axis=c_axis)\n\n def make_node(self, x, axis=None):\n x = as_tensor_variable(x)\n if self.axis is None:\n all_axes = list(range(x.ndim))\n else:\n all_axes = self.axis\n inputs = [x]\n\n # We keep the original broadcastable flags for dimensions on which\n # we do not perform the argmax.\n broadcastable = [\n b for i, b in enumerate(x.type.broadcastable) if i not in all_axes\n ]\n outputs = [tensor(\"int64\", broadcastable, name=\"argmax\")]\n return Apply(self, inputs, outputs)\n\n def prepare_node(self, node, storage_map, compute_map, impl):\n if len(node.inputs) == 2:\n raise ValueError(\n \"You are trying to compile a graph with an old Argmax node. 
Either reoptimize your graph or rebuild it to get the new node format.\"\n )\n\n def perform(self, node, inp, outs, params):\n (x,) = inp\n axes = self.axis\n (max_idx,) = outs\n if axes is None:\n axes = tuple(range(x.ndim))\n\n # Numpy does not support multiple axes for argmax\n # Work around\n keep_axes = np.array([i for i in range(x.ndim) if i not in axes], dtype=\"int64\")\n # Not-reduced axes in front\n transposed_x = np.transpose(x, np.concatenate((keep_axes, axes)))\n kept_shape = transposed_x.shape[: len(keep_axes)]\n reduced_shape = transposed_x.shape[len(keep_axes) :]\n new_shape = kept_shape + (np.prod(reduced_shape),)\n reshaped_x = transposed_x.reshape(new_shape)\n\n max_idx[0] = _asarray(np.argmax(reshaped_x, axis=-1), dtype=\"int64\")\n\n def c_code(self, node, name, inp, out, sub):\n (x,) = inp\n (argmax,) = out\n fail = sub[\"fail\"]\n params = sub[\"params\"]\n if self.axis is None:\n axis_code = \"axis = NPY_MAXDIMS;\"\n else:\n if len(self.axis) > 1:\n raise NotImplementedError()\n # params is only used here for now\n axis_code = (\n \"\"\"\n axis = %(params)s->c_axis;\n if(axis > PyArray_NDIM(%(x)s)-1 || axis < -PyArray_NDIM(%(x)s)){\n PyErr_SetString(PyExc_ValueError,\n \"Argmax, bad axis argument\");\n %(fail)s\n }\n \"\"\"\n % locals()\n )\n ret = \"\"\"\n int axis;\n\n Py_CLEAR(%(argmax)s);//todo pass them as out parameter.\n %(axis_code)s\n\n %(argmax)s = (PyArrayObject*)PyArray_ArgMax(%(x)s, axis, NULL);\n if(%(argmax)s == NULL){\n %(fail)s;\n }\n if(!PyArray_CheckExact(%(argmax)s)){\n %(argmax)s = (PyArrayObject*)PyArray_FromAny((PyObject*)%(argmax)s, NULL, 0, 0, NPY_ARRAY_ENSUREARRAY, NULL);\n if(%(argmax)s == NULL){\n %(fail)s;\n }\n }\n if(PyArray_TYPE(%(argmax)s) != NPY_INT64){\n PyObject * tmp = PyArray_Cast(%(argmax)s, NPY_INT64);\n if (NULL == tmp){\n %(fail)s;\n }\n Py_DECREF(%(argmax)s);\n %(argmax)s = (PyArrayObject*)tmp;\n }\n \"\"\"\n return ret % locals()\n\n def c_code_cache_version(self):\n return (1,)\n\n def infer_shape(self, fgraph, node, shapes):\n (ishape,) = shapes\n if self.axis is None:\n return [()]\n rval = tuple(\n [\n ishape[i]\n for (i, b) in enumerate(node.inputs[0].type.broadcastable)\n if i not in self.axis\n ]\n )\n return [rval]\n\n def grad(self, inp, grads):\n (x,) = inp\n\n return [x.zeros_like()]\n\n\ndef makeKeepDims(x, y, axis):\n \"\"\"\n Reintroduces in y with length one the axes of x which have been left out\n in a prior reduction of x. 
With this option, the resulting tensor will\n broadcast correctly against the original tensor x.\n\n \"\"\"\n x = as_tensor_variable(x)\n y = as_tensor_variable(y)\n\n if axis is None:\n axis = list(range(x.type.ndim))\n elif isinstance(axis, (int, np.integer)):\n axis = [axis]\n elif isinstance(axis, np.ndarray) and axis.ndim == 0:\n axis = [int(axis)]\n else:\n axis = [int(a) for a in axis]\n newaxis = []\n for a in axis:\n if not isinstance(a, int):\n raise ValueError(\"keepdims option can be used only with constant axis\")\n if a < 0:\n a += x.type.ndim\n newaxis.append(a)\n i = 0\n new_dims = []\n for j, _ in enumerate(x.type.broadcastable):\n if j in newaxis:\n new_dims.append(\"x\")\n else:\n new_dims.append(i)\n i += 1\n return DimShuffle(y.type.broadcastable, new_dims)(y)\n\n\ndef check_and_normalize_axes(x, axis):\n \"\"\"Check axes, normalize and convert them to a Python list of integers.\n\n Parameters\n ----------\n x: TensorVariable\n axis: int, tuple or list of integers\n\n Returns\n -------\n axis: list of integers\n Return an empty list if argument is None.\n\n \"\"\"\n x = as_tensor_variable(x)\n if axis is None:\n axis = []\n elif isinstance(axis, (int, np.integer)) or (\n isinstance(axis, np.ndarray) and axis.ndim == 0\n ):\n axis = [int(axis)]\n elif isinstance(axis, (tuple, list, np.ndarray)):\n axis = [int(i) for i in axis]\n elif isinstance(axis, Variable):\n if NoneConst.equals(axis):\n axis = []\n elif not isinstance(axis, TensorConstant):\n raise TypeError(f\"Computation needs a constant axis. Got {axis}\")\n else:\n assert axis.dtype in integer_dtypes\n if isinstance(axis.data, (int, np.integer)) or (\n isinstance(axis.data, np.ndarray) and axis.data.ndim == 0\n ):\n axis = [int(axis.data)]\n elif isinstance(axis.data, (list, np.ndarray)):\n axis = [int(i) for i in axis.data]\n else:\n raise TypeError(\n f\"Axis must be an integer, tuple, list of integers or a TensorVariable. Got {axis}\"\n )\n if len(axis) > 0:\n for i in range(len(axis)):\n if axis[i] < 0:\n axis[i] += x.type.ndim\n if axis[i] < 0 or axis[i] >= x.type.ndim:\n raise ValueError(\n f\"Computation needs a valid axis number for {int(x.type.ndim)}-D tensor. Got {int(axis[i])}\"\n )\n axis = list(set(axis))\n axis.sort()\n return axis\n\n\ndef max_and_argmax(a, axis=None, keepdims=False):\n \"\"\"\n Returns maximum elements and their indices obtained by iterating over\n given axis.\n\n When axis is None (the default value), the max is performed\n over the flattened tensor.\n\n Parameters\n ----------\n keepdims : bool\n If this is set to True, the axes which are reduced are left in\n the result as dimensions with size one. 
With this option, the result\n will broadcast correctly against the original tensor.\n\n \"\"\"\n # Check axis and convert it to a Python list of integers.\n # Axis will be used as an op param of MaxAndArgmax.\n a = as_tensor_variable(a)\n axis = check_and_normalize_axes(a, axis)\n if len(axis) == 0:\n axis = list(range(a.type.ndim))\n out, argout = MaxAndArgmax(axis)(a)\n\n if keepdims:\n out = makeKeepDims(a, out, axis)\n argout = makeKeepDims(a, argout, axis)\n return [out, argout]\n\n\nclass NonZeroCAReduce(CAReduce):\n def _c_all(self, node, name, inames, onames, sub):\n decl, checks, alloc, loop, end = super()._c_all(node, name, inames, onames, sub)\n\n # We add an additional check for zero-sized dimensions (This seems like\n # something that could enabled in `elemwise_cgen.make_checks`.)\n iname = inames[0]\n\n axis = self.axis\n if axis is None:\n axis = list(range(len(node.inputs[0].type.broadcastable)))\n\n pattern = [0] * len(node.inputs[0].broadcastable)\n for i in axis:\n pattern[i] = 1\n\n pattern_ = str(pattern)[1:-1]\n\n decl += f\"\"\"int tosum[]={{{pattern_}}};\"\"\"\n alloc += f\"\"\"\n for(int i=0;i<PyArray_NDIM({iname});i++){{\n if(PyArray_DIMS({iname})[i]==0 && tosum[i]){{\n PyErr_Format(PyExc_ValueError,\n \"Input of CAReduce{{{node.op.scalar_op}}} has zero-size on axis %%d\",i);\n {sub[\"fail\"]};\n }}\n }}\n \"\"\"\n return decl, checks, alloc, loop, end\n\n\nclass Max(NonZeroCAReduce):\n nfunc_spec = (\"max\", 1, 1)\n\n def __init__(self, axis):\n super().__init__(aes.scalar_maximum, axis)\n\n\nclass Min(NonZeroCAReduce):\n nfunc_spec = (\"min\", 1, 1)\n\n def __init__(self, axis):\n super().__init__(aes.scalar_minimum, axis)\n\n\ndef max(x, axis=None, keepdims=False):\n \"\"\"\n Returns maximum elements obtained by iterating over given axis.\n\n When axis is None (the default value), the max is performed\n over the flattened tensor.\n\n Parameters\n ----------\n keepdims: bool\n If this is set to True, the axes which are reduced are left in\n the result as dimensions with size one. With this option, the result\n will broadcast correctly against the original tensor.\n\n Notes\n -----\n We return an error as numpy when we reduce a dim with a shape of 0.\n\n \"\"\"\n\n # We have a choice of implementing this call with the\n # CAReduce op or the MaxAndArgmax op.\n\n # MaxAndArgmax supports grad and Rop, so we prefer to use that.\n # CAReduce is faster, but optimizations will replace MaxAndArgmax[0]\n # with CAReduce at compile time, so at this stage the important\n # thing is supporting all user interface features, not speed.\n # Some cases can be implemented only with CAReduce.\n\n # We thus prefer to use MaxAndArgmax, if possible. It does not\n # support all axis arguments, so we may need to fall back to CAReduce.\n\n try:\n out = max_and_argmax(x, axis)[0]\n except Exception:\n out = Max(axis)(x)\n\n if keepdims:\n out = makeKeepDims(x, out, axis)\n return out\n\n\ndef argmax(x, axis=None, keepdims=False):\n \"\"\"\n Returns indices of maximum elements obtained by iterating over given axis.\n\n When axis is None (the default value), the argmax is performed\n over the flattened tensor.\n\n Parameters\n ----------\n keepdims : bool\n If this is set to True, the axes which are reduced are left in\n the result as dimensions with size one. 
With this option, the result\n will broadcast correctly against the original tensor.\n\n \"\"\"\n argout = max_and_argmax(x, axis)[1]\n\n if keepdims:\n argout = makeKeepDims(x, argout, axis)\n return argout\n\n\ndef min(x, axis=None, keepdims=False):\n \"\"\"\n Returns minimum elements obtained by iterating over given axis.\n\n When axis is None (the default value), the min is performed\n over the flattened tensor.\n\n Parameters\n ----------\n keepdims: bool\n If this is set to True, the axes which are reduced are left in\n the result as dimensions with size one. With this option, the result\n will broadcast correctly against the original tensor.\n\n \"\"\"\n x = as_tensor_variable(x)\n str_x_type = str(x.dtype)\n if str_x_type.startswith(\"float\") or str_x_type in int_dtypes:\n return -max(-x, axis=axis, keepdims=keepdims)\n elif str_x_type in uint_dtypes:\n itype = np.iinfo(x.dtype)\n max_val = np.array(itype.max, dtype=itype.dtype)\n return max_val - max(max_val - x, axis=axis, keepdims=keepdims)\n elif str_x_type == \"bool\":\n return ~max(~x, axis=axis, keepdims=keepdims)\n else:\n # Be careful about unsigned integers, complex\n raise NotImplementedError()\n\n\ndef argmin(x, axis=None, keepdims=False):\n \"\"\"\n Returns indices of minimum elements obtained by iterating over given axis.\n\n When axis is None (the default value), the argmin is performed\n over the flattened tensor.\n\n Parameters\n ----------\n keepdims: bool\n If this is set to True, the axes which are reduced are left in\n the result as dimensions with size one. With this option, the result\n will broadcast correctly against the original tensor.\n\n \"\"\"\n x = as_tensor_variable(x)\n str_x_type = str(x.dtype)\n if str_x_type.startswith(\"float\") or str_x_type in int_dtypes:\n return argmax(-x, axis=axis, keepdims=keepdims)\n elif str_x_type in uint_dtypes:\n itype = np.iinfo(x.dtype)\n return argmax(itype.max - x, axis=axis, keepdims=keepdims)\n elif str_x_type == \"bool\":\n return argmax(~x, axis=axis, keepdims=keepdims)\n else:\n # Be careful about unsigned integers, complex\n raise NotImplementedError()\n\n\ndef smallest(*args):\n \"\"\"\n Return the [elementwise] smallest of a variable number of arguments.\n\n Like python's min.\n\n \"\"\"\n if len(args) == 2:\n a, b = args\n return switch(a < b, a, b)\n else:\n return min(stack(args), axis=0)\n\n\ndef largest(*args):\n \"\"\"\n Return the [elementwise] largest of a variable number of arguments.\n\n Like python's max.\n\n \"\"\"\n if len(args) == 2:\n a, b = args\n return switch(a > b, a, b)\n else:\n return max(stack(args), axis=0)\n\n\n@scalar_elemwise\ndef lt(a, b):\n \"\"\"a < b\"\"\"\n\n\n@scalar_elemwise\ndef gt(a, b):\n \"\"\"a > b\"\"\"\n\n\n@scalar_elemwise\ndef le(a, b):\n \"\"\"a <= b\"\"\"\n\n\n@scalar_elemwise\ndef ge(a, b):\n \"\"\"a >= b\"\"\"\n\n\n@scalar_elemwise\ndef eq(a, b):\n \"\"\"a == b\"\"\"\n\n\n@scalar_elemwise\ndef neq(a, b):\n \"\"\"a != b\"\"\"\n\n\n@scalar_elemwise\ndef isnan(a):\n \"\"\"isnan(a)\"\"\"\n\n\n# Rename isnan to isnan_ to allow to bypass it when not needed.\n# glibc 2.23 don't allow isnan on int, so we remove it from the graph.\nisnan_ = isnan\n\n\ndef isnan(a):\n \"\"\"isnan(a)\"\"\"\n a = as_tensor_variable(a)\n if a.dtype in discrete_dtypes:\n return alloc(\n np.asarray(False, dtype=\"bool\"), *[a.shape[i] for i in range(a.ndim)]\n )\n return isnan_(a)\n\n\n@scalar_elemwise\ndef isinf(a):\n \"\"\"isinf(a)\"\"\"\n\n\n# Rename isnan to isnan_ to allow to bypass it when not needed.\n# glibc 2.23 don't allow isnan 
on int, so we remove it from the graph.\nisinf_ = isinf\n\n\ndef isinf(a):\n \"\"\"isinf(a)\"\"\"\n a = as_tensor_variable(a)\n if a.dtype in discrete_dtypes:\n return alloc(\n np.asarray(False, dtype=\"bool\"), *[a.shape[i] for i in range(a.ndim)]\n )\n return isinf_(a)\n\n\ndef allclose(a, b, rtol=1.0e-5, atol=1.0e-8, equal_nan=False):\n \"\"\"\n Implement Numpy's ``allclose`` on tensors.\n\n ``absolute(a - b) <= (atol + rtol * absolute(b))``\n\n Parameters\n ----------\n a : tensor\n Input to compare.\n b : tensor\n Input to compare.\n rtol : float\n The relative tolerance parameter.\n atol : float\n The absolute tolerance parameter.\n equal_nan: bool\n Whether to consider nan's in the same place to be close.\n\n Returns\n -------\n bool\n A boolean value (of type int8 returned by the tensor elementwise `all`\n function) whether all elements in a and b are in the tolerance range\n defined above.\n\n Notes\n -----\n Not a symmetric equation. See Numpy's documentation.\n\n \"\"\"\n return all(isclose(a, b, rtol, atol, equal_nan))\n\n\ndef isclose(a, b, rtol=1.0e-5, atol=1.0e-8, equal_nan=False):\n \"\"\"\n Implements Numpy's ``isclose`` on tensors.\n\n The tolerance values are positive, typically very small numbers. The\n relative difference (`rtol` * abs(`b`)) and the absolute difference\n `atol` are added together to compare against the absolute difference\n between `a` and `b`.\n\n ``absolute(a - b) <= (atol + rtol * absolute(b))``\n\n Parameters\n ----------\n a : tensor\n Input to compare.\n b : tensor\n Input to compare.\n rtol : float\n The relative tolerance parameter.\n atol : float\n The absolute tolerance parameter.\n equal_nan : bool\n Whether to consider nan's in the same place to be close\n\n Returns\n -------\n int8\n A boolean (int8) array where two arrays are element-wise equal\n within a tolerance.\n\n Notes\n -----\n Not a symmetric equation. 
See Numpy's documentation.\n\n Examples\n --------\n >>> import aesara\n >>> import numpy as np\n >>> a = _asarray([1e10, 1e-7], dtype=\"float64\")\n >>> b = _asarray([1.00001e10, 1e-8], dtype=\"float64\")\n >>> aesara.tensor.isclose(a, b).eval()\n array([1, 0], dtype=int8)\n >>> a = _asarray([1e10, 1e-8], dtype=\"float64\")\n >>> b = _asarray([1.00001e10, 1e-9], dtype=\"float64\")\n >>> aesara.tensor.isclose(a, b).eval()\n array([1, 1], dtype=int8)\n >>> a = _asarray([1e10, 1e-8], dtype=\"float64\")\n >>> b = _asarray([1.0001e10, 1e-9], dtype=\"float64\")\n >>> aesara.tensor.isclose(a, b).eval()\n array([0, 1], dtype=int8)\n >>> a = _asarray([1.0, np.nan], dtype=\"float64\")\n >>> b = _asarray([1.0, np.nan], dtype=\"float64\")\n >>> aesara.tensor.isclose(a, b).eval()\n array([1, 0], dtype==int8)\n >>> a = _asarray([1.0, np.nan], dtype=\"float64\")\n >>> b = _asarray([1.0, np.nan], dtype=\"float64\")\n >>> aesara.tensor.isclose(a, b, equal_nan=True).eval()\n array([1, 1], dtype==int8)\n >>> a = _asarray([1.0, np.inf], dtype=\"float64\")\n >>> b = _asarray([1.0, -np.inf], dtype=\"float64\")\n >>> aesara.tensor.isclose(a, b).eval()\n array([1, 0], dtype==int8)\n >>> a = _asarray([1.0, np.inf], dtype=\"float64\")\n >>> b = _asarray([1.0, np.inf], dtype=\"float64\")\n >>> aesara.tensor.isclose(a, b).eval()\n array([1, 1], dtype==int8)\n\n \"\"\"\n # close will be an int8 array of 1 where within tolerance\n # and 0 where not within tolerance or there was a nan or inf value.\n diff = _abs(a - b)\n tolerance = atol + rtol * _abs(b)\n close_prelim = le(diff, tolerance)\n\n a_nan = isnan(a)\n b_nan = isnan(b)\n nans = bitwise_or(a_nan, b_nan)\n\n a_inf = isinf(a)\n b_inf = isinf(b)\n infs = bitwise_or(a_inf, b_inf)\n\n nans_or_infs = bitwise_or(nans, infs)\n\n # close is now an array of 0's except where elements are not nan or inf\n # and are within the tolerance.\n close = bitwise_and(close_prelim, bitwise_not(nans_or_infs))\n\n # deal with signed inf values. 
this will make an array inf_eq of 0's\n # except where inf values have the same sign.\n both_infs = bitwise_and(a_inf, b_inf)\n inf_signs_eq = eq(a_inf * sgn(a), b_inf * sgn(b))\n inf_eq = bitwise_and(both_infs, inf_signs_eq)\n\n # now create the potential result combining close and inf_eq\n close_with_infs = bitwise_or(close, inf_eq)\n\n # deal with comparing nan's.\n if equal_nan:\n both_nans = bitwise_and(a_nan, b_nan)\n return bitwise_or(close_with_infs, both_nans)\n # otherwise nan's aren't considered close.\n else:\n return close_with_infs\n\n\n##########################\n# Bit-wise\n##########################\n\n\n@scalar_elemwise\ndef and_(a, b):\n \"\"\"bitwise a & b\"\"\"\n\n\nbitwise_and = and_ # numpy name for it\n\n\n@scalar_elemwise\ndef or_(a, b):\n \"\"\"bitwise a | b\"\"\"\n\n\nbitwise_or = or_ # numpy name for it\n\n\n@scalar_elemwise\ndef xor(a, b):\n \"\"\"bitwise a ^ b\"\"\"\n\n\nbitwise_xor = xor # numpy name for it\n\n\n@scalar_elemwise\ndef invert(a):\n \"\"\"bitwise ~a\"\"\"\n\n\nbitwise_not = invert # numpy alias for it\n\n##########################\n# Math\n##########################\n\n\n@scalar_elemwise\ndef abs(a):\n \"\"\"|`a`|\"\"\"\n\n\n# These are deprecated and will be removed\nabs_ = abs\n\n\npprint.assign(abs, printing.PatternPrinter((\"|%(0)s|\", -1000)))\n\n\n@scalar_elemwise\ndef exp(a):\n \"\"\"e^`a`\"\"\"\n\n\n@scalar_elemwise\ndef exp2(a):\n \"\"\"2^`a`\"\"\"\n\n\n@scalar_elemwise\ndef expm1(a):\n \"\"\"e^`a` - 1\"\"\"\n\n\n@scalar_elemwise\ndef neg(a):\n \"\"\"-a\"\"\"\n\n\n@scalar_elemwise\ndef reciprocal(a):\n \"\"\"1.0/a\"\"\"\n\n\n# This is deprecated and will be removed\ninv = reciprocal\n\n\n@scalar_elemwise\ndef log(a):\n \"\"\"base e logarithm of a\"\"\"\n\n\n@scalar_elemwise\ndef log2(a):\n \"\"\"base 2 logarithm of a\"\"\"\n\n\n@scalar_elemwise\ndef log10(a):\n \"\"\"base 10 logarithm of a\"\"\"\n\n\n@scalar_elemwise\ndef log1p(a):\n \"\"\"log(1+a)\"\"\"\n\n\n@scalar_elemwise\ndef sgn(a):\n \"\"\"sign of a\"\"\"\n\n\n@scalar_elemwise\ndef ceil(a):\n \"\"\"ceiling of a\"\"\"\n\n\n@scalar_elemwise\ndef floor(a):\n \"\"\"floor of a\"\"\"\n\n\n@scalar_elemwise\ndef trunc(a):\n \"\"\"trunc of a\"\"\"\n\n\ndef iround(a, mode=None):\n \"\"\"cast(round(a,mode),'int64')\"\"\"\n return cast(round(a, mode), \"int64\")\n\n\ndef round(a, mode=None):\n \"\"\"round_mode(a) with mode in [half_away_from_zero, half_to_even].\n Default to half_to_even.\"\"\"\n if mode is None:\n mode = \"half_to_even\"\n if config.warn__round:\n warnings.warn(\n \"aesara.tensor.round() changed its default from\"\n \" `half_away_from_zero` to `half_to_even` to have\"\n \" the same default as NumPy. 
Use the Aesara flag\"\n \" `warn__round=False` to disable this warning.\"\n )\n if mode == \"half_away_from_zero\":\n return round_half_away_from_zero(a)\n elif mode == \"half_to_even\":\n return round_half_to_even(a)\n else:\n raise Exception(f\"round mode {mode} is not implemented.\")\n\n\n@scalar_elemwise\ndef round_half_to_even(a):\n \"\"\"round_half_to_even(a)\"\"\"\n\n\n@scalar_elemwise\ndef round_half_away_from_zero(a):\n \"\"\"round_half_away_from_zero(a)\"\"\"\n\n\n@scalar_elemwise\ndef sqr(a):\n \"\"\"square of a\"\"\"\n\n\n# alias to sqr, included to maintain similarity with numpy interface\nsquare = sqr\n\n\ndef cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, aweights=None):\n \"\"\"Calculate the covariance matrix.\n\n Covariance indicates the level to which two variables vary together.\n If we examine N-dimensional samples, :math:`m = [x_1, x_2, ... x_N]^T`,\n then the covariance matrix element :math:`C_{ij}` is the covariance of\n :math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance\n of :math:`x_i`. Code and docstring ported from numpy.\n\n Parameters\n ==========\n m : array_like\n A 2-D array containing multiple variables and observations.\n Each row of `m` represents a variable, and each column is\n observations of all those variables.\n y : array_like, optional\n An additional set of variables and observations. `y` has the same form\n as that of `m`.\n rowvar : bool, optional\n If `rowvar` is True (default), then each row represents a\n variable, with observations in the columns. Otherwise, the relationship\n is transposed: each column represents a variable, while the rows\n contain observations.\n bias : bool, optional\n Default normalization (False) is by ``(N - 1)``, where ``N`` is the\n number of observations given (unbiased estimate). If `bias` is True, then\n normalization is by ``N``. 
These values can be overridden by using the\n keyword ``ddof``.\n ddof : int, optional\n If not ``None`` the default value implied by `bias` is overridden.\n The default value is ``None``.\n\n Returns\n =======\n out : The covariance matrix of the variables.\n\n \"\"\"\n\n if fweights is not None:\n raise NotImplementedError(\"fweights are not implemented\")\n if aweights is not None:\n raise NotImplementedError(\"aweights are not implemented\")\n\n if not rowvar and m.shape[0] != 1:\n m = m.T\n\n if y is not None:\n if not rowvar and y.shape[0] != 1:\n y = y.T\n m = concatenate((m, y), axis=0)\n\n if ddof is None:\n if not bias:\n ddof = 1\n else:\n ddof = 0\n\n # Determine the normalization\n fact = m.shape[1] - ddof\n\n m -= m.mean(axis=1, keepdims=1)\n c = m.dot(m.T)\n c *= constant(1) / fact\n return c.squeeze()\n\n\n@scalar_elemwise\ndef sqrt(a):\n \"\"\"square root of a\"\"\"\n\n\n@scalar_elemwise\ndef deg2rad(a):\n \"\"\"convert degree a to radian\"\"\"\n\n\n@scalar_elemwise\ndef rad2deg(a):\n \"\"\"convert radian a to degree\"\"\"\n\n\n@scalar_elemwise\ndef cos(a):\n \"\"\"cosine of a\"\"\"\n\n\n@scalar_elemwise\ndef arccos(a):\n \"\"\"arccosine of a\"\"\"\n\n\n@scalar_elemwise\ndef sin(a):\n \"\"\"sine of a\"\"\"\n\n\n@scalar_elemwise\ndef arcsin(a):\n \"\"\"arcsine of a\"\"\"\n\n\n@scalar_elemwise\ndef tan(a):\n \"\"\"tangent of a\"\"\"\n\n\n@scalar_elemwise\ndef arctan(a):\n \"\"\"arctangent of a\"\"\"\n\n\n@scalar_elemwise\ndef arctan2(a, b):\n \"\"\"arctangent of a / b\"\"\"\n\n\n@scalar_elemwise\ndef cosh(a):\n \"\"\"hyperbolic cosine of a\"\"\"\n\n\n@scalar_elemwise\ndef arccosh(a):\n \"\"\"hyperbolic arc cosine of a\"\"\"\n\n\n@scalar_elemwise\ndef sinh(a):\n \"\"\"hyperbolic sine of a\"\"\"\n\n\n@scalar_elemwise\ndef arcsinh(a):\n \"\"\"hyperbolic arc sine of a\"\"\"\n\n\n@scalar_elemwise\ndef tanh(a):\n \"\"\"hyperbolic tangent of a\"\"\"\n\n\n@scalar_elemwise\ndef arctanh(a):\n \"\"\"hyperbolic arc tangent of a\"\"\"\n\n\n@scalar_elemwise\ndef erf(a):\n \"\"\"error function\"\"\"\n\n\n@scalar_elemwise\ndef erfc(a):\n \"\"\"complementary error function\"\"\"\n\n\n@scalar_elemwise\ndef erfcx(a):\n \"\"\"scaled complementary error function\"\"\"\n\n\n@scalar_elemwise\ndef erfinv(a):\n \"\"\"inverse error function\"\"\"\n\n\n@scalar_elemwise\ndef erfcinv(a):\n \"\"\"inverse complementary error function\"\"\"\n\n\n@scalar_elemwise\ndef gamma(a):\n \"\"\"gamma function\"\"\"\n\n\n@scalar_elemwise\ndef gammaln(a):\n \"\"\"log gamma function\"\"\"\n\n\n@scalar_elemwise\ndef psi(a):\n \"\"\"derivative of log gamma function\"\"\"\n\n\n@scalar_elemwise\ndef tri_gamma(a):\n \"\"\"second derivative of the log gamma function\"\"\"\n\n\n@scalar_elemwise\ndef chi2sf(x, k):\n \"\"\"chi squared survival function\"\"\"\n\n\n@scalar_elemwise\ndef gammainc(k, x):\n \"\"\"Regularized lower gamma function\"\"\"\n\n\n@scalar_elemwise\ndef gammaincc(k, x):\n \"\"\"Regularized upper gamma function\"\"\"\n\n\n@scalar_elemwise\ndef gammau(k, x):\n \"\"\"Upper incomplete gamma function.\"\"\"\n\n\n@scalar_elemwise\ndef gammal(k, x):\n \"\"\"Lower incomplete gamma function.\"\"\"\n\n\n@scalar_elemwise\ndef j0(x):\n \"\"\"Bessel function of the first kind of order 0.\"\"\"\n\n\n@scalar_elemwise\ndef j1(x):\n \"\"\"Bessel function of the first kind of order 1.\"\"\"\n\n\n@scalar_elemwise\ndef jv(v, x):\n \"\"\"Bessel function of the first kind of order v (real).\"\"\"\n\n\n@scalar_elemwise\ndef i0(x):\n \"\"\"Modified Bessel function of the first kind of order 0.\"\"\"\n\n\n@scalar_elemwise\ndef 
i1(x):\n \"\"\"Modified Bessel function of the first kind of order 1.\"\"\"\n\n\n@scalar_elemwise\ndef iv(v, x):\n \"\"\"Modified Bessel function of the first kind of order v (real).\"\"\"\n\n\n@scalar_elemwise\ndef sigmoid(x):\n \"\"\"Logistic sigmoid function (1 / (1 + exp(x)), also known as expit or inverse logit\"\"\"\n\n\nexpit = sigmoid\n\n\n@scalar_elemwise\ndef softplus(x):\n \"\"\"Compute log(1 + exp(x)), also known as softplus or log1pexp\"\"\"\n\n\nlog1pexp = softplus\n\n\n@scalar_elemwise\ndef log1mexp(x):\n \"\"\"Compute log(1 - exp(x)), also known as log1mexp\"\"\"\n\n\n@scalar_elemwise\ndef betainc(a, b, x):\n \"\"\"Regularized incomplete beta function\"\"\"\n\n\n@scalar_elemwise\ndef real(z):\n \"\"\"Return real component of complex-valued tensor `z`\"\"\"\n\n\n_tensor_py_operators.real = property(real)\n\n\n@scalar_elemwise\ndef imag(z):\n \"\"\"Return imaginary component of complex-valued tensor `z`\"\"\"\n\n\n_tensor_py_operators.imag = property(imag)\n\n\n@scalar_elemwise\ndef angle(z):\n \"\"\"Return polar-coordinate angle of complex-valued tensor `z`\"\"\"\n\n\n@scalar_elemwise # numpy.complex cannot build tensors\ndef complex(real, imag):\n \"\"\"Return complex-valued tensor with `real` and `imag` components\"\"\"\n\n\n@scalar_elemwise\ndef conj(z):\n \"\"\"Return the complex conjugate of `z`.\"\"\"\n\n\n@scalar_elemwise\ndef complex_from_polar(abs, angle):\n \"\"\"Return complex-valued tensor from polar coordinate specification.\"\"\"\n\n\nclass Mean(CAReduce):\n def __init__(self, axis=None):\n super().__init__(aes.add, axis)\n assert self.axis is None or len(self.axis) == 1\n\n def __str__(self):\n if self.axis is not None:\n return \"Mean{%s}\" % (\", \".join(str(x) for x in self.axis))\n else:\n return \"Mean\"\n\n def _output_dtype(self, idtype):\n # we want to protect against overflow\n return \"float64\"\n\n def perform(self, node, inp, out):\n (input,) = inp\n (output,) = out\n if self.axis is None:\n axis = None\n else:\n axis = self.axis[0]\n # numpy.asarray is needed as otherwise we can end up with a\n # numpy scalar.\n output[0] = np.asarray(np.mean(input, dtype=\"float64\", axis=axis))\n\n def c_code(self, node, name, inames, onames, sub):\n\n ret = super().c_code(node, name, inames, onames, sub)\n\n if self.axis is not None:\n return ret\n\n # TODO: c_code perform support only axis is None\n return (\n ret\n + f\"\"\"\n *((double *)PyArray_DATA({onames[0]})) /= PyArray_SIZE({inames[0]});\n \"\"\"\n )\n\n\n# TODO: implement the grad. When done and tested, you can make this the default\n# version.\n# def grad(self, (x,), (gout,)):\n# import pdb;pdb.set_trace()\n# return grad(mean(x, self.axis, op=False),[x])\n\n\ndef mean(input, axis=None, dtype=None, op=False, keepdims=False, acc_dtype=None):\n \"\"\"\n Computes the mean value along the given axis(es) of a tensor `input`.\n\n Parameters\n ----------\n axis : None or int or (list of int) (see `Sum`)\n Compute the mean along this axis of the tensor.\n None means all axes (like numpy).\n dtype: None or string\n Dtype to cast the result of the inner summation into.\n For instance, by default, a sum of a float32 tensor will be\n done in float64 (acc_dtype would be float64 by default),\n but that result will be casted back in float32.\n keepdims: bool\n If this is set to True, the axes which are reduced are\n left in the result as dimensions with size one. With this option,\n the result will broadcast correctly against the original tensor.\n acc_dtype: None or string\n Dtype to use for the inner summation. 
This will not\n necessarily be the dtype of the output (in particular\n if it is a discrete (int/uint) dtype, the output will\n be in a float type). If None, then we use the same rules as `sum()`.\n\n Notes\n -----\n For gpu, if you specify dtype=float32, everything will be done on the gpu.\n\n \"\"\"\n input = as_tensor_variable(input)\n if op:\n if dtype not in (None, \"float64\"):\n raise NotImplementedError(\n \"The Mean op does not support the dtype argument, \"\n \"and will always use float64. If you want to specify \"\n \"the dtype, call tensor.mean(..., op=False).\",\n dtype,\n )\n if acc_dtype not in (None, \"float64\"):\n raise NotImplementedError(\n \"The Mean op does not support the acc_dtype argument, \"\n \"and will always use float64. If you want to specify \"\n \"acc_dtype, call tensor.mean(..., op=False).\",\n dtype,\n )\n out = Mean(axis)(input)\n if keepdims:\n out = makeKeepDims(input, out, axis)\n return out\n\n if dtype is not None:\n # The summation will be done with the specified dtype.\n # sum() will complain if it is not suitable.\n sum_dtype = dtype\n else:\n sum_dtype = None\n # float16 overflows on the cast way too often\n if input.dtype == \"float16\":\n sum_dtype = \"float32\"\n\n s = sum(input, axis=axis, dtype=sum_dtype, keepdims=keepdims, acc_dtype=acc_dtype)\n shp = shape(input)\n\n # Cast shp into a float type\n # TODO Once we have a consistent casting policy, we could simply\n # use true_div.\n if s.dtype in (\"float16\", \"float32\", \"complex64\"):\n shp = cast(shp, \"float32\")\n else:\n shp = cast(shp, \"float64\")\n\n if axis is None:\n axis = list(range(input.ndim))\n elif isinstance(axis, (int, np.integer)):\n axis = [axis]\n elif isinstance(axis, np.ndarray) and axis.ndim == 0:\n axis = [int(axis)]\n else:\n axis = [int(a) for a in axis]\n\n # This sequential division will possibly be optimized by Aesara:\n for i in axis:\n s = true_div(s, shp[i])\n\n # This can happen when axis is an empty list/tuple\n if s.dtype != shp.dtype and s.dtype in discrete_dtypes:\n s = cast(s, shp.dtype)\n\n if dtype == \"float16\" or (dtype is None and input.dtype == \"float16\"):\n s = cast(s, \"float16\")\n s.name = \"mean\"\n return s\n\n\ndef var(input, axis=None, ddof=0, keepdims=False, corrected=False):\n \"\"\"\n Computes the variance along the given axis(es) of a tensor `input`.\n\n Parameters\n ----------\n axis: None or int or (list of int) (see `Sum`)\n Compute the variance along this axis of the tensor.\n None means all axes (like numpy).\n ddof: Degrees of freedom; 0 would compute the ML estimate, 1 would compute\n the unbiased estimate.\n keepdims : bool\n If this is set to True, the axes which are reduced are\n left in the result as dimensions with size one. With this option,\n the result will broadcast correctly against the original tensor.\n corrected : bool\n If this is set to True, the 'corrected_two_pass' algorithm is\n used to compute the variance.\n Refer : http://www.cs.yale.edu/publications/techreports/tr222.pdf\n\n Notes\n -----\n Default uses the two-pass algorithm (reference below).\n https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Two-pass_algorithm\n Also supports 'corrected_two_pass' algorithm (using the 'corrected' flag)\n which is numerically more stable. 
There exist other implementations that\n offer better stability, but probably slower.\n\n \"\"\"\n\n if isinstance(ddof, (bool)):\n raise ValueError(\n \"Parameter keepdims is now at index 3: (input, \\\n axis=None, ddof=0, keepdims=False, corrected=False)\"\n )\n\n input_ndim = input.type.ndim\n if axis is None:\n axis = list(range(input_ndim))\n elif isinstance(axis, (int, np.integer)):\n axis = [axis]\n elif isinstance(axis, np.ndarray) and axis.ndim == 0:\n axis = [int(axis)]\n else:\n axis = [int(a) for a in axis]\n\n # compute the axis-wise mean\n mean_input = mean(input, axis, keepdims=True)\n\n # center the input\n centered_input = input - mean_input\n\n # return the mean sqr\n two = constant(2, dtype=centered_input.dtype)\n if ddof == 0:\n v = mean((centered_input ** two), axis, keepdims=keepdims)\n else:\n shp = shape(input) - ddof\n v = sum((centered_input ** two), axis=axis, keepdims=keepdims)\n for i in axis:\n v = true_div(v, shp[i])\n\n # use 'corrected_two_pass' algorithm\n if corrected:\n if ddof == 0:\n error = mean(centered_input, axis, keepdims=keepdims) ** 2\n else:\n shp = shape(input) - ddof\n shp_inp = shape(input)\n error = sum(centered_input, axis=axis, keepdims=keepdims) ** 2\n for i in axis:\n error = true_div(error, shp[i] * shp_inp[i])\n v = v - error\n\n v.name = \"var\"\n return v\n\n\ndef std(input, axis=None, ddof=0, keepdims=False, corrected=False):\n \"\"\"\n Computes the standard deviation along the given axis(es) of a tensor `input`.\n\n Parameters\n ----------\n axis: None or int or (list of int) (see `Sum`)\n Compute the variance along this axis of the tensor.\n None means all axes (like numpy).\n ddof: Degrees of freedom; 0 would compute the ML estimate, 1 would compute\n the unbiased estimate.\n keepdims : bool\n If this is set to True, the axes which are reduced are\n left in the result as dimensions with size one. With this option,\n the result will broadcast correctly against the original tensor.\n corrected : bool\n If this is set to True, the 'corrected_two_pass' algorithm is\n used to compute the variance.\n Refer : http://www.cs.yale.edu/publications/techreports/tr222.pdf\n\n Notes\n -----\n It calls 'var()' and 'var()' uses the two-pass algorithm (reference below).\n https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Two-pass_algorithm\n Function 'var()' also supports 'corrected_two_pass' algorithm (using the\n 'corrected' flag) which is numerically more stable. There exist other\n implementations that offer better stability, but probably slower.\n\n \"\"\"\n\n if isinstance(ddof, (bool)):\n raise ValueError(\n \"Parameter keepdims is now at index 3: (input, \\\n axis=None, ddof=0, keepdims=False, corrected=False)\"\n )\n\n ret = sqrt(\n var(input=input, axis=axis, ddof=ddof, keepdims=keepdims, corrected=corrected)\n )\n ret.name = \"std\"\n return ret\n\n\n@scalar_elemwise(symbolname=\"scalar_maximum\")\ndef maximum(x, y):\n \"\"\"elemwise maximum. See max for the maximum in one tensor\"\"\"\n # see decorator for function body\n\n\n@scalar_elemwise(symbolname=\"scalar_minimum\")\ndef minimum(x, y):\n \"\"\"elemwise minimum. 
See min for the minimum in one tensor\"\"\"\n # see decorator for function body\n\n\ndef divmod(x, y):\n \"\"\"elementvise divmod, using floor_div and mod_check\"\"\"\n return floor_div(x, y), mod_check(x, y)\n\n\n@scalar_elemwise\ndef add(a, *other_terms):\n \"\"\"elementwise addition\"\"\"\n # see decorator for function body\n\n\n@scalar_elemwise\ndef sub(a, b):\n \"\"\"elementwise subtraction\"\"\"\n # see decorator for function body\n\n\n@scalar_elemwise\ndef mul(a, *other_terms):\n \"\"\"elementwise multiplication\"\"\"\n # see decorator for function body\n\n\n@scalar_elemwise\ndef true_div(a, b):\n \"\"\"elementwise [true] division (inverse of multiplication)\"\"\"\n # see decorator for function body\n\n\n@scalar_elemwise\ndef int_div(a, b):\n \"\"\"elementwise [floor] division (inverse of multiplication)\"\"\"\n # see decorator for function body\n\n\n# floor_div and int_div are the same thing\nfloor_div = int_div\n\n\ndef ceil_intdiv(a, b):\n \"\"\"\n Safely compute ceil(float_division(a, b)).\n\n Works for all dtypes, but mostly useful when a and b are int.\n\n \"\"\"\n # If a and b are int with not many significant bits, we could\n # cast them to float to avoid doing the modulo. We do not know if this\n # is faster or not. But this is not safe for int64 as the cast will\n # lose precision.\n # e.g.: cast(cast(a, scalar.upcast(a, 'float32')) / b, aes.upcast(a, b))\n\n # We cast for the case when a and b are uint*. Otherwise neq will\n # force their upcast to int.\n div = int_div(a, b)\n ret = cast(neq(a % b, 0), div.dtype) + div\n assert ret.dtype == aes.upcast(div.owner.inputs[0], div.owner.inputs[1])\n return ret\n\n\ndef mod_check(x, y):\n \"\"\"Make sure we do not try to use complex numbers.\"\"\"\n if (\n as_tensor_variable(x).dtype in complex_dtypes\n or as_tensor_variable(y).dtype in complex_dtypes\n ):\n # Currently forbidden.\n raise aes.Mod.complex_error\n else:\n return mod(x, y)\n\n\n@scalar_elemwise\ndef mod(a, b):\n \"\"\"elementwise modulo\"\"\"\n # see decorator for function body\n\n\n@scalar_elemwise\ndef pow(a, b):\n \"\"\"elementwise power\"\"\"\n # see decorator for function body\n\n\n@scalar_elemwise\ndef clip(x, min, max):\n \"\"\"\n Clip x to be between min and max.\n\n Note that when `x` is equal to the boundaries, the output is considered\n to be `x`, so at these points, the gradient of the cost wrt the output\n will be propagated to `x`, not to `min` nor `max`. In other words,\n on these points, the gradient wrt `x` will be equal to the gradient wrt\n the output, and the gradient wrt `min` and `max` will be zero.\n\n \"\"\"\n # see decorator for function body\n # for grep: clamp, bound\n\n\npprint.assign(add, printing.OperatorPrinter(\"+\", -2, \"either\"))\npprint.assign(mul, printing.OperatorPrinter(\"*\", -1, \"either\"))\npprint.assign(sub, printing.OperatorPrinter(\"-\", -2, \"left\"))\npprint.assign(neg, printing.OperatorPrinter(\"-\", 0, \"either\"))\npprint.assign(true_div, printing.OperatorPrinter(\"/\", -1, \"left\"))\npprint.assign(int_div, printing.OperatorPrinter(\"//\", -1, \"left\"))\npprint.assign(pow, printing.OperatorPrinter(\"**\", 1, \"right\"))\n\n\nclass Dot(Op):\n \"\"\"\n Computes the dot product of two variables. For two matrices, this is\n equivalent to matrix multiplication. 
For two vectors, this is the inner\n product.\n\n Notes\n -----\n Matrix-matrix products are sometimes optimized to Dot22 or Gemm ops\n (see tensor.blas).\n Vector-vector products are sometimes optimized to Ger or CGer (see\n tensor.blas).\n Matrix-vector products are sometimes optimized to Gemv, CGemv (see\n tensor.blas).\n\n \"\"\"\n\n __props__ = ()\n\n # the rationale for Dot22 is related to getting GEMM Ops into the\n # graph. See Dot22 in tensor.blas for details.\n\n def make_node(self, *inputs):\n inputs = list(map(as_tensor_variable, inputs))\n\n if len(inputs) != 2:\n raise TypeError(f\"Two arguments required, {len(inputs)} given \")\n if inputs[0].ndim not in (1, 2):\n raise TypeError(\n \"Input 0 (0-indexed) must have ndim of \"\n f\"1 or 2, {int(inputs[0].ndim)} given. Consider calling \"\n \"aesara.tensor.dot instead.\"\n )\n if inputs[1].ndim not in (1, 2):\n raise TypeError(\n \"Input 1 (0-indexed) must have ndim of \"\n f\"1 or 2, {int(inputs[1].ndim)} given. Consider calling \"\n \"aesara.tensor.dot instead.\"\n )\n\n i_broadcastables = [input.type.broadcastable for input in inputs]\n bx, by = i_broadcastables\n if len(by) == 2: # y is a matrix\n bz = bx[:-1] + by[-1:]\n elif len(by) == 1: # y is vector\n bz = bx[:-1]\n\n i_dtypes = [input.type.dtype for input in inputs]\n outputs = [tensor(aes.upcast(*i_dtypes), bz)]\n return Apply(self, inputs, outputs)\n\n def perform(self, node, inp, out):\n x, y = inp\n (z,) = out\n\n # the asarray is here because dot between two vectors\n # gives a numpy float object but we need to return a 0d\n # ndarray\n z[0] = np.asarray(np.dot(x, y))\n\n def grad(self, inp, grads):\n\n x, y = inp\n (gz,) = grads\n xdim, ydim, gdim = x.type.ndim, y.type.ndim, gz.type.ndim\n\n # grad is scalar, so x is vector and y is vector\n if gdim == 0:\n xgrad = gz * y\n ygrad = gz * x\n\n # x is vector, y is matrix, grad is vector\n elif xdim == 1 and ydim == 2:\n xgrad = dot(gz, y.T)\n ygrad = outer(x.T, gz)\n\n # x is matrix, y is vector, grad is vector\n elif xdim == 2 and ydim == 1:\n xgrad = outer(gz, y.T)\n ygrad = dot(x.T, gz)\n\n # x is matrix, y is matrix, grad is matrix\n elif xdim == ydim == 2:\n xgrad = dot(gz, y.T)\n ygrad = dot(x.T, gz)\n\n # If x or y contain broadcastable dimensions but only one of\n # them know that a matching dimensions is broadcastable, the\n # above code don't always return the right broadcast pattern.\n # This cause problem down the road. 
See gh-1461.\n if xgrad.broadcastable != x.broadcastable:\n xgrad = patternbroadcast(xgrad, x.broadcastable)\n if ygrad.broadcastable != y.broadcastable:\n ygrad = patternbroadcast(ygrad, y.broadcastable)\n\n rval = xgrad, ygrad\n\n for elem in rval:\n assert elem.dtype.find(\"float\") != -1\n\n return rval\n\n def R_op(self, inputs, eval_points):\n # R_op for a \\dot b evaluated at c for a and d for b is\n # simply c \\dot b + a \\dot d\n\n assert len(inputs) == 2\n assert len(eval_points) == 2\n if eval_points[0] is None and eval_points[1] is None:\n return [None]\n\n if eval_points[0]:\n t1 = self(eval_points[0], inputs[1])\n if eval_points[1]:\n t2 = self(inputs[0], eval_points[1])\n\n if eval_points[0] and eval_points[1]:\n return [t1 + t2]\n elif eval_points[0]:\n return [t1]\n else:\n return [t2]\n\n def infer_shape(self, fgraph, node, shapes):\n xshp, yshp = shapes\n x, y = node.inputs\n\n # vector / vector\n if x.ndim == 1 and y.ndim == 1:\n return [()]\n # matrix / vector\n if x.ndim == 2 and y.ndim == 1:\n return [xshp[:-1]]\n # vector / matrix\n if x.ndim == 1 and y.ndim == 2:\n return [yshp[-1:]]\n # matrix / matrix\n if x.ndim == 2 and y.ndim == 2:\n return [xshp[:-1] + yshp[-1:]]\n raise NotImplementedError()\n\n def __str__(self):\n return \"dot\"\n\n\n_dot = Dot()\npprint.assign(\n _dot, printing.OperatorPrinter(printing.special[\"middle_dot\"], -1, \"left\")\n)\n\n\ndef dot(l, r):\n \"\"\"Return a symbolic dot product.\n\n This is designed to work with both sparse and dense tensors types.\n \"\"\"\n\n if not isinstance(l, Variable):\n l = as_tensor_variable(l)\n\n if not isinstance(r, Variable):\n r = as_tensor_variable(r)\n\n try:\n res = l.__dot__(r)\n if res is NotImplemented:\n raise NotImplementedError\n except (NotImplementedError, AttributeError, TypeError):\n res = r.__rdot__(l)\n if res is NotImplemented:\n raise NotImplementedError()\n\n return res\n\n\ndef dense_dot(a, b):\n \"\"\"\n Computes the dot product of two variables.\n\n For two matrices, this is equivalent to matrix multiplication.\n For two vectors, this is the inner product.\n When one variable is a scalar, this is like elementwise multiplication.\n For N dimensions, this is a sum product over the last axis\n of the first array and the second-to-last axis of the second array:\n\n dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m])\n\n Note that this dot function does one of three things, in the following\n sequence:\n\n 1. If either a or b is scalar, it returns the elementwise product\n without calling the Aesara Dot op.\n\n 2. If either a or b has more than 2 dimensions, it calls Aesara's\n tensordot function with appropriate axes. The tensordot function\n expresses high-dimensional dot products in terms of 2D matrix\n multiplications, so it may be possible to further optimize for\n performance.\n\n 3. 
If both a and b have either 1 or 2 dimensions, it calls Aesara's\n Dot op on a and b.\n\n Notes\n -----\n Matrix-matrix products are sometimes optimized to Dot22 or Gemm ops\n (see tensor.blas).\n Vector-vector products are sometimes optimized to Ger or CGer (see\n tensor.blas).\n Matrix-vector products are sometimes optimized to Gemv, CGemv (see\n tensor.blas).\n\n \"\"\"\n a, b = as_tensor_variable(a), as_tensor_variable(b)\n\n if a.ndim == 0 or b.ndim == 0:\n return a * b\n elif a.ndim > 2 or b.ndim > 2:\n return tensordot(a, b, [[a.ndim - 1], [np.maximum(0, b.ndim - 2)]])\n else:\n return _dot(a, b)\n\n\ndef _tensordot_as_dot(a, b, axes, dot, batched):\n \"\"\"\n Reduces a tensor dot product to a matrix or vector dot product. Based\n on code from Tijmen Tieleman's gnumpy\n (http://www.cs.toronto.edu/~tijmen/gnumpy.html).\n\n Please see the documentation of tensordot for the meaning of the a, b\n and axes arguments.\n\n :param dot: a function that accepts two symbolic variables and computes\n the appropriate dot product (e.g. dot, batched_dot)\n :type dot: function\n\n :param batched: whether to treat the first axis of a and b as a batch\n axis. If so, this axis will be preserved in the output,\n allowing this function to be used also for batched\n tensor dot products.\n :type batched: boolean\n\n :returns: a tensor with shape equal to the concatenation of a's shape\n (less any dimensions that were summed over) and b's shape\n (less the first dimension and any dimensions that were summed\n over).\n :rtype: symbolic tensor\n \"\"\"\n a, b = as_tensor_variable(a), as_tensor_variable(b)\n\n if not np.isscalar(axes) and len(axes) != 2:\n raise ValueError(\n \"Axes should be an integer or a \"\n \"list/tuple of len 2 ({axes} was provided)\"\n )\n\n # if 'axes' is a number of axes to multiply and sum over (trailing axes\n # of a, leading axes of b), we can just reshape and use dot.\n elif np.isscalar(axes):\n axes = int(axes)\n\n for operand_name, operand in ((\"a\", a), (\"b\", b)):\n if axes > operand.ndim:\n raise ValueError(\n f\"axes can not be larger than the dimension of {operand_name} \"\n f\"({operand_name}.ndim={operand.ndim}, axes={axes})\"\n )\n if batched and axes == operand.ndim:\n raise ValueError(\n \"axes to sum over must not include the batch axis \"\n f\"of {operand_name} ({operand_name}.ndim={operand.ndim}, axes={axes})\"\n )\n\n batch_axes = 1 if batched else 0\n a_outaxes = slice(0, a.ndim - axes)\n b_outaxes = slice(batch_axes + axes, b.ndim)\n outshape = concatenate([a.shape[a_outaxes], b.shape[b_outaxes]])\n outbcast = a.broadcastable[a_outaxes] + b.broadcastable[b_outaxes]\n outndim = len(outbcast)\n\n a_shape = [1] * 2\n b_shape = [1] * 2\n\n # compute total size of summed axes\n for i in range(0, axes):\n a_shape[1] *= a.shape[-(i + 1)]\n b_shape[0] *= b.shape[batch_axes + i]\n # compute total size of other axes\n for i in range(0, a.ndim - axes - batch_axes):\n a_shape[0] *= a.shape[batch_axes + i]\n for i in range(0, b.ndim - axes - batch_axes):\n b_shape[1] *= b.shape[-(i + 1)]\n\n if batched:\n a_shape.insert(0, a.shape[0])\n b_shape.insert(0, b.shape[0])\n\n a_reshaped = a.reshape(a_shape)\n b_reshaped = b.reshape(b_shape)\n\n out_reshaped = dot(a_reshaped, b_reshaped)\n out = out_reshaped.reshape(outshape, outndim)\n # Make sure the broadcastable pattern of the result is correct,\n # since some shape information can be lost in the reshapes.\n return patternbroadcast(out, outbcast)\n\n # if 'axes' is a list, transpose a and b such that the summed axes of a\n # 
are last and the summed axes of b are first.\n else:\n axes = [as_list(axes_) for axes_ in axes]\n\n if len(axes[0]) != len(axes[1]):\n raise ValueError(\"Axes elements must have the same length.\")\n\n for i, (operand_name, operand) in enumerate(((\"a\", a), (\"b\", b))):\n if len(axes[i]) > operand.ndim:\n raise ValueError(\n f\"axes[{i}] should be array_like with length less than \"\n f\"the dimensions of {operand_name} ({operand_name}.ndim={operand.ndim}, len(axes[0])={len(axes[i])}).\"\n )\n if len(axes[i]) > 0 and np.max(axes[i]) >= operand.ndim:\n raise ValueError(\n f\"axes[{i}] contains dimensions greater than or equal \"\n f\"to {operand_name}.ndim ({operand_name}.ndim={operand.ndim}, max(axes[0])={np.max(np.array(axes[i]))}).\"\n )\n if batched and 0 in axes[i]:\n raise ValueError(\n \"axes to sum over must not contain the batch axis \"\n f\"(axes[{i}]={axes[i]})\"\n )\n\n batch_axes = [0] if batched else []\n other_axes = [\n [x for x in range(operand.ndim) if x not in axes[i] and x not in batch_axes]\n for i, operand in enumerate((a, b))\n ]\n\n a_shuffled = a.dimshuffle(batch_axes + other_axes[0] + axes[0])\n b_shuffled = b.dimshuffle(batch_axes + axes[1] + other_axes[1])\n\n # now that a and b are in the right order, recur with integer axes\n return _tensordot_as_dot(\n a_shuffled, b_shuffled, len(axes[0]), dot=dot, batched=batched\n )\n\n\ndef tensordot(a, b, axes=2):\n \"\"\"\n Compute a generalized dot product over provided axes.\n\n Given two tensors a and b, tensordot computes a generalized dot product over\n the provided axes. Aesara's implementation reduces all expressions to\n matrix or vector dot products and is based on code from Tijmen Tieleman's\n gnumpy (http://www.cs.toronto.edu/~tijmen/gnumpy.html).\n\n Parameters\n ----------\n a: symbolic tensor\n The first tensor variable.\n b: symbolic tensor\n The second tensor variable\n axes: int or array-like of length 2\n If an integer, the number of axes to sum over.\n If an array, it must have two array elements containing the axes\n to sum over in each tensor.\n\n Note that the default value of 2 is not guaranteed to work\n for all values of a and b, and an error will be raised if\n that is the case. The reason for keeping the default is to\n maintain the same signature as numpy's tensordot function\n (and np.tensordot raises analogous errors for non-compatible\n inputs).\n\n If an integer i, it is converted to an array containing\n the last i dimensions of the first tensor and the first\n i dimensions of the second tensor:\n axes = [list(range(a.ndim - i, b.ndim)), list(range(i))]\n\n If an array, its two elements must contain compatible axes\n of the two tensors. For example, [[1, 2], [2, 0]] means sum\n over the 2nd and 3rd axes of a and the 3rd and 1st axes of b.\n (Remember axes are zero-indexed!) The 2nd axis of a and the\n 3rd axis of b must have the same shape; the same is true for\n the 3rd axis of a and the 1st axis of b.\n\n Returns\n -------\n symbolic tensor\n A tensor with shape equal to the concatenation of a's shape\n (less any dimensions that were summed over) and b's shape\n (less any dimensions that were summed over).\n\n Examples\n --------\n It may be helpful to consider an example to see what tensordot does.\n Aesara's implementation is identical to NumPy's. Here a has shape (2, 3, 4)\n and b has shape (5, 6, 4, 3). The axes to sum over are [[1, 2], [3, 2]] --\n note that a.shape[1] == b.shape[3] and a.shape[2] == b.shape[2]; these axes\n are compatible. 
The resulting tensor will have shape (2, 5, 6) -- the\n dimensions that are not being summed:\n\n >>> a = np.random.random((2,3,4))\n >>> b = np.random.random((5,6,4,3))\n\n #tensordot\n >>> c = np.tensordot(a, b, [[1,2],[3,2]])\n\n #loop replicating tensordot\n >>> a0, a1, a2 = a.shape\n >>> b0, b1, _, _ = b.shape\n >>> cloop = np.zeros((a0,b0,b1))\n\n #loop over non-summed indices -- these exist\n #in the tensor product.\n >>> for i in range(a0):\n ... for j in range(b0):\n ... for k in range(b1):\n ... #loop over summed indices -- these don't exist\n ... #in the tensor product.\n ... for l in range(a1):\n ... for m in range(a2):\n ... cloop[i,j,k] += a[i,l,m] * b[j,k,m,l]\n\n >>> np.allclose(c, cloop)\n true\n\n This specific implementation avoids a loop by transposing a and b such that\n the summed axes of a are last and the summed axes of b are first. The\n resulting arrays are reshaped to 2 dimensions (or left as vectors, if\n appropriate) and a matrix or vector dot product is taken. The result is\n reshaped back to the required output dimensions.\n\n In an extreme case, no axes may be specified. The resulting tensor\n will have shape equal to the concatenation of the shapes of a and b:\n\n >>> c = np.tensordot(a, b, 0)\n >>> print(a.shape)\n (2,3,4)\n >>> print(b.shape)\n (5,6,4,3)\n >>> print(c.shape)\n (2,3,4,5,6,4,3)\n\n See the documentation of numpy.tensordot for more examples.\n\n \"\"\"\n return _tensordot_as_dot(a, b, axes, dot=dot, batched=False)\n\n\ndef outer(x, y):\n \"\"\"Return vector-vector outer product.\n\n If an input isn't a vector, we flatten it first.\n\n \"\"\"\n if x.ndim != 1:\n x = x.flatten()\n if y.ndim != 1:\n y = y.flatten()\n return dot(x.dimshuffle(0, \"x\"), y.dimshuffle(\"x\", 0))\n\n\nclass All(CAReduce):\n \"\"\"Applies `logical and` to all the values of a tensor along the\n specified axis(es).\n\n \"\"\"\n\n __props__ = (\"axis\",)\n nfunc_spec = (\"all\", 1, 1)\n\n def __init__(self, axis=None):\n super().__init__(aes.and_, axis)\n\n def _output_dtype(self, idtype):\n return \"bool\"\n\n def __str__(self):\n if self.axis is None:\n return \"All\"\n else:\n return \"All{%s}\" % \", \".join(map(str, self.axis))\n\n def make_node(self, input):\n input = as_tensor_variable(input)\n if input.dtype != \"bool\":\n input = neq(input, 0)\n ret = super().make_node(input)\n return ret\n\n def grad(self, inp, grads):\n (x,) = inp\n return [x.zeros_like(config.floatX)]\n\n\nclass Any(CAReduce):\n \"\"\"Applies `bitwise or` to all the values of a tensor along the\n specified axis(es).\n\n \"\"\"\n\n __props__ = (\"axis\",)\n nfunc_spec = (\"any\", 1, 1)\n\n def __init__(self, axis=None):\n super().__init__(aes.or_, axis)\n\n def _output_dtype(self, idtype):\n return \"bool\"\n\n def __str__(self):\n if self.axis is None:\n return \"Any\"\n else:\n return \"Any{%s}\" % \", \".join(map(str, self.axis))\n\n def make_node(self, input):\n input = as_tensor_variable(input)\n if input.dtype != \"bool\":\n input = neq(input, 0)\n ret = super().make_node(input)\n return ret\n\n def grad(self, inp, grads):\n (x,) = inp\n return [x.zeros_like(config.floatX)]\n\n\nclass Sum(CAReduceDtype):\n \"\"\"\n Sums all the values of a tensor along the specified axis(es).\n\n Equivalent to `CAReduceDtype(scalar.add, axis=axis, dtype=dtype)`,\n with the difference that this defines the gradient of sum wrt its\n tensor input.\n\n Parameters\n ----------\n axis\n Axis(es) along which the tensor should be summed\n (use None to sum over all axes, and a list or tuple to sum along more\n 
than one axis).\n\n dtype\n The dtype of the internal accumulator and returned\n tensor. If None, then we use the default dtype which is the same as the\n input tensor's dtype except when:\n - the input dtype is a signed integer of precision < 64 bit, in\n which case we use int64\n - the input dtype is an unsigned integer of precision < 64 bit, in\n which case we use uint64\n This value does not depend on the value of \"acc_dtype\".\n\n acc_dtype\n The dtype of the internal accumulator.\n If None (default), we use the dtype in the list below,\n or the input dtype if its precision is higher:\n - for int dtypes, we use at least int64;\n - for uint dtypes, we use at least uint64;\n - for float dtypes, we use at least float64;\n - for complex dtypes, we use at least complex128.\n\n \"\"\"\n\n __props__ = (\"axis\", \"dtype\", \"acc_dtype\")\n nfunc_spec = (\"sum\", 1, 1)\n\n def __init__(self, axis=None, dtype=None, acc_dtype=None):\n super().__init__(aes.add, axis=axis, dtype=dtype, acc_dtype=acc_dtype)\n\n def __str__(self):\n name = self.__class__.__name__\n axis = \"\"\n if self.axis is not None:\n axis = \", \".join(str(x) for x in self.axis)\n axis = f\"axis=[{axis}], \"\n return f\"{name}{{{axis}acc_dtype={self.acc_dtype}}}\"\n\n def L_op(self, inp, out, grads):\n (x,) = inp\n\n if out[0].dtype not in continuous_dtypes:\n return [x.zeros_like(dtype=config.floatX)]\n\n (gz,) = grads\n gz = as_tensor_variable(gz)\n axis = self.axis\n if axis is None:\n axis = list(range(x.type.ndim))\n if axis == ():\n return (gz,)\n new_dims = []\n i = 0\n for j, _ in enumerate(x.type.broadcastable):\n if j in axis:\n new_dims.append(\"x\")\n else:\n new_dims.append(i)\n i += 1\n ds_op = DimShuffle(gz.type.broadcastable, new_dims)\n gx = Elemwise(aes.second)(x, ds_op(gz))\n return [gx]\n\n def R_op(self, inputs, eval_points):\n # There is just one element in inputs and eval_points, the axis are\n # part of self\n if None in eval_points:\n return [None]\n return self(*eval_points, **dict(return_list=True))\n\n\ndef sum(input, axis=None, dtype=None, keepdims=False, acc_dtype=None):\n \"\"\"\n Computes the sum along the given axis(es) of a tensor `input`.\n\n When axis is None (the default value), the sum is performed\n over the flattened tensor.\n\n For full documentation see `Sum`.\n In particular please pay attention to the important warning when using\n a custom acc_dtype.\n\n Parameters\n ----------\n keepdims: bool\n If this is set to True, the axes which are reduced are left in\n the result as dimensions with size one. 
With this option, the result\n will broadcast correctly against the original tensor.\n\n \"\"\"\n\n out = Sum(axis=axis, dtype=dtype, acc_dtype=acc_dtype)(input)\n\n if keepdims:\n out = makeKeepDims(input, out, axis)\n return out\n\n\npprint.assign(Sum(), printing.FunctionPrinter(\"sum\"))\n\n\nclass Prod(CAReduceDtype):\n \"\"\"\n Multiplies all the values of a tensor along the specified axis(es).\n\n Equivalent to `CAReduce(scalar.mul, axis = axis)`, with the\n difference that this defines the gradient of prod wrt its tensor\n input.\n\n \"\"\"\n\n __props__ = (\"axis\", \"dtype\", \"acc_dtype\")\n nfunc_spec = (\"prod\", 1, 1)\n\n def __init__(self, axis=None, dtype=None, acc_dtype=None, no_zeros_in_input=False):\n super().__init__(aes.mul, axis=axis, dtype=dtype, acc_dtype=acc_dtype)\n self.no_zeros_in_input = no_zeros_in_input\n\n def __setstate__(self, dct):\n super().__setstate__(dct)\n # Add default value to be able to reload old pickled objects.\n if \"no_zeros_in_input\" not in dct:\n self.no_zeros_in_input = False\n\n def L_op(self, inp, out, grads):\n \"\"\"\n The grad of this Op could be very easy, if it is was not for the case\n where zeros are present in a given \"group\" (ie. elements reduced\n together to form the product).\n\n If no zeros are found in the elements of the product, then the\n partial derivative of the product relative to one of the elements\n (one of the inputs) is simply the product of the other elements.\n That's easy to see from the chain rule.\n\n Now the trick (with no zeros) is to take the overall product, then\n for every original element, the partial derivative is given by\n this product divided by the element itself (which equals the product\n of the other terms). This is easy to do by broadcasting the original\n product.\n\n (Note that we also need to broadcast-multiply by the\n \"incoming gradient\", ie. the gradient of the cost relative to the\n output/product).\n\n With zeros, things get more complicated. For a given group, we have 3\n cases:\n\n * No zeros in the group. Use previous trick.\n * If only one zero is present, then the gradient for that element is\n non-zero, but is zero for all others.\n * If more than one zero is present, then all the derivatives are zero.\n\n For the last two cases (with 1 or more zeros), we can't use the\n division trick, as this gives divisions by 0.\n\n Implementing that case-by-case logic is not as trivial, so a bunch of\n hacks are piled down here to do it. Notably, for the \"only one zero\"\n case, there's a special Op that computes the product of the elements\n in the group, minus the zero (see `ProdWithoutZeros`). The trick is then\n to use the division trick for groups with no zero, to use the\n `ProdWithoutZeros` op where there's only one zero, and to output a\n derivative of zero for any element part of a group with more than\n one zero.\n\n I do this by first counting the number of zeros in each group (see the\n `aet.eq` bits), then taking this or that behavior (see `aet.switch`)\n based on the result of this count.\n\n \"\"\"\n (prod_in,) = inp\n (gz,) = grads\n\n if out[0].dtype in discrete_dtypes or self.acc_dtype in discrete_dtypes:\n # There is an int conversion in the way\n return [prod_in.zeros_like(dtype=config.floatX)]\n\n # Prepare the broadcasting that is used everywhere to broadcast\n # over the original groups (ie. 
broadcast over the elements of a given\n # product)\n gz = as_tensor_variable(gz)\n axis = self.axis\n if axis is None:\n axis = list(range(prod_in.type.ndim))\n if axis == ():\n return (gz,)\n new_dims = []\n i = 0\n for j, _ in enumerate(prod_in.type.broadcastable):\n if j in axis:\n new_dims.append(\"x\")\n else:\n new_dims.append(i)\n i += 1\n\n # result of the product, broadcastable over groups\n prod_out = self(prod_in).dimshuffle(new_dims)\n # incoming gradient, broadcastable over groups\n gz = gz.dimshuffle(new_dims)\n\n # division trick if we don't have zeros. This will contain\n # NaNs to be eliminated in the `aet.switch` if we do have zeros.\n grad_case_without_zeros = gz * prod_out / prod_in\n\n if self.no_zeros_in_input:\n # this handles inputs with zeros, but only certain input shapes\n return [grad_case_without_zeros]\n else:\n\n where_zeros = eq(prod_in, 0.0)\n sum_where_zeros = sum(where_zeros, axis=self.axis)\n groups_with_single_zero = eq(sum_where_zeros, 1).dimshuffle(new_dims)\n # tensor with 0 everywhere except for those places where\n # a 0 part of a group with a single zero was to be found\n where_single_zero = groups_with_single_zero * where_zeros\n # further optimization to avoid computing ProdWithoutZeros\n # if the incoming gradient is 0\n where_gz_not_zero = neq(gz, 0.0)\n # only take ProdWithoutZeros for the groups with single zeros\n # with non-null incoming gradient\n where_to_take_prod_without_zeros = (\n groups_with_single_zero * where_gz_not_zero\n )\n # preprocess the original input so that we set 0 everywhere\n # except for groups that contain a single zero, to avoid computing\n # multiplications on other groups\n prod_without_zeros_in = where_to_take_prod_without_zeros * prod_in\n # TODO: put lazy switch here, if it'd work\n # this is pretty efficient already (no multiplication if 0), but\n # it'd be even better if we had a lazy if per element\n prod_without_zeros = ProdWithoutZeros(axis=self.axis)(prod_without_zeros_in)\n prod_without_zeros = prod_without_zeros.dimshuffle(new_dims)\n\n groups_without_zeros = eq(sum_where_zeros, 0).dimshuffle(new_dims)\n\n final_grad = switch(\n groups_without_zeros,\n grad_case_without_zeros,\n switch(where_single_zero, prod_without_zeros, 0.0) * gz,\n )\n\n return [final_grad]\n\n def c_code_cache_version(self):\n return (1,)\n\n\ndef prod(\n input,\n axis=None,\n dtype=None,\n keepdims=False,\n acc_dtype=None,\n no_zeros_in_input=False,\n):\n \"\"\"\n Computes the product along the given axis(es) of a tensor `input`.\n\n When axis is None (the default value), the product is performed\n over the flattened tensor.\n\n For full documentation see ``tensor.elemwise.Prod``.\n\n Parameters\n ----------\n keepdims: bool\n If this is set to True, the axes which are reduced are left in\n the result as dimensions with size one. With this option, the result\n will broadcast correctly against the original tensor.\n\n \"\"\"\n\n out = Prod(\n axis, dtype=dtype, acc_dtype=acc_dtype, no_zeros_in_input=no_zeros_in_input\n )(input)\n\n if keepdims:\n out = makeKeepDims(input, out, axis)\n return out\n\n\nclass MulWithoutZeros(BinaryScalarOp):\n # \"identity\" here is zero, as in Reduce we don't want to start\n # with reducing (1, something_else): this leads to the erroneous\n # case where a vector of zeros is reduced by binary reductions\n # of (1, 0), which always ends up as 1 (ie. 
the result for\n # the c version, for the product of [0,0,0], is 1.0)\n\n identity = 0.0\n commutative = True\n associative = True\n\n def impl(self, x, y):\n if x == 0:\n return y\n if y == 0:\n return x\n return x * y\n\n def c_code(self, node, name, inp, out, sub):\n x, y = inp\n (z,) = out\n return (\n \"%(z)s = ((%(x)s == 0) ? (%(y)s) : \"\n + \"((%(y)s == 0) ? (%(x)s) : ((%(y)s)*(%(x)s))) );\"\n ) % locals()\n\n def c_code_cache_version(self):\n return (1,)\n\n\nmul_without_zeros = MulWithoutZeros(aes.upcast_out, name=\"mul_without_zeros\")\n\n\nclass ProdWithoutZeros(CAReduceDtype):\n\n __props__ = (\"axis\", \"dtype\", \"acc_dtype\")\n\n def __init__(self, axis=None, dtype=None, acc_dtype=None):\n super().__init__(mul_without_zeros, axis=axis, dtype=dtype, acc_dtype=acc_dtype)\n\n def grad(self, inp, grads):\n from aesara.gradient import grad_not_implemented\n\n (a,) = inp\n a_grad = grad_not_implemented(\n self,\n 0,\n a,\n \"2nd derivatives of `product(a)` is not currently supported.\"\n \"If `a` is guaranteed to contains no zeros, use \"\n \"`product(a, no_zeros_in_input=True)`.\",\n )\n return [a_grad]\n\n\ndef any(x, axis=None, keepdims=False):\n out = Any(axis)(x)\n\n if keepdims:\n out = makeKeepDims(x, out, axis)\n return out\n\n\ndef all(x, axis=None, keepdims=False):\n out = All(axis)(x)\n\n if keepdims:\n out = makeKeepDims(x, out, axis)\n return out\n\n\ndef ptp(a, axis=None):\n \"\"\"\n Range of values (maximum - minimum) along an axis.\n\n The name of the function comes from the acronym for peak to peak.\n\n Parameters\n ----------\n a\n Input tensor.\n axis\n Axis along which to find the peaks. By default, flatten the array.\n\n Returns\n -------\n array\n A new array holding the result.\n\n \"\"\"\n\n a = as_tensor_variable(a)\n\n out = max(a, axis) - min(a, axis)\n\n return out\n\n\ndef power(x, y):\n return x ** y\n\n\ndef logaddexp(*xs):\n \"\"\"Logarithm of the sum of exponentiations of the inputs.\n\n See ``numpy.logaddexp``.\n\n Parameters\n ----------\n xs : symbolic tensors\n Input\n\n Returns\n -------\n tensor\n\n \"\"\"\n\n return log(add(*[exp(x) for x in xs]))\n\n\ndef logsumexp(x, axis=None, keepdims=False):\n \"\"\"Compute the log of the sum of exponentials of input elements.\n\n See ``scipy.special.logsumexp``.\n\n Parameters\n ----------\n x : symbolic tensor\n Input\n\n axis : None or int or tuple of ints, optional\n Axis or axes over which the sum is taken. By default axis is None,\n and all elements are summed.\n\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left in the\n result as dimensions with size one. 
With this option, the result will\n broadcast correctly against the original array.\n\n Returns\n -------\n tensor\n\n \"\"\"\n\n return log(sum(exp(x), axis=axis, keepdims=keepdims))\n\n\n__all__ = [\n \"max_and_argmax\",\n \"max\",\n \"argmax\",\n \"min\",\n \"argmin\",\n \"smallest\",\n \"largest\",\n \"lt\",\n \"gt\",\n \"le\",\n \"ge\",\n \"eq\",\n \"neq\",\n \"isnan\",\n \"isinf\",\n \"allclose\",\n \"isclose\",\n \"and_\",\n \"bitwise_and\",\n \"or_\",\n \"bitwise_or\",\n \"xor\",\n \"bitwise_xor\",\n \"invert\",\n \"bitwise_not\",\n \"abs\",\n \"abs_\",\n \"exp\",\n \"exp2\",\n \"expm1\",\n \"neg\",\n \"reciprocal\",\n \"inv\",\n \"log\",\n \"log2\",\n \"log10\",\n \"log1p\",\n \"sgn\",\n \"ceil\",\n \"floor\",\n \"trunc\",\n \"iround\",\n \"round\",\n \"round_half_to_even\",\n \"round_half_away_from_zero\",\n \"sqr\",\n \"square\",\n \"cov\",\n \"sqrt\",\n \"deg2rad\",\n \"rad2deg\",\n \"cos\",\n \"arccos\",\n \"sin\",\n \"arcsin\",\n \"tan\",\n \"arctan\",\n \"arctan2\",\n \"cosh\",\n \"arccosh\",\n \"sinh\",\n \"arcsinh\",\n \"tanh\",\n \"arctanh\",\n \"erf\",\n \"erfc\",\n \"erfcx\",\n \"erfinv\",\n \"erfcinv\",\n \"gamma\",\n \"gammaln\",\n \"psi\",\n \"tri_gamma\",\n \"chi2sf\",\n \"gammainc\",\n \"gammaincc\",\n \"gammau\",\n \"gammal\",\n \"j0\",\n \"j1\",\n \"jv\",\n \"i0\",\n \"i1\",\n \"iv\",\n \"sigmoid\",\n \"expit\",\n \"softplus\",\n \"log1pexp\",\n \"log1mexp\",\n \"betainc\",\n \"real\",\n \"imag\",\n \"angle\",\n \"complex\",\n \"conj\",\n \"complex_from_polar\",\n \"sum\",\n \"prod\",\n \"mean\",\n \"var\",\n \"std\",\n \"std\",\n \"maximum\",\n \"minimum\",\n \"divmod\",\n \"add\",\n \"sub\",\n \"mul\",\n \"true_div\",\n \"int_div\",\n \"floor_div\",\n \"ceil_intdiv\",\n \"mod\",\n \"pow\",\n \"clip\",\n \"dot\",\n \"dense_dot\",\n \"tensordot\",\n \"outer\",\n \"any\",\n \"all\",\n \"ptp\",\n \"power\",\n \"logaddexp\",\n \"logsumexp\",\n]\n",
"import builtins\nimport operator\nimport pickle\nimport warnings\nfrom copy import copy\nfrom functools import reduce\nfrom itertools import product\n\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_array_equal\nfrom scipy.special import logsumexp as scipy_logsumexp\n\nimport aesara.scalar as aes\nfrom aesara.compile.debugmode import DebugMode\nfrom aesara.compile.function import function\nfrom aesara.compile.mode import get_default_mode\nfrom aesara.compile.sharedvalue import shared\nfrom aesara.configdefaults import config\nfrom aesara.gradient import NullTypeGradError, grad, numeric_grad\nfrom aesara.graph.basic import Variable, applys_between\nfrom aesara.graph.fg import FunctionGraph\nfrom aesara.link.c.basic import DualLinker\nfrom aesara.misc.safe_asarray import _asarray\nfrom aesara.tensor import blas, blas_c\nfrom aesara.tensor.basic import (\n as_tensor_variable,\n constant,\n eye,\n get_scalar_constant_value,\n switch,\n)\nfrom aesara.tensor.elemwise import CAReduce, Elemwise\nfrom aesara.tensor.math import (\n Argmax,\n Dot,\n MaxAndArgmax,\n Mean,\n Prod,\n ProdWithoutZeros,\n Sum,\n _dot,\n abs,\n add,\n allclose,\n arccos,\n arccosh,\n arcsin,\n arcsinh,\n arctan,\n arctan2,\n arctanh,\n argmax,\n argmin,\n ceil,\n ceil_intdiv,\n clip,\n complex_from_polar,\n conj,\n cos,\n cosh,\n cov,\n deg2rad,\n dense_dot,\n dot,\n eq,\n exp,\n exp2,\n expm1,\n floor,\n isclose,\n isinf,\n isnan,\n isnan_,\n log,\n log1p,\n log2,\n log10,\n logaddexp,\n logsumexp,\n max,\n max_and_argmax,\n maximum,\n mean,\n min,\n minimum,\n mod,\n mul,\n neg,\n neq,\n outer,\n power,\n ptp,\n rad2deg,\n reciprocal,\n round_half_away_from_zero,\n round_half_to_even,\n sgn,\n sigmoid,\n sin,\n sinh,\n smallest,\n sqr,\n sqrt,\n sub,\n)\nfrom aesara.tensor.math import sum as aet_sum\nfrom aesara.tensor.math import tan, tanh, tensordot, true_div, trunc, var\nfrom aesara.tensor.type import (\n TensorType,\n complex_dtypes,\n continuous_dtypes,\n cscalar,\n discrete_dtypes,\n dmatrix,\n dscalar,\n dtensor3,\n dvector,\n fmatrix,\n fscalar,\n fscalars,\n imatrix,\n iscalar,\n ivector,\n lscalar,\n matrices,\n matrix,\n scalar,\n scalars,\n tensor,\n tensor3,\n tensor4,\n values_eq_approx_remove_nan,\n vector,\n vectors,\n zvector,\n)\nfrom aesara.tensor.type_other import NoneConst\nfrom tests import unittest_tools as utt\nfrom tests.tensor.utils import (\n _bad_build_broadcast_binary_normal,\n _bad_runtime_broadcast_binary_normal,\n _bad_runtime_reciprocal,\n _eps,\n _good_broadcast_binary_arctan2,\n _good_broadcast_binary_normal,\n _good_broadcast_div_mod_normal_float,\n _good_broadcast_div_mod_normal_float_no_complex,\n _good_broadcast_pow_normal_float,\n _good_broadcast_unary_arccosh,\n _good_broadcast_unary_arcsin,\n _good_broadcast_unary_arctanh,\n _good_broadcast_unary_normal,\n _good_broadcast_unary_normal_float_no_complex,\n _good_broadcast_unary_normal_float_no_empty_no_complex,\n _good_broadcast_unary_normal_no_complex,\n _good_broadcast_unary_positive,\n _good_broadcast_unary_tan,\n _good_broadcast_unary_wide,\n _good_reciprocal,\n _grad_broadcast_binary_normal,\n _grad_broadcast_pow_normal,\n _grad_broadcast_unary_normal,\n _grad_broadcast_unary_normal_no_complex,\n _grad_broadcast_unary_normal_no_complex_no_corner_case,\n _grad_broadcast_unary_normal_noint,\n _grad_reciprocal,\n _numpy_true_div,\n angle_eps,\n check_floatX,\n copymod,\n div_grad_rtol,\n eval_outputs,\n get_numeric_types,\n ignore_isfinite_mode,\n inplace_func,\n integers,\n integers_uint32,\n 
makeBroadcastTester,\n makeTester,\n random,\n random_complex,\n random_nonzero,\n random_ranged,\n upcast_float16_ufunc,\n upcast_int8_nfunc,\n)\n\n\nif config.mode == \"FAST_COMPILE\":\n mode_opt = \"FAST_RUN\"\nelse:\n mode_opt = get_default_mode()\n\n\nTestAddBroadcast = makeBroadcastTester(\n op=add,\n expected=lambda *inputs: check_floatX(inputs, reduce(lambda x, y: x + y, inputs)),\n good=dict(\n three_inputs_same_shapes=(random(2, 3), random(2, 3), random(2, 3)),\n three_inputs_same_shapes_uint=(\n integers_uint32(2, 3),\n integers_uint32(2, 3),\n integers_uint32(2, 3),\n ),\n four_inputs_broadcast=(random(2, 3), random(1, 3), random(2, 1), random(1, 1)),\n **_good_broadcast_binary_normal,\n ),\n bad_build=_bad_build_broadcast_binary_normal,\n bad_runtime=_bad_runtime_broadcast_binary_normal,\n)\n\n\nTestSubBroadcast = makeBroadcastTester(\n op=sub,\n expected=lambda x, y: check_floatX((x, y), x - y),\n good=_good_broadcast_binary_normal,\n bad_build=_bad_build_broadcast_binary_normal,\n bad_runtime=_bad_runtime_broadcast_binary_normal,\n grad=_grad_broadcast_binary_normal,\n)\n\n\nTestMaximumBroadcast = makeBroadcastTester(\n op=maximum,\n expected=lambda *inputs: check_floatX(inputs, np.maximum(*inputs)),\n good=_good_broadcast_binary_normal,\n bad_build=_bad_build_broadcast_binary_normal,\n bad_runtime=_bad_runtime_broadcast_binary_normal,\n grad=_grad_broadcast_binary_normal,\n)\n\n\ndef test_maximum_minimum_grad():\n # Test the discontinuity point.\n # We decided that we only pass the gradient to the first input in that case.\n x, y = vectors(\"xy\")\n for op in [maximum, minimum]:\n o = op(x, y)\n g = grad(o.sum(), [x, y])\n\n f = function([x, y], g)\n assert np.allclose(f([1], [1]), [[1], [0]])\n\n\nTestMinimumBroadcast = makeBroadcastTester(\n op=minimum,\n expected=lambda *inputs: check_floatX(inputs, np.minimum(*inputs)),\n good=_good_broadcast_binary_normal,\n bad_build=_bad_build_broadcast_binary_normal,\n bad_runtime=_bad_runtime_broadcast_binary_normal,\n grad=_grad_broadcast_binary_normal,\n)\n\nTestMulBroadcast = makeBroadcastTester(\n op=mul,\n expected=lambda *inputs: check_floatX(inputs, reduce(lambda x, y: x * y, inputs)),\n good=dict(\n three_inputs_same_shapes=(random(2, 3), random(2, 3), random(2, 3)),\n four_inputs_broadcast=(random(2, 3), random(1, 3), random(2, 1), random(1, 1)),\n **_good_broadcast_binary_normal,\n ),\n bad_build=_bad_build_broadcast_binary_normal,\n bad_runtime=_bad_runtime_broadcast_binary_normal,\n grad=dict(\n three_inputs_same_shapes=(random(2, 3), random(2, 3), random(2, 3)),\n four_inputs_broadcast=(random(2, 3), random(1, 3), random(2, 1), random(1, 1)),\n **_grad_broadcast_binary_normal,\n ),\n)\n\n# Values are fixed, because the gradient evaluation in TestModBroadcast often\n# fails when the inputs are close to each other (due to gradient discontinuity).\n# fmt: off\n_grad_broadcast_div_mod_normal = dict(\n same_shapes=(\n np.array([[-0.51157823, 0.02560825, -0.7482302], [0.05923786, -0.21001006, -0.66742722]]),\n np.array([[-0.02250197, -0.32979461, 0.32081774], [0.36419213, -0.54073201, 0.8932643]])\n ),\n scalar=(\n np.array([[0.32390696, -0.77305276, -0.66302977], [0.8214372, -0.31612823, -0.06294127]]),\n np.array([[-0.86904352]])\n ),\n row=(\n np.array([[0.89763688, -0.09403658, 0.05847774], [-0.00694876, -0.08999577, 0.19857154]]),\n np.array([[-0.47662978, 0.72692131, -0.18250251]])\n ),\n column=(\n np.array([[0.04506636, 0.05725927, -0.94947897], [0.39868416, -0.12655465, -0.87068554]]),\n np.array([[-0.39040176], 
[0.76164576]])\n ),\n # same_shapes=(random(2, 3), random((2, 3))),\n # scalar=(random(2, 3), random((1, 1))),\n # row=(random(2, 3), random((1, 3))),\n # column=(random(2, 3), random((2, 1))),\n # complex1=(random_complex(2, 3), randcomplex_nonzero((2, 3))),\n # complex2=(random_complex(2, 3), random((2, 3))),\n # complex3=(random(2, 3), randcomplex_nonzero((2, 3))),\n # dtype_mixup_1=(random(2, 3), integers_nonzero(2, 3)),\n # dtype_mixup_2=(integers_nonzero(2, 3), random((2, 3))),\n # empty1=(np.asarray([]), np.asarray([1.])),\n # empty2=(np.asarray([0]), np.asarray([])),\n)\n# fmt: on\n\nTestTrueDivBroadcast = makeBroadcastTester(\n op=true_div,\n expected=_numpy_true_div,\n good=_good_broadcast_div_mod_normal_float_no_complex,\n grad=_grad_broadcast_div_mod_normal,\n grad_rtol=div_grad_rtol,\n)\n\nTestInvBroadcast = makeBroadcastTester(\n op=reciprocal,\n expected=lambda x: upcast_int8_nfunc(np.true_divide)(np.int8(1), x),\n good=_good_reciprocal,\n bad_runtime=_bad_runtime_reciprocal,\n grad=_grad_reciprocal,\n grad_rtol=div_grad_rtol,\n)\n\nTestCeilIntDivBroadcast = makeBroadcastTester(\n op=ceil_intdiv,\n expected=lambda x, y: check_floatX((x, y), (x // y) + ((x % y) != 0)),\n good=_good_broadcast_div_mod_normal_float_no_complex,\n name=\"CeilIntDiv\",\n # As we implement this function with neq, the gradient returned is always 0.\n # grad=_grad_broadcast_div_mod_normal,\n # grad_rtol=div_grad_rtol,\n)\n\nTestModBroadcast = makeBroadcastTester(\n op=mod,\n expected=lambda x, y: np.asarray(x % y, dtype=aes.upcast(x.dtype, y.dtype)),\n good=copymod(_good_broadcast_div_mod_normal_float, [\"complex1\", \"complex2\"]),\n grad=_grad_broadcast_div_mod_normal,\n grad_eps=1e-5,\n)\n\n# Disable NAN checking for pow operator per issue #1780\nTestPowBroadcast = makeBroadcastTester(\n op=pow,\n expected=lambda x, y: check_floatX((x, y), x ** y),\n good=_good_broadcast_pow_normal_float,\n grad=_grad_broadcast_pow_normal,\n name=\"Pow\",\n mode=ignore_isfinite_mode,\n)\n\nTestAbsBroadcast = makeBroadcastTester(\n op=abs,\n expected=lambda x: np.abs(x),\n good=_good_broadcast_unary_normal,\n grad=_grad_broadcast_unary_normal,\n)\n\nTestNegBroadcast = makeBroadcastTester(\n op=neg,\n expected=lambda x: -x,\n good=_good_broadcast_unary_normal,\n grad=_grad_broadcast_unary_normal,\n)\n\nTestSgnBroadcast = makeBroadcastTester(\n op=sgn,\n expected=np.sign,\n good=_good_broadcast_unary_normal_no_complex,\n grad=_grad_broadcast_unary_normal,\n)\n\nTestCeilBroadcast = makeBroadcastTester(\n op=ceil,\n expected=upcast_float16_ufunc(np.ceil),\n good=_good_broadcast_unary_normal_no_complex,\n grad=copymod(\n _grad_broadcast_unary_normal_noint,\n extra=[np.asarray([-2.5, -1.5, -1.51, 0.49, 0.98, 1.02], dtype=config.floatX)],\n ),\n)\n\nTestFloorBroadcast = makeBroadcastTester(\n op=floor,\n expected=upcast_float16_ufunc(np.floor),\n good=_good_broadcast_unary_normal_no_complex,\n grad=_grad_broadcast_unary_normal_noint,\n)\n\nTestTruncBroadcast = makeBroadcastTester(\n op=trunc,\n expected=upcast_float16_ufunc(np.trunc),\n good=_good_broadcast_unary_normal_no_complex,\n)\n\nTestRoundHalfToEvenBroadcast = makeBroadcastTester(\n op=round_half_to_even,\n expected=np.round,\n good=_good_broadcast_unary_normal_float_no_complex,\n grad=_grad_broadcast_unary_normal_no_complex_no_corner_case,\n)\n\n# np.vectorize don't handle correctly empty ndarray.\n# see in their file numpy/lib/function_base.py in class vectorize.__call__\n# This happen in float32 mode.\nTestRoundHalfAwayFromZeroBroadcast = makeBroadcastTester(\n 
op=round_half_away_from_zero,\n expected=lambda a: aes.round_half_away_from_zero_vec(a),\n good=_good_broadcast_unary_normal_float_no_empty_no_complex,\n grad=_grad_broadcast_unary_normal_no_complex_no_corner_case,\n)\n\nTestSqrBroadcast = makeBroadcastTester(\n op=sqr,\n expected=np.square,\n good=_good_broadcast_unary_normal,\n grad=_grad_broadcast_unary_normal,\n)\n\nTestExpBroadcast = makeBroadcastTester(\n op=exp,\n expected=upcast_float16_ufunc(np.exp),\n good=dict(\n _good_broadcast_unary_normal,\n int8=[np.arange(-127, 89, dtype=\"int8\")],\n uint8=[np.arange(0, 89, dtype=\"uint8\")],\n uint16=[np.arange(0, 89, dtype=\"uint16\")],\n ),\n grad=_grad_broadcast_unary_normal,\n)\n\nTestExp2Broadcast = makeBroadcastTester(\n op=exp2,\n expected=upcast_float16_ufunc(np.exp2),\n good=_good_broadcast_unary_normal,\n grad=_grad_broadcast_unary_normal,\n)\n\nTestExpm1Broadcast = makeBroadcastTester(\n op=expm1,\n expected=upcast_float16_ufunc(np.expm1),\n good=dict(\n _good_broadcast_unary_normal,\n int8=[np.arange(-127, 89, dtype=\"int8\")],\n uint8=[np.arange(0, 89, dtype=\"uint8\")],\n uint16=[np.arange(0, 89, dtype=\"uint16\")],\n ),\n grad=_grad_broadcast_unary_normal,\n)\n\n\n_grad_broadcast_unary_positive = dict(\n normal=(random_ranged(_eps, 5, (2, 3)),),\n)\n\nTestLogBroadcast = makeBroadcastTester(\n op=log,\n expected=upcast_float16_ufunc(np.log),\n good=_good_broadcast_unary_positive,\n grad=_grad_broadcast_unary_positive,\n)\n\nTestLog2Broadcast = makeBroadcastTester(\n op=log2,\n expected=upcast_float16_ufunc(np.log2),\n good=_good_broadcast_unary_positive,\n grad=_grad_broadcast_unary_positive,\n)\n\nTestLog10Broadcast = makeBroadcastTester(\n op=log10,\n expected=upcast_float16_ufunc(np.log10),\n good=_good_broadcast_unary_positive,\n grad=_grad_broadcast_unary_positive,\n)\n\nTestLog1pBroadcast = makeBroadcastTester(\n op=log1p,\n expected=upcast_float16_ufunc(np.log1p),\n good=_good_broadcast_unary_positive,\n grad=_grad_broadcast_unary_positive,\n)\n\nTestSqrtBroadcast = makeBroadcastTester(\n op=sqrt,\n expected=upcast_float16_ufunc(np.sqrt),\n good=_good_broadcast_unary_positive,\n grad=_grad_broadcast_unary_positive,\n)\n\n_grad_broadcast_unary_wide = dict(\n normal=(random_ranged(-1000, 1000, (2, 3)),),\n)\n\nTestDeg2radBroadcast = makeBroadcastTester(\n op=deg2rad,\n expected=upcast_float16_ufunc(np.deg2rad),\n good=_good_broadcast_unary_normal_no_complex,\n grad=_grad_broadcast_unary_normal_no_complex,\n eps=angle_eps,\n)\n\nTestRad2degBroadcast = makeBroadcastTester(\n op=rad2deg,\n expected=upcast_float16_ufunc(np.rad2deg),\n good=_good_broadcast_unary_normal_no_complex,\n grad=_grad_broadcast_unary_normal_no_complex,\n eps=angle_eps,\n)\n\nTestSinBroadcast = makeBroadcastTester(\n op=sin,\n expected=upcast_float16_ufunc(np.sin),\n good=_good_broadcast_unary_wide,\n grad=_grad_broadcast_unary_wide,\n)\n\n# The actual range is [-1, 1] but the numerical gradient is too\n# unstable near those values\n_grad_broadcast_unary_arcsin = dict(\n normal=(random_ranged(-0.9, 0.9, (2, 3)),),\n)\n\nTestArcsinBroadcast = makeBroadcastTester(\n op=arcsin,\n expected=upcast_float16_ufunc(np.arcsin),\n good=_good_broadcast_unary_arcsin,\n grad=_grad_broadcast_unary_arcsin,\n)\n\nTestCosBroadcast = makeBroadcastTester(\n op=cos,\n expected=upcast_float16_ufunc(np.cos),\n good=_good_broadcast_unary_wide,\n grad=_grad_broadcast_unary_wide,\n)\n\n\ndef test_py_c_match():\n a = TensorType(dtype=\"int8\", broadcastable=(False,))()\n f = function([a], arccos(a), mode=\"DebugMode\")\n # 
This can fail in DebugMode\n f(np.asarray([1, 0, -1], dtype=\"int8\"))\n\n\nTestArccosBroadcast = makeBroadcastTester(\n op=arccos,\n expected=upcast_float16_ufunc(np.arccos),\n good=_good_broadcast_unary_arcsin,\n grad=_grad_broadcast_unary_arcsin,\n)\n\n# We do not want to test around the discontinuity.\n_grad_broadcast_unary_tan = dict(\n normal=(random_ranged(-1.5, 1.5, (2, 3)),),\n shifted=(random_ranged(1.6, 4.6, (2, 3)),),\n)\n\nTestTanBroadcast = makeBroadcastTester(\n op=tan,\n expected=upcast_float16_ufunc(np.tan),\n good=_good_broadcast_unary_tan,\n grad=_grad_broadcast_unary_tan,\n)\n\nTestArctanBroadcast = makeBroadcastTester(\n op=arctan,\n expected=upcast_float16_ufunc(np.arctan),\n good=_good_broadcast_unary_wide,\n grad=_grad_broadcast_unary_wide,\n)\n\n_grad_broadcast_binary_arctan2 = dict(\n same_shapes=(random(2, 3), random(2, 3)),\n scalar=(random(2, 3), random(1, 1)),\n row=(random(2, 3), random(1, 3)),\n column=(random(2, 3), random(2, 1)),\n)\n\nTestArctan2Broadcast = makeBroadcastTester(\n op=arctan2,\n expected=upcast_float16_ufunc(np.arctan2),\n good=_good_broadcast_binary_arctan2,\n grad=_grad_broadcast_binary_arctan2,\n)\n\nTestCoshBroadcast = makeBroadcastTester(\n op=cosh,\n expected=upcast_float16_ufunc(np.cosh),\n good=dict(\n _good_broadcast_unary_normal,\n int8=[np.arange(-89, 90, dtype=\"int8\")],\n uint8=[np.arange(0, 90, dtype=\"uint8\")],\n uint16=[np.arange(0, 90, dtype=\"uint16\")],\n ),\n grad=_grad_broadcast_unary_normal,\n)\n\n_grad_broadcast_unary_arccosh = dict(\n normal=(random_ranged(1 + _eps, 1000, (2, 3)),),\n)\n\nTestArccoshBroadcast = makeBroadcastTester(\n op=arccosh,\n expected=upcast_float16_ufunc(np.arccosh),\n good=_good_broadcast_unary_arccosh,\n grad=_grad_broadcast_unary_arccosh,\n)\n\nTestSinhBroadcast = makeBroadcastTester(\n op=sinh,\n expected=upcast_float16_ufunc(np.sinh),\n good=dict(\n _good_broadcast_unary_normal,\n int8=[np.arange(-89, 90, dtype=\"int8\")],\n uint8=[np.arange(0, 90, dtype=\"uint8\")],\n uint16=[np.arange(0, 90, dtype=\"uint16\")],\n ),\n grad=_grad_broadcast_unary_normal,\n)\n\nTestArcsinhBroadcast = makeBroadcastTester(\n op=arcsinh,\n expected=upcast_float16_ufunc(np.arcsinh),\n good=_good_broadcast_unary_normal,\n grad=_grad_broadcast_unary_normal,\n)\n\nTestTanhBroadcast = makeBroadcastTester(\n op=tanh,\n expected=upcast_float16_ufunc(np.tanh),\n good=_good_broadcast_unary_normal,\n grad=_grad_broadcast_unary_normal,\n)\n\n_grad_broadcast_unary_arctanh = dict(\n normal=(random_ranged(-1 + _eps, 1 - _eps, (2, 3)),),\n)\n\nTestArctanhBroadcast = makeBroadcastTester(\n op=arctanh,\n expected=upcast_float16_ufunc(np.arctanh),\n good=_good_broadcast_unary_arctanh,\n grad=_grad_broadcast_unary_arctanh,\n)\n\n# Complex operations\n_good_complex_from_polar = dict(\n same_shapes=(np.abs(random(2, 3)), random(2, 3)),\n not_same_dimensions=(np.abs(random(2, 2)), random(2)),\n scalar=(np.abs(random(2, 3)), random(1, 1)),\n row=(np.abs(random(2, 3)), random(1, 3)),\n column=(np.abs(random(2, 3)), random(2, 1)),\n integers=(np.abs(integers(2, 3)), integers(2, 3)),\n empty=(np.asarray([], dtype=config.floatX), np.asarray([1], dtype=config.floatX)),\n)\n_grad_complex_from_polar = dict(\n same_shapes=(np.abs(random(2, 3)), random(2, 3)),\n scalar=(np.abs(random(2, 3)), random(1, 1)),\n row=(np.abs(random(2, 3)), random(1, 3)),\n column=(np.abs(random(2, 3)), random(2, 1)),\n)\n\nTestComplexFromPolarBroadcast = makeBroadcastTester(\n op=complex_from_polar,\n expected=lambda r, theta: r * np.cos(theta) + 1j * r * 
np.sin(theta),\n good=_good_complex_from_polar,\n)\n\nTestConjBroadcast = makeBroadcastTester(\n op=conj, expected=np.conj, good=_good_broadcast_unary_normal\n)\n\n\nTestDenseDot = makeTester(\n name=\"DenseDotTester\",\n op=dense_dot,\n expected=lambda x, y: np.dot(x, y),\n checks={},\n good=dict(\n correct1=(random(5, 7), random(7, 5)),\n correct2=(random(5, 7), random(7, 9)),\n correct3=(random(5, 7), random(7)),\n correct4=(random(5), random(5, 7)),\n mixed1=(random(5).astype(\"float32\"), random(5, 7)),\n mixed2=(random(5).astype(\"float64\"), random(5, 7)),\n complex1=(random_complex(5, 7), random_complex(7)),\n complex2=(random(5, 7), random_complex(7)),\n complex3=(random_complex(5, 7), random(7)),\n empty1=(\n np.asarray([], dtype=config.floatX),\n np.asarray([], dtype=config.floatX),\n ),\n empty2=(random(5, 0), random(0, 2)),\n empty3=(random(0, 5), random(5, 0)),\n ),\n bad_build=dict(),\n bad_runtime=dict(\n bad1=(random(5, 7), random(5, 7)), bad2=(random(5, 7), random(8, 3))\n ),\n)\n\n\ndef test_isnan():\n for x in [matrix(), imatrix(), matrix(dtype=\"bool\")]:\n y = isnan(x)\n assert isinstance(y.owner.op, Elemwise) == (x.dtype not in discrete_dtypes)\n assert y.dtype == \"bool\"\n\n # Test c code generator even for int type.\n y = isnan_(x)\n assert isinstance(y.owner.op, Elemwise)\n assert y.dtype == \"bool\"\n f = function([x], y, allow_input_downcast=True)\n f([[0, 1, 2]])\n\n\nclass TestMaxAndArgmax:\n def setup_method(self):\n MaxAndArgmax.debug = 0\n\n def test_basic(self):\n n = as_tensor_variable(5.0)\n v, i = eval_outputs(max_and_argmax(n))\n assert v == 5.0\n assert i == 0\n assert i.dtype == \"int64\"\n v = eval_outputs(max_and_argmax(n)[0].shape)\n assert len(v) == 0\n v = eval_outputs(max_and_argmax(n)[1].shape)\n assert len(v) == 0\n\n def test_basic_1(self):\n n = as_tensor_variable([1, 2, 3, 2, -6])\n v, i = eval_outputs(max_and_argmax(n))\n assert v == 3\n assert i == 2\n assert i.dtype == \"int64\"\n v = eval_outputs(max_and_argmax(n)[0].shape)\n assert len(v) == 0\n\n def test_basic_2(self):\n data = random(2, 3)\n n = as_tensor_variable(data)\n for (axis, np_axis) in [\n (-1, -1),\n (0, 0),\n (1, 1),\n (None, None),\n ([0, 1], None),\n ([1, 0], None),\n (NoneConst.clone(), None),\n (constant(0), 0),\n ]:\n v, i = eval_outputs(max_and_argmax(n, axis))\n assert i.dtype == \"int64\"\n assert np.all(v == np.max(data, np_axis))\n assert np.all(i == np.argmax(data, np_axis))\n v_shape = eval_outputs(max_and_argmax(n, axis)[0].shape)\n assert tuple(v_shape) == np.max(data, np_axis).shape\n\n def test_basic_2_float16(self):\n # Test negative values and bigger range to make sure numpy don't do the argmax as on uint16\n data = (random(20, 30).astype(\"float16\") - 0.5) * 20\n n = shared(data)\n for (axis, np_axis) in [\n (-1, -1),\n (0, 0),\n (1, 1),\n (None, None),\n ([0, 1], None),\n ([1, 0], None),\n (NoneConst.clone(), None),\n (constant(0), 0),\n ]:\n v, i = eval_outputs(max_and_argmax(n, axis), (MaxAndArgmax,))\n assert i.dtype == \"int64\"\n assert np.all(v == np.max(data, np_axis))\n assert np.all(i == np.argmax(data, np_axis))\n v_shape = eval_outputs(max_and_argmax(n, axis)[0].shape)\n assert tuple(v_shape) == np.max(data, np_axis).shape\n\n def test_basic_2_invalid(self):\n n = as_tensor_variable(random(2, 3))\n with pytest.raises(ValueError):\n eval_outputs(max_and_argmax(n, 3))\n\n n = as_tensor_variable(random(2, 3))\n with pytest.raises(ValueError):\n eval_outputs(max_and_argmax(n, -3))\n\n def test_basic_2_valid_neg(self):\n n = 
as_tensor_variable(random(2, 3))\n v, i = eval_outputs(max_and_argmax(n, -1))\n assert i.dtype == \"int64\"\n assert v.shape == (2,)\n assert i.shape == (2,)\n assert np.all(v == np.max(n.value, -1))\n assert np.all(i == np.argmax(n.value, -1))\n v, i = eval_outputs(max_and_argmax(n, -2))\n assert i.dtype == \"int64\"\n assert v.shape == (3,)\n assert i.shape == (3,)\n assert np.all(v == np.max(n.value, -2))\n assert np.all(i == np.argmax(n.value, -2))\n v = eval_outputs(max_and_argmax(n, -1)[0].shape)\n assert v == (2)\n v = eval_outputs(max_and_argmax(n, -2)[0].shape)\n assert v == (3)\n\n def test_basic_3(self):\n data = random(2, 3, 4)\n n = as_tensor_variable(data)\n for (axis, np_axis) in [\n (-1, -1),\n (0, 0),\n (1, 1),\n (None, None),\n ([0, 1, 2], None),\n ([1, 2, 0], None),\n ]:\n v, i = eval_outputs(max_and_argmax(n, axis))\n assert i.dtype == \"int64\"\n assert np.all(v == np.max(data, np_axis))\n assert np.all(i == np.argmax(data, np_axis))\n v = eval_outputs(max_and_argmax(n, axis)[0].shape)\n assert tuple(v) == np.max(data, np_axis).shape\n\n def test_arg_grad(self):\n # The test checks that the gradient of argmax(x).sum() is 0\n\n x = matrix()\n cost = argmax(x, axis=0).sum()\n gx = grad(cost, x)\n val = get_scalar_constant_value(gx)\n assert val == 0.0\n\n def test_grad(self):\n data = random(2, 3)\n n = as_tensor_variable(data)\n\n def safe_verify_grad(func, data):\n # Wrapper around 'verify_grad' that picks a proper value for epsilon.\n #\n # This is needed because 'verify_grad' may fail when its epsilon is\n # too large, due to the fact the argmax is not continuous.\n # We make sure epsilon is less than the minimum absolute value found\n # in the matrix of pairwise differences between all elements in the\n # data. This way, the argmax will not change when adding epsilon.\n\n # 'data' is a one-element list.\n (data_tensor,) = data\n # Flatten it into a 1D vector.\n data_vector = data_tensor.flatten()\n # Compute pairwise absolute differences.\n diff = np.abs(data_vector.reshape((-1, 1)) - data_vector)\n # Alter the diagonal to avoid a zero minimum.\n for i in range(len(diff)):\n diff[i, i] = 1\n # Find an appropriate epsilon.\n eps = builtins.min(numeric_grad.type_eps[config.floatX], diff.min() / 2)\n # Run gradient verification.\n utt.verify_grad(func, data, eps=eps)\n\n def check_grad_max(data, max_grad_data, axis=None):\n # Why this is needed? 
verify_grad is not enough?\n # This works only for axis in [0, None].\n assert axis in [0, None]\n z = np.zeros_like(data)\n z = z.flatten()\n argmax = np.argmax(data, axis=axis)\n if argmax.ndim == 0:\n z[argmax] += 1\n else:\n for id, v in enumerate(argmax):\n z[v * np.prod(data.shape[data.ndim - 1 : axis : -1]) + id] += 1\n\n z = z.reshape(data.shape)\n assert np.all(max_grad_data == z)\n\n for axis in (-1, 0, 1, None):\n for j in range(2):\n safe_verify_grad(lambda v: max_and_argmax(v, axis=axis)[j], [data])\n if axis != 1:\n safe_verify_grad(\n lambda v: max_and_argmax(v.flatten(), axis=axis)[j], [data]\n )\n if axis in (0, None):\n check_grad_max(\n data,\n eval_outputs(grad(max_and_argmax(n, axis=axis)[0].sum(), n)),\n axis=axis,\n )\n check_grad_max(data, eval_outputs(grad(max_and_argmax(n.flatten())[0], n)))\n\n # Test 3d inner dimensions\n data = random(3, 4, 5)\n\n for i in [0, 1, 2]:\n safe_verify_grad(lambda v: max_and_argmax(v, axis=[i])[0], [data])\n safe_verify_grad(lambda v: max_and_argmax(v, axis=[i])[1], [data])\n\n # Test 4d inner dimensions\n data = random(2, 3, 4, 5)\n\n for i in [0, 1, 2, 3]:\n safe_verify_grad(lambda v: max_and_argmax(v, axis=[i])[0], [data])\n safe_verify_grad(lambda v: max_and_argmax(v, axis=[i])[1], [data])\n\n # Test grad with multiple axes\n for i in [[0, 1], [0, 0]]:\n safe_verify_grad(lambda v: max_and_argmax(v, axis=i)[0], [data])\n safe_verify_grad(lambda v: max_and_argmax(v, axis=i)[1], [data])\n\n def test_preserve_broadcastable(self):\n # Ensure the original broadcastable flags are preserved by Max/Argmax.\n x = matrix().dimshuffle(\"x\", 0, \"x\", 1, \"x\")\n y = x.max(axis=1)\n assert y.type.broadcastable == (True, True, False, True)\n\n def test_multiple_axes(self):\n data = np.arange(24).reshape(3, 2, 4)\n x = as_tensor_variable(data)\n v, i = eval_outputs(max_and_argmax(x, [1, -1]))\n assert np.all(v == np.array([7, 15, 23]))\n assert np.all(i == np.array([7, 7, 7]))\n\n v = eval_outputs(max_and_argmax(x, [1, -1])[0].shape)\n assert tuple(v) == np.max(data, (1, -1)).shape\n\n def test_zero_shape(self):\n x = matrix()\n m, i = max_and_argmax(x, axis=1)\n f = function([x], [m, i])\n xv = np.zeros((0, 4), dtype=config.floatX)\n mv, iv = f(xv)\n assert mv.shape == (0,)\n assert iv.shape == (0,)\n\n def test_numpy_input(self):\n ar = np.array([1, 2, 3])\n max_aet, argmax_aet = max_and_argmax(ar, axis=None)\n assert max_aet.eval(), 3\n assert argmax_aet.eval(), 2\n\n\nclass TestArgminArgmax:\n def setup_method(self):\n MaxAndArgmax.debug = 0\n\n def test_scalar(self):\n for fct in [argmin, argmax]:\n n = as_tensor_variable(5.0)\n i = eval_outputs(fct(n))\n assert i == 0\n v = eval_outputs(fct(n).shape)\n assert len(v) == 0\n\n def test_list(self):\n n = as_tensor_variable([1, 2, 3, 2, -6])\n i = eval_outputs(argmin(n))\n assert i == 4\n v = eval_outputs(argmin(n).shape)\n assert len(v) == 0\n\n n = as_tensor_variable([1, 2, 3, 2, -6])\n i = eval_outputs(argmax(n))\n assert i == 2\n v = eval_outputs(argmax(n).shape)\n assert len(v) == 0\n\n def test2(self):\n data = random(2, 3)\n n = as_tensor_variable(data)\n for fct, nfct in [(argmax, np.argmax), (argmin, np.argmin)]:\n for (axis, np_axis) in [\n (-1, -1),\n (0, 0),\n (1, 1),\n (None, None),\n ([0, 1], None),\n ([1, 0], None),\n ]:\n v = eval_outputs(fct(n, axis))\n assert np.all(v == nfct(data, np_axis))\n v_shape = eval_outputs(fct(n, axis).shape)\n assert tuple(v_shape) == nfct(data, np_axis).shape\n\n def test2_float16(self):\n # Test negative values and bigger range to make sure 
numpy don't do the argmax as on uint16\n data = (random(20, 30).astype(\"float16\") - 0.5) * 20\n n = shared(data)\n mode = get_default_mode().including(\"local_max_and_argmax\", \"uncanonicalize\")\n for fct, nfct in [(argmax, np.argmax), (argmin, np.argmin)]:\n for (axis, np_axis) in [\n (-1, -1),\n (0, 0),\n (1, 1),\n (None, None),\n ([0, 1], None),\n ([1, 0], None),\n ]:\n v = eval_outputs(fct(n, axis), (Argmax,), mode=mode)\n assert np.all(v == nfct(data, np_axis))\n v_shape = eval_outputs(fct(n, axis).shape, mode=mode)\n assert tuple(v_shape) == nfct(data, np_axis).shape\n\n def test2_invalid(self):\n for fct, nfct in [(argmax, np.argmax), (argmin, np.argmin)]:\n n = as_tensor_variable(random(2, 3))\n with pytest.raises(ValueError):\n eval_outputs(fct(n, 3))\n with pytest.raises(ValueError):\n eval_outputs(fct(n, -3))\n\n def test2_valid_neg(self):\n for fct, nfct in [(argmax, np.argmax), (argmin, np.argmin)]:\n n = as_tensor_variable(random(2, 3))\n i = eval_outputs(fct(n, -1))\n assert i.shape == (2,)\n assert np.all(i == nfct(n.value, -1))\n i = eval_outputs(fct(n, -2))\n assert i.shape == (3,)\n assert np.all(i == nfct(n.value, -2))\n\n v = eval_outputs(fct(n, -1).shape)\n assert v == (2)\n v = eval_outputs(fct(n, -2).shape)\n assert v == (3)\n\n def test3(self):\n data = random(2, 3, 4)\n n = as_tensor_variable(data)\n for fct, nfct in [(argmax, np.argmax), (argmin, np.argmin)]:\n for (axis, np_axis) in [\n (-1, -1),\n (0, 0),\n (1, 1),\n (2, 2),\n (None, None),\n ([0, 1, 2], None),\n ([1, 0, 2], None),\n ]:\n v = eval_outputs(fct(n, axis))\n assert np.all(v == nfct(data, np_axis))\n v_shape = eval_outputs(fct(n, axis).shape)\n assert tuple(v_shape) == nfct(data, np_axis).shape\n\n def test_grad_argmin(self):\n data = random(2, 3)\n n = as_tensor_variable(data)\n n.name = \"n\"\n\n # test grad of argmin\n utt.verify_grad(lambda v: argmin(v, axis=-1), [data])\n\n utt.verify_grad(lambda v: argmin(v, axis=[0]), [data])\n\n utt.verify_grad(lambda v: argmin(v, axis=[1]), [data])\n\n utt.verify_grad(lambda v: argmin(v.flatten()), [data])\n\n try:\n cost = argmin(n, axis=-1)\n cost.name = None\n grad(cost, n)\n raise Exception(\"Expected an error\")\n except TypeError:\n pass\n\n def test_grad_argmax(self):\n data = random(2, 3)\n n = as_tensor_variable(data)\n\n # test grad of argmax\n utt.verify_grad(lambda v: argmax(v, axis=-1), [data])\n\n utt.verify_grad(lambda v: argmax(v, axis=[0]), [data])\n\n utt.verify_grad(lambda v: argmax(v, axis=[1]), [data])\n\n utt.verify_grad(lambda v: argmax(v.flatten()), [data])\n\n try:\n grad(argmax(n, axis=-1), n)\n raise Exception(\"Expected an error\")\n except TypeError:\n pass\n\n def test_uint(self):\n for dtype in (\"uint8\", \"uint16\", \"uint32\", \"uint64\"):\n itype = np.iinfo(dtype)\n data = np.array([itype.min + 3, itype.min, itype.max - 5, itype.max], dtype)\n n = as_tensor_variable(data)\n i = eval_outputs(argmin(n))\n assert i == 1\n i = eval_outputs(argmax(n))\n assert i == 3\n\n def test_bool(self):\n data = np.array([True, False], \"bool\")\n n = as_tensor_variable(data)\n i = eval_outputs(argmin(n))\n assert i == 1\n i = eval_outputs(argmax(n))\n assert i == 0\n\n\nclass TestMinMax:\n def setup_method(self):\n MaxAndArgmax.debug = 0\n\n def test_scalar(self):\n for fct in [max, min]:\n n = as_tensor_variable(5.0)\n v = eval_outputs(fct(n))\n assert v == 5.0\n\n v = eval_outputs(fct(n).shape)\n assert len(v) == 0\n\n def test_list(self):\n for fct, nfct in [(max, np.max), (min, np.min)]:\n n = as_tensor_variable([1, 2, 3, 2, 
-6])\n v = eval_outputs([fct(n)])\n assert v == nfct(n.value)\n\n v = eval_outputs(fct(n).shape)\n assert len(v) == 0\n\n def test2(self):\n data = random(2, 3)\n n = as_tensor_variable(data)\n for fct, nfct in [(max, np.max), (min, np.min)]:\n for (axis, np_axis) in [\n (-1, -1),\n (0, 0),\n (1, 1),\n (None, None),\n ([0, 1], None),\n ([1, 0], None),\n ]:\n v = eval_outputs(fct(n, axis))\n assert np.all(v == nfct(data, np_axis))\n v_shape = eval_outputs(fct(n, axis).shape)\n assert tuple(v_shape) == nfct(data, np_axis).shape\n\n def test2_invalid(self):\n for fct in [max, min]:\n n = as_tensor_variable(random(2, 3))\n with pytest.raises(ValueError):\n eval_outputs(fct(n, 3))\n with pytest.raises(ValueError):\n eval_outputs(fct(n, -3))\n\n def test2_valid_neg(self):\n for fct, nfct in [(max, np.max), (min, np.min)]:\n n = as_tensor_variable(random(2, 3))\n v = eval_outputs(fct(n, -1))\n assert v.shape == (2,)\n assert np.all(v == nfct(n.value, -1))\n v = eval_outputs(fct(n, -2))\n assert v.shape == (3,)\n assert np.all(v == nfct(n.value, -2))\n\n v = eval_outputs(fct(n, -1).shape)\n assert v == (2)\n v = eval_outputs(fct(n, -2).shape)\n assert v == (3)\n\n def test3(self):\n # Test with 1 axis or all axis out of 3 dims\n data = random(2, 3, 4)\n n = as_tensor_variable(data)\n for fct, nfct in [(max, np.max), (min, np.min)]:\n for (axis, np_axis) in [\n (-1, -1),\n (0, 0),\n (1, 1),\n (2, 2),\n (None, None),\n ([0, 1, 2], None),\n ([1, 0, 2], None),\n ]:\n v = eval_outputs(fct(n, axis))\n assert np.all(v == nfct(data, np_axis))\n v_shape = eval_outputs(fct(n, axis).shape)\n assert tuple(v_shape) == nfct(data, np_axis).shape\n\n def test3b(self):\n # Test with 2 axis out of 3 dims\n data = random(2, 3, 4)\n n = as_tensor_variable(data)\n for fct, nfct in [(max, np.max), (min, np.min)]:\n for axis in [[0, 1], [1, 2], [0, 2]]:\n v = eval_outputs(fct(n, axis))\n np_v = nfct(nfct(data, axis[1]), axis[0])\n assert np.all(v == np_v)\n v_shape = eval_outputs(fct(n, axis).shape)\n assert tuple(v_shape) == np_v.shape\n\n def test_grad_max(self):\n data = random(2, 3)\n n = as_tensor_variable(data)\n\n def check_grad_max(data, max_grad_data, axis=None):\n # This work only for axis in [0,None]\n assert axis in [0, None]\n z = np.zeros_like(data)\n z = z.flatten()\n argmax = np.argmax(data, axis=axis)\n if argmax.ndim == 0:\n z[np.argmax(data, axis=axis)] += 1\n else:\n for id, v in enumerate(argmax):\n z[v * np.prod(data.shape[data.ndim - 1 : axis : -1]) + id] += 1\n\n z = z.reshape(data.shape)\n assert np.all(max_grad_data == z)\n\n # test grad of max\n # axis is the last one\n utt.verify_grad(lambda v: max(v, axis=-1), [data])\n\n utt.verify_grad(lambda v: max(v, axis=[0]), [data])\n check_grad_max(data, eval_outputs(grad(max(n, axis=0).sum(), n)), axis=0)\n\n utt.verify_grad(lambda v: max(v, axis=[1]), [data])\n # check_grad_max(data,eval_outputs(grad(max(n,axis=1),n)),axis=1)\n\n utt.verify_grad(lambda v: max(v.flatten()), [data])\n check_grad_max(data, eval_outputs(grad(max(n.flatten()), n)))\n\n def test_grad_min(self):\n data = random(2, 3)\n n = as_tensor_variable(data)\n\n def check_grad_min(data, min_grad_data, axis=None):\n # This work only for axis in [0, None]\n assert axis in [0, None]\n z = np.zeros_like(data)\n z = z.flatten()\n argmin = np.argmin(data, axis=axis)\n if argmin.ndim == 0:\n z[np.argmin(data, axis=axis)] += 1\n else:\n for id, v in enumerate(argmin):\n z[v * np.prod(data.shape[data.ndim - 1 : axis : -1]) + id] += 1\n\n z = z.reshape(data.shape)\n assert 
np.all(min_grad_data == z)\n\n # test grad of min\n # axis is the last one\n utt.verify_grad(lambda v: min(v, axis=-1), [data])\n\n utt.verify_grad(lambda v: min(v, axis=[0]), [data])\n check_grad_min(data, eval_outputs(grad(min(n, axis=0).sum(), n)), axis=0)\n\n utt.verify_grad(lambda v: min(v, axis=[1]), [data])\n # check_grad_min(data,eval_outputs(grad(min(n,axis=1),n)),axis=1)\n\n utt.verify_grad(lambda v: min(v.flatten()), [data])\n check_grad_min(data, eval_outputs(grad(min(n.flatten()), n)))\n\n def _grad_list(self):\n # Test the gradient when we have multiple axis at the same time.\n #\n # This not implemented, so we disable the test. See ticket:\n # http://www.assembla.com/spaces/aesara/tickets/511\n data = random(2, 3)\n for fct in [max_and_argmax, max, min]:\n utt.verify_grad(lambda v: fct(v, axis=[0, 1]), [data])\n # n = as_tensor_variable(data)\n # check_grad_max(data, eval_outputs(grad(max_and_argmax(n,\n # axis=1)[0], n)),axis=1)\n\n def test_uint(self):\n for dtype in (\"uint8\", \"uint16\", \"uint32\", \"uint64\"):\n itype = np.iinfo(dtype)\n data = np.array([itype.min + 3, itype.min, itype.max - 5, itype.max], dtype)\n n = as_tensor_variable(data)\n assert min(n).dtype == dtype\n i = eval_outputs(min(n))\n assert i == itype.min\n assert max(n).dtype == dtype\n i = eval_outputs(max(n))\n assert i == itype.max\n\n def test_bool(self):\n data = np.array([True, False], \"bool\")\n n = as_tensor_variable(data)\n assert min(n).dtype == \"bool\"\n i = eval_outputs(min(n))\n assert i.ndim == 0\n assert not np.any(i)\n assert max(n).dtype == \"bool\"\n i = eval_outputs(max(n))\n assert i.ndim == 0\n assert np.all(i)\n\n\nTestClip = makeTester(\n name=\"ClipTester\",\n op=clip,\n expected=lambda x, y, z: np.clip(x, y, z),\n good=dict(\n correct1=(\n (5 * random(5, 5)).astype(\"float32\"),\n np.array(-1, dtype=\"float32\"),\n np.array(1, dtype=\"float32\"),\n ),\n correct2=(\n (5 * random(5, 5)).astype(\"float64\"),\n np.array(-1, dtype=\"float64\"),\n np.array(1, dtype=\"float64\"),\n ),\n correct3=(\n integers(5, 5).astype(\"int8\"),\n np.array(-1, dtype=\"int8\"),\n np.array(1, dtype=\"int8\"),\n ),\n correct4=(\n integers(5, 5).astype(\"int16\"),\n np.array(-1, dtype=\"int16\"),\n np.array(1, dtype=\"int16\"),\n ),\n correct5=(\n integers(5, 5).astype(\"int32\"),\n np.array(-1, dtype=\"int32\"),\n np.array(1, dtype=\"int32\"),\n ),\n correct6=(\n integers(5, 5).astype(\"int64\"),\n np.array(-1, dtype=\"int64\"),\n np.array(1, dtype=\"int64\"),\n ),\n # min > max case moved below as numpy has changed\n correct8=(\n integers(0, 5).astype(\"uint8\"),\n np.array(2, dtype=\"uint8\"),\n np.array(4, dtype=\"uint8\"),\n ),\n correct9=(\n integers(0, 5).astype(\"uint16\"),\n np.array(2, dtype=\"uint16\"),\n np.array(4, dtype=\"uint16\"),\n ),\n )\n # I can't think of any way to make this fail at runtime\n)\n\n\n# min > max case - numpy.clip has changed but we haven't\n# https://github.com/Theano/Theano/issues/6715\nTestBackwardsClip = makeTester(\n name=\"BackwardsClipTester\",\n op=clip,\n expected=lambda x, y, z: np.where(x < y, y, np.minimum(x, z)),\n good=dict(\n correct7=(\n (5 * random(5, 5)).astype(\"float64\"),\n np.array(1, dtype=\"float64\"),\n np.array(-1, dtype=\"float64\"),\n ),\n ),\n)\n\n\nclass TestClip:\n def test_complex_value(self):\n for dtype in [\"complex64\", \"complex128\"]:\n a = vector(dtype=dtype)\n b = scalar()\n c = scalar()\n with pytest.raises(TypeError):\n clip(a, b, c)\n\n def test_clip_repeat_grad(self):\n # This is testing for the issue #633\n x, y = 
vectors(\"xy\")\n a = clip(x, y, x)\n g = grad(a.sum(), x)\n fn = function([x, y], [g])\n\n # Test the other way around as well\n a2 = clip(x, x, y)\n g2 = grad(a2.sum(), x)\n fn2 = function([x, y], [g2])\n\n # Test for the equal case too\n a3 = clip(x, x, x)\n g3 = grad(a3.sum(), x)\n fn3 = function([x], [g3])\n\n rng = np.random.default_rng(utt.fetch_seed())\n\n nvals = 50\n xval = rng.random(nvals).astype(config.floatX)\n # To ensure that the min < x\n yval_mn = rng.random(nvals).astype(config.floatX) - 1.0\n\n # To ensure that the max > x\n yval_mx = rng.random(nvals).astype(config.floatX) + 1.0\n\n (aval,) = fn(xval, yval_mn)\n (aval2,) = fn2(xval, yval_mx)\n (aval3,) = fn3(xval)\n assert np.all(aval == 1.0)\n assert np.all(aval2 == 1.0)\n assert np.all(aval3 == 1.0)\n\n def test_clip_repeat_verify_grad(self):\n # Additional tests for issue gh-633\n utt.verify_grad(op=lambda x: clip(x, 0, x), pt=[random_nonzero(3, 7)])\n\n utt.verify_grad(op=lambda x: clip(x, x, 0), pt=[random_nonzero(3, 7)])\n\n utt.verify_grad(op=lambda x: clip(0, x, x), pt=[random_nonzero(3, 7)])\n\n utt.verify_grad(op=lambda x: clip(x, x, x), pt=[random_nonzero(3, 7)])\n\n\nclass TestOuter:\n rng = np.random.default_rng(utt.fetch_seed())\n\n def test_outer(self):\n for m in range(4):\n for n in range(4):\n x = tensor(dtype=\"floatX\", broadcastable=(False,) * m)\n y = tensor(dtype=\"floatX\", broadcastable=(False,) * n)\n s1 = self.rng.integers(1, 10, m)\n s2 = self.rng.integers(1, 10, n)\n v1 = np.asarray(self.rng.random(s1)).astype(config.floatX)\n v2 = np.asarray(self.rng.random(s2)).astype(config.floatX)\n o = outer(x, y).eval({x: v1, y: v2})\n utt.assert_allclose(o, np.outer(v1, v2))\n\n def test_grad(self):\n # Test the combined graph of the graph of outer\n # with broadcastable dimensions, just in case.\n for shp0, shp1 in [\n ((1,), (2,)),\n ((3,), (1,)),\n ((1,), (1,)),\n ((3,), (2,)),\n ((3, 2), (1, 1)),\n ((3, 2), (1, 4)),\n ((3, 2), (4, 1)),\n ((3, 2), (4, 5)),\n ((1, 2), (4, 5)),\n ((3, 1), (4, 5)),\n ((1, 1), (4, 5)),\n ((1, 1), (1, 1)),\n ]:\n data0 = self.rng.random(shp0).astype(config.floatX)\n data1 = self.rng.random(shp1).astype(config.floatX)\n utt.verify_grad(outer, [data0, data1])\n\n\nclass TestComparison:\n # Test <, >, <=, >=, == and !=\n #\n # Test that we can do the comparison with different\n # combination of tensor(shared and constant variable) with\n # ndarray. ndarray cmp tensor was crashing. In a NumPy PR (should\n # be in the release 1.8 of NumPy), it will work. 
So we assert it\n # work(futur behavior) or raise an error(current NumPy release).\n def setup_method(self):\n self.mode = None\n self.shared = shared\n self.dtypes = [\"float64\", \"float32\", \"complex64\", \"complex128\"]\n\n def inplace_func(self, inputs, outputs, check_isfinite=None):\n mode = self.mode\n if check_isfinite is False:\n if mode is None:\n mode = get_default_mode()\n mode.check_isfinite = False\n f = inplace_func(inputs, outputs, mode=mode)\n return f\n\n def test_gt(self):\n for dtype in self.dtypes:\n l = np.asarray([0.0, -1.0, 1.0], dtype=dtype)\n r = np.asarray([0.0, 1.0, -1.0], dtype=dtype)\n for x, y, err in [\n (self.shared(l.astype(dtype)), self.shared(r.astype(dtype)), False),\n (l, self.shared(r.astype(dtype)), True),\n (constant(l), self.shared(r.astype(dtype)), False),\n (self.shared(l.astype(dtype)), r, False),\n (self.shared(l.astype(dtype)), constant(r), False),\n ]:\n try:\n fn = self.inplace_func([], x > y)\n v = fn()\n assert np.all(v == (l > r)), (v, (l > r))\n except TypeError:\n assert err\n\n def test_lt(self):\n for dtype in self.dtypes:\n l = np.asarray([0.0, -1.0, 1.0], dtype=dtype)\n r = np.asarray([0.0, 1.0, -1.0], dtype=dtype)\n for x, y, err in [\n (self.shared(l.astype(dtype)), self.shared(r.astype(dtype)), False),\n (l, self.shared(r.astype(dtype)), True),\n (constant(l), self.shared(r.astype(dtype)), False),\n (self.shared(l.astype(dtype)), r, False),\n (self.shared(l.astype(dtype)), constant(r), False),\n ]:\n try:\n fn = self.inplace_func([], x < y)\n v = fn()\n assert np.all(v == (l < r)), (v, (l < r))\n except TypeError:\n assert err\n\n def test_le(self):\n for dtype in self.dtypes:\n l = np.asarray([0.0, -1.0, 1.0], dtype=dtype)\n r = np.asarray([0.0, 1.0, -1.0], dtype=dtype)\n for x, y, err in [\n (self.shared(l.astype(dtype)), self.shared(r.astype(dtype)), False),\n (l, self.shared(r.astype(dtype)), True),\n (constant(l), self.shared(r.astype(dtype)), False),\n (self.shared(l.astype(dtype)), r, False),\n (self.shared(l.astype(dtype)), constant(r), False),\n ]:\n try:\n fn = self.inplace_func([], x <= y)\n v = fn()\n assert np.all(v == (l <= r)), (v, (l <= r))\n except TypeError:\n assert err\n\n def test_ge(self):\n for dtype in self.dtypes:\n l = np.asarray([0.0, -1.0, 1.0], dtype=dtype)\n r = np.asarray([0.0, 1.0, -1.0], dtype=dtype)\n for x, y, err in [\n (self.shared(l.astype(dtype)), self.shared(r.astype(dtype)), False),\n (l, self.shared(r.astype(dtype)), True),\n (constant(l), self.shared(r.astype(dtype)), False),\n (self.shared(l.astype(dtype)), r, False),\n (self.shared(l.astype(dtype)), constant(r), False),\n ]:\n try:\n fn = self.inplace_func([], x >= y)\n v = fn()\n assert np.all(v == (l >= r)), (v, (l >= r))\n except TypeError:\n assert err\n\n def test_eq(self):\n for dtype in self.dtypes:\n l = np.asarray([0.0, -1.0, 1.0], dtype=dtype)\n r = np.asarray([0.0, 1.0, -1.0], dtype=dtype)\n for x, y, err in [\n (self.shared(l.astype(dtype)), self.shared(r.astype(dtype)), False),\n (l, self.shared(r.astype(dtype)), True),\n (constant(l), self.shared(r.astype(dtype)), False),\n (self.shared(l.astype(dtype)), r, False),\n (self.shared(l.astype(dtype)), constant(r), False),\n ]:\n try:\n fn = self.inplace_func([], eq(x, y))\n v = fn()\n assert np.all(v == (l == r)), (v, (l == r))\n except TypeError:\n assert err\n\n def test_neq(self):\n for dtype in self.dtypes:\n l = np.asarray([0.0, -1.0, 1.0], dtype=dtype)\n r = np.asarray([0.0, 1.0, -1.0], dtype=dtype)\n for x, y, err in [\n (self.shared(l.astype(dtype)), 
self.shared(r.astype(dtype)), False),\n (l, self.shared(r.astype(dtype)), True),\n (constant(l), self.shared(r.astype(dtype)), False),\n (self.shared(l.astype(dtype)), r, False),\n (self.shared(l.astype(dtype)), constant(r), False),\n ]:\n try:\n fn = self.inplace_func([], neq(x, y))\n v = fn()\n assert np.all(v == (l != r)), (v, (l != r))\n except TypeError:\n assert err\n\n def test_isclose(self):\n for dtype in self.dtypes:\n l = np.asarray(\n [0.0, 1.0, -1.0, 0.0, np.nan, np.inf, -np.inf, np.inf], dtype=dtype\n )\n r = np.asarray(\n [0.0, 1.0001, -1.000000000001, np.nan, np.nan, np.inf, np.inf, 0.0],\n dtype=dtype,\n )\n for x, y, err in [\n (self.shared(l.astype(dtype)), self.shared(r.astype(dtype)), False),\n (l, self.shared(r.astype(dtype)), True),\n (constant(l), self.shared(r.astype(dtype)), False),\n (self.shared(l.astype(dtype)), r, False),\n (self.shared(l.astype(dtype)), constant(r), False),\n ]:\n try:\n o1 = isclose(x, y, equal_nan=False)\n fn1 = self.inplace_func([], o1, check_isfinite=False)\n\n o2 = isclose(x, y, equal_nan=True)\n fn2 = self.inplace_func([], o2, check_isfinite=False)\n\n v1 = fn1()\n v2 = fn2()\n assert np.all(\n v1\n == np.asarray(\n [True, False, True, False, False, True, False, False],\n dtype=\"bool\",\n )\n )\n assert np.all(\n v2\n == np.asarray(\n [True, False, True, False, True, True, False, False],\n dtype=\"bool\",\n )\n )\n except TypeError:\n if not dtype.startswith(\"complex\"):\n raise\n assert err\n\n def test_allclose(self):\n # equal_nan argument not in current version of numpy allclose,\n # force it to False.\n for dtype in self.dtypes:\n l = np.asarray(\n [0.0, 1.0, -1.0, 0.0, np.nan, np.inf, -np.inf, np.inf], dtype=dtype\n )\n r = np.asarray(\n [0.0, 1.0001, -1.000000000001, np.nan, np.nan, np.inf, np.inf, 0.0],\n dtype=dtype,\n )\n for x, y, err in [\n (self.shared(l.astype(dtype)), self.shared(r.astype(dtype)), False),\n (l, self.shared(r.astype(dtype)), True),\n (constant(l), self.shared(r.astype(dtype)), False),\n (self.shared(l.astype(dtype)), r, False),\n (self.shared(l.astype(dtype)), constant(r), False),\n ]:\n try:\n fn = self.inplace_func(\n [], allclose(x, y, equal_nan=False), check_isfinite=False\n )\n v = fn()\n assert np.all(v == np.allclose(l, r))\n except TypeError:\n if not dtype.startswith(\"complex\"):\n assert err\n\n\nclass TestBitwise:\n dtype = [\n \"int8\",\n \"int16\",\n \"int32\",\n \"int64\",\n ]\n\n def test_or(self):\n for dtype in self.dtype:\n x, y = vector(dtype=dtype), vector(dtype=dtype)\n fn = inplace_func([x, y], x | y)\n l = _asarray([0, 0, 1, 1], dtype=dtype)\n r = _asarray([0, 1, 0, 1], dtype=dtype)\n v = fn(l, r)\n assert np.all(v == (operator.or_(l, r))), (l, r, v)\n\n def test_XOR(self):\n for dtype in self.dtype:\n x, y = vector(dtype=dtype), vector(dtype=dtype)\n fn = inplace_func([x, y], x ^ y)\n l = _asarray([0, 0, 1, 1], dtype=dtype)\n r = _asarray([0, 1, 0, 1], dtype=dtype)\n v = fn(l, r)\n assert np.all(v == (operator.xor(l, r))), (l, r, v)\n\n def test_and(self):\n for dtype in self.dtype:\n x, y = vector(dtype=dtype), vector(dtype=dtype)\n fn = inplace_func([x, y], x & y)\n l = _asarray([0, 0, 1, 1], dtype=dtype)\n r = _asarray([0, 1, 0, 1], dtype=dtype)\n v = fn(l, r)\n assert np.all(v == (operator.and_(l, r))), (l, r, v)\n\n def test_inv(self):\n for dtype in self.dtype:\n x = vector(dtype=dtype)\n fn = inplace_func([x], ~x)\n for l in [\n [0, 0, 1, 1],\n [0, 1, 0, 1],\n [0, 0, 1, 1],\n [0, 1, 0, 1],\n [-1, 2 ** 16, 2 ** 16 - 1],\n ]:\n l = _asarray([0, 0, 1, 1], dtype=dtype)\n v = 
fn(l)\n assert np.all(v == (~l)), (l, v)\n\n def test_eye(self):\n n = iscalar()\n m = iscalar()\n k = iscalar()\n fn = function([m, n, k], eye(m, n, k))\n assert np.all(fn(5, 6, 1) == np.eye(5, 6, 1))\n\n\nclass TestAdd:\n def test_complex_all_ops(self):\n for nbits in (64, 128):\n a = shared(np.ones(3, dtype=\"complex%i\" % nbits) + 0.5j)\n b = shared(np.ones(3, dtype=\"complex%i\" % nbits) + 1.5j)\n tests = (\n (\"+\", lambda x, y: x + y),\n (\"-\", lambda x, y: x - y),\n (\"*\", lambda x, y: x * y),\n (\"/\", lambda x, y: x / y),\n )\n for s, fn in tests:\n f = inplace_func([], fn(a, b))\n # print 'valid output:', fn(a.data, b.data)\n # print 'Aesara output:', f(a.data, b.data)\n assert a.type.values_eq_approx(fn(a.get_value(), b.get_value()), f())\n\n def test_grad_scalar_l(self):\n utt.verify_grad(add, [np.asarray([3.0]), random(3)])\n\n def test_grad_scalar_r(self):\n utt.verify_grad(add, [random(3), np.asarray([3.0])])\n\n def test_grad_row(self):\n utt.verify_grad(add, [random(3, 5), random(1, 5)])\n\n def test_grad_col(self):\n utt.verify_grad(add, [random(3, 5), random(3, 1)])\n\n\nclass TestCeil:\n def test_complex(self):\n with pytest.raises(TypeError):\n ceil(zvector())\n\n\nclass TestExp:\n def test_grad_0(self):\n utt.verify_grad(\n exp,\n [\n np.asarray(\n [\n [1.5089518, 1.48439076, -4.7820262],\n [2.04832468, 0.50791564, -1.58892269],\n ]\n )\n ],\n )\n\n def test_int(self):\n x = ivector()\n f = function([x], exp(x))\n exp_3 = f([3])\n assert exp_3.dtype == \"float64\"\n\n def test_complex(self):\n x = zvector()\n assert exp(x).dtype == \"complex128\"\n f = function([x], exp(x))\n exp_3 = f([3 + 2j])\n assert np.allclose(exp_3, np.exp(3 + 2j))\n\n\nclass TestDivimpl:\n def test_impls(self):\n i = iscalar()\n ii = lscalar()\n d = dscalar()\n f = fscalar()\n c = cscalar()\n\n assert np.allclose(function([i, d], i / d)(5, 7.0), (5.0 / 7.0))\n assert np.allclose(function([i, d], d / i)(5, 7.0), (7.0 / 5.0))\n assert np.allclose(function([i, f], i / f)(5, 11.0), (5.0 / 11.0))\n assert np.allclose(function([i, f], f / i)(5, 11.0), (11.0 / 5.0))\n assert np.allclose(function([i, ii], i // ii)(5, 3), (5 // 3))\n assert np.allclose(function([i, ii], ii // i)(5, 3), (3 // 5))\n assert np.allclose(function([i, ii], true_div(i, ii))(5, 3), (5.0 / 3.0))\n assert np.allclose(function([i, ii], true_div(ii, i))(5, 3), (3.0 / 5.0))\n assert np.allclose(\n function([i, c], i / c)(5, np.complex(5, 3)), (5.0 / (5 + 3j))\n )\n assert np.allclose(\n function([i, c], c / i)(5, np.complex(5, 3)), ((5 + 3j) / 5.0)\n )\n\n\nclass TestMean:\n def test_mean_single_element(self):\n res = mean(np.zeros(1))\n assert res.eval() == 0.0\n\n def test_mean_f16(self):\n x = vector(dtype=\"float16\")\n y = x.mean()\n f = function([x], y)\n utt.assert_allclose(f(np.ones((100000,), dtype=\"float16\")), 1.0)\n\n def test_basic(self):\n x = vector()\n f = function([x], mean(x))\n data = random(50)\n assert np.allclose(f(data), np.mean(data))\n\n def test_list(self):\n ll = [shared(0.0), shared(2.0)]\n assert mean(ll).eval() == 1\n\n\ndef test_dot_numpy_inputs():\n \"\"\"Test the `Aesara.tensor.dot` interface function with NumPy inputs.\"\"\"\n a = np.ones(2)\n b = np.ones(2)\n res = dot(a, b)\n assert isinstance(res, Variable)\n assert isinstance(res.owner.op, Dot)\n\n\nclass TestDot:\n def test_Op_dims(self):\n d0 = scalar()\n d1 = vector()\n d2 = matrix()\n d3 = tensor3()\n\n with pytest.raises(TypeError):\n _dot(d0, d0)\n with pytest.raises(TypeError):\n _dot(d0, d1)\n with pytest.raises(TypeError):\n 
_dot(d0, d2)\n with pytest.raises(TypeError):\n _dot(d0, d3)\n with pytest.raises(TypeError):\n _dot(d1, d0)\n _dot(d1, d1)\n _dot(d1, d2)\n with pytest.raises(TypeError):\n _dot(d1, d3)\n with pytest.raises(TypeError):\n _dot(d2, d0)\n _dot(d2, d1)\n _dot(d2, d2)\n with pytest.raises(TypeError):\n _dot(d2, d3)\n with pytest.raises(TypeError):\n _dot(d3, d0)\n with pytest.raises(TypeError):\n _dot(d3, d1)\n with pytest.raises(TypeError):\n _dot(d3, d2)\n with pytest.raises(TypeError):\n _dot(d3, d3)\n\n def test_grad(self):\n utt.verify_grad(dense_dot, [random(2, 3), random(3, 2)])\n utt.verify_grad(dense_dot, [random(2), random(2, 3)])\n utt.verify_grad(dense_dot, [random(3, 2), random(2)])\n utt.verify_grad(dense_dot, [random(2), random(2)])\n utt.verify_grad(dense_dot, [random(), random()])\n # TODO: What about the broadcastable conditions in `Dot.grad`?\n\n def test_broadcastable_patterns(self):\n\n #\n # These examples should all work. All dimensions of all results have\n # size 1.\n #\n def val_for(r):\n if r.dtype.startswith(\"complex\"):\n # We want to test complex at the same time, so we give a value\n # to the imaginary component.\n # This strange way of doing things is the only way that worked\n # on NumPy 1.4.1.\n if r.ndim == 0:\n return np.asarray(np.complex(1.1, 2.1), dtype=r.dtype)\n if r.ndim == 1:\n if r.dtype == \"complex64\":\n return np.complex64([np.complex(1.2, 2.2)])\n elif r.dtype == \"complex128\":\n return np.complex128([np.complex(1.2, 2.2)])\n elif r.ndim == 2:\n if r.dtype == \"complex64\":\n return np.complex64([[np.complex(1.3, 2.3)]])\n elif r.dtype == \"complex128\":\n return np.complex128([[np.complex(1.3, 2.3)]])\n\n if r.ndim == 0:\n return np.asarray(1.1, dtype=r.dtype)\n if r.ndim == 1:\n return np.asarray([1.2], dtype=r.dtype)\n elif r.ndim == 2:\n return np.asarray([[1.3]], dtype=r.dtype)\n raise AssertionError()\n\n for dtype0 in (\"float32\", \"float64\", \"complex64\"):\n for dtype1 in (\"float32\", \"complex64\", \"complex128\"):\n for bc0 in (\n (True,),\n (False,),\n (True, True),\n (True, False),\n (False, True),\n (False, False),\n ):\n x = TensorType(dtype=dtype0, broadcastable=bc0)()\n for bc1 in (\n (True,),\n (False,),\n (True, True),\n (True, False),\n (False, True),\n (False, False),\n ):\n\n y = TensorType(dtype=dtype1, broadcastable=bc1)()\n z = dense_dot(x, y)\n\n if dtype0.startswith(\"float\") and dtype1.startswith(\"float\"):\n g = grad(z.sum(), x)\n assert g.broadcastable == x.broadcastable\n g = grad(z.sum(), y)\n assert g.broadcastable == y.broadcastable\n\n\nclass TestTensordot:\n def TensorDot(self, axes):\n # Since tensordot is no longer an op, mimic the old op signature\n # to allow easy use of verify_grad.\n return lambda a, b: tensordot(a, b, axes)\n\n def test_basic(self):\n # Test vector-vector\n avec = vector()\n bvec = vector()\n axes = ((0,), (0,))\n c = tensordot(avec, bvec, axes)\n f1 = inplace_func([avec, bvec], c)\n aval = random(5)\n bval = random(5)\n out0 = np.tensordot(aval, bval, axes)\n out1 = f1(aval, bval)\n utt.assert_allclose(out0, out1)\n utt.verify_grad(self.TensorDot(axes), [aval, bval])\n\n # Test matrix-vector\n bmat = matrix()\n axes = ((0,), (1,))\n c = tensordot(avec, bmat, axes)\n f2 = inplace_func([avec, bmat], c)\n aval = random(5)\n bval = random(8, 5)\n utt.assert_allclose(np.tensordot(aval, bval, axes), f2(aval, bval))\n utt.verify_grad(self.TensorDot(axes), [aval, bval])\n\n # Test matrix-matrix\n amat = matrix()\n for axes, shps in [\n [((0,), (0,)), [(4, 7), (4, 9)]],\n [((0,), (1,)), 
[(4, 7), (9, 4)]],\n [((1,), (0,)), [(4, 7), (7, 9)]],\n [((1,), (1,)), [(4, 7), (9, 7)]],\n [((0, 1), (0, 1)), [(4, 7), (4, 7)]],\n # [((0, 1), (1, 0)), [(4, 7), (7, 4)]],\n # [((1, 0), (1, 0)), [(4, 7), (4, 7)]],\n # [((1, 0), (0, 1)), [(4, 7), (7, 4)]],\n ]:\n c = tensordot(amat, bmat, axes)\n f3 = inplace_func([amat, bmat], c)\n aval = random(*shps[0])\n bval = random(*shps[1])\n utt.assert_allclose(np.tensordot(aval, bval, axes), f3(aval, bval))\n utt.verify_grad(self.TensorDot(axes), [aval, bval])\n\n # Test ndarray-matrix, sum over one dim of matrix\n for axes, shps in [\n [((2,), (1,)), [(1, 2, 3, 4), (2, 3)]],\n [((0,), (1,)), [(1, 2, 3, 4), (3, 1)]],\n [((0,), (0,)), [(1, 2, 3, 4), (1, 3)]],\n [((3,), (0,)), [(1, 2, 3, 4), (4, 1)]],\n # [((3, 1), (0, 1)), [(1, 2, 3, 4), (4, 2)]],\n # [((0, 1), (1, 0)), [(1, 2, 3, 4), (2, 1)]],\n # [((3, 1), (1, 0)), [(1, 2, 3, 4), (2, 4)]],\n ]:\n atens = tensor4()\n c = tensordot(atens, bmat, axes)\n f4 = inplace_func([atens, bmat], c)\n aval = random(*shps[0])\n bval = random(*shps[1])\n utt.assert_allclose(np.tensordot(aval, bval, axes), f4(aval, bval))\n utt.verify_grad(self.TensorDot(axes), [aval, bval])\n\n # Test ndarray-ndarray\n atens = tensor4()\n btens = tensor3()\n axes = ((1, 3), (0, 2))\n c = tensordot(atens, btens, axes)\n f5 = inplace_func([atens, btens], c)\n aval = random(4, 3, 5, 2)\n bval = random(3, 4, 2)\n utt.assert_allclose(np.tensordot(aval, bval, axes), f5(aval, bval))\n utt.verify_grad(self.TensorDot(axes), [aval, bval])\n\n axes = (axes[1], axes[0])\n c = tensordot(btens, atens, axes)\n f6 = inplace_func([btens, atens], c)\n utt.assert_allclose(np.tensordot(bval, aval, axes), f6(bval, aval))\n utt.verify_grad(self.TensorDot(axes), [bval, aval])\n\n def test_raise_error(self):\n amat = matrix()\n bmat = matrix()\n bvec = vector()\n\n # Test invalid length for axes\n with pytest.raises(ValueError):\n tensordot(amat, bmat, (0, 1, 2))\n\n # Test axes of uneven length\n with pytest.raises(ValueError):\n tensordot(amat, bmat, ((0, 1), (0)))\n\n # Test invalid len(axes) given inputs are matrices\n with pytest.raises(ValueError):\n tensordot(amat, bmat, ((0, 1, 2), (0, 1, 2)))\n\n # Test invalid axes[1] given that y is a vector\n with pytest.raises(ValueError):\n tensordot(amat, bvec, (0, 1))\n\n # Test invalid scalar axes given inputs are matrices\n with pytest.raises(ValueError):\n tensordot(amat, bvec, 2)\n\n def test_weird_valid_axes(self):\n # Test matrix-matrix\n amat = matrix()\n bmat = matrix()\n for axes in [0, (1, 0), [1, 0], (1, (0,)), ((1,), 0), ([1], [0]), ([], [])]:\n c = tensordot(amat, bmat, axes)\n f3 = inplace_func([amat, bmat], c)\n aval = random(4, 7)\n bval = random(7, 9)\n utt.assert_allclose(np.tensordot(aval, bval, axes), f3(aval, bval))\n utt.verify_grad(self.TensorDot(axes), [aval, bval])\n\n def test_scalar_axes(self):\n # Test matrix-matrix\n amat = fmatrix()\n bmat = dmatrix()\n # We let at float64 to test mix of float32 and float64.\n axes = 1\n aval = random(4, 5).astype(\"float32\")\n bval = random(5, 3)\n c = tensordot(amat, bmat, axes)\n f3 = inplace_func([amat, bmat], c)\n assert np.allclose(np.tensordot(aval, bval, axes), f3(aval, bval))\n utt.verify_grad(self.TensorDot(axes), [aval, bval])\n\n # Test tensor-tensor\n amat = tensor3()\n bmat = tensor3()\n axes = 2\n aval = random(3, 4, 5)\n bval = random(4, 5, 3)\n c = tensordot(amat, bmat, axes)\n f3 = inplace_func([amat, bmat], c)\n assert np.allclose(np.tensordot(aval, bval, axes), f3(aval, bval))\n utt.verify_grad(self.TensorDot(axes), 
[aval, bval])\n\n def test_scalar0(self):\n # Test tensor-tensor\n amat = matrix()\n bmat = matrix()\n axes = 0\n aval = random(4, 5)\n bval = random(5, 4)\n c = tensordot(amat, bmat, axes)\n f3 = inplace_func([amat, bmat], c)\n assert np.allclose(np.tensordot(aval, bval, axes), f3(aval, bval))\n utt.verify_grad(self.TensorDot(axes), [aval, bval])\n\n def test_broadcastable1(self):\n x = TensorType(dtype=config.floatX, broadcastable=(True, False, False))(\"x\")\n y = tensor3(\"y\")\n z = tensordot(x, y)\n assert z.broadcastable == (True, False)\n f = inplace_func([x, y], z)\n xv = random(1, 3, 4)\n yv = random(3, 4, 5)\n zv = f(xv, yv)\n assert np.allclose(np.tensordot(xv, yv), zv)\n\n def test_broadcastable2(self):\n x = TensorType(dtype=config.floatX, broadcastable=(True, False, False))(\"x\")\n y = tensor3(\"y\")\n axes = [[2, 1], [0, 1]]\n z = tensordot(x, y, axes=axes)\n assert z.broadcastable == (True, False)\n f = inplace_func([x, y], z)\n xv = random(1, 3, 4)\n yv = random(4, 3, 5)\n zv = f(xv, yv)\n assert np.allclose(np.tensordot(xv, yv, axes=axes), zv)\n\n\ndef test_smallest():\n x = dvector()\n y = dvector()\n z = dvector()\n f1 = inplace_func([x], smallest(x))\n assert np.all([1, 2, 3] == f1([1, 2, 3]))\n f3 = inplace_func([x, y, z], smallest(x, y, z))\n assert np.all([1, 2, 3] == f3([1, 3, 9], [7, 7, 7], [8, 2, 3]))\n\n sx, sy = dscalar(), dscalar()\n\n assert -4 == inplace_func([sx, sy], smallest(sx, sy))(-4.0, -2.0)\n\n\ndef test_var():\n a = TensorType(dtype=\"float64\", broadcastable=[False, False, False])()\n f = function([a], var(a))\n\n a_val = np.arange(6).reshape(1, 2, 3)\n assert np.allclose(np.var(a_val), f(a_val))\n\n f = function([a], var(a, axis=0))\n assert np.allclose(np.var(a_val, axis=0), f(a_val))\n\n f = function([a], var(a, axis=1))\n assert np.allclose(np.var(a_val, axis=1), f(a_val))\n\n f = function([a], var(a, axis=2))\n assert np.allclose(np.var(a_val, axis=2), f(a_val))\n\n f = function([a], var(a, axis=0, ddof=0))\n assert np.allclose(np.var(a_val, axis=0, ddof=0), f(a_val))\n\n f = function([a], var(a, axis=1, ddof=1))\n assert np.allclose(np.var(a_val, axis=1, ddof=1), f(a_val))\n\n f = function([a], var(a, axis=2, ddof=1))\n assert np.allclose(np.var(a_val, axis=2, ddof=1), f(a_val))\n\n f = function([a], var(a, ddof=0, corrected=True))\n mean_a = np.mean(a_val)\n centered_a = a_val - mean_a\n v = np.mean(centered_a ** 2)\n error = (np.mean(centered_a)) ** 2\n v = v - error\n assert np.allclose(v, f(a_val))\n\n f = function([a], var(a, axis=2, ddof=1, corrected=True))\n mean_a = np.mean(a_val, axis=2, keepdims=True)\n centered_a = a_val - mean_a\n v = np.var(a_val, axis=2, ddof=1)\n shp_inp = np.shape(a_val)\n shp = shp_inp - np.array(1)\n error = (np.sum(centered_a, axis=2)) ** 2\n error = np.true_divide(error, shp[1] * shp_inp[1])\n v = v - error\n assert np.allclose(v, f(a_val))\n\n # Test that we don't upcast float16 computation\n assert vector(dtype=\"float16\").var().dtype == \"float16\"\n\n\nclass TestSum:\n def test_sum_overflow(self):\n # Ensure that overflow errors are a little bit harder to get\n a = TensorType(dtype=\"int8\", broadcastable=[False])()\n f = function([a], aet_sum(a))\n assert f([1] * 300) == 300\n\n def test_list(self):\n ll = [shared(0.0), shared(2.0)]\n aet_sum(ll).eval() == 2\n\n\nclass TestArithmeticCast:\n \"\"\"Test output types of basic arithmeric operations (* / + - //).\n\n We only test the behavior for `config.cast_policy` set to either 'numpy' or\n 'numpy+floatX': the 'custom' behavior is (at least 
partially) tested in\n `_test_autocast_custom`.\n\n \"\"\"\n\n def test_arithmetic_cast(self):\n dtypes = get_numeric_types(with_complex=True)\n\n # Here:\n # scalar == scalar stored as a 0d array\n # array == 1d array\n # i_scalar == scalar type used internally by Aesara\n def Aesara_scalar(dtype):\n return scalar(dtype=str(dtype))\n\n def numpy_scalar(dtype):\n return np.array(1, dtype=dtype)\n\n def Aesara_array(dtype):\n return vector(dtype=str(dtype))\n\n def numpy_array(dtype):\n return np.array([1], dtype=dtype)\n\n def Aesara_i_scalar(dtype):\n return aes.Scalar(str(dtype))()\n\n def numpy_i_scalar(dtype):\n return numpy_scalar(dtype)\n\n with warnings.catch_warnings():\n # Avoid deprecation warning during tests.\n warnings.simplefilter(\"ignore\", category=DeprecationWarning)\n for cfg in (\"numpy+floatX\",): # Used to test 'numpy' as well.\n with config.change_flags(cast_policy=cfg):\n for op in (\n operator.add,\n operator.sub,\n operator.mul,\n operator.truediv,\n operator.floordiv,\n ):\n for a_type in dtypes:\n for b_type in dtypes:\n\n # We will test all meaningful combinations of\n # scalar and array operations.\n for combo in (\n (\"scalar\", \"scalar\"),\n (\"array\", \"array\"),\n (\"scalar\", \"array\"),\n (\"array\", \"scalar\"),\n (\"i_scalar\", \"i_scalar\"),\n ):\n\n Aesara_args = list(\n map(eval, [f\"Aesara_{c}\" for c in combo])\n )\n numpy_args = list(\n map(eval, [f\"numpy_{c}\" for c in combo])\n )\n Aesara_dtype = op(\n Aesara_args[0](a_type),\n Aesara_args[1](b_type),\n ).type.dtype\n\n # For numpy we have a problem:\n # http://projects.scipy.org/numpy/ticket/1827\n # As a result we only consider the highest data\n # type that numpy may return.\n numpy_dtypes = [\n op(\n numpy_args[0](a_type), numpy_args[1](b_type)\n ).dtype,\n op(\n numpy_args[1](b_type), numpy_args[0](a_type)\n ).dtype,\n ]\n numpy_dtype = aes.upcast(\n *list(map(str, numpy_dtypes))\n )\n if numpy_dtype == Aesara_dtype:\n # Same data type found, all is good!\n continue\n if (\n cfg == \"numpy+floatX\"\n and config.floatX == \"float32\"\n and a_type != \"float64\"\n and b_type != \"float64\"\n and numpy_dtype == \"float64\"\n ):\n # We should keep float32.\n assert Aesara_dtype == \"float32\"\n continue\n if \"array\" in combo and \"scalar\" in combo:\n # For mixed scalar / array operations,\n # Aesara may differ from numpy as it does\n # not try to prevent the scalar from\n # upcasting the array.\n array_type, scalar_type = (\n (a_type, b_type)[list(combo).index(arg)]\n for arg in (\"array\", \"scalar\")\n )\n up_type = aes.upcast(array_type, scalar_type)\n if (\n # The two data types are different.\n scalar_type != array_type\n and\n # The array type is not enough to hold\n # the scalar type as well.\n array_type != up_type\n and\n # Aesara upcasted the result array.\n Aesara_dtype == up_type\n and\n # But Numpy kept its original type.\n array_type == numpy_dtype\n ):\n # Then we accept this difference in\n # behavior.\n continue\n\n if (\n cfg == \"numpy+floatX\"\n and a_type == \"complex128\"\n and (b_type == \"float32\" or b_type == \"float16\")\n and combo == (\"scalar\", \"array\")\n and Aesara_dtype == \"complex128\"\n and numpy_dtype == \"complex64\"\n ):\n # In numpy 1.6.x adding a complex128 with\n # a float32 may result in a complex64. As\n # of 1.9.2. 
this is still the case so it is\n # probably by design\n pytest.skip(\"Known issue with\" \"numpy see #761\")\n # In any other situation: something wrong is\n # going on!\n raise AssertionError()\n\n\ndef test_divmod():\n # Confirm that divmod is equivalent to the python version.\n x, y = fscalars(\"xy\")\n d, r = divmod(x, y)\n fn = DualLinker().accept(FunctionGraph([x, y], [d, r])).make_function()\n for a, b in (\n (0, 1),\n (1, 1),\n (0, -1),\n (1, -1),\n (-1, -1),\n (1, 2),\n (-1, 2),\n (1, -2),\n (-1, -2),\n (5, 3),\n (-5, 3),\n (5, -3),\n (-5, -3),\n ):\n d_v, r_v = fn(a, b)\n d_vp, r_vp = divmod(a, b)\n assert d_v == d_vp and r_v == r_vp, (a,)\n\n\ndef test_mod_compile():\n # This test generate an Elemwise of Composite as:\n # Elemwise{\n # Composite{\n # Composite{\n # Composite{\n # Composite{mod,EQ},\n # Switch},\n # mul},\n # add}}\n #\n # The c_code generated is not compiling as of 30 June 2010. I fix the\n # compilation in the same commit.\n x = vector()\n y = vector()\n out = switch(eq(3 % x.shape[0], 0), y, y[:-1])\n\n function([x, y], out)\n\n\nclass TestInferShape(utt.InferShapeTester):\n def test_Mean(self):\n adtens3 = dtensor3()\n adtens3_val = random(3, 4, 5)\n aiscal_val = 2\n self._compile_and_check([adtens3], [Mean(None)(adtens3)], [adtens3_val], Mean)\n self._compile_and_check(\n [adtens3], [Mean(aiscal_val)(adtens3)], [adtens3_val], Mean\n )\n\n def test_MaxAndArgmax(self):\n\n adtens3 = dtensor3()\n adtens3_val = random(4, 5, 3)\n self._compile_and_check(\n [adtens3], max_and_argmax(adtens3, None), [adtens3_val], MaxAndArgmax\n )\n\n self._compile_and_check(\n [adtens3], max_and_argmax(adtens3, 0), [adtens3_val], MaxAndArgmax\n )\n\n self._compile_and_check(\n [adtens3], max_and_argmax(adtens3, 1), [adtens3_val], MaxAndArgmax\n )\n\n self._compile_and_check(\n [adtens3], max_and_argmax(adtens3, 2), [adtens3_val], MaxAndArgmax\n )\n\n self._compile_and_check(\n [adtens3], max_and_argmax(adtens3, [0, 1, 2]), [adtens3_val], MaxAndArgmax\n )\n\n def test_Dot(self):\n # Dot\n\n # vec/vec\n advec = dvector()\n bdvec = dvector()\n advec_val = random(4)\n bdvec_val = random(4)\n self._compile_and_check(\n [advec, bdvec],\n [Dot()(advec, bdvec)],\n [advec_val, bdvec_val],\n (Dot, blas.Dot22, blas.Gemv, blas_c.CGemv),\n )\n\n # mat/mat\n admat = dmatrix()\n bdmat = dmatrix()\n admat_val = random(4, 5)\n bdmat_val = random(5, 3)\n self._compile_and_check(\n [admat, bdmat],\n [Dot()(admat, bdmat)],\n [admat_val, bdmat_val],\n (Dot, blas.Dot22),\n )\n\n # vec/mat\n bdmat_val = random(4, 5)\n self._compile_and_check(\n [advec, bdmat],\n [Dot()(advec, bdmat)],\n [advec_val, bdmat_val],\n (Dot, blas.Dot22, blas.Gemv, blas_c.CGemv),\n )\n\n # mat/vec\n admat_val = random(5, 4)\n self._compile_and_check(\n [admat, bdvec],\n [Dot()(admat, bdvec)],\n [admat_val, bdvec_val],\n (Dot, blas.Dot22, blas.Gemv, blas_c.CGemv),\n )\n\n\nclass TestTensorInstanceMethods:\n def setup_method(self):\n self.vars = matrices(\"X\", \"Y\")\n self.vals = [m.astype(config.floatX) for m in [random(2, 2), random(2, 2)]]\n\n def test_argmin(self):\n X, _ = self.vars\n x, _ = self.vals\n assert_array_equal(X.argmin().eval({X: x}), x.argmin())\n\n def test_argmax(self):\n X, _ = self.vars\n x, _ = self.vals\n assert_array_equal(X.argmax().eval({X: x}), x.argmax())\n\n def test_argsort(self):\n X, _ = self.vars\n x, _ = self.vals\n assert_array_equal(X.argsort().eval({X: x}), x.argsort())\n assert_array_equal(X.argsort(1).eval({X: x}), x.argsort(1))\n\n def test_clip(self):\n X, Y = self.vars\n x, y = 
self.vals\n # np.clip gives unexpected values when min > max,\n # so we have to make sure that min <= max in that test,\n # otherwise it randomly fails.\n Z = X.clip(Y - 0.5, Y + 0.5)\n z = x.clip(y - 0.5, y + 0.5)\n assert_array_equal(Z.eval({X: x, Y: y}), z)\n\n def test_dot(self):\n X, Y = self.vars\n x, y = self.vals\n # Use allclose comparison as a user reported on the mailing\n # list failure otherwise with array that print exactly the same.\n utt.assert_allclose(x.dot(y), X.dot(Y).eval({X: x, Y: y}))\n Z = X.dot(Y)\n z = x.dot(y)\n utt.assert_allclose(x.dot(z), X.dot(Z).eval({X: x, Z: z}))\n\n def test_real_imag(self):\n X, Y = self.vars\n x, y = self.vals\n Z = X + Y * 1j\n z = x + y * 1j\n assert_array_equal(Z.real.eval({Z: z}), x)\n assert_array_equal(Z.imag.eval({Z: z}), y)\n\n def test_conj(self):\n X, Y = self.vars\n x, y = self.vals\n Z = X + Y * 1j\n z = x + y * 1j\n assert_array_equal(Z.conj().eval({Z: z}), z.conj())\n assert_array_equal(Z.conjugate().eval({Z: z}), z.conj())\n\n def test_round(self):\n X, _ = self.vars\n x, _ = self.vals\n assert_array_equal(X.round().eval({X: x}), x.round())\n\n def test_std(self):\n X, _ = self.vars\n x, _ = self.vals\n # std() is implemented as Aesara tree and does not pass its\n # args directly to numpy. This sometimes results in small\n # difference, so we use allclose test.\n utt.assert_allclose(X.std().eval({X: x}), x.std())\n\n def test_cumsum(self):\n X, _ = self.vars\n x, _ = self.vals\n assert_array_equal(X.cumsum().eval({X: x}), x.cumsum())\n\n def test_cumprod(self):\n X, _ = self.vars\n x, _ = self.vals\n assert_array_equal(X.cumprod().eval({X: x}), x.cumprod())\n\n\ndef test_norm():\n x = vector(\"x\")\n n = x.norm(2)\n f = function([x], n)\n assert np.allclose(f([1, 1]), np.sqrt(2))\n\n\ndef test_cov():\n x = matrix(\"x\")\n y = matrix(\"y\")\n\n for rowvar, bias, ddof in product([True, False], [True, False], [None, 2]):\n c = cov(x, rowvar=rowvar, bias=bias, ddof=ddof)\n f = function([x], c)\n\n data = np.asarray(np.random.random((3, 5)), dtype=config.floatX)\n assert np.allclose(f(data), np.cov(data, rowvar=rowvar, bias=bias, ddof=ddof))\n\n c = cov(x, y=y, rowvar=rowvar, bias=bias, ddof=ddof)\n f = function([x, y], c)\n\n data = np.asarray(np.random.random((3, 5)), dtype=config.floatX)\n y_val = np.asarray(np.random.random((3, 5)), dtype=config.floatX)\n assert np.allclose(\n f(data, y_val), np.cov(data, rowvar=rowvar, y=y_val, bias=bias, ddof=ddof)\n )\n\n\ndef test_ptp():\n # Should return 0 for all scalar\n x = scalar(\"x\")\n p = ptp(x)\n f = function([x], p)\n\n y = np.asarray(random() * 20 - 10, dtype=config.floatX)\n result = f(y)\n numpyResult = np.ptp(y)\n\n assert np.array_equal(result, numpyResult)\n\n\nclass TestPower:\n def test_numpy_compare(self):\n rng = np.random.default_rng(utt.fetch_seed())\n A = matrix(\"A\", dtype=config.floatX)\n Q = power(A, 3)\n fn = function([A], [Q])\n a = rng.random((4, 4)).astype(config.floatX)\n\n n_p = np.power(a, 3)\n t_p = fn(a)\n assert np.allclose(n_p, t_p)\n\n def test_multiple_power(self):\n x = vector()\n y = [1, 2, 3]\n z = power(x, y)\n f = function([x], z)\n assert np.allclose(f([1, 2, 3]), [1, 4, 27])\n\n def test_wrong_shape(self):\n x = vector()\n y = [1, 2, 3]\n z = power(x, y)\n f = function([x], z)\n with pytest.raises(ValueError):\n f([1, 2, 3, 4])\n\n\nclass TestProd:\n def setup_method(self):\n # we want to allow nans in the matrices, so we disable this\n # DEBUG_MODE check\n mode = get_default_mode()\n mode = copy(mode)\n mode.check_isfinite = False\n 
self.mode = mode\n\n def test_verify_grad(self):\n # including zeros, as the case with zeros is important\n # (and special cases: 1 zero in the row, more than 1 zero in the row)\n x_val = np.asarray(\n [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6], [0.7, 0.8, 0.9]], dtype=\"float32\"\n )\n # now with verify_grad\n utt.verify_grad(Prod(axis=1), [x_val], mode=self.mode)\n\n # second time, with some added complexity\n # verify_grad takes the sum of the matrices anyway\n def fn(x2):\n return sqr(Prod(axis=1)(x2))\n\n utt.verify_grad(fn, [x_val], mode=self.mode)\n\n def test_verify_grad_with_zeros(self):\n # including zeros, as the case with zeros is important\n # (and special cases: 1 zero in the row, more than 1 zero in the row)\n x_val = np.asarray(\n [[1.0, 2.0, 3.0], [0.0, 5.0, 6.0], [0.0, 0.0, 9.0]], dtype=\"float32\"\n )\n x = dmatrix()\n\n # sanity check\n p = Prod(axis=1)(x)\n\n fn3 = function([x], [p], mode=self.mode)\n assert np.allclose(fn3(x_val), [6.0, 0.0, 0.0])\n\n # now with verify_grad\n utt.verify_grad(Prod(axis=1), [x_val], mode=self.mode)\n\n def test_prod_no_zeros_in_input(self):\n x = dmatrix()\n x_val = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=\"float32\")\n pwz = Prod(axis=1, no_zeros_in_input=True)(x)\n fn = function([x], pwz, mode=self.mode)\n\n assert np.allclose(fn(x_val), [6, 120, 504])\n\n pwz = Prod(no_zeros_in_input=True)(x)\n g = grad(pwz, x)\n gg = grad(g.sum(), x)\n fn = function([x], g, mode=self.mode)\n assert np.allclose(\n fn(x_val),\n [\n [362880.0, 181440.0, 120960.0],\n [90720.0, 72576.0, 60480.0],\n [51840.0, 45360.0, 40320.0],\n ],\n )\n fn = function([x], gg, mode=self.mode)\n assert np.allclose(\n fn(x_val),\n [\n [663696.0, 422568.0, 301872.0],\n [233964.0, 190800.0, 161016.0],\n [139248.0, 122652.0, 109584.0],\n ],\n )\n utt.verify_grad(Prod(axis=1, no_zeros_in_input=True), [x_val], mode=self.mode)\n utt.verify_grad(Prod(no_zeros_in_input=True), [x_val], mode=self.mode)\n\n def test_prod_without_zeros(self):\n x = dmatrix()\n x_val = np.array([[1, 2, 3], [0, 5, 6], [0, 0, 9]], dtype=\"float32\")\n pwz = ProdWithoutZeros(axis=1)(x)\n fn = function([x], pwz, mode=self.mode)\n assert np.allclose(fn(x_val), [6, 30, 9])\n\n pwz_a0 = ProdWithoutZeros(axis=0)(x)\n fn_a0 = function([x], pwz_a0, mode=self.mode)\n assert np.allclose(fn_a0(x_val), [1, 10, 162])\n\n @pytest.mark.xfail(raises=NullTypeGradError)\n def test_prod_without_zeros_grad(self):\n x = dmatrix()\n pwz_a1 = ProdWithoutZeros(axis=0)(x)\n pwz_grad = grad(aet_sum(pwz_a1), x)\n # FIXME: This is not a real test.\n function([x], pwz_grad, mode=self.mode)\n\n def test_other_grad_tests(self):\n x = dmatrix()\n x_val1 = np.array([[1, 2, 3], [0, 5, 6], [0, 0, 9]], dtype=\"float32\")\n x_val2 = np.array(\n [[1, 2, 0], [0, 5, 6], [7, 8, 9], [9, 10, 0]], dtype=\"float32\"\n )\n rng = rng = np.random.default_rng(43)\n\n p = Prod(axis=1)\n grad_p = grad(p(x).sum(), x)\n grad_fn = function([x], grad_p, mode=self.mode)\n assert np.allclose(\n grad_fn(x_val1), [[6.0, 3.0, 2.0], [30.0, 0.0, 0.0], [0.0, 0.0, 0.0]]\n )\n assert np.allclose(\n grad_fn(x_val2),\n [[0.0, 0.0, 2.0], [30.0, 0.0, 0.0], [72.0, 63.0, 56.0], [0.0, 0.0, 90.0]],\n )\n\n p_axis0 = Prod(axis=0)\n grad_p_axis0 = grad(p_axis0(x).sum(), x)\n grad_fn_axis0 = function([x], grad_p_axis0, mode=self.mode)\n assert np.allclose(\n grad_fn_axis0(x_val2),\n [\n [0.0, 400.0, 0.0],\n [63.0, 160.0, 0.0],\n [0.0, 100.0, 0.0],\n [0.0, 80.0, 0.0],\n ],\n )\n\n utt.verify_grad(p, [x_val1], rng=rng, mode=self.mode)\n\n def test_pickle(self):\n test_prod = 
Prod()\n prod_pickled = pickle.dumps(test_prod, protocol=-1)\n unpickled_prod = pickle.loads(prod_pickled)\n assert not unpickled_prod.no_zeros_in_input\n\n prod_pickled = pickle.dumps(test_prod)\n unpickled_prod = pickle.loads(prod_pickled)\n assert not unpickled_prod.no_zeros_in_input\n\n\nclass TestIsInfIsNan:\n def setup_method(self):\n self.test_vals = [\n np.array(x, dtype=config.floatX)\n for x in [\n 0,\n 1,\n np.nan,\n np.inf,\n -np.inf,\n [np.nan, np.inf, -np.inf, 0, 1, -1],\n ]\n ]\n self.scalar = scalar()\n self.vector = vector()\n self.mode = get_default_mode()\n if isinstance(self.mode, DebugMode):\n # Disable the check preventing usage of NaN / Inf values.\n self.mode = copy(self.mode)\n self.mode.check_isfinite = False\n\n def run_isfunc(self, aet_func, np_func):\n for args in (self.scalar, self.vector):\n Aesara_isfunc = function([args], aet_func(args), mode=self.mode)\n for x in self.test_vals:\n if (x.ndim == 0 and args is not self.scalar) or (\n x.ndim == 1 and args is not self.vector\n ):\n # We only test with the appropriate input type.\n continue\n t_out = Aesara_isfunc(x)\n n_out = np_func(x)\n assert (t_out == n_out).all(), (t_out, n_out)\n\n def test_isinf(self):\n self.run_isfunc(isinf, np.isinf)\n\n def test_isnan(self):\n self.run_isfunc(isnan, np.isnan)\n\n\nclass TestSumProdReduceDtype:\n mode = get_default_mode().excluding(\"local_cut_useless_reduce\")\n op = CAReduce\n axes = [None, 0, 1, [], [0], [1], [0, 1]]\n methods = [\"sum\", \"prod\"]\n dtypes = list(map(str, aes.all_types))\n\n # Test the default dtype of a method().\n def test_reduce_default_dtype(self):\n # We try multiple axis combinations even though axis should not matter.\n for method in self.methods:\n for idx, dtype in enumerate(self.dtypes):\n axis = self.axes[idx % len(self.axes)]\n x = matrix(dtype=dtype)\n s = getattr(x, method)(axis=axis)\n assert (\n s.dtype\n == dict(\n bool=\"int64\",\n int8=\"int64\",\n int16=\"int64\",\n int32=\"int64\",\n uint8=\"uint64\",\n uint16=\"uint64\",\n uint32=\"uint64\",\n ).get(dtype, dtype)\n )\n f = function([x], s, mode=self.mode)\n topo = f.maker.fgraph.toposort()\n assert [n for n in topo if isinstance(n.op, self.op)], (topo, dtype)\n data = np.random.random((3, 4)) * 10\n data = data.astype(dtype)\n f(data)\n\n def test_reduce_default_acc_dtype(self):\n # Test the default acc_dtype of a reduce().\n\n # We try multiple axis combinations even though axis should not matter.\n for method in self.methods:\n for idx, dtype in enumerate(self.dtypes):\n axis = self.axes[idx % len(self.axes)]\n x = matrix(dtype=dtype)\n s = getattr(x, method)(axis=axis)\n assert (\n s.owner.op.acc_dtype\n == dict(\n bool=\"int64\",\n int8=\"int64\",\n int16=\"int64\",\n int32=\"int64\",\n uint8=\"uint64\",\n uint16=\"uint64\",\n uint32=\"uint64\",\n float16=\"float32\",\n float32=\"float64\",\n complex64=\"complex128\",\n ).get(dtype, dtype)\n )\n f = function([x], s, mode=self.mode)\n topo = f.maker.fgraph.toposort()\n assert [n for n in topo if isinstance(n.op, self.op)], (topo, dtype)\n data = np.random.random((3, 4)) * 10\n data = data.astype(dtype)\n f(data)\n\n @pytest.mark.slow\n def test_reduce_custom_dtype(self):\n # Test the ability to provide your own output dtype for a reduce.\n\n # We try multiple axis combinations even though axis should not matter.\n idx = 0\n for method in self.methods:\n for input_dtype in self.dtypes:\n x = matrix(dtype=input_dtype)\n for output_dtype in self.dtypes:\n # Only tests case where both input and output are complex.\n icomplex 
= input_dtype.startswith(\"complex\")\n ocomplex = output_dtype.startswith(\"complex\")\n if icomplex != ocomplex:\n continue\n\n axis = self.axes[idx % len(self.axes)]\n var = getattr(x, method)(dtype=output_dtype, axis=axis)\n assert var.dtype == output_dtype\n\n f = function([x], var, mode=self.mode)\n topo = f.maker.fgraph.toposort()\n assert [n for n in topo if isinstance(n.op, self.op)], (\n topo,\n output_dtype,\n )\n data = np.random.random((3, 4)) * 10\n data = data.astype(input_dtype)\n if method == \"prod\" and output_dtype in [\n \"float16\",\n \"int8\",\n \"uint8\",\n \"int16\",\n \"uint16\",\n ]:\n # We will likely get something infinite,\n # or the overflow will be different between CPU and GPU,\n # and DebugMode will complain.\n data = data[0:1]\n f(data)\n if \"complex\" in input_dtype:\n continue\n # Check that we can take the gradient\n grad(var.sum(), x, disconnected_inputs=\"ignore\")\n idx += 1\n\n def test_reduce_custom_acc_dtype(self):\n # Test the ability to provide your own accumulator dtype for a reduce.\n\n # We try multiple axis combinations even though axis should not matter.\n idx = 0\n for method in self.methods:\n for input_dtype in self.dtypes:\n x = matrix(dtype=input_dtype)\n for acc_dtype in self.dtypes:\n # If the accumulator is a complex, the gradient of the reduce will\n # cast the complex to the input dtype. We can't call the normal\n # cast on a complex to a not complex as this is ambiguous.\n if not input_dtype.startswith(\"complex\") and acc_dtype.startswith(\n \"complex\"\n ):\n continue\n\n axis = self.axes[idx % len(self.axes)]\n # If output_dtype would force a downcast, we expect a TypeError\n # We always allow int/uint inputs with float/complex outputs.\n upcasted_dtype = aes.upcast(input_dtype, acc_dtype)\n if acc_dtype == upcasted_dtype or (\n input_dtype in discrete_dtypes\n and acc_dtype in continuous_dtypes\n ):\n var = getattr(x, method)(acc_dtype=acc_dtype, axis=axis)\n assert var.owner.op.acc_dtype == acc_dtype\n\n if \"complex\" in input_dtype:\n continue\n # Check that we can take the gradient\n grad(var.sum(), x, disconnected_inputs=\"ignore\")\n else:\n with pytest.raises(TypeError):\n getattr(x(method), acc_dtype=acc_dtype, axis=axis)\n\n idx += 1\n\n def test_reduce_precision(self):\n # Check that the default accumulator precision is sufficient\n for method in self.methods:\n x = shared(np.asarray([1e8, 1, -1e8], dtype=\"float32\"))\n s = getattr(x, method)()\n f = function([], s, mode=self.mode)\n topo = f.maker.fgraph.toposort()\n assert [n for n in topo if isinstance(n.op, self.op)], topo\n s_val = f()\n # Use extra precision in NumPy to compute the good answer.\n ret = getattr(np.asarray([1e8, 1, -1e8], dtype=\"float64\"), method)()\n assert np.allclose(s_val, ret), (s_val, ret)\n\n\nclass TestMeanDtype:\n def test_mean_default_dtype(self):\n # Test the default dtype of a mean().\n\n # We try multiple axis combinations even though axis should not matter.\n axes = [None, 0, 1, [], [0], [1], [0, 1]]\n for idx, dtype in enumerate(map(str, aes.all_types)):\n axis = axes[idx % len(axes)]\n x = matrix(dtype=dtype)\n m = x.mean(axis=axis)\n if dtype in discrete_dtypes:\n assert m.dtype == \"float64\"\n else:\n assert m.dtype == dtype, (m, m.dtype, dtype)\n f = function([x], m)\n data = np.random.random((3, 4)) * 10\n data = data.astype(dtype)\n f(data)\n\n @pytest.mark.slow\n def test_mean_custom_dtype(self):\n # Test the ability to provide your own output dtype for a mean.\n\n # We try multiple axis combinations even though axis 
should not matter.\n axes = [None, 0, 1, [], [0], [1], [0, 1]]\n idx = 0\n for input_dtype in map(str, aes.all_types):\n x = matrix(dtype=input_dtype)\n for sum_dtype in map(str, aes.all_types):\n axis = axes[idx % len(axes)]\n # If the inner sum cannot be created, it will raise a\n # TypeError.\n try:\n mean_var = x.mean(dtype=sum_dtype, axis=axis)\n except TypeError:\n pass\n else:\n # Executed if no TypeError was raised\n if sum_dtype in discrete_dtypes:\n assert mean_var.dtype == \"float64\", (mean_var.dtype, sum_dtype)\n else:\n assert mean_var.dtype == sum_dtype, (mean_var.dtype, sum_dtype)\n if (\n \"complex\" in input_dtype or \"complex\" in sum_dtype\n ) and input_dtype != sum_dtype:\n continue\n f = function([x], mean_var)\n data = np.random.random((3, 4)) * 10\n data = data.astype(input_dtype)\n f(data)\n # Check that we can take the gradient, when implemented\n if \"complex\" in mean_var.dtype:\n continue\n try:\n grad(mean_var.sum(), x, disconnected_inputs=\"ignore\")\n except NotImplementedError:\n # TrueDiv does not seem to have a gradient when\n # the numerator is complex.\n if mean_var.dtype in complex_dtypes:\n pass\n else:\n raise\n\n idx += 1\n\n def test_mean_precision(self):\n # Check that the default accumulator precision is sufficient\n x = shared(np.asarray([1e8, 1, -1e8], dtype=\"float32\"))\n m = x.mean()\n f = function([], m)\n m_val = f()\n assert np.allclose(m_val, 1.0 / 3)\n\n\nclass TestProdWithoutZerosDtype:\n def test_prod_without_zeros_default_dtype(self):\n # Test the default dtype of a ProdWithoutZeros().\n\n # We try multiple axis combinations even though axis should not matter.\n axes = [None, 0, 1, [], [0], [1], [0, 1]]\n for idx, dtype in enumerate(map(str, aes.all_types)):\n axis = axes[idx % len(axes)]\n x = ProdWithoutZeros(axis=axis)(matrix(dtype=dtype))\n assert (\n x.dtype\n == dict(\n bool=\"int64\",\n int8=\"int64\",\n int16=\"int64\",\n int32=\"int64\",\n uint8=\"uint64\",\n uint16=\"uint64\",\n uint32=\"uint64\",\n ).get(dtype, dtype)\n )\n\n def test_prod_without_zeros_default_acc_dtype(self):\n # Test the default dtype of a ProdWithoutZeros().\n\n # We try multiple axis combinations even though axis should not matter.\n axes = [None, 0, 1, [], [0], [1], [0, 1]]\n for idx, dtype in enumerate(map(str, aes.all_types)):\n axis = axes[idx % len(axes)]\n x = matrix(dtype=dtype)\n p = ProdWithoutZeros(axis=axis)(x)\n assert (\n p.owner.op.acc_dtype\n == dict(\n bool=\"int64\",\n int8=\"int64\",\n int16=\"int64\",\n int32=\"int64\",\n uint8=\"uint64\",\n uint16=\"uint64\",\n uint32=\"uint64\",\n float16=\"float32\",\n float32=\"float64\",\n complex64=\"complex128\",\n ).get(dtype, dtype)\n )\n\n if \"complex\" in dtype:\n continue\n f = function([x], p)\n data = np.random.random((2, 3)) * 3\n data = data.astype(dtype)\n f(data)\n\n @pytest.mark.slow\n def test_prod_without_zeros_custom_dtype(self):\n # Test ability to provide your own output dtype for a ProdWithoutZeros().\n\n # We try multiple axis combinations even though axis should not matter.\n axes = [None, 0, 1, [], [0], [1], [0, 1]]\n idx = 0\n for input_dtype in map(str, aes.all_types):\n x = matrix(dtype=input_dtype)\n for output_dtype in map(str, aes.all_types):\n axis = axes[idx % len(axes)]\n prod_woz_var = ProdWithoutZeros(axis=axis, dtype=output_dtype)(x)\n assert prod_woz_var.dtype == output_dtype\n idx += 1\n if \"complex\" in output_dtype or \"complex\" in input_dtype:\n continue\n f = function([x], prod_woz_var)\n data = np.random.random((2, 3)) * 3\n data = 
data.astype(input_dtype)\n f(data)\n\n @pytest.mark.slow\n def test_prod_without_zeros_custom_acc_dtype(self):\n # Test ability to provide your own acc_dtype for a ProdWithoutZeros().\n\n # We try multiple axis combinations even though axis should not matter.\n axes = [None, 0, 1, [], [0], [1], [0, 1]]\n idx = 0\n for input_dtype in map(str, aes.all_types):\n x = matrix(dtype=input_dtype)\n for acc_dtype in map(str, aes.all_types):\n axis = axes[idx % len(axes)]\n # If acc_dtype would force a downcast, we expect a TypeError\n # We always allow int/uint inputs with float/complex outputs.\n upcasted_dtype = aes.upcast(input_dtype, acc_dtype)\n if acc_dtype == upcasted_dtype or (\n input_dtype in discrete_dtypes and acc_dtype in continuous_dtypes\n ):\n prod_woz_var = ProdWithoutZeros(axis=axis, acc_dtype=acc_dtype)(x)\n assert prod_woz_var.owner.op.acc_dtype == acc_dtype\n\n if acc_dtype.startswith(\"complex\") and input_dtype != acc_dtype:\n continue\n f = function([x], prod_woz_var)\n data = np.random.random((2, 3)) * 3\n data = data.astype(input_dtype)\n f(data)\n else:\n with pytest.raises(TypeError):\n ProdWithoutZeros(axis=axis, acc_dtype=acc_dtype)(x)\n\n idx += 1\n\n\nclass TestSumMeanMaxMinArgMaxVarReduceAxes:\n def test_sum_axes(self):\n axes = [None, 0, 1, [0, 1], np.array(1), [np.array(0), np.array(1)]]\n for a in axes:\n x = matrix()\n x.sum(a)\n\n def test_mean_axes(self):\n axes = [None, 0, 1, [0, 1], np.array(1), [np.array(0), np.array(1)]]\n for a in axes:\n x = matrix()\n x.mean(a)\n\n def test_max_axes(self):\n axes = [None, 0, 1, [0, 1], np.array(1), [np.array(0), np.array(1)]]\n for a in axes:\n x = matrix()\n x.max(a)\n\n def test_min_axes(self):\n axes = [None, 0, 1, [0, 1], np.array(1), [np.array(0), np.array(1)]]\n for a in axes:\n x = matrix()\n x.min(a)\n\n def test_argmax_axes(self):\n axes = [None, 0, 1, [0, 1], np.array(1), [np.array(0), np.array(1)]]\n for a in axes:\n x = matrix()\n x.argmax(a)\n\n def test_var_axes(self):\n axes = [None, 0, 1, [0, 1], np.array(1), [np.array(0), np.array(1)]]\n for a in axes:\n x = matrix()\n x.var(a)\n\n\ndef reduce_bitwise_and(x, axis=-1, dtype=\"int8\"):\n identity = np.array((-1,), dtype=dtype)[0]\n\n shape_without_axis = tuple([s for i, s in enumerate(x.shape) if i != axis])\n if 0 in shape_without_axis:\n return np.empty(shape=shape_without_axis, dtype=x.dtype)\n\n def custom_reduce(a):\n out = identity\n for i in range(a.size):\n out = np.bitwise_and(a[i], out)\n return out\n\n return np.apply_along_axis(custom_reduce, axis, x)\n\n\ndef test_clip_grad():\n\n # test the gradient of clip\n def func(x, y, z):\n return clip(x, y, z)\n\n # use an x value less than y, an x value between y and z, and an x value\n # greater than z\n utt.verify_grad(func, [np.asarray([-1.0, 0.5, 2.0]), 0.0, 1.0])\n\n\ndef test_clip_grad_int():\n # FIXME: This is not a real test.\n # test that integers don't crash clip gradient\n x = iscalar()\n y = iscalar()\n z = iscalar()\n c = clip(x, y, z)\n grad(c, [x, y, z])\n\n\ndef test_grad_useless_sum():\n \"\"\"\n Test absence of useless sum.\n\n When an operation (such as `Aesara.tensor.mul`) is done on a broadcastable\n vector and a matrix, the gradient in backward path is computed for the\n broadcasted vector. So a sum reverts the broadcasted vector to a vector. 
In\n the case of operations on two broadcastable vectors, the sum should not be\n generated.\n\n This test checks whether there is a useless sum in the gradient\n computations.\n \"\"\"\n\n mode = get_default_mode().including(\"canonicalize\")\n mode.check_isfinite = False\n x = TensorType(config.floatX, (True,))(\"x\")\n l = log(1.0 - sigmoid(x))[0]\n g = grad(l, x)\n\n f = function([x], g, mode=mode)\n test_values = [-100, -1, 0, 1, 100]\n outputs = []\n old_values_eq_approx = staticmethod(TensorType.values_eq_approx)\n TensorType.values_eq_approx = staticmethod(values_eq_approx_remove_nan)\n try:\n for test_value in test_values:\n outputs.append(f(np.array([test_value]).astype(\"float32\")))\n finally:\n TensorType.values_eq_approx = old_values_eq_approx\n\n assert not any([isinstance(node.op, Sum) for node in applys_between([x], [g])])\n assert np.allclose(\n outputs, [[-3.72007598e-44], [-0.26894142], [-0.5], [-0.73105858], [-1.0]]\n )\n\n\ndef test_tanh_grad_broadcast():\n # FIXME: This is not a real test.\n # This crashed in the past.\n\n x = tensor(dtype=\"float32\", broadcastable=(True, False, False, False))\n y = tensor(dtype=\"float32\", broadcastable=(True, True, False, False))\n\n grad(tanh(x).sum(), x)\n grad(tanh(x + y).sum(), y)\n grad(tanh(x + y).sum(), [x, y])\n\n\ndef test_logaddexp():\n # Test more than two multidimensional inputs\n x, y, z = matrices(\"x\", \"y\", \"z\")\n out = logaddexp(x, y, z)\n f = function([x, y, z], out)\n\n inp = np.zeros((3, 3), dtype=config.floatX)\n np.testing.assert_allclose(\n f(inp, inp, inp),\n np.full((3, 3), np.log(3)),\n )\n\n # Test scalar inputs\n x, y = scalars(\"x\", \"y\")\n out = logaddexp(x, y)\n f = function([x, y], out)\n\n res = f(0, 0)\n assert np.ndim(res) == 0\n assert np.isclose(res, np.log(2))\n\n # Test scalar and matrix inputs\n x = scalar(\"x\")\n y = matrix(\"y\")\n out = logaddexp(x, y)\n f = function([x, y], out)\n\n res = f(\n np.array(0, dtype=config.floatX),\n np.zeros((3, 3), dtype=config.floatX),\n )\n assert np.shape(res) == (3, 3)\n np.testing.assert_allclose(\n res,\n np.full((3, 3), np.log(2)),\n )\n\n\[email protected](\n [\"shape\", \"axis\"],\n [\n ((1,), 0),\n ((3,), 0),\n ((3, 4), None),\n ((3, 4), 0),\n ((3, 4), 1),\n ((3, 4, 5), None),\n ((3, 3, 5), 0),\n ((3, 4, 5), 1),\n ((3, 4, 5), 2),\n ],\n)\[email protected](\n \"keepdims\",\n [True, False],\n)\ndef test_logsumexp(shape, axis, keepdims):\n scipy_inp = np.zeros(shape)\n scipy_out = scipy_logsumexp(scipy_inp, axis=axis, keepdims=keepdims)\n\n aesara_inp = as_tensor_variable(scipy_inp)\n f = function([], logsumexp(aesara_inp, axis=axis, keepdims=keepdims))\n aesara_out = f()\n\n np.testing.assert_array_almost_equal(\n aesara_out,\n scipy_out,\n )\n"
] | [
[
"numpy.asarray",
"numpy.max",
"numpy.zeros",
"numpy.min"
],
[
"numpy.dot",
"numpy.maximum",
"numpy.allclose",
"numpy.asarray",
"numpy.concatenate",
"numpy.max",
"numpy.int64",
"numpy.argmax",
"numpy.mean",
"numpy.iinfo",
"numpy.isscalar",
"numpy.prod",
"numpy.array"
],
[
"numpy.dot",
"numpy.true_divide",
"numpy.minimum",
"numpy.sqrt",
"numpy.asarray",
"numpy.all",
"numpy.max",
"numpy.mean",
"numpy.zeros_like",
"numpy.iinfo",
"numpy.argmin",
"numpy.var",
"numpy.any",
"numpy.exp",
"numpy.random.default_rng",
"numpy.allclose",
"numpy.clip",
"numpy.arange",
"numpy.eye",
"numpy.int8",
"numpy.sin",
"numpy.apply_along_axis",
"numpy.argmax",
"numpy.tensordot",
"numpy.outer",
"numpy.zeros",
"numpy.testing.assert_array_almost_equal",
"numpy.log",
"numpy.power",
"numpy.ndim",
"numpy.cov",
"numpy.array",
"numpy.sum",
"scipy.special.logsumexp",
"numpy.random.random",
"numpy.abs",
"numpy.array_equal",
"numpy.maximum",
"numpy.ptp",
"numpy.cos",
"numpy.ones",
"numpy.bitwise_and",
"numpy.complex",
"numpy.shape",
"numpy.prod",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.9",
"0.19",
"1.5",
"1.2",
"1.7",
"1.0",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
APS-XSD-OPT-Group/wavepytools | [
"25397c099e86a8939cc4ee3a2d266e4f809a1d18",
"25397c099e86a8939cc4ee3a2d266e4f809a1d18",
"25397c099e86a8939cc4ee3a2d266e4f809a1d18",
"25397c099e86a8939cc4ee3a2d266e4f809a1d18",
"25397c099e86a8939cc4ee3a2d266e4f809a1d18"
] | [
"wavepytools/optics/fourierOptics/exampleCircularLens2Steps.py",
"wavepytools/NRA/nra_analyses.py",
"wavepytools/wgTools/fastplot_with_pyqtgraph.py",
"wavepytools/imaging/single_grating/diff_image.py",
"wavepytools/imaging/single_grating/wavefront_response_func.py"
] | [
"# -*- coding: utf-8 -*- #\n\"\"\"\nCreated on Tue Mar 3 11:18:30 2015\n\n@author: wcgrizolli\n\"\"\"\n\nimport sys\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom myFourierLib import *\n\n\nsys.path.append('/home/wcgrizolli/pythonWorkspace/wgTools')\nimport wgTools as wgt\n\nsys.path.append('/home/wcgrizolli/pythonWorkspace/srw/wgTools4srw')\nfrom wgTools4srw import *\n\n##=========================================================#\n# %% sampling definition\n##=========================================================#\nwavelength = 1.2398e-9 # 1KeV\n[Lx, Ly] = [2.5e-3, 2.5e-3]\n# Mx = Lx^2/wavelength/z\n[Mx, My] = [1001, 1001]\ndx = Lx/Mx\ndy = Ly/My\n\n#zz = 1.00 # XXX: dist to propag\n#Lx2 = Lx\n\nzz = .00322808 # XXX: dist to propag\nLx2 = Lx/2500.0\n\nprint('WG: sampling x=' + str(Mx))\nprint('WG: sampling y=' + str(My))\n\n# %%\nif Mx > 1001 or My > 1001:\n wgt.color_print('WG: Sampling bigger than 1001^2, stoping the program')\n# sys.exit()\n\n##=========================================================#\n# %% 2D u1 function\n##=========================================================#\n\n\ndef circ(X, Y, wx, wy, Xo=0.0, Yo=0.0): # circular\n out = X*0.0\n out[abs(((X-Xo)/wx)**2 + ((Y-Yo)/wy)**2) < 0.5**2] = 1.0\n out[abs(((X-Xo)/wx)**2 + ((Y-Yo)/wy)**2) == 0.5**2] = .50\n return out\n\n\ndef tFuncLens(X, Y, wavelength, fx=1e23, fy=1e23):\n return np.exp(-1j*2*np.pi/wavelength/2/fx*(X**2+Y**2))\n\n\ndef tFuncZP(X, Y, wavelength, fx=1e23, fy=1e23):\n return .5*(1.0 + np.sign(np.cos(np.pi/wavelength/fx*(X**2 + Y**2))))\n\n\nwx = 200e-6\nwy = 200e-6\nX, Y = np.meshgrid(np.linspace(-Lx/2, Lx/2, Mx), np.linspace(-Ly/2, Ly/2, My))\n\nprint('WG: Creating Source Wave u1...')\n\n#u1_xy = circ(X, Y, wx, wy)*tFuncZP(X, Y, wavelength, fx=zz)\nu1_xy = circ(X, Y, wx, wy)*tFuncLens(X, Y, wavelength, fx=zz)\n\n#u1_xy = circ(X, Y, wx, wy, 0, 80e-6) + circ(X, Y, wx, wy, 0,-80e-6) # double slit\n\nprint('WG: Creating Source Wave u1: DONE!')\n\n##=========================================================#\n# %% Propagation\n##=========================================================#\n\nprint('WG: Propagation...')\n\n\nif Lx == Lx2:\n u2_xy = propTForIR(u1_xy, Lx, Ly, wavelength, zz)\n X2, Y2 = X, Y\nelse:\n u2_xy = prop2step(u1_xy, Lx, Lx2, wavelength, zz)\n X2, Y2 = np.meshgrid(np.linspace(-Lx2/2, Lx2/2, Mx),\n np.linspace(-Lx2/2, Lx2/2, My))\n\nprint('WG: Propagation: DONE!')\n\n##=========================================================#\n# %% Plot u1\n##=========================================================#\n\n\n\n\nsaveFigure = 0\n\nprint('WG: Plot u1...')\n\n\n\nfactorX, unitStrX = wgt.chooseUnit(X)\nfactorY, unitStrY = wgt.chooseUnit(Y)\n\nunitStrX = unitStrX + ' m'\nunitStrY = unitStrY + ' m'\n\n# %% U1\n\n\n\nwgt.plotProfile(X*factorX, Y*factorY, np.abs(u1_xy),\n r'$x [' + unitStrX +']$',\n r'$y [' + unitStrY + ']$',\n r'Intensity [a.u.]',\n xo=0.0, yo=0.0,\n unitX=unitStrX, unitY=unitStrY)\n\n\n# %% U1\n\n#wgt.plotProfile(X*factorX, Y*factorY, np.abs(u1_xy),\n# r'$x [' + unitStrX +']$',\n# r'$y [' + unitStrY + ']$',\n# r'Intensity [a.u.]',\n# xo=0.0, yo=0.0,\n# unitX=unitStrX, unitY=unitStrY)\nif saveFigure:\n outputFigureName = wgt.datetimeNowStr() + '_u1.png'\n plt.savefig(outputFigureName)\n print('WG: Figure saved at %s!\\n' % (outputFigureName))\n plt.close()\nelse:\n plt.show(block=True)\n\nprint('WG: Plot u1: DONE!')\n\n##=========================================================#\n# %% Plot 
u2\n##=========================================================#\n\nprint('WG: Plot u2...')\n\nfactorX2, unitStrX2 = wgt.chooseUnit(X2)\nfactorY2, unitStrY2 = wgt.chooseUnit(Y2)\n\nunitStrX2 = unitStrX2 + ' m'\nunitStrY2 = unitStrY2 + ' m'\n\n\n## U1\n\nwgt.plotProfile(X2*factorX2, Y2*factorY2, np.abs(u2_xy),\n r'$x [' + unitStrX2 + ']$',\n r'$y [' + unitStrY2 + ']$',\n r'Intensity [a.u.]',\n unitX=unitStrX2, unitY=unitStrY2)\n\nif saveFigure:\n outputFigureName = wgt.datetimeNowStr() + '_u2.png'\n plt.savefig(outputFigureName)\n print('WG: Figure saved at %s!\\n' % (outputFigureName))\n plt.close()\nelse:\n plt.show(block=True)\n\nprint('WG: Plot u2: DONE!')\n# %%\n",
"#! /bin/python\n# -*- coding: utf-8 -*- #\n\"\"\"\nCreated on Tue Oct 08\n\n@author: wcgrizolli\n\"\"\"\n\n\n#import dxchange\n\n\nfrom pywinspec import SpeFile, test_headers\n\n\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nimport numpy as np\n\nimport sys\n\nsys.path.append('/home/grizolli/workspace/pythonWorkspace/wgTools/')\n\nimport wgTools as wgt\n\n\nimport wavepy.utils as wpu\n\nimport scipy\n\nfrom scipy.ndimage import gaussian_filter, uniform_filter, maximum_filter\n\n\nwpu._mpl_settings_4_nice_graphs()\n\n#==============================================================================\n# preamble\n#==============================================================================\n\n\n# Flags\nsaveFigFlag = True\n\n# useful constants\nrad2deg = np.rad2deg(1)\ndeg2rad = np.deg2rad(1)\nNAN = float('Nan') # not a number alias\n\nfrom scipy import constants\nhc = constants.value('inverse meter-electron volt relationship') # hc\n\n#==============================================================================\n# %% Experimental Values\n#==============================================================================\n\npixelsize = 13.5e-6\ndist2detector = 375.5e-3\nphenergy = 778.00\n\nwavelength = hc/phenergy\nkwave = 2*np.pi/wavelength\n\n\n#==============================================================================\n# %% Load SPE\n#==============================================================================\n\nfname = wgt.selectFile('*SPE', 3)\nspe_file = SpeFile(fname)\n\n#==============================================================================\n# %% Crop ROI\n#==============================================================================\n\n#idx4crop = [635, 1993, 841, 1792]\n#img = wpu.crop_matrix_at_indexes(spe_file.data[0], idx4crop)\n\nimg, idx4crop = wpu.crop_graphic_image(spe_file.data[0], verbose=False)\nprint(idx4crop)\n\n\n\nimg = wpu.pad_to_make_square(img, mode='edge')\n\n# %% Plot Detector coordinates\n\n\nxx, yy = wpu.realcoordmatrix(img.shape[1], pixelsize,\n img.shape[0], pixelsize)\n\n\n\nplt.figure(figsize=plt.figaspect(.6))\nplt.contourf(xx*1e3, yy*1e3, img/np.nanmax(img), 201, cmap='plasma', vmin=0.1)\nplt.xlabel(r'$x$ [mm]')\nplt.ylabel(r'$y$ [mm]')\nplt.title(r'Data ')\nplt.colorbar()\nplt.show(block=True)\n\n\n#xVec = wpu.realcoordvec(img.shape[1], pixelsize)\n#yVec = wpu.realcoordvec(img.shape[0], pixelsize)\n\n\n# %%\n\n#\nxx, yy = wpu.realcoordmatrix(img.shape[1], 1,\n img.shape[0], 1)\n#wpu.plot_profile(xx, yy, img/np.nanmax(img))\n#==============================================================================\n# %% FFT\n#==============================================================================\n\n\nfrom numpy.fft import *\n\nqx, qy = wpu.realcoordmatrix(img.shape[1], pixelsize/dist2detector*kwave,\n img.shape[0], pixelsize/dist2detector*kwave)\n\nfftimg = ifftshift(fft2(fftshift(img)))*pixelsize*pixelsize\nabs_fftimg = np.abs(fftimg)\nabs_fftimg -= np.min(abs_fftimg)\nnorm_abs_fftimg = abs_fftimg/np.max(abs_fftimg)\n\nlog_abs_fftimg = np.log(norm_abs_fftimg + np.finfo(float).eps)\n\n\n\n# %%\n\n\nplt.figure(figsize=plt.figaspect(.6))\nplt.contourf(qx*1e-6, qy*1e-6, log_abs_fftimg, 201, cmap='plasma', vmin=-10, vmax=-.5)\nplt.xlabel(r'$q_x$ [$ \\mu m^{-1} $]')\nplt.ylabel(r'$q_y$ [$ \\mu m^{-1}$]')\nplt.title(r'log of module FFT ')\nplt.colorbar()\nplt.show(block=True)\n\n\n\n\n#==============================================================================\n# %% Mask 
Angle\n#==============================================================================\n\ndef create_mask_angle(angle, delAngle, shape, indexing='xy'):\n\n if indexing == 'xy':\n ii, jj = np.mgrid[shape[0]:0:-1,0:shape[1]]\n angle = - angle\n elif indexing == 'ij':\n ii, jj = np.mgrid[0:shape[0],0:shape[1]]\n\n ii -= shape[0] // 2\n jj -= shape[1] // 2\n\n #mask = 1.0*(np.logical_and(np.arctan2(ii,jj)*np.rad2deg(1) < angle + delAngle - 180.00,\n # np.arctan2(ii,jj)*np.rad2deg(1) > angle - delAngle - 180.00))\n\n #mask = 1.0*(np.logical_and(np.arctan(ii/jj)*np.rad2deg(1) < angle + delAngle,\n # np.arctan(ii/jj)*np.rad2deg(1) > angle - delAngle))\n\n mask = 1.0*(np.logical_and(np.arctan2(ii,jj)*np.rad2deg(1) < angle + delAngle,\n np.arctan2(ii,jj)*np.rad2deg(1) > angle - delAngle) +\n np.logical_and(np.arctan2(ii,jj)*np.rad2deg(1) < 180. + angle + delAngle,\n np.arctan2(ii,jj)*np.rad2deg(1) > 180. + angle - delAngle)+\n np.logical_and(np.arctan2(ii,jj)*np.rad2deg(1) < -180. + angle + delAngle,\n np.arctan2(ii,jj)*np.rad2deg(1) > -180. + angle - delAngle))\n\n #mask = 1.0*(np.logical_and(jj/ii < np.tan(angle + delAngle),\n # jj/ii > np.tan(angle - delAngle)) +\n # np.logical_and(jj/ii < np.tan(180. + angle + delAngle),\n # jj/ii > np.tan(180. + angle - delAngle)))\n\n mask[mask>.5] = 1.000\n mask[mask<.5] = np.nan\n\n return mask\n\n\nplotThis = log_abs_fftimg\nplotThis = plotThis[::-1,:]\n\n# Select angle\n\n\n#joio = wpu.graphical_select_point_idx(plotThis, verbose=True)\n#jo = int(joio[0])\n#io = int(joio[1])\n#angle = np.arctan2(abs_fftimg.shape[0]//2 - io, jo - abs_fftimg.shape[1]//2)*np.rad2deg(1)\n\nangle = -21.4061120849\n\nprint('angle = ' + str(angle))\n\n\n\n# %%\nmask_angle = create_mask_angle(angle, 1, abs_fftimg.shape, indexing = 'xy')\n\n\nprint('oi 1346')\n\n\n# %% peaks\n\ndef create_mask_peaks2DOLD(array2D, threshold=None, order=3):\n\n\n import scipy.signal\n\n if threshold is not None:\n mask_threshold = wpu.nan_mask_threshold(array2D, threshold=.001)\n else:\n mask_threshold = array2D*0.0 +1.0\n\n\n\n idx_x_axis_0, idx_y_axis_0 = scipy.signal.argrelmax(array2D*mask_threshold,\n axis=0, order = order)\n idx_x_axis_1, idx_y_axis_1 = scipy.signal.argrelmax(array2D*mask_threshold,\n axis=1, order = order)\n\n peaks_axis0 = np.zeros(np.shape(array2D))\n peaks_axis0[idx_x_axis_0[:], idx_y_axis_0[:]] = 1.0\n peaks_axis1 = np.zeros(np.shape(array2D))\n peaks_axis1[idx_x_axis_1[:], idx_y_axis_1[:]] = 1.0\n\n\n\n return peaks_axis0, peaks_axis1\n\n# %%\n\ndef create_mask_peaks2D(array2D, order=1, mode='clip', srn_threshold=0.0):\n\n\n array2D = np.pad(array2D[order:-order,order:-order], order, 'edge')\n # make our life easier by making the edges peak free\n\n idx_axis_0 = scipy.signal.argrelmax(array2D, axis=0, order=order, mode=mode)\n idx_axis_1 = scipy.signal.argrelmax(array2D, axis=1, order=order, mode=mode)\n\n peaks_axis0 = np.zeros(np.shape(array2D))\n peaks_axis0[idx_axis_0] = 1.0\n peaks_axis1 = np.zeros(np.shape(array2D))\n peaks_axis1[idx_axis_1] = 1.0\n\n\n snr0 = np.zeros(np.shape(array2D))\n snr1 = np.zeros(np.shape(array2D))\n\n snr0[idx_axis_0] = np.abs(array2D[idx_axis_0[0], idx_axis_0[1]] / \\\n np.mean((array2D[idx_axis_0[0] - order, idx_axis_0[1]],\n array2D[idx_axis_0[0] + order, idx_axis_0[1]])))\n\n snr1[idx_axis_1] = np.abs(array2D[idx_axis_1[0], idx_axis_1[1]] / \\\n np.mean((array2D[idx_axis_1[0], idx_axis_1[1]],\n array2D[idx_axis_1[0], idx_axis_1[1]+ order])))\n\n srn = (snr0 + snr1)/2\n mask_snr = np.where(srn > srn_threshold, 1, 0)\n\n\n return 
np.where(peaks_axis0*peaks_axis1*mask_snr >= 0.5), srn*peaks_axis0*peaks_axis1\n\n\n# %%\n\nfor srn_threshold in [1, 1.5, 2, 3, 5]:\n\n fig = plt.figure(figsize=plt.figaspect(.6))\n\n\n\n\n plotThis = log_abs_fftimg\n\n plt.contourf(qx, qy, plotThis, 101, cmap='plasma', vmin=-10, vmax=5)\n\n\n [idx_angle_i, idx_angle_j], srn = create_mask_peaks2D(gaussian_filter(norm_abs_fftimg, 2),\n order=4, srn_threshold=srn_threshold)\n\n #[idx_angle_i, idx_angle_j], srn = create_mask_peaks2D(norm_abs_fftimg,\n # order=5, srn_threshold=2.0)\n\n plt.plot(qx[idx_angle_i,idx_angle_j],\n qy[idx_angle_i,idx_angle_j], 'bx', ms=10, mew=2)\n\n\n\n plt.show(block=False)\n\n# %%\n\n\nfig = plt.figure(figsize=plt.figaspect(.6))\n\n\nplotThis = maximum_filter(srn, 3)\n\nplotThis[plotThis > 500] = NAN\n\nplt.contourf(qx, qy, plotThis, 101, cmap='plasma')\n\nplt.colorbar()\nplt.show(block=True)\n\n\n\n# %%\n\nplt.figure()\nplotThis = log_abs_fftimg\nplt.contourf(qx*1e-3, qy*1e-3, plotThis, 101, cmap='plasma', vmin=-10, vmax=5)\n\nidx_angle_i, idx_angle_j = np.where(peaks_mask0*peaks_mask1>.5)\nplt.plot(qx[idx_angle_i,idx_angle_j]*1e-3,\n qy[idx_angle_i,idx_angle_j]*1e-3, 'bx', ms=10, mew=2)\n\nplt.title('FFT Image, Log Scale')\nplt.colorbar()\nplt.show(block=True)\n\n\nprint('oi')\n\n# %%\n\nfig = plt.figure(figsize=plt.figaspect(.6))\n\nplt.contourf(qx*1e-6, qy*1e-6, log_abs_fftimg, 201, cmap='plasma', vmin=-10, vmax=-.5)\nplt.xlabel(r'$q_x$ [$ \\mu m^{-1} $]')\nplt.ylabel(r'$q_y$ [$ \\mu m^{-1}$]')\nplt.title(r'log of module FFT ')\nplt.colorbar()\n\n\nidx_angle_i, idx_angle_j = np.where(peaks_mask0*peaks_mask1*mask_angle>.5)\nplt.plot(qx[idx_angle_i,idx_angle_j]*1e-6,\n qy[idx_angle_i,idx_angle_j]*1e-6, 'bo', ms=10, mew=2, mfc=\"None\")\n\n\n\nplt.show(block=True)\n\n\n# %%\n\n\nimport skimage.filters\nimport scipy.ndimage\n\n\nscipy.ndimage.uniform_filter\n\n\n# %%\n\nvec_q = np.sqrt(qx[idx_angle_i,idx_angle_j]**2 + \\\n qy[idx_angle_i,idx_angle_j]**2)*np.sign(qy[idx_angle_i,idx_angle_j])\n\n#intensity = abs_fftimg[idx_angle_i,idx_angle_j]\n#intensity = gaussian_filter(abs_fftimg, 2.5)[idx_angle_i,idx_angle_j]\n\n\nintensity = scipy.ndimage.uniform_filter(abs_fftimg, 5)[idx_angle_i,idx_angle_j]\n\n\n\n# %%\n\nplt.figure()\n\nplt.plot(vec_q, intensity, '-x', ms=10, mew=2)\nplt.plot(-vec_q, intensity*1.1, '-x', ms=10, mew=2)\n\nplt.xlabel(r'q [$ m^{-1}$]')\nplt.title('FFT Image, Log Scale')\nplt.show(block=True)\n\n\n# %%\n\nrho_x, rho_y = wpu.reciprocalcoordmatrix(qx.shape[1], qy[1, 0] - qy[0,0],\n qy.shape[0], qx[0, 1] - qx[0,0])\n\n\n\nrho_x *= 2*np.pi\nrho_y *= 2*np.pi\n\n\nvec_rho = np.sqrt(rho_x[idx_angle_i,idx_angle_j]**2 + \\\n rho_y[idx_angle_i,idx_angle_j]**2)*np.sign(rho_y[idx_angle_i,idx_angle_j])\n\n\n\n\n# %%\nplt.figure()\n\n\nintensity = scipy.ndimage.uniform_filter(norm_abs_fftimg, 3)[idx_angle_i,idx_angle_j]\nplt.plot(vec_rho*1e6, intensity, '-xg', ms=10, mew=2)\n\nintensity = scipy.ndimage.uniform_filter(norm_abs_fftimg, 0)[idx_angle_i,idx_angle_j]\nplt.plot(vec_rho*1e6, intensity, '-xb', ms=10, mew=2)\n\n\n\nprint(vec_rho*1e6)\n\nfor i in range(intensity.size):\n\n print('{:.3f}, \\t {:2.4g}'.format(vec_rho[i]*1e6, intensity[i]*1e2))\n\n\nintensity = scipy.ndimage.uniform_filter(norm_abs_fftimg, 5)[idx_angle_i,idx_angle_j]\nplt.plot(vec_rho*1e6, intensity, '-xr', ms=5, mew=2)\n\n\n#plt.plot(-vec_rho*1e6/1.5, intensity*1.1, '-x', ms=10, mew=2)\n\n\nplt.xlabel(r'$\\rho$ [$\\mu m$]')\nplt.title('FFT Image, Log Scale')\nplt.show(block=True)\n\n\n\n# %%\n\nexit()\n\n# %%\nc_j_over_c_0 = 
np.array([4.465, 3.976, 3.13, 3.024, 3.113, 2.308, 1.781])\n\nrho = np.array([1.5, 3. , 4.5, 6. , 7.5, 9. , 10.5])\n\n\nS_j = np.array([0.992, 0.968, 0.96, 0.81, 0.817, 0.576, 0.791])\n\nsigma = 4\n\nintensity_temp = c_j_over_c_0/S_j\n\n\n\n# %%\nplt.figure()\n\n\n#\n#plt.plot(rho, c_j_over_c_0, '-or', ms=10, mew=2)\n\n#plt.plot(rho, S_j, '-ob', ms=10, mew=2)\nplt.plot(rho, intensity_temp, '-kx', ms=10, mew=2)\n\n\nplt.xlabel(r'$\\rho$ [$\\mu m$]')\nplt.show(block=False)\n\n\n\n# %%\n\n\n\n",
"#!/usr/bin/env python\n# -*- coding: utf-8 -*- #\n\"\"\"\nCreated on Thu Mar 20 16:46:25 2014\n\n@author: wcgrizolli\n\"\"\"\n\nimport pyqtgraph as pg\nfrom pyqtgraph.Qt import QtGui\nimport pyqtgraph.opengl as gl\nimport numpy as np\nfrom matplotlib import cm\nfrom wavepy.utils import easyqt\n\nimport wavepy.utils as wpu\n\n\ndef plot_surf_fast(dataz, pixelsize=[1., 1.], style='viridis',\n ratio_x_y=1.0, scaling_z=1.0, distCamera=3):\n\n# _ = QtGui.QApplication([])\n\n\n pg.mkQApp()\n\n maxZ = np.nanmax(np.abs(dataz))\n\n z = dataz/maxZ*scaling_z\n\n # THE DEFINITIONS OF X AND Y ARE DIFFERENT IN PYQTGRAPH\n # AND THUS i NEET TO TRANSPOSE z\n\n [pixelSize_j, pixelSize_i] = pixelsize\n npoints_i = z.shape[1]\n npoints_j = z.shape[0]\n\n x = np.linspace(-.500, .500, npoints_j)\n y = np.linspace(-.500, .500, npoints_i)\n\n sizeX = pixelSize_i*npoints_j*ratio_x_y\n sizeY = pixelSize_j*npoints_i\n\n colorMap = _generateColormap(z, style, 1, 1)\n # z[np.isnan(z)] = 0.0\n\n # Create a GL View widget to display data\n\n w = gl.GLViewWidget()\n w.show()\n w.setWindowTitle('Lx = {:.3e}m, '.format(pixelSize_i*npoints_i) +\n 'Ly = {:.3e}m, '.format(pixelSize_j*npoints_j) +\n 'Max z = {:.3e}'.format(maxZ))\n w.setCameraPosition(distance=3)\n\n # plot\n\n p3 = gl.GLSurfacePlotItem(x=x, y=y, z=z, colors=colorMap, shader='shaded')\n\n p3.scale(1, sizeX/sizeY, 1)\n w.addItem(p3)\n\n # Add a grid to the view\n\n gx = gl.GLGridItem()\n gx.rotate(90, 1, 0, 0)\n gx.translate(0, -.5*sizeX/sizeY, .5*scaling_z)\n gx.scale(.05, .05*scaling_z, 1)\n gx.setDepthValue(10)\n w.addItem(gx)\n\n gy = gl.GLGridItem()\n gy.rotate(90, 0, 1, 0)\n gy.translate(-.5, 0, .5*scaling_z)\n gy.scale(.05*scaling_z, .05*sizeX/sizeY, 1)\n gy.setDepthValue(10)\n w.addItem(gy)\n\n gz = gl.GLGridItem()\n gz.scale(.05, .05*sizeX/sizeY, 1)\n gz.setDepthValue(10)\n w.addItem(gz)\n\n QtGui.QApplication.instance().exec_()\n\n\ndef _generateColormap(z, style='viridis', power=1, inverse=1):\n\n jetcmap = cm.get_cmap(style) # generate a jet map with 10 values\n nColours = jetcmap.N\n jet_vals = jetcmap(np.arange(nColours)) # extract those values as an array\n\n zmin = np.nanmin(z)\n zmax = np.nanmax(z)\n\n colorIndex = np.rint(inverse*((z-zmin)/(zmax-zmin))**power *\n (nColours-1)).astype(int)\n\n colorIndex[np.isnan(z)] = 1\n colorMap = jet_vals[colorIndex[:, :]]\n\n return colorMap\n\n\nif __name__ == '__main__':\n# %%\n\n dataFilename = easyqt.get_file_names()\n\n if len(dataFilename) == 1:\n dataFilename = dataFilename[0]\n dataZ, pixelSize, headerdic = wpu.load_sdf_file(dataFilename)\n else:\n\n y, x = np.mgrid[-1:1:100j, -1:1:100j]\n dataZ = np.sinc(10*x**2)*np.exp(-y**2/.5**2)\n\n pixelSize = [1/2, 1]\n\n\n# %%\n\n plot_surf_fast(dataZ, pixelSize,\n style='rainbow',\n ratio_x_y=1.0, scaling_z=1.0)\n\n\n # %% OLD\n# with open(dataFilename) as f:\n# header = f.readline().split()\n#\n# # % reshape from x,y,z for meshgrid format5.00\n# dataZ = np.loadtxt(dataFilename, comments='#')\n# dataZ -= np.nanmin(dataZ)\n# pixelSize_i = float(header[header.index('i,j') + 2])\n# pixelSize_j = float(header[header.index('i,j') + 4])\n\n# plot_surf_fast(dataZ, [pixelSize_i, pixelSize_j],\n# style='rainbow',\n# ratio_x_y=1.0, scaling_z=1.0)\n",
"'''\n this function provides a tool to get differential image. the image type is tiff\n this function can be used as gui mode or terminal mode\n for terminal mode, here is how to use it:\n image_crop path_to_image_folder path_to_output_folder crop_size_col crop_size_row col_shift row_shift\n\n description:\n sample: raw image folder with sample\n nosample: raw image without sample\n dark: raw dark image\n path_output: output image folder\n\n'''\n\nimport os\nimport sys\nimport skimage\nimport numpy as np\nimport dxchange\nimport tifffile as tiff\nfrom wavepy.utils import easyqt\nfrom matplotlib import pyplot as plt\nimport matplotlib.cm as cm\nimport glob\n\ndef gui_load_data_directory(directory='', title=\"File name with Data\"):\n \n originalDir = os.getcwd()\n\n fname1 = easyqt.get_directory_name(title)\n\n if len(fname1) == 0:\n fname_last = None\n\n else:\n fname_last = fname1\n\n os.chdir(originalDir)\n\n return fname_last\n\ndef gui_load_data_finename(directory='', title=\"File name with Data\"):\n \n originalDir = os.getcwd()\n\n if directory != '':\n\n if os.path.isdir(directory):\n os.chdir(directory)\n else:\n wpu.print_red(\"WARNING: Directory \" + directory + \" doesn't exist.\")\n wpu.print_blue(\"MESSAGE: Using current working directory \" +\n originalDir)\n\n fname1 = easyqt.get_file_names(title)\n\n if len(fname1) == 0:\n fname_last = None\n\n else:\n fname_last = fname1[0]\n\n os.chdir(originalDir)\n\n return fname_last\n\n\nif __name__ == \"__main__\":\n '''\n input:\n sample: raw image folder with sample\n nosample: raw image without sample\n dark: raw dark image\n path_output: output image folder\n '''\n if len(sys.argv) == 5:\n '''\n if the passing parameter has five parameters, means it's under terminal mode\n '''\n sample_path, nosample_path, dark_path, path_output = sys.argv[1:5]\n\n # read sample image\n sample_img = tiff.imread(sample_path)\n sample_img = np.array(sample_img)\n # read no sample image\n nosample_img = tiff.imread(nosample_path)\n nosample_img = np.array(nosample_img)\n # read dark image\n dark_img = tiff.imread(dark_path)\n dark_img = np.array(dark_img)\n\n # process the data\n # sample_img = sample_img - dark_img\n # nosample_img = nosample_img - dark_img\n sample_img = np.abs((sample_img - np.amin(sample_img))/(np.amax(sample_img) - np.amin(sample_img)))\n nosample_img = np.abs((nosample_img - np.amin(nosample_img))/(np.amax(nosample_img) - np.amin(nosample_img)))\n color_type = 'gist_heat'\n # plt.subplot(121)\n # plt.imshow(sample_img, cmap=plt.get_cmap(color_type), vmax=abs(sample_img).max(), vmin=-abs(sample_img).max())\n # plt.subplot(122)\n # plt.imshow(nosample_img, cmap=plt.get_cmap(color_type), vmax=abs(nosample_img).max(), vmin=-abs(nosample_img).max())\n\n # plt.show()\n result = np.abs((sample_img-dark_img)/(nosample_img-dark_img))\n result = np.abs((result - np.amin(result))/(np.amax(result) - np.amin(result))) * 40000\n result_log = np.log10(result+1e-20)\n img_result = np.array(result, dtype='uint16')\n img_result_log = np.array(result_log, dtype='uint16')\n\n plt.imshow(result, cmap=plt.get_cmap(color_type), vmax=abs(result).max(), vmin=-abs(result).max())\n plt.title('Siemens star image (linear)')\n # plt.show()\n # plt.imshow(result_log, cmap=plt.get_cmap(color_type))\n # plt.title('Siemens star image (log10)')\n # plt.show()\n\n if not os.path.exists(path_output):\n os.makedirs(path_output)\n \n # file_name = path_output + '/result_sample_image_640mm'\n # file_number = 640\n # while os.path.isfile(file_name+'.tif'):\n # 
file_number = file_number + 10\n # file_name = path_output + '/result_sample_image_' + str(file_number)+'mm'\n \n file_name = path_output + '/result_sample_image'\n tiff.imsave(file_name + '.tif', img_result)\n plt.imsave(file_name + '.png', result, cmap=plt.get_cmap(color_type), format='png')\n # plt.imsave(path_output + '/result_sample_image_log10.png', result_log, cmap=plt.get_cmap(color_type), format='png')\n tiff.imsave(file_name + '_log10' + '.tif',img_result)\n print('\\033[32m' + 'MESSAGE: File ' + file_name + '.tif'\n ' saved' + '\\033[0m')\n\n\n elif len(sys.argv) == 1:\n '''\n if only one parameter, it's gui mode to get the folder path\n '''\n\n print('\\033[32m' + 'MESSAGE: select raw image with sample' + '\\033[0m')\n sample_path = gui_load_data_finename('', 'load sample image')\n\n print('\\033[32m' + 'MESSAGE: select raw image without sample' + '\\033[0m')\n nosample_path = gui_load_data_finename('', 'load no sample image')\n\n print('\\033[32m' + 'MESSAGE: select raw dark image' + '\\033[0m')\n dark_path = gui_load_data_finename('', 'load dark image')\n\n print('\\033[32m' + 'MESSAGE: select output folder' + '\\033[0m')\n path_output = gui_load_data_directory('', 'select output folder')\n\n # read sample image\n sample_img = tiff.imread(sample_path)\n sample_img = np.array(sample_img)\n # read no sample image\n nosample_img = tiff.imread(nosample_path)\n nosample_img = np.array(nosample_img)\n # read dark image\n dark_img = tiff.imread(dark_path)\n dark_img = np.array(dark_img)\n\n # process the data\n # sample_img = sample_img - dark_img\n # nosample_img = nosample_img - dark_img\n sample_img = np.abs((sample_img - np.amin(sample_img))/(np.amax(sample_img) - np.amin(sample_img)))\n nosample_img = np.abs((nosample_img - np.amin(nosample_img))/(np.amax(nosample_img) - np.amin(nosample_img)))\n color_type = 'gist_heat'\n plt.subplot(121)\n plt.imshow(sample_img, cmap=plt.get_cmap(color_type), vmax=abs(sample_img).max(), vmin=-abs(sample_img).max())\n plt.subplot(122)\n plt.imshow(nosample_img, cmap=plt.get_cmap(color_type), vmax=abs(nosample_img).max(), vmin=-abs(nosample_img).max())\n\n plt.show()\n result = np.abs((sample_img-dark_img)/(nosample_img-dark_img))\n result = np.abs((result - np.amin(result))/(np.amax(result) - np.amin(result))) * 40000\n result_log = np.log10(result+1e-13)\n img_result = np.array(result, dtype='uint16')\n img_result_log = np.array(result_log, dtype='uint16')\n\n plt.imshow(result, cmap=plt.get_cmap(color_type), vmax=abs(result).max(), vmin=-abs(result).max())\n plt.title('Siemens star image (linear)')\n plt.show()\n plt.imshow(result_log, cmap=plt.get_cmap('hot'), vmax=np.amax(result_log), vmin=np.amin(result_log))\n plt.title('Siemens star image (log10)')\n plt.show()\n\n tiff.imsave(\n path_output + '/result_sample_image' + '.tif',\n img_result)\n plt.imsave(path_output + '/result_sample_image.png', result, cmap=plt.get_cmap(color_type), format='png')\n plt.imsave(path_output + '/result_sample_image_log10.png', result_log, cmap=plt.get_cmap(color_type), format='png')\n tiff.imsave(\n path_output + '/result_sample_image_log10' + '.tif',\n img_result)\n print('\\033[32m' + 'MESSAGE: File ' + path_output + '/result_sample_image' + '.tif'\n ' saved' + '\\033[0m')\n \n",
"#!/usr/bin/env python\n\"\"\"\nCreated on %(date)s\n\n@author: %(username)s\n\"\"\"\n\n# %%% imports cell\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom labellines import labelLine, labelLines\nimport sys\n\n# %%\n\nimport wavepy.utils as wpu\nfrom wavepy.utils import easyqt\nimport glob\n\nfrom scipy.interpolate import interp1d\n\n\ndef _gaussian_dist(x, sigma, xo):\n\n return 1/np.sqrt(2*np.pi)/sigma*np.exp(-(x-xo)**2/2/sigma**2)\n\ndef load_cap_file(path_cap):\n '''\n Here use the cap to wavesensor data transfer function to generate the same structure\n data file with the wavefront measurement curve. The file is generated in the dir_name path\n '''\n\n # Load data as numpy array\n data = np.loadtxt(path_cap, delimiter=',', encoding='utf-8-sig', skiprows=1)\n header = ['Height']\n comments = []\n return data, header, comments\n\ndef cap_process(dir_name, data, profilenumber):\n '''\n read the cap data, then use the three data to get the response\n '''\n # here is the difference change\n p1_diff = 50\n p2_diff = -50\n\n # read the data and use the three data to generate two difference data\n\n data_p1 = (data[1][:, profilenumber] - data[0][:, profilenumber]) / p1_diff\n data_p2 = (data[2][:, profilenumber] - data[1][:, profilenumber]) / p2_diff\n data_combi = data[0:2]\n # print(np.shape(data_combi))\n data_combi[0][:, profilenumber] = data_p1\n data_combi[1][:, profilenumber] = data_p2\n # data_combi = [[data[0][:,0], data_p1], [data[0][:,0], data_p2]]\n\n if isinstance(data_combi, list):\n\n data2save = np.c_[data_combi[0], data_combi[1]]\n\n if len(data_combi) >= 2:\n\n for array in data_combi[2:]:\n data2save = np.c_[data2save, array]\n\n elif isinstance(data_combi, np.ndarray):\n data2save = data_combi\n\n np.savetxt(dir_name + '/response_diff.txt', data2save, fmt='%f', delimiter=',')\n\n plt.figure(11)\n plt.plot(range(len(data_p1)), data_p1, '-r', range(len(data_p1)), data_p2, '-b')\n plt.show()\n\n return data_combi\n\n\n\nwpu._mpl_settings_4_nice_graphs(otheroptions={'lines.linewidth': 2})\n\nuse_cap = True\n# if the perfect wavefront is used as the target wavefront\ntarget_perfect = True\n\nvoltage4response = 1.00\n\n\nphenergy = 1e20\nwavelength = wpu.hc/phenergy\n\n# here is to choose which collum in the csv fitting data is used for the response function calculation\nprofilenumber = 3\n\n# %%\n\nif len(sys.argv) == 1:\n dirName = easyqt.get_directory_name('Directory to the data:')\nelse:\n dirName = sys.argv[1]\n\n# here the response function initialization file\ninifname = '.wavefront_response_func.ini'\ndefaults = wpu.load_ini_file(inifname)\n\nif dirName == '':\n dirName = defaults['Files'].get('Folder with csv files')\n\nelse:\n wpu.set_at_ini_file(inifname,\n 'Files',\n 'Folder with csv files', dirName)\n\nprint(dirName)\n\n'''\nHere use the cap to wavesensor data transfer function to generate the same structure\ndata file with the wavefront measurement curve. 
The file is generated in the dir_name path\n'''\n\nlistOfFiles = glob.glob(dirName + '/*.csv')\nlistOfFiles.sort()\nn_files = len(listOfFiles)\n\n# %% Load data\n\nlistOfArrays = []\nlistOfShapes = []\n\nfor fname in listOfFiles:\n wpu.print_blue('MESSAGE: Open File ' + fname)\n if use_cap:\n fileContent = load_cap_file(fname)\n else:\n fileContent = wpu.load_csv_file(fname)\n listOfArrays.append(fileContent[0])\n\n listOfShapes.append(np.shape(fileContent[0]))\n\nheaders = fileContent[1]\n\n\n# %%\nfor data, fname in zip(listOfArrays, listOfFiles):\n label = fname.rsplit('/', 1)[1].split('.')[0]\n print(label + ', rms value: {:.4f} nm'.format(np.std(data[:, profilenumber])*1e9))\n\n# here to process the cap sensor data to get the relative response\nif use_cap:\n data_new = cap_process(dirName, listOfArrays, profilenumber)\n listOfArrays = data_new\n listOfShapes = []\n for kk in range(len(listOfArrays)):\n listOfShapes.append(np.shape(fileContent[0]))\n\n # use the two differential data\n n_files -= 1\n print(listOfFiles)\n del listOfFiles[2]\n\n\n# %% define what to do, use of ini file\n\nif 'Height' in headers[-1]:\n what2do = 'Height response function'\nelse:\n\n what2do = wpu.get_from_ini_file(inifname, 'Parameters', 'what2do')\n\n if 'DPC' in what2do:\n choices = ['DPC response function', 'Curvature response function from diff data']\n else:\n choices = ['Curvature response function from diff data', 'DPC response function']\n\n what2do = easyqt.get_choice('Pick one', choices=choices)\n\nwpu.set_at_ini_file(inifname, 'Parameters', 'what2do', what2do)\n\n# %% Target\n# Target file is the shape wanted for the correction.\n\nif len(sys.argv) > 2 or easyqt.get_yes_or_no('Do you want to load a target file?'):\n\n if len(sys.argv) > 2:\n targetName = sys.argv[2]\n else:\n targetName = easyqt.get_file_names('the target file directory:')\n\n # use one image as the current state\n if easyqt.get_yes_or_no('Do you want to load the refence file for target?'):\n ref_name = easyqt.get_file_names('the reference file directory:')\n else:\n ref_name = []\n\n\n\n if targetName == []:\n targetName = defaults['Files'].get('target file')\n else:\n wpu.set_at_ini_file(inifname,\n 'Files',\n 'target file', targetName[0])\n\n if use_cap:\n if ref_name == []:\n temp_Data = load_cap_file(targetName[0])[0]\n # temp_Data = wpu.load_csv_file(targetName[0])[0]\n else:\n temp_Data = load_cap_file(targetName[0])[0]\n temp_Data_ref = load_cap_file(ref_name[0])[0]\n # temp_Data = wpu.load_csv_file(targetName[0])[0]\n # temp_Data_ref = wpu.load_csv_file(ref_name[0])[0]\n # substitude the reference state\n temp_Data[:,profilenumber] = temp_Data[:,profilenumber] - temp_Data_ref[:,profilenumber]\n\n else:\n temp_Data = wpu.load_csv_file(targetName[0])[0]\n # temp_Data = wpu.load_csv_file(targetName[0])[0]\n lim_xnew = np.min((np.abs(listOfArrays[0][0, 0]),\n np.abs(listOfArrays[0][-1, 0]),\n np.abs(temp_Data[0, 0]),\n np.abs(temp_Data[-1, 0])))\nelse:\n temp_Data = None\n\n lim_xnew = np.min((np.abs(listOfArrays[0][0, 0]),\n np.abs(listOfArrays[0][-1, 0])))\n# use the perfect spherical wavefront of the current state as a target\nif target_perfect and ref_name==[]:\n '''\n use polyfit to fit the wavefront to the spherical wavefront\n '''\n\n f_fitting = np.polyfit(temp_Data[:, 0], temp_Data[:, profilenumber], 2)\n fitted_target = f_fitting[0]*temp_Data[:, 0]**2 + f_fitting[1]*temp_Data[:, 0] + f_fitting[2]\n\n target_original = list(temp_Data[:,profilenumber])\n temp_Data[:,profilenumber] = target_original - 
fitted_target\n plt.figure(12)\n plt.subplot(121)\n plt.plot(temp_Data[:,0], target_original, '-r',temp_Data[:,0], fitted_target, '*k')\n plt.legend(['original', 'spherical fitting'])\n plt.title('target wavefront spherical fitting')\n plt.ylabel('rad')\n plt.xlabel('position')\n plt.subplot(122)\n plt.plot(temp_Data[:,0], temp_Data[:,profilenumber], '-r')\n plt.title('residual')\n plt.ylabel('rad')\n plt.xlabel('position')\n\n figname = wpu.get_unique_filename(dirName + '/respons_func', 'png')\n plt.savefig(figname)\n plt.show()\n\n# %%\n\nnpoints_interp = 200\n\nxnew = np.linspace(-lim_xnew, lim_xnew, npoints_interp)\n\n# %%\n\n#exit()\n\n\n# %% to filter the data to make it smooth for the analyze\n\nfrom scipy.ndimage import gaussian_filter1d\n\nif 'Curvature' in what2do:\n\n for data in listOfArrays:\n data[:, profilenumber] = gaussian_filter1d(data[:, profilenumber], 50)\n\n# %%\n\nplt.figure(figsize=(12, 8))\n\nlistInterpFunc = []\n\nfor data, fname in zip(listOfArrays, listOfFiles):\n f = interp1d(data[:, 0], data[:, profilenumber], kind='cubic')\n listInterpFunc.append(f)\n\n label = fname.rsplit('/', 1)[1].split('.')[0]\n\n plt.plot(data[:, 0]*1e6, data[:, profilenumber], 'o', label=label)\n # plt.plot(xnew, f(xnew), '-')\n\nplt.ylabel('WF ' + headers[-1])\nplt.xlabel('[µm]')\n\nif n_files < 20:\n plt.legend(loc='best', fontsize='x-small')\n\nfigname = wpu.get_unique_filename(dirName + '/respons_func', 'png')\nplt.savefig(figname)\nprint('MESSAGE: Saving ' + figname)\n\nxlim = plt.gca().get_xlim()\nylim = plt.gca().get_ylim()\nplt.show(block=True)\n\n# %% Plot with labels inline\n\nplt.figure(figsize=(12, 8))\n\nfor data, fname in zip(listOfArrays, listOfFiles):\n\n label = fname.rsplit('/', 1)[1].split('.')[0]\n plt.plot(data[:, 0]*1e6, data[:, profilenumber], '-', label=label)\n\nif n_files < 20:\n labelLines(plt.gca().get_lines(),align=False,fontsize=14,\n xvals=(data[0, 0]*1e6, data[-1, 0]*1e6))\n\nplt.ylabel('WF ' + headers[-1])\nplt.xlabel('[µm]')\n#plt.legend(loc='best', fontsize='x-small')\n\nfigname = wpu.get_unique_filename(dirName + '/respons_func', 'png')\nplt.savefig(figname)\nprint('MESSAGE: Saving ' + figname)\n\n\nxlim = plt.gca().get_xlim()\nylim = plt.gca().get_ylim()\nplt.show(block=True)\n\nplt.show()\n\n\n# %% Animation\n\ntimeStep = 1\nt = 0\n\nif False:\n\n plt.figure(figsize=(12, 8))\n\n plt.ylabel('WF ' + headers[-1])\n plt.xlabel('[µm]')\n\n for data, fname in zip(listOfArrays, listOfFiles):\n\n label = fname.rsplit('/', 1)[1].split('.')[0]\n\n plt.plot(data[:, 0]*1e6, data[:, profilenumber], '-k', label=label)\n # plt.title('t = {:d}s'.format(t))\n plt.title(label)\n t += timeStep\n\n plt.xlim(xlim)\n plt.ylim(ylim)\n figname = wpu.get_unique_filename(dirName + '/anim_respons_func', 'png', width=3)\n plt.savefig(figname)\n print('MESSAGE: Saving ' + figname)\n\n del plt.gca().lines[0]\n\n plt.close('all')\n\n\n# %% Time plot\n\nif True:\n\n plt.figure(figsize=(12, 8))\n\n bufferdata = np.zeros(n_files)\n\n xIndex2plot = np.size(data[:, 0])//2\n xIndex2plot = np.argmin((data[:, 0]-0.0e-6)**2)\n for i, data in enumerate(listOfArrays):\n\n bufferdata[i] = data[xIndex2plot, 1]\n\n # bufferdata[i] = np.ptp(data[:, profilenumber])\n\n foo = np.linspace(0, (n_files-1)*timeStep, n_files)\n plt.plot(foo, bufferdata, '-o')\n\n plt.title(what2do + ' at x = {:.0f} µm'.format(data[xIndex2plot, 0]*1e6//1))\n\n # plt.title(what2do + ', PV')\n plt.xlabel('Times [s]')\n plt.ylabel('WF [m]')\n\n figname = wpu.get_unique_filename(dirName + '/resp_func_time_scan', 'png')\n 
plt.savefig(figname)\n print('MESSAGE: Saving ' + figname)\n\n plt.show()\n\n\n# %%\n\nwpu.save_csv_file([foo, bufferdata],\n fname=wpu.get_unique_filename(dirName + '/resp_func_time_scan', 'dat'),\n headerList=['Time',\n what2do +\n ' at x = {:.0f} µm'.format(data[xIndex2plot, 0]*1e6//1)])\n\n# %% Curvature\n\nif 'Curvature' in what2do:\n\n listInterpFunc = []\n\n # curvature calculation and spline\n\n\n\n listOfArrays_tmp = []\n\n\n for data, fname in zip(listOfArrays, listOfFiles):\n\n plt.figure(figsize=(12, 8))\n\n\n label = fname.rsplit('/', 1)[1].split('.')[0]\n\n data_tmp = -1/2/np.pi*wavelength*np.diff(data[:, profilenumber])/np.mean(np.diff(data[:, 0]))\n data_tmp = np.pad(data_tmp, (0 ,1), 'edge')\n\n f = interp1d(data[:, 0], data_tmp, kind='cubic')\n\n plt.plot(data[:, 0]*1e6, data_tmp, 'o', label=label + ' data')\n # plt.plot(xnew, f(xnew), '-')\n\n listInterpFunc.append(f)\n\n plt.xlabel('[µm]')\n plt.title('Curvature [1/m] at {:.1f} KeV'.format(phenergy*1e3))\n plt.legend(loc='best', fontsize='x-small')\n\n figname = wpu.get_unique_filename(dirName + '/respons_func', 'png')\n plt.savefig(figname)\n\n xlim = plt.gca().get_xlim()\n ylim = plt.gca().get_ylim()\n\n plt.show(block=True)\n\n\n# %% remove 2nd order\n\nif False:\n listOfArrays_Buff = np.copy(listOfArrays)\n\n # %%\n plt.figure(figsize=(12, 8))\n\n for data, fname in zip(listOfArrays, listOfFiles):\n\n pfit = np.polyfit(data[:, 0], data[:, profilenumber], 2)\n\n label = fname.rsplit('/', 1)[1].split('.')[0]\n\n plt.plot(data[:, 0]*1e6, data[:, profilenumber], '-o', label=label)\n\n fitted_func = pfit[0]*data[:, 0]**2 + pfit[1]*data[:, 0] + pfit[2]\n plt.plot(data[:, 0]*1e6, fitted_func, '--c')\n\n data[:, profilenumber] -= fitted_func # make change permanent\n\n plt.ylabel('WF ' + headers[-1])\n plt.xlabel('[µm]')\n plt.title('2nd order polynomial Fit')\n #plt.legend(loc='best', fontsize='x-small')\n figname = wpu.get_unique_filename(dirName + '/respons_func', 'png')\n plt.savefig(figname)\n plt.show()\n\n # %%\n plt.figure(figsize=(12, 8))\n\n for data, fname in zip(listOfArrays, listOfFiles):\n\n label = fname.rsplit('/', 1)[1].split('.')[0]\n plt.plot(data[:, 0]*1e6, data[:, profilenumber], '-', label=label)\n\n plt.title('Residual from 2nd order polynomial')\n\n if n_files < 20:\n labelLines(plt.gca().get_lines(),align=False, fontsize=14,\n xvals=(data[0, 0]*1e6, data[-1, 0]*1e6))\n\n # plt.legend(loc='best', fontsize='x-small')\n\n plt.ylabel('WF ' + headers[-1])\n plt.xlabel('[µm]')\n\n figname = wpu.get_unique_filename(dirName + '/respons_func', 'png')\n plt.savefig(figname)\n plt.show()\n\n # %%\n # for_csv = [xnew]\n # for f_j in listInterpFunc:\n # for_csv.append(f_j(xnew))\n #\n # wpu.save_csv_file(for_csv, 'curv.csv')\n\n# %%\n\n\n\n # %% plot curvature\n\n\n# fittedFunction = []\n#\n# plt.figure()\n# for data, fname, f in zip(listOfArrays, listOfFiles, listInterpFunc):\n# label = fname.rsplit('/', 1)[1].split('.')[0]\n# # plt.plot(data[:, 0], data[:, profilenumber], 'o', label=label + ' data')\n# # plt.plot(xnew, f(xnew), '-')\n#\n# popt, pcov = curve_fit(_gaussian_dist,\n# data[:, 0],\n# data[:, profilenumber],\n# [data[np.argmax(data[:, profilenumber]), 0], 1e-4])\n#\n# fitted = _gaussian_dist(xnew, popt[0], popt[1])\n#\n# fittedFunction.append(fitted)\n# plt.plot(xnew, fitted, '-')\n#\n# plt.title('Curvature [1/m]')\n# plt.legend(loc='best', fontsize='x-small')\n# plt.show()\n\n\n# %%\n\nm_matrix = np.zeros((npoints_interp, n_files + 2))\n\nfor j in range(n_files):\n\n f_j = listInterpFunc[j]\n\n 
m_matrix[:, j] = f_j(xnew)/voltage4response\n\nm_matrix[:, -2] = np.ones(npoints_interp) # piston term\nm_matrix[:, -1] = xnew # tilt\n#m_matrix[:, -1] = xnew**2 # second order\n\n# %%\nplt.figure()\nplt.imshow(m_matrix[:,:-2], aspect='auto', origin='upper')\nplt.title('M Matrix - ' + what2do)\nfigname = wpu.get_unique_filename(dirName + '/respons_func', 'png')\nplt.savefig(figname)\nplt.show(block=False)\n\nplt.figure()\nplt.imshow(m_matrix, aspect='auto', origin='upper')\nplt.title('M Matrix - ' + what2do)\nfigname = wpu.get_unique_filename(dirName + '/respons_func', 'png')\nplt.savefig(figname)\nplt.show()\n\n# %%\n# if there's no target for correction, then exit the code\n\nif temp_Data is None:\n exit()\n\n# %%\n\n#exit()\n\n\n# %%\n# Radius is the pre-curve for the mirror?\nRadius = 104.00\n#Radius = 1.828\nnominal = -(Radius-np.sqrt(Radius**2-(xnew-0.0)**2))\n\ntarget = temp_Data[:, profilenumber]\n\n\nf_target = interp1d(temp_Data[:, 0], target, kind='cubic')\n\nif xnew[-1] <= temp_Data[-1, 0]:\n target = f_target(xnew)\nelse:\n target = xnew*0.0\n target[np.where(np.abs(xnew)<temp_Data[-1, 0])] = f_target(xnew[np.where(np.abs(xnew)<temp_Data[-1, 0])])\n target[np.where(np.abs(xnew)<temp_Data[-1, 0])] -= nominal[np.where(np.abs(xnew)<temp_Data[-1, 0])]\n\n# to reduce the second order phase\n#pfit = np.polyfit(xnew, target, 2)\n#bestfit2nd = pfit[0]*xnew**2 + pfit[1]*xnew + pfit[2]\n#target -= bestfit2nd\n\ndpc_target = np.diff(target)/np.mean(np.diff(xnew))/(-1/2/np.pi*wavelength)\ncurv_target = np.diff(dpc_target)/np.mean(np.diff(xnew))*(-1/2/np.pi*wavelength)\n\n# to make them same length as their length before diff function\ndpc_target = np.pad(dpc_target, (0, 1), 'edge')\ncurv_target = np.pad(curv_target, 1, 'edge')\n\n\n\n# %%\nif True:\n plt.figure()\n plt.plot(xnew*1e6, target*1e9)\n # plt.plot(temp_Data[:, 0]*1e6, temp_Data[:, 1]*1e9)\n plt.title('Target, ' + targetName[0].rsplit('/', 1)[1] +\n ', rms = {:.2f} pm'.format(np.std(target)*1e12))\n plt.xlabel(r'y [um]')\n plt.ylabel(r'height [nm]')\n figname = wpu.get_unique_filename(dirName + '/respons_func', 'png')\n plt.savefig(figname)\n plt.show(block=False)\n\n # target DPC\n\n plt.figure()\n plt.plot(xnew*1e6, dpc_target)\n plt.xlabel(r'y [um]')\n plt.title('DPC Target')\n figname = wpu.get_unique_filename(dirName + '/respons_func', 'png')\n plt.savefig(figname)\n plt.show(block=False)\n\n # target curv\n\n plt.figure()\n plt.plot(xnew*1e6, curv_target)\n plt.xlabel(r'y [um]')\n plt.title('Curvature Target')\n figname = wpu.get_unique_filename(dirName + '/respons_func', 'png')\n plt.savefig(figname)\n plt.show(block=True)\n\n# %%\n\n#exit()\n#\n## ******************************************************************************\n# here is to choose the interesting area in the target to make it flat within this area\n# *******************************************************************************\n#min_x = -340e-6\n#max_x = 340e-6\n#\n#arg_min = np.argmin((xnew-min_x)**2)\n#\n#arg_max = np.argmin((xnew-max_x)**2)\n#\n\n'''\nhere's how to choose the part to be fitted.\n'''\n# arg_min = -round(npoints_interp/4)\n# arg_max = round(npoints_interp/4)\n\n# m_matrix = m_matrix[arg_min:arg_max,:]\n# target = target[arg_min:arg_max]\n# xnew = xnew[arg_min:arg_max]\n\n\n# %%\nfrom scipy.optimize import lsq_linear, least_squares\n\n\nbound_all = 500.000\n\n\n# bound_top = np.array([bound_all, bound_all, bound_all, bound_all,\n# bound_all, bound_all, bound_all, bound_all,\n# bound_all, bound_all, bound_all, bound_all,\n# bound_all, 
bound_all, bound_all, bound_all,\n# bound_all, bound_all,\n# 1e20, 1e20])\n\nbound_top = np.array([100, 100,\n 1e20, 1e20])\n\nbound_all = -500.00\n\n# bound_bottom = np.array([bound_all, bound_all, bound_all, bound_all,\n# bound_all, bound_all, bound_all, bound_all,\n# bound_all, bound_all, bound_all, bound_all,\n# bound_all, bound_all, bound_all, bound_all,\n# bound_all, bound_all,\n# -1e20, -1e20])\n\nbound_bottom = np.array([-100, -100,\n -1e20, -1e20])\n# correction 1\n\n#bound_bottom = np.array([-.145, -.759, -.1, -.553, -.491, -.235,\n# -.648, -.1, -.489, -.248,\n# -1e20, -1e20])\n#\n#bound_top = 1 + bound_bottom\n#\n#bound_top[-2] = 1e20\n#bound_top[-1] = 1e20\n\n\nif 'Height' in what2do:\n res = lsq_linear(m_matrix, target, bounds=(bound_bottom, bound_top),\n method='bvls', tol=1e-32, verbose=1, max_iter=1000)\nelif 'DPC' in what2do:\n res = lsq_linear(m_matrix, dpc_target, bounds=(bound_bottom, bound_top), verbose=1)\nelif 'Curvature' in what2do:\n res = lsq_linear(m_matrix, curv_target, bounds=(bound_bottom, bound_top), verbose=1)\n\n# %%\nprint('Status: {}'.format(res.status))\nif res.success:\n print(\"Uha!\")\nelse:\n print(\"FAIL!!!!\")\nvoltage = res.x[:-2]\npiston = res.x[-2]\ntilt = res.x[-1]\n\n# %%\n\n\nfor i, fname in enumerate(listOfFiles):\n label = fname.rsplit('/', 1)[1].split('.')[0]\n wpu.print_blue(label + '\\t: {:6.5f} Volts'.format(voltage[i]))\n\nwpu.print_blue('piston: {:.4g} nm'.format(piston*1e9))\nwpu.print_blue('tilt: {:.4g} rad?'.format(tilt))\n\n\n\n# %%\n\n# TODO:\n\nbase_voltage = 00.0\nfor volt in voltage:\n print('{:.1f}'.format(volt + base_voltage), end=\" \")\n\nprint('')\n\n\n\n# %%\n\nfname = wpu.get_unique_filename(dirName + '/resp_func_m_matrix_' + what2do.replace(' ', '_'), 'dat')\nwpu.save_csv_file(m_matrix,\n fname=fname)\n\nfname = wpu.get_unique_filename(dirName + '/resp_func_voltage_' + what2do.replace(' ', '_'), 'dat')\nwpu.save_csv_file([np.arange(1, np.size(voltage) + 1), voltage],\n fname=fname,\n headerList=['Channel', 'Voltage [V]'])\n\nfname = wpu.get_unique_filename(dirName + '/resp_func_target_' + what2do.replace(' ', '_'), 'dat')\nwpu.save_csv_file(target,\n fname=fname,\n headerList=['Height [m]'])\n\n# %%\nvoltage4plot = np.zeros(np.size(voltage)+2)\nvoltage4plot[:-2] = voltage\n\nvoltage4plot[-2] = piston\nvoltage4plot[-1] = tilt\n\n# %%\n\nfinalSurface = m_matrix @ voltage4plot\nplt.figure()\n\n\nif 'Height' in what2do:\n plt.plot(xnew*1e6, finalSurface*1e9)\n plt.ylabel('Height [nm]')\nelse:\n plt.plot(xnew*1e6, finalSurface)\n plt.ylabel(headers[-1])\n\n#plt.title('Final Surface, Correction: {:} V'.format(bound_top))\nplt.title('Surface Displacement, rms = {:.2f} pm'.format(np.std(finalSurface)*1e12))\n\nplt.xlabel(r'y [um]')\nfigname = wpu.get_unique_filename(dirName + '/respons_func', 'png')\nplt.savefig(figname)\nplt.show(block=False)\n\nplt.figure()\n\nif 'Height' in what2do:\n plt.plot(xnew*1e6, -(res.fun-np.mean(res.fun))*1e9, '-', label='Residual')\n plt.plot(xnew*1e6, (target-np.mean(target))*1e9, '-', label='Target')\n plt.ylabel('Height [nm]')\nelse:\n plt.plot(xnew*1e6, res.fun, label='Residual')\n plt.ylabel(headers[-1])\n\n\n#plt.title('Residual, Correction: {:} V'.format(bound_top))\n\nplt.title('Target, ' + targetName[0].rsplit('/', 1)[1] +\n ', rms = {:.2f} pm'.format(np.std(target)*1e12) +\n '\\nResidual, rms = {:.2f} pm'.format(np.std(res.fun)*1e12))\nplt.xlabel(r'y [um]')\nplt.legend()\nfigname = wpu.get_unique_filename(dirName + '/respons_func', 
'png')\nplt.savefig(figname)\nplt.show(block=False)\n\nplt.figure()\nplt.bar(range(1, np.size(voltage) + 1),voltage, width=1.0)\nplt.xlabel('Channel #')\nplt.ylabel('Voltage [V]')\n\n#plt.title('Final Voltage, Max Correction: {:} V'.format(bound_top))\nplt.title('Final Voltage')\nfigname = wpu.get_unique_filename(dirName + '/respons_func', 'png')\nplt.savefig(figname)\nplt.show()\n\n# %%\nfor_csv = [xnew]\nfor_csv.append(finalSurface)\nfor_csv.append(res.fun)\n\nfname = wpu.get_unique_filename(dirName + '/final_shape_' + what2do.replace(' ', '_'), 'dat')\nwpu.save_csv_file(for_csv,\n fname=fname,\n headerList=['x[m], Height [m], Residual [m]'])\n# %%\n#\n#test_volt = np.array([-100., -100., 100., -100.,\n# 100., 100., 100., 100.,\n# -100., -100., -100., -100.,\n# 100., 100., 100., -100.,\n# -100., -100.])\n#\n#\n#finalSurface_test = m_matrix @ test_volt\n#\n#\n#plt.figure()\n#plt.plot(xnew*1e6, finalSurface_test*1e9)\n#\n#\n#plt.ylabel('Height [nm]')\n#plt.title('Final Surface')\n#plt.xlabel(r'y [um]')\n#plt.show(block=False)\n#\n## %%\n#plt.figure()\n#\n#residual_test = finalSurface_test -target\n#residual_test -= np.mean(residual_test)\n#\n#plt.plot(xnew*1e6, residual_test*1e9)\n#\n#plt.ylabel('Height [nm]')\n#\n#\n#plt.title('Residual')\n#plt.xlabel(r'y [um]')\n#plt.show(block=False)\n\n# %%\n#\nplt.figure()\n#\n##myModel = .145*m_matrix[:, 0] + \\\n## .759*m_matrix[:, 1] + \\\n## .1*m_matrix[:, 2] + \\\n## .553*m_matrix[:, 3] + \\\n## .491*m_matrix[:, 4] + \\\n## .235*m_matrix[:, 5] + \\\n## .648*m_matrix[:, 6] + \\\n## .1*m_matrix[:, 7] + \\\n## .489*m_matrix[:, 8] + \\\n## .248*m_matrix[:, 9]\n#\n#\n\nmyVoltages = [0., 0., 0., 0.,\n 0., 0., 0., 0.,\n 0., 0., 0., 0.,\n 0., 0., 0., 0.,\n 0., 0., 0.]\n\n#voltage4plot[-2] = 0\n#voltage4plot[-1] = 0\nmyModel = m_matrix @ voltage4plot\n\n\nplt.plot(xnew*1e6, (myModel - np.min((myModel)))*1e9, '-')\n\n\n#plt.title('Residual, Correction: {:} V'.format(bound_top))\n\nplt.title('My Model')\nplt.xlabel(r'y [um]')\nfigname = wpu.get_unique_filename(dirName + '/respons_func', 'png')\nplt.savefig(figname)\nplt.show(block=False)\n#\n## %%\n#\n#plt.figure()\n#\n#foo = m_matrix[:, 0]\n#for i in range(13):\n# print(i)\n# foo = m_matrix[:, i]\n#\n# plt.plot(foo, label='{}'.format(i))\n#\n#plt.legend()\n#plt.show()\n"
] | [
[
"numpy.abs",
"numpy.linspace",
"numpy.cos",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.close",
"numpy.exp",
"matplotlib.pyplot.show"
],
[
"numpy.nanmax",
"matplotlib.pyplot.contourf",
"numpy.sqrt",
"numpy.rad2deg",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.arctan2",
"scipy.constants.value",
"numpy.mean",
"numpy.where",
"numpy.pad",
"numpy.finfo",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"numpy.min",
"scipy.signal.argrelmax",
"numpy.deg2rad",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.figaspect",
"scipy.ndimage.gaussian_filter",
"numpy.abs",
"scipy.ndimage.uniform_filter",
"scipy.ndimage.maximum_filter",
"matplotlib.pyplot.colorbar",
"numpy.sign",
"numpy.shape",
"matplotlib.pyplot.xlabel"
],
[
"numpy.nanmax",
"numpy.sinc",
"numpy.abs",
"numpy.linspace",
"numpy.isnan",
"numpy.arange",
"numpy.nanmin",
"numpy.rint",
"matplotlib.cm.get_cmap",
"numpy.exp"
],
[
"numpy.amax",
"numpy.abs",
"matplotlib.pyplot.title",
"numpy.amin",
"matplotlib.pyplot.get_cmap",
"matplotlib.pyplot.subplot",
"numpy.log10",
"numpy.array",
"matplotlib.pyplot.show"
],
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.imshow",
"numpy.polyfit",
"numpy.sqrt",
"numpy.linspace",
"scipy.ndimage.gaussian_filter1d",
"matplotlib.pyplot.plot",
"numpy.argmin",
"numpy.mean",
"numpy.exp",
"matplotlib.pyplot.gca",
"numpy.pad",
"numpy.copy",
"scipy.interpolate.interp1d",
"matplotlib.pyplot.subplot",
"numpy.size",
"matplotlib.pyplot.close",
"numpy.diff",
"numpy.std",
"numpy.zeros",
"scipy.optimize.lsq_linear",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"numpy.min",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.savefig",
"numpy.savetxt",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"numpy.abs",
"numpy.ones",
"matplotlib.pyplot.xlim",
"numpy.shape",
"matplotlib.pyplot.xlabel",
"numpy.loadtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.7",
"1.0",
"0.17",
"1.2",
"1.8"
],
"tensorflow": []
}
] |
George-Jiao/pytorch-toolbelt | [
"920e03876805351ed5645e439a64074cb4f37589"
] | [
"pytorch_toolbelt/modules/encoders/timm/common.py"
] | [
"import math\nimport warnings\nimport torch\n\nfrom typing import List, Union\nfrom torch import Tensor, nn\n\nfrom ..common import EncoderModule, _take\n\n__all__ = [\"GenericTimmEncoder\", \"make_n_channel_input_std_conv\"]\n\n\nclass GenericTimmEncoder(EncoderModule):\n def __init__(self, timm_encoder: Union[nn.Module, str], layers: List[int] = None):\n strides = []\n channels = []\n default_layers = []\n if isinstance(timm_encoder, str):\n import timm.models.factory\n\n timm_encoder = timm.models.factory.create_model(timm_encoder, pretrained=True)\n\n for i, oi in enumerate(timm_encoder.feature_info.out_indices):\n fi = timm_encoder.feature_info.info[i]\n strides.append(fi[\"reduction\"])\n channels.append(fi[\"num_chs\"])\n default_layers.append(i)\n\n if layers is None:\n layers = default_layers\n\n super().__init__(channels, strides, layers)\n self.encoder = timm_encoder\n\n def forward(self, x: Tensor) -> List[Tensor]:\n return _take(self.encoder(x), self._layers)\n\n\ndef make_n_channel_input_std_conv(conv: nn.Module, in_channels: int, mode=\"auto\", **kwargs) -> nn.Module:\n \"\"\"\n Return the same convolution class but with desired number of channels\n\n Args:\n conv: Input nn.Conv2D object to copy settings/weights from\n in_channels: Desired number of input channels\n mode:\n **kwargs: Optional overrides for Conv2D parameters\n \"\"\"\n conv_cls = conv.__class__\n\n if conv.in_channels == in_channels:\n warnings.warn(\"make_n_channel_input call is spurious\")\n return conv\n\n new_conv = conv_cls(\n in_channels,\n out_channels=conv.out_channels,\n kernel_size=kwargs.get(\"kernel_size\", conv.kernel_size),\n stride=kwargs.get(\"stride\", conv.stride),\n padding=kwargs.get(\"padding\", conv.padding),\n dilation=kwargs.get(\"dilation\", conv.dilation),\n groups=kwargs.get(\"groups\", conv.groups),\n bias=kwargs.get(\"bias\", conv.bias is not None),\n eps=kwargs.get(\"eps\", conv.eps),\n )\n\n w = conv.weight\n if in_channels > conv.in_channels:\n n = math.ceil(in_channels / float(conv.in_channels))\n w = torch.cat([w] * n, dim=1)\n w = w[:, :in_channels, ...]\n new_conv.weight = nn.Parameter(w, requires_grad=True)\n else:\n w = w[:, 0:in_channels, ...]\n new_conv.weight = nn.Parameter(w, requires_grad=True)\n\n return new_conv\n"
] | [
[
"torch.nn.Parameter",
"torch.cat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
goldtime1987/pyQTGraph | [
"97193758d9f8f57f304f95959403f1db84c3c0b0"
] | [
"go.py"
] | [
"from PyQt4 import QtGui,QtCore\r\nimport sys\r\nimport ui_main\r\nimport numpy as np\r\nimport pylab\r\nimport time\r\nimport pyqtgraph\r\n\r\nclass ExampleApp(QtGui.QMainWindow, ui_main.Ui_MainWindow):\r\n def __init__(self, parent=None):\r\n pyqtgraph.setConfigOption('background', 'w') #before loading widget\r\n super(ExampleApp, self).__init__(parent)\r\n self.setupUi(self)\r\n self.btnAdd.clicked.connect(self.update)\r\n self.grPlot.plotItem.showGrid(True, True, 0.7)\r\n\r\n def update(self):\r\n t1=time.clock()\r\n points=100 #number of data points\r\n X=np.arange(points)\r\n Y=np.sin(np.arange(points)/points*3*np.pi+time.time())\r\n C=pyqtgraph.hsvColor(time.time()/5%1,alpha=.5)\r\n pen=pyqtgraph.mkPen(color=C,width=10)\r\n self.grPlot.plot(X,Y,pen=pen,clear=True)\r\n print(\"update took %.02f ms\"%((time.clock()-t1)*1000))\r\n if self.chkMore.isChecked():\r\n QtCore.QTimer.singleShot(1, self.update) # QUICKLY repeat\r\n\r\nif __name__==\"__main__\":\r\n app = QtGui.QApplication(sys.argv)\r\n form = ExampleApp()\r\n form.show()\r\n form.update() #start with something\r\n app.exec_()\r\n print(\"DONE\")"
] | [
[
"numpy.arange"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ImmortalSdm/Speech-Emotion-Recognition-1 | [
"c5f766a0f66c77df30c6d75e86d97c27c2bbb240"
] | [
"extract_feats/opensmile.py"
] | [
"import os\nimport csv\nimport sys\nimport time\nimport pandas as pd\nfrom sklearn.preprocessing import StandardScaler\nfrom typing import Tuple\nfrom sklearn.externals import joblib\nfrom sklearn.model_selection import train_test_split\n\n# 每个特征集的特征数量\nFEATURE_NUM = {\n 'IS09_emotion': 384,\n 'IS10_paraling': 1582,\n 'IS11_speaker_state': 4368,\n 'IS12_speaker_trait': 6125,\n 'IS13_ComParE': 6373,\n 'ComParE_2016': 6373\n}\n\n\n'''\nget_feature_opensmile(): Opensmile 提取一个音频的特征\n\n输入:\n config(Class)\n file_path: 音频路径\n\n输出:\n 该音频的特征向量\n'''\n\ndef get_feature_opensmile(config, filepath: str):\n # 项目路径\n BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))\n # single_feature.csv 路径\n single_feat_path = os.path.join(BASE_DIR, config.feature_path, 'single_feature.csv')\n # Opensmile 配置文件路径\n opensmile_config_path = os.path.join(config.opensmile_path, 'config', config.opensmile_config + '.conf')\n\n # Opensmile 命令\n cmd = 'cd ' + config.opensmile_path + ' && ./SMILExtract -C ' + opensmile_config_path + ' -I ' + filepath + ' -O ' + single_feat_path\n print(\"Opensmile cmd: \", cmd)\n os.system(cmd)\n \n reader = csv.reader(open(single_feat_path,'r'))\n rows = [row for row in reader]\n last_line = rows[-1]\n return last_line[1: FEATURE_NUM[config.opensmile_config] + 1]\n\n\n'''\nload_feature(): 从 .csv 文件中加载特征数据\n\n输入:\n config(Class)\n feature_path: 特征文件路径\n train: 是否为训练数据\n\n输出:\n 训练数据、测试数据和对应的标签\n'''\n\ndef load_feature(config, feature_path: str, train: bool):\n # 加载特征数据\n df = pd.read_csv(feature_path)\n features = [str(i) for i in range(1, FEATURE_NUM[config.opensmile_config] + 1)]\n\n X = df.loc[:,features].values\n Y = df.loc[:,'label'].values\n\n # 标准化模型路径\n scaler_path = os.path.join(config.checkpoint_path, 'SCALER_OPENSMILE.m')\n\n if train == True:\n # 标准化数据 \n scaler = StandardScaler().fit(X)\n # 保存标准化模型\n joblib.dump(scaler, scaler_path)\n X = scaler.transform(X)\n\n # 划分训练集和测试集\n x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size = 0.2, random_state = 42)\n return x_train, x_test, y_train, y_test\n else:\n # 标准化数据\n # 加载标准化模型\n scaler = joblib.load(scaler_path)\n X = scaler.transform(X)\n return X\n\n\n'''\nget_data(): \n 提取所有音频的特征: 遍历所有文件夹, 读取每个文件夹中的音频, 提取每个音频的特征,把所有特征保存在 feature_path 中\n\n输入:\n config(Class)\n data_path: 数据集文件夹/测试文件路径\n feature_path: 保存特征的路径\n train: 是否为训练数据\n\n输出:\n train = True: 训练数据、测试数据特征和对应的标签\n train = False: 预测数据特征\n'''\n\n# Opensmile 提取特征\ndef get_data(config, data_path, feature_path: str, train: bool):\n\n writer = csv.writer(open(feature_path, 'w'))\n first_row = ['label']\n for i in range(1, FEATURE_NUM[config.opensmile_config] + 1):\n first_row.append(str(i))\n writer.writerow(first_row)\n\n writer = csv.writer(open(feature_path, 'a+'))\n print('Opensmile extracting...')\n\n if train == True:\n cur_dir = os.getcwd()\n sys.stderr.write('Curdir: %s\\n' % cur_dir)\n os.chdir(data_path)\n # 遍历文件夹\n for i, directory in enumerate(config.class_labels):\n sys.stderr.write(\"Started reading folder %s\\n\" % directory)\n os.chdir(directory)\n\n # label_name = directory\n label = config.class_labels.index(directory)\n\n # 读取该文件夹下的音频\n for filename in os.listdir('.'):\n if not filename.endswith('wav'):\n continue\n filepath = os.path.join(os.getcwd(), filename)\n \n # 提取该音频的特征\n feature_vector = get_feature_opensmile(config, filepath)\n feature_vector.insert(0, label)\n # 把每个音频的特征整理到一个 csv 文件中\n writer.writerow(feature_vector)\n\n sys.stderr.write(\"Ended reading folder %s\\n\" % directory)\n os.chdir('..')\n 
os.chdir(cur_dir)\n \n else:\n feature_vector = get_feature_opensmile(config, data_path)\n feature_vector.insert(0, '-1')\n writer.writerow(feature_vector)\n\n print('Opensmile extract done.')\n\n # 一个玄学 bug 的暂时性解决方案\n # 这里无法直接加载除了 IS10_paraling 以外的其他特征集的预测数据特征,非常玄学\n if(train == True):\n return load_feature(config, feature_path, train = train)"
] | [
[
"sklearn.externals.joblib.dump",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.preprocessing.StandardScaler",
"sklearn.externals.joblib.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
kpflugshaupt/pandas | [
"a1fee9199eba7ebf423880243936b9f1501d3d3a",
"a1fee9199eba7ebf423880243936b9f1501d3d3a",
"a1fee9199eba7ebf423880243936b9f1501d3d3a",
"a1fee9199eba7ebf423880243936b9f1501d3d3a",
"a1fee9199eba7ebf423880243936b9f1501d3d3a",
"a1fee9199eba7ebf423880243936b9f1501d3d3a"
] | [
"pandas/tests/series/test_replace.py",
"pandas/tests/indexes/period/test_setops.py",
"pandas/core/indexing.py",
"pandas/tests/resample/test_resample_api.py",
"pandas/core/reshape/concat.py",
"pandas/tests/io/formats/test_printing.py"
] | [
"# coding=utf-8\n# pylint: disable-msg=E1101,W0612\n\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nimport pandas.util.testing as tm\n\nfrom .common import TestData\n\n\nclass TestSeriesReplace(TestData):\n def test_replace(self):\n N = 100\n ser = pd.Series(np.random.randn(N))\n ser[0:4] = np.nan\n ser[6:10] = 0\n\n # replace list with a single value\n ser.replace([np.nan], -1, inplace=True)\n\n exp = ser.fillna(-1)\n tm.assert_series_equal(ser, exp)\n\n rs = ser.replace(0., np.nan)\n ser[ser == 0.] = np.nan\n tm.assert_series_equal(rs, ser)\n\n ser = pd.Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N),\n dtype=object)\n ser[:5] = np.nan\n ser[6:10] = 'foo'\n ser[20:30] = 'bar'\n\n # replace list with a single value\n rs = ser.replace([np.nan, 'foo', 'bar'], -1)\n\n assert (rs[:5] == -1).all()\n assert (rs[6:10] == -1).all()\n assert (rs[20:30] == -1).all()\n assert (pd.isna(ser[:5])).all()\n\n # replace with different values\n rs = ser.replace({np.nan: -1, 'foo': -2, 'bar': -3})\n\n assert (rs[:5] == -1).all()\n assert (rs[6:10] == -2).all()\n assert (rs[20:30] == -3).all()\n assert (pd.isna(ser[:5])).all()\n\n # replace with different values with 2 lists\n rs2 = ser.replace([np.nan, 'foo', 'bar'], [-1, -2, -3])\n tm.assert_series_equal(rs, rs2)\n\n # replace inplace\n ser.replace([np.nan, 'foo', 'bar'], -1, inplace=True)\n\n assert (ser[:5] == -1).all()\n assert (ser[6:10] == -1).all()\n assert (ser[20:30] == -1).all()\n\n ser = pd.Series([np.nan, 0, np.inf])\n tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))\n\n ser = pd.Series([np.nan, 0, 'foo', 'bar', np.inf, None, pd.NaT])\n tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))\n filled = ser.copy()\n filled[4] = 0\n tm.assert_series_equal(ser.replace(np.inf, 0), filled)\n\n ser = pd.Series(self.ts.index)\n tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))\n\n # malformed\n msg = r\"Replacement lists must match in length\\. 
Expecting 3 got 2\"\n with pytest.raises(ValueError, match=msg):\n ser.replace([1, 2, 3], [np.nan, 0])\n\n # make sure that we aren't just masking a TypeError because bools don't\n # implement indexing\n with pytest.raises(TypeError, match='Cannot compare types .+'):\n ser.replace([1, 2], [np.nan, 0])\n\n ser = pd.Series([0, 1, 2, 3, 4])\n result = ser.replace([0, 1, 2, 3, 4], [4, 3, 2, 1, 0])\n tm.assert_series_equal(result, pd.Series([4, 3, 2, 1, 0]))\n\n def test_replace_gh5319(self):\n # API change from 0.12?\n # GH 5319\n ser = pd.Series([0, np.nan, 2, 3, 4])\n expected = ser.ffill()\n result = ser.replace([np.nan])\n tm.assert_series_equal(result, expected)\n\n ser = pd.Series([0, np.nan, 2, 3, 4])\n expected = ser.ffill()\n result = ser.replace(np.nan)\n tm.assert_series_equal(result, expected)\n # GH 5797\n ser = pd.Series(pd.date_range('20130101', periods=5))\n expected = ser.copy()\n expected.loc[2] = pd.Timestamp('20120101')\n result = ser.replace({pd.Timestamp('20130103'):\n pd.Timestamp('20120101')})\n tm.assert_series_equal(result, expected)\n result = ser.replace(pd.Timestamp('20130103'),\n pd.Timestamp('20120101'))\n tm.assert_series_equal(result, expected)\n\n # GH 11792: Test with replacing NaT in a list with tz data\n ts = pd.Timestamp('2015/01/01', tz='UTC')\n s = pd.Series([pd.NaT, pd.Timestamp('2015/01/01', tz='UTC')])\n result = s.replace([np.nan, pd.NaT], pd.Timestamp.min)\n expected = pd.Series([pd.Timestamp.min, ts], dtype=object)\n tm.assert_series_equal(expected, result)\n\n def test_replace_with_single_list(self):\n ser = pd.Series([0, 1, 2, 3, 4])\n result = ser.replace([1, 2, 3])\n tm.assert_series_equal(result, pd.Series([0, 0, 0, 0, 4]))\n\n s = ser.copy()\n s.replace([1, 2, 3], inplace=True)\n tm.assert_series_equal(s, pd.Series([0, 0, 0, 0, 4]))\n\n # make sure things don't get corrupted when fillna call fails\n s = ser.copy()\n msg = (r\"Invalid fill method\\. Expecting pad \\(ffill\\) or backfill\"\n r\" \\(bfill\\)\\. 
Got crash_cymbal\")\n with pytest.raises(ValueError, match=msg):\n s.replace([1, 2, 3], inplace=True, method='crash_cymbal')\n tm.assert_series_equal(s, ser)\n\n def test_replace_with_empty_list(self):\n # GH 21977\n s = pd.Series([[1], [2, 3], [], np.nan, [4]])\n expected = s\n result = s.replace([], np.nan)\n tm.assert_series_equal(result, expected)\n\n # GH 19266\n with pytest.raises(ValueError, match=\"cannot assign mismatch\"):\n s.replace({np.nan: []})\n with pytest.raises(ValueError, match=\"cannot assign mismatch\"):\n s.replace({np.nan: ['dummy', 'alt']})\n\n def test_replace_mixed_types(self):\n s = pd.Series(np.arange(5), dtype='int64')\n\n def check_replace(to_rep, val, expected):\n sc = s.copy()\n r = s.replace(to_rep, val)\n sc.replace(to_rep, val, inplace=True)\n tm.assert_series_equal(expected, r)\n tm.assert_series_equal(expected, sc)\n\n # MUST upcast to float\n e = pd.Series([0., 1., 2., 3., 4.])\n tr, v = [3], [3.0]\n check_replace(tr, v, e)\n\n # MUST upcast to float\n e = pd.Series([0, 1, 2, 3.5, 4])\n tr, v = [3], [3.5]\n check_replace(tr, v, e)\n\n # casts to object\n e = pd.Series([0, 1, 2, 3.5, 'a'])\n tr, v = [3, 4], [3.5, 'a']\n check_replace(tr, v, e)\n\n # again casts to object\n e = pd.Series([0, 1, 2, 3.5, pd.Timestamp('20130101')])\n tr, v = [3, 4], [3.5, pd.Timestamp('20130101')]\n check_replace(tr, v, e)\n\n # casts to object\n e = pd.Series([0, 1, 2, 3.5, True], dtype='object')\n tr, v = [3, 4], [3.5, True]\n check_replace(tr, v, e)\n\n # test an object with dates + floats + integers + strings\n dr = pd.date_range('1/1/2001', '1/10/2001',\n freq='D').to_series().reset_index(drop=True)\n result = dr.astype(object).replace(\n [dr[0], dr[1], dr[2]], [1.0, 2, 'a'])\n expected = pd.Series([1.0, 2, 'a'] + dr[3:].tolist(), dtype=object)\n tm.assert_series_equal(result, expected)\n\n def test_replace_bool_with_string_no_op(self):\n s = pd.Series([True, False, True])\n result = s.replace('fun', 'in-the-sun')\n tm.assert_series_equal(s, result)\n\n def test_replace_bool_with_string(self):\n # nonexistent elements\n s = pd.Series([True, False, True])\n result = s.replace(True, '2u')\n expected = pd.Series(['2u', False, '2u'])\n tm.assert_series_equal(expected, result)\n\n def test_replace_bool_with_bool(self):\n s = pd.Series([True, False, True])\n result = s.replace(True, False)\n expected = pd.Series([False] * len(s))\n tm.assert_series_equal(expected, result)\n\n def test_replace_with_dict_with_bool_keys(self):\n s = pd.Series([True, False, True])\n with pytest.raises(TypeError, match='Cannot compare types .+'):\n s.replace({'asdf': 'asdb', True: 'yes'})\n\n def test_replace2(self):\n N = 100\n ser = pd.Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N),\n dtype=object)\n ser[:5] = np.nan\n ser[6:10] = 'foo'\n ser[20:30] = 'bar'\n\n # replace list with a single value\n rs = ser.replace([np.nan, 'foo', 'bar'], -1)\n\n assert (rs[:5] == -1).all()\n assert (rs[6:10] == -1).all()\n assert (rs[20:30] == -1).all()\n assert (pd.isna(ser[:5])).all()\n\n # replace with different values\n rs = ser.replace({np.nan: -1, 'foo': -2, 'bar': -3})\n\n assert (rs[:5] == -1).all()\n assert (rs[6:10] == -2).all()\n assert (rs[20:30] == -3).all()\n assert (pd.isna(ser[:5])).all()\n\n # replace with different values with 2 lists\n rs2 = ser.replace([np.nan, 'foo', 'bar'], [-1, -2, -3])\n tm.assert_series_equal(rs, rs2)\n\n # replace inplace\n ser.replace([np.nan, 'foo', 'bar'], -1, inplace=True)\n assert (ser[:5] == -1).all()\n assert (ser[6:10] == -1).all()\n assert (ser[20:30] 
== -1).all()\n\n def test_replace_with_empty_dictlike(self):\n # GH 15289\n s = pd.Series(list('abcd'))\n tm.assert_series_equal(s, s.replace(dict()))\n tm.assert_series_equal(s, s.replace(pd.Series([])))\n\n def test_replace_string_with_number(self):\n # GH 15743\n s = pd.Series([1, 2, 3])\n result = s.replace('2', np.nan)\n expected = pd.Series([1, 2, 3])\n tm.assert_series_equal(expected, result)\n\n def test_replace_replacer_equals_replacement(self):\n # GH 20656\n # make sure all replacers are matching against original values\n s = pd.Series(['a', 'b'])\n expected = pd.Series(['b', 'a'])\n result = s.replace({'a': 'b', 'b': 'a'})\n tm.assert_series_equal(expected, result)\n\n def test_replace_unicode_with_number(self):\n # GH 15743\n s = pd.Series([1, 2, 3])\n result = s.replace('2', np.nan)\n expected = pd.Series([1, 2, 3])\n tm.assert_series_equal(expected, result)\n\n def test_replace_mixed_types_with_string(self):\n # Testing mixed\n s = pd.Series([1, 2, 3, '4', 4, 5])\n result = s.replace([2, '4'], np.nan)\n expected = pd.Series([1, np.nan, 3, np.nan, 4, 5])\n tm.assert_series_equal(expected, result)\n\n @pytest.mark.parametrize(\"categorical, numeric\", [\n (pd.Categorical('A', categories=['A', 'B']), [1]),\n (pd.Categorical(('A', ), categories=['A', 'B']), [1]),\n (pd.Categorical(('A', 'B'), categories=['A', 'B']), [1, 2]),\n ])\n def test_replace_categorical(self, categorical, numeric):\n # GH 24971\n # Do not check if dtypes are equal due to a known issue that\n # Categorical.replace sometimes coerces to object (GH 23305)\n s = pd.Series(categorical)\n result = s.replace({'A': 1, 'B': 2})\n expected = pd.Series(numeric)\n tm.assert_series_equal(expected, result, check_dtype=False)\n\n def test_replace_with_no_overflowerror(self):\n # GH 25616\n # casts to object without Exception from OverflowError\n s = pd.Series([0, 1, 2, 3, 4])\n result = s.replace([3], ['100000000000000000000'])\n expected = pd.Series([0, 1, 2, '100000000000000000000', 4])\n tm.assert_series_equal(result, expected)\n\n s = pd.Series([0, '100000000000000000000',\n '100000000000000000001'])\n result = s.replace(['100000000000000000000'], [1])\n expected = pd.Series([0, 1, '100000000000000000001'])\n tm.assert_series_equal(result, expected)\n",
"import numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import Index, PeriodIndex, date_range, period_range\nimport pandas.core.indexes.period as period\nimport pandas.util.testing as tm\n\n\ndef _permute(obj):\n return obj.take(np.random.permutation(len(obj)))\n\n\nclass TestPeriodIndex(object):\n\n def test_joins(self, join_type):\n index = period_range('1/1/2000', '1/20/2000', freq='D')\n\n joined = index.join(index[:-5], how=join_type)\n\n assert isinstance(joined, PeriodIndex)\n assert joined.freq == index.freq\n\n def test_join_self(self, join_type):\n index = period_range('1/1/2000', '1/20/2000', freq='D')\n\n res = index.join(index, how=join_type)\n assert index is res\n\n def test_join_does_not_recur(self):\n df = tm.makeCustomDataframe(\n 3, 2, data_gen_f=lambda *args: np.random.randint(2),\n c_idx_type='p', r_idx_type='dt')\n s = df.iloc[:2, 0]\n\n res = s.index.join(df.columns, how='outer')\n expected = Index([s.index[0], s.index[1],\n df.columns[0], df.columns[1]], object)\n tm.assert_index_equal(res, expected)\n\n @pytest.mark.parametrize(\"sort\", [None, False])\n def test_union(self, sort):\n # union\n other1 = pd.period_range('1/1/2000', freq='D', periods=5)\n rng1 = pd.period_range('1/6/2000', freq='D', periods=5)\n expected1 = pd.PeriodIndex(['2000-01-06', '2000-01-07',\n '2000-01-08', '2000-01-09',\n '2000-01-10', '2000-01-01',\n '2000-01-02', '2000-01-03',\n '2000-01-04', '2000-01-05'],\n freq='D')\n\n rng2 = pd.period_range('1/1/2000', freq='D', periods=5)\n other2 = pd.period_range('1/4/2000', freq='D', periods=5)\n expected2 = pd.period_range('1/1/2000', freq='D', periods=8)\n\n rng3 = pd.period_range('1/1/2000', freq='D', periods=5)\n other3 = pd.PeriodIndex([], freq='D')\n expected3 = pd.period_range('1/1/2000', freq='D', periods=5)\n\n rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5)\n other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5)\n expected4 = pd.PeriodIndex(['2000-01-01 09:00', '2000-01-01 10:00',\n '2000-01-01 11:00', '2000-01-01 12:00',\n '2000-01-01 13:00', '2000-01-02 09:00',\n '2000-01-02 10:00', '2000-01-02 11:00',\n '2000-01-02 12:00', '2000-01-02 13:00'],\n freq='H')\n\n rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',\n '2000-01-01 09:05'], freq='T')\n other5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:05'\n '2000-01-01 09:08'],\n freq='T')\n expected5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',\n '2000-01-01 09:05', '2000-01-01 09:08'],\n freq='T')\n\n rng6 = pd.period_range('2000-01-01', freq='M', periods=7)\n other6 = pd.period_range('2000-04-01', freq='M', periods=7)\n expected6 = pd.period_range('2000-01-01', freq='M', periods=10)\n\n rng7 = pd.period_range('2003-01-01', freq='A', periods=5)\n other7 = pd.period_range('1998-01-01', freq='A', periods=8)\n expected7 = pd.PeriodIndex(['2003', '2004', '2005', '2006', '2007',\n '1998', '1999', '2000', '2001', '2002'],\n freq='A')\n\n rng8 = pd.PeriodIndex(['1/3/2000', '1/2/2000', '1/1/2000',\n '1/5/2000', '1/4/2000'], freq='D')\n other8 = pd.period_range('1/6/2000', freq='D', periods=5)\n expected8 = pd.PeriodIndex(['1/3/2000', '1/2/2000', '1/1/2000',\n '1/5/2000', '1/4/2000', '1/6/2000',\n '1/7/2000', '1/8/2000', '1/9/2000',\n '1/10/2000'], freq='D')\n\n for rng, other, expected in [(rng1, other1, expected1),\n (rng2, other2, expected2),\n (rng3, other3, expected3),\n (rng4, other4, expected4),\n (rng5, other5, expected5),\n (rng6, other6, expected6),\n (rng7, other7, expected7),\n (rng8, other8, expected8)]:\n\n 
result_union = rng.union(other, sort=sort)\n if sort is None:\n expected = expected.sort_values()\n tm.assert_index_equal(result_union, expected)\n\n @pytest.mark.parametrize(\"sort\", [None, False])\n def test_union_misc(self, sort):\n index = period_range('1/1/2000', '1/20/2000', freq='D')\n\n result = index[:-5].union(index[10:], sort=sort)\n tm.assert_index_equal(result, index)\n\n # not in order\n result = _permute(index[:-5]).union(_permute(index[10:]), sort=sort)\n if sort is None:\n tm.assert_index_equal(result, index)\n assert tm.equalContents(result, index)\n\n # raise if different frequencies\n index = period_range('1/1/2000', '1/20/2000', freq='D')\n index2 = period_range('1/1/2000', '1/20/2000', freq='W-WED')\n with pytest.raises(period.IncompatibleFrequency):\n index.union(index2, sort=sort)\n\n msg = 'can only call with other PeriodIndex-ed objects'\n with pytest.raises(ValueError, match=msg):\n index.join(index.to_timestamp())\n\n index3 = period_range('1/1/2000', '1/20/2000', freq='2D')\n with pytest.raises(period.IncompatibleFrequency):\n index.join(index3)\n\n def test_union_dataframe_index(self):\n rng1 = pd.period_range('1/1/1999', '1/1/2012', freq='M')\n s1 = pd.Series(np.random.randn(len(rng1)), rng1)\n\n rng2 = pd.period_range('1/1/1980', '12/1/2001', freq='M')\n s2 = pd.Series(np.random.randn(len(rng2)), rng2)\n df = pd.DataFrame({'s1': s1, 's2': s2})\n\n exp = pd.period_range('1/1/1980', '1/1/2012', freq='M')\n tm.assert_index_equal(df.index, exp)\n\n @pytest.mark.parametrize(\"sort\", [None, False])\n def test_intersection(self, sort):\n index = period_range('1/1/2000', '1/20/2000', freq='D')\n\n result = index[:-5].intersection(index[10:], sort=sort)\n tm.assert_index_equal(result, index[10:-5])\n\n # not in order\n left = _permute(index[:-5])\n right = _permute(index[10:])\n result = left.intersection(right, sort=sort)\n if sort is None:\n tm.assert_index_equal(result, index[10:-5])\n assert tm.equalContents(result, index[10:-5])\n\n # raise if different frequencies\n index = period_range('1/1/2000', '1/20/2000', freq='D')\n index2 = period_range('1/1/2000', '1/20/2000', freq='W-WED')\n with pytest.raises(period.IncompatibleFrequency):\n index.intersection(index2, sort=sort)\n\n index3 = period_range('1/1/2000', '1/20/2000', freq='2D')\n with pytest.raises(period.IncompatibleFrequency):\n index.intersection(index3, sort=sort)\n\n @pytest.mark.parametrize(\"sort\", [None, False])\n def test_intersection_cases(self, sort):\n base = period_range('6/1/2000', '6/30/2000', freq='D', name='idx')\n\n # if target has the same name, it is preserved\n rng2 = period_range('5/15/2000', '6/20/2000', freq='D', name='idx')\n expected2 = period_range('6/1/2000', '6/20/2000', freq='D',\n name='idx')\n\n # if target name is different, it will be reset\n rng3 = period_range('5/15/2000', '6/20/2000', freq='D', name='other')\n expected3 = period_range('6/1/2000', '6/20/2000', freq='D',\n name=None)\n\n rng4 = period_range('7/1/2000', '7/31/2000', freq='D', name='idx')\n expected4 = PeriodIndex([], name='idx', freq='D')\n\n for (rng, expected) in [(rng2, expected2), (rng3, expected3),\n (rng4, expected4)]:\n result = base.intersection(rng, sort=sort)\n tm.assert_index_equal(result, expected)\n assert result.name == expected.name\n assert result.freq == expected.freq\n\n # non-monotonic\n base = PeriodIndex(['2011-01-05', '2011-01-04', '2011-01-02',\n '2011-01-03'], freq='D', name='idx')\n\n rng2 = PeriodIndex(['2011-01-04', '2011-01-02',\n '2011-02-02', '2011-02-03'],\n freq='D', 
name='idx')\n expected2 = PeriodIndex(['2011-01-04', '2011-01-02'], freq='D',\n name='idx')\n\n rng3 = PeriodIndex(['2011-01-04', '2011-01-02', '2011-02-02',\n '2011-02-03'],\n freq='D', name='other')\n expected3 = PeriodIndex(['2011-01-04', '2011-01-02'], freq='D',\n name=None)\n\n rng4 = period_range('7/1/2000', '7/31/2000', freq='D', name='idx')\n expected4 = PeriodIndex([], freq='D', name='idx')\n\n for (rng, expected) in [(rng2, expected2), (rng3, expected3),\n (rng4, expected4)]:\n result = base.intersection(rng, sort=sort)\n if sort is None:\n expected = expected.sort_values()\n tm.assert_index_equal(result, expected)\n assert result.name == expected.name\n assert result.freq == 'D'\n\n # empty same freq\n rng = date_range('6/1/2000', '6/15/2000', freq='T')\n result = rng[0:0].intersection(rng)\n assert len(result) == 0\n\n result = rng.intersection(rng[0:0])\n assert len(result) == 0\n\n @pytest.mark.parametrize(\"sort\", [None, False])\n def test_difference(self, sort):\n # diff\n period_rng = ['1/3/2000', '1/2/2000', '1/1/2000', '1/5/2000',\n '1/4/2000']\n rng1 = pd.PeriodIndex(period_rng, freq='D')\n other1 = pd.period_range('1/6/2000', freq='D', periods=5)\n expected1 = rng1\n\n rng2 = pd.PeriodIndex(period_rng, freq='D')\n other2 = pd.period_range('1/4/2000', freq='D', periods=5)\n expected2 = pd.PeriodIndex(['1/3/2000', '1/2/2000', '1/1/2000'],\n freq='D')\n\n rng3 = pd.PeriodIndex(period_rng, freq='D')\n other3 = pd.PeriodIndex([], freq='D')\n expected3 = rng3\n\n period_rng = ['2000-01-01 10:00', '2000-01-01 09:00',\n '2000-01-01 12:00', '2000-01-01 11:00',\n '2000-01-01 13:00']\n rng4 = pd.PeriodIndex(period_rng, freq='H')\n other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5)\n expected4 = rng4\n\n rng5 = pd.PeriodIndex(['2000-01-01 09:03', '2000-01-01 09:01',\n '2000-01-01 09:05'], freq='T')\n other5 = pd.PeriodIndex(\n ['2000-01-01 09:01', '2000-01-01 09:05'], freq='T')\n expected5 = pd.PeriodIndex(['2000-01-01 09:03'], freq='T')\n\n period_rng = ['2000-02-01', '2000-01-01', '2000-06-01',\n '2000-07-01', '2000-05-01', '2000-03-01',\n '2000-04-01']\n rng6 = pd.PeriodIndex(period_rng, freq='M')\n other6 = pd.period_range('2000-04-01', freq='M', periods=7)\n expected6 = pd.PeriodIndex(['2000-02-01', '2000-01-01', '2000-03-01'],\n freq='M')\n\n period_rng = ['2003', '2007', '2006', '2005', '2004']\n rng7 = pd.PeriodIndex(period_rng, freq='A')\n other7 = pd.period_range('1998-01-01', freq='A', periods=8)\n expected7 = pd.PeriodIndex(['2007', '2006'], freq='A')\n\n for rng, other, expected in [(rng1, other1, expected1),\n (rng2, other2, expected2),\n (rng3, other3, expected3),\n (rng4, other4, expected4),\n (rng5, other5, expected5),\n (rng6, other6, expected6),\n (rng7, other7, expected7), ]:\n result_difference = rng.difference(other, sort=sort)\n if sort is None:\n expected = expected.sort_values()\n tm.assert_index_equal(result_difference, expected)\n",
"# pylint: disable=W0223\nimport textwrap\nimport warnings\n\nimport numpy as np\n\nfrom pandas._libs.indexing import _NDFrameIndexerBase\nfrom pandas._libs.lib import item_from_zerodim\nfrom pandas.errors import AbstractMethodError\nfrom pandas.util._decorators import Appender\n\nfrom pandas.core.dtypes.common import (\n ensure_platform_int, is_float, is_integer, is_integer_dtype, is_iterator,\n is_list_like, is_numeric_dtype, is_scalar, is_sequence, is_sparse)\nfrom pandas.core.dtypes.generic import ABCDataFrame, ABCPanel, ABCSeries\nfrom pandas.core.dtypes.missing import _infer_fill_value, isna\n\nimport pandas.core.common as com\nfrom pandas.core.index import Index, MultiIndex\n\n\n# the supported indexers\ndef get_indexers_list():\n\n return [\n ('ix', _IXIndexer),\n ('iloc', _iLocIndexer),\n ('loc', _LocIndexer),\n ('at', _AtIndexer),\n ('iat', _iAtIndexer),\n ]\n\n\n# \"null slice\"\n_NS = slice(None, None)\n\n\n# the public IndexSlicerMaker\nclass _IndexSlice(object):\n \"\"\"\n Create an object to more easily perform multi-index slicing\n\n See Also\n --------\n MultiIndex.remove_unused_levels : New MultiIndex with no unused levels.\n\n Notes\n -----\n See :ref:`Defined Levels <advanced.shown_levels>`\n for further info on slicing a MultiIndex.\n\n Examples\n --------\n\n >>> midx = pd.MultiIndex.from_product([['A0','A1'], ['B0','B1','B2','B3']])\n >>> columns = ['foo', 'bar']\n >>> dfmi = pd.DataFrame(np.arange(16).reshape((len(midx), len(columns))),\n index=midx, columns=columns)\n\n Using the default slice command:\n\n >>> dfmi.loc[(slice(None), slice('B0', 'B1')), :]\n foo bar\n A0 B0 0 1\n B1 2 3\n A1 B0 8 9\n B1 10 11\n\n Using the IndexSlice class for a more intuitive command:\n\n >>> idx = pd.IndexSlice\n >>> dfmi.loc[idx[:, 'B0':'B1'], :]\n foo bar\n A0 B0 0 1\n B1 2 3\n A1 B0 8 9\n B1 10 11\n \"\"\"\n\n def __getitem__(self, arg):\n return arg\n\n\nIndexSlice = _IndexSlice()\n\n\nclass IndexingError(Exception):\n pass\n\n\nclass _NDFrameIndexer(_NDFrameIndexerBase):\n _valid_types = None\n _exception = KeyError\n axis = None\n\n def __call__(self, axis=None):\n # we need to return a copy of ourselves\n new_self = self.__class__(self.name, self.obj)\n\n if axis is not None:\n axis = self.obj._get_axis_number(axis)\n new_self.axis = axis\n return new_self\n\n def __iter__(self):\n raise NotImplementedError('ix is not iterable')\n\n def __getitem__(self, key):\n if type(key) is tuple:\n key = tuple(com.apply_if_callable(x, self.obj)\n for x in key)\n try:\n values = self.obj._get_value(*key)\n if is_scalar(values):\n return values\n except Exception:\n pass\n\n return self._getitem_tuple(key)\n else:\n # we by definition only have the 0th axis\n axis = self.axis or 0\n\n key = com.apply_if_callable(key, self.obj)\n return self._getitem_axis(key, axis=axis)\n\n def _get_label(self, label, axis=None):\n if axis is None:\n axis = self.axis or 0\n\n if self.ndim == 1:\n # for perf reasons we want to try _xs first\n # as its basically direct indexing\n # but will fail when the index is not present\n # see GH5667\n return self.obj._xs(label, axis=axis)\n elif isinstance(label, tuple) and isinstance(label[axis], slice):\n raise IndexingError('no slices here, handle elsewhere')\n\n return self.obj._xs(label, axis=axis)\n\n def _get_loc(self, key, axis=None):\n if axis is None:\n axis = self.axis\n return self.obj._ixs(key, axis=axis)\n\n def _slice(self, obj, axis=None, kind=None):\n if axis is None:\n axis = self.axis\n return self.obj._slice(obj, axis=axis, kind=kind)\n\n def 
_get_setitem_indexer(self, key):\n if self.axis is not None:\n return self._convert_tuple(key, is_setter=True)\n\n axis = self.obj._get_axis(0)\n\n if isinstance(axis, MultiIndex) and self.name != 'iloc':\n try:\n return axis.get_loc(key)\n except Exception:\n pass\n\n if isinstance(key, tuple):\n try:\n return self._convert_tuple(key, is_setter=True)\n except IndexingError:\n pass\n\n if isinstance(key, range):\n return self._convert_range(key, is_setter=True)\n\n try:\n return self._convert_to_indexer(key, is_setter=True)\n except TypeError as e:\n\n # invalid indexer type vs 'other' indexing errors\n if 'cannot do' in str(e):\n raise\n raise IndexingError(key)\n\n def __setitem__(self, key, value):\n if isinstance(key, tuple):\n key = tuple(com.apply_if_callable(x, self.obj)\n for x in key)\n else:\n key = com.apply_if_callable(key, self.obj)\n indexer = self._get_setitem_indexer(key)\n self._setitem_with_indexer(indexer, value)\n\n def _validate_key(self, key, axis):\n \"\"\"\n Ensure that key is valid for current indexer.\n\n Parameters\n ----------\n key : scalar, slice or list-like\n The key requested\n\n axis : int\n Dimension on which the indexing is being made\n\n Raises\n ------\n TypeError\n If the key (or some element of it) has wrong type\n\n IndexError\n If the key (or some element of it) is out of bounds\n\n KeyError\n If the key was not found\n \"\"\"\n raise AbstractMethodError()\n\n def _has_valid_tuple(self, key):\n \"\"\" check the key for valid keys across my indexer \"\"\"\n for i, k in enumerate(key):\n if i >= self.obj.ndim:\n raise IndexingError('Too many indexers')\n try:\n self._validate_key(k, i)\n except ValueError:\n raise ValueError(\"Location based indexing can only have \"\n \"[{types}] types\"\n .format(types=self._valid_types))\n\n def _is_nested_tuple_indexer(self, tup):\n if any(isinstance(ax, MultiIndex) for ax in self.obj.axes):\n return any(is_nested_tuple(tup, ax) for ax in self.obj.axes)\n return False\n\n def _convert_tuple(self, key, is_setter=False):\n keyidx = []\n if self.axis is not None:\n axis = self.obj._get_axis_number(self.axis)\n for i in range(self.ndim):\n if i == axis:\n keyidx.append(self._convert_to_indexer(\n key, axis=axis, is_setter=is_setter))\n else:\n keyidx.append(slice(None))\n else:\n for i, k in enumerate(key):\n if i >= self.obj.ndim:\n raise IndexingError('Too many indexers')\n idx = self._convert_to_indexer(k, axis=i, is_setter=is_setter)\n keyidx.append(idx)\n return tuple(keyidx)\n\n def _convert_range(self, key, is_setter=False):\n \"\"\" convert a range argument \"\"\"\n return list(key)\n\n def _convert_scalar_indexer(self, key, axis):\n # if we are accessing via lowered dim, use the last dim\n if axis is None:\n axis = 0\n ax = self.obj._get_axis(min(axis, self.ndim - 1))\n # a scalar\n return ax._convert_scalar_indexer(key, kind=self.name)\n\n def _convert_slice_indexer(self, key, axis):\n # if we are accessing via lowered dim, use the last dim\n ax = self.obj._get_axis(min(axis, self.ndim - 1))\n return ax._convert_slice_indexer(key, kind=self.name)\n\n def _has_valid_setitem_indexer(self, indexer):\n return True\n\n def _has_valid_positional_setitem_indexer(self, indexer):\n \"\"\" validate that an positional indexer cannot enlarge its target\n will raise if needed, does not modify the indexer externally\n \"\"\"\n if isinstance(indexer, dict):\n raise IndexError(\"{0} cannot enlarge its target object\"\n .format(self.name))\n else:\n if not isinstance(indexer, tuple):\n indexer = self._tuplify(indexer)\n for 
ax, i in zip(self.obj.axes, indexer):\n if isinstance(i, slice):\n # should check the stop slice?\n pass\n elif is_list_like_indexer(i):\n # should check the elements?\n pass\n elif is_integer(i):\n if i >= len(ax):\n raise IndexError(\"{name} cannot enlarge its target \"\n \"object\".format(name=self.name))\n elif isinstance(i, dict):\n raise IndexError(\"{name} cannot enlarge its target object\"\n .format(name=self.name))\n\n return True\n\n def _setitem_with_indexer(self, indexer, value):\n self._has_valid_setitem_indexer(indexer)\n\n # also has the side effect of consolidating in-place\n from pandas import Series\n info_axis = self.obj._info_axis_number\n\n # maybe partial set\n take_split_path = self.obj._is_mixed_type\n\n # if there is only one block/type, still have to take split path\n # unless the block is one-dimensional or it can hold the value\n if not take_split_path and self.obj._data.blocks:\n blk, = self.obj._data.blocks\n if 1 < blk.ndim: # in case of dict, keys are indices\n val = list(value.values()) if isinstance(value,\n dict) else value\n take_split_path = not blk._can_hold_element(val)\n\n if isinstance(indexer, tuple) and len(indexer) == len(self.obj.axes):\n\n for i, ax in zip(indexer, self.obj.axes):\n\n # if we have any multi-indexes that have non-trivial slices\n # (not null slices) then we must take the split path, xref\n # GH 10360\n if (isinstance(ax, MultiIndex) and\n not (is_integer(i) or com.is_null_slice(i))):\n take_split_path = True\n break\n\n if isinstance(indexer, tuple):\n nindexer = []\n for i, idx in enumerate(indexer):\n if isinstance(idx, dict):\n\n # reindex the axis to the new value\n # and set inplace\n key, _ = convert_missing_indexer(idx)\n\n # if this is the items axes, then take the main missing\n # path first\n # this correctly sets the dtype and avoids cache issues\n # essentially this separates out the block that is needed\n # to possibly be modified\n if self.ndim > 1 and i == self.obj._info_axis_number:\n\n # add the new item, and set the value\n # must have all defined axes if we have a scalar\n # or a list-like on the non-info axes if we have a\n # list-like\n len_non_info_axes = (\n len(_ax) for _i, _ax in enumerate(self.obj.axes)\n if _i != i\n )\n if any(not l for l in len_non_info_axes):\n if not is_list_like_indexer(value):\n raise ValueError(\"cannot set a frame with no \"\n \"defined index and a scalar\")\n self.obj[key] = value\n return self.obj\n\n # add a new item with the dtype setup\n self.obj[key] = _infer_fill_value(value)\n\n new_indexer = convert_from_missing_indexer_tuple(\n indexer, self.obj.axes)\n self._setitem_with_indexer(new_indexer, value)\n\n return self.obj\n\n # reindex the axis\n # make sure to clear the cache because we are\n # just replacing the block manager here\n # so the object is the same\n index = self.obj._get_axis(i)\n labels = index.insert(len(index), key)\n self.obj._data = self.obj.reindex(labels, axis=i)._data\n self.obj._maybe_update_cacher(clear=True)\n self.obj._is_copy = None\n\n nindexer.append(labels.get_loc(key))\n\n else:\n nindexer.append(idx)\n\n indexer = tuple(nindexer)\n else:\n\n indexer, missing = convert_missing_indexer(indexer)\n\n if missing:\n\n # reindex the axis to the new value\n # and set inplace\n if self.ndim == 1:\n index = self.obj.index\n new_index = index.insert(len(index), indexer)\n\n # we have a coerced indexer, e.g. a float\n # that matches in an Int64Index, so\n # we will not create a duplicate index, rather\n # index to that element\n # e.g. 
0.0 -> 0\n # GH12246\n if index.is_unique:\n new_indexer = index.get_indexer([new_index[-1]])\n if (new_indexer != -1).any():\n return self._setitem_with_indexer(new_indexer,\n value)\n\n # this preserves dtype of the value\n new_values = Series([value])._values\n if len(self.obj._values):\n try:\n new_values = np.concatenate([self.obj._values,\n new_values])\n except TypeError:\n as_obj = self.obj.astype(object)\n new_values = np.concatenate([as_obj,\n new_values])\n self.obj._data = self.obj._constructor(\n new_values, index=new_index, name=self.obj.name)._data\n self.obj._maybe_update_cacher(clear=True)\n return self.obj\n\n elif self.ndim == 2:\n\n # no columns and scalar\n if not len(self.obj.columns):\n raise ValueError(\"cannot set a frame with no defined \"\n \"columns\")\n\n # append a Series\n if isinstance(value, Series):\n\n value = value.reindex(index=self.obj.columns,\n copy=True)\n value.name = indexer\n\n # a list-list\n else:\n\n # must have conforming columns\n if is_list_like_indexer(value):\n if len(value) != len(self.obj.columns):\n raise ValueError(\"cannot set a row with \"\n \"mismatched columns\")\n\n value = Series(value, index=self.obj.columns,\n name=indexer)\n\n self.obj._data = self.obj.append(value)._data\n self.obj._maybe_update_cacher(clear=True)\n return self.obj\n\n # set using setitem (Panel and > dims)\n elif self.ndim >= 3:\n return self.obj.__setitem__(indexer, value)\n\n # set\n item_labels = self.obj._get_axis(info_axis)\n\n # align and set the values\n if take_split_path:\n\n if not isinstance(indexer, tuple):\n indexer = self._tuplify(indexer)\n\n if isinstance(value, ABCSeries):\n value = self._align_series(indexer, value)\n\n info_idx = indexer[info_axis]\n if is_integer(info_idx):\n info_idx = [info_idx]\n labels = item_labels[info_idx]\n\n # if we have a partial multiindex, then need to adjust the plane\n # indexer here\n if (len(labels) == 1 and\n isinstance(self.obj[labels[0]].axes[0], MultiIndex)):\n item = labels[0]\n obj = self.obj[item]\n index = obj.index\n idx = indexer[:info_axis][0]\n\n plane_indexer = tuple([idx]) + indexer[info_axis + 1:]\n lplane_indexer = length_of_indexer(plane_indexer[0], index)\n\n # require that we are setting the right number of values that\n # we are indexing\n if is_list_like_indexer(value) and np.iterable(\n value) and lplane_indexer != len(value):\n\n if len(obj[idx]) != len(value):\n raise ValueError(\"cannot set using a multi-index \"\n \"selection indexer with a different \"\n \"length than the value\")\n\n # make sure we have an ndarray\n value = getattr(value, 'values', value).ravel()\n\n # we can directly set the series here\n # as we select a slice indexer on the mi\n idx = index._convert_slice_indexer(idx)\n obj._consolidate_inplace()\n obj = obj.copy()\n obj._data = obj._data.setitem(indexer=tuple([idx]),\n value=value)\n self.obj[item] = obj\n return\n\n # non-mi\n else:\n plane_indexer = indexer[:info_axis] + indexer[info_axis + 1:]\n if info_axis > 0:\n plane_axis = self.obj.axes[:info_axis][0]\n lplane_indexer = length_of_indexer(plane_indexer[0],\n plane_axis)\n else:\n lplane_indexer = 0\n\n def setter(item, v):\n s = self.obj[item]\n pi = plane_indexer[0] if lplane_indexer == 1 else plane_indexer\n\n # perform the equivalent of a setitem on the info axis\n # as we have a null slice or a slice with full bounds\n # which means essentially reassign to the columns of a\n # multi-dim object\n # GH6149 (null slice), GH10408 (full bounds)\n if (isinstance(pi, tuple) and\n 
all(com.is_null_slice(idx) or\n com.is_full_slice(idx, len(self.obj))\n for idx in pi)):\n s = v\n else:\n # set the item, possibly having a dtype change\n s._consolidate_inplace()\n s = s.copy()\n s._data = s._data.setitem(indexer=pi, value=v)\n s._maybe_update_cacher(clear=True)\n\n # reset the sliced object if unique\n self.obj[item] = s\n\n def can_do_equal_len():\n \"\"\" return True if we have an equal len settable \"\"\"\n if (not len(labels) == 1 or not np.iterable(value) or\n is_scalar(plane_indexer[0])):\n return False\n\n item = labels[0]\n index = self.obj[item].index\n\n values_len = len(value)\n # equal len list/ndarray\n if len(index) == values_len:\n return True\n elif lplane_indexer == values_len:\n return True\n\n return False\n\n # we need an iterable, with a ndim of at least 1\n # eg. don't pass through np.array(0)\n if is_list_like_indexer(value) and getattr(value, 'ndim', 1) > 0:\n\n # we have an equal len Frame\n if isinstance(value, ABCDataFrame) and value.ndim > 1:\n sub_indexer = list(indexer)\n multiindex_indexer = isinstance(labels, MultiIndex)\n\n for item in labels:\n if item in value:\n sub_indexer[info_axis] = item\n v = self._align_series(\n tuple(sub_indexer), value[item],\n multiindex_indexer)\n else:\n v = np.nan\n\n setter(item, v)\n\n # we have an equal len ndarray/convertible to our labels\n # hasattr first, to avoid coercing to ndarray without reason.\n # But we may be relying on the ndarray coercion to check ndim.\n # Why not just convert to an ndarray earlier on if needed?\n elif ((hasattr(value, 'ndim') and value.ndim == 2)\n or (not hasattr(value, 'ndim') and\n np.array(value).ndim) == 2):\n\n # note that this coerces the dtype if we are mixed\n # GH 7551\n value = np.array(value, dtype=object)\n if len(labels) != value.shape[1]:\n raise ValueError('Must have equal len keys and value '\n 'when setting with an ndarray')\n\n for i, item in enumerate(labels):\n\n # setting with a list, recoerces\n setter(item, value[:, i].tolist())\n\n # we have an equal len list/ndarray\n elif can_do_equal_len():\n setter(labels[0], value)\n\n # per label values\n else:\n\n if len(labels) != len(value):\n raise ValueError('Must have equal len keys and value '\n 'when setting with an iterable')\n\n for item, v in zip(labels, value):\n setter(item, v)\n else:\n\n # scalar\n for item in labels:\n setter(item, value)\n\n else:\n if isinstance(indexer, tuple):\n indexer = maybe_convert_ix(*indexer)\n\n # if we are setting on the info axis ONLY\n # set using those methods to avoid block-splitting\n # logic here\n if (len(indexer) > info_axis and\n is_integer(indexer[info_axis]) and\n all(com.is_null_slice(idx)\n for i, idx in enumerate(indexer)\n if i != info_axis) and\n item_labels.is_unique):\n self.obj[item_labels[indexer[info_axis]]] = value\n return\n\n if isinstance(value, (ABCSeries, dict)):\n # TODO(EA): ExtensionBlock.setitem this causes issues with\n # setting for extensionarrays that store dicts. 
Need to decide\n # if it's worth supporting that.\n value = self._align_series(indexer, Series(value))\n\n elif isinstance(value, ABCDataFrame):\n value = self._align_frame(indexer, value)\n\n if isinstance(value, ABCPanel):\n value = self._align_panel(indexer, value)\n\n # check for chained assignment\n self.obj._check_is_chained_assignment_possible()\n\n # actually do the set\n self.obj._consolidate_inplace()\n self.obj._data = self.obj._data.setitem(indexer=indexer,\n value=value)\n self.obj._maybe_update_cacher(clear=True)\n\n def _align_series(self, indexer, ser, multiindex_indexer=False):\n \"\"\"\n Parameters\n ----------\n indexer : tuple, slice, scalar\n The indexer used to get the locations that will be set to\n `ser`\n\n ser : pd.Series\n The values to assign to the locations specified by `indexer`\n\n multiindex_indexer : boolean, optional\n Defaults to False. Should be set to True if `indexer` was from\n a `pd.MultiIndex`, to avoid unnecessary broadcasting.\n\n\n Returns:\n --------\n `np.array` of `ser` broadcast to the appropriate shape for assignment\n to the locations selected by `indexer`\n\n \"\"\"\n if isinstance(indexer, (slice, np.ndarray, list, Index)):\n indexer = tuple([indexer])\n\n if isinstance(indexer, tuple):\n\n # flatten np.ndarray indexers\n def ravel(i):\n return i.ravel() if isinstance(i, np.ndarray) else i\n indexer = tuple(map(ravel, indexer))\n\n aligners = [not com.is_null_slice(idx) for idx in indexer]\n sum_aligners = sum(aligners)\n single_aligner = sum_aligners == 1\n is_frame = self.obj.ndim == 2\n is_panel = self.obj.ndim >= 3\n obj = self.obj\n\n # are we a single alignable value on a non-primary\n # dim (e.g. panel: 1,2, or frame: 0) ?\n # hence need to align to a single axis dimension\n # rather that find all valid dims\n\n # frame\n if is_frame:\n single_aligner = single_aligner and aligners[0]\n\n # panel\n elif is_panel:\n single_aligner = (single_aligner and\n (aligners[1] or aligners[2]))\n\n # we have a frame, with multiple indexers on both axes; and a\n # series, so need to broadcast (see GH5206)\n if (sum_aligners == self.ndim and\n all(is_sequence(_) for _ in indexer)):\n ser = ser.reindex(obj.axes[0][indexer[0]], copy=True)._values\n\n # single indexer\n if len(indexer) > 1 and not multiindex_indexer:\n len_indexer = len(indexer[1])\n ser = np.tile(ser, len_indexer).reshape(len_indexer, -1).T\n\n return ser\n\n for i, idx in enumerate(indexer):\n ax = obj.axes[i]\n\n # multiple aligners (or null slices)\n if is_sequence(idx) or isinstance(idx, slice):\n if single_aligner and com.is_null_slice(idx):\n continue\n new_ix = ax[idx]\n if not is_list_like_indexer(new_ix):\n new_ix = Index([new_ix])\n else:\n new_ix = Index(new_ix)\n if ser.index.equals(new_ix) or not len(new_ix):\n return ser._values.copy()\n\n return ser.reindex(new_ix)._values\n\n # 2 dims\n elif single_aligner and is_frame:\n\n # reindex along index\n ax = self.obj.axes[1]\n if ser.index.equals(ax) or not len(ax):\n return ser._values.copy()\n return ser.reindex(ax)._values\n\n # >2 dims\n elif single_aligner:\n\n broadcast = []\n for n, labels in enumerate(self.obj._get_plane_axes(i)):\n\n # reindex along the matching dimensions\n if len(labels & ser.index):\n ser = ser.reindex(labels)\n else:\n broadcast.append((n, len(labels)))\n\n # broadcast along other dims\n ser = ser._values.copy()\n for (axis, l) in broadcast:\n shape = [-1] * (len(broadcast) + 1)\n shape[axis] = l\n ser = np.tile(ser, l).reshape(shape)\n\n if self.obj.ndim == 3:\n ser = ser.T\n\n return 
ser\n\n elif is_scalar(indexer):\n ax = self.obj._get_axis(1)\n\n if ser.index.equals(ax):\n return ser._values.copy()\n\n return ser.reindex(ax)._values\n\n raise ValueError('Incompatible indexer with Series')\n\n def _align_frame(self, indexer, df):\n is_frame = self.obj.ndim == 2\n is_panel = self.obj.ndim >= 3\n\n if isinstance(indexer, tuple):\n\n idx, cols = None, None\n sindexers = []\n for i, ix in enumerate(indexer):\n ax = self.obj.axes[i]\n if is_sequence(ix) or isinstance(ix, slice):\n if isinstance(ix, np.ndarray):\n ix = ix.ravel()\n if idx is None:\n idx = ax[ix]\n elif cols is None:\n cols = ax[ix]\n else:\n break\n else:\n sindexers.append(i)\n\n # panel\n if is_panel:\n\n # need to conform to the convention\n # as we are not selecting on the items axis\n # and we have a single indexer\n # GH 7763\n if len(sindexers) == 1 and sindexers[0] != 0:\n df = df.T\n\n if idx is None:\n idx = df.index\n if cols is None:\n cols = df.columns\n\n if idx is not None and cols is not None:\n\n if df.index.equals(idx) and df.columns.equals(cols):\n val = df.copy()._values\n else:\n val = df.reindex(idx, columns=cols)._values\n return val\n\n elif ((isinstance(indexer, slice) or is_list_like_indexer(indexer)) and\n is_frame):\n ax = self.obj.index[indexer]\n if df.index.equals(ax):\n val = df.copy()._values\n else:\n\n # we have a multi-index and are trying to align\n # with a particular, level GH3738\n if (isinstance(ax, MultiIndex) and\n isinstance(df.index, MultiIndex) and\n ax.nlevels != df.index.nlevels):\n raise TypeError(\"cannot align on a multi-index with out \"\n \"specifying the join levels\")\n\n val = df.reindex(index=ax)._values\n return val\n\n elif is_scalar(indexer) and is_panel:\n idx = self.obj.axes[1]\n cols = self.obj.axes[2]\n\n # by definition we are indexing on the 0th axis\n # a passed in dataframe which is actually a transpose\n # of what is needed\n if idx.equals(df.index) and cols.equals(df.columns):\n return df.copy()._values\n\n return df.reindex(idx, columns=cols)._values\n\n raise ValueError('Incompatible indexer with DataFrame')\n\n def _align_panel(self, indexer, df):\n raise NotImplementedError(\"cannot set using an indexer with a Panel \"\n \"yet!\")\n\n def _getitem_tuple(self, tup):\n try:\n return self._getitem_lowerdim(tup)\n except IndexingError:\n pass\n\n # no multi-index, so validate all of the indexers\n self._has_valid_tuple(tup)\n\n # ugly hack for GH #836\n if self._multi_take_opportunity(tup):\n return self._multi_take(tup)\n\n # no shortcut needed\n retval = self.obj\n for i, key in enumerate(tup):\n if i >= self.obj.ndim:\n raise IndexingError('Too many indexers')\n\n if com.is_null_slice(key):\n continue\n\n retval = getattr(retval, self.name)._getitem_axis(key, axis=i)\n\n return retval\n\n def _multi_take_opportunity(self, tup):\n \"\"\"\n Check whether there is the possibility to use ``_multi_take``.\n Currently the limit is that all axes being indexed must be indexed with\n list-likes.\n\n Parameters\n ----------\n tup : tuple\n Tuple of indexers, one per axis\n\n Returns\n -------\n boolean: Whether the current indexing can be passed through _multi_take\n \"\"\"\n if not all(is_list_like_indexer(x) for x in tup):\n return False\n\n # just too complicated\n if any(com.is_bool_indexer(x) for x in tup):\n return False\n\n return True\n\n def _multi_take(self, tup):\n \"\"\"\n Create the indexers for the passed tuple of keys, and execute the take\n operation. 
This allows the take operation to be executed all at once -\n rather than once for each dimension - improving efficiency.\n\n Parameters\n ----------\n tup : tuple\n Tuple of indexers, one per axis\n\n Returns\n -------\n values: same type as the object being indexed\n \"\"\"\n # GH 836\n o = self.obj\n d = {axis: self._get_listlike_indexer(key, axis)\n for (key, axis) in zip(tup, o._AXIS_ORDERS)}\n return o._reindex_with_indexers(d, copy=True, allow_dups=True)\n\n def _convert_for_reindex(self, key, axis=None):\n return key\n\n def _handle_lowerdim_multi_index_axis0(self, tup):\n # we have an axis0 multi-index, handle or raise\n\n try:\n # fast path for series or for tup devoid of slices\n return self._get_label(tup, axis=self.axis)\n except TypeError:\n # slices are unhashable\n pass\n except Exception as e1:\n if isinstance(tup[0], (slice, Index)):\n raise IndexingError(\"Handle elsewhere\")\n\n # raise the error if we are not sorted\n ax0 = self.obj._get_axis(0)\n if not ax0.is_lexsorted_for_tuple(tup):\n raise e1\n\n return None\n\n def _getitem_lowerdim(self, tup):\n\n # we can directly get the axis result since the axis is specified\n if self.axis is not None:\n axis = self.obj._get_axis_number(self.axis)\n return self._getitem_axis(tup, axis=axis)\n\n # we may have a nested tuples indexer here\n if self._is_nested_tuple_indexer(tup):\n return self._getitem_nested_tuple(tup)\n\n # we maybe be using a tuple to represent multiple dimensions here\n ax0 = self.obj._get_axis(0)\n # ...but iloc should handle the tuple as simple integer-location\n # instead of checking it as multiindex representation (GH 13797)\n if isinstance(ax0, MultiIndex) and self.name != 'iloc':\n result = self._handle_lowerdim_multi_index_axis0(tup)\n if result is not None:\n return result\n\n if len(tup) > self.obj.ndim:\n raise IndexingError(\"Too many indexers. 
handle elsewhere\")\n\n # to avoid wasted computation\n # df.ix[d1:d2, 0] -> columns first (True)\n # df.ix[0, ['C', 'B', A']] -> rows first (False)\n for i, key in enumerate(tup):\n if is_label_like(key) or isinstance(key, tuple):\n section = self._getitem_axis(key, axis=i)\n\n # we have yielded a scalar ?\n if not is_list_like_indexer(section):\n return section\n\n elif section.ndim == self.ndim:\n # we're in the middle of slicing through a MultiIndex\n # revise the key wrt to `section` by inserting an _NS\n new_key = tup[:i] + (_NS,) + tup[i + 1:]\n\n else:\n new_key = tup[:i] + tup[i + 1:]\n\n # unfortunately need an odious kludge here because of\n # DataFrame transposing convention\n if (isinstance(section, ABCDataFrame) and i > 0 and\n len(new_key) == 2):\n a, b = new_key\n new_key = b, a\n\n if len(new_key) == 1:\n new_key, = new_key\n\n # Slices should return views, but calling iloc/loc with a null\n # slice returns a new object.\n if com.is_null_slice(new_key):\n return section\n # This is an elided recursive call to iloc/loc/etc'\n return getattr(section, self.name)[new_key]\n\n raise IndexingError('not applicable')\n\n def _getitem_nested_tuple(self, tup):\n # we have a nested tuple so have at least 1 multi-index level\n # we should be able to match up the dimensionaility here\n\n # we have too many indexers for our dim, but have at least 1\n # multi-index dimension, try to see if we have something like\n # a tuple passed to a series with a multi-index\n if len(tup) > self.ndim:\n result = self._handle_lowerdim_multi_index_axis0(tup)\n if result is not None:\n return result\n\n # this is a series with a multi-index specified a tuple of\n # selectors\n return self._getitem_axis(tup, axis=self.axis)\n\n # handle the multi-axis by taking sections and reducing\n # this is iterative\n obj = self.obj\n axis = 0\n for i, key in enumerate(tup):\n\n if com.is_null_slice(key):\n axis += 1\n continue\n\n current_ndim = obj.ndim\n obj = getattr(obj, self.name)._getitem_axis(key, axis=axis)\n axis += 1\n\n # if we have a scalar, we are done\n if is_scalar(obj) or not hasattr(obj, 'ndim'):\n break\n\n # has the dim of the obj changed?\n # GH 7199\n if obj.ndim < current_ndim:\n\n # GH 7516\n # if had a 3 dim and are going to a 2d\n # axes are reversed on a DataFrame\n if i >= 1 and current_ndim == 3 and obj.ndim == 2:\n obj = obj.T\n\n axis -= 1\n\n return obj\n\n def _getitem_axis(self, key, axis=None):\n\n if axis is None:\n axis = self.axis or 0\n\n if is_iterator(key):\n key = list(key)\n self._validate_key(key, axis)\n\n labels = self.obj._get_axis(axis)\n if isinstance(key, slice):\n return self._get_slice_axis(key, axis=axis)\n elif (is_list_like_indexer(key) and\n not (isinstance(key, tuple) and\n isinstance(labels, MultiIndex))):\n\n if hasattr(key, 'ndim') and key.ndim > 1:\n raise ValueError('Cannot index with multidimensional key')\n\n return self._getitem_iterable(key, axis=axis)\n else:\n\n # maybe coerce a float scalar to integer\n key = labels._maybe_cast_indexer(key)\n\n if is_integer(key):\n if axis == 0 and isinstance(labels, MultiIndex):\n try:\n return self._get_label(key, axis=axis)\n except (KeyError, TypeError):\n if self.obj.index.levels[0].is_integer():\n raise\n\n # this is the fallback! 
(for a non-float, non-integer index)\n if not labels.is_floating() and not labels.is_integer():\n return self._get_loc(key, axis=axis)\n\n return self._get_label(key, axis=axis)\n\n def _get_listlike_indexer(self, key, axis, raise_missing=False):\n \"\"\"\n Transform a list-like of keys into a new index and an indexer.\n\n Parameters\n ----------\n key : list-like\n Target labels\n axis: int\n Dimension on which the indexing is being made\n raise_missing: bool\n Whether to raise a KeyError if some labels are not found. Will be\n removed in the future, and then this method will always behave as\n if raise_missing=True.\n\n Raises\n ------\n KeyError\n If at least one key was requested but none was found, and\n raise_missing=True.\n\n Returns\n -------\n keyarr: Index\n New index (coinciding with 'key' if the axis is unique)\n values : array-like\n An indexer for the return object; -1 denotes keys not found\n \"\"\"\n o = self.obj\n ax = o._get_axis(axis)\n\n # Have the index compute an indexer or return None\n # if it cannot handle:\n indexer, keyarr = ax._convert_listlike_indexer(key,\n kind=self.name)\n # We only act on all found values:\n if indexer is not None and (indexer != -1).all():\n self._validate_read_indexer(key, indexer, axis,\n raise_missing=raise_missing)\n return ax[indexer], indexer\n\n if ax.is_unique:\n # If we are trying to get actual keys from empty Series, we\n # patiently wait for a KeyError later on - otherwise, convert\n if len(ax) or not len(key):\n key = self._convert_for_reindex(key, axis)\n indexer = ax.get_indexer_for(key)\n keyarr = ax.reindex(keyarr)[0]\n else:\n keyarr, indexer, new_indexer = ax._reindex_non_unique(keyarr)\n\n self._validate_read_indexer(keyarr, indexer,\n o._get_axis_number(axis),\n raise_missing=raise_missing)\n return keyarr, indexer\n\n def _getitem_iterable(self, key, axis=None):\n \"\"\"\n Index current object with an an iterable key (which can be a boolean\n indexer, or a collection of keys).\n\n Parameters\n ----------\n key : iterable\n Target labels, or boolean indexer\n axis: int, default None\n Dimension on which the indexing is being made\n\n Raises\n ------\n KeyError\n If no key was found. Will change in the future to raise if not all\n keys were found.\n IndexingError\n If the boolean indexer is unalignable with the object being\n indexed.\n\n Returns\n -------\n scalar, DataFrame, or Series: indexed value(s),\n \"\"\"\n\n if axis is None:\n axis = self.axis or 0\n\n self._validate_key(key, axis)\n\n labels = self.obj._get_axis(axis)\n\n if com.is_bool_indexer(key):\n # A boolean indexer\n key = check_bool_indexer(labels, key)\n inds, = key.nonzero()\n return self.obj._take(inds, axis=axis)\n else:\n # A collection of keys\n keyarr, indexer = self._get_listlike_indexer(key, axis,\n raise_missing=False)\n return self.obj._reindex_with_indexers({axis: [keyarr, indexer]},\n copy=True, allow_dups=True)\n\n def _validate_read_indexer(self, key, indexer, axis, raise_missing=False):\n \"\"\"\n Check that indexer can be used to return a result (e.g. at least one\n element was found, unless the list of keys was actually empty).\n\n Parameters\n ----------\n key : list-like\n Target labels (only used to show correct error message)\n indexer: array-like of booleans\n Indices corresponding to the key (with -1 indicating not found)\n axis: int\n Dimension on which the indexing is being made\n raise_missing: bool\n Whether to raise a KeyError if some labels are not found. 
Will be\n removed in the future, and then this method will always behave as\n if raise_missing=True.\n\n Raises\n ------\n KeyError\n If at least one key was requested but none was found, and\n raise_missing=True.\n \"\"\"\n\n ax = self.obj._get_axis(axis)\n\n if len(key) == 0:\n return\n\n # Count missing values:\n missing = (indexer < 0).sum()\n\n if missing:\n if missing == len(indexer):\n raise KeyError(\n \"None of [{key}] are in the [{axis}]\".format(\n key=key, axis=self.obj._get_axis_name(axis)))\n\n # We (temporarily) allow for some missing keys with .loc, except in\n # some cases (e.g. setting) in which \"raise_missing\" will be False\n if not(self.name == 'loc' and not raise_missing):\n not_found = list(set(key) - set(ax))\n raise KeyError(\"{} not in index\".format(not_found))\n\n # we skip the warning on Categorical/Interval\n # as this check is actually done (check for\n # non-missing values), but a bit later in the\n # code, so we want to avoid warning & then\n # just raising\n\n _missing_key_warning = textwrap.dedent(\"\"\"\n Passing list-likes to .loc or [] with any missing label will raise\n KeyError in the future, you can use .reindex() as an alternative.\n\n See the documentation here:\n https://pandas.pydata.org/pandas-docs/stable/indexing.html#deprecate-loc-reindex-listlike\"\"\") # noqa\n\n if not (ax.is_categorical() or ax.is_interval()):\n warnings.warn(_missing_key_warning,\n FutureWarning, stacklevel=6)\n\n def _convert_to_indexer(self, obj, axis=None, is_setter=False,\n raise_missing=False):\n \"\"\"\n Convert indexing key into something we can use to do actual fancy\n indexing on an ndarray\n\n Examples\n ix[:5] -> slice(0, 5)\n ix[[1,2,3]] -> [1,2,3]\n ix[['foo', 'bar', 'baz']] -> [i, j, k] (indices of foo, bar, baz)\n\n Going by Zen of Python?\n 'In the face of ambiguity, refuse the temptation to guess.'\n raise AmbiguousIndexError with integer labels?\n - No, prefer label-based indexing\n \"\"\"\n if axis is None:\n axis = self.axis or 0\n\n labels = self.obj._get_axis(axis)\n\n if isinstance(obj, slice):\n return self._convert_slice_indexer(obj, axis)\n\n # try to find out correct indexer, if not type correct raise\n try:\n obj = self._convert_scalar_indexer(obj, axis)\n except TypeError:\n\n # but we will allow setting\n if is_setter:\n pass\n\n # see if we are positional in nature\n is_int_index = labels.is_integer()\n is_int_positional = is_integer(obj) and not is_int_index\n\n # if we are a label return me\n try:\n return labels.get_loc(obj)\n except LookupError:\n if isinstance(obj, tuple) and isinstance(labels, MultiIndex):\n if is_setter and len(obj) == labels.nlevels:\n return {'key': obj}\n raise\n except TypeError:\n pass\n except (ValueError):\n if not is_int_positional:\n raise\n\n # a positional\n if is_int_positional:\n\n # if we are setting and its not a valid location\n # its an insert which fails by definition\n if is_setter:\n\n # always valid\n if self.name == 'loc':\n return {'key': obj}\n\n # a positional\n if (obj >= self.obj.shape[axis] and\n not isinstance(labels, MultiIndex)):\n raise ValueError(\"cannot set by positional indexing with \"\n \"enlargement\")\n\n return obj\n\n if is_nested_tuple(obj, labels):\n return labels.get_locs(obj)\n\n elif is_list_like_indexer(obj):\n\n if com.is_bool_indexer(obj):\n obj = check_bool_indexer(labels, obj)\n inds, = obj.nonzero()\n return inds\n else:\n # When setting, missing keys are not allowed, even with .loc:\n kwargs = {'raise_missing': True if is_setter else\n raise_missing}\n return 
self._get_listlike_indexer(obj, axis, **kwargs)[1]\n else:\n try:\n return labels.get_loc(obj)\n except LookupError:\n # allow a not found key only if we are a setter\n if not is_list_like_indexer(obj) and is_setter:\n return {'key': obj}\n raise\n\n def _tuplify(self, loc):\n tup = [slice(None, None) for _ in range(self.ndim)]\n tup[0] = loc\n return tuple(tup)\n\n def _get_slice_axis(self, slice_obj, axis=None):\n obj = self.obj\n\n if axis is None:\n axis = self.axis or 0\n\n if not need_slice(slice_obj):\n return obj.copy(deep=False)\n indexer = self._convert_slice_indexer(slice_obj, axis)\n\n if isinstance(indexer, slice):\n return self._slice(indexer, axis=axis, kind='iloc')\n else:\n return self.obj._take(indexer, axis=axis)\n\n\nclass _IXIndexer(_NDFrameIndexer):\n \"\"\"A primarily label-location based indexer, with integer position\n fallback.\n\n Warning: Starting in 0.20.0, the .ix indexer is deprecated, in\n favor of the more strict .iloc and .loc indexers.\n\n ``.ix[]`` supports mixed integer and label based access. It is\n primarily label based, but will fall back to integer positional\n access unless the corresponding axis is of integer type.\n\n ``.ix`` is the most general indexer and will support any of the\n inputs in ``.loc`` and ``.iloc``. ``.ix`` also supports floating\n point label schemes. ``.ix`` is exceptionally useful when dealing\n with mixed positional and label based hierarchical indexes.\n\n However, when an axis is integer based, ONLY label based access\n and not positional access is supported. Thus, in such cases, it's\n usually better to be explicit and use ``.iloc`` or ``.loc``.\n\n See more at :ref:`Advanced Indexing <advanced>`.\n \"\"\"\n\n _ix_deprecation_warning = textwrap.dedent(\"\"\"\n .ix is deprecated. Please use\n .loc for label based indexing or\n .iloc for positional indexing\n\n See the documentation here:\n http://pandas.pydata.org/pandas-docs/stable/indexing.html#ix-indexer-is-deprecated\"\"\") # noqa\n\n def __init__(self, name, obj):\n warnings.warn(self._ix_deprecation_warning,\n DeprecationWarning, stacklevel=2)\n super(_IXIndexer, self).__init__(name, obj)\n\n @Appender(_NDFrameIndexer._validate_key.__doc__)\n def _validate_key(self, key, axis):\n if isinstance(key, slice):\n return True\n\n elif com.is_bool_indexer(key):\n return True\n\n elif is_list_like_indexer(key):\n return True\n\n else:\n\n self._convert_scalar_indexer(key, axis)\n\n return True\n\n def _convert_for_reindex(self, key, axis=None):\n \"\"\"\n Transform a list of keys into a new array ready to be used as axis of\n the object we return (e.g. 
including NaNs).\n\n Parameters\n ----------\n key : list-like\n Target labels\n axis: int\n Where the indexing is being made\n\n Returns\n -------\n list-like of labels\n \"\"\"\n\n if axis is None:\n axis = self.axis or 0\n labels = self.obj._get_axis(axis)\n\n if com.is_bool_indexer(key):\n key = check_bool_indexer(labels, key)\n return labels[key]\n\n if isinstance(key, Index):\n keyarr = labels._convert_index_indexer(key)\n else:\n # asarray can be unsafe, NumPy strings are weird\n keyarr = com.asarray_tuplesafe(key)\n\n if is_integer_dtype(keyarr):\n # Cast the indexer to uint64 if possible so\n # that the values returned from indexing are\n # also uint64.\n keyarr = labels._convert_arr_indexer(keyarr)\n\n if not labels.is_integer():\n keyarr = ensure_platform_int(keyarr)\n return labels.take(keyarr)\n\n return keyarr\n\n\nclass _LocationIndexer(_NDFrameIndexer):\n _exception = Exception\n\n def __getitem__(self, key):\n if type(key) is tuple:\n key = tuple(com.apply_if_callable(x, self.obj)\n for x in key)\n try:\n if self._is_scalar_access(key):\n return self._getitem_scalar(key)\n except (KeyError, IndexError, AttributeError):\n pass\n return self._getitem_tuple(key)\n else:\n # we by definition only have the 0th axis\n axis = self.axis or 0\n\n maybe_callable = com.apply_if_callable(key, self.obj)\n return self._getitem_axis(maybe_callable, axis=axis)\n\n def _is_scalar_access(self, key):\n raise NotImplementedError()\n\n def _getitem_scalar(self, key):\n raise NotImplementedError()\n\n def _getitem_axis(self, key, axis=None):\n raise NotImplementedError()\n\n def _getbool_axis(self, key, axis=None):\n if axis is None:\n axis = self.axis or 0\n labels = self.obj._get_axis(axis)\n key = check_bool_indexer(labels, key)\n inds, = key.nonzero()\n try:\n return self.obj._take(inds, axis=axis)\n except Exception as detail:\n raise self._exception(detail)\n\n def _get_slice_axis(self, slice_obj, axis=None):\n \"\"\" this is pretty simple as we just have to deal with labels \"\"\"\n if axis is None:\n axis = self.axis or 0\n\n obj = self.obj\n if not need_slice(slice_obj):\n return obj.copy(deep=False)\n\n labels = obj._get_axis(axis)\n indexer = labels.slice_indexer(slice_obj.start, slice_obj.stop,\n slice_obj.step, kind=self.name)\n\n if isinstance(indexer, slice):\n return self._slice(indexer, axis=axis, kind='iloc')\n else:\n return self.obj._take(indexer, axis=axis)\n\n\nclass _LocIndexer(_LocationIndexer):\n \"\"\"\n Access a group of rows and columns by label(s) or a boolean array.\n\n ``.loc[]`` is primarily label based, but may also be used with a\n boolean array.\n\n Allowed inputs are:\n\n - A single label, e.g. ``5`` or ``'a'``, (note that ``5`` is\n interpreted as a *label* of the index, and **never** as an\n integer position along the index).\n - A list or array of labels, e.g. ``['a', 'b', 'c']``.\n - A slice object with labels, e.g. ``'a':'f'``.\n\n .. warning:: Note that contrary to usual python slices, **both** the\n start and the stop are included\n\n - A boolean array of the same length as the axis being sliced,\n e.g. 
``[True, False, True]``.\n - A ``callable`` function with one argument (the calling Series, DataFrame\n or Panel) and that returns valid output for indexing (one of the above)\n\n See more at :ref:`Selection by Label <indexing.label>`\n\n Raises\n ------\n KeyError:\n when any items are not found\n\n See Also\n --------\n DataFrame.at : Access a single value for a row/column label pair.\n DataFrame.iloc : Access group of rows and columns by integer position(s).\n DataFrame.xs : Returns a cross-section (row(s) or column(s)) from the\n Series/DataFrame.\n Series.loc : Access group of values using labels.\n\n Examples\n --------\n **Getting values**\n\n >>> df = pd.DataFrame([[1, 2], [4, 5], [7, 8]],\n ... index=['cobra', 'viper', 'sidewinder'],\n ... columns=['max_speed', 'shield'])\n >>> df\n max_speed shield\n cobra 1 2\n viper 4 5\n sidewinder 7 8\n\n Single label. Note this returns the row as a Series.\n\n >>> df.loc['viper']\n max_speed 4\n shield 5\n Name: viper, dtype: int64\n\n List of labels. Note using ``[[]]`` returns a DataFrame.\n\n >>> df.loc[['viper', 'sidewinder']]\n max_speed shield\n viper 4 5\n sidewinder 7 8\n\n Single label for row and column\n\n >>> df.loc['cobra', 'shield']\n 2\n\n Slice with labels for row and single label for column. As mentioned\n above, note that both the start and stop of the slice are included.\n\n >>> df.loc['cobra':'viper', 'max_speed']\n cobra 1\n viper 4\n Name: max_speed, dtype: int64\n\n Boolean list with the same length as the row axis\n\n >>> df.loc[[False, False, True]]\n max_speed shield\n sidewinder 7 8\n\n Conditional that returns a boolean Series\n\n >>> df.loc[df['shield'] > 6]\n max_speed shield\n sidewinder 7 8\n\n Conditional that returns a boolean Series with column labels specified\n\n >>> df.loc[df['shield'] > 6, ['max_speed']]\n max_speed\n sidewinder 7\n\n Callable that returns a boolean Series\n\n >>> df.loc[lambda df: df['shield'] == 8]\n max_speed shield\n sidewinder 7 8\n\n **Setting values**\n\n Set value for all items matching the list of labels\n\n >>> df.loc[['viper', 'sidewinder'], ['shield']] = 50\n >>> df\n max_speed shield\n cobra 1 2\n viper 4 50\n sidewinder 7 50\n\n Set value for an entire row\n\n >>> df.loc['cobra'] = 10\n >>> df\n max_speed shield\n cobra 10 10\n viper 4 50\n sidewinder 7 50\n\n Set value for an entire column\n\n >>> df.loc[:, 'max_speed'] = 30\n >>> df\n max_speed shield\n cobra 30 10\n viper 30 50\n sidewinder 30 50\n\n Set value for rows matching callable condition\n\n >>> df.loc[df['shield'] > 35] = 0\n >>> df\n max_speed shield\n cobra 30 10\n viper 0 0\n sidewinder 0 0\n\n **Getting values on a DataFrame with an index that has integer labels**\n\n Another example using integers for the index\n\n >>> df = pd.DataFrame([[1, 2], [4, 5], [7, 8]],\n ... index=[7, 8, 9], columns=['max_speed', 'shield'])\n >>> df\n max_speed shield\n 7 1 2\n 8 4 5\n 9 7 8\n\n Slice with integer labels for rows. As mentioned above, note that both\n the start and stop of the slice are included.\n\n >>> df.loc[7:9]\n max_speed shield\n 7 1 2\n 8 4 5\n 9 7 8\n\n **Getting values with a MultiIndex**\n\n A number of examples using a DataFrame with a MultiIndex\n\n >>> tuples = [\n ... ('cobra', 'mark i'), ('cobra', 'mark ii'),\n ... ('sidewinder', 'mark i'), ('sidewinder', 'mark ii'),\n ... ('viper', 'mark ii'), ('viper', 'mark iii')\n ... ]\n >>> index = pd.MultiIndex.from_tuples(tuples)\n >>> values = [[12, 2], [0, 4], [10, 20],\n ... 
[1, 4], [7, 1], [16, 36]]\n >>> df = pd.DataFrame(values, columns=['max_speed', 'shield'], index=index)\n >>> df\n max_speed shield\n cobra mark i 12 2\n mark ii 0 4\n sidewinder mark i 10 20\n mark ii 1 4\n viper mark ii 7 1\n mark iii 16 36\n\n Single label. Note this returns a DataFrame with a single index.\n\n >>> df.loc['cobra']\n max_speed shield\n mark i 12 2\n mark ii 0 4\n\n Single index tuple. Note this returns a Series.\n\n >>> df.loc[('cobra', 'mark ii')]\n max_speed 0\n shield 4\n Name: (cobra, mark ii), dtype: int64\n\n Single label for row and column. Similar to passing in a tuple, this\n returns a Series.\n\n >>> df.loc['cobra', 'mark i']\n max_speed 12\n shield 2\n Name: (cobra, mark i), dtype: int64\n\n Single tuple. Note using ``[[]]`` returns a DataFrame.\n\n >>> df.loc[[('cobra', 'mark ii')]]\n max_speed shield\n cobra mark ii 0 4\n\n Single tuple for the index with a single label for the column\n\n >>> df.loc[('cobra', 'mark i'), 'shield']\n 2\n\n Slice from index tuple to single label\n\n >>> df.loc[('cobra', 'mark i'):'viper']\n max_speed shield\n cobra mark i 12 2\n mark ii 0 4\n sidewinder mark i 10 20\n mark ii 1 4\n viper mark ii 7 1\n mark iii 16 36\n\n Slice from index tuple to index tuple\n\n >>> df.loc[('cobra', 'mark i'):('viper', 'mark ii')]\n max_speed shield\n cobra mark i 12 2\n mark ii 0 4\n sidewinder mark i 10 20\n mark ii 1 4\n viper mark ii 7 1\n \"\"\"\n\n _valid_types = (\"labels (MUST BE IN THE INDEX), slices of labels (BOTH \"\n \"endpoints included! Can be slices of integers if the \"\n \"index is integers), listlike of labels, boolean\")\n _exception = KeyError\n\n @Appender(_NDFrameIndexer._validate_key.__doc__)\n def _validate_key(self, key, axis):\n\n # valid for a collection of labels (we check their presence later)\n # slice of labels (where start-end in labels)\n # slice of integers (only if in the labels)\n # boolean\n\n if isinstance(key, slice):\n return\n\n if com.is_bool_indexer(key):\n return\n\n if not is_list_like_indexer(key):\n self._convert_scalar_indexer(key, axis)\n\n def _is_scalar_access(self, key):\n # this is a shortcut accessor to both .loc and .iloc\n # that provide the equivalent access of .at and .iat\n # a) avoid getting things via sections and (to minimize dtype changes)\n # b) provide a performant path\n if not hasattr(key, '__len__'):\n return False\n\n if len(key) != self.ndim:\n return False\n\n for i, k in enumerate(key):\n if not is_scalar(k):\n return False\n\n ax = self.obj.axes[i]\n if isinstance(ax, MultiIndex):\n return False\n\n if not ax.is_unique:\n return False\n\n return True\n\n def _getitem_scalar(self, key):\n # a fast-path to scalar access\n # if not, raise\n values = self.obj._get_value(*key)\n return values\n\n def _get_partial_string_timestamp_match_key(self, key, labels):\n \"\"\"Translate any partial string timestamp matches in key, returning the\n new key (GH 10331)\"\"\"\n if isinstance(labels, MultiIndex):\n if (isinstance(key, str) and labels.levels[0].is_all_dates):\n # Convert key '2016-01-01' to\n # ('2016-01-01'[, slice(None, None, None)]+)\n key = tuple([key] + [slice(None)] * (len(labels.levels) - 1))\n\n if isinstance(key, tuple):\n # Convert (..., '2016-01-01', ...) 
in tuple to\n # (..., slice('2016-01-01', '2016-01-01', None), ...)\n new_key = []\n for i, component in enumerate(key):\n if (isinstance(component, str) and\n labels.levels[i].is_all_dates):\n new_key.append(slice(component, component, None))\n else:\n new_key.append(component)\n key = tuple(new_key)\n\n return key\n\n def _getitem_axis(self, key, axis=None):\n if axis is None:\n axis = self.axis or 0\n\n key = item_from_zerodim(key)\n if is_iterator(key):\n key = list(key)\n\n labels = self.obj._get_axis(axis)\n key = self._get_partial_string_timestamp_match_key(key, labels)\n\n if isinstance(key, slice):\n self._validate_key(key, axis)\n return self._get_slice_axis(key, axis=axis)\n elif com.is_bool_indexer(key):\n return self._getbool_axis(key, axis=axis)\n elif is_list_like_indexer(key):\n\n # convert various list-like indexers\n # to a list of keys\n # we will use the *values* of the object\n # and NOT the index if its a PandasObject\n if isinstance(labels, MultiIndex):\n\n if isinstance(key, (ABCSeries, np.ndarray)) and key.ndim <= 1:\n # Series, or 0,1 ndim ndarray\n # GH 14730\n key = list(key)\n elif isinstance(key, ABCDataFrame):\n # GH 15438\n raise NotImplementedError(\"Indexing a MultiIndex with a \"\n \"DataFrame key is not \"\n \"implemented\")\n elif hasattr(key, 'ndim') and key.ndim > 1:\n raise NotImplementedError(\"Indexing a MultiIndex with a \"\n \"multidimensional key is not \"\n \"implemented\")\n\n if (not isinstance(key, tuple) and len(key) > 1 and\n not isinstance(key[0], tuple)):\n key = tuple([key])\n\n # an iterable multi-selection\n if not (isinstance(key, tuple) and isinstance(labels, MultiIndex)):\n\n if hasattr(key, 'ndim') and key.ndim > 1:\n raise ValueError('Cannot index with multidimensional key')\n\n return self._getitem_iterable(key, axis=axis)\n\n # nested tuple slicing\n if is_nested_tuple(key, labels):\n locs = labels.get_locs(key)\n indexer = [slice(None)] * self.ndim\n indexer[axis] = locs\n return self.obj.iloc[tuple(indexer)]\n\n # fall thru to straight lookup\n self._validate_key(key, axis)\n return self._get_label(key, axis=axis)\n\n\nclass _iLocIndexer(_LocationIndexer):\n \"\"\"\n Purely integer-location based indexing for selection by position.\n\n ``.iloc[]`` is primarily integer position based (from ``0`` to\n ``length-1`` of the axis), but may also be used with a boolean\n array.\n\n Allowed inputs are:\n\n - An integer, e.g. ``5``.\n - A list or array of integers, e.g. ``[4, 3, 0]``.\n - A slice object with ints, e.g. ``1:7``.\n - A boolean array.\n - A ``callable`` function with one argument (the calling Series, DataFrame\n or Panel) and that returns valid output for indexing (one of the above).\n This is useful in method chains, when you don't have a reference to the\n calling object, but would like to base your selection on some value.\n\n ``.iloc`` will raise ``IndexError`` if a requested indexer is\n out-of-bounds, except *slice* indexers which allow out-of-bounds\n indexing (this conforms with python/numpy *slice* semantics).\n\n See more at ref:`Selection by Position <indexing.integer>`.\n\n See Also\n --------\n DataFrame.iat : Fast integer location scalar accessor.\n DataFrame.loc : Purely label-location based indexer for selection by label.\n Series.iloc : Purely integer-location based indexing for\n selection by position.\n\n Examples\n --------\n\n >>> mydict = [{'a': 1, 'b': 2, 'c': 3, 'd': 4},\n ... {'a': 100, 'b': 200, 'c': 300, 'd': 400},\n ... 
{'a': 1000, 'b': 2000, 'c': 3000, 'd': 4000 }]\n >>> df = pd.DataFrame(mydict)\n >>> df\n a b c d\n 0 1 2 3 4\n 1 100 200 300 400\n 2 1000 2000 3000 4000\n\n **Indexing just the rows**\n\n With a scalar integer.\n\n >>> type(df.iloc[0])\n <class 'pandas.core.series.Series'>\n >>> df.iloc[0]\n a 1\n b 2\n c 3\n d 4\n Name: 0, dtype: int64\n\n With a list of integers.\n\n >>> df.iloc[[0]]\n a b c d\n 0 1 2 3 4\n >>> type(df.iloc[[0]])\n <class 'pandas.core.frame.DataFrame'>\n\n >>> df.iloc[[0, 1]]\n a b c d\n 0 1 2 3 4\n 1 100 200 300 400\n\n With a `slice` object.\n\n >>> df.iloc[:3]\n a b c d\n 0 1 2 3 4\n 1 100 200 300 400\n 2 1000 2000 3000 4000\n\n With a boolean mask the same length as the index.\n\n >>> df.iloc[[True, False, True]]\n a b c d\n 0 1 2 3 4\n 2 1000 2000 3000 4000\n\n With a callable, useful in method chains. The `x` passed\n to the ``lambda`` is the DataFrame being sliced. This selects\n the rows whose index label even.\n\n >>> df.iloc[lambda x: x.index % 2 == 0]\n a b c d\n 0 1 2 3 4\n 2 1000 2000 3000 4000\n\n **Indexing both axes**\n\n You can mix the indexer types for the index and columns. Use ``:`` to\n select the entire axis.\n\n With scalar integers.\n\n >>> df.iloc[0, 1]\n 2\n\n With lists of integers.\n\n >>> df.iloc[[0, 2], [1, 3]]\n b d\n 0 2 4\n 2 2000 4000\n\n With `slice` objects.\n\n >>> df.iloc[1:3, 0:3]\n a b c\n 1 100 200 300\n 2 1000 2000 3000\n\n With a boolean array whose length matches the columns.\n\n >>> df.iloc[:, [True, False, True, False]]\n a c\n 0 1 3\n 1 100 300\n 2 1000 3000\n\n With a callable function that expects the Series or DataFrame.\n\n >>> df.iloc[:, lambda df: [0, 2]]\n a c\n 0 1 3\n 1 100 300\n 2 1000 3000\n \"\"\"\n\n _valid_types = (\"integer, integer slice (START point is INCLUDED, END \"\n \"point is EXCLUDED), listlike of integers, boolean array\")\n _exception = IndexError\n\n def _validate_key(self, key, axis):\n if com.is_bool_indexer(key):\n if hasattr(key, 'index') and isinstance(key.index, Index):\n if key.index.inferred_type == 'integer':\n raise NotImplementedError(\"iLocation based boolean \"\n \"indexing on an integer type \"\n \"is not available\")\n raise ValueError(\"iLocation based boolean indexing cannot use \"\n \"an indexable as a mask\")\n return\n\n if isinstance(key, slice):\n return\n elif is_integer(key):\n self._validate_integer(key, axis)\n elif isinstance(key, tuple):\n # a tuple should already have been caught by this point\n # so don't treat a tuple as a valid indexer\n raise IndexingError('Too many indexers')\n elif is_list_like_indexer(key):\n arr = np.array(key)\n len_axis = len(self.obj._get_axis(axis))\n\n # check that the key has a numeric dtype\n if not is_numeric_dtype(arr.dtype):\n raise IndexError(\".iloc requires numeric indexers, got \"\n \"{arr}\".format(arr=arr))\n\n # check that the key does not exceed the maximum size of the index\n if len(arr) and (arr.max() >= len_axis or arr.min() < -len_axis):\n raise IndexError(\"positional indexers are out-of-bounds\")\n else:\n raise ValueError(\"Can only index by location with \"\n \"a [{types}]\".format(types=self._valid_types))\n\n def _has_valid_setitem_indexer(self, indexer):\n self._has_valid_positional_setitem_indexer(indexer)\n\n def _is_scalar_access(self, key):\n # this is a shortcut accessor to both .loc and .iloc\n # that provide the equivalent access of .at and .iat\n # a) avoid getting things via sections and (to minimize dtype changes)\n # b) provide a performant path\n if not hasattr(key, '__len__'):\n return False\n\n if 
len(key) != self.ndim:\n return False\n\n for i, k in enumerate(key):\n if not is_integer(k):\n return False\n\n ax = self.obj.axes[i]\n if not ax.is_unique:\n return False\n\n return True\n\n def _getitem_scalar(self, key):\n # a fast-path to scalar access\n # if not, raise\n values = self.obj._get_value(*key, takeable=True)\n return values\n\n def _validate_integer(self, key, axis):\n \"\"\"\n Check that 'key' is a valid position in the desired axis.\n\n Parameters\n ----------\n key : int\n Requested position\n axis : int\n Desired axis\n\n Returns\n -------\n None\n\n Raises\n ------\n IndexError\n If 'key' is not a valid position in axis 'axis'\n \"\"\"\n\n len_axis = len(self.obj._get_axis(axis))\n if key >= len_axis or key < -len_axis:\n raise IndexError(\"single positional indexer is out-of-bounds\")\n\n def _getitem_tuple(self, tup):\n\n self._has_valid_tuple(tup)\n try:\n return self._getitem_lowerdim(tup)\n except IndexingError:\n pass\n\n retval = self.obj\n axis = 0\n for i, key in enumerate(tup):\n if i >= self.obj.ndim:\n raise IndexingError('Too many indexers')\n\n if com.is_null_slice(key):\n axis += 1\n continue\n\n retval = getattr(retval, self.name)._getitem_axis(key, axis=axis)\n\n # if the dim was reduced, then pass a lower-dim the next time\n if retval.ndim < self.ndim:\n axis -= 1\n\n # try to get for the next axis\n axis += 1\n\n return retval\n\n def _get_slice_axis(self, slice_obj, axis=None):\n if axis is None:\n axis = self.axis or 0\n obj = self.obj\n\n if not need_slice(slice_obj):\n return obj.copy(deep=False)\n\n slice_obj = self._convert_slice_indexer(slice_obj, axis)\n if isinstance(slice_obj, slice):\n return self._slice(slice_obj, axis=axis, kind='iloc')\n else:\n return self.obj._take(slice_obj, axis=axis)\n\n def _get_list_axis(self, key, axis=None):\n \"\"\"\n Return Series values by list or array of integers\n\n Parameters\n ----------\n key : list-like positional indexer\n axis : int (can only be zero)\n\n Returns\n -------\n Series object\n \"\"\"\n if axis is None:\n axis = self.axis or 0\n try:\n return self.obj._take(key, axis=axis)\n except IndexError:\n # re-raise with different error message\n raise IndexError(\"positional indexers are out-of-bounds\")\n\n def _getitem_axis(self, key, axis=None):\n if axis is None:\n axis = self.axis or 0\n\n if isinstance(key, slice):\n return self._get_slice_axis(key, axis=axis)\n\n if isinstance(key, list):\n key = np.asarray(key)\n\n if com.is_bool_indexer(key):\n self._validate_key(key, axis)\n return self._getbool_axis(key, axis=axis)\n\n # a list of integers\n elif is_list_like_indexer(key):\n return self._get_list_axis(key, axis=axis)\n\n # a single integer\n else:\n key = item_from_zerodim(key)\n if not is_integer(key):\n raise TypeError(\"Cannot index by location index with a \"\n \"non-integer key\")\n\n # validate the location\n self._validate_integer(key, axis)\n\n return self._get_loc(key, axis=axis)\n\n def _convert_to_indexer(self, obj, axis=None, is_setter=False):\n \"\"\" much simpler as we only have to deal with our valid types \"\"\"\n if axis is None:\n axis = self.axis or 0\n\n # make need to convert a float key\n if isinstance(obj, slice):\n return self._convert_slice_indexer(obj, axis)\n\n elif is_float(obj):\n return self._convert_scalar_indexer(obj, axis)\n\n try:\n self._validate_key(obj, axis)\n return obj\n except ValueError:\n raise ValueError(\"Can only index by location with \"\n \"a [{types}]\".format(types=self._valid_types))\n\n\nclass 
_ScalarAccessIndexer(_NDFrameIndexer):\n \"\"\" access scalars quickly \"\"\"\n\n def _convert_key(self, key, is_setter=False):\n return list(key)\n\n def __getitem__(self, key):\n if not isinstance(key, tuple):\n\n # we could have a convertible item here (e.g. Timestamp)\n if not is_list_like_indexer(key):\n key = tuple([key])\n else:\n raise ValueError('Invalid call for scalar access (getting)!')\n\n key = self._convert_key(key)\n return self.obj._get_value(*key, takeable=self._takeable)\n\n def __setitem__(self, key, value):\n if isinstance(key, tuple):\n key = tuple(com.apply_if_callable(x, self.obj)\n for x in key)\n else:\n # scalar callable may return tuple\n key = com.apply_if_callable(key, self.obj)\n\n if not isinstance(key, tuple):\n key = self._tuplify(key)\n if len(key) != self.obj.ndim:\n raise ValueError('Not enough indexers for scalar access '\n '(setting)!')\n key = list(self._convert_key(key, is_setter=True))\n key.append(value)\n self.obj._set_value(*key, takeable=self._takeable)\n\n\nclass _AtIndexer(_ScalarAccessIndexer):\n \"\"\"\n Access a single value for a row/column label pair.\n\n Similar to ``loc``, in that both provide label-based lookups. Use\n ``at`` if you only need to get or set a single value in a DataFrame\n or Series.\n\n Raises\n ------\n KeyError\n When label does not exist in DataFrame\n\n See Also\n --------\n DataFrame.iat : Access a single value for a row/column pair by integer\n position.\n DataFrame.loc : Access a group of rows and columns by label(s).\n Series.at : Access a single value using a label.\n\n Examples\n --------\n >>> df = pd.DataFrame([[0, 2, 3], [0, 4, 1], [10, 20, 30]],\n ... index=[4, 5, 6], columns=['A', 'B', 'C'])\n >>> df\n A B C\n 4 0 2 3\n 5 0 4 1\n 6 10 20 30\n\n Get value at specified row/column pair\n\n >>> df.at[4, 'B']\n 2\n\n Set value at specified row/column pair\n\n >>> df.at[4, 'B'] = 10\n >>> df.at[4, 'B']\n 10\n\n Get value within a Series\n\n >>> df.loc[5].at['B']\n 4\n \"\"\"\n\n _takeable = False\n\n def _convert_key(self, key, is_setter=False):\n \"\"\" require they keys to be the same type as the index (so we don't\n fallback)\n \"\"\"\n\n # allow arbitrary setting\n if is_setter:\n return list(key)\n\n for ax, i in zip(self.obj.axes, key):\n if ax.is_integer():\n if not is_integer(i):\n raise ValueError(\"At based indexing on an integer index \"\n \"can only have integer indexers\")\n else:\n if is_integer(i) and not ax.holds_integer():\n raise ValueError(\"At based indexing on an non-integer \"\n \"index can only have non-integer \"\n \"indexers\")\n return key\n\n\nclass _iAtIndexer(_ScalarAccessIndexer):\n \"\"\"\n Access a single value for a row/column pair by integer position.\n\n Similar to ``iloc``, in that both provide integer-based lookups. Use\n ``iat`` if you only need to get or set a single value in a DataFrame\n or Series.\n\n Raises\n ------\n IndexError\n When integer position is out of bounds\n\n See Also\n --------\n DataFrame.at : Access a single value for a row/column label pair.\n DataFrame.loc : Access a group of rows and columns by label(s).\n DataFrame.iloc : Access a group of rows and columns by integer position(s).\n\n Examples\n --------\n >>> df = pd.DataFrame([[0, 2, 3], [0, 4, 1], [10, 20, 30]],\n ... 
columns=['A', 'B', 'C'])\n >>> df\n A B C\n 0 0 2 3\n 1 0 4 1\n 2 10 20 30\n\n Get value at specified row/column pair\n\n >>> df.iat[1, 2]\n 1\n\n Set value at specified row/column pair\n\n >>> df.iat[1, 2] = 10\n >>> df.iat[1, 2]\n 10\n\n Get value within a series\n\n >>> df.loc[0].iat[1]\n 2\n \"\"\"\n\n _takeable = True\n\n def _has_valid_setitem_indexer(self, indexer):\n self._has_valid_positional_setitem_indexer(indexer)\n\n def _convert_key(self, key, is_setter=False):\n \"\"\" require integer args (and convert to label arguments) \"\"\"\n for a, i in zip(self.obj.axes, key):\n if not is_integer(i):\n raise ValueError(\"iAt based indexing can only have integer \"\n \"indexers\")\n return key\n\n\ndef length_of_indexer(indexer, target=None):\n \"\"\"\n return the length of a single non-tuple indexer which could be a slice\n \"\"\"\n if target is not None and isinstance(indexer, slice):\n target_len = len(target)\n start = indexer.start\n stop = indexer.stop\n step = indexer.step\n if start is None:\n start = 0\n elif start < 0:\n start += target_len\n if stop is None or stop > target_len:\n stop = target_len\n elif stop < 0:\n stop += target_len\n if step is None:\n step = 1\n elif step < 0:\n step = -step\n return (stop - start + step - 1) // step\n elif isinstance(indexer, (ABCSeries, Index, np.ndarray, list)):\n return len(indexer)\n elif not is_list_like_indexer(indexer):\n return 1\n raise AssertionError(\"cannot find the length of the indexer\")\n\n\ndef convert_to_index_sliceable(obj, key):\n \"\"\"\n if we are index sliceable, then return my slicer, otherwise return None\n \"\"\"\n idx = obj.index\n if isinstance(key, slice):\n return idx._convert_slice_indexer(key, kind='getitem')\n\n elif isinstance(key, str):\n\n # we are an actual column\n if obj._data.items.contains(key):\n return None\n\n # We might have a datetimelike string that we can translate to a\n # slice here via partial string indexing\n if idx.is_all_dates:\n try:\n return idx._get_string_slice(key)\n except (KeyError, ValueError, NotImplementedError):\n return None\n\n return None\n\n\ndef check_bool_indexer(ax, key):\n # boolean indexing, need to check that the data are aligned, otherwise\n # disallowed\n\n # this function assumes that is_bool_indexer(key) == True\n\n result = key\n if isinstance(key, ABCSeries) and not key.index.equals(ax):\n result = result.reindex(ax)\n mask = isna(result._values)\n if mask.any():\n raise IndexingError('Unalignable boolean Series provided as '\n 'indexer (index of the boolean Series and of '\n 'the indexed object do not match')\n result = result.astype(bool)._values\n elif is_sparse(result):\n result = result.to_dense()\n result = np.asarray(result, dtype=bool)\n else:\n # is_bool_indexer has already checked for nulls in the case of an\n # object array key, so no check needed here\n result = np.asarray(result, dtype=bool)\n\n return result\n\n\ndef check_setitem_lengths(indexer, value, values):\n \"\"\"\n Validate that value and indexer are the same length.\n\n An special-case is allowed for when the indexer is a boolean array\n and the number of true values equals the length of ``value``. 
In\n this case, no exception is raised.\n\n Parameters\n ----------\n indexer : sequence\n The key for the setitem\n value : array-like\n The value for the setitem\n values : array-like\n The values being set into\n\n Returns\n -------\n None\n\n Raises\n ------\n ValueError\n When the indexer is an ndarray or list and the lengths don't\n match.\n \"\"\"\n # boolean with truth values == len of the value is ok too\n if isinstance(indexer, (np.ndarray, list)):\n if is_list_like(value) and len(indexer) != len(value):\n if not (isinstance(indexer, np.ndarray) and\n indexer.dtype == np.bool_ and\n len(indexer[indexer]) == len(value)):\n raise ValueError(\"cannot set using a list-like indexer \"\n \"with a different length than the value\")\n # slice\n elif isinstance(indexer, slice):\n\n if is_list_like(value) and len(values):\n if len(value) != length_of_indexer(indexer, values):\n raise ValueError(\"cannot set using a slice indexer with a \"\n \"different length than the value\")\n\n\ndef convert_missing_indexer(indexer):\n \"\"\"\n reverse convert a missing indexer, which is a dict\n return the scalar indexer and a boolean indicating if we converted\n \"\"\"\n\n if isinstance(indexer, dict):\n\n # a missing key (but not a tuple indexer)\n indexer = indexer['key']\n\n if isinstance(indexer, bool):\n raise KeyError(\"cannot use a single bool to index into setitem\")\n return indexer, True\n\n return indexer, False\n\n\ndef convert_from_missing_indexer_tuple(indexer, axes):\n \"\"\"\n create a filtered indexer that doesn't have any missing indexers\n \"\"\"\n\n def get_indexer(_i, _idx):\n return (axes[_i].get_loc(_idx['key']) if isinstance(_idx, dict) else\n _idx)\n\n return tuple(get_indexer(_i, _idx) for _i, _idx in enumerate(indexer))\n\n\ndef maybe_convert_indices(indices, n):\n \"\"\"\n Attempt to convert indices into valid, positive indices.\n\n If we have negative indices, translate to positive here.\n If we have indices that are out-of-bounds, raise an IndexError.\n\n Parameters\n ----------\n indices : array-like\n The array of indices that we are to convert.\n n : int\n The number of elements in the array that we are indexing.\n\n Returns\n -------\n valid_indices : array-like\n An array-like of positive indices that correspond to the ones\n that were passed in initially to this function.\n\n Raises\n ------\n IndexError : one of the converted indices either exceeded the number\n of elements (specified by `n`) OR was still negative.\n \"\"\"\n\n if isinstance(indices, list):\n indices = np.array(indices)\n if len(indices) == 0:\n # If list is empty, np.array will return float and cause indexing\n # errors.\n return np.empty(0, dtype=np.intp)\n\n mask = indices < 0\n if mask.any():\n indices = indices.copy()\n indices[mask] += n\n\n mask = (indices >= n) | (indices < 0)\n if mask.any():\n raise IndexError(\"indices are out-of-bounds\")\n return indices\n\n\ndef validate_indices(indices, n):\n \"\"\"\n Perform bounds-checking for an indexer.\n\n -1 is allowed for indicating missing values.\n\n Parameters\n ----------\n indices : ndarray\n n : int\n length of the array being indexed\n\n Raises\n ------\n ValueError\n\n Examples\n --------\n >>> validate_indices([1, 2], 3)\n # OK\n >>> validate_indices([1, -2], 3)\n ValueError\n >>> validate_indices([1, 2, 3], 3)\n IndexError\n >>> validate_indices([-1, -1], 0)\n # OK\n >>> validate_indices([0, 1], 0)\n IndexError\n \"\"\"\n if len(indices):\n min_idx = indices.min()\n if min_idx < -1:\n msg = (\"'indices' contains values less than 
allowed ({} < {})\"\n .format(min_idx, -1))\n raise ValueError(msg)\n\n max_idx = indices.max()\n if max_idx >= n:\n raise IndexError(\"indices are out-of-bounds\")\n\n\ndef maybe_convert_ix(*args):\n \"\"\"\n We likely want to take the cross-product\n \"\"\"\n\n ixify = True\n for arg in args:\n if not isinstance(arg, (np.ndarray, list, ABCSeries, Index)):\n ixify = False\n\n if ixify:\n return np.ix_(*args)\n else:\n return args\n\n\ndef is_nested_tuple(tup, labels):\n # check for a compatible nested tuple and multiindexes among the axes\n if not isinstance(tup, tuple):\n return False\n\n for i, k in enumerate(tup):\n\n if is_list_like(k) or isinstance(k, slice):\n return isinstance(labels, MultiIndex)\n\n return False\n\n\ndef is_list_like_indexer(key):\n # allow a list_like, but exclude NamedTuples which can be indexers\n return is_list_like(key) and not (isinstance(key, tuple) and\n type(key) is not tuple)\n\n\ndef is_label_like(key):\n # select a label or row\n return not isinstance(key, slice) and not is_list_like_indexer(key)\n\n\ndef need_slice(obj):\n return (obj.start is not None or obj.stop is not None or\n (obj.step is not None and obj.step != 1))\n\n\ndef maybe_droplevels(index, key):\n # drop levels\n original_index = index\n if isinstance(key, tuple):\n for _ in key:\n try:\n index = index.droplevel(0)\n except ValueError:\n # we have dropped too much, so back out\n return original_index\n else:\n try:\n index = index.droplevel(0)\n except ValueError:\n pass\n\n return index\n\n\ndef _non_reducing_slice(slice_):\n \"\"\"\n Ensurse that a slice doesn't reduce to a Series or Scalar.\n\n Any user-paseed `subset` should have this called on it\n to make sure we're always working with DataFrames.\n \"\"\"\n # default to column slice, like DataFrame\n # ['A', 'B'] -> IndexSlices[:, ['A', 'B']]\n kinds = (ABCSeries, np.ndarray, Index, list, str)\n if isinstance(slice_, kinds):\n slice_ = IndexSlice[:, slice_]\n\n def pred(part):\n # true when slice does *not* reduce, False when part is a tuple,\n # i.e. MultiIndex slice\n return ((isinstance(part, slice) or is_list_like(part))\n and not isinstance(part, tuple))\n\n if not is_list_like(slice_):\n if not isinstance(slice_, slice):\n # a 1-d slice, like df.loc[1]\n slice_ = [[slice_]]\n else:\n # slice(a, b, c)\n slice_ = [slice_] # to tuplize later\n else:\n slice_ = [part if pred(part) else [part] for part in slice_]\n return tuple(slice_)\n\n\ndef _maybe_numeric_slice(df, slice_, include_bool=False):\n \"\"\"\n want nice defaults for background_gradient that don't break\n with non-numeric data. But if slice_ is passed go with that.\n \"\"\"\n if slice_ is None:\n dtypes = [np.number]\n if include_bool:\n dtypes.append(bool)\n slice_ = IndexSlice[:, df.select_dtypes(include=dtypes).columns]\n return slice_\n",
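A minimal usage sketch of the indexer behaviour documented in the _LocIndexer / _iLocIndexer / _AtIndexer docstrings above. The frame reuses the cobra/viper/sidewinder example from those docstrings; the snippet only assumes pandas is importable as pd and is illustrative, not part of the source file.

import pandas as pd

# Same frame as the .loc docstring example above (illustrative data).
df = pd.DataFrame({'max_speed': [1, 4, 7], 'shield': [2, 5, 8]},
                  index=['cobra', 'viper', 'sidewinder'])

# .loc is label based: both slice endpoints are included.
df.loc['cobra':'viper', 'max_speed']   # rows 'cobra' and 'viper'

# .iloc is position based: the stop of the slice is excluded.
df.iloc[0:2, 0]                        # first two rows, first column

# Boolean masks must match the length of the axis being sliced.
df.loc[df['shield'] > 6]               # the 'sidewinder' row

# .at / .iat are the fast scalar accessors (_AtIndexer / _iAtIndexer).
df.at['viper', 'shield']               # 5
df.iat[1, 1]                           # 5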
"# pylint: disable=E1101\n\nfrom collections import OrderedDict\nfrom datetime import datetime\n\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import DataFrame, Series\nfrom pandas.core.indexes.datetimes import date_range\nimport pandas.util.testing as tm\nfrom pandas.util.testing import assert_frame_equal, assert_series_equal\n\ndti = date_range(start=datetime(2005, 1, 1),\n end=datetime(2005, 1, 10), freq='Min')\n\ntest_series = Series(np.random.rand(len(dti)), dti)\ntest_frame = DataFrame(\n {'A': test_series, 'B': test_series, 'C': np.arange(len(dti))})\n\n\ndef test_str():\n\n r = test_series.resample('H')\n assert ('DatetimeIndexResampler [freq=<Hour>, axis=0, closed=left, '\n 'label=left, convention=start, base=0]' in str(r))\n\n\ndef test_api():\n\n r = test_series.resample('H')\n result = r.mean()\n assert isinstance(result, Series)\n assert len(result) == 217\n\n r = test_series.to_frame().resample('H')\n result = r.mean()\n assert isinstance(result, DataFrame)\n assert len(result) == 217\n\n\ndef test_groupby_resample_api():\n\n # GH 12448\n # .groupby(...).resample(...) hitting warnings\n # when appropriate\n df = DataFrame({'date': pd.date_range(start='2016-01-01',\n periods=4,\n freq='W'),\n 'group': [1, 1, 2, 2],\n 'val': [5, 6, 7, 8]}).set_index('date')\n\n # replication step\n i = pd.date_range('2016-01-03', periods=8).tolist() + \\\n pd.date_range('2016-01-17', periods=8).tolist()\n index = pd.MultiIndex.from_arrays([[1] * 8 + [2] * 8, i],\n names=['group', 'date'])\n expected = DataFrame({'val': [5] * 7 + [6] + [7] * 7 + [8]},\n index=index)\n result = df.groupby('group').apply(\n lambda x: x.resample('1D').ffill())[['val']]\n assert_frame_equal(result, expected)\n\n\ndef test_groupby_resample_on_api():\n\n # GH 15021\n # .groupby(...).resample(on=...) 
results in an unexpected\n # keyword warning.\n df = DataFrame({'key': ['A', 'B'] * 5,\n 'dates': pd.date_range('2016-01-01', periods=10),\n 'values': np.random.randn(10)})\n\n expected = df.set_index('dates').groupby('key').resample('D').mean()\n\n result = df.groupby('key').resample('D', on='dates').mean()\n assert_frame_equal(result, expected)\n\n\ndef test_pipe():\n # GH17905\n\n # series\n r = test_series.resample('H')\n expected = r.max() - r.mean()\n result = r.pipe(lambda x: x.max() - x.mean())\n tm.assert_series_equal(result, expected)\n\n # dataframe\n r = test_frame.resample('H')\n expected = r.max() - r.mean()\n result = r.pipe(lambda x: x.max() - x.mean())\n tm.assert_frame_equal(result, expected)\n\n\ndef test_getitem():\n\n r = test_frame.resample('H')\n tm.assert_index_equal(r._selected_obj.columns, test_frame.columns)\n\n r = test_frame.resample('H')['B']\n assert r._selected_obj.name == test_frame.columns[1]\n\n # technically this is allowed\n r = test_frame.resample('H')['A', 'B']\n tm.assert_index_equal(r._selected_obj.columns,\n test_frame.columns[[0, 1]])\n\n r = test_frame.resample('H')['A', 'B']\n tm.assert_index_equal(r._selected_obj.columns,\n test_frame.columns[[0, 1]])\n\n\[email protected]('key', [['D'], ['A', 'D']])\ndef test_select_bad_cols(key):\n g = test_frame.resample('H')\n # 'A' should not be referenced as a bad column...\n # will have to rethink regex if you change message!\n msg = r\"^\\\"Columns not found: 'D'\\\"$\"\n with pytest.raises(KeyError, match=msg):\n g[key]\n\n\ndef test_attribute_access():\n\n r = test_frame.resample('H')\n tm.assert_series_equal(r.A.sum(), r['A'].sum())\n\n\ndef test_api_compat_before_use():\n\n # make sure that we are setting the binner\n # on these attributes\n for attr in ['groups', 'ngroups', 'indices']:\n rng = pd.date_range('1/1/2012', periods=100, freq='S')\n ts = Series(np.arange(len(rng)), index=rng)\n rs = ts.resample('30s')\n\n # before use\n getattr(rs, attr)\n\n # after grouper is initialized is ok\n rs.mean()\n getattr(rs, attr)\n\n\ndef tests_skip_nuisance():\n\n df = test_frame\n df['D'] = 'foo'\n r = df.resample('H')\n result = r[['A', 'B']].sum()\n expected = pd.concat([r.A.sum(), r.B.sum()], axis=1)\n assert_frame_equal(result, expected)\n\n expected = r[['A', 'B', 'C']].sum()\n result = r.sum()\n assert_frame_equal(result, expected)\n\n\ndef test_downsample_but_actually_upsampling():\n\n # this is reindex / asfreq\n rng = pd.date_range('1/1/2012', periods=100, freq='S')\n ts = Series(np.arange(len(rng), dtype='int64'), index=rng)\n result = ts.resample('20s').asfreq()\n expected = Series([0, 20, 40, 60, 80],\n index=pd.date_range('2012-01-01 00:00:00',\n freq='20s',\n periods=5))\n assert_series_equal(result, expected)\n\n\ndef test_combined_up_downsampling_of_irregular():\n\n # since we are reallydoing an operation like this\n # ts2.resample('2s').mean().ffill()\n # preserve these semantics\n\n rng = pd.date_range('1/1/2012', periods=100, freq='S')\n ts = Series(np.arange(len(rng)), index=rng)\n ts2 = ts.iloc[[0, 1, 2, 3, 5, 7, 11, 15, 16, 25, 30]]\n\n with tm.assert_produces_warning(FutureWarning,\n check_stacklevel=False):\n result = ts2.resample('2s', how='mean', fill_method='ffill')\n expected = ts2.resample('2s').mean().ffill()\n assert_series_equal(result, expected)\n\n\ndef test_transform():\n\n r = test_series.resample('20min')\n expected = test_series.groupby(\n pd.Grouper(freq='20min')).transform('mean')\n result = r.transform('mean')\n assert_series_equal(result, expected)\n\n\ndef 
test_fillna():\n\n # need to upsample here\n rng = pd.date_range('1/1/2012', periods=10, freq='2S')\n ts = Series(np.arange(len(rng), dtype='int64'), index=rng)\n r = ts.resample('s')\n\n expected = r.ffill()\n result = r.fillna(method='ffill')\n assert_series_equal(result, expected)\n\n expected = r.bfill()\n result = r.fillna(method='bfill')\n assert_series_equal(result, expected)\n\n msg = (r\"Invalid fill method\\. Expecting pad \\(ffill\\), backfill\"\n r\" \\(bfill\\) or nearest\\. Got 0\")\n with pytest.raises(ValueError, match=msg):\n r.fillna(0)\n\n\ndef test_apply_without_aggregation():\n\n # both resample and groupby should work w/o aggregation\n r = test_series.resample('20min')\n g = test_series.groupby(pd.Grouper(freq='20min'))\n\n for t in [g, r]:\n result = t.apply(lambda x: x)\n assert_series_equal(result, test_series)\n\n\ndef test_agg_consistency():\n\n # make sure that we are consistent across\n # similar aggregations with and w/o selection list\n df = DataFrame(np.random.randn(1000, 3),\n index=pd.date_range('1/1/2012', freq='S', periods=1000),\n columns=['A', 'B', 'C'])\n\n r = df.resample('3T')\n\n with tm.assert_produces_warning(FutureWarning,\n check_stacklevel=False):\n expected = r[['A', 'B', 'C']].agg({'r1': 'mean', 'r2': 'sum'})\n result = r.agg({'r1': 'mean', 'r2': 'sum'})\n assert_frame_equal(result, expected)\n\n# TODO: once GH 14008 is fixed, move these tests into\n# `Base` test class\n\n\ndef test_agg():\n # test with all three Resampler apis and TimeGrouper\n\n np.random.seed(1234)\n index = date_range(datetime(2005, 1, 1),\n datetime(2005, 1, 10), freq='D')\n index.name = 'date'\n df = DataFrame(np.random.rand(10, 2), columns=list('AB'), index=index)\n df_col = df.reset_index()\n df_mult = df_col.copy()\n df_mult.index = pd.MultiIndex.from_arrays([range(10), df.index],\n names=['index', 'date'])\n r = df.resample('2D')\n cases = [\n r,\n df_col.resample('2D', on='date'),\n df_mult.resample('2D', level='date'),\n df.groupby(pd.Grouper(freq='2D'))\n ]\n\n a_mean = r['A'].mean()\n a_std = r['A'].std()\n a_sum = r['A'].sum()\n b_mean = r['B'].mean()\n b_std = r['B'].std()\n b_sum = r['B'].sum()\n\n expected = pd.concat([a_mean, a_std, b_mean, b_std], axis=1)\n expected.columns = pd.MultiIndex.from_product([['A', 'B'],\n ['mean', 'std']])\n for t in cases:\n result = t.aggregate([np.mean, np.std])\n assert_frame_equal(result, expected)\n\n expected = pd.concat([a_mean, b_std], axis=1)\n for t in cases:\n result = t.aggregate({'A': np.mean,\n 'B': np.std})\n assert_frame_equal(result, expected, check_like=True)\n\n expected = pd.concat([a_mean, a_std], axis=1)\n expected.columns = pd.MultiIndex.from_tuples([('A', 'mean'),\n ('A', 'std')])\n for t in cases:\n result = t.aggregate({'A': ['mean', 'std']})\n assert_frame_equal(result, expected)\n\n expected = pd.concat([a_mean, a_sum], axis=1)\n expected.columns = ['mean', 'sum']\n for t in cases:\n result = t['A'].aggregate(['mean', 'sum'])\n assert_frame_equal(result, expected)\n\n expected = pd.concat([a_mean, a_sum], axis=1)\n expected.columns = pd.MultiIndex.from_tuples([('A', 'mean'),\n ('A', 'sum')])\n for t in cases:\n with tm.assert_produces_warning(FutureWarning,\n check_stacklevel=False):\n result = t.aggregate({'A': {'mean': 'mean', 'sum': 'sum'}})\n assert_frame_equal(result, expected, check_like=True)\n\n expected = pd.concat([a_mean, a_sum, b_mean, b_sum], axis=1)\n expected.columns = pd.MultiIndex.from_tuples([('A', 'mean'),\n ('A', 'sum'),\n ('B', 'mean2'),\n ('B', 'sum2')])\n for t in cases:\n 
with tm.assert_produces_warning(FutureWarning,\n check_stacklevel=False):\n result = t.aggregate({'A': {'mean': 'mean', 'sum': 'sum'},\n 'B': {'mean2': 'mean', 'sum2': 'sum'}})\n assert_frame_equal(result, expected, check_like=True)\n\n expected = pd.concat([a_mean, a_std, b_mean, b_std], axis=1)\n expected.columns = pd.MultiIndex.from_tuples([('A', 'mean'),\n ('A', 'std'),\n ('B', 'mean'),\n ('B', 'std')])\n for t in cases:\n result = t.aggregate({'A': ['mean', 'std'],\n 'B': ['mean', 'std']})\n assert_frame_equal(result, expected, check_like=True)\n\n expected = pd.concat([a_mean, a_sum, b_mean, b_sum], axis=1)\n expected.columns = pd.MultiIndex.from_tuples([('r1', 'A', 'mean'),\n ('r1', 'A', 'sum'),\n ('r2', 'B', 'mean'),\n ('r2', 'B', 'sum')])\n\n\ndef test_agg_misc():\n # test with all three Resampler apis and TimeGrouper\n\n np.random.seed(1234)\n index = date_range(datetime(2005, 1, 1),\n datetime(2005, 1, 10), freq='D')\n index.name = 'date'\n df = DataFrame(np.random.rand(10, 2), columns=list('AB'), index=index)\n df_col = df.reset_index()\n df_mult = df_col.copy()\n df_mult.index = pd.MultiIndex.from_arrays([range(10), df.index],\n names=['index', 'date'])\n\n r = df.resample('2D')\n cases = [\n r,\n df_col.resample('2D', on='date'),\n df_mult.resample('2D', level='date'),\n df.groupby(pd.Grouper(freq='2D'))\n ]\n\n # passed lambda\n for t in cases:\n result = t.agg({'A': np.sum,\n 'B': lambda x: np.std(x, ddof=1)})\n rcustom = t['B'].apply(lambda x: np.std(x, ddof=1))\n expected = pd.concat([r['A'].sum(), rcustom], axis=1)\n assert_frame_equal(result, expected, check_like=True)\n\n # agg with renamers\n expected = pd.concat([t['A'].sum(),\n t['B'].sum(),\n t['A'].mean(),\n t['B'].mean()],\n axis=1)\n expected.columns = pd.MultiIndex.from_tuples([('result1', 'A'),\n ('result1', 'B'),\n ('result2', 'A'),\n ('result2', 'B')])\n\n for t in cases:\n with tm.assert_produces_warning(FutureWarning,\n check_stacklevel=False):\n result = t[['A', 'B']].agg(OrderedDict([('result1', np.sum),\n ('result2', np.mean)]))\n assert_frame_equal(result, expected, check_like=True)\n\n # agg with different hows\n expected = pd.concat([t['A'].sum(),\n t['A'].std(),\n t['B'].mean(),\n t['B'].std()],\n axis=1)\n expected.columns = pd.MultiIndex.from_tuples([('A', 'sum'),\n ('A', 'std'),\n ('B', 'mean'),\n ('B', 'std')])\n for t in cases:\n result = t.agg(OrderedDict([('A', ['sum', 'std']),\n ('B', ['mean', 'std'])]))\n assert_frame_equal(result, expected, check_like=True)\n\n # equivalent of using a selection list / or not\n for t in cases:\n result = t[['A', 'B']].agg({'A': ['sum', 'std'],\n 'B': ['mean', 'std']})\n assert_frame_equal(result, expected, check_like=True)\n\n # series like aggs\n for t in cases:\n with tm.assert_produces_warning(FutureWarning,\n check_stacklevel=False):\n result = t['A'].agg({'A': ['sum', 'std']})\n expected = pd.concat([t['A'].sum(),\n t['A'].std()],\n axis=1)\n expected.columns = pd.MultiIndex.from_tuples([('A', 'sum'),\n ('A', 'std')])\n assert_frame_equal(result, expected, check_like=True)\n\n expected = pd.concat([t['A'].agg(['sum', 'std']),\n t['A'].agg(['mean', 'std'])],\n axis=1)\n expected.columns = pd.MultiIndex.from_tuples([('A', 'sum'),\n ('A', 'std'),\n ('B', 'mean'),\n ('B', 'std')])\n with tm.assert_produces_warning(FutureWarning,\n check_stacklevel=False):\n result = t['A'].agg({'A': ['sum', 'std'],\n 'B': ['mean', 'std']})\n assert_frame_equal(result, expected, check_like=True)\n\n # errors\n # invalid names in the agg specification\n msg = \"\\\"Column 
'B' does not exist!\\\"\"\n for t in cases:\n with pytest.raises(KeyError, match=msg):\n t[['A']].agg({'A': ['sum', 'std'],\n 'B': ['mean', 'std']})\n\n\ndef test_agg_nested_dicts():\n\n np.random.seed(1234)\n index = date_range(datetime(2005, 1, 1),\n datetime(2005, 1, 10), freq='D')\n index.name = 'date'\n df = DataFrame(np.random.rand(10, 2), columns=list('AB'), index=index)\n df_col = df.reset_index()\n df_mult = df_col.copy()\n df_mult.index = pd.MultiIndex.from_arrays([range(10), df.index],\n names=['index', 'date'])\n r = df.resample('2D')\n cases = [\n r,\n df_col.resample('2D', on='date'),\n df_mult.resample('2D', level='date'),\n df.groupby(pd.Grouper(freq='2D'))\n ]\n\n msg = r\"cannot perform renaming for r(1|2) with a nested dictionary\"\n for t in cases:\n with pytest.raises(pd.core.base.SpecificationError, match=msg):\n t.aggregate({'r1': {'A': ['mean', 'sum']},\n 'r2': {'B': ['mean', 'sum']}})\n\n for t in cases:\n expected = pd.concat([t['A'].mean(), t['A'].std(), t['B'].mean(),\n t['B'].std()], axis=1)\n expected.columns = pd.MultiIndex.from_tuples([('ra', 'mean'), (\n 'ra', 'std'), ('rb', 'mean'), ('rb', 'std')])\n\n with tm.assert_produces_warning(FutureWarning,\n check_stacklevel=False):\n result = t[['A', 'B']].agg({'A': {'ra': ['mean', 'std']},\n 'B': {'rb': ['mean', 'std']}})\n assert_frame_equal(result, expected, check_like=True)\n\n with tm.assert_produces_warning(FutureWarning,\n check_stacklevel=False):\n result = t.agg({'A': {'ra': ['mean', 'std']},\n 'B': {'rb': ['mean', 'std']}})\n assert_frame_equal(result, expected, check_like=True)\n\n\ndef test_try_aggregate_non_existing_column():\n # GH 16766\n data = [\n {'dt': datetime(2017, 6, 1, 0), 'x': 1.0, 'y': 2.0},\n {'dt': datetime(2017, 6, 1, 1), 'x': 2.0, 'y': 2.0},\n {'dt': datetime(2017, 6, 1, 2), 'x': 3.0, 'y': 1.5}\n ]\n df = DataFrame(data).set_index('dt')\n\n # Error as we don't have 'z' column\n msg = \"\\\"Column 'z' does not exist!\\\"\"\n with pytest.raises(KeyError, match=msg):\n df.resample('30T').agg({'x': ['mean'],\n 'y': ['median'],\n 'z': ['sum']})\n\n\ndef test_selection_api_validation():\n # GH 13500\n index = date_range(datetime(2005, 1, 1),\n datetime(2005, 1, 10), freq='D')\n\n rng = np.arange(len(index), dtype=np.int64)\n df = DataFrame({'date': index, 'a': rng},\n index=pd.MultiIndex.from_arrays([rng, index],\n names=['v', 'd']))\n df_exp = DataFrame({'a': rng}, index=index)\n\n # non DatetimeIndex\n msg = (\"Only valid with DatetimeIndex, TimedeltaIndex or PeriodIndex,\"\n \" but got an instance of 'Int64Index'\")\n with pytest.raises(TypeError, match=msg):\n df.resample('2D', level='v')\n\n msg = \"The Grouper cannot specify both a key and a level!\"\n with pytest.raises(ValueError, match=msg):\n df.resample('2D', on='date', level='d')\n\n msg = \"unhashable type: 'list'\"\n with pytest.raises(TypeError, match=msg):\n df.resample('2D', on=['a', 'date'])\n\n msg = r\"\\\"Level \\['a', 'date'\\] not found\\\"\"\n with pytest.raises(KeyError, match=msg):\n df.resample('2D', level=['a', 'date'])\n\n # upsampling not allowed\n msg = (\"Upsampling from level= or on= selection is not supported, use\"\n r\" \\.set_index\\(\\.\\.\\.\\) to explicitly set index to datetime-like\")\n with pytest.raises(ValueError, match=msg):\n df.resample('2D', level='d').asfreq()\n with pytest.raises(ValueError, match=msg):\n df.resample('2D', on='date').asfreq()\n\n exp = df_exp.resample('2D').sum()\n exp.index.name = 'date'\n assert_frame_equal(exp, df.resample('2D', on='date').sum())\n\n exp.index.name = 
'd'\n assert_frame_equal(exp, df.resample('2D', level='d').sum())\n\n\[email protected]('col_name', ['t2', 't2x', 't2q', 'T_2M',\n 't2p', 't2m', 't2m1', 'T2M'])\ndef test_agg_with_datetime_index_list_agg_func(col_name):\n # GH 22660\n # The parametrized column names would get converted to dates by our\n # date parser. Some would result in OutOfBoundsError (ValueError) while\n # others would result in OverflowError when passed into Timestamp.\n # We catch these errors and move on to the correct branch.\n df = pd.DataFrame(list(range(200)),\n index=pd.date_range(start='2017-01-01', freq='15min',\n periods=200, tz='Europe/Berlin'),\n columns=[col_name])\n result = df.resample('1d').aggregate(['mean'])\n expected = pd.DataFrame([47.5, 143.5, 195.5],\n index=pd.date_range(start='2017-01-01', freq='D',\n periods=3, tz='Europe/Berlin'),\n columns=pd.MultiIndex(levels=[[col_name],\n ['mean']],\n codes=[[0], [0]]))\n assert_frame_equal(result, expected)\n",
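A short, hedged sketch of the resample/aggregate behaviour the tests above exercise, built on the same 2005-01-01 to 2005-01-10 daily frame as test_agg. It assumes pandas and numpy are available and is not part of the test module.

import numpy as np
import pandas as pd

# Same shape as the frame built in test_agg (10 daily rows, columns A and B).
index = pd.date_range('2005-01-01', '2005-01-10', freq='D', name='date')
df = pd.DataFrame(np.random.rand(10, 2), columns=list('AB'), index=index)

r = df.resample('2D')

# A list of functions yields MultiIndex columns such as ('A', 'mean').
r.aggregate([np.mean, np.std])

# Per-column specs also work; renaming through nested dicts triggers the
# FutureWarning asserted in test_agg / test_agg_nested_dicts above.
r.agg({'A': ['mean', 'std'], 'B': 'sum'})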
"\"\"\"\nconcat routines\n\"\"\"\n\nimport numpy as np\n\nimport pandas.core.dtypes.concat as _concat\n\nfrom pandas import DataFrame, Index, MultiIndex, Series, compat\nfrom pandas.core import common as com\nfrom pandas.core.arrays.categorical import (\n _factorize_from_iterable, _factorize_from_iterables)\nfrom pandas.core.generic import NDFrame\nfrom pandas.core.index import (\n _all_indexes_same, _get_consensus_names, _get_objs_combined_axis,\n ensure_index)\nimport pandas.core.indexes.base as ibase\nfrom pandas.core.internals import concatenate_block_managers\n\n# ---------------------------------------------------------------------\n# Concatenate DataFrame objects\n\n\ndef concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False,\n keys=None, levels=None, names=None, verify_integrity=False,\n sort=None, copy=True):\n \"\"\"\n Concatenate pandas objects along a particular axis with optional set logic\n along the other axes.\n\n Can also add a layer of hierarchical indexing on the concatenation axis,\n which may be useful if the labels are the same (or overlapping) on\n the passed axis number.\n\n Parameters\n ----------\n objs : a sequence or mapping of Series, DataFrame, or Panel objects\n If a dict is passed, the sorted keys will be used as the `keys`\n argument, unless it is passed, in which case the values will be\n selected (see below). Any None objects will be dropped silently unless\n they are all None in which case a ValueError will be raised.\n axis : {0/'index', 1/'columns'}, default 0\n The axis to concatenate along.\n join : {'inner', 'outer'}, default 'outer'\n How to handle indexes on other axis (or axes).\n join_axes : list of Index objects\n Specific indexes to use for the other n - 1 axes instead of performing\n inner/outer set logic.\n ignore_index : bool, default False\n If True, do not use the index values along the concatenation axis. The\n resulting axis will be labeled 0, ..., n - 1. This is useful if you are\n concatenating objects where the concatenation axis does not have\n meaningful indexing information. Note the index values on the other\n axes are still respected in the join.\n keys : sequence, default None\n If multiple levels passed, should contain tuples. Construct\n hierarchical index using the passed keys as the outermost level.\n levels : list of sequences, default None\n Specific levels (unique values) to use for constructing a\n MultiIndex. Otherwise they will be inferred from the keys.\n names : list, default None\n Names for the levels in the resulting hierarchical index.\n verify_integrity : bool, default False\n Check whether the new concatenated axis contains duplicates. This can\n be very expensive relative to the actual data concatenation.\n sort : bool, default None\n Sort non-concatenation axis if it is not already aligned when `join`\n is 'outer'. The current default of sorting is deprecated and will\n change to not-sorting in a future version of pandas.\n\n Explicitly pass ``sort=True`` to silence the warning and sort.\n Explicitly pass ``sort=False`` to silence the warning and not sort.\n\n This has no effect when ``join='inner'``, which already preserves\n the order of the non-concatenation axis.\n\n .. versionadded:: 0.23.0\n\n copy : bool, default True\n If False, do not copy data unnecessarily.\n\n Returns\n -------\n object, type of objs\n When concatenating all ``Series`` along the index (axis=0), a\n ``Series`` is returned. When ``objs`` contains at least one\n ``DataFrame``, a ``DataFrame`` is returned. 
When concatenating along\n the columns (axis=1), a ``DataFrame`` is returned.\n\n See Also\n --------\n Series.append : Concatenate Series.\n DataFrame.append : Concatenate DataFrames.\n DataFrame.join : Join DataFrames using indexes.\n DataFrame.merge : Merge DataFrames by indexes or columns.\n\n Notes\n -----\n The keys, levels, and names arguments are all optional.\n\n A walkthrough of how this method fits in with other tools for combining\n pandas objects can be found `here\n <http://pandas.pydata.org/pandas-docs/stable/merging.html>`__.\n\n Examples\n --------\n Combine two ``Series``.\n\n >>> s1 = pd.Series(['a', 'b'])\n >>> s2 = pd.Series(['c', 'd'])\n >>> pd.concat([s1, s2])\n 0 a\n 1 b\n 0 c\n 1 d\n dtype: object\n\n Clear the existing index and reset it in the result\n by setting the ``ignore_index`` option to ``True``.\n\n >>> pd.concat([s1, s2], ignore_index=True)\n 0 a\n 1 b\n 2 c\n 3 d\n dtype: object\n\n Add a hierarchical index at the outermost level of\n the data with the ``keys`` option.\n\n >>> pd.concat([s1, s2], keys=['s1', 's2'])\n s1 0 a\n 1 b\n s2 0 c\n 1 d\n dtype: object\n\n Label the index keys you create with the ``names`` option.\n\n >>> pd.concat([s1, s2], keys=['s1', 's2'],\n ... names=['Series name', 'Row ID'])\n Series name Row ID\n s1 0 a\n 1 b\n s2 0 c\n 1 d\n dtype: object\n\n Combine two ``DataFrame`` objects with identical columns.\n\n >>> df1 = pd.DataFrame([['a', 1], ['b', 2]],\n ... columns=['letter', 'number'])\n >>> df1\n letter number\n 0 a 1\n 1 b 2\n >>> df2 = pd.DataFrame([['c', 3], ['d', 4]],\n ... columns=['letter', 'number'])\n >>> df2\n letter number\n 0 c 3\n 1 d 4\n >>> pd.concat([df1, df2])\n letter number\n 0 a 1\n 1 b 2\n 0 c 3\n 1 d 4\n\n Combine ``DataFrame`` objects with overlapping columns\n and return everything. Columns outside the intersection will\n be filled with ``NaN`` values.\n\n >>> df3 = pd.DataFrame([['c', 3, 'cat'], ['d', 4, 'dog']],\n ... columns=['letter', 'number', 'animal'])\n >>> df3\n letter number animal\n 0 c 3 cat\n 1 d 4 dog\n >>> pd.concat([df1, df3], sort=False)\n letter number animal\n 0 a 1 NaN\n 1 b 2 NaN\n 0 c 3 cat\n 1 d 4 dog\n\n Combine ``DataFrame`` objects with overlapping columns\n and return only those that are shared by passing ``inner`` to\n the ``join`` keyword argument.\n\n >>> pd.concat([df1, df3], join=\"inner\")\n letter number\n 0 a 1\n 1 b 2\n 0 c 3\n 1 d 4\n\n Combine ``DataFrame`` objects horizontally along the x axis by\n passing in ``axis=1``.\n\n >>> df4 = pd.DataFrame([['bird', 'polly'], ['monkey', 'george']],\n ... 
columns=['animal', 'name'])\n >>> pd.concat([df1, df4], axis=1)\n letter number animal name\n 0 a 1 bird polly\n 1 b 2 monkey george\n\n Prevent the result from including duplicate index values with the\n ``verify_integrity`` option.\n\n >>> df5 = pd.DataFrame([1], index=['a'])\n >>> df5\n 0\n a 1\n >>> df6 = pd.DataFrame([2], index=['a'])\n >>> df6\n 0\n a 2\n >>> pd.concat([df5, df6], verify_integrity=True)\n Traceback (most recent call last):\n ...\n ValueError: Indexes have overlapping values: ['a']\n \"\"\"\n op = _Concatenator(objs, axis=axis, join_axes=join_axes,\n ignore_index=ignore_index, join=join,\n keys=keys, levels=levels, names=names,\n verify_integrity=verify_integrity,\n copy=copy, sort=sort)\n return op.get_result()\n\n\nclass _Concatenator(object):\n \"\"\"\n Orchestrates a concatenation operation for BlockManagers\n \"\"\"\n\n def __init__(self, objs, axis=0, join='outer', join_axes=None,\n keys=None, levels=None, names=None,\n ignore_index=False, verify_integrity=False, copy=True,\n sort=False):\n if isinstance(objs, (NDFrame, str)):\n raise TypeError('first argument must be an iterable of pandas '\n 'objects, you passed an object of type '\n '\"{name}\"'.format(name=type(objs).__name__))\n\n if join == 'outer':\n self.intersect = False\n elif join == 'inner':\n self.intersect = True\n else: # pragma: no cover\n raise ValueError('Only can inner (intersect) or outer (union) '\n 'join the other axis')\n\n if isinstance(objs, dict):\n if keys is None:\n keys = com.dict_keys_to_ordered_list(objs)\n objs = [objs[k] for k in keys]\n else:\n objs = list(objs)\n\n if len(objs) == 0:\n raise ValueError('No objects to concatenate')\n\n if keys is None:\n objs = list(com._not_none(*objs))\n else:\n # #1649\n clean_keys = []\n clean_objs = []\n for k, v in zip(keys, objs):\n if v is None:\n continue\n clean_keys.append(k)\n clean_objs.append(v)\n objs = clean_objs\n name = getattr(keys, 'name', None)\n keys = Index(clean_keys, name=name)\n\n if len(objs) == 0:\n raise ValueError('All objects passed were None')\n\n # consolidate data & figure out what our result ndim is going to be\n ndims = set()\n for obj in objs:\n if not isinstance(obj, (Series, DataFrame)):\n msg = (\"cannot concatenate object of type '{}';\"\n ' only Series and DataFrame objs are valid'\n .format(type(obj)))\n raise TypeError(msg)\n\n # consolidate\n obj._consolidate(inplace=True)\n ndims.add(obj.ndim)\n\n # get the sample\n # want the highest ndim that we have, and must be non-empty\n # unless all objs are empty\n sample = None\n if len(ndims) > 1:\n max_ndim = max(ndims)\n for obj in objs:\n if obj.ndim == max_ndim and np.sum(obj.shape):\n sample = obj\n break\n\n else:\n # filter out the empties if we have not multi-index possibilities\n # note to keep empty Series as it affect to result columns / name\n non_empties = [obj for obj in objs\n if sum(obj.shape) > 0 or isinstance(obj, Series)]\n\n if (len(non_empties) and (keys is None and names is None and\n levels is None and\n join_axes is None and\n not self.intersect)):\n objs = non_empties\n sample = objs[0]\n\n if sample is None:\n sample = objs[0]\n self.objs = objs\n\n # Standardize axis parameter to int\n if isinstance(sample, Series):\n axis = DataFrame._get_axis_number(axis)\n else:\n axis = sample._get_axis_number(axis)\n\n # Need to flip BlockManager axis in the DataFrame special case\n self._is_frame = isinstance(sample, DataFrame)\n if self._is_frame:\n axis = 1 if axis == 0 else 0\n\n self._is_series = isinstance(sample, Series)\n if not 0 <= 
axis <= sample.ndim:\n raise AssertionError(\"axis must be between 0 and {ndim}, input was\"\n \" {axis}\".format(ndim=sample.ndim, axis=axis))\n\n # if we have mixed ndims, then convert to highest ndim\n # creating column numbers as needed\n if len(ndims) > 1:\n current_column = 0\n max_ndim = sample.ndim\n self.objs, objs = [], self.objs\n for obj in objs:\n\n ndim = obj.ndim\n if ndim == max_ndim:\n pass\n\n elif ndim != max_ndim - 1:\n raise ValueError(\"cannot concatenate unaligned mixed \"\n \"dimensional NDFrame objects\")\n\n else:\n name = getattr(obj, 'name', None)\n if ignore_index or name is None:\n name = current_column\n current_column += 1\n\n # doing a row-wise concatenation so need everything\n # to line up\n if self._is_frame and axis == 1:\n name = 0\n obj = sample._constructor({name: obj})\n\n self.objs.append(obj)\n\n # note: this is the BlockManager axis (since DataFrame is transposed)\n self.axis = axis\n self.join_axes = join_axes\n self.keys = keys\n self.names = names or getattr(keys, 'names', None)\n self.levels = levels\n self.sort = sort\n\n self.ignore_index = ignore_index\n self.verify_integrity = verify_integrity\n self.copy = copy\n\n self.new_axes = self._get_new_axes()\n\n def get_result(self):\n\n # series only\n if self._is_series:\n\n # stack blocks\n if self.axis == 0:\n name = com.consensus_name_attr(self.objs)\n\n mgr = self.objs[0]._data.concat([x._data for x in self.objs],\n self.new_axes)\n cons = _concat._get_series_result_type(mgr, self.objs)\n return cons(mgr, name=name).__finalize__(self, method='concat')\n\n # combine as columns in a frame\n else:\n data = dict(zip(range(len(self.objs)), self.objs))\n cons = _concat._get_series_result_type(data)\n\n index, columns = self.new_axes\n df = cons(data, index=index)\n df.columns = columns\n return df.__finalize__(self, method='concat')\n\n # combine block managers\n else:\n mgrs_indexers = []\n for obj in self.objs:\n mgr = obj._data\n indexers = {}\n for ax, new_labels in enumerate(self.new_axes):\n if ax == self.axis:\n # Suppress reindexing on concat axis\n continue\n\n obj_labels = mgr.axes[ax]\n if not new_labels.equals(obj_labels):\n indexers[ax] = obj_labels.reindex(new_labels)[1]\n\n mgrs_indexers.append((obj._data, indexers))\n\n new_data = concatenate_block_managers(\n mgrs_indexers, self.new_axes, concat_axis=self.axis,\n copy=self.copy)\n if not self.copy:\n new_data._consolidate_inplace()\n\n cons = _concat._get_frame_result_type(new_data, self.objs)\n return (cons._from_axes(new_data, self.new_axes)\n .__finalize__(self, method='concat'))\n\n def _get_result_dim(self):\n if self._is_series and self.axis == 1:\n return 2\n else:\n return self.objs[0].ndim\n\n def _get_new_axes(self):\n ndim = self._get_result_dim()\n new_axes = [None] * ndim\n\n if self.join_axes is None:\n for i in range(ndim):\n if i == self.axis:\n continue\n new_axes[i] = self._get_comb_axis(i)\n else:\n if len(self.join_axes) != ndim - 1:\n raise AssertionError(\"length of join_axes must be equal \"\n \"to {length}\".format(length=ndim - 1))\n\n # ufff...\n indices = compat.lrange(ndim)\n indices.remove(self.axis)\n\n for i, ax in zip(indices, self.join_axes):\n new_axes[i] = ax\n\n new_axes[self.axis] = self._get_concat_axis()\n return new_axes\n\n def _get_comb_axis(self, i):\n data_axis = self.objs[0]._get_block_manager_axis(i)\n try:\n return _get_objs_combined_axis(self.objs, axis=data_axis,\n intersect=self.intersect,\n sort=self.sort)\n except IndexError:\n types = [type(x).__name__ for x in self.objs]\n 
raise TypeError(\"Cannot concatenate list of {types}\"\n .format(types=types))\n\n def _get_concat_axis(self):\n \"\"\"\n Return index to be used along concatenation axis.\n \"\"\"\n if self._is_series:\n if self.axis == 0:\n indexes = [x.index for x in self.objs]\n elif self.ignore_index:\n idx = ibase.default_index(len(self.objs))\n return idx\n elif self.keys is None:\n names = [None] * len(self.objs)\n num = 0\n has_names = False\n for i, x in enumerate(self.objs):\n if not isinstance(x, Series):\n raise TypeError(\"Cannot concatenate type 'Series' \"\n \"with object of type {type!r}\"\n .format(type=type(x).__name__))\n if x.name is not None:\n names[i] = x.name\n has_names = True\n else:\n names[i] = num\n num += 1\n if has_names:\n return Index(names)\n else:\n return ibase.default_index(len(self.objs))\n else:\n return ensure_index(self.keys).set_names(self.names)\n else:\n indexes = [x._data.axes[self.axis] for x in self.objs]\n\n if self.ignore_index:\n idx = ibase.default_index(sum(len(i) for i in indexes))\n return idx\n\n if self.keys is None:\n concat_axis = _concat_indexes(indexes)\n else:\n concat_axis = _make_concat_multiindex(indexes, self.keys,\n self.levels, self.names)\n\n self._maybe_check_integrity(concat_axis)\n\n return concat_axis\n\n def _maybe_check_integrity(self, concat_index):\n if self.verify_integrity:\n if not concat_index.is_unique:\n overlap = concat_index[concat_index.duplicated()].unique()\n raise ValueError('Indexes have overlapping values: '\n '{overlap!s}'.format(overlap=overlap))\n\n\ndef _concat_indexes(indexes):\n return indexes[0].append(indexes[1:])\n\n\ndef _make_concat_multiindex(indexes, keys, levels=None, names=None):\n\n if ((levels is None and isinstance(keys[0], tuple)) or\n (levels is not None and len(levels) > 1)):\n zipped = compat.lzip(*keys)\n if names is None:\n names = [None] * len(zipped)\n\n if levels is None:\n _, levels = _factorize_from_iterables(zipped)\n else:\n levels = [ensure_index(x) for x in levels]\n else:\n zipped = [keys]\n if names is None:\n names = [None]\n\n if levels is None:\n levels = [ensure_index(keys)]\n else:\n levels = [ensure_index(x) for x in levels]\n\n if not _all_indexes_same(indexes):\n codes_list = []\n\n # things are potentially different sizes, so compute the exact codes\n # for each level and pass those to MultiIndex.from_arrays\n\n for hlevel, level in zip(zipped, levels):\n to_concat = []\n for key, index in zip(hlevel, indexes):\n try:\n i = level.get_loc(key)\n except KeyError:\n raise ValueError('Key {key!s} not in level {level!s}'\n .format(key=key, level=level))\n\n to_concat.append(np.repeat(i, len(index)))\n codes_list.append(np.concatenate(to_concat))\n\n concat_index = _concat_indexes(indexes)\n\n # these go at the end\n if isinstance(concat_index, MultiIndex):\n levels.extend(concat_index.levels)\n codes_list.extend(concat_index.codes)\n else:\n codes, categories = _factorize_from_iterable(concat_index)\n levels.append(categories)\n codes_list.append(codes)\n\n if len(names) == len(levels):\n names = list(names)\n else:\n # make sure that all of the passed indices have the same nlevels\n if not len({idx.nlevels for idx in indexes}) == 1:\n raise AssertionError(\"Cannot concat indices that do\"\n \" not have the same number of levels\")\n\n # also copies\n names = names + _get_consensus_names(indexes)\n\n return MultiIndex(levels=levels, codes=codes_list, names=names,\n verify_integrity=False)\n\n new_index = indexes[0]\n n = len(new_index)\n kpieces = len(indexes)\n\n # also 
copies\n new_names = list(names)\n new_levels = list(levels)\n\n # construct codes\n new_codes = []\n\n # do something a bit more speedy\n\n for hlevel, level in zip(zipped, levels):\n hlevel = ensure_index(hlevel)\n mapped = level.get_indexer(hlevel)\n\n mask = mapped == -1\n if mask.any():\n raise ValueError('Values not found in passed level: {hlevel!s}'\n .format(hlevel=hlevel[mask]))\n\n new_codes.append(np.repeat(mapped, n))\n\n if isinstance(new_index, MultiIndex):\n new_levels.extend(new_index.levels)\n new_codes.extend([np.tile(lab, kpieces) for lab in new_index.codes])\n else:\n new_levels.append(new_index)\n new_codes.append(np.tile(np.arange(n), kpieces))\n\n if len(new_names) < len(new_levels):\n new_names.extend(new_index.names)\n\n return MultiIndex(levels=new_levels, codes=new_codes, names=new_names,\n verify_integrity=False)\n",
"# -*- coding: utf-8 -*-\nimport numpy as np\nimport pytest\n\nimport pandas._config.config as cf\n\nimport pandas as pd\nfrom pandas import compat\n\nimport pandas.io.formats.format as fmt\nimport pandas.io.formats.printing as printing\n\n\ndef test_adjoin():\n data = [['a', 'b', 'c'], ['dd', 'ee', 'ff'], ['ggg', 'hhh', 'iii']]\n expected = 'a dd ggg\\nb ee hhh\\nc ff iii'\n\n adjoined = printing.adjoin(2, *data)\n\n assert (adjoined == expected)\n\n\ndef test_repr_binary_type():\n import string\n letters = string.ascii_letters\n try:\n raw = bytes(letters, encoding=cf.get_option('display.encoding'))\n except TypeError:\n raw = bytes(letters)\n b = str(compat.bytes_to_str(raw))\n res = printing.pprint_thing(b, quote_strings=True)\n assert res == repr(b)\n res = printing.pprint_thing(b, quote_strings=False)\n assert res == b\n\n\nclass TestFormattBase(object):\n\n def test_adjoin(self):\n data = [['a', 'b', 'c'], ['dd', 'ee', 'ff'], ['ggg', 'hhh', 'iii']]\n expected = 'a dd ggg\\nb ee hhh\\nc ff iii'\n\n adjoined = printing.adjoin(2, *data)\n\n assert adjoined == expected\n\n def test_adjoin_unicode(self):\n data = [['あ', 'b', 'c'], ['dd', 'ええ', 'ff'], ['ggg', 'hhh', 'いいい']]\n expected = 'あ dd ggg\\nb ええ hhh\\nc ff いいい'\n adjoined = printing.adjoin(2, *data)\n assert adjoined == expected\n\n adj = fmt.EastAsianTextAdjustment()\n\n expected = \"\"\"あ dd ggg\nb ええ hhh\nc ff いいい\"\"\"\n\n adjoined = adj.adjoin(2, *data)\n assert adjoined == expected\n cols = adjoined.split('\\n')\n assert adj.len(cols[0]) == 13\n assert adj.len(cols[1]) == 13\n assert adj.len(cols[2]) == 16\n\n expected = \"\"\"あ dd ggg\nb ええ hhh\nc ff いいい\"\"\"\n\n adjoined = adj.adjoin(7, *data)\n assert adjoined == expected\n cols = adjoined.split('\\n')\n assert adj.len(cols[0]) == 23\n assert adj.len(cols[1]) == 23\n assert adj.len(cols[2]) == 26\n\n def test_justify(self):\n adj = fmt.EastAsianTextAdjustment()\n\n def just(x, *args, **kwargs):\n # wrapper to test single str\n return adj.justify([x], *args, **kwargs)[0]\n\n assert just('abc', 5, mode='left') == 'abc '\n assert just('abc', 5, mode='center') == ' abc '\n assert just('abc', 5, mode='right') == ' abc'\n assert just('abc', 5, mode='left') == 'abc '\n assert just('abc', 5, mode='center') == ' abc '\n assert just('abc', 5, mode='right') == ' abc'\n\n assert just('パンダ', 5, mode='left') == 'パンダ'\n assert just('パンダ', 5, mode='center') == 'パンダ'\n assert just('パンダ', 5, mode='right') == 'パンダ'\n\n assert just('パンダ', 10, mode='left') == 'パンダ '\n assert just('パンダ', 10, mode='center') == ' パンダ '\n assert just('パンダ', 10, mode='right') == ' パンダ'\n\n def test_east_asian_len(self):\n adj = fmt.EastAsianTextAdjustment()\n\n assert adj.len('abc') == 3\n assert adj.len('abc') == 3\n\n assert adj.len('パンダ') == 6\n assert adj.len('パンダ') == 5\n assert adj.len('パンダpanda') == 11\n assert adj.len('パンダpanda') == 10\n\n def test_ambiguous_width(self):\n adj = fmt.EastAsianTextAdjustment()\n assert adj.len('¡¡ab') == 4\n\n with cf.option_context('display.unicode.ambiguous_as_wide', True):\n adj = fmt.EastAsianTextAdjustment()\n assert adj.len('¡¡ab') == 6\n\n data = [['あ', 'b', 'c'], ['dd', 'ええ', 'ff'],\n ['ggg', '¡¡ab', 'いいい']]\n expected = 'あ dd ggg \\nb ええ ¡¡ab\\nc ff いいい'\n adjoined = adj.adjoin(2, *data)\n assert adjoined == expected\n\n\nclass TestTableSchemaRepr(object):\n\n @classmethod\n def setup_class(cls):\n pytest.importorskip('IPython')\n\n from IPython.core.interactiveshell import InteractiveShell\n cls.display_formatter = InteractiveShell.instance().display_formatter\n\n 
def test_publishes(self):\n\n df = pd.DataFrame({\"A\": [1, 2]})\n objects = [df['A'], df, df] # dataframe / series\n expected_keys = [\n {'text/plain', 'application/vnd.dataresource+json'},\n {'text/plain', 'text/html', 'application/vnd.dataresource+json'},\n ]\n\n opt = pd.option_context('display.html.table_schema', True)\n for obj, expected in zip(objects, expected_keys):\n with opt:\n formatted = self.display_formatter.format(obj)\n assert set(formatted[0].keys()) == expected\n\n with_latex = pd.option_context('display.latex.repr', True)\n\n with opt, with_latex:\n formatted = self.display_formatter.format(obj)\n\n expected = {'text/plain', 'text/html', 'text/latex',\n 'application/vnd.dataresource+json'}\n assert set(formatted[0].keys()) == expected\n\n def test_publishes_not_implemented(self):\n # column MultiIndex\n # GH 15996\n midx = pd.MultiIndex.from_product([['A', 'B'], ['a', 'b', 'c']])\n df = pd.DataFrame(np.random.randn(5, len(midx)), columns=midx)\n\n opt = pd.option_context('display.html.table_schema', True)\n\n with opt:\n formatted = self.display_formatter.format(df)\n\n expected = {'text/plain', 'text/html'}\n assert set(formatted[0].keys()) == expected\n\n def test_config_on(self):\n df = pd.DataFrame({\"A\": [1, 2]})\n with pd.option_context(\"display.html.table_schema\", True):\n result = df._repr_data_resource_()\n\n assert result is not None\n\n def test_config_default_off(self):\n df = pd.DataFrame({\"A\": [1, 2]})\n with pd.option_context(\"display.html.table_schema\", False):\n result = df._repr_data_resource_()\n\n assert result is None\n\n def test_enable_data_resource_formatter(self):\n # GH 10491\n formatters = self.display_formatter.formatters\n mimetype = 'application/vnd.dataresource+json'\n\n with pd.option_context('display.html.table_schema', True):\n assert 'application/vnd.dataresource+json' in formatters\n assert formatters[mimetype].enabled\n\n # still there, just disabled\n assert 'application/vnd.dataresource+json' in formatters\n assert not formatters[mimetype].enabled\n\n # able to re-set\n with pd.option_context('display.html.table_schema', True):\n assert 'application/vnd.dataresource+json' in formatters\n assert formatters[mimetype].enabled\n # smoke test that it works\n self.display_formatter.format(cf)\n"
] | [
[
"pandas.Series",
"numpy.arange",
"pandas.Categorical",
"pandas.util.testing.assert_series_equal",
"pandas.util.testing.makeDateIndex",
"numpy.random.randn",
"pandas.date_range",
"pandas.isna",
"pandas.Timestamp"
],
[
"pandas.PeriodIndex",
"pandas.period_range",
"pandas.Index",
"pandas.DataFrame",
"pandas.util.testing.assert_index_equal",
"pandas.date_range",
"pandas.util.testing.equalContents",
"numpy.random.randint"
],
[
"pandas.Series",
"numpy.asarray",
"numpy.concatenate",
"numpy.ix_",
"pandas.core.dtypes.common.is_numeric_dtype",
"pandas.errors.AbstractMethodError",
"pandas.core.common.asarray_tuplesafe",
"pandas.core.dtypes.common.is_iterator",
"pandas.core.dtypes.common.is_float",
"pandas.core.dtypes.common.is_integer_dtype",
"pandas.core.dtypes.common.is_list_like",
"pandas.util._decorators.Appender",
"pandas.core.dtypes.missing._infer_fill_value",
"pandas.core.dtypes.common.is_sequence",
"pandas.core.dtypes.common.ensure_platform_int",
"numpy.iterable",
"pandas.core.dtypes.common.is_sparse",
"numpy.array",
"pandas.core.common.is_bool_indexer",
"pandas.core.dtypes.common.is_scalar",
"pandas.core.dtypes.common.is_integer",
"pandas.core.common.is_null_slice",
"numpy.tile",
"pandas.core.common.apply_if_callable",
"pandas.core.dtypes.missing.isna",
"pandas._libs.lib.item_from_zerodim",
"numpy.empty",
"pandas.core.index.Index"
],
[
"pandas.concat",
"numpy.random.seed",
"pandas.MultiIndex",
"pandas.util.testing.assert_produces_warning",
"pandas.Grouper",
"pandas.util.testing.assert_series_equal",
"pandas.MultiIndex.from_arrays",
"pandas.DataFrame",
"pandas.util.testing.assert_frame_equal",
"pandas.MultiIndex.from_tuples",
"pandas.util.testing.assert_index_equal",
"numpy.std",
"numpy.random.randn",
"pandas.MultiIndex.from_product",
"numpy.random.rand",
"pandas.date_range"
],
[
"pandas.core.index._get_consensus_names",
"pandas.core.index._get_objs_combined_axis",
"numpy.concatenate",
"pandas.core.arrays.categorical._factorize_from_iterables",
"pandas.DataFrame._get_axis_number",
"pandas.compat.lzip",
"numpy.arange",
"pandas.Index",
"pandas.core.index.ensure_index",
"pandas.core.common._not_none",
"pandas.core.common.dict_keys_to_ordered_list",
"numpy.repeat",
"pandas.MultiIndex",
"pandas.core.arrays.categorical._factorize_from_iterable",
"pandas.core.dtypes.concat._get_series_result_type",
"pandas.core.internals.concatenate_block_managers",
"pandas.core.dtypes.concat._get_frame_result_type",
"numpy.sum",
"pandas.core.common.consensus_name_attr",
"pandas.core.index._all_indexes_same",
"numpy.tile",
"pandas.compat.lrange"
],
[
"pandas.io.formats.format.EastAsianTextAdjustment",
"pandas.compat.bytes_to_str",
"pandas._config.config.option_context",
"pandas.io.formats.printing.adjoin",
"pandas.option_context",
"pandas.DataFrame",
"pandas.MultiIndex.from_product",
"pandas.io.formats.printing.pprint_thing",
"pandas._config.config.get_option"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"0.19",
"0.24",
"0.20"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Shashi456/transformers | [
"0f43e742d908772733870730dbddd8e00e0253ef"
] | [
"src/transformers/models/pegasus/modeling_tf_pegasus.py"
] | [
"# coding=utf-8\n# Copyright 2021, Google Inc. and The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" TF 2.0 Pegasus model. \"\"\"\n\n\nimport random\nfrom typing import Dict, Optional, Tuple, Union\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom ...activations_tf import get_tf_activation\nfrom ...file_utils import (\n add_code_sample_docstrings,\n add_end_docstrings,\n add_start_docstrings,\n add_start_docstrings_to_model_forward,\n replace_return_docstrings,\n)\nfrom ...modeling_tf_outputs import (\n TFBaseModelOutput,\n TFBaseModelOutputWithPastAndCrossAttentions,\n TFSeq2SeqLMOutput,\n TFSeq2SeqModelOutput,\n)\n\n# Public API\nfrom ...modeling_tf_utils import (\n DUMMY_INPUTS,\n TFCausalLanguageModelingLoss,\n TFPreTrainedModel,\n TFSharedEmbeddings,\n TFWrappedEmbeddings,\n input_processing,\n keras_serializable,\n shape_list,\n)\nfrom ...utils import logging\nfrom .configuration_pegasus import PegasusConfig\n\n\nlogger = logging.get_logger(__name__)\n\n_CHECKPOINT_FOR_DOC = \"google/pegasus-large\"\n_CONFIG_FOR_DOC = \"PegasusConfig\"\n_TOKENIZER_FOR_DOC = \"PegasusTokenizer\"\n\n\nLARGE_NEGATIVE = -1e8\n\n\n# Copied from transformers.models.bart.modeling_tf_bart.shift_tokens_right\ndef shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int):\n start_tokens = tf.fill((shape_list(input_ids)[0], 1), decoder_start_token_id)\n shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1)\n # replace possible -100 values in labels by `pad_token_id`\n shifted_input_ids = tf.where(\n shifted_input_ids == -100, tf.fill(shape_list(shifted_input_ids), pad_token_id), shifted_input_ids\n )\n\n if tf.executing_eagerly():\n # \"Verify that `labels` has only positive values and -100\"\n assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0))\n\n # Make sure the assertion op is called by wrapping the result in an identity no-op\n with tf.control_dependencies([assert_gte0]):\n shifted_input_ids = tf.identity(shifted_input_ids)\n\n return shifted_input_ids\n\n\n# Copied from transformers.models.bart.modeling_tf_bart._make_causal_mask\ndef _make_causal_mask(input_ids_shape: tf.TensorShape, past_key_values_length: int = 0):\n \"\"\"\n Make causal mask used for bi-directional self-attention.\n \"\"\"\n bsz, tgt_len = input_ids_shape\n mask = tf.ones((tgt_len, tgt_len)) * LARGE_NEGATIVE\n mask_cond = tf.range(shape_list(mask)[-1])\n\n mask = tf.where(mask_cond < tf.reshape(mask_cond + 1, (shape_list(mask)[-1], 1)), 0.0, mask)\n\n if past_key_values_length > 0:\n mask = tf.concat([tf.zeros((tgt_len, past_key_values_length)), mask], axis=-1)\n\n return tf.tile(mask[None, None, :, :], (bsz, 1, 1, 1))\n\n\n# Copied from transformers.models.bart.modeling_tf_bart._expand_mask\ndef _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None, past_key_values_length: int = 0):\n \"\"\"\n Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.\n \"\"\"\n 
src_len = shape_list(mask)[1]\n tgt_len = tgt_len if tgt_len is not None else src_len\n one_cst = tf.constant(1.0)\n mask = tf.cast(mask, dtype=one_cst.dtype)\n expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1))\n\n return (one_cst - expanded_mask) * LARGE_NEGATIVE\n\n\n# Copied from transformers.models.marian.modeling_tf_marian.TFMarianSinusoidalPositionalEmbedding with Marian->Pegasus\nclass TFPegasusSinusoidalPositionalEmbedding(tf.keras.layers.Layer):\n \"\"\"This module produces sinusoidal positional embeddings of any length.\"\"\"\n\n def __init__(self, num_positions: int, embedding_dim: int, **kwargs):\n super().__init__(**kwargs)\n\n if embedding_dim % 2 != 0:\n raise NotImplementedError(f\"odd embedding_dim {embedding_dim} not supported\")\n\n self.embedding_dim = embedding_dim\n self.num_positions = num_positions\n\n def build(self, input_shape: tf.TensorShape):\n \"\"\"\n Build shared token embedding layer Shared weights logic adapted from\n https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24\n \"\"\"\n\n weight = self._init_weight(self.num_positions, self.embedding_dim)\n\n self.weight = self.add_weight(\n name=\"embeddings\",\n shape=[self.num_positions, self.embedding_dim],\n )\n weight = tf.cast(weight, dtype=self.weight.dtype)\n\n self.weight.assign(weight)\n\n super().build(input_shape)\n\n @staticmethod\n def _init_weight(n_pos: int, dim: int):\n \"\"\"\n Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in\n the 2nd half of the vector. [dim // 2:]\n \"\"\"\n position_enc = np.array(\n [[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]\n )\n # index 0 is all zero\n position_enc[:, 0 : dim // 2] = np.sin(position_enc[:, 0::2])\n position_enc[:, dim // 2 :] = np.cos(position_enc[:, 1::2])\n # convert to tensor\n table = tf.convert_to_tensor(position_enc)\n tf.stop_gradient(table)\n return table\n\n def call(self, input_shape: tf.TensorShape, past_key_values_length: int = 0):\n \"\"\"Input is expected to be of size [bsz x seqlen].\"\"\"\n bsz, seq_len = input_shape[:2]\n\n positions = tf.range(past_key_values_length, seq_len + past_key_values_length, delta=1, name=\"range\")\n return tf.gather(self.weight, positions)\n\n\n# Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention with Bart->Pegasus\nclass TFPegasusAttention(tf.keras.layers.Layer):\n \"\"\"Multi-headed attention from \"Attention Is All You Need\"\"\"\n\n def __init__(\n self,\n embed_dim: int,\n num_heads: int,\n dropout: float = 0.0,\n is_decoder: bool = False,\n bias: bool = True,\n **kwargs,\n ):\n super().__init__(**kwargs)\n self.embed_dim = embed_dim\n\n self.num_heads = num_heads\n self.dropout = tf.keras.layers.Dropout(dropout)\n self.head_dim = embed_dim // num_heads\n assert self.head_dim * num_heads == self.embed_dim, \"embed_dim must be divisible by num_heads\"\n self.scaling = self.head_dim ** -0.5\n self.is_decoder = is_decoder\n\n self.k_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name=\"k_proj\")\n self.q_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name=\"q_proj\")\n self.v_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name=\"v_proj\")\n self.out_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name=\"out_proj\")\n\n def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int):\n return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, 
self.head_dim)), (0, 2, 1, 3))\n\n def call(\n self,\n hidden_states: tf.Tensor,\n key_value_states: Optional[tf.Tensor] = None,\n past_key_value: Optional[Tuple[Tuple[tf.Tensor]]] = None,\n attention_mask: Optional[tf.Tensor] = None,\n layer_head_mask: Optional[tf.Tensor] = None,\n training=False,\n ) -> Tuple[tf.Tensor, Optional[tf.Tensor]]:\n \"\"\"Input shape: Batch x Time x Channel\"\"\"\n\n # if key_value_states are provided this layer is used as a cross-attention layer\n # for the decoder\n is_cross_attention = key_value_states is not None\n bsz, tgt_len, embed_dim = shape_list(hidden_states)\n\n # get query proj\n query_states = self.q_proj(hidden_states) * self.scaling\n # get key, value proj\n if is_cross_attention and past_key_value is not None:\n # reuse k,v, cross_attentions\n key_states = past_key_value[0]\n value_states = past_key_value[1]\n elif is_cross_attention:\n # cross_attentions\n key_states = self._shape(self.k_proj(key_value_states), -1, bsz)\n value_states = self._shape(self.v_proj(key_value_states), -1, bsz)\n elif past_key_value is not None:\n # reuse k, v, self_attention\n key_states = self._shape(self.k_proj(hidden_states), -1, bsz)\n value_states = self._shape(self.v_proj(hidden_states), -1, bsz)\n key_states = tf.concat([past_key_value[0], key_states], axis=2)\n value_states = tf.concat([past_key_value[1], value_states], axis=2)\n else:\n # self_attention\n key_states = self._shape(self.k_proj(hidden_states), -1, bsz)\n value_states = self._shape(self.v_proj(hidden_states), -1, bsz)\n\n if self.is_decoder:\n # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.\n # Further calls to cross_attention layer can then reuse all cross-attention\n # key/value_states (first \"if\" case)\n # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of\n # all previous decoder key/value_states. 
Further calls to uni-directional self-attention\n # can concat previous decoder key/value_states to current projected key/value_states (third \"elif\" case)\n # if encoder bi-directional self-attention `past_key_value` is always `None`\n past_key_value = (key_states, value_states)\n\n proj_shape = (bsz * self.num_heads, -1, self.head_dim)\n query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape)\n key_states = tf.reshape(key_states, proj_shape)\n value_states = tf.reshape(value_states, proj_shape)\n\n src_len = shape_list(key_states)[1]\n attn_weights = tf.matmul(query_states, key_states, transpose_b=True)\n\n # The tf.debugging asserts are not compliant with XLA then they\n # have to be disabled in other modes than eager.\n if tf.executing_eagerly():\n tf.debugging.assert_equal(\n shape_list(attn_weights),\n [bsz * self.num_heads, tgt_len, src_len],\n message=f\"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {shape_list(attn_weights)}\",\n )\n\n if attention_mask is not None:\n # The tf.debugging asserts are not compliant with XLA then they\n # have to be disabled in other modes than eager.\n if tf.executing_eagerly():\n tf.debugging.assert_equal(\n shape_list(attention_mask),\n [bsz, 1, tgt_len, src_len],\n message=f\"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {shape_list(attention_mask)}\",\n )\n\n attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype)\n attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask\n attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))\n\n attn_weights = tf.nn.softmax(attn_weights, axis=-1)\n\n if layer_head_mask is not None:\n # The tf.debugging asserts are not compliant with XLA then they\n # have to be disabled in other modes than eager.\n if tf.executing_eagerly():\n tf.debugging.assert_equal(\n shape_list(layer_head_mask),\n [self.num_heads],\n message=f\"Head mask for a single layer should be of size {(self.num_heads)}, but is {shape_list(layer_head_mask)}\",\n )\n\n attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape(\n attn_weights, (bsz, self.num_heads, tgt_len, src_len)\n )\n attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))\n\n attn_probs = self.dropout(attn_weights, training=training)\n attn_output = tf.matmul(attn_probs, value_states)\n\n # The tf.debugging asserts are not compliant with XLA then they\n # have to be disabled in other modes than eager.\n if tf.executing_eagerly():\n tf.debugging.assert_equal(\n shape_list(attn_output),\n [bsz * self.num_heads, tgt_len, self.head_dim],\n message=f\"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {shape_list(attn_output)}\",\n )\n\n attn_output = tf.transpose(\n tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3)\n )\n attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim))\n\n attn_output = self.out_proj(attn_output)\n attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len))\n\n return attn_output, attn_weights, past_key_value\n\n\n# Copied from transformers.models.mbart.modeling_tf_mbart.TFMBartEncoderLayer with MBart->Pegasus\nclass TFPegasusEncoderLayer(tf.keras.layers.Layer):\n def __init__(self, config: PegasusConfig, **kwargs):\n super().__init__(**kwargs)\n self.embed_dim = config.d_model\n self.self_attn = TFPegasusAttention(\n self.embed_dim, 
config.encoder_attention_heads, dropout=config.attention_dropout, name=\"self_attn\"\n )\n self.self_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name=\"self_attn_layer_norm\")\n self.dropout = tf.keras.layers.Dropout(config.dropout)\n self.activation_fn = get_tf_activation(config.activation_function)\n self.activation_dropout = tf.keras.layers.Dropout(config.activation_dropout)\n self.fc1 = tf.keras.layers.Dense(config.encoder_ffn_dim, name=\"fc1\")\n self.fc2 = tf.keras.layers.Dense(self.embed_dim, name=\"fc2\")\n self.final_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name=\"final_layer_norm\")\n\n def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, layer_head_mask: tf.Tensor, training=False):\n \"\"\"\n Args:\n hidden_states (:obj:`tf.Tensor`): input to the layer of shape `(seq_len, batch, embed_dim)`\n attention_mask (:obj:`tf.Tensor`): attention mask of size\n `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\n layer_head_mask (:obj:`tf.Tensor`): mask for attention heads in a given layer of size\n `(encoder_attention_heads,)`\n \"\"\"\n residual = hidden_states\n hidden_states = self.self_attn_layer_norm(hidden_states)\n hidden_states, self_attn_weights, _ = self.self_attn(\n hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask\n )\n\n # The tf.debugging asserts are not compliant with XLA then they\n # have to be disabled in other modes than eager.\n if tf.executing_eagerly():\n tf.debugging.assert_equal(\n shape_list(hidden_states),\n shape_list(residual),\n message=f\"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}\",\n )\n\n hidden_states = self.dropout(hidden_states, training=training)\n hidden_states = residual + hidden_states\n\n residual = hidden_states\n hidden_states = self.final_layer_norm(hidden_states)\n hidden_states = self.activation_fn(self.fc1(hidden_states))\n hidden_states = self.activation_dropout(hidden_states, training=training)\n hidden_states = self.fc2(hidden_states)\n hidden_states = self.dropout(hidden_states, training=training)\n hidden_states = residual + hidden_states\n\n return hidden_states, self_attn_weights\n\n\n# Copied from transformers.models.mbart.modeling_tf_mbart.TFMBartDecoderLayer with MBart->Pegasus\nclass TFPegasusDecoderLayer(tf.keras.layers.Layer):\n def __init__(self, config: PegasusConfig, **kwargs):\n super().__init__(**kwargs)\n self.embed_dim = config.d_model\n self.self_attn = TFPegasusAttention(\n embed_dim=self.embed_dim,\n num_heads=config.decoder_attention_heads,\n dropout=config.attention_dropout,\n name=\"self_attn\",\n is_decoder=True,\n )\n self.dropout = tf.keras.layers.Dropout(config.dropout)\n self.activation_fn = get_tf_activation(config.activation_function)\n self.activation_dropout = tf.keras.layers.Dropout(config.activation_dropout)\n\n self.self_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name=\"self_attn_layer_norm\")\n self.encoder_attn = TFPegasusAttention(\n self.embed_dim,\n config.decoder_attention_heads,\n dropout=config.attention_dropout,\n name=\"encoder_attn\",\n is_decoder=True,\n )\n self.encoder_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name=\"encoder_attn_layer_norm\")\n self.fc1 = tf.keras.layers.Dense(config.decoder_ffn_dim, name=\"fc1\")\n self.fc2 = tf.keras.layers.Dense(self.embed_dim, name=\"fc2\")\n self.final_layer_norm = 
tf.keras.layers.LayerNormalization(epsilon=1e-5, name=\"final_layer_norm\")\n\n def call(\n self,\n hidden_states,\n attention_mask: Optional[tf.Tensor] = None,\n encoder_hidden_states: Optional[tf.Tensor] = None,\n encoder_attention_mask: Optional[tf.Tensor] = None,\n layer_head_mask: Optional[tf.Tensor] = None,\n cross_attn_layer_head_mask: Optional[tf.Tensor] = None,\n past_key_value: Optional[Tuple[tf.Tensor]] = None,\n training=False,\n ) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]:\n \"\"\"\n Args:\n hidden_states (:obj:`tf.Tensor`): input to the layer of shape `(seq_len, batch, embed_dim)`\n attention_mask (:obj:`tf.Tensor`): attention mask of size\n `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\n encoder_hidden_states (:obj:`tf.Tensor`): cross attention input to the layer of shape `(seq_len, batch, embed_dim)`\n encoder_attention_mask (:obj:`tf.Tensor`): encoder attention mask of size\n `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\n layer_head_mask (:obj:`tf.Tensor`): mask for attention heads in a given layer of size\n `(decoder_attention_heads,)`\n cross_attn_layer_head_mask (:obj:`tf.Tensor`): mask for heads of the cross-attention module.\n `(decoder_attention_heads,)`\n past_key_value (:obj:`Tuple(tf.Tensor)`): cached past key and value projection states\n \"\"\"\n residual = hidden_states\n hidden_states = self.self_attn_layer_norm(hidden_states)\n\n # Self Attention\n # decoder uni-directional self-attention cached key/values tuple is at positions 1,2\n self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None\n # add present self-attn cache to positions 1,2 of present_key_value tuple\n hidden_states, self_attn_weights, present_key_value = self.self_attn(\n hidden_states=hidden_states,\n past_key_value=self_attn_past_key_value,\n attention_mask=attention_mask,\n layer_head_mask=layer_head_mask,\n )\n hidden_states = self.dropout(hidden_states, training=training)\n hidden_states = residual + hidden_states\n\n # Cross-Attention Block\n cross_attn_present_key_value = None\n cross_attn_weights = None\n if encoder_hidden_states is not None:\n residual = hidden_states\n hidden_states = self.encoder_attn_layer_norm(hidden_states)\n\n # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple\n cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None\n hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(\n hidden_states=hidden_states,\n key_value_states=encoder_hidden_states,\n attention_mask=encoder_attention_mask,\n layer_head_mask=cross_attn_layer_head_mask,\n past_key_value=cross_attn_past_key_value,\n )\n hidden_states = self.dropout(hidden_states, training=training)\n hidden_states = residual + hidden_states\n\n # add cross-attn to positions 3,4 of present_key_value tuple\n present_key_value = present_key_value + cross_attn_present_key_value\n\n # Fully Connected\n residual = hidden_states\n hidden_states = self.final_layer_norm(hidden_states)\n hidden_states = self.activation_fn(self.fc1(hidden_states))\n hidden_states = self.activation_dropout(hidden_states, training=training)\n hidden_states = self.fc2(hidden_states)\n hidden_states = self.dropout(hidden_states, training=training)\n hidden_states = residual + hidden_states\n\n return (\n hidden_states,\n self_attn_weights,\n cross_attn_weights,\n present_key_value,\n )\n\n\nclass 
TFPegasusPreTrainedModel(TFPreTrainedModel):\n config_class = PegasusConfig\n base_model_prefix = \"model\"\n\n @property\n def dummy_inputs(self):\n pad_token = 1\n input_ids = tf.cast(tf.convert_to_tensor(DUMMY_INPUTS), tf.int32)\n decoder_input_ids = tf.cast(tf.convert_to_tensor(DUMMY_INPUTS), tf.int32)\n dummy_inputs = {\n \"decoder_input_ids\": decoder_input_ids,\n \"attention_mask\": tf.math.not_equal(input_ids, pad_token),\n \"input_ids\": input_ids,\n }\n return dummy_inputs\n\n @tf.function(\n input_signature=[\n {\n \"input_ids\": tf.TensorSpec((None, None), tf.int32, name=\"input_ids\"),\n \"attention_mask\": tf.TensorSpec((None, None), tf.int32, name=\"attention_mask\"),\n \"decoder_input_ids\": tf.TensorSpec((None, None), tf.int32, name=\"decoder_input_ids\"),\n \"decoder_attention_mask\": tf.TensorSpec((None, None), tf.int32, name=\"decoder_attention_mask\"),\n }\n ]\n )\n # Copied from transformers.models.bart.modeling_tf_bart.TFBartPretrainedModel.serving\n def serving(self, inputs):\n output = self.call(inputs)\n\n return self.serving_output(output)\n\n\nPEGASUS_START_DOCSTRING = r\"\"\"\n This model inherits from :class:`~transformers.TFPreTrainedModel`. Check the superclass documentation for the\n generic methods the library implements for all its model (such as downloading or saving, resizing the input\n embeddings, pruning heads etc.)\n\n This model is also a `tf.keras.Model <https://www.tensorflow.org/api_docs/python/tf/keras/Model>`__ subclass. Use\n it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage\n and behavior.\n\n .. note::\n\n TF 2.0 models accepts two formats as inputs:\n\n - having all inputs as keyword arguments (like PyTorch models), or\n - having all inputs as a list, tuple or dict in the first positional arguments.\n\n This second option is useful when using :meth:`tf.keras.Model.fit` method which currently requires having all\n the tensors in the first argument of the model call function: :obj:`model(inputs)`.\n\n If you choose this second option, there are three possibilities you can use to gather all the input Tensors in\n the first positional argument :\n\n - a single Tensor with :obj:`input_ids` only and nothing else: :obj:`model(input_ids)`\n - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:\n :obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])`\n - a dictionary with one or several input Tensors associated to the input names given in the docstring:\n :obj:`model({\"input_ids\": input_ids, \"token_type_ids\": token_type_ids})`\n\n Args:\n config (:class:`~transformers.PegasusConfig`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the :meth:`~transformers.TFPreTrainedModel.from_pretrained` method to load the\n model weights.\n\"\"\"\n\nPEGASUS_GENERATION_EXAMPLE = r\"\"\"\n Summarization example::\n\n >>> from transformers import PegasusTokenizer, TFPegasusForConditionalGeneration\n\n >>> model = TFPegasusForConditionalGeneration.from_pretrained('google/pegasus-xsum')\n >>> tokenizer = PegasusTokenizer.from_pretrained('google/pegasus-xsum')\n\n >>> ARTICLE_TO_SUMMARIZE = (\n ... \"PG&E stated it scheduled the blackouts in response to forecasts for high winds \"\n ... \"amid dry conditions. The aim is to reduce the risk of wildfires. 
Nearly 800 thousand customers were \"\n ... \"scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.\"\n ... )\n >>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors='tf')\n\n >>> # Generate Summary\n >>> summary_ids = model.generate(inputs['input_ids'])\n >>> print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in summary_ids])\n\"\"\"\n\nPEGASUS_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`tf.Tensor` of shape :obj:`({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using :class:`~transformers.PegasusTokenizer`. See\n :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for\n details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`tf.Tensor` of shape :obj:`({0})`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n decoder_input_ids (:obj:`tf.Tensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):\n Indices of decoder input sequence tokens in the vocabulary.\n\n Indices can be obtained using :class:`~transformers.PegasusTokenizer`. See\n :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for\n details.\n\n `What are decoder input IDs? <../glossary.html#decoder-input-ids>`__\n\n Pegasus uses the :obj:`pad_token_id` as the starting token for :obj:`decoder_input_ids` generation. If\n :obj:`past_key_values` is used, optionally only the last :obj:`decoder_input_ids` have to be input (see\n :obj:`past_key_values`).\n decoder_attention_mask (:obj:`tf.Tensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):\n will be made by default and ignore pad tokens. It is not recommended to set this for most use cases.\n head_mask (:obj:`tf.Tensor` of shape :obj:`(encoder_layers, encoder_attention_heads)`, `optional`):\n Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in ``[0, 1]``:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n decoder_head_mask (:obj:`tf.Tensor` of shape :obj:`(decoder_layers, decoder_attention_heads)`, `optional`):\n Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in ``[0, 1]``:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n cross_attn_head_mask (:obj:`tf.Tensor` of shape :obj:`(decoder_layers, decoder_attention_heads)`, `optional`):\n Mask to nullify selected heads of the cross-attention modules. Mask values selected in ``[0, 1]``:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n encoder_outputs (:obj:`tf.FloatTensor`, `optional`):\n hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.\n of shape :obj:`(batch_size, sequence_length, hidden_size)` is a sequence of\n past_key_values (:obj:`Tuple[Tuple[tf.Tensor]]` of length :obj:`config.n_layers`)\n contains precomputed key and value hidden states of the attention blocks. 
Can be used to speed up decoding.\n If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`\n (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`\n instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.\n use_cache (:obj:`bool`, `optional`, defaults to :obj:`True`):\n If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up\n decoding (see :obj:`past_key_values`). Set to :obj:`False` during training, :obj:`True` during generation\n output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all\n attention layers. See ``attentions`` under returned tensors for more detail. This argument can be used only\n in eager mode, in graph mode the value in the config will be used instead.\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned\n tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the\n config will be used instead.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for\n more detail. This argument can be used only in eager mode, in graph mode the value in the config will be\n used instead.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. This\n argument can be used in eager mode, in graph mode the value will always be set to True.\n training (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Whether or not to use the model in training mode (some modules like dropout modules have different\n behaviors between training and evaluation).\n\"\"\"\n\n\n@keras_serializable\nclass TFPegasusEncoder(tf.keras.layers.Layer):\n config_class = PegasusConfig\n \"\"\"\n Transformer encoder consisting of *config.encoder_layers* self attention layers. 
Each layer is a\n :class:`TFPegasusEncoderLayer`.\n\n Args:\n config: PegasusConfig\n \"\"\"\n\n def __init__(self, config: PegasusConfig, embed_tokens: Optional[TFSharedEmbeddings] = None, **kwargs):\n super().__init__(**kwargs)\n self.config = config\n self.dropout = tf.keras.layers.Dropout(config.dropout)\n self.layerdrop = config.encoder_layerdrop\n self.padding_idx = config.pad_token_id\n self.max_source_positions = config.max_position_embeddings\n self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0\n\n self.embed_tokens = embed_tokens\n self.embed_positions = TFPegasusSinusoidalPositionalEmbedding(\n config.max_position_embeddings,\n config.d_model,\n name=\"embed_positions\",\n )\n self.layers = [TFPegasusEncoderLayer(config, name=f\"layers.{i}\") for i in range(config.encoder_layers)]\n self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name=\"layer_norm\")\n\n def get_embed_tokens(self):\n return self.embed_tokens\n\n def set_embed_tokens(self, embed_tokens):\n self.embed_tokens = embed_tokens\n\n def call(\n self,\n input_ids=None,\n inputs_embeds=None,\n attention_mask=None,\n head_mask=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n training=False,\n **kwargs,\n ):\n \"\"\"\n Args:\n input_ids (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you\n provide it.\n\n Indices can be obtained using :class:`~transformers.PegasusTokenizer`. See\n :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`\n for details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n head_mask (:obj:`tf.Tensor` of shape :obj:`(encoder_layers, encoder_attention_heads)`, `optional):\n Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded\n representation. This is useful if you want more control over how to convert :obj:`input_ids` indices\n into associated vectors than the model's internal embedding lookup matrix.\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under\n returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value\n in the config will be used instead.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors\n for more detail. This argument can be used only in eager mode, in graph mode the value in the config\n will be used instead.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. 
This\n argument can be used in eager mode, in graph mode the value will always be set to True.\n training (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Whether or not to use the model in training mode (some modules like dropout modules have different\n behaviors between training and evaluation).\n \"\"\"\n inputs = input_processing(\n func=self.call,\n config=self.config,\n input_ids=input_ids,\n attention_mask=attention_mask,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n training=training,\n kwargs_call=kwargs,\n )\n\n if inputs[\"input_ids\"] is not None and inputs[\"inputs_embeds\"] is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif inputs[\"input_ids\"] is not None:\n input_shape = shape_list(inputs[\"input_ids\"])\n elif inputs[\"inputs_embeds\"] is not None:\n input_shape = shape_list(inputs[\"inputs_embeds\"])[:-1]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n if inputs[\"inputs_embeds\"] is None:\n inputs[\"inputs_embeds\"] = self.embed_tokens(inputs[\"input_ids\"]) * self.embed_scale\n\n embed_pos = self.embed_positions(input_shape)\n hidden_states = inputs[\"inputs_embeds\"] + embed_pos\n hidden_states = self.dropout(hidden_states, training=inputs[\"training\"])\n\n # check attention mask and invert\n if inputs[\"attention_mask\"] is not None:\n # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]\n attention_mask = _expand_mask(inputs[\"attention_mask\"])\n else:\n attention_mask = None\n\n encoder_states = () if inputs[\"output_hidden_states\"] else None\n all_attentions = () if inputs[\"output_attentions\"] else None\n\n # check if head_mask has a correct number of layers specified if desired\n # The tf.debugging asserts are not compliant with XLA then they\n # have to be disabled in other modes than eager.\n if inputs[\"head_mask\"] is not None and tf.executing_eagerly():\n tf.debugging.assert_equal(\n shape_list(inputs[\"head_mask\"])[0],\n len(self.layers),\n message=f\"The head_mask should be specified for {len(self.layers)} layers, but it is for {shape_list(inputs['head_mask'])[0]}.\",\n )\n\n # encoder layers\n for idx, encoder_layer in enumerate(self.layers):\n\n if inputs[\"output_hidden_states\"]:\n encoder_states = encoder_states + (hidden_states,)\n # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)\n dropout_probability = random.uniform(0, 1)\n if inputs[\"training\"] and (dropout_probability < self.layerdrop): # skip the layer\n continue\n\n hidden_states, attn = encoder_layer(\n hidden_states,\n attention_mask,\n inputs[\"head_mask\"][idx] if inputs[\"head_mask\"] is not None else None,\n )\n\n if inputs[\"output_attentions\"]:\n all_attentions += (attn,)\n\n hidden_states = self.layer_norm(hidden_states)\n\n if inputs[\"output_hidden_states\"]:\n encoder_states = encoder_states + (hidden_states,)\n\n if not inputs[\"return_dict\"]:\n return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)\n return TFBaseModelOutput(\n last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions\n )\n\n\n@keras_serializable\nclass TFPegasusDecoder(tf.keras.layers.Layer):\n config_class = PegasusConfig\n \"\"\"\n Transformer decoder consisting of *config.decoder_layers* layers. 
Each layer is a :class:`TFPegasusDecoderLayer`\n\n Args:\n config: PegasusConfig\n embed_tokens: output embedding\n \"\"\"\n\n def __init__(self, config: PegasusConfig, embed_tokens: Optional[TFSharedEmbeddings] = None, **kwargs):\n super().__init__(**kwargs)\n self.config = config\n self.padding_idx = config.pad_token_id\n self.embed_tokens = embed_tokens\n self.layerdrop = config.decoder_layerdrop\n self.embed_positions = TFPegasusSinusoidalPositionalEmbedding(\n config.max_position_embeddings,\n config.d_model,\n name=\"embed_positions\",\n )\n self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0\n self.layers = [TFPegasusDecoderLayer(config, name=f\"layers.{i}\") for i in range(config.decoder_layers)]\n self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name=\"layer_norm\")\n\n self.dropout = tf.keras.layers.Dropout(config.dropout)\n\n def get_embed_tokens(self):\n return self.embed_tokens\n\n def set_embed_tokens(self, embed_tokens):\n self.embed_tokens = embed_tokens\n\n def call(\n self,\n input_ids=None,\n inputs_embeds=None,\n attention_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n head_mask=None,\n cross_attn_head_mask=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n training=False,\n **kwargs,\n ):\n r\"\"\"\n Args:\n input_ids (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you\n provide it.\n\n Indices can be obtained using :class:`~transformers.PegasusTokenizer`. See\n :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`\n for details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n encoder_hidden_states (:obj:`tf.Tensor` of shape :obj:`(batch_size, encoder_sequence_length, hidden_size)`, `optional`):\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention\n of the decoder.\n encoder_attention_mask (:obj:`tf.Tensor` of shape :obj:`(batch_size, encoder_sequence_length)`, `optional`):\n Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values\n selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n head_mask (:obj:`tf.Tensor` of shape :obj:`(decoder_layers, decoder_attention_heads)`, `optional`):\n Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n cross_attn_head_mask (:obj:`tf.Tensor` of shape :obj:`(decoder_layers, decoder_attention_heads)`, `optional`):\n Mask to nullify selected heads of the cross-attention modules. 
Mask values selected in ``[0, 1]``:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n past_key_values (:obj:`Tuple[Tuple[tf.Tensor]]` of length :obj:`config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up\n decoding.\n\n If :obj:`past_key_values` are used, the user can optionally input only the last\n :obj:`decoder_input_ids` (those that don't have their past key value states given to this model) of\n shape :obj:`(batch_size, 1)` instead of all :obj:`decoder_input_ids`` of shape :obj:`(batch_size,\n sequence_length)`.\n inputs_embeds (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded\n representation. This is useful if you want more control over how to convert :obj:`input_ids` indices\n into associated vectors than the model's internal embedding lookup matrix.\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under\n returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value\n in the config will be used instead.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors\n for more detail. This argument can be used only in eager mode, in graph mode the value in the config\n will be used instead.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. 
This\n argument can be used in eager mode, in graph mode the value will always be set to True.\n training (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Whether or not to use the model in training mode (some modules like dropout modules have different\n behaviors between training and evaluation).\n \"\"\"\n inputs = input_processing(\n func=self.call,\n config=self.config,\n input_ids=input_ids,\n attention_mask=attention_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n head_mask=head_mask,\n cross_attn_head_mask=cross_attn_head_mask,\n inputs_embeds=inputs_embeds,\n past_key_values=past_key_values,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n training=training,\n kwargs_call=kwargs,\n )\n\n if inputs[\"input_ids\"] is not None and inputs[\"inputs_embeds\"] is not None:\n raise ValueError(\"You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time\")\n elif inputs[\"input_ids\"] is not None:\n input_shape = shape_list(inputs[\"input_ids\"])\n elif inputs[\"inputs_embeds\"] is not None:\n input_shape = shape_list(inputs[\"inputs_embeds\"])[:-1]\n else:\n raise ValueError(\"You have to specify either decoder_input_ids or decoder_inputs_embeds\")\n\n past_key_values_length = (\n shape_list(inputs[\"past_key_values\"][0][0])[2] if inputs[\"past_key_values\"] is not None else 0\n )\n\n # embed positions\n positions = self.embed_positions(input_shape, past_key_values_length)\n\n if inputs[\"inputs_embeds\"] is None:\n inputs[\"inputs_embeds\"] = self.embed_tokens(inputs[\"input_ids\"]) * self.embed_scale\n\n hidden_states = inputs[\"inputs_embeds\"]\n\n # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]\n if input_shape[-1] > 1:\n combined_attention_mask = _make_causal_mask(input_shape, past_key_values_length=past_key_values_length)\n else:\n combined_attention_mask = _expand_mask(\n tf.ones((input_shape[0], input_shape[1] + past_key_values_length)), tgt_len=input_shape[-1]\n )\n\n if inputs[\"attention_mask\"] is not None:\n combined_attention_mask = combined_attention_mask + _expand_mask(\n inputs[\"attention_mask\"], tgt_len=input_shape[-1]\n )\n\n if inputs[\"encoder_hidden_states\"] is not None and inputs[\"encoder_attention_mask\"] is not None:\n # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]\n inputs[\"encoder_attention_mask\"] = _expand_mask(inputs[\"encoder_attention_mask\"], tgt_len=input_shape[-1])\n\n hidden_states = self.dropout(hidden_states + positions, training=inputs[\"training\"])\n\n # decoder layers\n all_hidden_states = () if inputs[\"output_hidden_states\"] else None\n all_self_attns = () if inputs[\"output_attentions\"] else None\n all_cross_attns = () if (inputs[\"output_attentions\"] and inputs[\"encoder_hidden_states\"] is not None) else None\n present_key_values = () if inputs[\"use_cache\"] else None\n\n # check if head_mask and cross_attn_head_mask have a correct number of layers specified if desired\n # The tf.debugging asserts are not compliant with XLA then they\n # have to be disabled in other modes than eager.\n for attn_mask in [\"head_mask\", \"cross_attn_head_mask\"]:\n if inputs[attn_mask] is not None and tf.executing_eagerly():\n tf.debugging.assert_equal(\n shape_list(inputs[attn_mask])[0],\n len(self.layers),\n message=f\"The {attn_mask} should be specified for {len(self.layers)} layers, but it is for {shape_list(inputs[attn_mask])[0]}.\",\n )\n\n for idx, 
decoder_layer in enumerate(self.layers):\n # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)\n if inputs[\"output_hidden_states\"]:\n all_hidden_states += (hidden_states,)\n dropout_probability = random.uniform(0, 1)\n\n if inputs[\"training\"] and (dropout_probability < self.layerdrop):\n continue\n\n past_key_value = inputs[\"past_key_values\"][idx] if inputs[\"past_key_values\"] is not None else None\n\n hidden_states, layer_self_attn, layer_cross_attn, present_key_value = decoder_layer(\n hidden_states,\n attention_mask=combined_attention_mask,\n encoder_hidden_states=inputs[\"encoder_hidden_states\"],\n encoder_attention_mask=inputs[\"encoder_attention_mask\"],\n layer_head_mask=inputs[\"head_mask\"][idx] if inputs[\"head_mask\"] is not None else None,\n cross_attn_layer_head_mask=inputs[\"cross_attn_head_mask\"][idx]\n if inputs[\"cross_attn_head_mask\"] is not None\n else None,\n past_key_value=past_key_value,\n )\n\n if inputs[\"use_cache\"]:\n present_key_values += (present_key_value,)\n\n if inputs[\"output_attentions\"]:\n all_self_attns += (layer_self_attn,)\n\n if inputs[\"encoder_hidden_states\"] is not None:\n all_cross_attns += (layer_cross_attn,)\n\n hidden_states = self.layer_norm(hidden_states)\n\n if inputs[\"output_hidden_states\"]:\n all_hidden_states += (hidden_states,)\n\n if inputs[\"output_attentions\"]:\n all_self_attns = list(all_self_attns)\n\n if inputs[\"encoder_hidden_states\"] is not None:\n all_cross_attns = list(all_cross_attns)\n\n if inputs[\"use_cache\"]:\n present_key_values = (inputs[\"encoder_hidden_states\"], present_key_values)\n\n if not inputs[\"return_dict\"]:\n return hidden_states, present_key_values, all_hidden_states, all_self_attns, all_cross_attns\n else:\n return TFBaseModelOutputWithPastAndCrossAttentions(\n last_hidden_state=hidden_states,\n past_key_values=present_key_values,\n hidden_states=all_hidden_states,\n attentions=all_self_attns,\n cross_attentions=all_cross_attns,\n )\n\n\n@keras_serializable\nclass TFPegasusMainLayer(tf.keras.layers.Layer):\n config_class = PegasusConfig\n\n def __init__(self, config: PegasusConfig, **kwargs):\n super().__init__(**kwargs)\n\n self.config = config\n self.shared = TFSharedEmbeddings(config.vocab_size, config.d_model, config.pad_token_id, name=\"model.shared\")\n\n with tf.compat.v1.variable_scope(\"model.shared\") as shared_abs_scope_name:\n pass\n\n # Wraps layer to avoid problems with weight restoring and ensuring we're in the correct TF scope.\n embed_tokens = TFWrappedEmbeddings(self.shared, abs_scope_name=shared_abs_scope_name)\n embed_tokens.vocab_size = self.shared.vocab_size\n embed_tokens.hidden_size = self.shared.hidden_size\n\n self.encoder = TFPegasusEncoder(config, embed_tokens, name=\"encoder\")\n self.decoder = TFPegasusDecoder(config, embed_tokens, name=\"decoder\")\n\n def get_input_embeddings(self):\n return self.shared\n\n def set_input_embeddings(self, new_embeddings):\n self.shared.weight = new_embeddings\n self.shared.vocab_size = self.shared.weight.shape[0]\n # retrieve correct absolute scope for embed token wrapper\n with tf.compat.v1.variable_scope(\"model.shared\") as shared_abs_scope_name:\n pass\n # Wraps layer to avoid problems with weight restoring and ensuring we're in the correct TF scope.\n embed_tokens = TFWrappedEmbeddings(self.shared, abs_scope_name=shared_abs_scope_name)\n self.encoder.set_embed_tokens(embed_tokens)\n self.decoder.set_embed_tokens(embed_tokens)\n\n def call(\n self,\n input_ids=None,\n attention_mask=None,\n 
decoder_input_ids=None,\n decoder_attention_mask=None,\n head_mask=None,\n decoder_head_mask=None,\n cross_attn_head_mask=None,\n encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None,\n past_key_values=None,\n inputs_embeds=None,\n decoder_inputs_embeds=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n training=False,\n **kwargs\n ):\n inputs = input_processing(\n func=self.call,\n config=self.config,\n input_ids=input_ids,\n attention_mask=attention_mask,\n decoder_input_ids=decoder_input_ids,\n decoder_attention_mask=decoder_attention_mask,\n head_mask=head_mask,\n decoder_head_mask=decoder_head_mask,\n cross_attn_head_mask=cross_attn_head_mask,\n encoder_outputs=encoder_outputs,\n past_key_values=past_key_values,\n inputs_embeds=inputs_embeds,\n decoder_inputs_embeds=decoder_inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n training=training,\n kwargs_call=kwargs,\n )\n\n if inputs[\"decoder_input_ids\"] is None and inputs[\"decoder_inputs_embeds\"] is None:\n inputs[\"use_cache\"] = False\n\n inputs[\"output_hidden_states\"] = (\n inputs[\"output_hidden_states\"]\n if inputs[\"output_hidden_states\"] is not None\n else self.config.output_hidden_states\n )\n\n if inputs[\"encoder_outputs\"] is None:\n inputs[\"encoder_outputs\"] = self.encoder(\n input_ids=inputs[\"input_ids\"],\n attention_mask=inputs[\"attention_mask\"],\n head_mask=inputs[\"head_mask\"],\n inputs_embeds=inputs[\"inputs_embeds\"],\n output_attentions=inputs[\"output_attentions\"],\n output_hidden_states=inputs[\"output_hidden_states\"],\n return_dict=inputs[\"return_dict\"],\n training=inputs[\"training\"],\n )\n # If the user passed a tuple for encoder_outputs, we wrap it in a TFBaseModelOutput when return_dict=True\n elif inputs[\"return_dict\"] and not isinstance(inputs[\"encoder_outputs\"], TFBaseModelOutput):\n inputs[\"encoder_outputs\"] = TFBaseModelOutput(\n last_hidden_state=inputs[\"encoder_outputs\"][0],\n hidden_states=inputs[\"encoder_outputs\"][1] if len(inputs[\"encoder_outputs\"]) > 1 else None,\n attentions=inputs[\"encoder_outputs\"][2] if len(inputs[\"encoder_outputs\"]) > 2 else None,\n )\n # If the user passed a TFBaseModelOutput for encoder_outputs, we wrap it in a tuple when return_dict=False\n elif not inputs[\"return_dict\"] and not isinstance(inputs[\"encoder_outputs\"], tuple):\n inputs[\"encoder_outputs\"] = inputs[\"encoder_outputs\"].to_tuple()\n\n decoder_outputs = self.decoder(\n inputs[\"decoder_input_ids\"],\n attention_mask=inputs[\"decoder_attention_mask\"],\n encoder_hidden_states=inputs[\"encoder_outputs\"][0],\n encoder_attention_mask=inputs[\"attention_mask\"],\n head_mask=inputs[\"decoder_head_mask\"],\n cross_attn_head_mask=inputs[\"cross_attn_head_mask\"],\n past_key_values=inputs[\"past_key_values\"],\n inputs_embeds=inputs[\"decoder_inputs_embeds\"],\n use_cache=inputs[\"use_cache\"],\n output_attentions=inputs[\"output_attentions\"],\n output_hidden_states=inputs[\"output_hidden_states\"],\n return_dict=inputs[\"return_dict\"],\n training=inputs[\"training\"],\n )\n\n if not inputs[\"return_dict\"]:\n return decoder_outputs + inputs[\"encoder_outputs\"]\n\n return TFSeq2SeqModelOutput(\n last_hidden_state=decoder_outputs.last_hidden_state,\n past_key_values=decoder_outputs.past_key_values,\n decoder_hidden_states=decoder_outputs.hidden_states,\n decoder_attentions=decoder_outputs.attentions,\n 
cross_attentions=decoder_outputs.cross_attentions,\n encoder_last_hidden_state=inputs[\"encoder_outputs\"].last_hidden_state,\n encoder_hidden_states=inputs[\"encoder_outputs\"].hidden_states,\n encoder_attentions=inputs[\"encoder_outputs\"].attentions,\n )\n\n\n@add_start_docstrings(\n \"The bare PEGASUS Model outputting raw hidden-states without any specific head on top.\",\n PEGASUS_START_DOCSTRING,\n)\nclass TFPegasusModel(TFPegasusPreTrainedModel):\n def __init__(self, config: PegasusConfig, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n\n self.model = TFPegasusMainLayer(config, name=\"model\")\n\n def get_encoder(self):\n return self.model.encoder\n\n def get_decoder(self):\n return self.model.decoder\n\n @add_start_docstrings_to_model_forward(PEGASUS_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=TFSeq2SeqModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def call(\n self,\n input_ids=None,\n attention_mask=None,\n decoder_input_ids=None,\n decoder_attention_mask=None,\n head_mask=None,\n decoder_head_mask=None,\n cross_attn_head_mask=None,\n encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None,\n past_key_values=None,\n inputs_embeds=None,\n decoder_inputs_embeds=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n training=False,\n **kwargs\n ):\n inputs = input_processing(\n func=self.call,\n config=self.config,\n input_ids=input_ids,\n attention_mask=attention_mask,\n decoder_input_ids=decoder_input_ids,\n decoder_attention_mask=decoder_attention_mask,\n head_mask=head_mask,\n decoder_head_mask=decoder_head_mask,\n cross_attn_head_mask=cross_attn_head_mask,\n encoder_outputs=encoder_outputs,\n past_key_values=past_key_values,\n inputs_embeds=inputs_embeds,\n decoder_inputs_embeds=decoder_inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n training=training,\n kwargs_call=kwargs,\n )\n\n outputs = self.model(\n input_ids=inputs[\"input_ids\"],\n attention_mask=inputs[\"attention_mask\"],\n decoder_input_ids=inputs[\"decoder_input_ids\"],\n decoder_attention_mask=inputs[\"decoder_attention_mask\"],\n head_mask=inputs[\"head_mask\"],\n decoder_head_mask=inputs[\"decoder_head_mask\"],\n cross_attn_head_mask=inputs[\"cross_attn_head_mask\"],\n encoder_outputs=inputs[\"encoder_outputs\"],\n past_key_values=inputs[\"past_key_values\"],\n inputs_embeds=inputs[\"inputs_embeds\"],\n decoder_inputs_embeds=inputs[\"decoder_inputs_embeds\"],\n use_cache=inputs[\"use_cache\"],\n output_attentions=inputs[\"output_attentions\"],\n output_hidden_states=inputs[\"output_hidden_states\"],\n return_dict=inputs[\"return_dict\"],\n training=inputs[\"training\"],\n )\n\n return outputs\n\n # Copied from transformers.models.bart.modeling_tf_bart.TFBartModel.serving_output\n def serving_output(self, output):\n pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None\n dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None\n dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None\n cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None\n enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else 
None\n enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None\n\n return TFSeq2SeqModelOutput(\n last_hidden_state=output.last_hidden_state,\n past_key_values=pkv,\n decoder_hidden_states=dec_hs,\n decoder_attentions=dec_attns,\n cross_attentions=cross_attns,\n encoder_last_hidden_state=output.encoder_last_hidden_state,\n encoder_hidden_states=enc_hs,\n encoder_attentions=enc_attns,\n )\n\n\n@add_start_docstrings(\n \"The PEGASUS Model with a language modeling head. Can be used for summarization.\",\n PEGASUS_START_DOCSTRING,\n)\nclass TFPegasusForConditionalGeneration(TFPegasusPreTrainedModel, TFCausalLanguageModelingLoss):\n _keys_to_ignore_on_load_unexpected = [\n r\"model.encoder.embed_tokens.weight\",\n r\"model.decoder.embed_tokens.weight\",\n ]\n\n def __init__(self, config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n self.model = TFPegasusMainLayer(config, name=\"model\")\n self.use_cache = config.use_cache\n # final_bias_logits is registered as a buffer in pytorch, so not trainable for the the sake of consistency.\n self.final_logits_bias = self.add_weight(\n name=\"final_logits_bias\", shape=[1, config.vocab_size], initializer=\"zeros\", trainable=False\n )\n\n def get_decoder(self):\n return self.model.decoder\n\n def get_encoder(self):\n return self.model.encoder\n\n def get_output_embeddings(self):\n return self.get_input_embeddings()\n\n def set_output_embeddings(self, value):\n self.set_input_embeddings(value)\n\n def get_bias(self):\n return {\"final_logits_bias\": self.final_logits_bias}\n\n def set_bias(self, value):\n self.final_logits_bias = value[\"final_logits_bias\"]\n\n @add_start_docstrings_to_model_forward(PEGASUS_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)\n @add_end_docstrings(PEGASUS_GENERATION_EXAMPLE)\n def call(\n self,\n input_ids=None,\n attention_mask=None,\n decoder_input_ids=None,\n decoder_attention_mask=None,\n head_mask=None,\n decoder_head_mask=None,\n cross_attn_head_mask=None,\n encoder_outputs: Optional[TFBaseModelOutput] = None,\n past_key_values=None,\n inputs_embeds=None,\n decoder_inputs_embeds=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n labels=None,\n training=False,\n **kwargs,\n ):\n \"\"\"\n labels (:obj:`tf.tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the masked language modeling loss. Indices should either be in ``[0, ...,\n config.vocab_size]`` or -100 (see ``input_ids`` docstring). 
Tokens with indices set to ``-100`` are ignored\n (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``.\n\n Returns:\n\n \"\"\"\n inputs = input_processing(\n func=self.call,\n config=self.config,\n input_ids=input_ids,\n attention_mask=attention_mask,\n decoder_input_ids=decoder_input_ids,\n decoder_attention_mask=decoder_attention_mask,\n head_mask=head_mask,\n decoder_head_mask=decoder_head_mask,\n cross_attn_head_mask=cross_attn_head_mask,\n encoder_outputs=encoder_outputs,\n past_key_values=past_key_values,\n inputs_embeds=inputs_embeds,\n decoder_inputs_embeds=decoder_inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n labels=labels,\n training=training,\n kwargs_call=kwargs,\n )\n\n if inputs[\"labels\"] is not None:\n inputs[\"labels\"] = tf.where(\n inputs[\"labels\"] == self.config.pad_token_id,\n tf.fill(shape_list(inputs[\"labels\"]), -100),\n inputs[\"labels\"],\n )\n inputs[\"use_cache\"] = False\n if inputs[\"decoder_input_ids\"] is None:\n inputs[\"decoder_input_ids\"] = shift_tokens_right(\n inputs[\"labels\"], self.config.pad_token_id, self.config.decoder_start_token_id\n )\n\n outputs = self.model(\n inputs[\"input_ids\"],\n attention_mask=inputs[\"attention_mask\"],\n decoder_input_ids=inputs[\"decoder_input_ids\"],\n encoder_outputs=inputs[\"encoder_outputs\"],\n decoder_attention_mask=inputs[\"decoder_attention_mask\"],\n head_mask=inputs[\"head_mask\"],\n decoder_head_mask=inputs[\"decoder_head_mask\"],\n cross_attn_head_mask=inputs[\"cross_attn_head_mask\"],\n past_key_values=inputs[\"past_key_values\"],\n inputs_embeds=inputs[\"inputs_embeds\"],\n decoder_inputs_embeds=inputs[\"decoder_inputs_embeds\"],\n use_cache=inputs[\"use_cache\"],\n output_attentions=inputs[\"output_attentions\"],\n output_hidden_states=inputs[\"output_hidden_states\"],\n return_dict=inputs[\"return_dict\"],\n training=inputs[\"training\"],\n )\n lm_logits = self.model.shared(outputs[0], mode=\"linear\")\n lm_logits = lm_logits + self.final_logits_bias\n masked_lm_loss = None if inputs[\"labels\"] is None else self.compute_loss(inputs[\"labels\"], lm_logits)\n\n if not inputs[\"return_dict\"]:\n output = (lm_logits,) + outputs[1:]\n return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output\n return TFSeq2SeqLMOutput(\n loss=masked_lm_loss,\n logits=lm_logits,\n past_key_values=outputs.past_key_values, # index 1 of d outputs\n decoder_hidden_states=outputs.decoder_hidden_states, # index 2 of d outputs\n decoder_attentions=outputs.decoder_attentions, # index 3 of d outputs\n cross_attentions=outputs.cross_attentions, # index 4 of d outputs\n encoder_last_hidden_state=outputs.encoder_last_hidden_state, # index 0 of encoder outputs\n encoder_hidden_states=outputs.encoder_hidden_states, # 1 of e out\n encoder_attentions=outputs.encoder_attentions, # 2 of e out\n )\n\n # Copied from transformers.models.bart.modeling_tf_bart.TFBartForConditionalGeneration.serving_output\n def serving_output(self, output):\n pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None\n dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None\n dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None\n cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None\n enc_hs = 
tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None\n enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None\n\n return TFSeq2SeqLMOutput(\n logits=output.logits,\n past_key_values=pkv,\n decoder_hidden_states=dec_hs,\n decoder_attentions=dec_attns,\n cross_attentions=cross_attns,\n encoder_last_hidden_state=output.encoder_last_hidden_state,\n encoder_hidden_states=enc_hs,\n encoder_attentions=enc_attns,\n )\n\n # Copied from transformers.models.bart.modeling_tf_bart.TFBartForConditionalGeneration.prepare_inputs_for_generation\n def prepare_inputs_for_generation(\n self,\n decoder_input_ids,\n past,\n attention_mask,\n head_mask=None,\n decoder_head_mask=None,\n cross_attn_head_mask=None,\n use_cache=None,\n **kwargs,\n ) -> Dict:\n assert past is not None and len(past) in {1, 2}, f\"past has to be an iterable of length 1,2 got {past}\"\n if len(past) == 1:\n assert isinstance(past[0], tf.Tensor), f\"`past[0]` has to be of type `tf.Tensor`, but is {type(past[0])}\"\n encoder_outputs = TFBaseModelOutput(last_hidden_state=past[0])\n past_key_values = None\n else:\n assert (\n len(past) == 2\n ), \"`past` has to be of length 2 with the encoder_outputs at the first position and past_key_values at the second position.\"\n encoder_outputs, past_key_values = past\n if isinstance(encoder_outputs, tuple):\n assert isinstance(\n encoder_outputs[0], tf.Tensor\n ), f\"`encoder_outputs[0]` has to be of type `tf.Tensor`, but is {type(encoder_outputs[0])}\"\n encoder_outputs = TFBaseModelOutput(last_hidden_state=encoder_outputs[0])\n elif isinstance(encoder_outputs, tf.Tensor):\n encoder_outputs = TFBaseModelOutput(last_hidden_state=encoder_outputs)\n assert (\n past_key_values\n ), f\"decoder cached states must be truthy. got {past_key_values} from the 2nd element of past\"\n decoder_input_ids = decoder_input_ids[:, -1:]\n\n assert isinstance(\n encoder_outputs, TFBaseModelOutput\n ), f\"encoder_outputs should be a TFBaseModelOutput, Instead got {type(encoder_outputs)}.\"\n return {\n \"input_ids\": None, # encoder_outputs is defined. input_ids not needed\n \"encoder_outputs\": encoder_outputs,\n \"past_key_values\": past_key_values,\n \"decoder_input_ids\": decoder_input_ids,\n \"attention_mask\": attention_mask,\n \"head_mask\": head_mask,\n \"decoder_head_mask\": decoder_head_mask,\n \"cross_attn_head_mask\": cross_attn_head_mask,\n \"use_cache\": use_cache, # change this to avoid caching (presumably for debugging)\n }\n\n def prepare_decoder_input_ids_from_labels(self, labels: tf.Tensor):\n return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)\n\n @staticmethod\n # Copied from transformers.models.bart.modeling_tf_bart.TFBartForConditionalGeneration._reorder_cache\n def _reorder_cache(past, beam_idx):\n if len(past) == 1:\n return past\n\n past_key_values = past[1]\n\n reordered_past = ()\n for layer_past_key_values in past_key_values:\n reordered_past += (\n tuple(tf.gather(layer_past_key_value, beam_idx) for layer_past_key_value in layer_past_key_values[:2])\n + layer_past_key_values[2:],\n )\n return (past[0], reordered_past)\n"
] | [
[
"tensorflow.convert_to_tensor",
"tensorflow.keras.layers.LayerNormalization",
"tensorflow.concat",
"tensorflow.control_dependencies",
"tensorflow.zeros",
"tensorflow.cast",
"tensorflow.math.not_equal",
"tensorflow.tuple",
"numpy.sin",
"tensorflow.stop_gradient",
"tensorflow.gather",
"tensorflow.compat.v1.variable_scope",
"tensorflow.tile",
"tensorflow.matmul",
"tensorflow.executing_eagerly",
"numpy.power",
"tensorflow.keras.layers.Dense",
"tensorflow.identity",
"tensorflow.nn.softmax",
"tensorflow.constant",
"tensorflow.range",
"tensorflow.reshape",
"tensorflow.ones",
"numpy.cos",
"tensorflow.keras.layers.Dropout",
"tensorflow.TensorSpec"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
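The record above lists TensorFlow ops such as tf.range, tf.cast, tf.concat and tf.tile among the APIs used by the Pegasus decoder. As an illustrative sketch only (not the repository's `_make_causal_mask` itself; the shapes and the `large_negative` constant are assumptions), a causal decoder attention mask in the same spirit can be built like this:

    import tensorflow as tf

    def make_causal_mask(batch_size, tgt_len, past_len=0, large_negative=-1e9):
        # Lower-triangular visibility: position i may attend to positions <= i.
        i = tf.range(tgt_len)[:, None]
        j = tf.range(tgt_len)[None, :]
        mask = tf.cast(i >= j, tf.float32)            # (tgt_len, tgt_len), 1 = visible
        mask = (1.0 - mask) * large_negative          # 0 where visible, very negative elsewhere
        if past_len > 0:
            # Positions cached in past_key_values are always visible.
            past = tf.zeros((tgt_len, past_len), dtype=tf.float32)
            mask = tf.concat([past, mask], axis=-1)
        # Broadcast to (batch_size, 1, tgt_len, past_len + tgt_len), the layout attention layers expect.
        return tf.tile(mask[None, None, :, :], [batch_size, 1, 1, 1])

    print(make_causal_mask(2, 3, past_len=1).shape)   # (2, 1, 3, 4)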
mrksbrg/adas-pro-sivic | [
"fb4bbd4f39b58e42c3d47494fb4116a3e7fced0d"
] | [
"scripts/compile_prosivic_results.py"
] | [
"import os\nimport statistics\nimport csv\nfrom collections import Counter\nimport pandas as pd\nimport numpy as np\n\nclass ExpSetup:\n\n def __init__(self, ped_x, ped_y, ped_orient, ped_speed, car_speed, min_dist, min_ttc, min_dist_awa, det, col):\n self.ped_x = ped_x\n self.ped_y = ped_y\n self.ped_orient = ped_orient\n self.ped_speed = ped_speed\n self.car_speed = car_speed\n self.min_dist_counter = Counter([min_dist])\n self.min_dist = [min_dist]\n self.min_ttc = [min_ttc]\n self.min_ttc_counter = Counter([min_ttc])\n self.min_dist_awa = [min_dist_awa]\n self.min_dist_awa_counter = Counter(([min_dist_awa]))\n self.detection = [det]\n self.collision = [col]\n self.nbr_results = 1\n\n self.results = Counter([ExpResult(min_dist, min_ttc, min_dist_awa, det, col)])\n\n def __str__(self):\n return \"### Scenario (x0P=\" + str(self.ped_x) + \", y0P=\" + str(self.ped_y) + \", Th0P=\" + str(self.ped_orient) + \", v0P=\" + str(self.ped_speed) + \", v0C=\" + str(self.car_speed) + \") ###\"\n\n def __eq__(self, other):\n return self.ped_x == other.ped_x and self.ped_y == other.ped_y and self.ped_orient == other.ped_orient \\\n and self.ped_speed == other.ped_speed and self.car_speed == other.car_speed\n\n def __lt__(self, other):\n return self.ped_x < other.ped_x\n\n def add_result(self, min_dist, min_ttc, min_dist_awa, det, col):\n self.min_dist.append(min_dist)\n self.min_dist_counter.update([min_dist])\n self.min_ttc.append(min_ttc)\n self.min_ttc_counter.update([min_ttc])\n self.min_dist_awa.append(min_dist_awa)\n self.min_dist_awa_counter.update([min_dist_awa])\n self.detection.append(det)\n self.collision.append(col)\n self.nbr_results += 1\n\n self.results.update([ExpResult(min_dist, min_ttc, min_dist_awa, det, col)])\n\n def get_nbr_results(self):\n return self.nbr_results\n\n def get_results(self):\n return self.results\n\n def get_nbr_unique_results(self):\n unique_list_of1 = []\n unique_list_of2 = []\n unique_list_of3 = []\n for x in self.min_dist:\n if x not in unique_list_of1:\n unique_list_of1.append(x)\n for y in self.min_ttc:\n if y not in unique_list_of2:\n unique_list_of2.append(y)\n for z in self.min_dist_awa:\n if z not in unique_list_of3:\n unique_list_of3.append(z)\n return {'of1': unique_list_of1, 'of2': unique_list_of2, 'of3': unique_list_of3}\n\n def get_avg_min_dist(self):\n sum = 0\n for res in self.min_dist:\n sum += res\n return sum / len(self.min_dist)\n\n def get_sd_min_dist(self):\n if len(self.min_dist) == 1:\n return 0\n else:\n return statistics.stdev(self.min_dist)\n\n def get_avg_min_ttc(self):\n sum = 0\n for res in self.min_ttc:\n sum += res\n return sum / len(self.min_ttc)\n\n def get_sd_min_ttc(self):\n if len(self.min_ttc) == 1:\n return 0\n else:\n return statistics.stdev(self.min_ttc)\n\n def get_avg_min_dist_awa(self):\n sum = 0\n for res in self.min_dist_awa:\n sum += res\n return sum / len(self.min_dist_awa)\n\n def get_sd_min_dist_awa(self):\n if len(self.min_dist_awa) == 1:\n return 0\n else:\n return statistics.stdev(self.min_dist_awa)\n\n def get_nbr_detections(self):\n sum = 0\n for res in self.detection:\n sum += res\n return sum\n\n def get_nbr_collisions(self):\n sum = 0\n for res in self.collision:\n sum += res\n return sum\n\n @property\n def get_ped_x(self):\n return self.ped_x\n\n @property\n def get_ped_y(self):\n return self.ped_y\n\n @property\n def get_ped_orient(self):\n return self.ped_orient\n\n @property\n def get_ped_speed(self):\n return self.ped_speed\n\n @property\n def get_car_speed(self):\n return self.car_speed\n\n 
@property\n def get_of1_counter(self):\n return self.min_dist_counter\n\n\nclass ExpResult:\n\n def __init__(self, min_dist, min_ttc, min_dist_awa, det, col):\n self.min_dist = min_dist\n self.min_ttc = min_ttc\n self.min_dist_awa = min_dist_awa\n self.detection = det\n self.collision = col\n\n @property\n def get_min_dist(self):\n return self.min_dist\n\n @property\n def get_min_ttc(self):\n return self.min_ttc\n\n @property\n def get_min_dist_awa(self):\n return self.min_dist_awa\n\n @property\n def get_detected(self):\n return self.detection\n\n @property\n def get_collision(self):\n return self.collision\n\n def __str__(self):\n return \"\\tOF1=\" + str(self.min_dist) + \", OF2=\" + str(self.min_ttc) + \", OF3=\" + str(self.min_dist_awa) + \", Detection=\" + str(self.detection) + \", Collision=\" + str(self.collision)\n\n def __eq__(self, other):\n return self.min_dist == other.min_dist and self.min_ttc == other.min_ttc and self.min_dist_awa == other.min_dist_awa \\\n and self.detection == other.detection and self.collision == other.collision\n\n def __lt__(self, other):\n return self.min_dist < other.min_dist\n\n def __hash__(self):\n return hash((self.min_dist, self.min_ttc, self.min_dist_awa, self.detection, self.collision))\n\ndir_name = 'prosivic_results'\nresult_dataframes = []\nscenario_results = []\n\nfor filename in os.listdir(dir_name):\n if filename.endswith(\".csv\"):\n df = pd.read_csv(dir_name + \"\\\\\" + filename)\n for index, row in df.iterrows():\n exp_setup = ExpSetup(row['ped_x'], row['ped_y'], row['ped_orient'], row['ped_speed'], row['car_speed'], row['of1'], row['of2'], row['of3'], row['detection'], row['collision'])\n if exp_setup not in scenario_results:\n scenario_results.append(exp_setup)\n else:\n #print(\"Adding results to: \" + str(conf))\n i = scenario_results.index(exp_setup)\n scenario_results[i].add_result(row['of1'], row['of2'], row['of3'], row['detection'], row['collision'])\n\nwith open('mode_prosivic_results.csv', mode='w') as merged_file:\n mode_writer = csv.writer(merged_file, delimiter=',')\n mode_writer.writerow(['x0P', 'y0P', 'Th0P', 'v0P', 'v0C', 'OF1', 'OF2', 'OF3', 'det', 'col', 'conf'])\n\n #merge_writer.writerow(['x0P', 'y0P', 'Th0P', 'v0P', 'v0C', 'nbr', 'OF1_unique', 'OF1_avg', 'OF1_sd', 'OF2_unique', 'OF2_avg', 'OF2_sd', 'OF3_unique', 'OF3_avg', 'OF3_sd', 'det_true', 'det_false', 'col_true', 'col_false'])\n\n for exp_setup in scenario_results:\n print(\"\\n\" + str(exp_setup))\n print(\"\\tNumber of results: \" + str(exp_setup.get_nbr_results()))\n res = exp_setup.get_results()\n for result, count in res.most_common():\n print(\"\\t\" + str(count) + \"x:\" + str(result))\n\n unique_per_of = exp_setup.get_nbr_unique_results()\n print(\"\\t\\t# Result per objective function #\")\n print(\"\\t\\tmin_dist:\\t\\tUnique = \" + str(len(unique_per_of[\"of1\"])) + \"\\tAvg = \" + str(exp_setup.get_avg_min_dist()) + \"\\tSD = \" + str(exp_setup.get_sd_min_dist()))\n print(\"\\t\\t\\tCounter min_dist: \" + str(exp_setup.min_dist_counter))\n print(\"\\t\\tmin_ttc:\\t\\tUnique = \" + str(len(unique_per_of[\"of2\"])) + \"\\tAvg = \" + str(exp_setup.get_avg_min_ttc()) + \"\\tSD = \" + str(exp_setup.get_sd_min_ttc()))\n print(\"\\t\\t\\tCounter min_ttc: \" + str(exp_setup.min_ttc_counter))\n print(\"\\t\\tmin_dist_awa:\\tUnique = \" + str(len(unique_per_of[\"of3\"])) + \"\\tAvg = \" + str(exp_setup.get_avg_min_dist_awa()) + \"\\tSD = \" + str(exp_setup.get_sd_min_dist_awa()))\n print(\"\\t\\t\\tCounter min_dist_awa: \" + 
str(exp_setup.min_dist_awa_counter))\n print(\"\\t\\tNumber detections: \" + str(exp_setup.get_nbr_detections()) + \" (out of \" + str(exp_setup.get_nbr_results()) + \" = \" + str(100 * (exp_setup.get_nbr_detections()/exp_setup.get_nbr_results())) + \"%)\")\n print(\"\\t\\tNumber collisions: \" + str(exp_setup.get_nbr_collisions()) + \" (out of \" + str(exp_setup.get_nbr_results()) + \" = \" + str(100 * (exp_setup.get_nbr_collisions()/exp_setup.get_nbr_results())) + \"%)\")\n\n mode_result = res.most_common(1)[0][0] # this is the most common ExpResult (first element in first tuple in first element in the Counter)\n conf = (res.most_common(1)[0][1]/exp_setup.get_nbr_results()) # this is the count of the most common results divided by the total number\n\n mode_writer.writerow([exp_setup.ped_x, exp_setup.ped_y, exp_setup.ped_orient, exp_setup.ped_speed, exp_setup.car_speed, mode_result.min_dist, mode_result.min_ttc, mode_result.min_dist_awa, mode_result.detection, mode_result.collision, conf])\n #merge_writer.writerow([exp_setup.ped_x, exp_setup.ped_y, exp_setup.ped_orient, exp_setup.ped_speed, exp_setup.car_speed, exp_setup.get_nbr_results(), len(unique_per_of[\"of1\"]), exp_setup.get_avg_min_dist(), exp_setup.get_sd_min_dist(), len(unique_per_of[\"of2\"]), exp_setup.get_avg_min_ttc(), exp_setup.get_sd_min_ttc(), len(unique_per_of[\"of3\"]), exp_setup.get_avg_min_dist_awa(), exp_setup.get_sd_min_dist_awa(), exp_setup.get_nbr_detections(), (exp_setup.get_nbr_results() - exp_setup.get_nbr_detections()), exp_setup.get_nbr_collisions(), (exp_setup.get_nbr_results() - exp_setup.get_nbr_collisions())])\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
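The compile_prosivic_results.py record above reduces repeated simulation runs of one scenario to their modal outcome plus a confidence value via collections.Counter. A minimal self-contained sketch of that aggregation (the result tuples below are invented sample data, not values from the dataset):

    from collections import Counter

    results = Counter([(9.8, 1.2, 9.8, 1, 0),
                       (9.8, 1.2, 9.8, 1, 0),
                       (7.5, 0.9, 7.5, 1, 1)])
    mode_result, count = results.most_common(1)[0]   # most frequent (OF1, OF2, OF3, det, col) tuple
    confidence = count / sum(results.values())       # share of runs that produced the mode
    print(mode_result, confidence)                   # (9.8, 1.2, 9.8, 1, 0) 0.666...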
PeruBhardwaj/AttributionAttack | [
"0d5ca334c611c5e067029a3f8907f2d91255ddde"
] | [
"KGEAttack/ConvE/l2_del.py"
] | [
"#!/usr/bin/env python\n# coding: utf-8\n\n# In this notebook, I delete a triple from the neighbourhood of the target triple based on the **L2 metric = euclidean distance** between the candidate triple's embedding and the target triple's embedding\n# \n# - 'triple' embedding is computed by applying the model's scoring function to embeddings\n# - neighbourhood refers to the triples that share the entities with target's entities\n# \n# \n\n# In[1]:\n\n\nimport pickle\nfrom typing import Dict, Tuple, List\nimport os\nimport numpy as np\nimport pandas as pd\nfrom collections import defaultdict\nimport operator\n\nimport json\nimport logging\nimport argparse \nimport math\nfrom pprint import pprint\nimport errno\nimport time \n\nimport torch\nfrom torch.utils.data import DataLoader\nimport torch.backends.cudnn as cudnn\n\nfrom torch import nn\nfrom torch.nn import CrossEntropyLoss\nfrom torch.nn import functional as F\nimport torch.autograd as autograd\n\nfrom evaluation import evaluation\nfrom model import Distmult, Complex, Conve, Transe\nimport utils\n\n\ndef generate_nghbrs(test_set, train_set):\n '''\n For every triple in test set, return the index of \n neighbouring triple in training set,\n i.e. indices in training set are returned\n '''\n n_dict = {}\n for t, triple in enumerate(test_set):\n sub = triple[0]\n obj = triple[2]\n mask = (np.isin(train_set[:,0], [sub, obj]) | np.isin(train_set[:,2], [sub, obj]))\n #nghbrs_dict[t] = pro_train[mask]\n mask_idx = np.where(mask)[0]\n n_dict[t] = mask_idx\n \n return n_dict \n\ndef get_deletions(train_data, test_data, neighbours, model, attack_batch_size):\n logger.info('------ Generating edits per target triple ------')\n start_time = time.time()\n logger.info('Start time: {0}'.format(str(start_time)))\n \n triples_to_delete = []\n for test_idx, test_trip in enumerate(test_data):\n test_nghbrs = neighbours[test_idx]\n nghbr_trip = train_data[test_nghbrs]\n test_trip = test_trip[None, :] # add a batch dimension\n test_trip = torch.from_numpy(test_trip).to(device)\n test_s, test_r, test_o = test_trip[:,0], test_trip[:,1], test_trip[:,2]\n test_vec = model.score_triples_vec(test_s, test_r, test_o)\n\n b_begin = 0\n nghbr_dist = []\n if attack_batch_size == -1:\n nghbr_batch = nghbr_trip.shape[0]\n else:\n nghbr_batch = args.attack_batch_size\n\n while b_begin < nghbr_trip.shape[0]:\n b_nghbr_trip = nghbr_trip[b_begin : b_begin+nghbr_batch]\n b_nghbr_trip = torch.from_numpy(b_nghbr_trip).to(device)\n b_nghbr_s, b_nghbr_r, b_nghbr_o = b_nghbr_trip[:,0], b_nghbr_trip[:,1], b_nghbr_trip[:,2]\n b_nghbr_vec = model.score_triples_vec(b_nghbr_s, b_nghbr_r, b_nghbr_o)\n # shape of nghbr_vec is (num_nghbrs x emb_dim) e.g. 
(459 x 100)\n # shape of test vec is (1 x emb_dim)\n #b_dist = -torch.cdist(test_vec, b_nghbr_vec).squeeze() \n b_dist = -torch.norm((b_nghbr_vec-test_vec), p=2, dim=-1)\n b_dist = b_dist.detach().cpu().numpy().tolist()\n nghbr_dist += b_dist\n b_begin += nghbr_batch \n\n nghbr_dist = np.array(nghbr_dist)\n nghbr_dist = torch.from_numpy(nghbr_dist).to(device)\n # we want to remove the neighbour with maximum norm similarity\n max_values, argsort = torch.sort(nghbr_dist, -1, descending=True)\n del_idx = argsort[0]\n triple_to_delete = nghbr_trip[del_idx]\n\n triples_to_delete.append(triple_to_delete)\n if test_idx%100 == 0 or test_idx == test_data.shape[0]-1:\n logger.info('Processed test triple {0}'.format(str(test_idx)))\n logger.info('Time taken: {0}'.format(str(time.time() - start_time)))\n logger.info('Time taken to generate edits: {0}'.format(str(time.time() - start_time))) \n \n return triples_to_delete\n\n\n\nif __name__ == '__main__':\n\n\n parser = utils.get_argument_parser()\n parser.add_argument('--target-split', type=str, default='0_100_1', help='Ranks to use for target set. Values are 0 for ranks==1; 1 for ranks <=10; 2 for ranks>10 and ranks<=100. Default: 1')\n parser.add_argument('--budget', type=int, default=1, help='Budget for each target triple for each corruption side')\n parser.add_argument('--rand-run', type=int, default=1, help='A number assigned to the random run of experiment')\n parser.add_argument('--attack-batch-size', type=int, default=-1, help='Batch size for processing neighbours of target')\n\n args = parser.parse_args()\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n args.device = device\n\n # args.target_split = '0_100_1' # which target split to use \n #Values are 1 for ranks <=10; 2 for ranks>10 and ranks<=100.\n # args.budget = 1 #indicates the num of adversarial edits for each target triple for each corruption side\n # args.rand_run = 1 # a number assigned to the random run of the experiment\n args.seed = args.seed + (args.rand_run - 1) # default seed is 17\n\n # args.model = 'distmult'\n # args.data = 'WN18RR'\n\n if args.reproduce_results:\n args = utils.set_hyperparams(args) \n\n\n # Fixing random seeds for reproducibility -https://pytorch.org/docs/stable/notes/randomness.html\n torch.manual_seed(args.seed)\n cudnn.deterministic = True\n cudnn.benchmark = False\n np.random.seed(args.seed)\n rng = np.random.default_rng(seed=args.seed)\n\n\n args.epochs = -1 #no training here\n model_name = '{0}_{1}_{2}_{3}_{4}'.format(args.model, args.embedding_dim, args.input_drop, args.hidden_drop, args.feat_drop)\n model_path = 'saved_models/{0}_{1}.model'.format(args.data, model_name)\n log_path = 'logs/attack_logs/l2_del_{0}_{1}_{2}_{3}_{4}'.format( args.model, args.data, \n args.target_split, args.budget, args.rand_run)\n\n\n logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt = '%m/%d/%Y %H:%M:%S',\n level = logging.INFO,\n filename = log_path\n )\n logger = logging.getLogger(__name__)\n\n\n data_path = 'data/target_{0}_{1}_{2}'.format(args.model, args.data, args.target_split)\n\n n_ent, n_rel, ent_to_id, rel_to_id = utils.generate_dicts(data_path)\n\n ##### load data####\n data = utils.load_data(data_path)\n train_data, valid_data, test_data = data['train'], data['valid'], data['test']\n\n inp_f = open(os.path.join(data_path, 'to_skip_eval.pickle'), 'rb')\n to_skip_eval: Dict[str, Dict[Tuple[int, int], List[int]]] = pickle.load(inp_f)\n inp_f.close()\n to_skip_eval['lhs'] = {(int(k[0]), 
int(k[1])): v for k,v in to_skip_eval['lhs'].items()}\n to_skip_eval['rhs'] = {(int(k[0]), int(k[1])): v for k,v in to_skip_eval['rhs'].items()}\n\n\n\n model = utils.load_model(model_path, args, n_ent, n_rel, device)\n\n neighbours = generate_nghbrs(test_data, train_data) \n # test set is the target set because we loaded data from target_...\n\n\n triples_to_delete = get_deletions(train_data, test_data, neighbours, \n model, args.attack_batch_size)\n\n\n df = pd.DataFrame(data=triples_to_delete)\n df = df.drop_duplicates()\n # print(df.shape)\n trips_to_delete = df.values\n # print(trips_to_delete.shape)\n num_duplicates = len(triples_to_delete) - trips_to_delete.shape[0]\n # print(num_duplicates)\n\n\n\n per_tr_1, n_ignored_edits = utils.perturb_data(train_data, \n trips_to_delete)\n\n\n # Perturbed dataset\n logger.info('Shape of perturbed training set: {0}'.format(per_tr_1.shape))\n logger.info('Number of adversarial deletions ignored (because of singleton nodes): {0}'.format(n_ignored_edits))\n logger.info('Number of duplicate adversarial deletions : {0}'.format(num_duplicates))\n\n\n\n logger.info ('Length of original training set: ' + str(train_data.shape[0]))\n logger.info ('Length of new poisoned training set: ' + str(per_tr_1.shape[0]))\n\n\n save_path = 'data/l2_del_{0}_{1}_{2}_{3}_{4}'.format( args.model, args.data, \n args.target_split, args.budget, args.rand_run)\n\n\n try :\n os.makedirs(save_path)\n except OSError as e:\n if e.errno == errno.EEXIST:\n logger.info(e)\n logger.info('Using the existing folder {0} for processed data'.format(save_path))\n else:\n raise\n\n\n new_train = per_tr_1\n num_en_or = np.unique(np.concatenate((train_data[:,0], train_data[:,2]))).shape[0]\n num_en_pos = np.unique(np.concatenate((new_train[:,0], new_train[:,2]))).shape[0]\n\n\n with open(os.path.join(save_path, 'train.txt'), 'w') as out:\n for item in new_train:\n out.write(\"%s\\n\" % \"\\t\".join(map(str, item)))\n\n out = open(os.path.join(save_path, 'train.pickle'), 'wb')\n pickle.dump(new_train.astype('uint64'), out)\n out.close()\n\n\n with open(os.path.join(save_path, 'entities_dict.json'), 'w') as f:\n f.write(json.dumps(ent_to_id) + '\\n')\n\n with open(os.path.join(save_path, 'relations_dict.json'), 'w') as f:\n f.write(json.dumps(rel_to_id) + '\\n')\n\n\n with open(os.path.join(save_path, 'valid.txt'), 'w') as out:\n for item in valid_data:\n out.write(\"%s\\n\" % \"\\t\".join(map(str, item)))\n\n out = open(os.path.join(save_path, 'valid.pickle'), 'wb')\n pickle.dump(valid_data.astype('uint64'), out)\n out.close()\n\n\n with open(os.path.join(save_path, 'test.txt'), 'w') as out:\n for item in test_data:\n out.write(\"%s\\n\" % \"\\t\".join(map(str, item)))\n\n out = open(os.path.join(save_path, 'test.pickle'), 'wb')\n pickle.dump(test_data.astype('uint64'), out)\n out.close()\n\n\n with open(os.path.join(save_path, 'stats.txt'), 'w') as f:\n f.write('Model: {0} \\n'.format(args.model))\n f.write('Data: {0} \\n'.format(args.data))\n f.write('Length of original training set: {0} \\n'. format(train_data.shape[0]))\n f.write('Length of new poisoned training set: {0} \\n'. format(new_train.shape[0]))\n f.write('Number of duplicate deletions: {0} \\n'. format(num_duplicates))\n f.write('Number of deletions ignored due to singleton nodes: {0} \\n'. format(n_ignored_edits))\n f.write('Number of entities in original training set: {0} \\n'. format(num_en_or))\n f.write('Number of entities in poisoned training set: {0} \\n'. 
format(num_en_pos))\n f.write('Length of original test set: {0} \\n'. format(test_data.shape[0]))\n f.write('---------------------------------------------------------------------- \\n')\n\n\n\n with open(os.path.join(save_path, 'influential_triples.txt'), 'w') as out:\n for item in triples_to_delete:\n out.write(\"%s\\n\" % \"\\t\".join(map(str, item)))\n\n\n with open(os.path.join(save_path, 'deletions.txt'), 'w') as out:\n for item in trips_to_delete:\n out.write(\"%s\\n\" % \"\\t\".join(map(str, item)))\n\n\n # In[ ]:\n\n\n\n\n\n # In[ ]:\n\n\n\n\n"
] | [
[
"numpy.isin",
"torch.norm",
"numpy.random.seed",
"torch.manual_seed",
"torch.from_numpy",
"pandas.DataFrame",
"numpy.concatenate",
"torch.sort",
"torch.cuda.is_available",
"numpy.array",
"numpy.where",
"numpy.random.default_rng"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
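The l2_del.py record above selects, for each target triple, the neighbouring training triple whose embedding is closest in L2 distance. A small illustrative sketch of that selection rule (random tensors stand in for the outputs of the repository's score_triples_vec; this is not the repo's exact code):

    import torch

    target_vec = torch.randn(1, 100)        # (1, emb_dim)
    nghbr_vecs = torch.randn(459, 100)      # (num_neighbours, emb_dim)
    dist = -torch.norm(nghbr_vecs - target_vec, p=2, dim=-1)   # higher = more similar
    _, order = torch.sort(dist, dim=-1, descending=True)
    del_idx = order[0]                      # index of the neighbour to delete
    print(int(del_idx))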
ydiller/NoMoreNMS | [
"1c1557357e5312c287f0971c840060deb1bcd039"
] | [
"tools/my_runner.py"
] | [
"# Copyright (c) OpenMMLab. All rights reserved.\nimport os.path as osp\nimport platform\nimport shutil\nimport time\nimport warnings\nimport torch\nimport mmcv\nimport wandb\nfrom mmcv.runner.hooks import HOOKS, Hook\nfrom mmcv.runner.base_runner import BaseRunner\nfrom mmcv.runner.builder import RUNNERS\nfrom mmcv.runner.checkpoint import save_checkpoint\nfrom mmcv.runner.utils import get_host_info\nimport copy\nimport logging\nimport os.path as osp\nimport warnings\nfrom abc import ABCMeta, abstractmethod\n\nimport torch\nfrom torch.optim import Optimizer\nimport mmcv\nfrom mmcv.parallel import is_module_wrapper\nfrom mmcv.runner.checkpoint import load_checkpoint\nfrom mmcv.runner.dist_utils import get_dist_info\nfrom mmcv.runner.hooks import HOOKS, Hook\nfrom mmcv.runner.log_buffer import LogBuffer\nfrom mmcv.runner.priority import Priority, get_priority\nfrom mmcv.runner.utils import get_time_str\n\n\[email protected]_module()\nclass MyRunner(BaseRunner):\n \"\"\"Epoch-based Runner.\n\n This runner train models epoch by epoch.\n \"\"\"\n def __init__(self,\n model,\n batch_processor=None,\n optimizer=None,\n work_dir=None,\n logger=None,\n meta=None,\n max_iters=None,\n max_epochs=None,\n with_wandb=None):\n if batch_processor is not None:\n if not callable(batch_processor):\n raise TypeError('batch_processor must be callable, '\n f'but got {type(batch_processor)}')\n warnings.warn(\n 'batch_processor is deprecated, please implement '\n 'train_step() and val_step() in the model instead.',\n DeprecationWarning)\n # raise an error is `batch_processor` is not None and\n # `model.train_step()` exists.\n if is_module_wrapper(model):\n _model = model.module\n else:\n _model = model\n if hasattr(_model, 'train_step') or hasattr(_model, 'val_step'):\n raise RuntimeError(\n 'batch_processor and model.train_step()/model.val_step() '\n 'cannot be both available.')\n else:\n assert hasattr(model, 'train_step')\n\n # check the type of `optimizer`\n if isinstance(optimizer, dict):\n for name, optim in optimizer.items():\n if not isinstance(optim, Optimizer):\n raise TypeError(\n f'optimizer must be a dict of torch.optim.Optimizers, '\n f'but optimizer[\"{name}\"] is a {type(optim)}')\n elif not isinstance(optimizer, Optimizer) and optimizer is not None:\n raise TypeError(\n f'optimizer must be a torch.optim.Optimizer object '\n f'or dict or None, but got {type(optimizer)}')\n\n # check the type of `logger`\n if not isinstance(logger, logging.Logger):\n raise TypeError(f'logger must be a logging.Logger object, '\n f'but got {type(logger)}')\n\n # check the type of `meta`\n if meta is not None and not isinstance(meta, dict):\n raise TypeError(\n f'meta must be a dict or None, but got {type(meta)}')\n\n self.model = model\n self.batch_processor = batch_processor\n self.optimizer = optimizer\n self.logger = logger\n self.meta = meta\n self.with_wandb = with_wandb\n # create work_dir\n if mmcv.is_str(work_dir):\n self.work_dir = osp.abspath(work_dir)\n mmcv.mkdir_or_exist(self.work_dir)\n elif work_dir is None:\n self.work_dir = None\n else:\n raise TypeError('\"work_dir\" must be a str or None')\n\n # get model name from the model class\n if hasattr(self.model, 'module'):\n self._model_name = self.model.module.__class__.__name__\n else:\n self._model_name = self.model.__class__.__name__\n\n self._rank, self._world_size = get_dist_info()\n self.timestamp = get_time_str()\n self.mode = None\n self._hooks = []\n self._epoch = 0\n self._iter = 0\n self._inner_iter = 0\n\n if max_epochs is not None and 
max_iters is not None:\n raise ValueError(\n 'Only one of `max_epochs` or `max_iters` can be set.')\n\n self._max_epochs = max_epochs\n self._max_iters = max_iters\n # TODO: Redesign LogBuffer, it is not flexible and elegant enough\n self.log_buffer = LogBuffer()\n\n def register_optimizer_hook(self, optimizer_config):\n if optimizer_config is None:\n return\n if isinstance(optimizer_config, dict):\n optimizer_config.setdefault('type', 'MyHook')\n hook = mmcv.build_from_cfg(optimizer_config, HOOKS)\n else:\n hook = optimizer_config\n self.register_hook(hook, priority='ABOVE_NORMAL')\n\n\n def run_iter(self, data_batch, train_mode, **kwargs):\n if self.batch_processor is not None:\n outputs = self.batch_processor(\n self.model, data_batch, train_mode=train_mode, **kwargs)\n elif train_mode:\n outputs = self.model.train_step(data_batch, self.optimizer,\n **kwargs)\n else:\n outputs = self.model.val_step(data_batch, self.optimizer, **kwargs)\n if not isinstance(outputs, dict):\n raise TypeError('\"batch_processor()\" or \"model.train_step()\"'\n 'and \"model.val_step()\" must return a dict')\n if 'log_vars' in outputs:\n self.log_buffer.update(outputs['log_vars'], outputs['num_samples'])\n self.outputs = outputs\n\n def train(self, data_loader, **kwargs):\n self.model.train()\n self.mode = 'train'\n self.data_loader = data_loader\n self._max_iters = self._max_epochs * len(self.data_loader)\n self.call_hook('before_train_epoch')\n time.sleep(2) # Prevent possible deadlock during epoch transition\n for i, data_batch in enumerate(self.data_loader):\n self._inner_iter = i\n self.call_hook('before_train_iter')\n self.run_iter(data_batch, train_mode=True, **kwargs)\n self.call_hook('after_train_iter')\n self._iter += 1\n\n self.call_hook('after_train_epoch')\n self._epoch += 1\n\n @torch.no_grad()\n def val(self, data_loader, **kwargs):\n self.model.eval()\n self.mode = 'val'\n self.data_loader = data_loader\n self.call_hook('before_val_epoch')\n time.sleep(2) # Prevent possible deadlock during epoch transition\n for i, data_batch in enumerate(self.data_loader):\n self._inner_iter = i\n self.call_hook('before_val_iter')\n self.run_iter(data_batch, train_mode=False)\n self.call_hook('after_val_iter')\n\n self.call_hook('after_val_epoch')\n if torch.distributed.is_initialized():\n if torch.distributed.get_rank() == 0:\n if self.with_wandb:\n wandb.log({\"CE val loss\": sum(self.log_buffer.val_history['loss_deepsets_ce'])/\n len(self.log_buffer.val_history['loss_deepsets_ce']),\n \"val ds_acc\": sum(self.log_buffer.val_history['ds_acc'])/\n len(self.log_buffer.val_history['ds_acc']),\n \"val iou_error\": sum(self.log_buffer.val_history['iou_error'])/len(self.log_buffer.val_history['iou_error']),\n \"val max score predictions\": sum(self.log_buffer.val_history['ds_pred_on_max'])/\n len(self.log_buffer.val_history['ds_pred_on_max'])\n })\n else: # single gpu\n if self.with_wandb:\n wandb.log({\"CE val loss\": sum(self.log_buffer.val_history['loss_deepsets_ce']) /\n len(self.log_buffer.val_history['loss_deepsets_ce']),\n \"val ds_acc\": sum(self.log_buffer.val_history['ds_acc']) /\n len(self.log_buffer.val_history['ds_acc']),\n \"val iou_error\": sum(self.log_buffer.val_history['iou_error']) / len(\n self.log_buffer.val_history['iou_error']),\n \"val max score predictions\": sum(self.log_buffer.val_history['ds_pred_on_max']) /\n len(self.log_buffer.val_history['ds_pred_on_max'])})\n\n\n def run(self, data_loaders, workflow, max_epochs=None, **kwargs):\n \"\"\"Start running.\n\n Args:\n data_loaders 
(list[:obj:`DataLoader`]): Dataloaders for training\n and validation.\n workflow (list[tuple]): A list of (phase, epochs) to specify the\n running order and epochs. E.g, [('train', 2), ('val', 1)] means\n running 2 epochs for training and 1 epoch for validation,\n iteratively.\n \"\"\"\n assert isinstance(data_loaders, list)\n assert mmcv.is_list_of(workflow, tuple)\n assert len(data_loaders) == len(workflow)\n if max_epochs is not None:\n warnings.warn(\n 'setting max_epochs in run is deprecated, '\n 'please set max_epochs in runner_config', DeprecationWarning)\n self._max_epochs = max_epochs\n\n assert self._max_epochs is not None, (\n 'max_epochs must be specified during instantiation')\n\n for i, flow in enumerate(workflow):\n mode, epochs = flow\n if mode == 'train':\n self._max_iters = self._max_epochs * len(data_loaders[i])\n break\n\n work_dir = self.work_dir if self.work_dir is not None else 'NONE'\n self.logger.info('Start running, host: %s, work_dir: %s',\n get_host_info(), work_dir)\n self.logger.info('Hooks will be executed in the following order:\\n%s',\n self.get_hook_info())\n self.logger.info('workflow: %s, max: %d epochs', workflow,\n self._max_epochs)\n self.call_hook('before_run')\n\n while self.epoch < self._max_epochs:\n for i, flow in enumerate(workflow):\n mode, epochs = flow\n if isinstance(mode, str): # self.train()\n if not hasattr(self, mode):\n raise ValueError(\n f'runner has no method named \"{mode}\" to run an '\n 'epoch')\n epoch_runner = getattr(self, mode)\n else:\n raise TypeError(\n 'mode in workflow must be a str, but got {}'.format(\n type(mode)))\n\n for _ in range(epochs):\n if mode == 'train' and self.epoch >= self._max_epochs:\n break\n epoch_runner(data_loaders[i], **kwargs)\n\n time.sleep(1) # wait for some hooks like loggers to finish\n self.call_hook('after_run')\n\n def save_checkpoint(self,\n out_dir,\n filename_tmpl='end2end_epoch_{}.pth',\n save_optimizer=True,\n meta=None,\n create_symlink=True):\n \"\"\"Save the checkpoint.\n\n Args:\n out_dir (str): The directory that checkpoints are saved.\n filename_tmpl (str, optional): The checkpoint filename template,\n which contains a placeholder for the epoch number.\n Defaults to 'epoch_{}.pth'.\n save_optimizer (bool, optional): Whether to save the optimizer to\n the checkpoint. Defaults to True.\n meta (dict, optional): The meta information to be saved in the\n checkpoint. 
Defaults to None.\n create_symlink (bool, optional): Whether to create a symlink\n \"latest.pth\" to point to the latest checkpoint.\n Defaults to True.\n \"\"\"\n if meta is None:\n meta = {}\n elif not isinstance(meta, dict):\n raise TypeError(\n f'meta should be a dict or None, but got {type(meta)}')\n if self.meta is not None:\n meta.update(self.meta)\n # Note: meta.update(self.meta) should be done before\n # meta.update(epoch=self.epoch + 1, iter=self.iter) otherwise\n # there will be problems with resumed checkpoints.\n # More details in https://github.com/open-mmlab/mmcv/pull/1108\n meta.update(epoch=self.epoch + 1, iter=self.iter)\n\n filename = filename_tmpl.format(self.epoch + 1)\n filepath = osp.join(out_dir, filename)\n optimizer = self.optimizer if save_optimizer else None\n save_checkpoint(self.model, filepath, optimizer=optimizer, meta=meta)\n # in some environments, `os.symlink` is not supported, you may need to\n # set `create_symlink` to False\n if create_symlink:\n dst_file = osp.join(out_dir, 'latest.pth')\n if platform.system() != 'Windows':\n mmcv.symlink(filename, dst_file)\n else:\n shutil.copy(filepath, dst_file)\n\n\n# @RUNNERS.register_module()\n# class Runner(MyRunner):\n# \"\"\"Deprecated name of EpochBasedRunner.\"\"\"\n#\n# def __init__(self, *args, **kwargs):\n# warnings.warn(\n# 'Runner was deprecated, please use EpochBasedRunner instead',\n# DeprecationWarning)\n# super().__init__(*args, **kwargs)\n"
] | [
[
"torch.distributed.get_rank",
"torch.distributed.is_initialized",
"torch.no_grad"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
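The my_runner.py record above averages each metric in the runner's validation log buffer before logging. A tiny sketch of that averaging step, using an invented history dict in place of LogBuffer.val_history; the resulting dict is what would be handed to wandb.log:

    val_history = {
        "loss_deepsets_ce": [0.41, 0.38, 0.40],
        "ds_acc": [0.71, 0.74, 0.69],
        "iou_error": [0.12, 0.10, 0.11],
    }
    averages = {k: sum(v) / len(v) for k, v in val_history.items()}
    print(averages)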
MortonWang/geo_IF | [
"4e27aeb9e005cdfb151777bc730de6d8372d1b7f"
] | [
"data_process/kdtree.py"
] | [
"# -*- coding:utf-8 -*-\nimport copy\nimport numpy as np\n\nfrom scipy._lib.six import xrange\n\n\nclass KDTree:\n def __init__(self, bucket_size, dimensions, parent=None):\n self.bucket_size = bucket_size\n self.parent = None\n self.left = None\n self.right = None\n self.split_dimension = None\n self.split_value = None\n self.index_locations = []\n self.location_count = 0\n self.min_limit = [np.Inf] * dimensions \n self.max_limit = [-np.Inf] * dimensions\n self.dimensions = dimensions\n \n def get_leaf(self, location):\n if not self.left and not self.right:\n return self\n elif location[self.split_dimension] <= self.split_value:\n return self.left.get_leaf(location)\n else:\n return self.right.get_leaf(location) \n \n def add_point(self, index_location_tuple):\n self.index_locations.append(index_location_tuple)\n self.location_count += 1\n self.extendBounds(index_location_tuple[1])\n self.min_boundary = copy.deepcopy(self.min_limit)\n self.max_boundary = copy.deepcopy(self.max_limit)\n \n def extendBounds(self, location):\n # empty\n if self.min_limit == None:\n self.min_limit = copy.deepcopy(location)\n self.max_limit = copy.deepcopy(location)\n return\n for i in xrange(self.dimensions):\n self.min_limit[i] = min(self.min_limit[i], location[i])\n self.max_limit[i] = max(self.max_limit[i], location[i])\n \n def findWidestAxis(self):\n widths = [self.max_limit[i] - self.min_limit[i] for i in range(self.dimensions)]\n widest_axis = np.argmax(widths)\n return widest_axis\n\n def getNodes(self):\n nodes = []\n self.getNodesHelper(nodes)\n return nodes\n \n def getNodesHelper(self, nodes):\n nodes.append(self)\n if self.left:\n self.left.getNodesHelper(nodes)\n if self.right:\n self.right.getNodesHelper(nodes)\n \n def getLeaves(self):\n leaves = []\n self.getLeavesHelper(leaves)\n return leaves\n \n def getLeavesHelper(self, leaves):\n if not self.right and not self.left:\n leaves.append(self)\n else:\n if self.left:\n self.left.getLeavesHelper(leaves)\n if self.right:\n self.right.getLeavesHelper(leaves)\n \n def balance(self):\n self.nodeSplit(self)\n \n def nodeSplit(self, cursor, empty_non_leaf=True):\n if cursor.location_count > cursor.bucket_size:\n cursor.split_dimension = cursor.findWidestAxis()\n # the partition method is the median of all values in the widest dimension\n cursor.split_value = np.median([cursor.index_locations[i][1][cursor.split_dimension] for i in range(cursor.location_count)])\n # if width is 0 (all the values are the same) don't partition\n if cursor.min_limit[cursor.split_dimension] == cursor.max_limit[cursor.split_dimension]:\n return\n # Don't let the split value be the same as the upper value as\n # can happen due to rounding errors!\n if cursor.split_value == cursor.max_limit[cursor.split_dimension]:\n cursor.split_value = cursor.min_limit[cursor.split_dimension]\n cursor.left = KDTree(bucket_size=cursor.bucket_size, dimensions=cursor.dimensions, parent=cursor)\n cursor.right = KDTree(bucket_size=cursor.bucket_size, dimensions=cursor.dimensions, parent=cursor)\n \n cursor.left.min_boundary = copy.deepcopy(cursor.min_boundary)\n cursor.left.max_boundary = copy.deepcopy(cursor.max_boundary)\n cursor.right.min_boundary = copy.deepcopy(cursor.min_boundary)\n cursor.right.max_boundary = copy.deepcopy(cursor.max_boundary)\n cursor.left.max_boundary[cursor.split_dimension] = cursor.split_value\n cursor.right.min_boundary[cursor.split_dimension] = cursor.split_value\n \n for index_loc in cursor.index_locations:\n if index_loc[1][cursor.split_dimension] > 
cursor.split_value:\n cursor.right.index_locations.append(index_loc)\n cursor.right.location_count += 1\n cursor.right.extendBounds(index_loc[1])\n else:\n cursor.left.index_locations.append(index_loc)\n cursor.left.location_count += 1\n cursor.left.extendBounds(index_loc[1])\n if empty_non_leaf:\n cursor.index_locations = []\n cursor.nodeSplit(cursor.left)\n cursor.nodeSplit(cursor.right)\n\n\nclass KDTreeClustering:\n def __init__(self, bucket_size=10):\n self.bucket_size = bucket_size\n self.is_fitted = False\n \n def fit(self, X):\n # X is an array\n if hasattr(X, 'shape'):\n n_samples = X.shape[0]\n dimensions = X.shape[1]\n else:\n n_samples = len(X)\n dimensions = len(X[0])\n \n self.kdtree = KDTree(bucket_size=self.bucket_size, dimensions=dimensions, parent=None)\n for i in xrange(n_samples):\n self.kdtree.add_point((i, X[i]))\n self.kdtree.nodeSplit(cursor=self.kdtree, empty_non_leaf=True)\n self.clusters = [leave.index_locations for leave in self.kdtree.getLeaves()]\n clusters = [cluster.index_locations for cluster in self.kdtree.getLeaves()]\n results = np.zeros((n_samples,), dtype=int)\n for i, id_locs in enumerate(clusters):\n for id, l in id_locs:\n results[id] = i\n self.clusters = results\n self.num_clusters = len(clusters)\n self.is_fitted = True\n \n def get_clusters(self):\n if self.is_fitted:\n return self.clusters\n\n\nif __name__ == '__main__':\n # tree = KDTree(300, 2)\n import params\n import geolocate\n geolocate.initialize(granularity=params.BUCKET_SIZE, write=False, readText=True, reload_init=False, regression=False)\n locations = [geolocate.locationStr2Float(loc) for loc in params.trainUsers.values()]\n clusterer = KDTreeClustering(bucket_size=params.BUCKET_SIZE)\n clusterer.fit(locations)\n clusters = clusterer.get_clusters()\n"
] | [
[
"scipy._lib.six.xrange",
"numpy.zeros",
"numpy.argmax"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
PIN-devel/inside-kids | [
"554e4a0a5654c9a0f5237b904bb2ca6db88a55cb",
"554e4a0a5654c9a0f5237b904bb2ca6db88a55cb"
] | [
"contents/tts/content/TensorflowTTS/tensorflow_tts/utils/group_conv.py",
"contents/tts/content/TensorflowTTS/tensorflow_tts/models/fastspeech.py"
] | [
"# -*- coding: utf-8 -*-\n# This code is copy from https://github.com/tensorflow/tensorflow/pull/36773.\n\"\"\"Group Convolution Modules.\"\"\"\n\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.keras import activations\nfrom tensorflow.python.keras import constraints\nfrom tensorflow.python.keras import initializers\nfrom tensorflow.python.keras import regularizers\nfrom tensorflow.python.keras.engine.base_layer import Layer\nfrom tensorflow.python.keras.engine.input_spec import InputSpec\n\nfrom tensorflow.python.keras.utils import conv_utils\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import nn\nfrom tensorflow.python.ops import nn_ops\n\nfrom tensorflow.python.keras.layers import Conv1D\nfrom tensorflow.python.keras.layers import SeparableConv1D\n\n\nclass Convolution(object):\n \"\"\"Helper class for convolution.\n Note that this class assumes that shapes of input and filter passed to\n __call__ are compatible with input_shape and filter_shape passed to the\n constructor.\n Arguments\n input_shape: static shape of input. i.e. input.get_shape().\n filter_shape: static shape of the filter. i.e. filter.get_shape().\n padding: see convolution.\n strides: see convolution.\n dilation_rate: see convolution.\n name: see convolution.\n data_format: see convolution.\n \"\"\"\n\n def __init__(\n self,\n input_shape,\n filter_shape,\n padding,\n strides=None,\n dilation_rate=None,\n name=None,\n data_format=None,\n ):\n \"\"\"Helper function for convolution.\"\"\"\n num_total_dims = filter_shape.ndims\n if num_total_dims is None:\n num_total_dims = input_shape.ndims\n if num_total_dims is None:\n raise ValueError(\"rank of input or filter must be known\")\n\n num_spatial_dims = num_total_dims - 2\n\n try:\n input_shape.with_rank(num_spatial_dims + 2)\n except ValueError:\n raise ValueError(\"input tensor must have rank %d\" % (num_spatial_dims + 2))\n\n try:\n filter_shape.with_rank(num_spatial_dims + 2)\n except ValueError:\n raise ValueError(\"filter tensor must have rank %d\" % (num_spatial_dims + 2))\n\n if data_format is None or not data_format.startswith(\"NC\"):\n input_channels_dim = tensor_shape.dimension_at_index(\n input_shape, num_spatial_dims + 1\n )\n spatial_dims = range(1, num_spatial_dims + 1)\n else:\n input_channels_dim = tensor_shape.dimension_at_index(input_shape, 1)\n spatial_dims = range(2, num_spatial_dims + 2)\n\n filter_dim = tensor_shape.dimension_at_index(filter_shape, num_spatial_dims)\n if not (input_channels_dim % filter_dim).is_compatible_with(0):\n raise ValueError(\n \"number of input channels is not divisible by corresponding \"\n \"dimension of filter, {} % {} != 0\".format(\n input_channels_dim, filter_dim\n )\n )\n\n strides, dilation_rate = nn_ops._get_strides_and_dilation_rate(\n num_spatial_dims, strides, dilation_rate\n )\n\n self.input_shape = input_shape\n self.filter_shape = filter_shape\n self.data_format = data_format\n self.strides = strides\n self.padding = padding\n self.name = name\n self.dilation_rate = dilation_rate\n self.conv_op = nn_ops._WithSpaceToBatch(\n input_shape,\n dilation_rate=dilation_rate,\n padding=padding,\n build_op=self._build_op,\n filter_shape=filter_shape,\n spatial_dims=spatial_dims,\n data_format=data_format,\n )\n\n def _build_op(self, _, padding):\n return nn_ops._NonAtrousConvolution(\n self.input_shape,\n filter_shape=self.filter_shape,\n padding=padding,\n data_format=self.data_format,\n strides=self.strides,\n name=self.name,\n )\n\n def __call__(self, inp, 
filter):\n return self.conv_op(inp, filter)\n\n\nclass Conv(Layer):\n \"\"\"Abstract N-D convolution layer (private, used as implementation base).\n This layer creates a convolution kernel that is convolved\n (actually cross-correlated) with the layer input to produce a tensor of\n outputs. If `use_bias` is True (and a `bias_initializer` is provided),\n a bias vector is created and added to the outputs. Finally, if\n `activation` is not `None`, it is applied to the outputs as well.\n Note: layer attributes cannot be modified after the layer has been called\n once (except the `trainable` attribute).\n Arguments:\n rank: An integer, the rank of the convolution, e.g. \"2\" for 2D convolution.\n filters: Integer, the dimensionality of the output space (i.e. the number\n of filters in the convolution).\n kernel_size: An integer or tuple/list of n integers, specifying the\n length of the convolution window.\n strides: An integer or tuple/list of n integers,\n specifying the stride length of the convolution.\n Specifying any stride value != 1 is incompatible with specifying\n any `dilation_rate` value != 1.\n padding: One of `\"valid\"`, `\"same\"`, or `\"causal\"` (case-insensitive).\n data_format: A string, one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch_size, ..., channels)` while `channels_first` corresponds to\n inputs with shape `(batch_size, channels, ...)`.\n dilation_rate: An integer or tuple/list of n integers, specifying\n the dilation rate to use for dilated convolution.\n Currently, specifying any `dilation_rate` value != 1 is\n incompatible with specifying any `strides` value != 1.\n groups: Integer, the number of channel groups controlling the connections\n between inputs and outputs. Input channels and `filters` must both be\n divisible by `groups`. For example,\n - At `groups=1`, all inputs are convolved to all outputs.\n - At `groups=2`, the operation becomes equivalent to having two\n convolutional layers side by side, each seeing half the input\n channels, and producing half the output channels, and both\n subsequently concatenated.\n - At `groups=input_channels`, each input channel is convolved with its\n own set of filters, of size `input_channels / filters`\n activation: Activation function to use.\n If you don't specify anything, no activation is applied.\n use_bias: Boolean, whether the layer uses a bias.\n kernel_initializer: An initializer for the convolution kernel.\n bias_initializer: An initializer for the bias vector. If None, the default\n initializer will be used.\n kernel_regularizer: Optional regularizer for the convolution kernel.\n bias_regularizer: Optional regularizer for the bias vector.\n activity_regularizer: Optional regularizer function for the output.\n kernel_constraint: Optional projection function to be applied to the\n kernel after being updated by an `Optimizer` (e.g. used to implement\n norm constraints or value constraints for layer weights). The function\n must take as input the unprojected variable and must return the\n projected variable (which must have the same shape). 
Constraints are\n not safe to use when doing asynchronous distributed training.\n bias_constraint: Optional projection function to be applied to the\n bias after being updated by an `Optimizer`.\n trainable: Boolean, if `True` the weights of this layer will be marked as\n trainable (and listed in `layer.trainable_weights`).\n name: A string, the name of the layer.\n \"\"\"\n\n def __init__(\n self,\n rank,\n filters,\n kernel_size,\n strides=1,\n padding=\"valid\",\n data_format=None,\n dilation_rate=1,\n groups=1,\n activation=None,\n use_bias=True,\n kernel_initializer=\"glorot_uniform\",\n bias_initializer=\"zeros\",\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n trainable=True,\n name=None,\n **kwargs\n ):\n super(Conv, self).__init__(\n trainable=trainable,\n name=name,\n activity_regularizer=regularizers.get(activity_regularizer),\n **kwargs\n )\n self.rank = rank\n if filters is not None and not isinstance(filters, int):\n filters = int(filters)\n self.filters = filters\n self.groups = groups or 1\n if filters is not None and filters % self.groups != 0:\n raise ValueError(\n \"The number of filters must be evenly divisible by the number of \"\n \"groups. Received: groups={}, filters={}\".format(groups, filters)\n )\n self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank, \"kernel_size\")\n if not all(self.kernel_size):\n raise ValueError(\n \"The argument `kernel_size` cannot contain 0(s). \"\n \"Received: %s\" % (kernel_size,)\n )\n self.strides = conv_utils.normalize_tuple(strides, rank, \"strides\")\n self.padding = conv_utils.normalize_padding(padding)\n if self.padding == \"causal\" and not isinstance(self, (Conv1D, SeparableConv1D)):\n raise ValueError(\n \"Causal padding is only supported for `Conv1D`\"\n \"and ``SeparableConv1D`.\"\n )\n self.data_format = conv_utils.normalize_data_format(data_format)\n self.dilation_rate = conv_utils.normalize_tuple(\n dilation_rate, rank, \"dilation_rate\"\n )\n self.activation = activations.get(activation)\n self.use_bias = use_bias\n self.kernel_initializer = initializers.get(kernel_initializer)\n self.bias_initializer = initializers.get(bias_initializer)\n self.kernel_regularizer = regularizers.get(kernel_regularizer)\n self.bias_regularizer = regularizers.get(bias_regularizer)\n self.kernel_constraint = constraints.get(kernel_constraint)\n self.bias_constraint = constraints.get(bias_constraint)\n self.input_spec = InputSpec(ndim=self.rank + 2)\n\n def build(self, input_shape):\n input_shape = tensor_shape.TensorShape(input_shape)\n input_channel = self._get_input_channel(input_shape)\n if input_channel % self.groups != 0:\n raise ValueError(\n \"The number of input channels must be evenly divisible by the number \"\n \"of groups. 
Received groups={}, but the input has {} channels \"\n \"(full input shape is {}).\".format(\n self.groups, input_channel, input_shape\n )\n )\n kernel_shape = self.kernel_size + (input_channel // self.groups, self.filters)\n\n self.kernel = self.add_weight(\n name=\"kernel\",\n shape=kernel_shape,\n initializer=self.kernel_initializer,\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint,\n trainable=True,\n dtype=self.dtype,\n )\n if self.use_bias:\n self.bias = self.add_weight(\n name=\"bias\",\n shape=(self.filters,),\n initializer=self.bias_initializer,\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint,\n trainable=True,\n dtype=self.dtype,\n )\n else:\n self.bias = None\n channel_axis = self._get_channel_axis()\n self.input_spec = InputSpec(\n ndim=self.rank + 2, axes={channel_axis: input_channel}\n )\n\n self._build_conv_op_input_shape = input_shape\n self._build_input_channel = input_channel\n self._padding_op = self._get_padding_op()\n self._conv_op_data_format = conv_utils.convert_data_format(\n self.data_format, self.rank + 2\n )\n self._convolution_op = Convolution(\n input_shape,\n filter_shape=self.kernel.shape,\n dilation_rate=self.dilation_rate,\n strides=self.strides,\n padding=self._padding_op,\n data_format=self._conv_op_data_format,\n )\n self.built = True\n\n def call(self, inputs):\n if self._recreate_conv_op(inputs):\n self._convolution_op = Convolution(\n inputs.get_shape(),\n filter_shape=self.kernel.shape,\n dilation_rate=self.dilation_rate,\n strides=self.strides,\n padding=self._padding_op,\n data_format=self._conv_op_data_format,\n )\n self._build_conv_op_input_shape = inputs.get_shape()\n\n # Apply causal padding to inputs for Conv1D.\n if self.padding == \"causal\" and self.__class__.__name__ == \"Conv1D\":\n inputs = array_ops.pad(inputs, self._compute_causal_padding())\n\n outputs = self._convolution_op(inputs, self.kernel)\n\n if self.use_bias:\n if self.data_format == \"channels_first\":\n if self.rank == 1:\n # nn.bias_add does not accept a 1D input tensor.\n bias = array_ops.reshape(self.bias, (1, self.filters, 1))\n outputs += bias\n else:\n outputs = nn.bias_add(outputs, self.bias, data_format=\"NCHW\")\n else:\n outputs = nn.bias_add(outputs, self.bias, data_format=\"NHWC\")\n\n if self.activation is not None:\n return self.activation(outputs)\n return outputs\n\n def compute_output_shape(self, input_shape):\n input_shape = tensor_shape.TensorShape(input_shape).as_list()\n if self.data_format == \"channels_last\":\n space = input_shape[1:-1]\n new_space = []\n for i in range(len(space)):\n new_dim = conv_utils.conv_output_length(\n space[i],\n self.kernel_size[i],\n padding=self.padding,\n stride=self.strides[i],\n dilation=self.dilation_rate[i],\n )\n new_space.append(new_dim)\n return tensor_shape.TensorShape(\n [input_shape[0]] + new_space + [self.filters]\n )\n else:\n space = input_shape[2:]\n new_space = []\n for i in range(len(space)):\n new_dim = conv_utils.conv_output_length(\n space[i],\n self.kernel_size[i],\n padding=self.padding,\n stride=self.strides[i],\n dilation=self.dilation_rate[i],\n )\n new_space.append(new_dim)\n return tensor_shape.TensorShape([input_shape[0], self.filters] + new_space)\n\n def get_config(self):\n config = {\n \"filters\": self.filters,\n \"kernel_size\": self.kernel_size,\n \"strides\": self.strides,\n \"padding\": self.padding,\n \"data_format\": self.data_format,\n \"dilation_rate\": self.dilation_rate,\n \"groups\": self.groups,\n \"activation\": 
activations.serialize(self.activation),\n \"use_bias\": self.use_bias,\n \"kernel_initializer\": initializers.serialize(self.kernel_initializer),\n \"bias_initializer\": initializers.serialize(self.bias_initializer),\n \"kernel_regularizer\": regularizers.serialize(self.kernel_regularizer),\n \"bias_regularizer\": regularizers.serialize(self.bias_regularizer),\n \"activity_regularizer\": regularizers.serialize(self.activity_regularizer),\n \"kernel_constraint\": constraints.serialize(self.kernel_constraint),\n \"bias_constraint\": constraints.serialize(self.bias_constraint),\n }\n base_config = super(Conv, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n def _compute_causal_padding(self):\n \"\"\"Calculates padding for 'causal' option for 1-d conv layers.\"\"\"\n left_pad = self.dilation_rate[0] * (self.kernel_size[0] - 1)\n if self.data_format == \"channels_last\":\n causal_padding = [[0, 0], [left_pad, 0], [0, 0]]\n else:\n causal_padding = [[0, 0], [0, 0], [left_pad, 0]]\n return causal_padding\n\n def _get_channel_axis(self):\n if self.data_format == \"channels_first\":\n return 1\n else:\n return -1\n\n def _get_input_channel(self, input_shape):\n channel_axis = self._get_channel_axis()\n if input_shape.dims[channel_axis].value is None:\n raise ValueError(\n \"The channel dimension of the inputs \"\n \"should be defined. Found `None`.\"\n )\n return int(input_shape[channel_axis])\n\n def _get_padding_op(self):\n if self.padding == \"causal\":\n op_padding = \"valid\"\n else:\n op_padding = self.padding\n if not isinstance(op_padding, (list, tuple)):\n op_padding = op_padding.upper()\n return op_padding\n\n def _recreate_conv_op(self, inputs):\n \"\"\"Recreate conv_op if necessary.\n Check if the input_shape in call() is different from that in build().\n For the values that are not None, if they are different, recreate\n the _convolution_op to avoid the stateful behavior.\n Args:\n inputs: The input data to call() method.\n Returns:\n `True` or `False` to indicate whether to recreate the conv_op.\n \"\"\"\n call_input_shape = inputs.get_shape()\n for axis in range(1, len(call_input_shape)):\n if (\n call_input_shape[axis] is not None\n and self._build_conv_op_input_shape[axis] is not None\n and call_input_shape[axis] != self._build_conv_op_input_shape[axis]\n ):\n return True\n return False\n\n\nclass GroupConv1D(Conv):\n \"\"\"1D convolution layer (e.g. temporal convolution).\n This layer creates a convolution kernel that is convolved\n with the layer input over a single spatial (or temporal) dimension\n to produce a tensor of outputs.\n If `use_bias` is True, a bias vector is created and added to the outputs.\n Finally, if `activation` is not `None`,\n it is applied to the outputs as well.\n When using this layer as the first layer in a model,\n provide an `input_shape` argument\n (tuple of integers or `None`, e.g.\n `(10, 128)` for sequences of 10 vectors of 128-dimensional vectors,\n or `(None, 128)` for variable-length sequences of 128-dimensional vectors.\n Examples:\n >>> # The inputs are 128-length vectors with 10 timesteps, and the batch size\n >>> # is 4.\n >>> input_shape = (4, 10, 128)\n >>> x = tf.random.normal(input_shape)\n >>> y = tf.keras.layers.Conv1D(\n ... 32, 3, activation='relu',input_shape=input_shape)(x)\n >>> print(y.shape)\n (4, 8, 32)\n Arguments:\n filters: Integer, the dimensionality of the output space\n (i.e. 
the number of output filters in the convolution).\n kernel_size: An integer or tuple/list of a single integer,\n specifying the length of the 1D convolution window.\n strides: An integer or tuple/list of a single integer,\n specifying the stride length of the convolution.\n Specifying any stride value != 1 is incompatible with specifying\n any `dilation_rate` value != 1.\n padding: One of `\"valid\"`, `\"causal\"` or `\"same\"` (case-insensitive).\n `\"causal\"` results in causal (dilated) convolutions, e.g. `output[t]`\n does not depend on `input[t+1:]`. Useful when modeling temporal data\n where the model should not violate the temporal order.\n See [WaveNet: A Generative Model for Raw Audio, section\n 2.1](https://arxiv.org/abs/1609.03499).\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n groups: Integer, the number of channel groups controlling the connections\n between inputs and outputs. Input channels and `filters` must both be\n divisible by `groups`. For example,\n - At `groups=1`, all inputs are convolved to all outputs.\n - At `groups=2`, the operation becomes equivalent to having two\n convolutional layers side by side, each seeing half the input\n channels, and producing half the output channels, and both\n subsequently concatenated.\n - At `groups=input_channels`, each input channel is convolved with its\n own set of filters, of size `input_channels / filters`\n dilation_rate: an integer or tuple/list of a single integer, specifying\n the dilation rate to use for dilated convolution.\n Currently, specifying any `dilation_rate` value != 1 is\n incompatible with specifying any `strides` value != 1.\n activation: Activation function to use.\n If you don't specify anything, no activation is applied (\n see `keras.activations`).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix (\n see `keras.initializers`).\n bias_initializer: Initializer for the bias vector (\n see `keras.initializers`).\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix (see `keras.regularizers`).\n bias_regularizer: Regularizer function applied to the bias vector (\n see `keras.regularizers`).\n activity_regularizer: Regularizer function applied to\n the output of the layer (its \"activation\") (\n see `keras.regularizers`).\n kernel_constraint: Constraint function applied to the kernel matrix (\n see `keras.constraints`).\n bias_constraint: Constraint function applied to the bias vector (\n see `keras.constraints`).\n Input shape:\n 3D tensor with shape: `(batch_size, steps, input_dim)`\n Output shape:\n 3D tensor with shape: `(batch_size, new_steps, filters)`\n `steps` value might have changed due to padding or strides.\n Returns:\n A tensor of rank 3 representing\n `activation(conv1d(inputs, kernel) + bias)`.\n Raises:\n ValueError: when both `strides` > 1 and `dilation_rate` > 1.\n \"\"\"\n\n def __init__(\n self,\n filters,\n kernel_size,\n strides=1,\n padding=\"valid\",\n data_format=\"channels_last\",\n dilation_rate=1,\n groups=1,\n activation=None,\n use_bias=True,\n kernel_initializer=\"glorot_uniform\",\n bias_initializer=\"zeros\",\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n **kwargs\n ):\n super().__init__(\n rank=1,\n filters=filters,\n kernel_size=kernel_size,\n strides=strides,\n padding=padding,\n data_format=data_format,\n dilation_rate=dilation_rate,\n 
groups=groups,\n activation=activations.get(activation),\n use_bias=use_bias,\n kernel_initializer=initializers.get(kernel_initializer),\n bias_initializer=initializers.get(bias_initializer),\n kernel_regularizer=regularizers.get(kernel_regularizer),\n bias_regularizer=regularizers.get(bias_regularizer),\n activity_regularizer=regularizers.get(activity_regularizer),\n kernel_constraint=constraints.get(kernel_constraint),\n bias_constraint=constraints.get(bias_constraint),\n **kwargs\n )\n",
"# -*- coding: utf-8 -*-\n# Copyright 2020 The FastSpeech Authors, The HuggingFace Inc. team and Minh Nguyen (@dathudeptrai)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tensorflow Model modules for FastSpeech.\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\n\n\ndef get_initializer(initializer_range=0.02):\n \"\"\"Creates a `tf.initializers.truncated_normal` with the given range.\n\n Args:\n initializer_range: float, initializer range for stddev.\n\n Returns:\n TruncatedNormal initializer with stddev = `initializer_range`.\n\n \"\"\"\n return tf.keras.initializers.TruncatedNormal(stddev=initializer_range)\n\n\ndef gelu(x):\n \"\"\"Gaussian Error Linear unit.\"\"\"\n cdf = 0.5 * (1.0 + tf.math.erf(x / tf.math.sqrt(2.0)))\n return x * cdf\n\n\ndef gelu_new(x):\n \"\"\"Smoother gaussian Error Linear Unit.\"\"\"\n cdf = 0.5 * (1.0 + tf.tanh((np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))\n return x * cdf\n\n\ndef swish(x):\n \"\"\"Swish activation function.\"\"\"\n return x * tf.sigmoid(x)\n\n\ndef mish(x):\n return x * tf.math.tanh(tf.math.softplus(x))\n\n\nACT2FN = {\n \"identity\": tf.keras.layers.Activation(\"linear\"),\n \"tanh\": tf.keras.layers.Activation(\"tanh\"),\n \"gelu\": tf.keras.layers.Activation(gelu),\n \"relu\": tf.keras.activations.relu,\n \"swish\": tf.keras.layers.Activation(swish),\n \"gelu_new\": tf.keras.layers.Activation(gelu_new),\n \"mish\": tf.keras.layers.Activation(mish),\n}\n\n\nclass TFFastSpeechEmbeddings(tf.keras.layers.Layer):\n \"\"\"Construct charactor/phoneme/positional/speaker embeddings.\"\"\"\n\n def __init__(self, config, **kwargs):\n \"\"\"Init variables.\"\"\"\n super().__init__(**kwargs)\n self.vocab_size = config.vocab_size\n self.hidden_size = config.encoder_self_attention_params.hidden_size\n self.initializer_range = config.initializer_range\n self.config = config\n\n self.position_embeddings = tf.keras.layers.Embedding(\n config.max_position_embeddings + 1,\n self.hidden_size,\n weights=[self._sincos_embedding()],\n name=\"position_embeddings\",\n trainable=False,\n )\n\n if config.n_speakers > 1:\n self.encoder_speaker_embeddings = tf.keras.layers.Embedding(\n config.n_speakers,\n self.hidden_size,\n embeddings_initializer=get_initializer(self.initializer_range),\n name=\"speaker_embeddings\",\n )\n self.speaker_fc = tf.keras.layers.Dense(\n units=self.hidden_size, name=\"speaker_fc\"\n )\n\n def build(self, input_shape):\n \"\"\"Build shared charactor/phoneme embedding layers.\"\"\"\n with tf.name_scope(\"charactor_embeddings\"):\n self.charactor_embeddings = self.add_weight(\n \"weight\",\n shape=[self.vocab_size, self.hidden_size],\n initializer=get_initializer(self.initializer_range),\n )\n super().build(input_shape)\n\n def call(self, inputs, training=False):\n \"\"\"Get charactor embeddings of inputs.\n\n Args:\n 1. charactor, Tensor (int32) shape [batch_size, length].\n 2. 
speaker_id, Tensor (int32) shape [batch_size]\n Returns:\n Tensor (float32) shape [batch_size, length, embedding_size].\n\n \"\"\"\n return self._embedding(inputs, training=training)\n\n def _embedding(self, inputs, training=False):\n \"\"\"Applies embedding based on inputs tensor.\"\"\"\n input_ids, speaker_ids = inputs\n\n input_shape = tf.shape(input_ids)\n seq_length = input_shape[1]\n\n position_ids = tf.range(1, seq_length + 1, dtype=tf.int32)[tf.newaxis, :]\n\n # create embeddings\n inputs_embeds = tf.gather(self.charactor_embeddings, input_ids)\n position_embeddings = self.position_embeddings(position_ids)\n\n # sum embedding\n embeddings = inputs_embeds + position_embeddings\n if self.config.n_speakers > 1:\n speaker_embeddings = self.encoder_speaker_embeddings(speaker_ids)\n speaker_features = tf.math.softplus(self.speaker_fc(speaker_embeddings))\n # extended speaker embeddings\n extended_speaker_features = speaker_features[:, tf.newaxis, :]\n embeddings += extended_speaker_features\n\n return embeddings\n\n def _sincos_embedding(self):\n position_enc = np.array(\n [\n [\n pos / np.power(10000, 2.0 * (i // 2) / self.hidden_size)\n for i in range(self.hidden_size)\n ]\n for pos in range(self.config.max_position_embeddings + 1)\n ]\n )\n\n position_enc[:, 0::2] = np.sin(position_enc[:, 0::2])\n position_enc[:, 1::2] = np.cos(position_enc[:, 1::2])\n\n # pad embedding.\n position_enc[0] = 0.0\n\n return position_enc\n\n\nclass TFFastSpeechSelfAttention(tf.keras.layers.Layer):\n \"\"\"Self attention module for fastspeech.\"\"\"\n\n def __init__(self, config, **kwargs):\n \"\"\"Init variables.\"\"\"\n super().__init__(**kwargs)\n if config.hidden_size % config.num_attention_heads != 0:\n raise ValueError(\n \"The hidden size (%d) is not a multiple of the number of attention \"\n \"heads (%d)\" % (config.hidden_size, config.num_attention_heads)\n )\n self.output_attentions = config.output_attentions\n self.num_attention_heads = config.num_attention_heads\n self.all_head_size = self.num_attention_heads * config.attention_head_size\n\n self.query = tf.keras.layers.Dense(\n self.all_head_size,\n kernel_initializer=get_initializer(config.initializer_range),\n name=\"query\",\n )\n self.key = tf.keras.layers.Dense(\n self.all_head_size,\n kernel_initializer=get_initializer(config.initializer_range),\n name=\"key\",\n )\n self.value = tf.keras.layers.Dense(\n self.all_head_size,\n kernel_initializer=get_initializer(config.initializer_range),\n name=\"value\",\n )\n\n self.dropout = tf.keras.layers.Dropout(config.attention_probs_dropout_prob)\n self.config = config\n\n def transpose_for_scores(self, x, batch_size):\n \"\"\"Transpose to calculate attention scores.\"\"\"\n x = tf.reshape(\n x,\n (batch_size, -1, self.num_attention_heads, self.config.attention_head_size),\n )\n return tf.transpose(x, perm=[0, 2, 1, 3])\n\n def call(self, inputs, training=False):\n \"\"\"Call logic.\"\"\"\n hidden_states, attention_mask = inputs\n\n batch_size = tf.shape(hidden_states)[0]\n mixed_query_layer = self.query(hidden_states)\n mixed_key_layer = self.key(hidden_states)\n mixed_value_layer = self.value(hidden_states)\n\n query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)\n key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)\n value_layer = self.transpose_for_scores(mixed_value_layer, batch_size)\n\n attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)\n dk = tf.cast(tf.shape(key_layer)[-1], tf.float32) # scale attention_scores\n attention_scores = 
attention_scores / tf.math.sqrt(dk)\n\n if attention_mask is not None:\n # extended_attention_masks for self attention encoder.\n extended_attention_mask = attention_mask[:, tf.newaxis, tf.newaxis, :]\n extended_attention_mask = tf.cast(extended_attention_mask, tf.float32)\n extended_attention_mask = (1.0 - extended_attention_mask) * -1e9\n attention_scores = attention_scores + extended_attention_mask\n\n # Normalize the attention scores to probabilities.\n attention_probs = tf.nn.softmax(attention_scores, axis=-1)\n attention_probs = self.dropout(attention_probs, training=training)\n\n context_layer = tf.matmul(attention_probs, value_layer)\n context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3])\n context_layer = tf.reshape(context_layer, (batch_size, -1, self.all_head_size))\n\n outputs = (\n (context_layer, attention_probs)\n if self.output_attentions\n else (context_layer,)\n )\n return outputs\n\n\nclass TFFastSpeechSelfOutput(tf.keras.layers.Layer):\n \"\"\"Fastspeech output of self attention module.\"\"\"\n\n def __init__(self, config, **kwargs):\n \"\"\"Init variables.\"\"\"\n super().__init__(**kwargs)\n self.dense = tf.keras.layers.Dense(\n config.hidden_size,\n kernel_initializer=get_initializer(config.initializer_range),\n name=\"dense\",\n )\n self.LayerNorm = tf.keras.layers.LayerNormalization(\n epsilon=config.layer_norm_eps, name=\"LayerNorm\"\n )\n self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)\n\n def call(self, inputs, training=False):\n \"\"\"Call logic.\"\"\"\n hidden_states, input_tensor = inputs\n\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states, training=training)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\nclass TFFastSpeechAttention(tf.keras.layers.Layer):\n \"\"\"Fastspeech attention module.\"\"\"\n\n def __init__(self, config, **kwargs):\n \"\"\"Init variables.\"\"\"\n super().__init__(**kwargs)\n self.self_attention = TFFastSpeechSelfAttention(config, name=\"self\")\n self.dense_output = TFFastSpeechSelfOutput(config, name=\"output\")\n\n def call(self, inputs, training=False):\n input_tensor, attention_mask = inputs\n\n self_outputs = self.self_attention(\n [input_tensor, attention_mask], training=training\n )\n attention_output = self.dense_output(\n [self_outputs[0], input_tensor], training=training\n )\n masked_attention_output = attention_output * tf.cast(\n tf.expand_dims(attention_mask, 2), dtype=tf.float32\n )\n outputs = (masked_attention_output,) + self_outputs[\n 1:\n ] # add attentions if we output them\n return outputs\n\n\nclass TFFastSpeechIntermediate(tf.keras.layers.Layer):\n \"\"\"Intermediate representation module.\"\"\"\n\n def __init__(self, config, **kwargs):\n \"\"\"Init variables.\"\"\"\n super().__init__(**kwargs)\n self.conv1d_1 = tf.keras.layers.Conv1D(\n config.intermediate_size,\n kernel_size=config.intermediate_kernel_size,\n kernel_initializer=get_initializer(config.initializer_range),\n padding=\"same\",\n name=\"conv1d_1\",\n )\n self.conv1d_2 = tf.keras.layers.Conv1D(\n config.hidden_size,\n kernel_size=config.intermediate_kernel_size,\n kernel_initializer=get_initializer(config.initializer_range),\n padding=\"same\",\n name=\"conv1d_2\",\n )\n if isinstance(config.hidden_act, str):\n self.intermediate_act_fn = ACT2FN[config.hidden_act]\n else:\n self.intermediate_act_fn = config.hidden_act\n\n def call(self, inputs):\n \"\"\"Call logic.\"\"\"\n hidden_states, attention_mask = inputs\n\n hidden_states = 
self.conv1d_1(hidden_states)\n hidden_states = self.intermediate_act_fn(hidden_states)\n hidden_states = self.conv1d_2(hidden_states)\n\n masked_hidden_states = hidden_states * tf.cast(\n tf.expand_dims(attention_mask, 2), dtype=tf.float32\n )\n return masked_hidden_states\n\n\nclass TFFastSpeechOutput(tf.keras.layers.Layer):\n \"\"\"Output module.\"\"\"\n\n def __init__(self, config, **kwargs):\n \"\"\"Init variables.\"\"\"\n super().__init__(**kwargs)\n self.LayerNorm = tf.keras.layers.LayerNormalization(\n epsilon=config.layer_norm_eps, name=\"LayerNorm\"\n )\n self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)\n\n def call(self, inputs, training=False):\n \"\"\"Call logic.\"\"\"\n hidden_states, input_tensor = inputs\n\n hidden_states = self.dropout(hidden_states, training=training)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\nclass TFFastSpeechLayer(tf.keras.layers.Layer):\n \"\"\"Fastspeech module (FFT module on the paper).\"\"\"\n\n def __init__(self, config, **kwargs):\n \"\"\"Init variables.\"\"\"\n super().__init__(**kwargs)\n self.attention = TFFastSpeechAttention(config, name=\"attention\")\n self.intermediate = TFFastSpeechIntermediate(config, name=\"intermediate\")\n self.bert_output = TFFastSpeechOutput(config, name=\"output\")\n\n def call(self, inputs, training=False):\n \"\"\"Call logic.\"\"\"\n hidden_states, attention_mask = inputs\n\n attention_outputs = self.attention(\n [hidden_states, attention_mask], training=training\n )\n attention_output = attention_outputs[0]\n intermediate_output = self.intermediate(\n [attention_output, attention_mask], training=training\n )\n layer_output = self.bert_output(\n [intermediate_output, attention_output], training=training\n )\n masked_layer_output = layer_output * tf.cast(\n tf.expand_dims(attention_mask, 2), dtype=tf.float32\n )\n outputs = (masked_layer_output,) + attention_outputs[\n 1:\n ] # add attentions if we output them\n return outputs\n\n\nclass TFFastSpeechEncoder(tf.keras.layers.Layer):\n \"\"\"Fast Speech encoder module.\"\"\"\n\n def __init__(self, config, **kwargs):\n \"\"\"Init variables.\"\"\"\n super().__init__(**kwargs)\n self.output_attentions = config.output_attentions\n self.output_hidden_states = config.output_hidden_states\n self.layer = [\n TFFastSpeechLayer(config, name=\"layer_._{}\".format(i))\n for i in range(config.num_hidden_layers)\n ]\n\n def call(self, inputs, training=False):\n \"\"\"Call logic.\"\"\"\n hidden_states, attention_mask = inputs\n\n all_hidden_states = ()\n all_attentions = ()\n for _, layer_module in enumerate(self.layer):\n if self.output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n layer_outputs = layer_module(\n [hidden_states, attention_mask], training=training\n )\n hidden_states = layer_outputs[0]\n\n if self.output_attentions:\n all_attentions = all_attentions + (layer_outputs[1],)\n\n # Add last layer\n if self.output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n outputs = (hidden_states,)\n if self.output_hidden_states:\n outputs = outputs + (all_hidden_states,)\n if self.output_attentions:\n outputs = outputs + (all_attentions,)\n return outputs # outputs, (hidden states), (attentions)\n\n\nclass TFFastSpeechDecoder(TFFastSpeechEncoder):\n \"\"\"Fast Speech decoder module.\"\"\"\n\n def __init__(self, config, **kwargs):\n self.is_compatible_encoder = kwargs.pop(\"is_compatible_encoder\", True)\n\n super().__init__(config, **kwargs)\n self.config 
= config\n\n # create decoder positional embedding\n self.decoder_positional_embeddings = tf.keras.layers.Embedding(\n config.max_position_embeddings + 1,\n config.hidden_size,\n weights=[self._sincos_embedding()],\n name=\"position_embeddings\",\n trainable=False,\n )\n\n if self.is_compatible_encoder is False:\n self.project_compatible_decoder = tf.keras.layers.Dense(\n units=config.hidden_size, name=\"project_compatible_decoder\"\n )\n\n if config.n_speakers > 1:\n self.decoder_speaker_embeddings = tf.keras.layers.Embedding(\n config.n_speakers,\n config.hidden_size,\n embeddings_initializer=get_initializer(config.initializer_range),\n name=\"speaker_embeddings\",\n )\n self.speaker_fc = tf.keras.layers.Dense(\n units=config.hidden_size, name=\"speaker_fc\"\n )\n\n def call(self, inputs, training=False):\n hidden_states, speaker_ids, encoder_mask, decoder_pos = inputs\n\n if self.is_compatible_encoder is False:\n hidden_states = self.project_compatible_decoder(hidden_states)\n\n # calculate new hidden states.\n hidden_states += self.decoder_positional_embeddings(decoder_pos)\n\n if self.config.n_speakers > 1:\n speaker_embeddings = self.decoder_speaker_embeddings(speaker_ids)\n speaker_features = tf.math.softplus(self.speaker_fc(speaker_embeddings))\n # extended speaker embeddings\n extended_speaker_features = speaker_features[:, tf.newaxis, :]\n hidden_states += extended_speaker_features\n\n return super().call([hidden_states, encoder_mask], training=training)\n\n def _sincos_embedding(self):\n position_enc = np.array(\n [\n [\n pos / np.power(10000, 2.0 * (i // 2) / self.config.hidden_size)\n for i in range(self.config.hidden_size)\n ]\n for pos in range(self.config.max_position_embeddings + 1)\n ]\n )\n\n position_enc[:, 0::2] = np.sin(position_enc[:, 0::2])\n position_enc[:, 1::2] = np.cos(position_enc[:, 1::2])\n\n # pad embedding.\n position_enc[0] = 0.0\n\n return position_enc\n\n\nclass TFTacotronPostnet(tf.keras.layers.Layer):\n \"\"\"Tacotron-2 postnet.\"\"\"\n\n def __init__(self, config, **kwargs):\n \"\"\"Init variables.\"\"\"\n super().__init__(**kwargs)\n self.conv_batch_norm = []\n for i in range(config.n_conv_postnet):\n conv = tf.keras.layers.Conv1D(\n filters=config.postnet_conv_filters\n if i < config.n_conv_postnet - 1\n else config.num_mels,\n kernel_size=config.postnet_conv_kernel_sizes,\n padding=\"same\",\n name=\"conv_._{}\".format(i),\n )\n batch_norm = tf.keras.layers.BatchNormalization(\n name=\"batch_norm_._{}\".format(i)\n )\n self.conv_batch_norm.append((conv, batch_norm))\n self.dropout = tf.keras.layers.Dropout(\n rate=config.postnet_dropout_rate, name=\"dropout\"\n )\n self.activation = [tf.nn.tanh] * (config.n_conv_postnet - 1) + [tf.identity]\n\n def call(self, inputs, training=False):\n \"\"\"Call logic.\"\"\"\n outputs, mask = inputs\n extended_mask = tf.cast(tf.expand_dims(mask, axis=2), tf.float32)\n for i, (conv, bn) in enumerate(self.conv_batch_norm):\n outputs = conv(outputs)\n outputs = bn(outputs)\n outputs = self.activation[i](outputs)\n outputs = self.dropout(outputs, training=training)\n return outputs * extended_mask\n\n\nclass TFFastSpeechDurationPredictor(tf.keras.layers.Layer):\n \"\"\"FastSpeech duration predictor module.\"\"\"\n\n def __init__(self, config, **kwargs):\n \"\"\"Init variables.\"\"\"\n super().__init__(**kwargs)\n self.conv_layers = []\n for i in range(config.num_duration_conv_layers):\n self.conv_layers.append(\n tf.keras.layers.Conv1D(\n config.duration_predictor_filters,\n config.duration_predictor_kernel_sizes,\n 
padding=\"same\",\n name=\"conv_._{}\".format(i),\n )\n )\n self.conv_layers.append(\n tf.keras.layers.LayerNormalization(\n epsilon=config.layer_norm_eps, name=\"LayerNorm_._{}\".format(i)\n )\n )\n self.conv_layers.append(tf.keras.layers.Activation(tf.nn.relu6))\n self.conv_layers.append(\n tf.keras.layers.Dropout(config.duration_predictor_dropout_probs)\n )\n self.conv_layers_sequence = tf.keras.Sequential(self.conv_layers)\n self.output_layer = tf.keras.layers.Dense(1)\n\n def call(self, inputs, training=False):\n \"\"\"Call logic.\"\"\"\n encoder_hidden_states, attention_mask = inputs\n attention_mask = tf.cast(tf.expand_dims(attention_mask, 2), tf.float32)\n\n # mask encoder hidden states\n masked_encoder_hidden_states = encoder_hidden_states * attention_mask\n\n # pass though first layer\n outputs = self.conv_layers_sequence(masked_encoder_hidden_states)\n outputs = self.output_layer(outputs)\n masked_outputs = outputs * attention_mask\n return tf.squeeze(tf.nn.relu6(masked_outputs), -1) # make sure positive value.\n\n\nclass TFFastSpeechLengthRegulator(tf.keras.layers.Layer):\n \"\"\"FastSpeech lengthregulator module.\"\"\"\n\n def __init__(self, config, **kwargs):\n \"\"\"Init variables.\"\"\"\n self.enable_tflite_convertible = kwargs.pop(\"enable_tflite_convertible\", False)\n super().__init__(**kwargs)\n self.config = config\n\n def call(self, inputs, training=False):\n \"\"\"Call logic.\n Args:\n 1. encoder_hidden_states, Tensor (float32) shape [batch_size, length, hidden_size]\n 2. durations_gt, Tensor (float32/int32) shape [batch_size, length]\n \"\"\"\n encoder_hidden_states, durations_gt = inputs\n outputs, encoder_masks = self._length_regulator(\n encoder_hidden_states, durations_gt\n )\n return outputs, encoder_masks\n\n def _length_regulator(self, encoder_hidden_states, durations_gt):\n \"\"\"Length regulator logic.\"\"\"\n sum_durations = tf.reduce_sum(durations_gt, axis=-1) # [batch_size]\n max_durations = tf.reduce_max(sum_durations)\n\n input_shape = tf.shape(encoder_hidden_states)\n batch_size = input_shape[0]\n hidden_size = input_shape[-1]\n\n # initialize output hidden states and encoder masking.\n if self.enable_tflite_convertible:\n # There is only 1 batch in inference, so we don't have to use\n # `tf.While` op with 3-D output tensor.\n repeats = durations_gt[0]\n real_length = tf.reduce_sum(repeats)\n pad_size = max_durations - real_length\n # masks : [max_durations]\n masks = tf.sequence_mask([real_length], max_durations, dtype=tf.int32)\n repeat_encoder_hidden_states = tf.repeat(\n encoder_hidden_states[0], repeats=repeats, axis=0\n )\n repeat_encoder_hidden_states = tf.expand_dims(\n tf.pad(repeat_encoder_hidden_states, [[0, pad_size], [0, 0]]), 0\n ) # [1, max_durations, hidden_size]\n\n outputs = repeat_encoder_hidden_states\n encoder_masks = masks\n else:\n outputs = tf.zeros(shape=[0, max_durations, hidden_size], dtype=tf.float32)\n encoder_masks = tf.zeros(shape=[0, max_durations], dtype=tf.int32)\n\n def condition(\n i,\n batch_size,\n outputs,\n encoder_masks,\n encoder_hidden_states,\n durations_gt,\n max_durations,\n ):\n return tf.less(i, batch_size)\n\n def body(\n i,\n batch_size,\n outputs,\n encoder_masks,\n encoder_hidden_states,\n durations_gt,\n max_durations,\n ):\n repeats = durations_gt[i]\n real_length = tf.reduce_sum(repeats)\n pad_size = max_durations - real_length\n masks = tf.sequence_mask([real_length], max_durations, dtype=tf.int32)\n repeat_encoder_hidden_states = tf.repeat(\n encoder_hidden_states[i], repeats=repeats, axis=0\n 
)\n repeat_encoder_hidden_states = tf.expand_dims(\n tf.pad(repeat_encoder_hidden_states, [[0, pad_size], [0, 0]]), 0\n ) # [1, max_durations, hidden_size]\n outputs = tf.concat([outputs, repeat_encoder_hidden_states], axis=0)\n encoder_masks = tf.concat([encoder_masks, masks], axis=0)\n return [\n i + 1,\n batch_size,\n outputs,\n encoder_masks,\n encoder_hidden_states,\n durations_gt,\n max_durations,\n ]\n\n # initialize iteration i.\n i = tf.constant(0, dtype=tf.int32)\n _, _, outputs, encoder_masks, _, _, _, = tf.while_loop(\n condition,\n body,\n [\n i,\n batch_size,\n outputs,\n encoder_masks,\n encoder_hidden_states,\n durations_gt,\n max_durations,\n ],\n shape_invariants=[\n i.get_shape(),\n batch_size.get_shape(),\n tf.TensorShape(\n [\n None,\n None,\n self.config.encoder_self_attention_params.hidden_size,\n ]\n ),\n tf.TensorShape([None, None]),\n encoder_hidden_states.get_shape(),\n durations_gt.get_shape(),\n max_durations.get_shape(),\n ],\n )\n\n return outputs, encoder_masks\n\n\nclass TFFastSpeech(tf.keras.Model):\n \"\"\"TF Fastspeech module.\"\"\"\n\n def __init__(self, config, **kwargs):\n \"\"\"Init layers for fastspeech.\"\"\"\n self.enable_tflite_convertible = kwargs.pop(\"enable_tflite_convertible\", False)\n super().__init__(**kwargs)\n self.embeddings = TFFastSpeechEmbeddings(config, name=\"embeddings\")\n self.encoder = TFFastSpeechEncoder(\n config.encoder_self_attention_params, name=\"encoder\"\n )\n self.duration_predictor = TFFastSpeechDurationPredictor(\n config, name=\"duration_predictor\"\n )\n self.length_regulator = TFFastSpeechLengthRegulator(\n config,\n enable_tflite_convertible=self.enable_tflite_convertible,\n name=\"length_regulator\",\n )\n self.decoder = TFFastSpeechDecoder(\n config.decoder_self_attention_params,\n is_compatible_encoder=config.encoder_self_attention_params.hidden_size\n == config.decoder_self_attention_params.hidden_size,\n name=\"decoder\",\n )\n self.mel_dense = tf.keras.layers.Dense(units=config.num_mels, name=\"mel_before\")\n self.postnet = TFTacotronPostnet(config=config, name=\"postnet\")\n\n self.setup_inference_fn()\n\n def _build(self):\n \"\"\"Dummy input for building model.\"\"\"\n # fake inputs\n input_ids = tf.convert_to_tensor([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]], tf.int32)\n attention_mask = tf.convert_to_tensor(\n [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], tf.int32\n )\n speaker_ids = tf.convert_to_tensor([0], tf.int32)\n duration_gts = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], tf.int32)\n self(input_ids, attention_mask, speaker_ids, duration_gts)\n\n def call(\n self, input_ids, attention_mask, speaker_ids, duration_gts, training=False\n ):\n \"\"\"Call logic.\"\"\"\n embedding_output = self.embeddings([input_ids, speaker_ids], training=training)\n encoder_output = self.encoder(\n [embedding_output, attention_mask], training=training\n )\n last_encoder_hidden_states = encoder_output[0]\n\n # duration predictor, here use last_encoder_hidden_states, u can use more hidden_states layers\n # rather than just use last_hidden_states of encoder for duration_predictor.\n duration_outputs = self.duration_predictor(\n [last_encoder_hidden_states, attention_mask]\n ) # [batch_size, length]\n\n length_regulator_outputs, encoder_masks = self.length_regulator(\n [last_encoder_hidden_states, duration_gts], training=training\n )\n\n # create decoder positional embedding\n decoder_pos = tf.range(\n 1, tf.shape(length_regulator_outputs)[1] + 1, dtype=tf.int32\n )\n masked_decoder_pos = tf.expand_dims(decoder_pos, 0) * 
encoder_masks\n\n decoder_output = self.decoder(\n [length_regulator_outputs, speaker_ids, encoder_masks, masked_decoder_pos],\n training=training,\n )\n last_decoder_hidden_states = decoder_output[0]\n\n # here u can use sum or concat more than 1 hidden states layers from decoder.\n mel_before = self.mel_dense(last_decoder_hidden_states)\n mel_after = (\n self.postnet([mel_before, encoder_masks], training=training) + mel_before\n )\n\n outputs = (mel_before, mel_after, duration_outputs)\n return outputs\n\n def _inference(self, input_ids, attention_mask, speaker_ids, speed_ratios):\n \"\"\"Call logic.\"\"\"\n embedding_output = self.embeddings([input_ids, speaker_ids], training=False)\n encoder_output = self.encoder(\n [embedding_output, attention_mask], training=False\n )\n last_encoder_hidden_states = encoder_output[0]\n\n # duration predictor, here use last_encoder_hidden_states, u can use more hidden_states layers\n # rather than just use last_hidden_states of encoder for duration_predictor.\n duration_outputs = self.duration_predictor(\n [last_encoder_hidden_states, attention_mask]\n ) # [batch_size, length]\n duration_outputs = tf.math.exp(duration_outputs) - 1.0\n\n if speed_ratios is None:\n speed_ratios = tf.convert_to_tensor(np.array([1.0]), dtype=tf.float32)\n\n duration_outputs = tf.cast(\n tf.math.round(duration_outputs * speed_ratios), tf.int32\n )\n\n length_regulator_outputs, encoder_masks = self.length_regulator(\n [last_encoder_hidden_states, duration_outputs], training=False\n )\n\n # create decoder positional embedding\n decoder_pos = tf.range(\n 1, tf.shape(length_regulator_outputs)[1] + 1, dtype=tf.int32\n )\n masked_decoder_pos = tf.expand_dims(decoder_pos, 0) * encoder_masks\n\n decoder_output = self.decoder(\n [length_regulator_outputs, speaker_ids, encoder_masks, masked_decoder_pos],\n training=False,\n )\n last_decoder_hidden_states = decoder_output[0]\n\n # here u can use sum or concat more than 1 hidden states layers from decoder.\n mel_before = self.mel_dense(last_decoder_hidden_states)\n mel_after = (\n self.postnet([mel_before, encoder_masks], training=False) + mel_before\n )\n\n outputs = (mel_before, mel_after, duration_outputs)\n return outputs\n\n def setup_inference_fn(self):\n self.inference = tf.function(\n self._inference,\n experimental_relax_shapes=True,\n input_signature=[\n tf.TensorSpec(shape=[None, None], dtype=tf.int32),\n tf.TensorSpec(shape=[None, None], dtype=tf.bool),\n tf.TensorSpec(shape=[None,], dtype=tf.int32),\n tf.TensorSpec(shape=[None,], dtype=tf.float32),\n ],\n )\n\n self.inference_tflite = tf.function(\n self._inference,\n experimental_relax_shapes=True,\n input_signature=[\n tf.TensorSpec(shape=[1, None], dtype=tf.int32),\n tf.TensorSpec(shape=[1, None], dtype=tf.bool),\n tf.TensorSpec(shape=[1,], dtype=tf.int32),\n tf.TensorSpec(shape=[1,], dtype=tf.float32),\n ],\n )\n"
] | [
[
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.keras.utils.conv_utils.convert_data_format",
"tensorflow.python.keras.regularizers.get",
"tensorflow.python.keras.utils.conv_utils.conv_output_length",
"tensorflow.python.ops.nn_ops._get_strides_and_dilation_rate",
"tensorflow.python.ops.nn_ops._NonAtrousConvolution",
"tensorflow.python.ops.nn_ops._WithSpaceToBatch",
"tensorflow.python.framework.tensor_shape.dimension_at_index",
"tensorflow.python.keras.constraints.get",
"tensorflow.python.keras.activations.get",
"tensorflow.python.ops.nn.bias_add",
"tensorflow.python.keras.utils.conv_utils.normalize_tuple",
"tensorflow.python.keras.regularizers.serialize",
"tensorflow.python.keras.activations.serialize",
"tensorflow.python.keras.constraints.serialize",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.keras.engine.input_spec.InputSpec",
"tensorflow.python.keras.utils.conv_utils.normalize_data_format",
"tensorflow.python.keras.initializers.get",
"tensorflow.python.keras.initializers.serialize",
"tensorflow.python.keras.utils.conv_utils.normalize_padding"
],
[
"tensorflow.keras.layers.LayerNormalization",
"tensorflow.convert_to_tensor",
"tensorflow.concat",
"numpy.sqrt",
"tensorflow.zeros",
"tensorflow.reduce_sum",
"tensorflow.cast",
"tensorflow.keras.Sequential",
"tensorflow.pad",
"numpy.sin",
"tensorflow.gather",
"tensorflow.math.softplus",
"tensorflow.name_scope",
"tensorflow.keras.initializers.TruncatedNormal",
"tensorflow.matmul",
"tensorflow.TensorShape",
"tensorflow.shape",
"tensorflow.less",
"tensorflow.keras.layers.Dense",
"numpy.power",
"tensorflow.pow",
"tensorflow.math.exp",
"tensorflow.math.round",
"numpy.array",
"tensorflow.sequence_mask",
"tensorflow.reduce_max",
"tensorflow.nn.softmax",
"tensorflow.keras.layers.Activation",
"tensorflow.transpose",
"tensorflow.range",
"tensorflow.math.sqrt",
"tensorflow.nn.relu6",
"tensorflow.constant",
"tensorflow.reshape",
"tensorflow.sigmoid",
"numpy.cos",
"tensorflow.expand_dims",
"tensorflow.repeat",
"tensorflow.keras.layers.Dropout",
"tensorflow.TensorSpec"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"2.7",
"2.6",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
FredericSauv/z-quantum-core | [
"f285b292159fe272d7401ba05baac7bab28475d7",
"f285b292159fe272d7401ba05baac7bab28475d7"
] | [
"src/python/zquantum/core/utils.py",
"src/python/zquantum/core/testing/_initialize.py"
] | [
"\"\"\"General-purpose utilities.\"\"\"\n\nimport numpy as np\nfrom scipy.linalg import expm\nimport random\nimport math\nimport operator\nimport sys\nimport json\nimport openfermion\nfrom openfermion import hermitian_conjugated\nfrom openfermion.ops import SymbolicOperator\nfrom networkx.readwrite import json_graph\nimport lea\nimport collections\nimport scipy\nfrom typing import List\nimport importlib\n\nSCHEMA_VERSION = 'zapata-v1'\nRNDSEED = 12345\n\ndef convert_dict_to_array(dictionary: dict) -> np.ndarray:\n \"\"\"Convert a dictionary to a numpy array.\n\n Args:\n dictionary (dict): the dict containing the data\n \n Returns:\n array (numpy.array): a numpy array\n \"\"\"\n \n array = np.array(dictionary['real'])\n\n if dictionary.get('imag'):\n array = array + 1j*np.array(dictionary['imag'])\n\n return array\n\ndef convert_array_to_dict(array: np.ndarray) -> dict:\n \"\"\"Convert a numpy array to a dictionary.\n\n Args:\n array (numpy.array): a numpy array\n \n Returns:\n dictionary (dict): the dict containing the data\n \"\"\"\n\n dictionary = {}\n if np.iscomplexobj(array):\n dictionary['real'] = array.real.tolist()\n dictionary['imag'] = array.imag.tolist()\n else:\n dictionary['real'] = array.tolist()\n\n return dictionary\n\ndef dec2bin(number: int, length: int) -> List[int]:\n \"\"\"Converts a decimal number into a binary representation\n of fixed number of bits.\n\n Args:\n number: (int) the input decimal number\n length: (int) number of bits in the output string\n\n Returns:\n A list of binary numbers\n \"\"\"\n\n if pow(2,length) < number:\n sys.exit('Insufficient number of bits for representing the number {}'.format(number))\n\n bit_str = bin(number)\n bit_str = bit_str[2:len(bit_str)] # chop off the first two chars\n bit_string = [int(x) for x in list(bit_str)]\n if len(bit_string) < length:\n len_zeros = length - len(bit_string)\n bit_string = [int(x) for x in list(np.zeros(len_zeros))] + bit_string\n\n return bit_string\n\ndef bin2dec(x: List[int]) -> int:\n \"\"\"Converts a binary vector to an integer, with the 0-th\n element being the most significant digit.\n\n Args:\n x: (list) a binary vector\n\n Returns:\n An integer\n \"\"\"\n\n dec = 0\n coeff = 1\n for i in range(len(x)):\n dec = dec + coeff * x[len(x)-1-i]\n coeff = coeff * 2\n return dec\n\n\"\"\"\nThe functions PAULI_X, PAULI_Y, PAULI_Z and IDENTITY below are used for \ngenerating the generators of the Pauli group, which include Pauli X, Y, Z \noperators as well as identity operator\n\"\"\"\n\npauli_x = np.array([[0.0,1.0],[1.0,0.0]])\npauli_y = np.array([[0.0,-1.0j],[1.0j,0.0]])\npauli_z = np.array([[1.0,0.0],[0.0,-1.0]])\nidentity = np.array([[1.0,0.0],[0.0,1.0]])\n\ndef is_identity(u, tol=1e-15):\n \"\"\"Test if a matrix is identity.\n\n Args:\n u: np.ndarray\n Matrix to be checked.\n tol: float\n Threshold below which two matrix elements are considered equal.\n \"\"\"\n\n dims = np.array(u).shape\n if dims[0] != dims[1]:\n raise Exception('Input matrix is not square.')\n \n return np.allclose(u, np.eye(u.shape[0]), atol=tol)\n\ndef is_unitary(u, tol = 1e-15):\n \"\"\"Test if a matrix is unitary.\n\n Args:\n u: array\n Matrix to be checked.\n tol: float\n Threshold below which two matrix elements are considered equal.\n \"\"\"\n\n dims = np.array(u).shape\n if dims[0] != dims[1]:\n raise Exception('Input matrix is not square.')\n\n test_matrix = np.dot(hermitian_conjugated(np.array(u)), u)\n return is_identity(test_matrix, tol)\n\ndef compare_unitary(u1: np.ndarray, \n u2: np.ndarray, \n tol: float = 1e-15) 
-> bool:\n \"\"\"Compares two unitary operators to see if they are equal to within a phase.\n\n Args:\n u1 (numpy.ndarray): First unitary operator.\n u2 (numpy.ndarray): Second unitary operator.\n tol (float): Threshold below which two matrix elements are considered equal.\n \n Returns:\n bool: True if the unitaries are equal to within the tolerance, ignoring\n differences in global phase.\n \"\"\"\n\n if is_unitary(u1, tol) == False:\n raise Exception('The first input matrix is not unitary.')\n if is_unitary(u2, tol) == False:\n raise Exception('The second input matrix is not unitary.')\n \n test_matrix = np.dot(u1.conj().T, u2)\n phase = test_matrix.item((0,0))**-1\n return is_identity(phase*test_matrix, tol)\n\ndef sample_from_probability_distribution(probability_distribution: dict, n_samples: int) -> collections.Counter:\n '''\n Samples events from a discrete probability distribution\n\n Args:\n probabilty_distribution: The discrete probability distribution to be used\n for sampling. This should be a dictionary\n \n n_samples (int): The number of samples desired\n\n Returns:\n A dictionary of the outcomes sampled. The key values are the things be sampled\n and values are how many times those things appeared in the sampling\n '''\n if isinstance(probability_distribution, dict):\n prob_pmf = lea.pmf(probability_distribution)\n sampled_dict = collections.Counter(prob_pmf.random(n_samples))\n return sampled_dict\n else:\n raise RuntimeError(\"Probability distribution should be a dictionary with key value \\\n being the thing being sampled and the value being probability of getting \\\n sampled \")\n\n\ndef convert_bitstrings_to_tuples(bitstrings):\n '''Given the measured bitstrings, convert each bitstring to tuple format\n\n Args:\n bitstrings (list of strings): the measured bitstrings\n Returns:\n A list of tuples\n '''\n # Convert from bitstrings to tuple format\n measurements = []\n for bitstring in bitstrings:\n\n measurement = ()\n for char in bitstring:\n measurement = measurement + (int(char),)\n\n measurements.append(measurement)\n return measurements\n \n\ndef convert_tuples_to_bitstrings(tuples):\n '''Given a set of measurement tuples, convert each to bitstring format\n\n Args:\n tuples (list of tuples): the measurement tuples\n Returns:\n A list of bitstrings\n '''\n # Convert from tuples to bitstrings\n bitstrings = []\n for tuple_item in tuples:\n\n bitstring = \"\"\n for bit in tuple_item:\n bitstring = bitstring + str(bit)\n\n bitstrings.append(bitstring)\n return bitstrings\n\n\nclass ValueEstimate:\n \"\"\"A class representing a numerical value and its precision corresponding\n to an observable or an objective function\n\n Args:\n value (np.float): the numerical value\n precision (np.float): its precision\n\n Attributes:\n value (np.float): the numerical value\n precision (np.float): its precision\n \"\"\"\n\n def __init__(self, value, precision=None):\n self.value = value\n self.precision = precision\n \n def to_dict(self):\n \"\"\"Convert to a dictionary\"\"\"\n\n data = {'schema' : SCHEMA_VERSION + '-value_estimate'}\n if type(self.value).__module__ == np.__name__:\n data['value'] = self.value.item()\n else:\n data['value'] = self.value\n\n if type(self.precision).__module__ == np.__name__:\n data['precision'] = self.precision.item()\n else:\n data['precision'] = self.precision\n \n return data\n \n @classmethod\n def from_dict(cls, dictionary):\n \"\"\"Create an ExpectationValues object from a dictionary.\"\"\"\n\n value = dictionary['value']\n if 'precision' in 
dictionary:\n precision = dictionary['precision']\n return cls(value, precision)\n else:\n return cls(value)\n\n\ndef load_value_estimate(file):\n \"\"\"Loads value estimate from a faile.\n\n Args:\n file (str or file-like object): the name of the file, or a file-like object.\n \n Returns:\n array (numpy.array): the array\n \"\"\"\n\n if isinstance(file, str):\n with open(file, 'r') as f:\n data = json.load(f)\n else:\n data = json.load(file)\n \n return ValueEstimate.from_dict(data)\n\n\ndef save_value_estimate(value_estimate, filename):\n \"\"\"Saves value estimate to a file.\n\n Args:\n value_estimate (core.utils.ValueEstimate): the value estimate\n file (str or file-like object): the name of the file, or a file-like object\n \"\"\"\n dictionary = value_estimate.to_dict()\n dictionary['schema'] = SCHEMA_VERSION + '-value_estimate'\n\n with open(filename, 'w') as f:\n f.write(json.dumps(dictionary, indent=2))\n\n\ndef load_list(file):\n \"\"\"Load an array from a file.\n\n Args:\n file (str or file-like object): the name of the file, or a file-like object.\n \n Returns:\n array (list): the list\n \"\"\"\n\n if isinstance(file, str):\n with open(file, 'r') as f:\n data = json.load(f)\n else:\n data = json.load(file)\n \n return data['list']\n\n\ndef save_list(array, filename):\n \"\"\"Save expectation values to a file.\n\n Args:\n array (list): the list to be saved\n file (str or file-like object): the name of the file, or a file-like object\n \"\"\"\n dictionary = {}\n dictionary['schema'] = SCHEMA_VERSION + '-list'\n dictionary['list'] = array\n\n with open(filename, 'w') as f:\n f.write(json.dumps(dictionary, indent=2))\n\n\ndef create_object(specs, **kwargs):\n \"\"\"\n Creates an object based on given specs.\n Specs include information about module and function necessary to create the object, \n as well as any additional input parameters for it.\n\n Args:\n specs (dict): dictionary containing the following keys:\n module_name: specifies from which module an object comes.\n function_name: specifies the name of the function used to create object.\n \n Returns:\n object: object of any type\n \"\"\"\n module_name = specs.pop(\"module_name\")\n module = importlib.import_module(module_name)\n creator_name = specs.pop(\"function_name\")\n creator = getattr(module, creator_name)\n created_object = creator(**specs, **kwargs)\n return created_object",
"from pyquil import Program\nfrom pyquil.gates import *\nimport math\nimport random\nimport numpy as np\nfrom openfermion.ops import QubitOperator, IsingOperator\nfrom ..circuit import Circuit\nfrom ..utils import RNDSEED\n\ndef create_random_circuit(nqubits, ngates, seed=None):\n \"\"\"Generates random circuit acting on nqubits with ngates for testing purposes.\n The resulting circuit it saved to file in JSON format under 'circuit.json'.\n\n Args:\n nqubits: integer\n The number of qubits in the circuit\n ngates: integer\n The number of gates in the circuit\n\n *** OPTIONAL ***\n seed: integer\n The see for the random number generator\n \n Returns:\n None, a Circuit (core.circuit) object is saved under 'circuit.json'\n \"\"\"\n # Initialize all gates in set, not including RH or ZXZ\n SING_ZERO = [X, Y, Z, H, S, T]\n SING_ONE = [RX, RY, RZ, PHASE]\n TWO_ZERO = [CNOT, CZ, SWAP]\n TWO_ONE = [CPHASE]\n\n ALL = [SING_ZERO, TWO_ZERO, SING_ONE, TWO_ONE]\n\n NUM_QUBITS = range(0,nqubits)\n\n if seed is not None:\n random.seed(seed)\n\n # Create empty pyquil circuit\n p = Program()\n\n # Loop to add gates to pyquil circuit\n for i in range(0, ngates):\n # Pick gate type\n TYPE_OF_GATE = random.choice(ALL)\n gate = random.choice(TYPE_OF_GATE)\n\n # Pick qubit to act on (control if two qubit gate)\n qubit = random.choice(NUM_QUBITS)\n\n if(TYPE_OF_GATE == SING_ZERO):\n p += gate(qubit)\n elif(TYPE_OF_GATE == SING_ONE):\n # Choose random parameter between +/- pi\n param = random.uniform(-math.pi, math.pi)\n p += gate(param, qubit)\n elif(TYPE_OF_GATE == TWO_ZERO):\n target = random.choice(NUM_QUBITS)\n while(target == qubit):\n # Loop to ensure target =/= control\n target = random.choice(NUM_QUBITS)\n p += gate(qubit, target)\n elif(TYPE_OF_GATE == TWO_ONE):\n target = random.choice(NUM_QUBITS)\n while(target == qubit):\n # Loop to ensure target =/= control\n target = random.choice(NUM_QUBITS)\n # Choose random parameter between +/- pi\n param = random.uniform(-math.pi, math.pi)\n p += gate(param, qubit, target)\n\n c = Circuit(p)\n return c\n\n\ndef create_random_qubitop(nqubits, nterms, seed=None):\n \"\"\"Generates random qubit operator acting on nqubits with nterms for testing purposes.\n The resulting qubit operator it saved to file in JSON format under 'qubitop.json'.\n\n Args:\n nqubits: integer\n The number of qubits in the qubit operator\n nterms: integer\n The number of terms in the qubit operator\n\n *** OPTIONAL ***\n seed: integer\n The see for the random number generator\n \n Returns:\n None, a Qubit Operator (openfermion.QubitOperator) object is saved under 'qubitop.json'\n \"\"\"\n NUM_QUBITS = range(0,nqubits)\n\n if seed is not None:\n random.seed(seed)\n\n # Initialize empty qubit operator\n qubitop = QubitOperator()\n\n # Loop over number of separate terms in qubit operator\n for i in range(0, nterms):\n # Choose number of paulis to measure in term\n num_paulis = random.choice(range(nqubits+1))\n\n # Create empty list of qubits\n qubits = []\n\n # Create empty term\n full_term = \"\"\n \n # Loop over paulis\n for j in range(0, num_paulis):\n # Choose random qubit \n qubit_index = random.choice(NUM_QUBITS)\n while(qubit_index in qubits):\n # Ensure qubit not already being measured in this term\n qubit_index = random.choice(NUM_QUBITS)\n qubits.append(qubit_index)\n \n # Choose pauli\n pauli_gate = random.choice([\"X\",\"Y\",\"Z\"])\n # Construct string\n full_term += pauli_gate + str(qubit_index) + \" \"\n # Add full term to qubit operator\n qubitop += QubitOperator(full_term)\n\n 
return qubitop\n\n\ndef create_random_isingop(nqubits, nterms, seed=None):\n \"\"\"Generates random ising operator acting on nqubits with nterms for testing purposes.\n\n Args:\n nqubits: integer\n The number of qubits in the qubit operator\n nterms: integer\n The number of terms in the qubit operator\n\n *** OPTIONAL ***\n seed: integer\n The see for the random number generator\n \n Returns:\n an Ising Operator (openfermion.IsingOperator) object\n \"\"\"\n NUM_QUBITS = range(0,nqubits)\n\n if seed is not None:\n random.seed(seed)\n\n # Initialize empty qubit operator\n isingop = IsingOperator()\n\n # Loop over number of separate terms in qubit operator\n for i in range(0, nterms):\n # Choose number of paulis to measure in term\n num_paulis = random.choice(range(nqubits+1))\n\n # Create empty list of qubits\n qubits = []\n\n # Create empty term\n full_term = \"\"\n \n # Loop over paulis\n for j in range(0, num_paulis):\n # Choose random qubit \n qubit_index = random.choice(NUM_QUBITS)\n while(qubit_index in qubits):\n # Ensure qubit not already being measured in this term\n qubit_index = random.choice(NUM_QUBITS)\n qubits.append(qubit_index)\n \n # Choose pauli\n pauli_gate = \"Z\"\n # Construct string\n full_term += pauli_gate + str(qubit_index) + \" \"\n # Add full term to qubit operator\n isingop += IsingOperator(full_term)\n\n return isingop\n\n\ndef create_random_wavefunction(nqubits, seed=None):\n if seed:\n np.random.seed(seed)\n \n class Wavefunction():\n def __init__(self, nqubits):\n random_vector = np.random.rand(2**nqubits) + 1j*np.random.rand(2**nqubits)\n self.amplitudes = random_vector / np.linalg.norm(random_vector)\n self.nqubits = nqubits\n\n def __len__(self):\n return self.nqubits\n\n def probabilities(self):\n return np.absolute(self.amplitudes)**2\n\n return Wavefunction(nqubits)"
] | [
[
"numpy.eye",
"numpy.array",
"numpy.zeros",
"numpy.iscomplexobj"
],
[
"numpy.absolute",
"numpy.linalg.norm",
"numpy.random.rand",
"numpy.random.seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yuishihara/chainerrl | [
"74901712a8ed8207b9d526d3f45b04bf22996b8d",
"74901712a8ed8207b9d526d3f45b04bf22996b8d",
"74901712a8ed8207b9d526d3f45b04bf22996b8d",
"74901712a8ed8207b9d526d3f45b04bf22996b8d"
] | [
"examples/ale/train_nsq_ale.py",
"chainerrl/misc/init_like_torch.py",
"tests/misc_tests/test_draw_computational_graph.py",
"tests/functions_tests/test_lower_triangular_matrix.py"
] | [
"from __future__ import print_function\nfrom __future__ import division\nfrom __future__ import unicode_literals\nfrom __future__ import absolute_import\nfrom builtins import * # NOQA\nfrom future import standard_library\nstandard_library.install_aliases() # NOQA\n\nimport argparse\nimport os\nimport random\n\n# This prevents numpy from using multiple threads\nos.environ['OMP_NUM_THREADS'] = '1' # NOQA\n\nimport gym\ngym.undo_logger_setup() # NOQA\nfrom chainer import links as L\nimport numpy as np\n\nfrom chainerrl.action_value import DiscreteActionValue\nfrom chainerrl.agents import nsq\nfrom chainerrl import experiments\nfrom chainerrl import explorers\nfrom chainerrl import links\nfrom chainerrl import misc\nfrom chainerrl.optimizers import rmsprop_async\nfrom chainerrl import spaces\n\nimport atari_wrappers\n\n\ndef main():\n\n parser = argparse.ArgumentParser()\n parser.add_argument('processes', type=int)\n parser.add_argument('--env', type=str, default='BreakoutNoFrameskip-v4')\n parser.add_argument('--seed', type=int, default=0,\n help='Random seed [0, 2 ** 31)')\n parser.add_argument('--lr', type=float, default=7e-4)\n parser.add_argument('--steps', type=int, default=8 * 10 ** 7)\n parser.add_argument('--max-episode-len', type=int,\n default=5 * 60 * 60 // 4, # 5 minutes with 60/4 fps\n help='Maximum number of steps for each episode.')\n parser.add_argument('--final-exploration-frames',\n type=int, default=4 * 10 ** 6)\n parser.add_argument('--outdir', type=str, default='results',\n help='Directory path to save output files.'\n ' If it does not exist, it will be created.')\n parser.add_argument('--profile', action='store_true')\n parser.add_argument('--eval-interval', type=int, default=10 ** 6)\n parser.add_argument('--eval-n-runs', type=int, default=10)\n parser.add_argument('--demo', action='store_true', default=False)\n parser.add_argument('--load', type=str, default=None)\n parser.add_argument('--logging-level', type=int, default=20,\n help='Logging level. 10:DEBUG, 20:INFO etc.')\n parser.add_argument('--render', action='store_true', default=False,\n help='Render env states in a GUI window.')\n parser.add_argument('--monitor', action='store_true', default=False,\n help='Monitor env. 
Videos and additional information'\n ' are saved as output files.')\n args = parser.parse_args()\n\n import logging\n logging.basicConfig(level=args.logging_level)\n\n # Set a random seed used in ChainerRL.\n # If you use more than one processes, the results will be no longer\n # deterministic even with the same random seed.\n misc.set_random_seed(args.seed)\n\n # Set different random seeds for different subprocesses.\n # If seed=0 and processes=4, subprocess seeds are [0, 1, 2, 3].\n # If seed=1 and processes=4, subprocess seeds are [4, 5, 6, 7].\n process_seeds = np.arange(args.processes) + args.seed * args.processes\n assert process_seeds.max() < 2 ** 31\n\n args.outdir = experiments.prepare_output_dir(args, args.outdir)\n print('Output files are saved in {}'.format(args.outdir))\n\n def make_env(process_idx, test):\n # Use different random seeds for train and test envs\n process_seed = process_seeds[process_idx]\n env_seed = 2 ** 31 - 1 - process_seed if test else process_seed\n env = atari_wrappers.wrap_deepmind(\n atari_wrappers.make_atari(args.env),\n episode_life=not test,\n clip_rewards=not test)\n env.seed(int(env_seed))\n if args.monitor:\n env = gym.wrappers.Monitor(\n env, args.outdir,\n mode='evaluation' if test else 'training')\n if args.render:\n misc.env_modifiers.make_rendered(env)\n return env\n\n sample_env = make_env(0, test=False)\n action_space = sample_env.action_space\n assert isinstance(action_space, spaces.Discrete)\n\n # Define a model and its optimizer\n q_func = links.Sequence(\n links.NIPSDQNHead(),\n L.Linear(256, action_space.n),\n DiscreteActionValue)\n opt = rmsprop_async.RMSpropAsync(lr=args.lr, eps=1e-1, alpha=0.99)\n opt.setup(q_func)\n\n def phi(x):\n # Feature extractor\n return np.asarray(x, dtype=np.float32) / 255\n\n # Make process-specific agents to diversify exploration\n def make_agent(process_idx):\n # Random epsilon assignment described in the original paper\n rand = random.random()\n if rand < 0.4:\n epsilon_target = 0.1\n elif rand < 0.7:\n epsilon_target = 0.01\n else:\n epsilon_target = 0.5\n explorer = explorers.LinearDecayEpsilonGreedy(\n 1, epsilon_target, args.final_exploration_frames,\n action_space.sample)\n # Suppress the explorer logger\n explorer.logger.setLevel(logging.INFO)\n return nsq.NSQ(q_func, opt, t_max=5, gamma=0.99,\n i_target=40000,\n explorer=explorer, phi=phi)\n\n if args.demo:\n env = make_env(0, True)\n agent = make_agent(0)\n eval_stats = experiments.eval_performance(\n env=env,\n agent=agent,\n n_runs=args.eval_n_runs)\n print('n_runs: {} mean: {} median: {} stdev {}'.format(\n args.eval_n_runs, eval_stats['mean'], eval_stats['median'],\n eval_stats['stdev']))\n else:\n explorer = explorers.ConstantEpsilonGreedy(0.05, action_space.sample)\n\n # Linearly decay the learning rate to zero\n def lr_setter(env, agent, value):\n agent.optimizer.lr = value\n\n lr_decay_hook = experiments.LinearInterpolationHook(\n args.steps, args.lr, 0, lr_setter)\n\n experiments.train_agent_async(\n outdir=args.outdir,\n processes=args.processes,\n make_env=make_env,\n make_agent=make_agent,\n profile=args.profile,\n steps=args.steps,\n eval_n_runs=args.eval_n_runs,\n eval_interval=args.eval_interval,\n eval_explorer=explorer,\n max_episode_len=args.max_episode_len,\n global_step_hooks=[lr_decay_hook],\n save_best_so_far_agent=False,\n )\n\n\nif __name__ == '__main__':\n main()\n",
"from __future__ import division\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\nfrom __future__ import absolute_import\nfrom future import standard_library\nstandard_library.install_aliases() # NOQA\nfrom chainer import links as L\nimport numpy as np\n\n\ndef init_like_torch(link):\n # Mimic torch's default parameter initialization\n # TODO(muupan): Use chainer's initializers when it is merged\n for l in link.links():\n if isinstance(l, L.Linear):\n out_channels, in_channels = l.W.data.shape\n stdv = 1 / np.sqrt(in_channels)\n l.W.data[:] = np.random.uniform(-stdv, stdv, size=l.W.data.shape)\n if l.b is not None:\n l.b.data[:] = np.random.uniform(-stdv, stdv,\n size=l.b.data.shape)\n elif isinstance(l, L.Convolution2D):\n out_channels, in_channels, kh, kw = l.W.data.shape\n stdv = 1 / np.sqrt(in_channels * kh * kw)\n l.W.data[:] = np.random.uniform(-stdv, stdv, size=l.W.data.shape)\n if l.b is not None:\n l.b.data[:] = np.random.uniform(-stdv, stdv,\n size=l.b.data.shape)\n",
"from __future__ import unicode_literals\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\nfrom future import standard_library\nstandard_library.install_aliases() # NOQA\n\nimport os\nimport tempfile\nimport unittest\n\nimport chainer\nfrom chainer import testing\nimport numpy as np\n\nimport chainerrl\n\n\n_v = chainer.Variable(np.zeros(5))\n_dav = chainerrl.action_value.DiscreteActionValue(\n chainer.Variable(np.zeros((5, 5))))\n_qav = chainerrl.action_value.QuadraticActionValue(\n chainer.Variable(np.zeros((5, 5), dtype=np.float32)),\n chainer.Variable(np.ones((5, 5, 5), dtype=np.float32)),\n chainer.Variable(np.zeros((5, 1), dtype=np.float32)),\n)\n_sdis = chainerrl.distribution.SoftmaxDistribution(\n chainer.Variable(np.zeros((5, 5))))\n_gdis = chainerrl.distribution.GaussianDistribution(\n chainer.Variable(np.zeros((5, 5), dtype=np.float32)),\n chainer.Variable(np.ones((5, 5), dtype=np.float32)))\n\n\[email protected](\n {'obj': [], 'expected': []},\n {'obj': (), 'expected': []},\n {'obj': _v, 'expected': [_v]},\n {'obj': _dav, 'expected': list(_dav.params)},\n {'obj': _qav, 'expected': list(_qav.params)},\n {'obj': _sdis, 'expected': list(_sdis.params)},\n {'obj': _gdis, 'expected': list(_gdis.params)},\n {'obj': [_v, _dav, _sdis],\n 'expected': [_v] + list(_dav.params) + list(_sdis.params)},\n)\nclass TestCollectVariables(unittest.TestCase):\n\n def _assert_eq_var_list(self, a, b):\n # Equality between two Variable lists\n self.assertEqual(len(a), len(b))\n self.assertTrue(isinstance(a, list))\n self.assertTrue(isinstance(b, list))\n for item in a:\n self.assertTrue(isinstance(item, chainer.Variable))\n for item in b:\n self.assertTrue(isinstance(item, chainer.Variable))\n for va, vb in zip(a, b):\n self.assertEqual(id(va), id(vb))\n\n def test_collect_variables(self):\n vs = chainerrl.misc.collect_variables(self.obj)\n self._assert_eq_var_list(vs, self.expected)\n\n # Wrap by a list\n vs = chainerrl.misc.collect_variables([self.obj])\n self._assert_eq_var_list(vs, self.expected)\n\n # Wrap by two lists\n vs = chainerrl.misc.collect_variables([[self.obj]])\n self._assert_eq_var_list(vs, self.expected)\n\n # Wrap by a tuple\n vs = chainerrl.misc.collect_variables((self.obj,))\n self._assert_eq_var_list(vs, self.expected)\n\n # Wrap by a two tuples\n vs = chainerrl.misc.collect_variables(((self.obj,),))\n self._assert_eq_var_list(vs, self.expected)\n\n\nclass TestDrawComputationalGraph(unittest.TestCase):\n\n def test_draw_computational_graph(self):\n x = chainer.Variable(np.zeros(5))\n y = x ** 2 + chainer.Variable(np.ones(5))\n dirname = tempfile.mkdtemp()\n filepath = os.path.join(dirname, 'graph')\n chainerrl.misc.draw_computational_graph(y, filepath)\n self.assertTrue(os.path.exists(filepath + '.gv'))\n if chainerrl.misc.is_graphviz_available():\n self.assertTrue(os.path.exists(filepath + '.png'))\n else:\n self.assertFalse(os.path.exists(filepath + '.png'))\n",
"from __future__ import unicode_literals\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\nfrom future import standard_library\nstandard_library.install_aliases() # NOQA\nimport unittest\n\nimport numpy\n\nimport chainer\nfrom chainer import cuda\nfrom chainer import gradient_check\nfrom chainer import testing\nfrom chainer.testing import attr\n\nfrom chainerrl.functions.lower_triangular_matrix import lower_triangular_matrix\nfrom chainerrl.functions.lower_triangular_matrix import LowerTriangularMatrix\n\n\[email protected](\n {'n': 1},\n {'n': 2},\n {'n': 3},\n {'n': 4},\n {'n': 5},\n)\nclass TestLowerTriangularMatrix(unittest.TestCase):\n\n def setUp(self):\n self.batch_size = 5\n self.diag = numpy.random.uniform(\n 0.1, 1, (self.batch_size, self.n)).astype(numpy.float32)\n non_diag_size = self.n * (self.n - 1) // 2\n self.non_diag = numpy.random.uniform(\n -1, 1, (self.batch_size, non_diag_size)).astype(numpy.float32)\n self.gy = numpy.random.uniform(\n -1, 1, (self.batch_size, self.n, self.n)).astype(numpy.float32)\n\n def check_forward(self, diag_data, non_diag_data):\n diag = chainer.Variable(diag_data)\n non_diag = chainer.Variable(non_diag_data)\n y = lower_triangular_matrix(diag, non_diag)\n\n correct_y = numpy.zeros(\n (self.batch_size, self.n, self.n), dtype=numpy.float32)\n\n tril_rows, tril_cols = numpy.tril_indices(self.n, -1)\n correct_y[:, tril_rows, tril_cols] = cuda.to_cpu(non_diag_data)\n\n diag_rows, diag_cols = numpy.diag_indices(self.n)\n correct_y[:, diag_rows, diag_cols] = cuda.to_cpu(diag_data)\n\n gradient_check.assert_allclose(correct_y, cuda.to_cpu(y.data))\n\n def test_forward_cpu(self):\n self.check_forward(self.diag, self.non_diag)\n\n @attr.gpu\n def test_forward_gpu(self):\n self.check_forward(cuda.to_gpu(self.diag), cuda.to_gpu(self.non_diag))\n\n def check_backward(self, x_data, y_grad):\n gradient_check.check_backward(\n LowerTriangularMatrix(),\n x_data, y_grad, eps=1e-2, rtol=1e-2)\n\n def test_backward_cpu(self):\n self.check_backward((self.diag, self.non_diag), self.gy)\n\n @attr.gpu\n def test_backward_gpu(self):\n self.check_backward((cuda.to_gpu(self.diag), cuda.to_gpu(\n self.non_diag)), cuda.to_gpu(self.gy))\n\n\ntesting.run_module(__name__, __file__)\n"
] | [
[
"numpy.asarray",
"numpy.arange"
],
[
"numpy.random.uniform",
"numpy.sqrt"
],
[
"numpy.zeros",
"numpy.ones"
],
[
"numpy.tril_indices",
"numpy.random.uniform",
"numpy.diag_indices",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mdand2000/keras-team-keras | [
"5eecd55a6f1d6d149b42f9b76aa53d4c5ab8d3eb",
"5eecd55a6f1d6d149b42f9b76aa53d4c5ab8d3eb",
"5eecd55a6f1d6d149b42f9b76aa53d4c5ab8d3eb"
] | [
"tests/keras/test_callbacks.py",
"tests/test_loss_masking.py",
"examples/imdb_bidirectional_lstm.py"
] | [
"import os\nimport multiprocessing\n\nimport numpy as np\nimport pytest\nfrom csv import reader\nfrom csv import Sniffer\nimport shutil\nfrom keras import optimizers\nfrom keras import initializers\nfrom keras import callbacks\nfrom keras.models import Sequential, Model\nfrom keras.layers import Input, Dense, Dropout, add, dot, Lambda\nfrom keras.layers.convolutional import Conv2D\nfrom keras.layers.pooling import MaxPooling2D, GlobalAveragePooling1D, GlobalAveragePooling2D\nfrom keras.utils.test_utils import get_test_data\nfrom keras.utils.test_utils import keras_test\nfrom keras import backend as K\nfrom keras.utils import np_utils\ntry:\n from unittest.mock import patch\nexcept:\n from mock import patch\n\n\ninput_dim = 2\nnum_hidden = 4\nnum_classes = 2\nbatch_size = 5\ntrain_samples = 20\ntest_samples = 20\n\n\n@keras_test\ndef test_TerminateOnNaN():\n np.random.seed(1337)\n (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,\n num_test=test_samples,\n input_shape=(input_dim,),\n classification=True,\n num_classes=num_classes)\n\n y_test = np_utils.to_categorical(y_test)\n y_train = np_utils.to_categorical(y_train)\n cbks = [callbacks.TerminateOnNaN()]\n model = Sequential()\n initializer = initializers.Constant(value=1e5)\n for _ in range(5):\n model.add(Dense(num_hidden, input_dim=input_dim, activation='relu',\n kernel_initializer=initializer))\n model.add(Dense(num_classes, activation='linear'))\n model.compile(loss='mean_squared_error',\n optimizer='rmsprop')\n\n # case 1 fit\n history = model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test), callbacks=cbks, epochs=20)\n loss = history.history['loss']\n assert len(loss) == 1\n assert loss[0] == np.inf\n\n # case 2 fit_generator\n def data_generator():\n max_batch_index = len(X_train) // batch_size\n i = 0\n while 1:\n yield (X_train[i * batch_size: (i + 1) * batch_size],\n y_train[i * batch_size: (i + 1) * batch_size])\n i += 1\n i = i % max_batch_index\n history = model.fit_generator(data_generator(),\n len(X_train),\n validation_data=(X_test, y_test),\n callbacks=cbks,\n epochs=20)\n loss = history.history['loss']\n assert len(loss) == 1\n assert loss[0] == np.inf or np.isnan(loss[0])\n\n\n@keras_test\ndef test_stop_training_csv(tmpdir):\n np.random.seed(1337)\n fp = str(tmpdir / 'test.csv')\n (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,\n num_test=test_samples,\n input_shape=(input_dim,),\n classification=True,\n num_classes=num_classes)\n\n y_test = np_utils.to_categorical(y_test)\n y_train = np_utils.to_categorical(y_train)\n cbks = [callbacks.TerminateOnNaN(), callbacks.CSVLogger(fp)]\n model = Sequential()\n for _ in range(5):\n model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))\n model.add(Dense(num_classes, activation='linear'))\n model.compile(loss='mean_squared_error',\n optimizer='rmsprop')\n\n def data_generator():\n i = 0\n max_batch_index = len(X_train) // batch_size\n tot = 0\n while 1:\n if tot > 3 * len(X_train):\n yield np.ones([batch_size, input_dim]) * np.nan, np.ones([batch_size, num_classes]) * np.nan\n else:\n yield (X_train[i * batch_size: (i + 1) * batch_size],\n y_train[i * batch_size: (i + 1) * batch_size])\n i += 1\n tot += 1\n i = i % max_batch_index\n\n history = model.fit_generator(data_generator(),\n len(X_train) // batch_size,\n validation_data=(X_test, y_test),\n callbacks=cbks,\n epochs=20)\n loss = history.history['loss']\n assert len(loss) > 1\n assert loss[-1] == np.inf or 
np.isnan(loss[-1])\n\n values = []\n with open(fp) as f:\n for x in reader(f):\n values.append(x)\n\n assert 'nan' in values[-1], 'The last epoch was not logged.'\n os.remove(fp)\n\n\n@keras_test\ndef test_ModelCheckpoint(tmpdir):\n np.random.seed(1337)\n filepath = str(tmpdir / 'checkpoint.h5')\n (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,\n num_test=test_samples,\n input_shape=(input_dim,),\n classification=True,\n num_classes=num_classes)\n y_test = np_utils.to_categorical(y_test)\n y_train = np_utils.to_categorical(y_train)\n # case 1\n monitor = 'val_loss'\n save_best_only = False\n mode = 'auto'\n\n model = Sequential()\n model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))\n model.add(Dense(num_classes, activation='softmax'))\n model.compile(loss='categorical_crossentropy',\n optimizer='rmsprop',\n metrics=['accuracy'])\n\n cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,\n save_best_only=save_best_only, mode=mode)]\n model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test), callbacks=cbks, epochs=1)\n assert os.path.isfile(filepath)\n os.remove(filepath)\n\n # case 2\n mode = 'min'\n cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,\n save_best_only=save_best_only, mode=mode)]\n model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test), callbacks=cbks, epochs=1)\n assert os.path.isfile(filepath)\n os.remove(filepath)\n\n # case 3\n mode = 'max'\n monitor = 'val_acc'\n cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,\n save_best_only=save_best_only, mode=mode)]\n model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test), callbacks=cbks, epochs=1)\n assert os.path.isfile(filepath)\n os.remove(filepath)\n\n # case 4\n save_best_only = True\n cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,\n save_best_only=save_best_only, mode=mode)]\n model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test), callbacks=cbks, epochs=1)\n assert os.path.isfile(filepath)\n os.remove(filepath)\n\n # case 5\n save_best_only = False\n period = 2\n mode = 'auto'\n filepath = 'checkpoint.{epoch:02d}.h5'\n cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,\n save_best_only=save_best_only, mode=mode,\n period=period)]\n model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test), callbacks=cbks, epochs=4)\n assert os.path.isfile(filepath.format(epoch=2))\n assert os.path.isfile(filepath.format(epoch=4))\n assert not os.path.exists(filepath.format(epoch=1))\n assert not os.path.exists(filepath.format(epoch=3))\n os.remove(filepath.format(epoch=2))\n os.remove(filepath.format(epoch=4))\n assert not tmpdir.listdir()\n\n\n@keras_test\ndef test_EarlyStopping():\n np.random.seed(1337)\n (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,\n num_test=test_samples,\n input_shape=(input_dim,),\n classification=True,\n num_classes=num_classes)\n y_test = np_utils.to_categorical(y_test)\n y_train = np_utils.to_categorical(y_train)\n model = Sequential()\n model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))\n model.add(Dense(num_classes, activation='softmax'))\n model.compile(loss='categorical_crossentropy',\n optimizer='rmsprop',\n metrics=['accuracy'])\n mode = 'max'\n monitor = 'val_acc'\n patience = 0\n cbks = [callbacks.EarlyStopping(patience=patience, monitor=monitor, mode=mode)]\n history = model.fit(X_train, y_train, 
batch_size=batch_size,\n validation_data=(X_test, y_test), callbacks=cbks, epochs=20)\n\n mode = 'auto'\n monitor = 'val_acc'\n patience = 2\n cbks = [callbacks.EarlyStopping(patience=patience, monitor=monitor, mode=mode)]\n history = model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test), callbacks=cbks, epochs=20)\n\n\n@keras_test\ndef test_EarlyStopping_reuse():\n np.random.seed(1337)\n patience = 3\n data = np.random.random((100, 1))\n labels = np.where(data > 0.5, 1, 0)\n model = Sequential((\n Dense(1, input_dim=1, activation='relu'),\n Dense(1, activation='sigmoid'),\n ))\n model.compile(optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])\n stopper = callbacks.EarlyStopping(monitor='acc', patience=patience)\n weights = model.get_weights()\n\n hist = model.fit(data, labels, callbacks=[stopper], epochs=20)\n assert len(hist.epoch) >= patience\n\n # This should allow training to go for at least `patience` epochs\n model.set_weights(weights)\n hist = model.fit(data, labels, callbacks=[stopper], epochs=20)\n assert len(hist.epoch) >= patience\n\n\n@keras_test\ndef test_EarlyStopping_patience():\n class DummyModel(object):\n def __init__(self):\n self.stop_training = False\n\n early_stop = callbacks.EarlyStopping(monitor='val_loss', patience=2)\n early_stop.model = DummyModel()\n\n losses = [0.0860, 0.1096, 0.1040, 0.1019]\n\n # Should stop after epoch 3, as the loss has not improved after patience=2 epochs.\n epochs_trained = 0\n early_stop.on_train_begin()\n\n for epoch in range(len(losses)):\n epochs_trained += 1\n early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})\n\n if early_stop.model.stop_training:\n break\n\n assert epochs_trained == 3\n\n\n@keras_test\ndef test_EarlyStopping_baseline():\n class DummyModel(object):\n def __init__(self):\n self.stop_training = False\n\n def baseline_tester(acc_levels):\n early_stop = callbacks.EarlyStopping(monitor='val_acc', baseline=0.75, patience=2)\n early_stop.model = DummyModel()\n epochs_trained = 0\n early_stop.on_train_begin()\n for epoch in range(len(acc_levels)):\n epochs_trained += 1\n early_stop.on_epoch_end(epoch, logs={'val_acc': acc_levels[epoch]})\n if early_stop.model.stop_training:\n break\n return epochs_trained\n\n acc_levels = [0.55, 0.76, 0.81, 0.81]\n baseline_met = baseline_tester(acc_levels)\n acc_levels = [0.55, 0.74, 0.81, 0.81]\n baseline_not_met = baseline_tester(acc_levels)\n\n # All epochs should run because baseline was met in second epoch\n assert baseline_met == 4\n # Baseline was not met by second epoch and should stop\n assert baseline_not_met == 2\n\n\n@keras_test\ndef test_LearningRateScheduler():\n np.random.seed(1337)\n (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,\n num_test=test_samples,\n input_shape=(input_dim,),\n classification=True,\n num_classes=num_classes)\n y_test = np_utils.to_categorical(y_test)\n y_train = np_utils.to_categorical(y_train)\n model = Sequential()\n model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))\n model.add(Dense(num_classes, activation='softmax'))\n model.compile(loss='categorical_crossentropy',\n optimizer='sgd',\n metrics=['accuracy'])\n\n cbks = [callbacks.LearningRateScheduler(lambda x: 1. / (1. 
+ x))]\n model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test), callbacks=cbks, epochs=5)\n assert (float(K.get_value(model.optimizer.lr)) - 0.2) < K.epsilon()\n\n\n@keras_test\ndef test_ReduceLROnPlateau():\n np.random.seed(1337)\n (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,\n num_test=test_samples,\n input_shape=(input_dim,),\n classification=True,\n num_classes=num_classes)\n y_test = np_utils.to_categorical(y_test)\n y_train = np_utils.to_categorical(y_train)\n\n def make_model():\n np.random.seed(1337)\n model = Sequential()\n model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))\n model.add(Dense(num_classes, activation='softmax'))\n\n model.compile(loss='categorical_crossentropy',\n optimizer=optimizers.SGD(lr=0.1),\n metrics=['accuracy'])\n return model\n\n model = make_model()\n\n # This should reduce the LR after the first epoch (due to high epsilon).\n cbks = [callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, min_delta=10, patience=1, cooldown=5)]\n model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test), callbacks=cbks, epochs=5, verbose=2)\n assert np.allclose(float(K.get_value(model.optimizer.lr)), 0.01, atol=K.epsilon())\n\n model = make_model()\n cbks = [callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, min_delta=0, patience=1, cooldown=5)]\n model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test), callbacks=cbks, epochs=5, verbose=2)\n assert np.allclose(float(K.get_value(model.optimizer.lr)), 0.1, atol=K.epsilon())\n\n\n@keras_test\ndef test_ReduceLROnPlateau_patience():\n class DummyOptimizer(object):\n def __init__(self):\n self.lr = K.variable(1.0)\n\n class DummyModel(object):\n def __init__(self):\n self.optimizer = DummyOptimizer()\n\n reduce_on_plateau = callbacks.ReduceLROnPlateau(monitor='val_loss',\n patience=2)\n reduce_on_plateau.model = DummyModel()\n\n losses = [0.0860, 0.1096, 0.1040]\n lrs = []\n\n for epoch in range(len(losses)):\n reduce_on_plateau.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})\n lrs.append(K.get_value(reduce_on_plateau.model.optimizer.lr))\n\n # The learning rates should be 1.0 except the last one\n assert all([lr == 1.0 for lr in lrs[:-1]]) and lrs[-1] < 1.0\n\n\n@keras_test\ndef test_ReduceLROnPlateau_backwards_compatibility():\n import warnings\n with warnings.catch_warnings(record=True) as ws:\n reduce_on_plateau = callbacks.ReduceLROnPlateau(epsilon=1e-13)\n # Check if warnings are disabled\n if os.environ.get(\"PYTHONWARNINGS\") != \"ignore\":\n assert \"`epsilon` argument is deprecated\" in str(ws[0].message)\n assert not hasattr(reduce_on_plateau, 'epsilon')\n assert hasattr(reduce_on_plateau, 'min_delta')\n assert reduce_on_plateau.min_delta == 1e-13\n\n\n@keras_test\ndef test_CSVLogger(tmpdir):\n np.random.seed(1337)\n filepath = str(tmpdir / 'log.tsv')\n sep = '\\t'\n (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,\n num_test=test_samples,\n input_shape=(input_dim,),\n classification=True,\n num_classes=num_classes)\n y_test = np_utils.to_categorical(y_test)\n y_train = np_utils.to_categorical(y_train)\n\n def make_model():\n np.random.seed(1337)\n model = Sequential()\n model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))\n model.add(Dense(num_classes, activation='softmax'))\n\n model.compile(loss='categorical_crossentropy',\n optimizer=optimizers.SGD(lr=0.1),\n metrics=['accuracy'])\n return model\n\n # case 1, 
create new file with defined separator\n model = make_model()\n cbks = [callbacks.CSVLogger(filepath, separator=sep)]\n model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test), callbacks=cbks, epochs=1)\n\n assert os.path.isfile(filepath)\n with open(filepath) as csvfile:\n dialect = Sniffer().sniff(csvfile.read())\n assert dialect.delimiter == sep\n del model\n del cbks\n\n # case 2, append data to existing file, skip header\n model = make_model()\n cbks = [callbacks.CSVLogger(filepath, separator=sep, append=True)]\n model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test), callbacks=cbks, epochs=1)\n\n # case 3, reuse of CSVLogger object\n model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test), callbacks=cbks, epochs=1)\n\n import re\n with open(filepath) as csvfile:\n output = \" \".join(csvfile.readlines())\n assert len(re.findall('epoch', output)) == 1\n\n os.remove(filepath)\n assert not tmpdir.listdir()\n\n\n@keras_test\ndef test_TensorBoard(tmpdir):\n np.random.seed(np.random.randint(1, 1e7))\n filepath = str(tmpdir / 'logs')\n\n (X_train, y_train), (X_test, y_test) = get_test_data(\n num_train=train_samples,\n num_test=test_samples,\n input_shape=(input_dim,),\n classification=True,\n num_classes=num_classes)\n y_test = np_utils.to_categorical(y_test)\n y_train = np_utils.to_categorical(y_train)\n\n def data_generator(train):\n if train:\n max_batch_index = len(X_train) // batch_size\n else:\n max_batch_index = len(X_test) // batch_size\n i = 0\n while 1:\n if train:\n # simulate multi-input/output models\n yield (X_train[i * batch_size: (i + 1) * batch_size],\n y_train[i * batch_size: (i + 1) * batch_size])\n else:\n yield (X_test[i * batch_size: (i + 1) * batch_size],\n y_test[i * batch_size: (i + 1) * batch_size])\n i += 1\n i = i % max_batch_index\n\n inp = Input((input_dim,))\n hidden = Dense(num_hidden, activation='relu')(inp)\n hidden = Dropout(0.1)(hidden)\n output = Dense(num_classes, activation='softmax')(hidden)\n model = Model(inputs=inp, outputs=output)\n model.compile(loss='categorical_crossentropy',\n optimizer='sgd',\n metrics=['accuracy'])\n\n # we must generate new callbacks for each test, as they aren't stateless\n def callbacks_factory(histogram_freq):\n return [callbacks.TensorBoard(log_dir=filepath,\n histogram_freq=histogram_freq,\n write_images=True, write_grads=True,\n embeddings_freq=1,\n embeddings_layer_names=['dense_1'],\n batch_size=5)]\n\n # fit without validation data\n model.fit(X_train, y_train, batch_size=batch_size,\n callbacks=callbacks_factory(histogram_freq=0), epochs=3)\n\n # fit with validation data and accuracy\n model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test),\n callbacks=callbacks_factory(histogram_freq=0), epochs=2)\n\n # fit generator without validation data\n model.fit_generator(data_generator(True), len(X_train), epochs=2,\n callbacks=callbacks_factory(histogram_freq=0))\n\n # fit generator with validation data and accuracy\n model.fit_generator(data_generator(True), len(X_train), epochs=2,\n validation_data=(X_test, y_test),\n callbacks=callbacks_factory(histogram_freq=1))\n\n assert os.path.isdir(filepath)\n shutil.rmtree(filepath)\n assert not tmpdir.listdir()\n\n\n@keras_test\[email protected]((K.backend() != 'tensorflow'),\n reason='Requires TensorFlow backend')\ndef test_TensorBoard_histogram_freq_must_have_validation_data(tmpdir):\n np.random.seed(np.random.randint(1, 1e7))\n filepath = str(tmpdir / 
'logs')\n\n (X_train, y_train), (X_test, y_test) = get_test_data(\n num_train=train_samples,\n num_test=test_samples,\n input_shape=(input_dim,),\n classification=True,\n num_classes=num_classes)\n y_test = np_utils.to_categorical(y_test)\n y_train = np_utils.to_categorical(y_train)\n\n def data_generator(train):\n if train:\n max_batch_index = len(X_train) // batch_size\n else:\n max_batch_index = len(X_test) // batch_size\n i = 0\n while 1:\n if train:\n # simulate multi-input/output models\n yield (X_train[i * batch_size: (i + 1) * batch_size],\n y_train[i * batch_size: (i + 1) * batch_size])\n else:\n yield (X_test[i * batch_size: (i + 1) * batch_size],\n y_test[i * batch_size: (i + 1) * batch_size])\n i += 1\n i = i % max_batch_index\n\n inp = Input((input_dim,))\n hidden = Dense(num_hidden, activation='relu')(inp)\n hidden = Dropout(0.1)(hidden)\n output = Dense(num_classes, activation='softmax')(hidden)\n model = Model(inputs=inp, outputs=output)\n model.compile(loss='categorical_crossentropy',\n optimizer='sgd',\n metrics=['accuracy'])\n\n # we must generate new callbacks for each test, as they aren't stateless\n def callbacks_factory(histogram_freq):\n return [callbacks.TensorBoard(log_dir=filepath,\n histogram_freq=histogram_freq,\n write_images=True, write_grads=True,\n embeddings_freq=1,\n embeddings_layer_names=['dense_1'],\n batch_size=5)]\n\n # fit without validation data should raise ValueError if histogram_freq > 0\n with pytest.raises(ValueError) as raised_exception:\n model.fit(X_train, y_train, batch_size=batch_size,\n callbacks=callbacks_factory(histogram_freq=1), epochs=3)\n assert 'validation_data must be provided' in str(raised_exception.value)\n\n # fit generator without validation data should raise ValueError if\n # histogram_freq > 0\n with pytest.raises(ValueError) as raised_exception:\n model.fit_generator(data_generator(True), len(X_train), epochs=2,\n callbacks=callbacks_factory(histogram_freq=1))\n assert 'validation_data must be provided' in str(raised_exception.value)\n\n # fit generator with validation data generator should raise ValueError if\n # histogram_freq > 0\n with pytest.raises(ValueError) as raised_exception:\n model.fit_generator(data_generator(True), len(X_train), epochs=2,\n validation_data=data_generator(False),\n validation_steps=1,\n callbacks=callbacks_factory(histogram_freq=1))\n assert 'validation_data must be provided' in str(raised_exception.value)\n\n\n@keras_test\ndef test_TensorBoard_multi_input_output(tmpdir):\n np.random.seed(np.random.randint(1, 1e7))\n filepath = str(tmpdir / 'logs')\n\n (X_train, y_train), (X_test, y_test) = get_test_data(\n num_train=train_samples,\n num_test=test_samples,\n input_shape=(input_dim, input_dim),\n classification=True,\n num_classes=num_classes)\n y_test = np_utils.to_categorical(y_test)\n y_train = np_utils.to_categorical(y_train)\n\n def data_generator(train):\n if train:\n max_batch_index = len(X_train) // batch_size\n else:\n max_batch_index = len(X_test) // batch_size\n i = 0\n while 1:\n if train:\n # simulate multi-input/output models\n yield ([X_train[i * batch_size: (i + 1) * batch_size]] * 2,\n [y_train[i * batch_size: (i + 1) * batch_size]] * 2)\n else:\n yield ([X_test[i * batch_size: (i + 1) * batch_size]] * 2,\n [y_test[i * batch_size: (i + 1) * batch_size]] * 2)\n i += 1\n i = i % max_batch_index\n\n inp1 = Input((input_dim, input_dim))\n inp2 = Input((input_dim, input_dim))\n inp_3d = add([inp1, inp2])\n inp_2d = GlobalAveragePooling1D()(inp_3d)\n inp_pair = Lambda(lambda x: 
x)([inp_3d, inp_2d]) # test a layer with a list of output tensors\n hidden = dot(inp_pair, axes=-1)\n hidden = Dense(num_hidden, activation='relu')(hidden)\n hidden = Dropout(0.1)(hidden)\n output1 = Dense(num_classes, activation='softmax')(hidden)\n output2 = Dense(num_classes, activation='softmax')(hidden)\n model = Model(inputs=[inp1, inp2], outputs=[output1, output2])\n model.compile(loss='categorical_crossentropy',\n optimizer='sgd',\n metrics=['accuracy'])\n\n # we must generate new callbacks for each test, as they aren't stateless\n def callbacks_factory(histogram_freq):\n return [callbacks.TensorBoard(log_dir=filepath,\n histogram_freq=histogram_freq,\n write_images=True, write_grads=True,\n embeddings_freq=1,\n embeddings_layer_names=['dense_1'],\n batch_size=5)]\n\n # fit without validation data\n model.fit([X_train] * 2, [y_train] * 2, batch_size=batch_size,\n callbacks=callbacks_factory(histogram_freq=0), epochs=3)\n\n # fit with validation data and accuracy\n model.fit([X_train] * 2, [y_train] * 2, batch_size=batch_size,\n validation_data=([X_test] * 2, [y_test] * 2),\n callbacks=callbacks_factory(histogram_freq=1), epochs=2)\n\n # fit generator without validation data\n model.fit_generator(data_generator(True), len(X_train), epochs=2,\n callbacks=callbacks_factory(histogram_freq=0))\n\n # fit generator with validation data and accuracy\n model.fit_generator(data_generator(True), len(X_train), epochs=2,\n validation_data=([X_test] * 2, [y_test] * 2),\n callbacks=callbacks_factory(histogram_freq=1))\n\n assert os.path.isdir(filepath)\n shutil.rmtree(filepath)\n assert not tmpdir.listdir()\n\n\n@keras_test\ndef test_TensorBoard_convnet(tmpdir):\n np.random.seed(np.random.randint(1, 1e7))\n filepath = str(tmpdir / 'logs')\n\n input_shape = (16, 16, 3)\n (x_train, y_train), (x_test, y_test) = get_test_data(num_train=500,\n num_test=200,\n input_shape=input_shape,\n classification=True,\n num_classes=num_classes)\n y_train = np_utils.to_categorical(y_train)\n y_test = np_utils.to_categorical(y_test)\n\n model = Sequential([\n Conv2D(filters=8, kernel_size=3,\n activation='relu',\n input_shape=input_shape),\n MaxPooling2D(pool_size=2),\n Conv2D(filters=4, kernel_size=(3, 3),\n activation='relu', padding='same'),\n GlobalAveragePooling2D(),\n Dense(num_classes, activation='softmax')\n ])\n model.compile(loss='categorical_crossentropy',\n optimizer='rmsprop',\n metrics=['accuracy'])\n tsb = callbacks.TensorBoard(log_dir=filepath, histogram_freq=1,\n write_images=True, write_grads=True,\n batch_size=16)\n cbks = [tsb]\n model.summary()\n history = model.fit(x_train, y_train, epochs=2, batch_size=16,\n validation_data=(x_test, y_test),\n callbacks=cbks,\n verbose=0)\n assert os.path.isdir(filepath)\n shutil.rmtree(filepath)\n assert not tmpdir.listdir()\n\n\n@keras_test\ndef test_CallbackValData():\n np.random.seed(1337)\n (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,\n num_test=test_samples,\n input_shape=(input_dim,),\n classification=True,\n num_classes=num_classes)\n y_test = np_utils.to_categorical(y_test)\n y_train = np_utils.to_categorical(y_train)\n model = Sequential()\n model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))\n model.add(Dense(num_classes, activation='softmax'))\n model.compile(loss='categorical_crossentropy',\n optimizer='sgd',\n metrics=['accuracy'])\n\n cbk = callbacks.LambdaCallback(on_train_end=lambda x: 1)\n model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test), callbacks=[cbk], 
epochs=1)\n\n def data_generator(train):\n if train:\n max_batch_index = len(X_train) // batch_size\n else:\n max_batch_index = len(X_test) // batch_size\n i = 0\n while 1:\n if train:\n yield (X_train[i * batch_size: (i + 1) * batch_size],\n y_train[i * batch_size: (i + 1) * batch_size])\n else:\n yield (X_test[i * batch_size: (i + 1) * batch_size],\n y_test[i * batch_size: (i + 1) * batch_size])\n i += 1\n i = i % max_batch_index\n\n cbk2 = callbacks.LambdaCallback(on_train_end=lambda x: 1)\n model.fit_generator(data_generator(True), len(X_train), epochs=1,\n validation_data=(X_test, y_test),\n callbacks=[cbk2])\n\n # callback validation data should always have x, y, and sample weights\n assert len(cbk.validation_data) == len(cbk2.validation_data) == 3\n assert cbk.validation_data[0] is cbk2.validation_data[0]\n assert cbk.validation_data[1] is cbk2.validation_data[1]\n assert cbk.validation_data[2].shape == cbk2.validation_data[2].shape\n\n\n@keras_test\ndef test_LambdaCallback():\n np.random.seed(1337)\n (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,\n num_test=test_samples,\n input_shape=(input_dim,),\n classification=True,\n num_classes=num_classes)\n y_test = np_utils.to_categorical(y_test)\n y_train = np_utils.to_categorical(y_train)\n model = Sequential()\n model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))\n model.add(Dense(num_classes, activation='softmax'))\n model.compile(loss='categorical_crossentropy',\n optimizer='sgd',\n metrics=['accuracy'])\n\n # Start an arbitrary process that should run during model training and be terminated after training has completed.\n def f():\n while True:\n pass\n\n p = multiprocessing.Process(target=f)\n p.start()\n cleanup_callback = callbacks.LambdaCallback(on_train_end=lambda logs: p.terminate())\n\n cbks = [cleanup_callback]\n model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test), callbacks=cbks, epochs=5)\n p.join()\n assert not p.is_alive()\n\n\n@keras_test\ndef test_TensorBoard_with_ReduceLROnPlateau(tmpdir):\n import shutil\n np.random.seed(np.random.randint(1, 1e7))\n filepath = str(tmpdir / 'logs')\n\n (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,\n num_test=test_samples,\n input_shape=(input_dim,),\n classification=True,\n num_classes=num_classes)\n y_test = np_utils.to_categorical(y_test)\n y_train = np_utils.to_categorical(y_train)\n\n model = Sequential()\n model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))\n model.add(Dense(num_classes, activation='softmax'))\n model.compile(loss='binary_crossentropy',\n optimizer='sgd',\n metrics=['accuracy'])\n\n cbks = [\n callbacks.ReduceLROnPlateau(\n monitor='val_loss',\n factor=0.5,\n patience=4,\n verbose=1),\n callbacks.TensorBoard(\n log_dir=filepath)]\n\n model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test), callbacks=cbks, epochs=2)\n\n assert os.path.isdir(filepath)\n shutil.rmtree(filepath)\n assert not tmpdir.listdir()\n\n\n@keras_test\ndef tests_RemoteMonitor():\n (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,\n num_test=test_samples,\n input_shape=(input_dim,),\n classification=True,\n num_classes=num_classes)\n y_test = np_utils.to_categorical(y_test)\n y_train = np_utils.to_categorical(y_train)\n model = Sequential()\n model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))\n model.add(Dense(num_classes, activation='softmax'))\n 
model.compile(loss='categorical_crossentropy',\n optimizer='rmsprop',\n metrics=['accuracy'])\n cbks = [callbacks.RemoteMonitor()]\n\n with patch('requests.post'):\n model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test), callbacks=cbks, epochs=1)\n\n\n@keras_test\ndef tests_RemoteMonitorWithJsonPayload():\n (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,\n num_test=test_samples,\n input_shape=(input_dim,),\n classification=True,\n num_classes=num_classes)\n y_test = np_utils.to_categorical(y_test)\n y_train = np_utils.to_categorical(y_train)\n model = Sequential()\n model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))\n model.add(Dense(num_classes, activation='softmax'))\n model.compile(loss='categorical_crossentropy',\n optimizer='rmsprop',\n metrics=['accuracy'])\n cbks = [callbacks.RemoteMonitor(send_as_json=True)]\n\n with patch('requests.post'):\n model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test), callbacks=cbks, epochs=1)\n\n\nif __name__ == '__main__':\n pytest.main([__file__])\n",
"import numpy as np\nimport pytest\n\nfrom keras.models import Sequential\nfrom keras.engine.training_utils import weighted_masked_objective\nfrom keras.layers import TimeDistributed, Masking, Dense\nfrom keras.utils.test_utils import keras_test\nfrom keras import losses\nfrom keras import backend as K\n\n\n@keras_test\ndef test_masking():\n np.random.seed(1337)\n x = np.array([[[1], [1]],\n [[0], [0]]])\n model = Sequential()\n model.add(Masking(mask_value=0, input_shape=(2, 1)))\n model.add(TimeDistributed(Dense(1, kernel_initializer='one')))\n model.compile(loss='mse', optimizer='sgd')\n y = np.array([[[1], [1]],\n [[1], [1]]])\n loss = model.train_on_batch(x, y)\n assert loss == 0\n\n\n@keras_test\ndef test_loss_masking():\n weighted_loss = weighted_masked_objective(losses.get('mae'))\n shape = (3, 4, 2)\n x = np.arange(24).reshape(shape)\n y = 2 * x\n\n # Normally the trailing 1 is added by standardize_weights\n weights = np.ones((3,))\n mask = np.ones((3, 4))\n mask[1, 0] = 0\n\n out = K.eval(weighted_loss(K.variable(x),\n K.variable(y),\n K.variable(weights),\n K.variable(mask)))\n\n\nif __name__ == '__main__':\n pytest.main([__file__])\n",
"'''Trains a Bidirectional LSTM on the IMDB sentiment classification task.\n\nOutput after 4 epochs on CPU: ~0.8146\nTime per epoch on CPU (Core i7): ~150s.\n'''\n\nfrom __future__ import print_function\nimport numpy as np\n\nfrom keras.preprocessing import sequence\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Embedding, LSTM, Bidirectional\nfrom keras.datasets import imdb\n\n\nmax_features = 20000\n# cut texts after this number of words\n# (among top max_features most common words)\nmaxlen = 100\nbatch_size = 32\n\nprint('Loading data...')\n(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)\nprint(len(x_train), 'train sequences')\nprint(len(x_test), 'test sequences')\n\nprint('Pad sequences (samples x time)')\nx_train = sequence.pad_sequences(x_train, maxlen=maxlen)\nx_test = sequence.pad_sequences(x_test, maxlen=maxlen)\nprint('x_train shape:', x_train.shape)\nprint('x_test shape:', x_test.shape)\ny_train = np.array(y_train)\ny_test = np.array(y_test)\n\nmodel = Sequential()\nmodel.add(Embedding(max_features, 128, input_length=maxlen))\nmodel.add(Bidirectional(LSTM(64)))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(1, activation='sigmoid'))\n\n# try using different optimizers and different optimizer configs\nmodel.compile('adam', 'binary_crossentropy', metrics=['accuracy'])\n\nprint('Train...')\nmodel.fit(x_train, y_train,\n batch_size=batch_size,\n epochs=4,\n validation_data=[x_test, y_test])\n"
] | [
[
"numpy.random.random",
"numpy.random.seed",
"numpy.isnan",
"numpy.ones",
"numpy.where",
"numpy.random.randint"
],
[
"numpy.arange",
"numpy.array",
"numpy.random.seed",
"numpy.ones"
],
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Gruschwick/ECG_PLATFORM | [
"4a1ee568e8593938a3b51c595d4834f861a6db6e"
] | [
"Framework/Sketch/Helpers/Metrices.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 11 16:56:51 2019\n\n@author: x\n\"\"\"\n\nimport numpy as np\nfrom collections import Counter\n\nclass MetricesConstants(object):\n #qrs_cutoff_distance = 0.2\n qrs_cutoff_distance = 0.120 #https://www.sciencedirect.com/science/article/abs/pii/S1746809417300216\n\ndef sample_to_time(samples, freq):\n return samples/freq\n\ndef match_peaks( ref_peaks, pred_peaks, cutoff_distance = None):\n '''\n calc best matching between ref_peaks and pred_peaks with cutoff (error time distance no longer than cutoff_distance)\n [(ref_peaks[r], pred_peaks[c]) for r, c in zip(row_ind, col_ind)\n '''\n from scipy.optimize import linear_sum_assignment\n assert np.all(ref_peaks >= 0), \"positive time\"\n assert np.all(pred_peaks >= 0), \"positive time\"\n \n if cutoff_distance is None:\n cutoff_distance = MetricesConstants.qrs_cutoff_distance\n \n max_ref_peaks = np.max(ref_peaks)\n len_ref_peaks = len(ref_peaks)\n max_pred_peaks = np.max(pred_peaks)\n len_pred_peaks = len(pred_peaks)\n \n max_len = max(len_ref_peaks, len_pred_peaks)\n max_peaks = max(max_ref_peaks, max_pred_peaks)\n max_distance = max_peaks*10000 \n \n ref_peaks = np.pad(ref_peaks, ((0,max_len - len_ref_peaks),), 'constant', constant_values=(0, max_distance)) \n pred_peaks = np.pad(pred_peaks, ((0,max_len - len_pred_peaks),), 'constant', constant_values=(0, max_distance)) \n\n distance_matrix = np.abs(ref_peaks[:,np.newaxis] - pred_peaks[np.newaxis,:])\n \n distance_matrix[distance_matrix > cutoff_distance] = max_distance\n \n row_ind, col_ind= linear_sum_assignment(distance_matrix)\n \n matching_filtered = [(r,c) for r, c in zip(row_ind, col_ind) if distance_matrix[r,c] <= cutoff_distance]\n \n #ref_peaks[r], pred_peaks[c]\n return matching_filtered\n\ndef qrs_detection_scores( ref_peaks, pred_peaks, peaks_matching):\n deltas = [(ref_peaks[r] - pred_peaks[c]) for r, c in peaks_matching]\n tpr = len(peaks_matching)/len(ref_peaks)\n ppv = len(peaks_matching)/len(pred_peaks)\n \n return np.mean(deltas), np.std(deltas), tpr, ppv\n\ndef qrs_detection_by_class(ref_peaks_class, peaks_matching):\n ref_counts = Counter(ref_peaks_class)\n detected_counts = Counter(ref_peaks_class[r] for r, c in peaks_matching)\n \n return {(k, detected_counts.get(k,0)/ref_counts[k]) for k in ref_counts.keys()}, ref_counts, detected_counts\n"
] | [
[
"numpy.pad",
"numpy.abs",
"numpy.all",
"numpy.max",
"numpy.std",
"numpy.mean",
"scipy.optimize.linear_sum_assignment"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.4",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
Kiiwi/Syssel | [
"83705e3fd0edf40f09df950d5ce91c95586573f5"
] | [
"venv/Lib/site-packages/IPython/lib/latextools.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"Tools for handling LaTeX.\"\"\"\n\n# Copyright (c) IPython Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nfrom io import BytesIO, open\nfrom base64 import encodestring\nimport os\nimport tempfile\nimport shutil\nimport subprocess\n\nfrom IPython.utils.process import find_cmd, FindCmdError\nfrom IPython.config import get_config\nfrom IPython.config.configurable import SingletonConfigurable\nfrom IPython.utils.traitlets import List, Bool, Unicode\nfrom IPython.utils.py3compat import cast_unicode, cast_unicode_py2 as u\n\n\nclass LaTeXTool(SingletonConfigurable):\n \"\"\"An object to store configuration of the LaTeX tool.\"\"\"\n def _config_default(self):\n return get_config()\n \n backends = List(\n Unicode, [\"matplotlib\", \"dvipng\"],\n help=\"Preferred backend to draw LaTeX math equations. \"\n \"Backends in the list are checked one by one and the first \"\n \"usable one is used. Note that `matplotlib` backend \"\n \"is usable only for inline style equations. To draw \"\n \"display style equations, `dvipng` backend must be specified. \",\n # It is a List instead of Enum, to make configuration more\n # flexible. For example, to use matplotlib mainly but dvipng\n # for display style, the default [\"matplotlib\", \"dvipng\"] can\n # be used. To NOT use dvipng so that other repr such as\n # unicode pretty printing is used, you can use [\"matplotlib\"].\n config=True)\n\n use_breqn = Bool(\n True,\n help=\"Use breqn.sty to automatically break long equations. \"\n \"This configuration takes effect only for dvipng backend.\",\n config=True)\n\n packages = List(\n ['amsmath', 'amsthm', 'amssymb', 'bm'],\n help=\"A list of packages to use for dvipng backend. \"\n \"'breqn' will be automatically appended when use_breqn=True.\",\n config=True)\n\n preamble = Unicode(\n help=\"Additional preamble to use when generating LaTeX source \"\n \"for dvipng backend.\",\n config=True)\n\n\ndef latex_to_png(s, encode=False, backend=None, wrap=False):\n \"\"\"Render a LaTeX string to PNG.\n\n Parameters\n ----------\n s : text\n The raw string containing valid inline LaTeX.\n encode : bool, optional\n Should the PNG data base64 encoded to make it JSON'able.\n backend : {matplotlib, dvipng}\n Backend for producing PNG data.\n wrap : bool\n If true, Automatically wrap `s` as a LaTeX equation.\n\n None is returned when the backend cannot be used.\n\n \"\"\"\n s = cast_unicode(s)\n allowed_backends = LaTeXTool.instance().backends\n if backend is None:\n backend = allowed_backends[0]\n if backend not in allowed_backends:\n return None\n if backend == 'matplotlib':\n f = latex_to_png_mpl\n elif backend == 'dvipng':\n f = latex_to_png_dvipng\n else:\n raise ValueError('No such backend {0}'.format(backend))\n bin_data = f(s, wrap)\n if encode and bin_data:\n bin_data = encodestring(bin_data)\n return bin_data\n\n\ndef latex_to_png_mpl(s, wrap):\n try:\n from matplotlib import mathtext\n except ImportError:\n return None\n \n # mpl mathtext doesn't support display math, force inline\n s = s.replace('$$', '$')\n if wrap:\n s = u'${0}$'.format(s)\n \n mt = mathtext.MathTextParser('bitmap')\n f = BytesIO()\n mt.to_png(f, s, fontsize=12)\n return f.getvalue()\n\n\ndef latex_to_png_dvipng(s, wrap):\n try:\n find_cmd('latex')\n find_cmd('dvipng')\n except FindCmdError:\n return None\n try:\n workdir = tempfile.mkdtemp()\n tmpfile = os.path.join(workdir, \"tmp.tex\")\n dvifile = os.path.join(workdir, \"tmp.dvi\")\n outfile = os.path.join(workdir, 
\"tmp.png\")\n\n with open(tmpfile, \"w\", encoding='utf8') as f:\n f.writelines(genelatex(s, wrap))\n\n with open(os.devnull, 'wb') as devnull:\n subprocess.check_call(\n [\"latex\", \"-halt-on-error\", \"-interaction\", \"batchmode\", tmpfile],\n cwd=workdir, stdout=devnull, stderr=devnull)\n\n subprocess.check_call(\n [\"dvipng\", \"-T\", \"tight\", \"-x\", \"1500\", \"-z\", \"9\",\n \"-bg\", \"transparent\", \"-o\", outfile, dvifile], cwd=workdir,\n stdout=devnull, stderr=devnull)\n\n with open(outfile, \"rb\") as f:\n return f.read()\n finally:\n shutil.rmtree(workdir)\n\n\ndef kpsewhich(filename):\n \"\"\"Invoke kpsewhich command with an argument `filename`.\"\"\"\n try:\n find_cmd(\"kpsewhich\")\n proc = subprocess.Popen(\n [\"kpsewhich\", filename],\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n (stdout, stderr) = proc.communicate()\n return stdout.strip().decode('utf8', 'replace')\n except FindCmdError:\n pass\n\n\ndef genelatex(body, wrap):\n \"\"\"Generate LaTeX document for dvipng backend.\"\"\"\n lt = LaTeXTool.instance()\n breqn = wrap and lt.use_breqn and kpsewhich(\"breqn.sty\")\n yield u(r'\\documentclass{article}')\n packages = lt.packages\n if breqn:\n packages = packages + ['breqn']\n for pack in packages:\n yield u(r'\\usepackage{{{0}}}'.format(pack))\n yield u(r'\\pagestyle{empty}')\n if lt.preamble:\n yield lt.preamble\n yield u(r'\\begin{document}')\n if breqn:\n yield u(r'\\begin{dmath*}')\n yield body\n yield u(r'\\end{dmath*}')\n elif wrap:\n yield u'$${0}$$'.format(body)\n else:\n yield body\n yield u'\\end{document}'\n\n\n_data_uri_template_png = u\"\"\"<img src=\"data:image/png;base64,%s\" alt=%s />\"\"\"\n\ndef latex_to_html(s, alt='image'):\n \"\"\"Render LaTeX to HTML with embedded PNG data using data URIs.\n\n Parameters\n ----------\n s : str\n The raw string containing valid inline LateX.\n alt : str\n The alt text to use for the HTML.\n \"\"\"\n base64_data = latex_to_png(s, encode=True).decode('ascii')\n if base64_data:\n return _data_uri_template_png % (base64_data, alt)\n\n\n"
] | [
[
"matplotlib.mathtext.MathTextParser"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
tropp/ACQ4 | [
"792e05e99cedfc175593d200aeabecd6fa6304ce"
] | [
"acq4/devices/PatchStar/patchstar.py"
] | [
"# -*- coding: utf-8 -*-\nimport time\nimport numpy as np\nfrom PyQt4 import QtGui, QtCore\nfrom ..Stage import Stage, MoveFuture, StageInterface\nfrom acq4.drivers.PatchStar import PatchStar as PatchStarDriver\nfrom acq4.util.Mutex import Mutex\nfrom acq4.util.Thread import Thread\nfrom acq4.pyqtgraph import debug, ptime, SpinBox\n\n\nclass PatchStar(Stage):\n \"\"\"\n A Scientifica PatchStar manipulator.\n\n port: <serial port> # eg. 'COM1' or '/dev/ttyACM0'\n \"\"\"\n def __init__(self, man, config, name):\n self.port = config.pop('port')\n self.scale = config.pop('scale', (1e-7, 1e-7, 1e-7))\n self.dev = PatchStarDriver(self.port)\n self._lastMove = None\n man.sigAbortAll.connect(self.stop)\n\n Stage.__init__(self, man, config, name)\n\n # clear cached position for this device and re-read to generate an initial position update\n self._lastPos = None\n self.getPosition(refresh=True)\n self.setUserSpeed(3e-3)\n\n # Set scaling for each axis\n self.dev.send('UUX 6.4')\n self.dev.send('UUY 6.4')\n self.dev.send('UUZ 6.4')\n\n # makes 1 roe turn == 1 second movement for any speed\n self.dev.send('JS 200')\n\n # Set approach angle\n self.dev.send('ANGLE %f' % self.pitch)\n self.dev.send('APPROACH 0')\n\n # thread for polling position changes\n self.monitor = MonitorThread(self)\n self.monitor.start()\n\n def capabilities(self):\n \"\"\"Return a structure describing the capabilities of this device\"\"\"\n if 'capabilities' in self.config:\n return self.config['capabilities']\n else:\n return {\n 'getPos': (True, True, True),\n 'setPos': (True, True, True),\n 'limits': (False, False, False),\n }\n\n def stop(self):\n \"\"\"Stop the manipulator immediately.\n \"\"\"\n with self.lock:\n self.dev.stop()\n if self._lastMove is not None:\n self._lastMove._stopped()\n self._lastMove = None\n\n def setUserSpeed(self, v):\n \"\"\"Set the speed of the rotary controller (m/turn).\n \"\"\"\n self.userSpeed = v\n self.dev.setSpeed(v / self.scale[0])\n\n def _getPosition(self):\n # Called by superclass when user requests position refresh\n with self.lock:\n pos = self.dev.getPos()\n pos = [pos[i] * self.scale[i] for i in (0, 1, 2)]\n if pos != self._lastPos:\n self._lastPos = pos\n emit = True\n else:\n emit = False\n\n if emit:\n # don't emit signal while locked\n self.posChanged(pos)\n\n return pos\n\n def targetPosition(self):\n with self.lock:\n if self._lastMove is None or self._lastMove.isDone():\n return self.getPosition()\n else:\n return self._lastMove.targetPos\n\n def quit(self):\n self.monitor.stop()\n Stage.quit(self)\n\n def _move(self, abs, rel, speed, linear):\n with self.lock:\n if self._lastMove is not None and not self._lastMove.isDone():\n self.stop()\n pos = self._toAbsolutePosition(abs, rel)\n self._lastMove = PatchStarMoveFuture(self, pos, speed, self.userSpeed)\n return self._lastMove\n\n def deviceInterface(self, win):\n return PatchStarGUI(self, win)\n\n\nclass MonitorThread(Thread):\n \"\"\"Thread to poll for manipulator position changes.\n \"\"\"\n def __init__(self, dev):\n self.dev = dev\n self.lock = Mutex(recursive=True)\n self.stopped = False\n self.interval = 0.3\n \n Thread.__init__(self)\n\n def start(self):\n self.stopped = False\n Thread.start(self)\n\n def stop(self):\n with self.lock:\n self.stopped = True\n\n def setInterval(self, i):\n with self.lock:\n self.interval = i\n \n def run(self):\n minInterval = 100e-3\n interval = minInterval\n lastPos = None\n while True:\n try:\n with self.lock:\n if self.stopped:\n break\n maxInterval = self.interval\n\n pos = 
self.dev._getPosition() # this causes sigPositionChanged to be emitted\n if pos != lastPos:\n # if there was a change, then loop more rapidly for a short time.\n interval = minInterval\n lastPos = pos\n else:\n interval = min(maxInterval, interval*2)\n\n time.sleep(interval)\n except:\n debug.printExc('Error in PatchStar monitor thread:')\n time.sleep(maxInterval)\n \n\nclass PatchStarMoveFuture(MoveFuture):\n \"\"\"Provides access to a move-in-progress on a PatchStar manipulator.\n \"\"\"\n def __init__(self, dev, pos, speed, userSpeed):\n MoveFuture.__init__(self, dev, pos, speed)\n self._interrupted = False\n self._errorMSg = None\n self._finished = False\n pos = (np.array(pos) / np.array(self.dev.scale)).astype(int)\n if speed == 'fast':\n speed = 1e-3\n elif speed == 'slow':\n speed = 1e-6\n with self.dev.dev.lock:\n self.dev.dev.moveTo(pos, speed / self.dev.scale[0])\n # reset to user speed immediately after starting move\n # (the move itself will run with the previous speed)\n self.dev.dev.setSpeed(userSpeed / self.dev.scale[0])\n \n def wasInterrupted(self):\n \"\"\"Return True if the move was interrupted before completing.\n \"\"\"\n return self._interrupted\n\n def isDone(self):\n \"\"\"Return True if the move is complete.\n \"\"\"\n return self._getStatus() != 0\n\n def _getStatus(self):\n # check status of move unless we already know it is complete.\n # 0: still moving; 1: finished successfully; -1: finished unsuccessfully\n if self._finished:\n if self._interrupted:\n return -1\n else:\n return 1\n if self.dev.dev.isMoving():\n # Still moving\n return 0\n # did we reach target?\n pos = self.dev._getPosition()\n if ((np.array(pos) - np.array(self.targetPos))**2).sum()**0.5 < 1e-6:\n # reached target\n self._finished = True\n return 1\n else:\n # missed\n self._finished = True\n self._interrupted = True\n self._errorMsg = \"Move did not complete.\"\n return -1\n\n def _stopped(self):\n # Called when the manipulator is stopped, possibly interrupting this move.\n status = self._getStatus()\n if status == 1:\n # finished; ignore stop\n return\n elif status == -1:\n self._errorMsg = \"Move was interrupted before completion.\"\n elif status == 0:\n # not actually stopped! 
This should not happen.\n raise RuntimeError(\"Interrupted move but manipulator is still running!\")\n else:\n raise Exception(\"Unknown status: %s\" % status)\n\n def errorMessage(self):\n return self._errorMsg\n\n\n\nclass PatchStarGUI(StageInterface):\n def __init__(self, dev, win):\n StageInterface.__init__(self, dev, win)\n\n # Insert patchstar-specific controls into GUI\n self.psGroup = QtGui.QGroupBox('PatchStar Rotary Controller')\n self.layout.addWidget(self.psGroup, self.nextRow, 0, 1, 2)\n self.nextRow += 1\n\n self.psLayout = QtGui.QGridLayout()\n self.psGroup.setLayout(self.psLayout)\n self.speedLabel = QtGui.QLabel('Speed')\n self.speedSpin = SpinBox(value=self.dev.userSpeed, suffix='m/turn', siPrefix=True, dec=True, limits=[1e-6, 10e-3])\n self.revXBtn = QtGui.QPushButton('Reverse X')\n self.revYBtn = QtGui.QPushButton('Reverse Y')\n self.revZBtn = QtGui.QPushButton('Reverse Z')\n self.psLayout.addWidget(self.speedLabel, 0, 0)\n self.psLayout.addWidget(self.speedSpin, 0, 1)\n self.psLayout.addWidget(self.revXBtn, 1, 1)\n self.psLayout.addWidget(self.revYBtn, 2, 1)\n self.psLayout.addWidget(self.revZBtn, 3, 1)\n\n self.revXBtn.clicked.connect(lambda: self.dev.dev.send('JDX'))\n self.revYBtn.clicked.connect(lambda: self.dev.dev.send('JDY'))\n self.revZBtn.clicked.connect(lambda: self.dev.dev.send('JDZ'))\n\n self.speedSpin.valueChanged.connect(lambda v: self.dev.setDefaultSpeed(v))\n\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
JiaXingBinggan/MSRL | [
"fcc8b06eb1938a78549868b27f2962cb47b3d866"
] | [
"agent/DQN_agent.py"
] | [
"import numpy as np\nimport mindspore\nfrom mindspore import context, ops, Tensor, nn\nfrom mindspore.common.parameter import Parameter, ParameterTuple\nimport copy\n\n\ncontext.set_context(mode=context.PYNATIVE_MODE, device_target=\"CPU\")\n\n\n_update_op = ops.MultitypeFuncGraph(\"update_op\")\n\n\n@_update_op.register(\"Tensor\", \"Tensor\")\ndef _parameter_update(policy_param, target_param):\n assign = ops.Assign()\n output = assign(target_param, policy_param)\n return output\n\n\nclass DQN(nn.Cell):\n neuron_nums = 16\n\n def __init__(self, n_features, n_actions):\n super(DQN, self).__init__()\n self.net = nn.SequentialCell(\n nn.Dense(n_features, self.neuron_nums),\n nn.ReLU(),\n nn.Dense(self.neuron_nums, n_actions),\n )\n\n def construct(self, s):\n return self.net(s)\n\n\nclass PolicyNetWithLossCell(nn.Cell):\n \"\"\"DQN policy network with loss cell\"\"\"\n\n def __init__(self, backbone, loss_fn):\n super(PolicyNetWithLossCell,\n self).__init__(auto_prefix=False)\n self._backbone = backbone\n self._loss_fn = loss_fn\n self.gather = ops.GatherD()\n\n def construct(self, x, a0, label):\n \"\"\"constructor for Loss Cell\"\"\"\n out = self._backbone(x)\n out = self.gather(out, 1, a0)\n loss = self._loss_fn(out, label)\n return loss\n\n# Deep Q Network off-policy\nclass DeepQNetwork:\n def __init__(\n self,\n n_actions,\n n_features,\n learning_rate=0.01,\n reward_decay=0.9,\n e_greedy=0.9,\n replace_target_iter=300,\n memory_size=500,\n batch_size=3,\n e_greedy_increment=None,\n ):\n self.n_actions = n_actions\n self.n_features = n_features\n self.lr = learning_rate\n self.gamma = reward_decay\n self.epsilon_max = e_greedy\n self.replace_target_iter = replace_target_iter\n self.memory_size = memory_size\n self.batch_size = batch_size\n self.epsilon_increment = e_greedy_increment\n self.epsilon = 0 if e_greedy_increment is not None else self.epsilon_max\n\n # total learning step\n self.learn_step_counter = 0\n\n # initialize zero memory [s, a, r, s_]\n self.memory = np.zeros((self.memory_size, n_features * 2 + 2))\n\n self.eval_net = DQN(self.n_features, self.n_actions)\n self.target_net = copy.deepcopy(self.eval_net)\n self.policy_param = ParameterTuple(\n self.eval_net.get_parameters())\n self.target_param = ParameterTuple(\n self.target_net.get_parameters())\n\n if not hasattr(self, 'memory_counter'):\n self.memory_counter = 0\n\n loss_func = nn.MSELoss()\n opt = nn.Adam(self.eval_net.trainable_params(), learning_rate=self.lr)\n loss_q_net = PolicyNetWithLossCell(self.eval_net, loss_func)\n self.policy_network_train = nn.TrainOneStepCell(loss_q_net, opt)\n self.policy_network_train.set_train(mode=True)\n\n self.hyper_map = ops.HyperMap()\n self.cost_his = []\n\n def store_transition(self, transition):\n index = self.memory_counter % self.memory_size\n self.memory[index, :] = transition\n self.memory_counter += 1\n\n def reset_epsilon(self, epsilon):\n self.epsilon = epsilon\n\n def choose_action(self, observation):\n observation = Tensor(observation[np.newaxis, :], mindspore.float32)\n if np.random.uniform() < self.epsilon:\n self.eval_net.set_train(mode=False)\n action_v = self.eval_net(observation)\n action = np.argmax(action_v)\n else:\n action = np.random.randint(0, self.n_actions)\n return action\n\n def update_param(self):\n assign_result = self.hyper_map(\n _update_op,\n self.policy_param,\n self.target_param\n )\n return assign_result\n\n def learn(self):\n if self.learn_step_counter % self.replace_target_iter == 0:\n self.update_param()\n\n if self.memory_counter > 
self.memory_size:\n sample_index = np.random.choice(self.memory_size, size=self.batch_size, replace=False)\n else:\n sample_index = np.random.choice(self.memory_counter, size=self.batch_size, replace=False)\n\n batch_memory = Tensor(self.memory[sample_index, :], mindspore.float32)\n b_s = batch_memory[:, :self.n_features]\n b_a = ops.ExpandDims()(batch_memory[:, self.n_features], 1).astype(mindspore.int32)\n b_r = ops.ExpandDims()(batch_memory[:, self.n_features + 1], 1)\n b_s_ = batch_memory[:, -self.n_features:]\n\n q_next = self.target_net(b_s_).max(axis=1)\n q_target = b_r + self.gamma * q_next\n\n loss = self.policy_network_train(b_s, b_a, q_target)\n self.cost_his.append(round(float(np.mean(loss.asnumpy())), 3))\n\n # increasing epsilon\n self.epsilon = self.epsilon + self.epsilon_increment if self.epsilon < self.epsilon_max else self.epsilon_max\n self.learn_step_counter += 1\n\n return loss\n\n def plot_cost(self):\n import matplotlib.pyplot as plt\n plt.plot(np.arange(len(self.cost_his)), self.cost_his)\n plt.ylabel('Cost')\n plt.xlabel('training steps')\n plt.show()\n\n\n\n"
] | [
[
"numpy.random.choice",
"numpy.random.uniform",
"numpy.argmax",
"numpy.random.randint",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"numpy.zeros",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
AndersDHenriksen/Tensorflow-Project-Template | [
"32dfeaaf1243587af4ceb7b378c135092ddb9258"
] | [
"base/base_train.py"
] | [
"import tensorflow as tf\n\n\nclass BaseTrain:\n def __init__(self, sess, model, data, config, logger):\n self.model = model\n self.logger = logger\n self.config = config\n self.sess = sess\n self.data = data\n self.init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())\n if not self.model.is_loaded:\n self.sess.run(self.init)\n\n def train(self):\n for cur_epoch in range(self.model.cur_epoch_tensor.eval(self.sess), self.config.num_epochs + 1, 1):\n self.train_epoch()\n self.sess.run(self.model.increment_cur_epoch_tensor)\n\n def train_epoch(self):\n \"\"\"\n implement the logic of epoch:\n -loop over the number of iterations in the config and call the train step\n -add any summaries you want using the summary\n \"\"\"\n raise NotImplementedError\n\n def train_step(self):\n \"\"\"\n implement the logic of the train step\n - run the tensorflow session\n - return any metrics you need to summarize\n \"\"\"\n raise NotImplementedError\n"
] | [
[
"tensorflow.global_variables_initializer",
"tensorflow.local_variables_initializer"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
tjuwlz/MachineTranslation | [
"7335c7e95d2ca23ca7e26c45d4b8b13e2ce96704"
] | [
"modules/nmt.py"
] | [
"from datautil.dataloader import batch_iter\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.nn.utils as nn_utils\nimport time\nimport torch\nimport numpy as np\nfrom config.Const import *\n\n\nclass NMT(object):\n def __init__(self, encoder, decoder):\n super(NMT, self).__init__()\n self.encoder = encoder\n self.decoder = decoder\n\n def summary(self):\n print('encoder:', self.encoder)\n print('decoder:', self.decoder)\n\n # 训练一轮\n def train(self, train_pairs, enc_optimizer, dec_optimizer, args, src_vocab, tgt_vocab):\n train_loss = 0\n for src_batch, tgt_batch in batch_iter(train_pairs, args, src_vocab, tgt_vocab):\n loss = 0\n # enc_out: (batch_size, seq_len, hidden_size * nb_directions)\n # enc_hidden: (num_layers * nb_directions, batch_size, hidden_size)\n enc_out, enc_hidden = self.encoder(src_batch.src_idxs, mask=src_batch.non_pad_mask)\n\n self.encoder.zero_grad()\n self.decoder.zero_grad()\n\n dec_hidden = enc_hidden\n dec_input = tgt_batch.src_idxs[0].unsqueeze(1)\n if np.random.uniform(0, 1) <= args.teacher_force:\n # print('以目标作为下一个输入')\n for i in range(1, tgt_batch.src_idxs.size(0)):\n dec_out, dec_hidden = self.decoder(dec_input, dec_hidden, enc_out)\n dec_hidden *= tgt_batch.non_pad_mask[i].unsqueeze(1).repeat(1, dec_hidden.size(-1))\n loss += self.calc_loss(dec_out, tgt_batch.src_idxs[i])\n train_loss += loss.data.item()\n\n dec_input = tgt_batch.src_idxs[i].unsqueeze(1)\n else:\n # print('以网络的预测输出作为下一个输入')\n for i in range(1, tgt_batch.src_idxs.size(0)):\n dec_out, dec_hidden = self.decoder(dec_input, dec_hidden, enc_out)\n dec_hidden *= tgt_batch.non_pad_mask[i].unsqueeze(1).repeat(1, dec_hidden.size(-1))\n loss += self.calc_loss(dec_out, tgt_batch.src_idxs[i])\n train_loss += loss.data.item()\n\n _, top_i = dec_out.data.topk(1)\n dec_input = top_i # (batch_size, 1)\n\n loss.backward()\n\n nn_utils.clip_grad_norm_(filter(lambda p: p.requires_grad, self.encoder.parameters()), max_norm=5.0)\n nn_utils.clip_grad_norm_(filter(lambda p: p.requires_grad, self.decoder.parameters()), max_norm=5.0)\n\n enc_optimizer.step()\n dec_optimizer.step()\n\n return train_loss / len(train_pairs)\n\n # 训练多轮\n def train_iter(self, train_pairs, args, src_vocab, tgt_vocab):\n self.encoder.train()\n self.decoder.train()\n enc_optimizer = optim.Adam(filter(lambda p: p.requires_grad, self.encoder.parameters()), lr=args.lr)\n dec_optimizer = optim.Adam(filter(lambda p: p.requires_grad, self.decoder.parameters()), lr=args.lr)\n enc_lr_scheduler = optim.lr_scheduler.LambdaLR(enc_optimizer, lambda ep: max(0.95**ep, 1e-4))\n dec_lr_scheduler = optim.lr_scheduler.LambdaLR(dec_optimizer, lambda ep: max(0.95**ep, 1e-4))\n # enc_lr_scheduler = optim.lr_scheduler.LambdaLR(enc_optimizer, lambda ep: max(1 - 0.75 * ep / args.epoch, 1e-4))\n # dec_lr_scheduler = optim.lr_scheduler.LambdaLR(dec_optimizer, lambda ep: max(1 - 0.75 * ep / args.epoch, 1e-4))\n\n for i in range(args.epoch):\n enc_lr_scheduler.step()\n dec_lr_scheduler.step()\n t1 = time.time()\n train_loss = self.train(train_pairs, enc_optimizer, dec_optimizer, args, src_vocab, tgt_vocab)\n t2 = time.time()\n print('[Epoch %d] train loss: %.3f' % (i+1, train_loss))\n print('encoder lr:', enc_lr_scheduler.get_lr())\n print('decoder lr:', dec_lr_scheduler.get_lr())\n print('time cost: %.2fs' % (t2 - t1))\n\n def calc_loss(self, pred, tgt):\n return F.nll_loss(pred, tgt, ignore_index=0)\n\n # def evaluate(self, test_pairs, args, src_vocab, tgt_vocab):\n # self.encoder.eval()\n # self.decoder.eval()\n # pred_wds, tgt_wds = [], 
[]\n # for src_batch, tgt_batch in batch_iter(test_pairs, args, src_vocab, tgt_vocab):\n # batch_pred_wds, batch_tgt_wds = [], []\n # enc_out, enc_hidden = self.encoder(src_batch.src_idxs, mask=src_batch.non_pad_mask)\n #\n # dec_hidden = enc_hidden\n # dec_input = tgt_batch.src_idxs[0]\n # for i in range(1, tgt_batch.src_idxs.size(0)):\n # dec_out, dec_hidden = self.decoder(dec_input, dec_hidden, enc_out)\n #\n # dec_hidden *= tgt_batch.non_pad_mask[i].unsqueeze(1).repeat(1, dec_hidden.size(-1))\n # tgt_idxs = tgt_batch.src_idxs[i]\n # # greedy search\n # pred_idxs = dec_out.data.argmax(dim=1)\n # batch_pred_wds.append(tgt_vocab.index2word(pred_idxs.tolist()))\n # batch_tgt_wds.append(tgt_vocab.index2word(tgt_idxs.tolist()))\n # dec_input = pred_idxs\n #\n # pred_wds.extend(self.extract_valid(np.asarray(batch_pred_wds).T.tolist()))\n # tgt_wds.extend(self.extract_valid(np.asarray(batch_tgt_wds).T.tolist()))\n #\n # print('BLEU:', self.corpus_bleu(pred_wds, tgt_wds))\n\n # beam search\n '''\n 执行过程:设beam size = 3\n 1、选择t1时刻输出的概率分数最大的3个词\n 2、分别将t-1时刻选择的3个词作为当前时刻的输入\n 3、求t时刻累积的(序列)概率分数(历史所选择词的对数似然和),选择分数值最大的3个词\n 4、重复2-3过程,直到到达最大长度(或遇到<eos>)\n '''\n def evaluate(self, test_pairs, args, src_vocab, tgt_vocab):\n self.encoder.eval()\n self.decoder.eval()\n # pred_wds, tgt_wds = [], []\n for src_batch, tgt_batch in batch_iter(test_pairs, args, src_vocab, tgt_vocab):\n # batch_pred_wds, batch_tgt_wds = [], []\n enc_out, enc_hidden = self.encoder(src_batch.src_idxs, mask=src_batch.non_pad_mask)\n\n # 保存历史分数\n seq_len, batch_size = tgt_batch.src_idxs.size()\n # (bz, beam_size)\n hist_score = torch.zeros((batch_size, args.beam_size), device=args.device)\n # (beam_size, bz, vocab_size)\n beam_score = torch.zeros((args.beam_size, batch_size, tgt_vocab.vocab_size), device=args.device)\n # (bz, beam_size, max_len)\n best_paths = torch.zeros((MAX_LEN, batch_size, args.beam_size), device=args.device)\n\n dec_hidden = enc_hidden\n dec_input = tgt_batch.src_idxs[0].unsqueeze(1)\n for i in range(1, min(MAX_LEN, seq_len)):\n if i == 1:\n # dec_input: (bz, 1)\n # dec_out: (bz, vocab_size)\n dec_out, dec_hidden = self.decoder(dec_input, dec_hidden, enc_out)\n dec_hidden *= tgt_batch.non_pad_mask[i].unsqueeze(1).repeat(1, dec_hidden.size(-1))\n # (bz, beam_size)\n top_prob, top_idxs = dec_out.data.topk(args.beam_size, dim=1)\n hist_score = top_prob\n best_paths[i] = top_idxs\n # (bz, beam_size)\n dec_input = top_idxs\n else:\n # dec_input: (bz, beam_size) -> (beam_size, bz)\n dec_input = dec_input.transpose(0, 1)\n for j in range(args.beam_size):\n # dec_out: (bz, vocab_size)\n dec_out, dec_hidden = self.decoder(dec_input[j].unsqueeze(1), dec_hidden, enc_out)\n dec_hidden *= tgt_batch.non_pad_mask[i].unsqueeze(1).repeat(1, dec_hidden.size(-1))\n beam_score[j] = dec_out\n # (bz, beam_size, 1) -> (bz, beam_size, vocab_size)\n hist_score = hist_score.unsqueeze(-1).expand((-1, -1, tgt_vocab.vocab_size))\n hist_score += beam_score.transpose(0, 1) # (bz, beam_size, vocab_size)\n # (bz, beam_size * vocab_size)\n hist_score = hist_score.reshape((batch_size, -1))\n # (bz, beam_size)\n top_prob, top_idxs = hist_score.topk(args.beam_size, dim=1)\n hist_score = top_prob\n top_idxs %= tgt_vocab.vocab_size\n best_paths[i] = top_idxs\n dec_input = top_idxs\n\n # pred_wds.extend(self.extract_valid(np.asarray(batch_pred_wds).T.tolist()))\n # tgt_wds.extend(self.extract_valid(np.asarray(batch_tgt_wds).T.tolist()))\n\n # 提取序列的非填充部分\n def extract_valid(self, seqs: list):\n return list(map(lambda x: x[:x.index(EOS)] if EOS in x else 
x, seqs))\n\n # 统计ngram数目\n def count_ngram(self, cand: list, ref: list, n=1) -> int:\n assert len(cand) != 0 and len(ref) != 0\n\n total_count = 0\n for i in range(len(cand) - n + 1):\n cand_count, ref_count = 1, 0\n ngram = cand[i: i + n]\n # 统计ngram在机器翻译译文中出现的次数\n for j in range(i + n, len(cand) - n + 1):\n if ngram == cand[j: j + n]:\n cand_count += 1\n # 统计ngram在人工译文中出现的次数\n for k in range(len(ref) - n + 1):\n if ngram == ref[k: k + n]:\n ref_count += 1\n total_count += min(cand_count, ref_count)\n\n return total_count\n\n # 计算单句话的BLEU值,取值在[0, 1]之间,越大越好\n def sentence_bleu(self, cand: list, ref: list, N=4) -> float:\n '''\n :param cand: sentence_tokens\n :param ref: sentence_tokens\n :return:\n '''\n assert len(cand) != 0 and len(ref) != 0\n # n-gram中n的取值在[1, 4]之间\n res = 0\n cand_len, ref_len = len(cand), len(ref)\n for n in range(1, N+1):\n cand_gram = max(0, cand_len - n + 1)\n res += 0.25 * np.log(self.count_ngram(cand, ref, n) / cand_gram)\n # 短译句惩罚因子\n # bp = np.exp(1 - max(1., len(ref) / len(cand)))\n return np.exp(res + min(0., 1 - ref_len / cand_len))\n\n # 计算多句话的BLEU值(注:不是直接对sentence bleu求和求平均)\n def corpus_bleu(self, cands: list, refs: list, N=4) -> float:\n '''\n :param cands: [sentence_tokens1, sentence_tokens2]\n :param refs: [sentence_tokens1, sentence_tokens2]\n :return:\n '''\n assert len(cands) != 0 and len(cands) == len(refs)\n\n ref_len, cand_len = 0, 0\n for cand, ref in zip(cands, refs):\n ref_len += len(ref)\n cand_len += len(cand)\n\n res = 0\n for n in range(1, N+1):\n n_match, n_grams = 0, 0\n for cand, ref in zip(cands, refs):\n n_match += self.count_ngram(cand, ref, n)\n n_grams += max(0, len(cand) - n + 1)\n res += 0.25 * np.log(n_match / n_grams + 1e-8)\n\n return np.exp(res + min(0., 1 - ref_len / cand_len))\n"
] | [
[
"numpy.random.uniform",
"numpy.log",
"torch.nn.functional.nll_loss",
"torch.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
formalabstracts/CNL-CIC | [
"c857ee0d52b4ba91dd06a51c8f9f3ec2749ca0eb"
] | [
"2parser/sample.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 16 05:48:26 2021\n\n@author: thales\n\nGenerate random samples from parsers\n\n\"\"\"\n\nfrom numpy.random import (poisson , binomial, randint)\n\nfrom tokenlib import (Item , Etok, mk_stream)\n\nimport lib\n\nimport state\n\ndef bernoulli(p):\n return binomial(1,p)\n\ndef ran(ls):\n if not ls:\n raise TypeError(f'ran, expected nonempty list {ls}')\n return ls\n return ls[randint(0,len(ls))]\n\ndef mk_tok(v):\n toks = mk_stream(v)\n try: \n return toks.stream[0]\n except:\n raise IndexError(f'List index out of range. Empty list mk_tok({v})')\n \ndef mk_toks(vs):\n toks = mk_stream(vs)\n return toks.stream\n\ndef next_token():\n return mk_tok('blah')\n\ndef none():\n return None\n\ndef add_sample(self,other):\n def sample():\n try: # debug\n acc1 = self.sample()\n acc2 = other.sample()\n return (acc1,acc2)\n except AttributeError as ex:\n raise AttributeError(f'MyAttributeError {other}')\n return sample\n\ndef or_sample(self,other):\n def sample():\n if bernoulli(0.5):\n return self.sample()\n return other.sample()\n return sample\n\ndef treat_sample(self,treatment):\n def sample():\n return treatment(self.sample())\n return sample \n\ndef some(self,sep,m):\n def sample():\n if sep:\n if m==0:\n return []\n return lib.flatten((self.sample(),sep.sample()) for _ in range(0,m-1))+[self.sample()]\n return [self.sample() for _ in range(0,m-1)]\n return sample\n\ndef plus(self,sep):\n return some(self,sep,1 + poisson(0.5))\n \ndef many(self,sep):\n return some(self,sep,0 + poisson(0.5))\n\ndef atleast(self,n):\n return some(self,None,n + poisson(0.5))\n\ndef possibly(self):\n def sample():\n if state.state.include_possibly:\n return self.sample()\n if bernoulli(0.5):\n return self.sample()\n return None\n return sample\n\ndef if_test(self,p):\n def sample():\n iteration_limit = 10 # arbitrary limit\n for _ in range(0,iteration_limit):\n acc = self.sample() # randomized guess\n if p(acc):\n return acc \n return next_token() # give up on test\n return sample\n\ndef if_value(v):\n def sample():\n return mk_tok(v)\n return sample\n\ndef if_rawvalue(v):\n return if_value(v)\n\ndef type_sample(ty:str):\n \"\"\" \n >>> type_sample('WORD')\n '...'\n \"\"\"\n d = {'STRING': ['\"'+s+'\"' for s in 'hello world so little time'.split()],\n 'CONTROLSEQ':['\\\\'+s for s in 'alpha beta gamma delta sum prod deg circ ast lneg times rtimes'.split()],\n 'DECIMAL':['3.14','2.718','1.0','4.96'],\n 'INTEGER': [str(i) for i in range(0,10)] ,\n 'SYMBOL':['<','>','!=','+','-','*','^'],\n 'SYMBOL_QED':[r'\\qed'],\n 'MAPSTO':[r'\\mapsto'],\n 'MID':[r'\\mid'],\n 'TMID':[r'\\tmid'],\n 'ASSIGN':[':='],\n 'ARROW':[r'\\to'],\n 'BLANK':['_'],\n 'ALT':['|'],\n 'PERIOD':['.'],\n 'COLON':[':'],\n 'APPLYSUB':[r'\\sub'],\n 'COERCION': [r'\\^'],\n 'LAMBDA':[r'\\lambda'],\n 'PITY':[r'\\Pity'],\n 'QUANTIFIER':[r'\\forall',r'\\exists'],\n 'VAR':[ f'{x}{n}' for x in 'b c x y z u v w'.split() for n in range(0,5)],\n 'WORD':\"\"\"estimate equation solution expression inequality random sample \n mean pair ordered function evaluate order operation property divisible \n exponent base multiple square common prime form factorization point \n plane line angle ray parallel intersecting perpendicular regular \n polygon degree circle diameter chord similar congruent symmetry \n leg triangle scalene equilateral trapezoid rotation transformation \n translation polyhedron integer positive opposite value origin \n coordinate area circumference word number blah 
part\"\"\".split(),\n 'ATOMIC_IDENTIFIER':'foo_bar bar3 foo22 sin_ cos_ atan2 ceil_ comb_ fabs_ factorial_ floor_ gcd_ sqrt_ log2 log10 pow_ '.split(),\n 'HIERARCHICAL_IDENTIFIER':['math.pi','math.ceil','math.abs'],\n 'FIELD_ACCESSOR':['.assoc','.distrib'],\n 'UNKNOWN':['?'],\n 'TEX_ERROR':[r'\\error']\n }\n return ran(d[ty])\n\ndef if_types(tys):\n \"\"\" \n >>> if_types(['WORD','INTEGER','DECIMAL'])()\n LexToken(...)\n \"\"\"\n def sample():\n ty = ran(tys)\n return mk_tok(type_sample(ty))\n return sample\n\ndef all_sample(prs):\n def sample():\n return [p.sample() for p in prs]\n return sample\n\ndef first(prs):\n def sample():\n if not prs:\n return None\n i = randint(0,len(prs))\n return prs[i].sample()\n return sample\n\n#def lazy_call(pr):\n# def sample():\n# return pr().sample()\n# return sample\n\ndef first_word(ss):\n #DEBUG if not(ss):\n # raise IndexError(f'Index out of range, split first_word({ss})')\n s = ran(ss.split())\n def sample():\n return mk_tok(s)\n return sample\n\ndef word_net_string(wn):\n s = ran([k for k in wn])\n if not s:\n return ''\n return s + ' ' + word_net_string(wn[s])\n\ndef word_net(wn):\n def sample():\n s = word_net_string(wn)\n return mk_toks(s)\n return sample\n\n\nif __name__ == \"__main__\":\n import doctest\n\n doctest.testmod(optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)\n# doctest.testmod(verbose=True, optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)\n# doctest.testmod()\n\n \n\n"
] | [
[
"numpy.random.binomial",
"numpy.random.poisson"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |