repo_name | hexsha | file_path | code | apis
---|---|---|---|---
bibofeng/DeepRL-1 | [
"7b14d9720a8ea1e08b05a2889d699a70174caf8f"
] | [
"utils/normalizer.py"
] | [
"#######################################################################\n# Copyright (C) 2017 Shangtong Zhang([email protected]) #\n# Permission given to modify the code as long as you keep this #\n# declaration at the top #\n#######################################################################\nimport torch\nimport numpy as np\n\nclass Normalizer:\n def __init__(self, o_size):\n self.stats = SharedStats(o_size)\n\n def __call__(self, o_):\n o = torch.FloatTensor(o_)\n self.stats.feed(o)\n std = (self.stats.v + 1e-6) ** .5\n o = (o - self.stats.m) / std\n return o.numpy().reshape(o_.shape)\n\nclass StaticNormalizer:\n def __init__(self, o_size):\n self.offline_stats = SharedStats(o_size)\n self.online_stats = SharedStats(o_size)\n\n def __call__(self, o_):\n if np.isscalar(o_):\n o = torch.FloatTensor([o_])\n else:\n o = torch.FloatTensor(o_)\n self.online_stats.feed(o)\n if self.offline_stats.n[0] == 0:\n return o_\n std = (self.offline_stats.v + 1e-6) ** .5\n o = (o - self.offline_stats.m) / std\n o = o.numpy()\n if np.isscalar(o_):\n o = np.asscalar(o)\n else:\n o = o.reshape(o_.shape)\n return o\n\nclass SharedStats:\n def __init__(self, o_size):\n self.m = torch.zeros(o_size)\n self.v = torch.zeros(o_size)\n self.n = torch.zeros(1)\n self.m.share_memory_()\n self.v.share_memory_()\n self.n.share_memory_()\n\n def feed(self, o):\n n = self.n[0]\n new_m = self.m * (n / (n + 1)) + o / (n + 1)\n self.v.copy_(self.v * (n / (n + 1)) + (o - self.m) * (o - new_m) / (n + 1))\n self.m.copy_(new_m)\n self.n.add_(1)\n\n def zero(self):\n self.m.zero_()\n self.v.zero_()\n self.n.zero_()\n\n def load(self, stats):\n self.m.copy_(stats.m)\n self.v.copy_(stats.v)\n self.n.copy_(stats.n)\n\n def merge(self, B):\n A = self\n n_A = self.n[0]\n n_B = B.n[0]\n n = n_A + n_B\n delta = B.m - A.m\n m = A.m + delta * n_B / n\n v = A.v * n_A + B.v * n_B + delta * delta * n_A * n_B / n\n v /= n\n self.m.copy_(m)\n self.v.copy_(v)\n self.n.add_(B.n)\n\n def state_dict(self):\n return {'m': self.m.numpy(),\n 'v': self.v.numpy(),\n 'n': self.n.numpy()}\n\n def load_state_dict(self, saved):\n self.m = torch.FloatTensor(saved['m'])\n self.v = torch.FloatTensor(saved['v'])\n self.n = torch.FloatTensor(saved['n'])"
] | [
[
"torch.zeros",
"numpy.asscalar",
"torch.FloatTensor",
"numpy.isscalar"
]
] |
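
The `SharedStats.feed` update in the normalizer row above is a Welford-style online mean/variance recursion (with `merge` implementing the usual parallel-variance combination). A minimal sketch, independent of the repository and using plain NumPy, that checks the same recurrence against batch statistics:

```python
# Hypothetical self-check, not part of DeepRL-1: the scalar form of the
# running mean/variance update used by SharedStats.feed(), compared against
# NumPy's batch mean and (population) variance.
import numpy as np

xs = np.random.RandomState(0).randn(1000)
m, v, n = 0.0, 0.0, 0  # running mean, running variance, sample count

for x in xs:
    new_m = m * (n / (n + 1)) + x / (n + 1)
    v = v * (n / (n + 1)) + (x - m) * (x - new_m) / (n + 1)
    m = new_m
    n += 1

assert np.isclose(m, xs.mean())
assert np.isclose(v, xs.var())  # ddof=0, matching the recursion
```
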
DagonDD/google-research | [
"ccd5d36e7a8ee1d672c93a801634bfd8f2e0c3eb"
] | [
"t5_closed_book_qa/t5_cbqa/preprocessors.py"
] | [
"# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"T5 CBQA preprocessors.\"\"\"\nimport tensorflow.compat.v1 as tf\n\n\ndef natural_questions_nocontext(\n dataset,\n prefix='nq question: ',\n drop_yes_no=False,\n max_tokens=None,\n max_answers=None,\n ):\n \"\"\"Convert Natural Questions TFDS to open domain with multiple answers.\n\n Examples with no short or yes/no answers are filtered. All short and yes/no\n answers (even across annotations) are emitted, so the targets produced by this\n preprocessor are invalid in the case of multiple annotations. However, these\n should not occur in the train set.\n\n The function takes the natural_questions TFDS dataset an emits examples of the\n form:\n {\n 'inputs': 'nq question: what are the names of the olsen twins'\n 'targets': 'answer: Mary-Kate answer: Ashley'\n }\n\n Args:\n dataset: a tf.data.Dataset to process.\n prefix: str, prefix to prepend to the inputs.\n drop_yes_no: bool, whether to drop yes/no answers, keeping only short\n answers.\n max_tokens: (Optional) int, the maximum number of tokens (as specified by\n NQ) beyond which a short answer is dropped. None are dropped if set to\n `None`.\n max_answers: (Optional) int, the maximum number of answers to include in the\n targets. Will be selected deterministically from the beginning of the\n list. 
All answers are included if set to `None`.\n\n Returns:\n a tf.data.Dataset\n \"\"\"\n def nq_map(ex):\n \"\"\"Map Natural Questions example to text-to-text example.\"\"\"\n inputs = prefix + ex['question']['text']\n\n annotations = ex['annotations']\n\n yes_no_labels = annotations['yes_no_answer']\n if drop_yes_no:\n yes_no_labels = -1 * tf.ones_like(yes_no_labels)\n yes_no_answers = tf.boolean_mask(yes_no_labels, yes_no_labels > -1)\n yes_no_answers = tf.where_v2(tf.equal(yes_no_answers, 1), 'yes', 'no')\n\n short_answers = annotations['short_answers']['text'].flat_values\n short_answer_starts = annotations['short_answers']['text'].row_starts()\n if max_tokens:\n start_tokens = annotations['short_answers']['start_token']\n end_tokens = annotations['short_answers']['end_token']\n dropped_answers = end_tokens - start_tokens > max_tokens\n short_answers = tf.boolean_mask(\n short_answers, tf.math.logical_not(dropped_answers.values))\n # Subtract dropped answers from row starts.\n row_drop_count = tf.math.reduce_sum(\n tf.cast(dropped_answers, tf.int64), axis=1)\n short_answer_starts -= tf.concat(\n [[0], tf.math.cumsum(row_drop_count[:-1])], axis=0)\n\n answers = tf.concat([yes_no_answers, short_answers], axis=0)\n if max_answers:\n answers = answers[:max_answers]\n targets = tf.strings.reduce_join('answer: ' + answers, separator=' ')\n\n return {\n 'inputs': inputs,\n 'targets': targets,\n 'short_answers/values': short_answers,\n 'short_answers/row_starts': short_answer_starts,\n 'yes_no_answers': yes_no_labels\n }\n\n dataset = dataset.map(\n nq_map, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n return dataset.filter(lambda ex: tf.strings.length(ex['targets']) > 0)\n\n\ndef natural_questions_open(\n dataset,\n prefix='nq question: '\n ):\n \"\"\"Convert Natural Questions Open TFDS to examples.\n\n If there are multiple answers in the input, selects the first one as the\n target.\n\n The function takes the natural_question_open TFDS dataset and emits examples\n of the form:\n {\n 'inputs': 'nq question: What are the names of the Olsen Twins?'\n 'targets': 'Mary-Kate and Ashley',\n 'answers': ['Mary-Kate and Ashley', 'Ashley and Mary-Kate']\n }\n\n Args:\n dataset: a tf.data.Dataset to process.\n prefix: str, prefix to prepend to the inputs.\n\n Returns:\n a tf.data.Dataset\n \"\"\"\n\n def nq_map(ex):\n \"\"\"Map Natural Questions example to text-to-text example.\"\"\"\n return {\n 'inputs': prefix + ex['question'],\n 'targets': ex['answer'][0],\n 'answers': ex['answer'],\n }\n return dataset.map(nq_map, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\n\ndef trivia_qa_open(\n dataset,\n prefix='trivia_qa question: '\n ):\n \"\"\"Convert TriviaQA dataset to open domain qa examples.\n\n The function takes the trivia_qa TFDS dataset and emits examples of the\n form:\n {\n 'inputs': 'trivia_qa question: What are the names of the Olsen Twins?'\n 'targets': 'Mary-Kate and Ashley',\n 'answers': ['Mary-Kate and Ashley', 'Ashley and Mary-Kate']\n }\n\n Args:\n dataset: a tf.data.Dataset to process.\n prefix: str, prefix to prepend to the inputs.\n\n Returns:\n a tf.data.Dataset\n \"\"\"\n def tqa_map(ex):\n \"\"\"Map TriviaQA example to text-to-text example.\"\"\"\n return {\n 'inputs': prefix + ex['question'],\n 'targets': ex['answer']['value'],\n 'answers': ex['answer']['aliases'],\n }\n\n return dataset.map(tqa_map, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\n\ndef web_questions_open(\n dataset,\n prefix='wq question: '\n ):\n \"\"\"Convert WebQuestions TFDS to open domain 
examples.\n\n If there are multiple answers in the input, selects the first one as the\n target.\n\n The function takes the web_questions TFDS dataset and emits examples of the\n form:\n {\n 'inputs': 'wq question: What are the names of the Olsen Twins?'\n 'targets': 'Mary-Kate and Ashley',\n 'answers': ['Mary-Kate and Ashley', 'Ashley and Mary-Kate']\n }\n\n Args:\n dataset: a tf.data.Dataset to process.\n prefix: str, prefix to prepend to the inputs.\n\n Returns:\n a tf.data.Dataset\n \"\"\"\n\n def wq_map(ex):\n \"\"\"Map WebQuestions example to text-to-text example.\"\"\"\n return {\n 'inputs': prefix + ex['question'],\n 'targets': ex['answers'][0],\n 'answers': ex['answers'],\n }\n return dataset.map(wq_map, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\n\ndef sample_answer(\n dataset,\n ):\n \"\"\"Replaces target with sampled answer.\"\"\"\n\n def samp_map(ex):\n answers = tf.random.shuffle(ex['answers'])\n return {\n 'inputs': ex['inputs'],\n 'targets': answers[0],\n 'answers': answers,\n }\n return dataset.map(samp_map, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n"
] | [
[
"tensorflow.compat.v1.math.logical_not",
"tensorflow.compat.v1.ones_like",
"tensorflow.compat.v1.concat",
"tensorflow.compat.v1.boolean_mask",
"tensorflow.compat.v1.random.shuffle",
"tensorflow.compat.v1.cast",
"tensorflow.compat.v1.strings.length",
"tensorflow.compat.v1.math.cumsum",
"tensorflow.compat.v1.strings.reduce_join",
"tensorflow.compat.v1.equal"
]
] |
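
The `*_open` preprocessors in the row above all apply the same text-to-text mapping: prefix the question as `inputs`, take one answer as `targets`, and keep the full answer list under `answers`. A toy sketch with invented data (written TF2 eager style and using `tf.strings.join` for the prefix; the module itself imports `tensorflow.compat.v1` and concatenates with `+`):

```python
# Toy illustration only -- the question/answer strings are made up.
import tensorflow as tf

toy = tf.data.Dataset.from_tensor_slices({
    'question': ['what are the names of the olsen twins'],
    'answer': [['Mary-Kate and Ashley', 'Ashley and Mary-Kate']],
})

def open_qa_map(ex):
    # Same shape as nq_map/wq_map/tqa_map: prefixed inputs, first answer as target.
    return {
        'inputs': tf.strings.join(['nq question: ', ex['question']]),
        'targets': ex['answer'][0],
        'answers': ex['answer'],
    }

for ex in toy.map(open_qa_map):
    print(ex['inputs'].numpy(), '->', ex['targets'].numpy())
```
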
vrooje/Data-digging | [
"ae4ee1de0df0d2686115510ac35f5960d5cfaf08"
] | [
"example_scripts/basic_project_stats.py"
] | [
"#Python 2.7.9 (default, Apr 5 2015, 22:21:35)\n# the full environment I used to test this is in basic_project_stats.yml\nimport sys\n\n# file with raw classifications (csv)\n# put this way up here so if there are no inputs we exit quickly before even trying to load everything else\ntry:\n classfile_in = sys.argv[1]\nexcept:\n print(\"\\nUsage: %s classifications_infile\" % sys.argv[0])\n print(\" classifications_infile is a Zooniverse (Panoptes) classifications data export CSV.\\n\")\n print(\" Optional inputs:\")\n print(\" workflow_id=N\")\n print(\" specify the program should only consider classifications from workflow id N\")\n print(\" workflow_version=M\")\n print(\" specify the program should only consider classifications from workflow version M\")\n print(\" (note the program will only consider the major version, i.e. the integer part)\")\n print(\" outfile_csv=filename.csv\")\n print(\" if you want the program to save a sub-file with only classification info from the workflow specified, give the filename here\")\n print(\" --time_elapsed\")\n print(\" specify the program should compute classification durations and total classification work effort\")\n print(\" --remove_duplicates\")\n print(\" remove duplicate classifications (subject-user pairs) before analysis.\")\n print(\" memory-intensive for big files; probably best to pair with outfile_csv so you save the output.\")\n print(\" --keep_nonlive\")\n print(\" by default the program ignores classifications made while the project wasn't 'Live'; setting this will keep them in.\")\n print(\" --keep_allcols\")\n print(\" by default the program only keeps columns required for stats; use this with a specified outfile_csv to save all columns, including annotations. (If you're not using outfile_csv this will just waste memory.)\")\n print(\"\\nAll output will be to stdout (about 1-2 paragraphs' worth).\\n\")\n sys.exit(0)\n\n\n\nimport numpy as np # works in 1.10.1\nimport pandas as pd # works in 0.13.1\nimport datetime\nimport dateutil.parser\nimport json, ujson\nimport gc\n\n# default value is not to care about workflow ID or version\nworkflow_id = -1\nworkflow_version = -1\n# by default we won't worry about computing how much time effort the volunteers cumulatively spent\ntime_elapsed = False\n# by default we won't write the subset of classifications we used to a new csv file\noutput_csv = False\n# by default we'll ignore the possibility of duplicate classifications\n# note duplicates are relatively rare, usually <2% of all classifications\n# the Zooniverse has squashed several bugs related to this, but some still\n# happen client-side and there's nothing we can do about that.\nremove_duplicates = False\n# by default, restrict the analysis to \"Live\" classifications\nkeep_nonlive = False\n# by default, don't keep every column of the classifications when writing to an outfile\nkeep_allcols = False\n\n# check for other command-line arguments\nif len(sys.argv) > 2:\n # if there are additional arguments, loop through them\n for i_arg, argstr in enumerate(sys.argv[2:]):\n arg = argstr.split('=')\n\n if arg[0] == \"workflow_id\":\n workflow_id = int(arg[1])\n elif arg[0] == \"workflow_version\":\n workflow_version = float(arg[1])\n elif (arg[0] == \"outfile_csv\") | (arg[0] == \"outfile\"):\n outfile_csv = arg[1]\n output_csv = True\n elif arg[0] == \"--keep_allcols\":\n keep_allcols = True\n elif arg[0] == \"--time_elapsed\":\n time_elapsed = True\n elif arg[0] == \"--remove_duplicates\":\n remove_duplicates = True\n elif arg[0] == 
\"--keep_nonlive\":\n keep_nonlive = True\n\n\n\n# columns currently in an exported Panoptes classification file:\n# classification_id,user_name,user_id,user_ip,workflow_id,workflow_name,workflow_version,created_at,gold_standard,expert,metadata,annotations,subject_data,subject_ids\n\n# classification_id identifies the specific classification - should be unique for each row in this file\n# user_name is either their registered name or \"not-logged-in\"+their hashed IP\n# user_id is their numeric Zooniverse ID or blank if they're unregistered\n# user_ip is a hashed version of their IP\n# workflow_id is the numeric ID of this workflow, which you can find in the project builder URL for managing the workflow:\n# https://www.zooniverse.org/lab/[project_id]/workflow/[workflow_id]/\n# workflow_name is the name you gave your workflow (for sanity checks)\n# workflow_version is [bigchangecount].[smallchangecount] and is probably pretty big\n# created_at is the date the entry for the classification was recorded\n# gold_standard is 1 if this classification was done in gold standard mode\n# expert is 1 if this classification was done in expert mode... I think\n# metadata (json) is the data the browser sent along with the classification.\n# Includes browser information, language, started_at and finished_at\n# note started_at and finished_at are perhaps the easiest way to calculate the length of a classification\n# (the duration elapsed between consecutive created_at by the same user is another way)\n# the difference here is back-end vs front-end\n# annotations (json) contains the actual classification information\n# which for this analysis we will ignore completely, for now\n# subject_data is cross-matched from the subjects table and is for convenience in data reduction\n# subject_ids has just the subject ids in the given classification\n# here we will ignore this too, except to count subjects once.\n# we'll also ignore classification_id, user_ip, workflow information, gold_standard, and expert.\n#\n\n\n# Print out the input parameters just as a sanity check\nprint(\"Computing project stats using:\")\nprint(\" infile: %s\" % classfile_in)\n\n\n\n\n#################################################################################\n#################################################################################\n#################################################################################\n\n\n# Get the Gini coefficient - https://en.wikipedia.org/wiki/Gini_coefficient\n#\n# The Gini coefficient measures inequality in distributions of things.\n# It was originally conceived for economics (e.g. where is the wealth in a country?\n# in the hands of many citizens or a few?), but it's just as applicable to many\n# other fields. In this case we'll use it to see how classifications are\n# distributed among classifiers.\n# G = 0 is a completely even distribution (everyone does the same number of\n# classifications), and ~1 is uneven (~all the classifications are done\n# by one classifier).\n# Typical values of the Gini for healthy Zooniverse projects (Cox et al. 
2015) are\n# in the range of 0.7-0.9.\n# That range is generally indicative of a project with a loyal core group of\n# volunteers who contribute the bulk of the classification effort, but balanced\n# out by a regular influx of new classifiers trying out the project, from which\n# you continue to draw to maintain a core group of prolific classifiers.\n# Once your project is fairly well established, you can compare it to past Zooniverse\n# projects to see how you're doing.\n# If your G is << 0.7, you may be having trouble recruiting classifiers into a loyal\n# group of volunteers. People are trying it, but not many are staying.\n# If your G is > 0.9, it's a little more complicated. If your total classification\n# count is lower than you'd like it to be, you may be having trouble recruiting\n# classifiers to the project, such that your classification counts are\n# dominated by a few people.\n# But if you have G > 0.9 and plenty of classifications, this may be a sign that your\n# loyal users are -really- committed, so a very high G is not necessarily a bad thing.\n#\n# Of course the Gini coefficient is a simplified measure that doesn't always capture\n# subtle nuances and so forth, but it's still a useful broad metric.\n\ndef gini(list_of_values):\n sorted_list = sorted(list_of_values)\n height, area = 0, 0\n for value in sorted_list:\n height += value\n area += height - value / 2.\n fair_area = height * len(list_of_values) / 2\n return (fair_area - area) / fair_area\n\n\n\n\n#################################################################################\n#################################################################################\n#################################################################################\n\n\ndef get_duplicate_ids(grp):\n # groupbys and dfs have slightly different indexing and just NOPE\n #thegrp = pd.DataFrame(grp)\n thegrp = grp\n\n if len(thegrp) == 1:\n return\n else:\n # we have a duplicate set, so return the details\n return thegrp\n\n\n\n\ndef get_live_project(meta_json):\n try:\n return meta_json['live_project']\n except:\n # apparently some subject metadata doesn't have this? dunno?\n return False\n\ndef get_live_project_incl_missing(meta_json):\n try:\n return meta_json['live_project']\n except:\n return -1\n\n# Begin the main stuff\n\n\nprint(\"Reading classifications from %s\" % classfile_in)\n\n#classifications = pd.read_csv(classfile_in)\n# the above will work but uses a LOT of memory for projects with > 1 million\n# classifications. Nothing here uses the actual classification data so don't read it\n'''\nIf you are using this code on an older project, where the data export is from\nbefore subject_ids were exported as their own column, change \"subject_id\" below\nto \"subject_data\", and then when you define the groupby \"by_subject\" and count\nsubjects, you'll need to use subject_data instead of subject_ids.\n\nApologies for doing this, but subject_data contains the whole manifest so for\nbig projects with big catalogs it can take up a lot of memory, so we don't want to\nuse it if we don't have to.\n'''\ncols_keep = [\"classification_id\", \"user_name\", \"user_id\", \"user_ip\", \"workflow_id\", \"workflow_version\", \"created_at\", \"metadata\", \"subject_ids\"]\nif not keep_allcols:\n try:\n classifications = pd.read_csv(classfile_in, usecols=cols_keep)\n except:\n print(\"Some columns missing from classifications infile, reading without specifying columns (uses more memory)... 
\")\n classifications = pd.read_csv(classfile_in)\nelse:\n try:\n classifications = pd.read_csv(classfile_in, low_memory=False)\n except:\n classifications = pd.read_csv(classfile_in)\n\n cols_used = classifications.columns.tolist()\n cols_out = classifications.columns.tolist()\n if not 'created_day' in cols_used:\n cols_used.append('created_day')\n if not 'meta_json' in cols_used:\n cols_used.append('meta_json')\n\nn_class_raw = len(classifications)\n\n# now restrict classifications to a particular workflow id/version if requested\nif (workflow_id > 0) | (workflow_version > 0):\n\n # only keep the stuff that matches these workflow properties\n if (workflow_id > 0):\n\n print(\"Considering only workflow id %d\" % workflow_id)\n\n in_workflow = classifications.workflow_id == workflow_id\n else:\n # the workflow id wasn't specified, so just make an array of true\n in_workflow = np.array([True for q in classifications.workflow_id])\n\n if (workflow_version > 0):\n\n classifications['version_int'] = [int(q) for q in classifications.workflow_version]\n\n print(\"Considering only major workflow version %d\" % int(workflow_version))\n\n # we only care about the major workflow version, not the minor version\n in_version = classifications.version_int == int(workflow_version)\n else:\n in_version = np.array([True for q in classifications.workflow_version])\n\n\n if (sum(in_workflow & in_version) == 0):\n print(\"ERROR: your combination of workflow_id and workflow_version does not exist!\\nIgnoring workflow id/version request and computing stats for ALL classifications instead.\")\n #classifications = classifications_all\n else:\n # select the subset of classifications\n classifications = classifications[in_workflow & in_version]\n\n del in_workflow\n del in_version\n\nelse:\n # just use everything\n #classifications = classifications_all\n\n workflow_ids = classifications.workflow_id.unique()\n # this takes too much CPU time just for a print statement. 
Just use float versions\n #classifications['version_int'] = [int(q) for q in classifications.workflow_version]\n version_ints = classifications.workflow_version.unique()\n\n print(\"Considering all classifications in workflow ids:\")\n print(workflow_ids)\n print(\" and workflow_versions:\")\n print(version_ints)\n\n\n# Remove classifications collected before the project went Live\n# note: it makes logical sense to do this *before* we extract the classifications\n# from the workflow we care about, *but* the meta_json setting step (which we\n# need in order to extract Live project status) can take a while (up to ~minutes)\n# and adds to memory usage, so I'd rather do it after we've already culled\n# the table of potentially a lot of unused rows.\n# OTOH culling duplicates takes more time and memory than culling unused workflow\n# versions, so wait to do that until after we've removed non-Live classifications\n\n# first, extract the metadata column into a json we can read entries for\n#\n# ujson is quite a bit faster than json but seems to use a bit more memory as it works\nclassifications['meta_json'] = [ujson.loads(q) for q in classifications.metadata]\n\nif keep_nonlive:\n print(\"Retaining all non-live classifications in analysis.\")\nelse:\n # would that we could just do q['live_project'] but if that tag is missing for\n # any classifications (which it is in some cases) it crashes\n classifications['live_project'] = [get_live_project(q) for q in classifications.meta_json]\n\n # if this line gives you an error you've read in this boolean as a string\n # so need to convert \"True\" --> True and \"False\" --> False\n class_live = classifications[classifications.live_project].copy()\n n_class_thiswf = len(classifications)\n n_live = sum(classifications.live_project)\n n_notlive = n_class_thiswf - n_live\n print(\" Removing %d non-live classifications...\" % n_notlive)\n\n # don't make a slice but also save memory\n classifications = pd.DataFrame(class_live)\n del class_live\n gc.collect()\n\n\n\n# if we've been asked to remove duplicates, do that now\nif remove_duplicates:\n '''\n a duplicate can be that the classification id is submitted twice by the client\n but it can also be that the classifier classified the same subject twice in different classification_ids.\n\n So identify duplicates based on username + subject id + workflow info, not based on classification_id.\n '''\n subj_classifications = classifications.groupby('user_name subject_ids workflow_id workflow_version'.split())\n\n n_class = len(classifications)\n # just take the first of each of the groups\n classifications_nodups = subj_classifications.head(1)\n n_class_nodup = len(classifications_nodups)\n\n n_dups = n_class - n_class_nodup\n\n if n_dups == 0:\n print(\"Searched for duplicate classifications; none found.\")\n else:\n duplicate_outfile = classfile_in.replace(\".csv\", \"_duplicated_only.csv\")\n if duplicate_outfile == classfile_in:\n duplicate_outfile += \"_duplicated_only.csv\"\n\n print(\"Found %d duplicate classifications (%.2f percent of total).\" % (n_dups, float(n_dups)/float(n_class)*100.0))\n\n # get the duplicate classifications and save them before we remove them\n #class_dups = pd.DataFrame(subj_classifications.apply(get_duplicate_ids))\n\n # if you want to keep a record of everything with just the dups flagged,\n # this is your thing\n #dups_flagged = pd.merge(classifications, classifications_nodups['classification_id subject_id'.split()], how='outer', on='classification_id', suffixes=('', '_2'), 
indicator=True)\n # if you just need something that has only the dups in it, here you go\n dups_only = classifications[~classifications.isin(classifications_nodups)].dropna(how='all')\n\n # dups_only has the duplicates only - not the original classification in each set\n # i.e. if classifications 123, 456, and 789 are all from the same user\n # classifying the same subject, dups_only will only contain classifications\n # 456 and 789. When we save the duplicate classifications we want to save\n # the initial classification (that was later duplicated) as well, so we\n # need to retrieve those.\n # I don't see a really easy way to do it based on the groupby we already did\n # (subj_classifications)\n # so let's just define what identifies the duplicate (user_name + subject_ids)\n # and pick them out.\n # even for a reasonably big dataset this is relatively fast (seconds, not minutes)\n try:\n dups_only['user_subj_pair'] = dups_only['user_name']+'_'+dups_only['subject_ids'].astype(int).astype(str)+'_'+dups_only['workflow_id'].astype(str)+'v'+dups_only['workflow_version'].astype(str)\n except:\n dups_only['user_subj_pair'] = dups_only['user_name']+'_'+dups_only['subject_ids'].astype(str)+'_'+dups_only['workflow_id'].astype(str)+'v'+dups_only['workflow_version'].astype(str)\n\n # n_dup_pairs tracks unique user-subject pairs that were duplicated\n dup_pairs = dups_only['user_subj_pair'].unique()\n n_dup_pairs = len(dup_pairs)\n\n try:\n classifications['user_subj_pair'] = classifications['user_name']+'_'+classifications['subject_ids'].astype(int).astype(str)+'_'+classifications['workflow_id'].astype(str)+'v'+classifications['workflow_version'].astype(str)\n except:\n classifications['user_subj_pair'] = classifications['user_name']+'_'+classifications['subject_ids'].astype(str)+'_'+classifications['workflow_id'].astype(str)+'v'+classifications['workflow_version'].astype(str)\n\n # this keeps things that are any part of a duplicate set, including first\n is_a_dup = classifications['user_subj_pair'].isin(dup_pairs)\n\n class_dups = classifications[is_a_dup].copy()\n # counts any classification that is any part of a duplicate set\n n_partofdup = len(class_dups)\n\n class_dups.to_csv(duplicate_outfile)\n #print(class_dups.head(3))\n\n # now throw away the duplicates (but keep the first of each set) from\n # the main classifications table\n classifications = pd.DataFrame(classifications_nodups)\n\n del class_dups\n del is_a_dup\n print(\"Duplicates removed from analysis (%d unique user-subject-workflow groups).\" % n_dup_pairs)\n\n del subj_classifications\n del classifications_nodups\n gc.collect()\n\n\nclassifications['created_day'] = [q[:10] for q in classifications.created_at]\n\nfirst_class_day = min(classifications.created_day).replace(' ', '')\nlast_class_day = max(classifications.created_day).replace(' ', '')\n\n\n# save processing time and memory in the groupby.apply(); only keep the columns we're going to use or want to save\nif output_csv:\n if not keep_allcols:\n # if we'll be writing to a file at the end of this we need to save a few extra columns\n cols_used = [\"classification_id\", \"user_name\", \"user_id\", \"user_ip\", \"created_at\", \"created_day\", \"metadata\", \"meta_json\", \"subject_ids\", \"workflow_id\", \"workflow_version\"]\nelse:\n if not keep_allcols:\n cols_used = [\"classification_id\", \"user_name\", \"user_id\", \"user_ip\", \"created_at\", \"created_day\", \"meta_json\", \"subject_ids\"]\nclassifications = classifications[cols_used]\n# collect() calls 
PyInt_ClearFreeList(), so explicitly helps free some active memory\ngc.collect()\n\n# grab the subject counts\nn_subj_tot = len(classifications.subject_ids.unique())\nby_subject = classifications.groupby('subject_ids')\nsubj_class = by_subject.created_at.aggregate('count')\n\n# basic stats on how classified the subjects are\nsubj_class_mean = np.mean(subj_class)\nsubj_class_med = np.median(subj_class)\nsubj_class_min = np.min(subj_class)\nsubj_class_max = np.max(subj_class)\n\n# free up some memory - note calling this does take CPU time but\n# can free up GBs of active memory for big classification files\ndel by_subject\ngc.collect()\n\n\n# index by created_at as a timeseries\n# note: this means things might not be uniquely indexed\n# but it makes a lot of things easier and faster.\n# update: it's not really needed in the main bit, but will do it on each group later.\n#classifications.set_index('created_at_ts', inplace=True)\n\n\n# get some user information\nall_users = classifications.user_name.unique()\nby_user = classifications.groupby('user_name')\n\n# also count IP addresses\nn_ip = len(classifications.user_ip.unique())\n\n# get total classification and user counts\nn_class_tot = len(classifications)\nn_users_tot = len(all_users)\n\nunregistered = [q.startswith(\"not-logged-in\") for q in all_users]\nn_unreg = sum(unregistered)\nn_reg = n_users_tot - n_unreg\n\nis_unreg_class = [q.startswith(\"not-logged-in\") for q in classifications.user_name]\nn_unreg_class = sum(is_unreg_class)\nn_reg_class = n_class_tot - n_unreg_class\n\n# for the leaderboard, which I recommend project builders never make public because\n# Just Say No to gamification\n# But it's still interesting to see who your most prolific classifiers are, and\n# e.g. whether they're also your most prolific Talk users\nnclass_byuser = by_user.created_at.aggregate('count')\nnclass_byuser_ranked = nclass_byuser.copy()\nnclass_byuser_ranked.sort_values(inplace=True, ascending=False)\n# rename the columns properly so they'll print as useful csv headers\nnclass_byuser_ranked.name = 'user_name'\nnc = pd.DataFrame(nclass_byuser_ranked)\nnc.columns = ['n_class']\n\n# write this to a file, so you don't have to re-calculate it later\nnclass_byuser_outfile = classfile_in.replace(\".csv\", \"_nclass_byuser_ranked.csv\")\n# don't accidentally overwrite the classifications file just because someone\n# renamed it to not end in .csv\nif nclass_byuser_outfile == classfile_in:\n nclass_byuser_outfile = \"project_nclass_byuser_ranked.csv\"\nnc.to_csv(nclass_byuser_outfile)\n\n# very basic stats\nnclass_med = np.median(nclass_byuser)\nnclass_mean = np.mean(nclass_byuser)\n\n# Gini coefficient - see the comments above the gini() function for more notes\nnclass_gini = gini(nclass_byuser)\n\nprint(\"\\nOverall:\\n\\n%d classifications of %d subjects by %d classifiers,\" % (n_class_tot,n_subj_tot,n_users_tot))\nprint(\"%d logged in and %d not logged in, from %d unique IP addresses.\" % (n_reg,n_unreg,n_ip))\nprint(\"%d classifications were from logged-in users, %d from not-logged-in users.\\n\" % (n_reg_class, n_unreg_class))\nprint(\"That's %.2f classifications per subject on average (median = %.1f).\" % (subj_class_mean, subj_class_med))\nprint(\"The most classified subject has %d classifications; the least-classified subject has %d.\\n\" % (subj_class_max,subj_class_min))\nprint(\"Median number of classifications per user: %.2f\" %nclass_med)\nprint(\"Mean number of classifications per user: %.2f\" % nclass_mean)\nprint(\"\\nTop 10 most prolific 
classifiers:\")\nprint(nclass_byuser_ranked.head(10))\nprint(\"\\n\\nGini coefficient for classifications by user: %.2f\" % nclass_gini)\nprint(\"\\nClassifications were collected between %s and %s.\" % (first_class_day, last_class_day))\nprint(\"The highest classification id considered here is %d.\\n\" % max(classifications.classification_id))\n\n\n# if the input specified we should compute total time spent by classifiers, compute it\nif time_elapsed:\n # free up some memory\n # do this inside the if because if we're not computing times then the program\n # is about to end so this memory will be freed up anyway\n del unregistered\n del by_user\n gc.collect()\n\n\n classifications['started_at_str'] = [q['started_at'].replace('T',' ').replace('Z', '') for q in classifications.meta_json]\n classifications['finished_at_str'] = [q['finished_at'].replace('T',' ').replace('Z', '') for q in classifications.meta_json]\n\n sa_temp = classifications['started_at_str']\n fa_temp = classifications['finished_at_str']\n\n #print(\"Creating timeseries...\")#,datetime.datetime.now().strftime('%H:%M:%S.%f')\n\n\n try:\n classifications['started_at'] = pd.to_datetime(sa_temp, format='%Y-%m-%d %H:%M:%S.%f')\n except Exception as the_error:\n print(\"Oops:\\n%s\" % the_error)\n try:\n classifications['started_at'] = pd.to_datetime(sa_temp, format='%Y-%m-%d %H:%M:%S %Z')\n except Exception as the_error:\n print(\"Oops:\\n%s\" % the_error)\n classifications['started_at'] = pd.to_datetime(sa_temp)\n\n\n try:\n classifications['finished_at'] = pd.to_datetime(fa_temp, format='%Y-%m-%d %H:%M:%S.%f')\n except Exception as the_error:\n print(\"Oops:\\n%s\" % the_error)\n try:\n classifications['finished_at'] = pd.to_datetime(fa_temp, format='%Y-%m-%d %H:%M:%S %Z')\n except Exception as the_error:\n print(\"Oops:\\n%s\" % the_error)\n classifications['finished_at'] = pd.to_datetime(fa_temp)\n\n # we did all that above so that this would only take one line and be quite fast\n classifications['class_t_length'] = (classifications.finished_at - classifications.started_at)\n\n # throw away absurd time counts: accept lengths between 0 < dt < 30 minutes\n # anything outside that is either a wrongly reported time or the user walked away from their computer\n ok_times = (classifications.class_t_length > np.timedelta64(0, 's')) & (classifications.class_t_length < np.timedelta64(30, 'm'))\n\n # how many turned out to be okay?\n n_t_ok = sum(ok_times)\n\n # compute total times\n time_spent_classifying = np.sum(classifications['class_t_length'][ok_times])\n days_spent_classifying = time_spent_classifying / np.timedelta64(1, 'D')\n frac_good_durations = float(n_t_ok)/float(n_class_tot)\n\n print(\"Based on %d classifications (%.1f percent) where we can probably\\ntrust the classification durations, the classifiers spent a total of %.2f days\\n(or %.2f years) classifying in the project.\\n\" % (n_t_ok, frac_good_durations*100., days_spent_classifying, days_spent_classifying / 365.))\n\n mean_t_class = np.mean(classifications['class_t_length'][ok_times])\n median_t_class = np.median(classifications['class_t_length'][ok_times])\n\n human_effort_extrap = float(n_class_tot)*float(mean_t_class / np.timedelta64(1, 'D')) / 365. 
# in years\n\n print(\"Mean classification length: %8.1f seconds\" % float(mean_t_class / np.timedelta64(1, 's')))\n print(\"Median classification length: %6.1f seconds\" % float(median_t_class / np.timedelta64(1, 's')))\n\n\n\n print(\"\\nIf we use the mean to extrapolate and include the %.1f percent of\\nclassifications where the reported duration had an error, that means\\nthe total time spent is equivalent to %.2f years of human effort, or\\n%.2f years of FTE (1 person working 40 hours/week, no holiday.)\\n\" % ((1-frac_good_durations)*100., human_effort_extrap, human_effort_extrap * (24.*7.)/40.))\n\nif output_csv:\n # free up what memory we can before doing this (matters for big files)\n if time_elapsed:\n del ok_times\n del sa_temp\n del fa_temp\n del nclass_byuser\n del all_users\n del subj_class\n gc.collect()\n\n if keep_allcols:\n classifications[cols_out].to_csv(outfile_csv)\n else:\n classifications.to_csv(outfile_csv)\n print(\"File with used subset of classification info written to %s .\" % outfile_csv)\n\nprint(\"File with ranked list of user classification counts written to %s .\" % nclass_byuser_outfile)\n\nif remove_duplicates:\n if (n_dups > 0):\n print(\"Saved info for all classifications that have duplicates to %s .\" % duplicate_outfile)\n\n\n#end\n"
] | [
[
"numpy.sum",
"numpy.timedelta64",
"pandas.read_csv",
"pandas.DataFrame",
"numpy.median",
"pandas.to_datetime",
"numpy.max",
"numpy.min",
"numpy.array",
"numpy.mean"
]
] |
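
The `gini()` helper in the script above reduces a list of per-user classification counts to a single inequality number; two toy calls (invented counts, not project data) show the extremes described in its comment block:

```python
# gini() copied from basic_project_stats.py above; the inputs are made-up counts.
def gini(list_of_values):
    sorted_list = sorted(list_of_values)
    height, area = 0, 0
    for value in sorted_list:
        height += value
        area += height - value / 2.
    fair_area = height * len(list_of_values) / 2
    return (fair_area - area) / fair_area

print(gini([10] * 50))           # 0.0  -- everyone did 10 classifications
print(gini([1] * 49 + [1000]))   # ~0.93 -- one classifier dominates the project
```
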
welly87/zipline | [
"dbdfa8ed86417f954e95bd7468e144589f2cd482"
] | [
"zipline/pipeline/term.py"
] | [
"\"\"\"\nBase class for Filters, Factors and Classifiers\n\"\"\"\nfrom abc import ABCMeta, abstractproperty\nfrom bisect import insort\nfrom collections import Mapping\nfrom weakref import WeakValueDictionary\n\nfrom numpy import (\n array,\n dtype as dtype_class,\n ndarray,\n searchsorted,\n)\nfrom six import with_metaclass\n\nfrom zipline.assets import Asset\nfrom zipline.errors import (\n DTypeNotSpecified,\n InvalidOutputName,\n NonExistentAssetInTimeFrame,\n NonSliceableTerm,\n NonWindowSafeInput,\n NotDType,\n NonPipelineInputs,\n TermInputsNotSpecified,\n TermOutputsEmpty,\n UnsupportedDType,\n WindowLengthNotSpecified,\n)\nfrom zipline.lib.adjusted_array import can_represent_dtype\nfrom zipline.lib.labelarray import LabelArray\nfrom zipline.utils.input_validation import expect_types\nfrom zipline.utils.memoize import lazyval\nfrom zipline.utils.numpy_utils import (\n bool_dtype,\n categorical_dtype,\n datetime64ns_dtype,\n default_missing_value_for_dtype,\n)\nfrom zipline.utils.sharedoc import (\n templated_docstring,\n PIPELINE_ALIAS_NAME_DOC,\n PIPELINE_DOWNSAMPLING_FREQUENCY_DOC,\n)\n\nfrom .domain import Domain, GENERIC, infer_domain\nfrom .downsample_helpers import expect_downsample_frequency\nfrom .sentinels import NotSpecified\n\n\nclass Term(with_metaclass(ABCMeta, object)):\n \"\"\"\n Base class for objects that can appear in the compute graph of a\n :class:`zipline.pipeline.Pipeline`.\n\n Notes\n -----\n Most Pipeline API users only interact with :class:`Term` via subclasses:\n\n - :class:`~zipline.pipeline.data.BoundColumn`\n - :class:`~zipline.pipeline.Factor`\n - :class:`~zipline.pipeline.Filter`\n - :class:`~zipline.pipeline.Classifier`\n\n Instances of :class:`Term` are **memoized**. If you call a Term's\n constructor with the same arguments twice, the same object will be returned\n from both calls:\n\n **Example:**\n\n >>> from zipline.pipeline.data import EquityPricing\n >>> from zipline.pipeline.factors import SimpleMovingAverage\n >>> x = SimpleMovingAverage(inputs=[EquityPricing.close], window_length=5)\n >>> y = SimpleMovingAverage(inputs=[EquityPricing.close], window_length=5)\n >>> x is y\n True\n\n .. warning::\n\n Memoization of terms means that it's generally unsafe to modify\n attributes of a term after construction.\n \"\"\"\n # These are NotSpecified because a subclass is required to provide them.\n dtype = NotSpecified\n missing_value = NotSpecified\n\n # Subclasses aren't required to provide `params`. 
The default behavior is\n # no params.\n params = ()\n\n # All terms are generic by default.\n domain = GENERIC\n\n # Determines if a term is safe to be used as a windowed input.\n window_safe = False\n\n # The dimensions of the term's output (1D or 2D).\n ndim = 2\n\n _term_cache = WeakValueDictionary()\n\n def __new__(cls,\n domain=NotSpecified,\n dtype=NotSpecified,\n missing_value=NotSpecified,\n window_safe=NotSpecified,\n ndim=NotSpecified,\n # params is explicitly not allowed to be passed to an instance.\n *args,\n **kwargs):\n \"\"\"\n Memoized constructor for Terms.\n\n Caching previously-constructed Terms is useful because it allows us to\n only compute equivalent sub-expressions once when traversing a Pipeline\n dependency graph.\n\n Caching previously-constructed Terms is **sane** because terms and\n their inputs are both conceptually immutable.\n \"\"\"\n # Subclasses can override these class-level attributes to provide\n # different default values for instances.\n if domain is NotSpecified:\n domain = cls.domain\n if dtype is NotSpecified:\n dtype = cls.dtype\n if missing_value is NotSpecified:\n missing_value = cls.missing_value\n if ndim is NotSpecified:\n ndim = cls.ndim\n if window_safe is NotSpecified:\n window_safe = cls.window_safe\n\n dtype, missing_value = validate_dtype(\n cls.__name__,\n dtype,\n missing_value,\n )\n params = cls._pop_params(kwargs)\n\n identity = cls._static_identity(\n domain=domain,\n dtype=dtype,\n missing_value=missing_value,\n window_safe=window_safe,\n ndim=ndim,\n params=params,\n *args, **kwargs\n )\n\n try:\n return cls._term_cache[identity]\n except KeyError:\n new_instance = cls._term_cache[identity] = \\\n super(Term, cls).__new__(cls)._init(\n domain=domain,\n dtype=dtype,\n missing_value=missing_value,\n window_safe=window_safe,\n ndim=ndim,\n params=params,\n *args, **kwargs\n )\n return new_instance\n\n @classmethod\n def _pop_params(cls, kwargs):\n \"\"\"\n Pop entries from the `kwargs` passed to cls.__new__ based on the values\n in `cls.params`.\n\n Parameters\n ----------\n kwargs : dict\n The kwargs passed to cls.__new__.\n\n Returns\n -------\n params : list[(str, object)]\n A list of string, value pairs containing the entries in cls.params.\n\n Raises\n ------\n TypeError\n Raised if any parameter values are not passed or not hashable.\n \"\"\"\n params = cls.params\n if not isinstance(params, Mapping):\n params = {k: NotSpecified for k in params}\n param_values = []\n for key, default_value in params.items():\n try:\n value = kwargs.pop(key, default_value)\n if value is NotSpecified:\n raise KeyError(key)\n\n # Check here that the value is hashable so that we fail here\n # instead of trying to hash the param values tuple later.\n hash(value)\n except KeyError:\n raise TypeError(\n \"{typename} expected a keyword parameter {name!r}.\".format(\n typename=cls.__name__,\n name=key\n )\n )\n except TypeError:\n # Value wasn't hashable.\n raise TypeError(\n \"{typename} expected a hashable value for parameter \"\n \"{name!r}, but got {value!r} instead.\".format(\n typename=cls.__name__,\n name=key,\n value=value,\n )\n )\n\n param_values.append((key, value))\n return tuple(param_values)\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Noop constructor to play nicely with our caching __new__. Subclasses\n should implement _init instead of this method.\n\n When a class' __new__ returns an instance of that class, Python will\n automatically call __init__ on the object, even if a new object wasn't\n actually constructed. 
Because we memoize instances, we often return an\n object that was already initialized from __new__, in which case we\n don't want to call __init__ again.\n\n Subclasses that need to initialize new instances should override _init,\n which is guaranteed to be called only once.\n \"\"\"\n pass\n\n @expect_types(key=Asset)\n def __getitem__(self, key):\n if isinstance(self, LoadableTerm):\n raise NonSliceableTerm(term=self)\n return Slice(self, key)\n\n @classmethod\n def _static_identity(cls,\n domain,\n dtype,\n missing_value,\n window_safe,\n ndim,\n params):\n \"\"\"\n Return the identity of the Term that would be constructed from the\n given arguments.\n\n Identities that compare equal will cause us to return a cached instance\n rather than constructing a new one. We do this primarily because it\n makes dependency resolution easier.\n\n This is a classmethod so that it can be called from Term.__new__ to\n determine whether to produce a new instance.\n \"\"\"\n return (cls, domain, dtype, missing_value, window_safe, ndim, params)\n\n def _init(self, domain, dtype, missing_value, window_safe, ndim, params):\n \"\"\"\n Parameters\n ----------\n domain : zipline.pipeline.domain.Domain\n The domain of this term.\n dtype : np.dtype\n Dtype of this term's output.\n missing_value : object\n Missing value for this term.\n ndim : 1 or 2\n The dimensionality of this term.\n params : tuple[(str, hashable)]\n Tuple of key/value pairs of additional parameters.\n \"\"\"\n self.domain = domain\n self.dtype = dtype\n self.missing_value = missing_value\n self.window_safe = window_safe\n self.ndim = ndim\n\n for name, value in params:\n if hasattr(self, name):\n raise TypeError(\n \"Parameter {name!r} conflicts with already-present\"\n \" attribute with value {value!r}.\".format(\n name=name,\n value=getattr(self, name),\n )\n )\n # TODO: Consider setting these values as attributes and replacing\n # the boilerplate in NumericalExpression, Rank, and\n # PercentileFilter.\n\n self.params = dict(params)\n\n # Make sure that subclasses call super() in their _validate() methods\n # by setting this flag. The base class implementation of _validate\n # should set this flag to True.\n self._subclass_called_super_validate = False\n self._validate()\n assert self._subclass_called_super_validate, (\n \"Term._validate() was not called.\\n\"\n \"This probably means that you overrode _validate\"\n \" without calling super().\"\n )\n del self._subclass_called_super_validate\n\n return self\n\n def _validate(self):\n \"\"\"\n Assert that this term is well-formed. This should be called exactly\n once, at the end of Term._init().\n \"\"\"\n # mark that we got here to enforce that subclasses overriding _validate\n # call super().\n self._subclass_called_super_validate = True\n\n def compute_extra_rows(self,\n all_dates,\n start_date,\n end_date,\n min_extra_rows):\n \"\"\"\n Calculate the number of extra rows needed to compute ``self``.\n\n Must return at least ``min_extra_rows``, and the default implementation\n is to just return ``min_extra_rows``. 
This is overridden by\n downsampled terms to ensure that the first date computed is a\n recomputation date.\n\n Parameters\n ----------\n all_dates : pd.DatetimeIndex\n The trading sessions against which ``self`` will be computed.\n start_date : pd.Timestamp\n The first date for which final output is requested.\n end_date : pd.Timestamp\n The last date for which final output is requested.\n min_extra_rows : int\n The minimum number of extra rows required of ``self``, as\n determined by other terms that depend on ``self``.\n\n Returns\n -------\n extra_rows : int\n The number of extra rows to compute. Must be at least\n ``min_extra_rows``.\n \"\"\"\n return min_extra_rows\n\n @abstractproperty\n def inputs(self):\n \"\"\"\n A tuple of other Terms needed as inputs for ``self``.\n \"\"\"\n raise NotImplementedError('inputs')\n\n @abstractproperty\n def windowed(self):\n \"\"\"\n Boolean indicating whether this term is a trailing-window computation.\n \"\"\"\n raise NotImplementedError('windowed')\n\n @abstractproperty\n def mask(self):\n \"\"\"\n A :class:`~zipline.pipeline.Filter` representing asset/date pairs to\n while computing this Term. True means include; False means exclude.\n \"\"\"\n raise NotImplementedError('mask')\n\n @abstractproperty\n def dependencies(self):\n \"\"\"\n A dictionary mapping terms that must be computed before `self` to the\n number of extra rows needed for those terms.\n \"\"\"\n raise NotImplementedError('dependencies')\n\n def graph_repr(self):\n \"\"\"A short repr to use when rendering GraphViz graphs.\n \"\"\"\n # Default graph_repr is just the name of the type.\n return type(self).__name__\n\n def recursive_repr(self):\n \"\"\"A short repr to use when recursively rendering terms with inputs.\n \"\"\"\n # Default recursive_repr is just the name of the type.\n return type(self).__name__\n\n\nclass AssetExists(Term):\n \"\"\"\n Pseudo-filter describing whether or not an asset existed on a given day.\n This is the default mask for all terms that haven't been passed a mask\n explicitly.\n\n This is morally a Filter, in the sense that it produces a boolean value for\n every asset on every date. 
We don't subclass Filter, however, because\n `AssetExists` is computed directly by the PipelineEngine.\n\n This term is guaranteed to be available as an input for any term computed\n by SimplePipelineEngine.run_pipeline().\n\n See Also\n --------\n zipline.assets.AssetFinder.lifetimes\n \"\"\"\n dtype = bool_dtype\n dataset = None\n inputs = ()\n dependencies = {}\n mask = None\n windowed = False\n\n def __repr__(self):\n return \"AssetExists()\"\n\n graph_repr = __repr__\n\n def _compute(self, today, assets, out):\n raise NotImplementedError(\n \"AssetExists cannot be computed directly.\"\n \" Check your PipelineEngine configuration.\"\n )\n\n\nclass InputDates(Term):\n \"\"\"\n 1-Dimensional term providing date labels for other term inputs.\n\n This term is guaranteed to be available as an input for any term computed\n by SimplePipelineEngine.run_pipeline().\n \"\"\"\n ndim = 1\n dataset = None\n dtype = datetime64ns_dtype\n inputs = ()\n dependencies = {}\n mask = None\n windowed = False\n window_safe = True\n\n def __repr__(self):\n return \"InputDates()\"\n\n graph_repr = __repr__\n\n def _compute(self, today, assets, out):\n raise NotImplementedError(\n \"InputDates cannot be computed directly.\"\n \" Check your PipelineEngine configuration.\"\n )\n\n\nclass LoadableTerm(Term):\n \"\"\"\n A Term that should be loaded from an external resource by a PipelineLoader.\n\n This is the base class for :class:`zipline.pipeline.data.BoundColumn`.\n \"\"\"\n windowed = False\n inputs = ()\n\n @lazyval\n def dependencies(self):\n return {self.mask: 0}\n\n\nclass ComputableTerm(Term):\n \"\"\"\n A Term that should be computed from a tuple of inputs.\n\n This is the base class for :class:`zipline.pipeline.Factor`,\n :class:`zipline.pipeline.Filter`, and :class:`zipline.pipeline.Classifier`.\n \"\"\"\n inputs = NotSpecified\n outputs = NotSpecified\n window_length = NotSpecified\n mask = NotSpecified\n domain = NotSpecified\n\n def __new__(cls,\n inputs=inputs,\n outputs=outputs,\n window_length=window_length,\n mask=mask,\n domain=domain,\n *args, **kwargs):\n\n if inputs is NotSpecified:\n inputs = cls.inputs\n\n # Having inputs = NotSpecified is an error, but we handle it later\n # in self._validate rather than here.\n if inputs is not NotSpecified:\n # Allow users to specify lists as class-level defaults, but\n # normalize to a tuple so that inputs is hashable.\n inputs = tuple(inputs)\n\n # Make sure all our inputs are valid pipeline objects before trying\n # to infer a domain.\n non_terms = [t for t in inputs if not isinstance(t, Term)]\n if non_terms:\n raise NonPipelineInputs(cls.__name__, non_terms)\n\n if domain is NotSpecified:\n domain = infer_domain(inputs)\n\n if outputs is NotSpecified:\n outputs = cls.outputs\n if outputs is not NotSpecified:\n outputs = tuple(outputs)\n\n if mask is NotSpecified:\n mask = cls.mask\n if mask is NotSpecified:\n mask = AssetExists()\n\n if window_length is NotSpecified:\n window_length = cls.window_length\n\n return super(ComputableTerm, cls).__new__(\n cls,\n inputs=inputs,\n outputs=outputs,\n mask=mask,\n window_length=window_length,\n domain=domain,\n *args, **kwargs\n )\n\n def _init(self, inputs, outputs, window_length, mask, *args, **kwargs):\n self.inputs = inputs\n self.outputs = outputs\n self.window_length = window_length\n self.mask = mask\n return super(ComputableTerm, self)._init(*args, **kwargs)\n\n @classmethod\n def _static_identity(cls,\n inputs,\n outputs,\n window_length,\n mask,\n *args,\n **kwargs):\n return (\n 
super(ComputableTerm, cls)._static_identity(*args, **kwargs),\n inputs,\n outputs,\n window_length,\n mask,\n )\n\n def _validate(self):\n super(ComputableTerm, self)._validate()\n\n # Check inputs.\n if self.inputs is NotSpecified:\n raise TermInputsNotSpecified(termname=type(self).__name__)\n\n if not isinstance(self.domain, Domain):\n raise TypeError(\n \"Expected {}.domain to be an instance of Domain, \"\n \"but got {}.\".format(type(self).__name__, type(self.domain))\n )\n\n # Check outputs.\n if self.outputs is NotSpecified:\n pass\n elif not self.outputs:\n raise TermOutputsEmpty(termname=type(self).__name__)\n else:\n # Raise an exception if there are any naming conflicts between the\n # term's output names and certain attributes.\n disallowed_names = [\n attr for attr in dir(ComputableTerm)\n if not attr.startswith('_')\n ]\n\n # The name 'compute' is an added special case that is disallowed.\n # Use insort to add it to the list in alphabetical order.\n insort(disallowed_names, 'compute')\n\n for output in self.outputs:\n if output.startswith('_') or output in disallowed_names:\n raise InvalidOutputName(\n output_name=output,\n termname=type(self).__name__,\n disallowed_names=disallowed_names,\n )\n\n if self.window_length is NotSpecified:\n raise WindowLengthNotSpecified(termname=type(self).__name__)\n\n if self.mask is NotSpecified:\n # This isn't user error, this is a bug in our code.\n raise AssertionError(\"{term} has no mask\".format(term=self))\n\n if self.window_length > 1:\n for child in self.inputs:\n if not child.window_safe:\n raise NonWindowSafeInput(parent=self, child=child)\n\n def _compute(self, inputs, dates, assets, mask):\n \"\"\"\n Subclasses should implement this to perform actual computation.\n\n This is named ``_compute`` rather than just ``compute`` because\n ``compute`` is reserved for user-supplied functions in\n CustomFilter/CustomFactor/CustomClassifier.\n \"\"\"\n raise NotImplementedError()\n\n @lazyval\n def windowed(self):\n \"\"\"\n Whether or not this term represents a trailing window computation.\n\n If term.windowed is truthy, its compute_from_windows method will be\n called with instances of AdjustedArray as inputs.\n\n If term.windowed is falsey, its compute_from_baseline will be called\n with instances of np.ndarray as inputs.\n \"\"\"\n return (\n self.window_length is not NotSpecified\n and self.window_length > 0\n )\n\n @lazyval\n def dependencies(self):\n \"\"\"\n The number of extra rows needed for each of our inputs to compute this\n term.\n \"\"\"\n extra_input_rows = max(0, self.window_length - 1)\n out = {}\n for term in self.inputs:\n out[term] = extra_input_rows\n out[self.mask] = 0\n return out\n\n @expect_types(data=ndarray)\n def postprocess(self, data):\n \"\"\"\n Called with an result of ``self``, unravelled (i.e. 1-dimensional)\n after any user-defined screens have been applied.\n\n This is mostly useful for transforming the dtype of an output, e.g., to\n convert a LabelArray into a pandas Categorical.\n\n The default implementation is to just return data unchanged.\n \"\"\"\n return data\n\n def to_workspace_value(self, result, assets):\n \"\"\"\n Called with a column of the result of a pipeline. 
This needs to put\n the data into a format that can be used in a workspace to continue\n doing computations.\n\n Parameters\n ----------\n result : pd.Series\n A multiindexed series with (dates, assets) whose values are the\n results of running this pipeline term over the dates.\n assets : pd.Index\n All of the assets being requested. This allows us to correctly\n shape the workspace value.\n\n Returns\n -------\n workspace_value : array-like\n An array like value that the engine can consume.\n \"\"\"\n return result.unstack().fillna(self.missing_value).reindex(\n columns=assets,\n fill_value=self.missing_value,\n ).values\n\n def _downsampled_type(self, *args, **kwargs):\n \"\"\"\n The expression type to return from self.downsample().\n \"\"\"\n raise NotImplementedError(\n \"downsampling is not yet implemented \"\n \"for instances of %s.\" % type(self).__name__\n )\n\n @expect_downsample_frequency\n @templated_docstring(frequency=PIPELINE_DOWNSAMPLING_FREQUENCY_DOC)\n def downsample(self, frequency):\n \"\"\"\n Make a term that computes from ``self`` at lower-than-daily frequency.\n\n Parameters\n ----------\n {frequency}\n \"\"\"\n return self._downsampled_type(term=self, frequency=frequency)\n\n def _aliased_type(self, *args, **kwargs):\n \"\"\"\n The expression type to return from self.alias().\n \"\"\"\n raise NotImplementedError(\n \"alias is not yet implemented \"\n \"for instances of %s.\" % type(self).__name__\n )\n\n @templated_docstring(name=PIPELINE_ALIAS_NAME_DOC)\n def alias(self, name):\n \"\"\"\n Make a term from ``self`` that names the expression.\n\n Parameters\n ----------\n {name}\n\n Returns\n -------\n aliased : Aliased\n ``self`` with a name.\n\n Notes\n -----\n This is useful for giving a name to a numerical or boolean expression.\n \"\"\"\n return self._aliased_type(term=self, name=name)\n\n def __repr__(self):\n return (\n \"{type}([{inputs}], {window_length})\"\n ).format(\n type=type(self).__name__,\n inputs=', '.join(i.recursive_repr() for i in self.inputs),\n window_length=self.window_length,\n )\n\n def recursive_repr(self):\n return type(self).__name__ + '(...)'\n\n\nclass Slice(ComputableTerm):\n \"\"\"\n Term for extracting a single column of a another term's output.\n\n Parameters\n ----------\n term : zipline.pipeline.Term\n The term from which to extract a column of data.\n asset : zipline.assets.Asset\n The asset corresponding to the column of `term` to be extracted.\n\n Notes\n -----\n Users should rarely construct instances of `Slice` directly. Instead, they\n should construct instances via indexing, e.g. 
`MyFactor()[Asset(24)]`.\n \"\"\"\n def __new__(cls, term, asset):\n return super(Slice, cls).__new__(\n cls,\n asset=asset,\n inputs=[term],\n window_length=0,\n mask=term.mask,\n dtype=term.dtype,\n missing_value=term.missing_value,\n window_safe=term.window_safe,\n ndim=1,\n )\n\n def __repr__(self):\n return \"{parent_term}[{asset}]\".format(\n type=type(self).__name__,\n parent_term=self.inputs[0].recursive_repr(),\n asset=self._asset,\n )\n\n def _init(self, asset, *args, **kwargs):\n self._asset = asset\n return super(Slice, self)._init(*args, **kwargs)\n\n @classmethod\n def _static_identity(cls, asset, *args, **kwargs):\n return (super(Slice, cls)._static_identity(*args, **kwargs), asset)\n\n def _compute(self, windows, dates, assets, mask):\n asset = self._asset\n asset_column = searchsorted(assets.values, asset.sid)\n if assets[asset_column] != asset.sid:\n raise NonExistentAssetInTimeFrame(\n asset=asset, start_date=dates[0], end_date=dates[-1],\n )\n\n # Return a 2D array with one column rather than a 1D array of the\n # column.\n return windows[0][:, [asset_column]]\n\n @property\n def asset(self):\n \"\"\"Get the asset whose data is selected by this slice.\n \"\"\"\n return self._asset\n\n @property\n def _downsampled_type(self):\n raise NotImplementedError(\n 'downsampling of slices is not yet supported'\n )\n\n\ndef validate_dtype(termname, dtype, missing_value):\n \"\"\"\n Validate a `dtype` and `missing_value` passed to Term.__new__.\n\n Ensures that we know how to represent ``dtype``, and that missing_value\n is specified for types without default missing values.\n\n Returns\n -------\n validated_dtype, validated_missing_value : np.dtype, any\n The dtype and missing_value to use for the new term.\n\n Raises\n ------\n DTypeNotSpecified\n When no dtype was passed to the instance, and the class doesn't\n provide a default.\n NotDType\n When either the class or the instance provides a value not\n coercible to a numpy dtype.\n NoDefaultMissingValue\n When dtype requires an explicit missing_value, but\n ``missing_value`` is NotSpecified.\n \"\"\"\n if dtype is NotSpecified:\n raise DTypeNotSpecified(termname=termname)\n\n try:\n dtype = dtype_class(dtype)\n except TypeError:\n raise NotDType(dtype=dtype, termname=termname)\n\n if not can_represent_dtype(dtype):\n raise UnsupportedDType(dtype=dtype, termname=termname)\n\n if missing_value is NotSpecified:\n missing_value = default_missing_value_for_dtype(dtype)\n\n try:\n if (dtype == categorical_dtype):\n # This check is necessary because we use object dtype for\n # categoricals, and numpy will allow us to promote numerical\n # values to object even though we don't support them.\n _assert_valid_categorical_missing_value(missing_value)\n\n # For any other type, we can check if the missing_value is safe by\n # making an array of that value and trying to safely convert it to\n # the desired type.\n # 'same_kind' allows casting between things like float32 and\n # float64, but not str and int.\n array([missing_value]).astype(dtype=dtype, casting='same_kind')\n except TypeError as e:\n raise TypeError(\n \"Missing value {value!r} is not a valid choice \"\n \"for term {termname} with dtype {dtype}.\\n\\n\"\n \"Coercion attempt failed with: {error}\".format(\n termname=termname,\n value=missing_value,\n dtype=dtype,\n error=e,\n )\n )\n\n return dtype, missing_value\n\n\ndef _assert_valid_categorical_missing_value(value):\n \"\"\"\n Check that value is a valid categorical missing_value.\n\n Raises a TypeError if the value is cannot 
be used as the missing_value for\n a categorical_dtype Term.\n \"\"\"\n label_types = LabelArray.SUPPORTED_SCALAR_TYPES\n if not isinstance(value, label_types):\n raise TypeError(\n \"Categorical terms must have missing values of type \"\n \"{types}.\".format(\n types=' or '.join([t.__name__ for t in label_types]),\n )\n )\n"
] | [
[
"numpy.array",
"numpy.dtype",
"numpy.searchsorted"
]
] |
PhylomatX/PhiFlow | [
"2b7a73c1f595e288d26945cd53cc482952bb1db9"
] | [
"phi/tf/tf_backend.py"
] | [
"import numbers\nimport uuid\nimport warnings\nfrom packaging import version\nimport six\n\nimport numpy as np\nimport six\nimport tensorflow as tf\nfrom packaging import version\n\nfrom phi.backend.backend_helper import split_multi_mode_pad, PadSettings, general_grid_sample_nd, equalize_shapes, circular_pad, replicate_pad\nfrom phi.backend.scipy_backend import SciPyBackend\nfrom phi.tf.tf_cuda_resample import *\nfrom . import tf\n\nfrom phi.backend.backend import Backend\nfrom phi.backend.tensorop import expand, collapsed_gather_nd\n\n\nclass TFBackend(Backend):\n\n def __init__(self):\n Backend.__init__(self, \"TensorFlow\")\n\n @property\n def precision_dtype(self):\n return {16: np.float16, 32: np.float32, 64: np.float64, None: np.float32}[self.precision]\n\n def is_tensor(self, x, only_native=False):\n if not only_native and SciPyBackend().is_tensor(x, only_native=False):\n return True\n return isinstance(x, (tf.Tensor, tf.Variable, tf.SparseTensor, tf.Operation))\n\n def as_tensor(self, x, convert_external=True):\n if self.is_tensor(x, only_native=convert_external):\n tensor = x\n elif isinstance(x, np.ndarray):\n tensor = tf.convert_to_tensor(SciPyBackend(precision=self.precision).as_tensor(x))\n else:\n tensor = tf.convert_to_tensor(x)\n # --- Enforce Precision ---\n if not isinstance(tensor, numbers.Number):\n if isinstance(tensor, np.ndarray):\n tensor = SciPyBackend(precision=self.precision).as_tensor(tensor)\n elif tensor.dtype.is_floating and self.has_fixed_precision:\n tensor = self.to_float(tensor)\n return tensor\n\n def copy(self, tensor, only_mutable=False):\n if not only_mutable or tf.executing_eagerly():\n return tf.identity(tensor)\n else:\n return tensor\n\n def equal(self, x, y):\n return tf.equal(x, y)\n\n def divide_no_nan(self, x, y):\n if version.parse(tf.__version__) >= version.parse('1.11.0'):\n return tf.div_no_nan(x, y)\n else:\n result = x / y\n return tf.where(tf.is_finite(result), result, tf.zeros_like(result))\n\n def random_uniform(self, shape, low=0, high=1):\n return tf.random.uniform(shape, minval=low, maxval=high, dtype=self.precision_dtype)\n\n def random_normal(self, shape):\n return tf.random.normal(shape, dtype=self.precision_dtype)\n\n def rank(self, value):\n return len(value.shape)\n\n def range(self, start, limit=None, delta=1, dtype=None):\n return tf.range(start, limit, delta, dtype)\n\n def tile(self, value, multiples):\n if isinstance(multiples, (tuple, list)) and self.ndims(value) < len(multiples):\n value = self.expand_dims(value, axis=0, number=len(multiples) - self.ndims(value))\n return tf.tile(value, multiples)\n\n def stack(self, values, axis=0):\n return tf.stack(values, axis=axis)\n\n def concat(self, values, axis):\n return tf.concat(values, axis)\n\n def pad(self, value, pad_width, mode='constant', constant_values=0):\n passes = split_multi_mode_pad(self.ndims(value), PadSettings(pad_width, mode, constant_values), split_by_constant_value=True)\n for pad_pass in passes:\n value = self._single_mode_single_constant_pad(value, *pad_pass)\n return value\n\n def _single_mode_single_constant_pad(self, value, pad_width, single_mode, constant_value=0):\n assert single_mode in ('constant', 'symmetric', 'circular', 'reflect', 'replicate'), single_mode\n if single_mode == 'circular':\n return circular_pad(value, pad_width, self)\n if single_mode == 'replicate':\n if np.any(np.array(pad_width) > 1):\n return replicate_pad(value, pad_width, self)\n else:\n single_mode = 'symmetric'\n return tf.pad(value, pad_width, single_mode.upper(), 
constant_values=constant_value) # constant, symmetric, reflect\n\n def reshape(self, value, shape):\n return tf.reshape(value, shape)\n\n def sum(self, value, axis=None, keepdims=False):\n if axis is not None:\n if not isinstance(axis, int):\n axis = list(axis)\n return tf.reduce_sum(value, axis=axis, keepdims=keepdims)\n\n def prod(self, value, axis=None):\n if axis is not None:\n if not isinstance(axis, int):\n axis = list(axis)\n if value.dtype == bool:\n return tf.reduce_all(value, axis=axis)\n return tf.reduce_prod(value, axis=axis)\n\n def where(self, condition, x=None, y=None):\n c = self.cast(condition, self.dtype(x))\n return c * x + (1 - c) * y\n # return tf.where(condition, x, y) # TF1 has an inconsistent broadcasting rule for where\n\n def mean(self, value, axis=None, keepdims=False):\n if axis is not None:\n if not isinstance(axis, int):\n axis = list(axis)\n return tf.reduce_mean(value, axis, keepdims=keepdims)\n\n def py_func(self, func, inputs, Tout, shape_out, stateful=True, name=None, grad=None):\n if grad is None:\n result = tf.py_func(func, inputs, Tout, stateful=stateful, name=name)\n else:\n # Need to generate a unique name to avoid duplicates:\n rnd_name = 'PyFuncGrad' + str(uuid.uuid4())\n\n tf.RegisterGradient(rnd_name)(grad) # see _MySquareGrad for grad example\n g = tf.get_default_graph()\n with g.gradient_override_map({\"PyFunc\": rnd_name}):\n result = tf.py_func(func, inputs, Tout, stateful=stateful, name=name)\n if shape_out is not None:\n result.set_shape(shape_out)\n return result\n\n def resample(self, inputs, sample_coords, interpolation='linear', boundary='constant', constant_values=0):\n assert interpolation == 'linear'\n if use_cuda(inputs):\n return resample_cuda(inputs, sample_coords, boundary)\n else:\n return general_grid_sample_nd(inputs, sample_coords, boundary, constant_values, self) # while this is a bit slower than niftynet, it give consisten results at the boundaries\n\n def zeros_like(self, tensor):\n return tf.zeros_like(tensor)\n\n def ones_like(self, tensor):\n return tf.ones_like(tensor)\n\n def dot(self, a, b, axes):\n return tf.tensordot(a, b, axes)\n\n def matmul(self, A, b):\n if isinstance(A, tf.SparseTensor):\n result = tf.sparse_tensor_dense_matmul(A, tf.transpose(b))\n result = tf.transpose(result)\n result.set_shape(tf.TensorShape([b.shape[0], A.shape[0]]))\n return result\n else:\n return tf.matmul(A, b)\n\n def einsum(self, equation, *tensors):\n return tf.einsum(equation, *tensors)\n\n def while_loop(self, cond, body, loop_vars, shape_invariants=None, parallel_iterations=10, back_prop=True,\n swap_memory=False, name=None, maximum_iterations=None):\n return tf.while_loop(cond, body, loop_vars,\n shape_invariants=shape_invariants,\n parallel_iterations=parallel_iterations,\n back_prop=back_prop,\n swap_memory=swap_memory,\n name=name,\n maximum_iterations=maximum_iterations)\n\n def abs(self, x):\n return tf.abs(x)\n\n def sign(self, x):\n return tf.sign(x)\n\n def round(self, x):\n return tf.round(x)\n\n def ceil(self, x):\n return tf.ceil(x)\n\n def floor(self, x):\n return tf.floor(x)\n\n def max(self, x, axis=None, keepdims=False):\n return tf.reduce_max(x, axis=axis, keepdims=keepdims)\n\n def min(self, x, axis=None, keepdims=False):\n return tf.reduce_min(x, axis=axis, keepdims=keepdims)\n\n def with_custom_gradient(self, function, inputs, gradient, input_index=0, output_index=None, name_base=\"custom_gradient_func\"):\n # Setup custom gradient\n gradient_name = name_base + \"_\" + str(uuid.uuid4())\n 
tf.RegisterGradient(gradient_name)(gradient)\n\n g = tf.get_default_graph()\n with g.gradient_override_map({\"Identity\": gradient_name}):\n fake_function = tf.identity(inputs[input_index])\n\n outputs = function(*inputs)\n output = outputs if output_index is None else outputs[output_index]\n output_with_gradient = fake_function + tf.stop_gradient(output - fake_function)\n if output_index is None:\n return output_with_gradient\n else:\n outputs = list(outputs)\n outputs[output_index] = output_with_gradient\n return outputs\n\n def maximum(self, a, b):\n return tf.maximum(a, b)\n\n def minimum(self, a, b):\n return tf.minimum(a, b)\n\n def clip(self, x, minimum, maximum):\n return tf.clip_by_value(x, minimum, maximum)\n\n def sqrt(self, x):\n return tf.sqrt(x)\n\n def exp(self, x):\n return tf.exp(x)\n\n def conv(self, tensor, kernel, padding=\"SAME\"):\n rank = tensor_spatial_rank(tensor)\n padding = padding.upper()\n if rank == 1:\n result = tf.nn.conv1d(tensor, kernel, 1, padding)\n elif rank == 2:\n result = tf.nn.conv2d(tensor, kernel, [1, 1, 1, 1], padding)\n elif rank == 3:\n result = tf.nn.conv3d(tensor, kernel, [1, 1, 1, 1, 1], padding)\n else:\n raise ValueError(\"Tensor must be of rank 1, 2 or 3 but is %d\" % rank)\n return result\n\n def expand_dims(self, a, axis=0, number=1):\n if number == 0:\n return a\n for _i in range(number):\n a = tf.expand_dims(a, axis)\n return a\n\n def shape(self, tensor):\n return tf.shape(tensor)\n\n def to_float(self, x, float64=False):\n if float64:\n warnings.warn('float64 argument is deprecated, set Backend.precision = 64 to use 64 bit operations.', DeprecationWarning)\n return tf.cast(x, tf.float64)\n else:\n return tf.cast(x, self.precision_dtype)\n\n def staticshape(self, tensor):\n if self.is_tensor(tensor, only_native=True):\n return tuple(tensor.shape.as_list())\n else:\n return np.shape(tensor)\n\n def to_int(self, x, int64=False):\n return tf.cast(x, tf.int64) if int64 else tf.cast(x, tf.int32)\n\n def to_complex(self, x):\n if self.dtype(x) in (np.complex64, np.complex128):\n return x\n if self.dtype(x) == np.float64:\n return tf.to_complex128(x)\n else:\n return tf.to_complex64(x)\n\n def gather(self, values, indices):\n if isinstance(indices, slice):\n return values[indices]\n return tf.gather(values, indices)\n\n def gather_nd(self, values, indices, batch_dims=0):\n if batch_dims == 0:\n return tf.gather_nd(values, indices)\n elif version.parse(tf.__version__) >= version.parse('1.14.0'):\n return tf.gather_nd(values, indices, batch_dims=batch_dims)\n else:\n if batch_dims > 1:\n raise NotImplementedError('batch_dims > 1 only supported on TensorFlow >= 1.14')\n batch_size = self.shape(values)[0]\n batch_ids = tf.reshape(tf.range(batch_size), [batch_size] + [1] * (self.ndims(indices) - 1))\n batch_ids = tf.tile(batch_ids, [1] + self.shape(indices)[1:-1] + [1])\n indices = tf.concat([batch_ids, indices], -1)\n return tf.gather_nd(values, indices)\n\n def unstack(self, tensor, axis=0, keepdims=False):\n unstacked = tf.unstack(tensor, axis=axis)\n if keepdims:\n unstacked = [self.expand_dims(c, axis=axis) for c in unstacked]\n return unstacked\n\n def std(self, x, axis=None, keepdims=False):\n _mean, var = tf.nn.moments(x, axis, keepdims=keepdims)\n return tf.sqrt(var)\n\n def boolean_mask(self, x, mask):\n return tf.boolean_mask(x, mask)\n\n def isfinite(self, x):\n return tf.is_finite(x)\n\n def any(self, boolean_tensor, axis=None, keepdims=False):\n return tf.reduce_any(boolean_tensor, axis=axis, keepdims=keepdims)\n\n def all(self, 
boolean_tensor, axis=None, keepdims=False):\n return tf.reduce_all(boolean_tensor, axis=axis, keepdims=keepdims)\n\n def scatter(self, points, indices, values, shape, duplicates_handling='undefined'):\n # Change indexing so batch number is included as first element of the index, for example: [0,31,24] indexes the first batch (batch 0) and 2D coordinates (31,24).\n buffer = tf.zeros(shape, dtype=values.dtype)\n\n repetitions = []\n for dim in range(len(indices.shape) - 1):\n if values.shape[dim] == 1:\n repetitions.append(indices.shape[dim])\n else:\n assert indices.shape[dim] == values.shape[dim]\n repetitions.append(1)\n repetitions.append(1)\n values = self.tile(values, repetitions)\n\n if duplicates_handling == 'add':\n # Only for Tensorflow with custom gradient\n @tf.custom_gradient\n def scatter_density(points, indices, values):\n result = tf.tensor_scatter_add(buffer, indices, values)\n\n def grad(dr):\n return self.resample(gradient(dr, difference='central'), points), None, None\n\n return result, grad\n\n return scatter_density(points, indices, values)\n elif duplicates_handling == 'mean':\n # Won't entirely work with out of bounds particles (still counted in mean)\n count = tf.tensor_scatter_add(buffer, indices, tf.ones_like(values))\n total = tf.tensor_scatter_add(buffer, indices, values)\n return total / tf.maximum(1.0, count)\n else: # last, any, undefined\n # indices = self.to_int(indices, int64=True)\n # st = tf.SparseTensor(indices, values, shape) # ToDo this only supports 2D shapes\n # st = tf.sparse.reorder(st) # only needed if not ordered\n # return tf.sparse.to_dense(st)\n count = tf.tensor_scatter_add(buffer, indices, tf.ones_like(values))\n total = tf.tensor_scatter_add(buffer, indices, values)\n return total / tf.maximum(1.0, count)\n\n def fft(self, x):\n rank = len(x.shape) - 2\n assert rank >= 1\n x = self.to_complex(x)\n if rank == 1:\n return tf.stack([tf.fft(c) for c in tf.unstack(x, axis=-1)], axis=-1)\n elif rank == 2:\n return tf.stack([tf.fft2d(c) for c in tf.unstack(x, axis=-1)], axis=-1)\n elif rank == 3:\n return tf.stack([tf.fft3d(c) for c in tf.unstack(x, axis=-1)], axis=-1)\n else:\n raise NotImplementedError('n-dimensional FFT not implemented.')\n\n def ifft(self, k):\n rank = len(k.shape) - 2\n assert rank >= 1\n if rank == 1:\n return tf.stack([tf.ifft(c) for c in tf.unstack(k, axis=-1)], axis=-1)\n elif rank == 2:\n return tf.stack([tf.ifft2d(c) for c in tf.unstack(k, axis=-1)], axis=-1)\n elif rank == 3:\n return tf.stack([tf.ifft3d(c) for c in tf.unstack(k, axis=-1)], axis=-1)\n else:\n raise NotImplementedError('n-dimensional inverse FFT not implemented.')\n\n def imag(self, complex):\n return tf.imag(complex)\n\n def real(self, complex):\n return tf.real(complex)\n\n def cast(self, x, dtype):\n return tf.cast(x, dtype)\n\n def sin(self, x):\n return tf.sin(x)\n\n def cos(self, x):\n return tf.cos(x)\n\n def dtype(self, array):\n if self.is_tensor(array, only_native=True):\n return array.dtype.as_numpy_dtype\n else:\n return SciPyBackend().dtype(array)\n\n def sparse_tensor(self, indices, values, shape):\n return tf.SparseTensor(indices=indices, values=values, dense_shape=shape)\n\n\n# from niftynet.layer.resampler.py\n# https://cmiclab.cs.ucl.ac.uk/CMIC/NiftyNet/blob/69c98e5a95cc6788ad9fb8c5e27dc24d1acec634/niftynet/layer/resampler.py\n\n\nCOORDINATES_TYPE = tf.int32\nEPS = 1e-6\n\n\ndef tensor_spatial_rank(tensor):\n return len(tensor.shape) - 2\n\n\ndef unit_direction(dim, spatial_rank): # ordered like z,y,x\n direction = [1 if i == dim else 
0 for i in range(spatial_rank)]\n for _i in range(spatial_rank):\n direction = tf.expand_dims(direction, axis=0)\n return direction\n\n\ndef _resample_no_pack(grid, coords, boundary_func):\n resolution = np.array([int(d) for d in grid.shape[1:-1]])\n sp_rank = tensor_spatial_rank(grid)\n\n floor = boundary_func(tf.floor(coords), resolution)\n up_weights = coords - floor\n lo_weights = TFBackend().unstack(1 - up_weights, axis=-1, keepdims=True)\n up_weights = TFBackend().unstack(up_weights, axis=-1, keepdims=True)\n base_coords = tf.cast(floor, tf.int32)\n\n def interpolate_nd(coords, axis):\n direction = np.array([1 if ax == axis else 0 for ax in range(sp_rank)])\n print(direction.shape)\n with tf.variable_scope('coord_plus_one'):\n up_coords = coords + direction # This is extremely slow for some reason - ToDo tile direction array to have same dimensions before calling interpolate_nd?\n if axis == sp_rank - 1:\n # up_coords = boundary_func(up_coords, resolution)\n lo_values = tf.gather_nd(grid, coords, batch_dims=1)\n up_values = tf.gather_nd(grid, up_coords, batch_dims=1)\n else:\n lo_values = interpolate_nd(coords, axis + 1)\n up_values = interpolate_nd(up_coords, axis + 1)\n with tf.variable_scope('weighted_sum_axis_%d' % axis):\n return lo_values * lo_weights[axis] + up_values * up_weights[axis]\n\n with tf.variable_scope('interpolate_nd'):\n result = interpolate_nd(base_coords, 0)\n return result\n\n\ndef _resample_linear_niftynet(inputs, sample_coords, boundary, boundary_func, float_type):\n inputs = tf.convert_to_tensor(inputs)\n sample_coords = tf.convert_to_tensor(sample_coords)\n\n in_spatial_size = [int(d) for d in inputs.shape[1:-1]]\n in_spatial_rank = tensor_spatial_rank(inputs)\n batch_size = tf.shape(inputs)[0]\n\n out_spatial_rank = tensor_spatial_rank(sample_coords)\n out_spatial_size = sample_coords.get_shape().as_list()[1:-1]\n\n if sample_coords.shape[0] != inputs.shape[0]:\n sample_coords = tf.tile(sample_coords, [batch_size] + [1] * (len(sample_coords.shape) - 1))\n\n xy = tf.unstack(sample_coords, axis=-1)\n base_coords = [tf.floor(coords) for coords in xy]\n floor_coords = [tf.cast(boundary_func(x, in_spatial_size[idx]), COORDINATES_TYPE) for (idx, x) in enumerate(base_coords)]\n ceil_coords = [tf.cast(boundary_func(x + 1.0, in_spatial_size[idx]), COORDINATES_TYPE) for (idx, x) in enumerate(base_coords)]\n\n if boundary.upper() == 'ZERO':\n weight_0 = [tf.expand_dims(x - tf.cast(i, float_type), -1) for (x, i) in zip(xy, floor_coords)]\n weight_1 = [tf.expand_dims(tf.cast(i, float_type) - x, -1) for (x, i) in zip(xy, ceil_coords)]\n else:\n weight_0 = [tf.expand_dims(x - i, -1) for (x, i) in zip(xy, base_coords)]\n weight_1 = [1.0 - w for w in weight_0]\n\n batch_ids = tf.reshape(tf.range(batch_size), [batch_size] + [1] * out_spatial_rank)\n batch_ids = tf.tile(batch_ids, [1] + out_spatial_size)\n sc = (floor_coords, ceil_coords)\n binary_neighbour_ids = [[int(c) for c in format(i, '0%ib' % in_spatial_rank)] for i in range(2 ** in_spatial_rank)]\n\n def get_knot(bc):\n coord = [sc[c][i] for i, c in enumerate(bc)]\n if version.parse(tf.__version__) >= version.parse('1.14.0'):\n coord = tf.stack(coord, -1)\n return tf.gather_nd(inputs, coord, batch_dims=1) # NaN can cause negative integers here\n else:\n coord = tf.stack([batch_ids] + coord, -1)\n return tf.gather_nd(inputs, coord) # NaN can cause negative integers here\n\n samples = [get_knot(bc) for bc in binary_neighbour_ids]\n\n def _pyramid_combination(samples, w_0, w_1):\n if len(w_0) == 1:\n return samples[0] * 
w_1[0] + samples[1] * w_0[0]\n f_0 = _pyramid_combination(samples[::2], w_0[:-1], w_1[:-1])\n f_1 = _pyramid_combination(samples[1::2], w_0[:-1], w_1[:-1])\n return f_0 * w_1[-1] + f_1 * w_0[-1]\n\n return _pyramid_combination(samples, weight_0, weight_1)\n\n\ndef _boundary_snap(sample_coords, spatial_shape):\n max_indices = [l - 1 for l in spatial_shape]\n for _i in range(len(spatial_shape)):\n max_indices = tf.expand_dims(max_indices, 0)\n sample_coords = tf.minimum(sample_coords, max_indices)\n sample_coords = tf.maximum(sample_coords, 0)\n return sample_coords\n\n\ndef _boundary_replicate(sample_coords, input_size):\n return tf.maximum(tf.minimum(sample_coords, input_size - 1), 0)\n\n\ndef _boundary_circular(sample_coords, input_size):\n return tf.mod(tf.mod(sample_coords, input_size) + input_size, input_size)\n\n\ndef _boundary_symmetric(sample_coords, input_size):\n sample_coords = _boundary_circular(sample_coords, 2 * input_size)\n return ((2 * input_size - 1) - tf.abs((2 * input_size - 1) - 2 * sample_coords)) // 2\n\n\ndef _boundary_reflect(sample_coords, input_size):\n sample_coords = _boundary_circular(sample_coords, 2 * input_size - 2)\n return (input_size - 1) - tf.abs((input_size - 1) - sample_coords)\n\n\nSUPPORTED_BOUNDARY = {\n 'zero': _boundary_replicate,\n 'replicate': _boundary_replicate,\n 'circular': _boundary_circular,\n 'symmetric': _boundary_symmetric,\n 'reflect': _boundary_reflect,\n}\n"
] | [
[
"tensorflow.real",
"tensorflow.reduce_max",
"tensorflow.reshape",
"tensorflow.unstack",
"tensorflow.cos",
"tensorflow.gather_nd",
"tensorflow.round",
"tensorflow.variable_scope",
"tensorflow.matmul",
"tensorflow.to_complex128",
"tensorflow.abs",
"tensorflow.convert_to_tensor",
"tensorflow.concat",
"tensorflow.einsum",
"tensorflow.identity",
"tensorflow.executing_eagerly",
"tensorflow.reduce_sum",
"tensorflow.mod",
"tensorflow.minimum",
"tensorflow.sign",
"tensorflow.div_no_nan",
"tensorflow.ifft2d",
"tensorflow.is_finite",
"tensorflow.reduce_all",
"tensorflow.reduce_prod",
"tensorflow.clip_by_value",
"tensorflow.fft",
"tensorflow.reduce_min",
"tensorflow.transpose",
"tensorflow.sin",
"tensorflow.to_complex64",
"tensorflow.ifft",
"tensorflow.stack",
"tensorflow.shape",
"tensorflow.imag",
"tensorflow.ones_like",
"tensorflow.fft2d",
"tensorflow.expand_dims",
"tensorflow.zeros_like",
"tensorflow.tensordot",
"tensorflow.cast",
"tensorflow.while_loop",
"tensorflow.SparseTensor",
"tensorflow.TensorShape",
"tensorflow.RegisterGradient",
"tensorflow.boolean_mask",
"tensorflow.tile",
"tensorflow.floor",
"tensorflow.tensor_scatter_add",
"tensorflow.reduce_any",
"numpy.array",
"tensorflow.fft3d",
"tensorflow.py_func",
"tensorflow.nn.moments",
"tensorflow.zeros",
"tensorflow.equal",
"tensorflow.ifft3d",
"tensorflow.ceil",
"tensorflow.range",
"tensorflow.sqrt",
"tensorflow.reduce_mean",
"tensorflow.nn.conv3d",
"tensorflow.stop_gradient",
"tensorflow.nn.conv2d",
"tensorflow.exp",
"tensorflow.nn.conv1d",
"numpy.shape",
"tensorflow.random.normal",
"tensorflow.random.uniform",
"tensorflow.get_default_graph",
"tensorflow.gather",
"tensorflow.maximum"
]
] |
Nitinram23/text-to-image | [
"f819bed3dffbccd8e20b03741e3f67178729812b"
] | [
"Python 3 Codes/bert_embed.py"
] | [
"from bert_embedding import BertEmbedding\nimport numpy as np\nimport pickle\nimport argparse\nimport json\nimport os\nfrom os.path import join, isfile\nimport re\nimport h5py\n\ndef save_caption_vectors_flowers(data_dir):\n\timport time\n\t\n\timg_dir = join(data_dir, 'flowers/jpg')\n\timage_files = [f for f in os.listdir(img_dir) if 'jpg' in f]\n\t# print(image_files[300:400])\n\t# print(len(image_files))\n\timage_captions = { img_file : [] for img_file in image_files }\n\n\tcaption_dir = join(data_dir, 'flowers/text_c10')\n\tclass_dirs = []\n\tfor i in range(1, 103):\n\t\tclass_dir_name = 'class_%.5d'%(i)\n\t\tclass_dirs.append( join(caption_dir, class_dir_name))\n\n\tfor class_dir in class_dirs:\n\t\tcaption_files = [f for f in os.listdir(class_dir) if 'txt' in f]\n\t\tfor cap_file in caption_files:\n\t\t\twith open(join(class_dir,cap_file)) as f:\n\t\t\t\tcaptions = f.read().split('\\n')\n\t\t\timg_file = cap_file[0:11] + \".jpg\"\n\t\t\t# 5 captions per image\n\t\t\timage_captions[img_file] += [cap for cap in captions if len(cap) > 0][0:5]\n\n\tencoded_captions = {}\n\tbert_embedding = BertEmbedding()\n\t\n\tfor i, img in enumerate(image_captions):\n\t\tst = time.time()\n\t\tembed_list = []\n\t\tembed_sum = np.zeros(768)\n\t\tembedding = bert_embedding(image_captions[img],'avg')\n\t\tfor sent in range(len(image_captions[img])):\n\t\t\tword_embed_list = embedding[sent][1]\n\t\t\tfor word_embed in word_embed_list:\n\t\t\t\tembed_sum += word_embed\n\t\t\tembed_list.append(embed_sum/len(word_embed_list))\n\t\tembed_list_np = np.asarray(embed_list)\n\t\tencoded_captions[img] = embed_list_np\n\t\tprint(i, len(image_captions), img)\n\t\tprint(\"Seconds\", time.time() - st)\n\t\t\n\th = h5py.File(join(data_dir, 'flower_bert.hdf5'))\n\tfor key in encoded_captions:\n\t\th.create_dataset(key, data=encoded_captions[key])\n\th.close()\n\n\ndef main():\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('--split', type=str, default='train',\n help='train/val')\n\tparser.add_argument('--data_dir', type=str, default='Data',\n help='Data directory')\n\tparser.add_argument('--batch_size', type=int, default=64,\n help='Batch Size')\n\tparser.add_argument('--data_set', type=str, default='flowers',\n help='Data Set : Flowers, MS-COCO')\n\targs = parser.parse_args()\n\t\n\tif args.data_set == 'flowers':\n\t\tsave_caption_vectors_flowers(args.data_dir)\n\telse:\n\t\tprint('incorrect data')\n\nif __name__ == '__main__':\n\tmain()\n\n"
] | [
[
"numpy.asarray",
"numpy.zeros"
]
] |
developing-coder/pandas | [
"9feb3ad92cc0397a04b665803a49299ee7aa1037",
"9feb3ad92cc0397a04b665803a49299ee7aa1037",
"9feb3ad92cc0397a04b665803a49299ee7aa1037"
] | [
"pandas/tests/reshape/test_cut.py",
"pandas/tests/internals/test_internals.py",
"pandas/core/strings.py"
] | [
"import numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import (\n Categorical, DataFrame, DatetimeIndex, Index, Interval, IntervalIndex,\n Series, TimedeltaIndex, Timestamp, cut, date_range, isna, qcut,\n timedelta_range, to_datetime)\nfrom pandas.api.types import CategoricalDtype as CDT\nimport pandas.core.reshape.tile as tmod\nimport pandas.util.testing as tm\n\n\ndef test_simple():\n data = np.ones(5, dtype=\"int64\")\n result = cut(data, 4, labels=False)\n\n expected = np.array([1, 1, 1, 1, 1])\n tm.assert_numpy_array_equal(result, expected, check_dtype=False)\n\n\ndef test_bins():\n data = np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1])\n result, bins = cut(data, 3, retbins=True)\n\n intervals = IntervalIndex.from_breaks(bins.round(3))\n intervals = intervals.take([0, 0, 0, 1, 2, 0])\n expected = Categorical(intervals, ordered=True)\n\n tm.assert_categorical_equal(result, expected)\n tm.assert_almost_equal(bins, np.array([0.1905, 3.36666667,\n 6.53333333, 9.7]))\n\n\ndef test_right():\n data = np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1, 2.575])\n result, bins = cut(data, 4, right=True, retbins=True)\n\n intervals = IntervalIndex.from_breaks(bins.round(3))\n expected = Categorical(intervals, ordered=True)\n expected = expected.take([0, 0, 0, 2, 3, 0, 0])\n\n tm.assert_categorical_equal(result, expected)\n tm.assert_almost_equal(bins, np.array([0.1905, 2.575, 4.95, 7.325, 9.7]))\n\n\ndef test_no_right():\n data = np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1, 2.575])\n result, bins = cut(data, 4, right=False, retbins=True)\n\n intervals = IntervalIndex.from_breaks(bins.round(3), closed=\"left\")\n intervals = intervals.take([0, 0, 0, 2, 3, 0, 1])\n expected = Categorical(intervals, ordered=True)\n\n tm.assert_categorical_equal(result, expected)\n tm.assert_almost_equal(bins, np.array([0.2, 2.575, 4.95, 7.325, 9.7095]))\n\n\ndef test_array_like():\n data = [.2, 1.4, 2.5, 6.2, 9.7, 2.1]\n result, bins = cut(data, 3, retbins=True)\n\n intervals = IntervalIndex.from_breaks(bins.round(3))\n intervals = intervals.take([0, 0, 0, 1, 2, 0])\n expected = Categorical(intervals, ordered=True)\n\n tm.assert_categorical_equal(result, expected)\n tm.assert_almost_equal(bins, np.array([0.1905, 3.36666667,\n 6.53333333, 9.7]))\n\n\ndef test_bins_from_interval_index():\n c = cut(range(5), 3)\n expected = c\n result = cut(range(5), bins=expected.categories)\n tm.assert_categorical_equal(result, expected)\n\n expected = Categorical.from_codes(np.append(c.codes, -1),\n categories=c.categories,\n ordered=True)\n result = cut(range(6), bins=expected.categories)\n tm.assert_categorical_equal(result, expected)\n\n\ndef test_bins_from_interval_index_doc_example():\n # Make sure we preserve the bins.\n ages = np.array([10, 15, 13, 12, 23, 25, 28, 59, 60])\n c = cut(ages, bins=[0, 18, 35, 70])\n expected = IntervalIndex.from_tuples([(0, 18), (18, 35), (35, 70)])\n tm.assert_index_equal(c.categories, expected)\n\n result = cut([25, 20, 50], bins=c.categories)\n tm.assert_index_equal(result.categories, expected)\n tm.assert_numpy_array_equal(result.codes,\n np.array([1, 1, 2], dtype=\"int8\"))\n\n\ndef test_bins_not_overlapping_from_interval_index():\n # see gh-23980\n msg = \"Overlapping IntervalIndex is not accepted\"\n ii = IntervalIndex.from_tuples([(0, 10), (2, 12), (4, 14)])\n\n with pytest.raises(ValueError, match=msg):\n cut([5, 6], bins=ii)\n\n\ndef test_bins_not_monotonic():\n msg = \"bins must increase monotonically\"\n data = [.2, 1.4, 2.5, 6.2, 9.7, 2.1]\n\n with pytest.raises(ValueError, match=msg):\n 
cut(data, [0.1, 1.5, 1, 10])\n\n\[email protected](\"x, bins, expected\", [\n (date_range(\"2017-12-31\", periods=3),\n [Timestamp.min, Timestamp('2018-01-01'), Timestamp.max],\n IntervalIndex.from_tuples([\n (Timestamp.min, Timestamp('2018-01-01')),\n (Timestamp('2018-01-01'), Timestamp.max)])),\n\n ([-1, 0, 1],\n np.array([np.iinfo(np.int64).min, 0, np.iinfo(np.int64).max],\n dtype=\"int64\"),\n IntervalIndex.from_tuples([\n (np.iinfo(np.int64).min, 0),\n (0, np.iinfo(np.int64).max)])),\n\n ([np.timedelta64(-1), np.timedelta64(0), np.timedelta64(1)],\n np.array([\n np.timedelta64(-np.iinfo(np.int64).max),\n np.timedelta64(0),\n np.timedelta64(np.iinfo(np.int64).max)]),\n IntervalIndex.from_tuples([\n (np.timedelta64(-np.iinfo(np.int64).max), np.timedelta64(0)),\n (np.timedelta64(0), np.timedelta64(np.iinfo(np.int64).max))])),\n])\ndef test_bins_monotonic_not_overflowing(x, bins, expected):\n # GH 26045\n result = cut(x, bins)\n tm.assert_index_equal(result.categories, expected)\n\n\ndef test_wrong_num_labels():\n msg = \"Bin labels must be one fewer than the number of bin edges\"\n data = [.2, 1.4, 2.5, 6.2, 9.7, 2.1]\n\n with pytest.raises(ValueError, match=msg):\n cut(data, [0, 1, 10], labels=[\"foo\", \"bar\", \"baz\"])\n\n\[email protected](\"x,bins,msg\", [\n ([], 2, \"Cannot cut empty array\"),\n ([1, 2, 3], 0.5, \"`bins` should be a positive integer\")\n])\ndef test_cut_corner(x, bins, msg):\n with pytest.raises(ValueError, match=msg):\n cut(x, bins)\n\n\[email protected](\"arg\", [2, np.eye(2), DataFrame(np.eye(2))])\[email protected](\"cut_func\", [cut, qcut])\ndef test_cut_not_1d_arg(arg, cut_func):\n msg = \"Input array must be 1 dimensional\"\n with pytest.raises(ValueError, match=msg):\n cut_func(arg, 2)\n\n\[email protected]('data', [\n [0, 1, 2, 3, 4, np.inf],\n [-np.inf, 0, 1, 2, 3, 4],\n [-np.inf, 0, 1, 2, 3, 4, np.inf]])\ndef test_int_bins_with_inf(data):\n # GH 24314\n msg = 'cannot specify integer `bins` when input data contains infinity'\n with pytest.raises(ValueError, match=msg):\n cut(data, bins=3)\n\n\ndef test_cut_out_of_range_more():\n # see gh-1511\n name = \"x\"\n\n ser = Series([0, -1, 0, 1, -3], name=name)\n ind = cut(ser, [0, 1], labels=False)\n\n exp = Series([np.nan, np.nan, np.nan, 0, np.nan], name=name)\n tm.assert_series_equal(ind, exp)\n\n\[email protected](\"right,breaks,closed\", [\n (True, [-1e-3, 0.25, 0.5, 0.75, 1], \"right\"),\n (False, [0, 0.25, 0.5, 0.75, 1 + 1e-3], \"left\")\n])\ndef test_labels(right, breaks, closed):\n arr = np.tile(np.arange(0, 1.01, 0.1), 4)\n\n result, bins = cut(arr, 4, retbins=True, right=right)\n ex_levels = IntervalIndex.from_breaks(breaks, closed=closed)\n tm.assert_index_equal(result.categories, ex_levels)\n\n\ndef test_cut_pass_series_name_to_factor():\n name = \"foo\"\n ser = Series(np.random.randn(100), name=name)\n\n factor = cut(ser, 4)\n assert factor.name == name\n\n\ndef test_label_precision():\n arr = np.arange(0, 0.73, 0.01)\n result = cut(arr, 4, precision=2)\n\n ex_levels = IntervalIndex.from_breaks([-0.00072, 0.18, 0.36, 0.54, 0.72])\n tm.assert_index_equal(result.categories, ex_levels)\n\n\[email protected](\"labels\", [None, False])\ndef test_na_handling(labels):\n arr = np.arange(0, 0.75, 0.01)\n arr[::3] = np.nan\n\n result = cut(arr, 4, labels=labels)\n result = np.asarray(result)\n\n expected = np.where(isna(arr), np.nan, result)\n tm.assert_almost_equal(result, expected)\n\n\ndef test_inf_handling():\n data = np.arange(6)\n data_ser = Series(data, dtype=\"int64\")\n\n bins = [-np.inf, 2, 4, 
np.inf]\n result = cut(data, bins)\n result_ser = cut(data_ser, bins)\n\n ex_uniques = IntervalIndex.from_breaks(bins)\n tm.assert_index_equal(result.categories, ex_uniques)\n\n assert result[5] == Interval(4, np.inf)\n assert result[0] == Interval(-np.inf, 2)\n assert result_ser[5] == Interval(4, np.inf)\n assert result_ser[0] == Interval(-np.inf, 2)\n\n\ndef test_cut_out_of_bounds():\n arr = np.random.randn(100)\n result = cut(arr, [-1, 0, 1])\n\n mask = isna(result)\n ex_mask = (arr < -1) | (arr > 1)\n tm.assert_numpy_array_equal(mask, ex_mask)\n\n\[email protected](\"get_labels,get_expected\", [\n (lambda labels: labels,\n lambda labels: Categorical([\"Medium\"] + 4 * [\"Small\"] +\n [\"Medium\", \"Large\"],\n categories=labels, ordered=True)),\n (lambda labels: Categorical.from_codes([0, 1, 2], labels),\n lambda labels: Categorical.from_codes([1] + 4 * [0] + [1, 2], labels))\n])\ndef test_cut_pass_labels(get_labels, get_expected):\n bins = [0, 25, 50, 100]\n arr = [50, 5, 10, 15, 20, 30, 70]\n labels = [\"Small\", \"Medium\", \"Large\"]\n\n result = cut(arr, bins, labels=get_labels(labels))\n tm.assert_categorical_equal(result, get_expected(labels))\n\n\ndef test_cut_pass_labels_compat():\n # see gh-16459\n arr = [50, 5, 10, 15, 20, 30, 70]\n labels = [\"Good\", \"Medium\", \"Bad\"]\n\n result = cut(arr, 3, labels=labels)\n exp = cut(arr, 3, labels=Categorical(labels, categories=labels,\n ordered=True))\n tm.assert_categorical_equal(result, exp)\n\n\[email protected](\"x\", [np.arange(11.), np.arange(11.) / 1e10])\ndef test_round_frac_just_works(x):\n # It works.\n cut(x, 2)\n\n\[email protected](\"val,precision,expected\", [\n (-117.9998, 3, -118),\n (117.9998, 3, 118),\n (117.9998, 2, 118),\n (0.000123456, 2, 0.00012)\n])\ndef test_round_frac(val, precision, expected):\n # see gh-1979\n result = tmod._round_frac(val, precision=precision)\n assert result == expected\n\n\ndef test_cut_return_intervals():\n ser = Series([0, 1, 2, 3, 4, 5, 6, 7, 8])\n result = cut(ser, 3)\n\n exp_bins = np.linspace(0, 8, num=4).round(3)\n exp_bins[0] -= 0.008\n\n expected = Series(IntervalIndex.from_breaks(exp_bins, closed=\"right\").take(\n [0, 0, 0, 1, 1, 1, 2, 2, 2])).astype(CDT(ordered=True))\n tm.assert_series_equal(result, expected)\n\n\ndef test_series_ret_bins():\n # see gh-8589\n ser = Series(np.arange(4))\n result, bins = cut(ser, 2, retbins=True)\n\n expected = Series(IntervalIndex.from_breaks(\n [-0.003, 1.5, 3], closed=\"right\").repeat(2)).astype(CDT(ordered=True))\n tm.assert_series_equal(result, expected)\n\n\[email protected](\"kwargs,msg\", [\n (dict(duplicates=\"drop\"), None),\n (dict(), \"Bin edges must be unique\"),\n (dict(duplicates=\"raise\"), \"Bin edges must be unique\"),\n (dict(duplicates=\"foo\"), \"invalid value for 'duplicates' parameter\")\n])\ndef test_cut_duplicates_bin(kwargs, msg):\n # see gh-20947\n bins = [0, 2, 4, 6, 10, 10]\n values = Series(np.array([1, 3, 5, 7, 9]), index=[\"a\", \"b\", \"c\", \"d\", \"e\"])\n\n if msg is not None:\n with pytest.raises(ValueError, match=msg):\n cut(values, bins, **kwargs)\n else:\n result = cut(values, bins, **kwargs)\n expected = cut(values, pd.unique(bins))\n tm.assert_series_equal(result, expected)\n\n\[email protected](\"data\", [9.0, -9.0, 0.0])\[email protected](\"length\", [1, 2])\ndef test_single_bin(data, length):\n # see gh-14652, gh-15428\n ser = Series([data] * length)\n result = cut(ser, 1, labels=False)\n\n expected = Series([0] * length)\n tm.assert_series_equal(result, expected)\n\n\[email protected](\n 
\"array_1_writeable,array_2_writeable\",\n [(True, True), (True, False), (False, False)])\ndef test_cut_read_only(array_1_writeable, array_2_writeable):\n # issue 18773\n array_1 = np.arange(0, 100, 10)\n array_1.flags.writeable = array_1_writeable\n\n array_2 = np.arange(0, 100, 10)\n array_2.flags.writeable = array_2_writeable\n\n hundred_elements = np.arange(100)\n tm.assert_categorical_equal(cut(hundred_elements, array_1),\n cut(hundred_elements, array_2))\n\n\[email protected](\"conv\", [\n lambda v: Timestamp(v),\n lambda v: to_datetime(v),\n lambda v: np.datetime64(v),\n lambda v: Timestamp(v).to_pydatetime(),\n])\ndef test_datetime_bin(conv):\n data = [np.datetime64(\"2012-12-13\"), np.datetime64(\"2012-12-15\")]\n bin_data = [\"2012-12-12\", \"2012-12-14\", \"2012-12-16\"]\n\n expected = Series(IntervalIndex([\n Interval(Timestamp(bin_data[0]), Timestamp(bin_data[1])),\n Interval(Timestamp(bin_data[1]), Timestamp(bin_data[2]))])).astype(\n CDT(ordered=True))\n\n bins = [conv(v) for v in bin_data]\n result = Series(cut(data, bins=bins))\n tm.assert_series_equal(result, expected)\n\n\[email protected](\"data\", [\n to_datetime(Series([\"2013-01-01\", \"2013-01-02\", \"2013-01-03\"])),\n [np.datetime64(\"2013-01-01\"), np.datetime64(\"2013-01-02\"),\n np.datetime64(\"2013-01-03\")],\n np.array([np.datetime64(\"2013-01-01\"), np.datetime64(\"2013-01-02\"),\n np.datetime64(\"2013-01-03\")]),\n DatetimeIndex([\"2013-01-01\", \"2013-01-02\", \"2013-01-03\"])\n])\ndef test_datetime_cut(data):\n # see gh-14714\n #\n # Testing time data when it comes in various collection types.\n result, _ = cut(data, 3, retbins=True)\n expected = Series(IntervalIndex([\n Interval(Timestamp(\"2012-12-31 23:57:07.200000\"),\n Timestamp(\"2013-01-01 16:00:00\")),\n Interval(Timestamp(\"2013-01-01 16:00:00\"),\n Timestamp(\"2013-01-02 08:00:00\")),\n Interval(Timestamp(\"2013-01-02 08:00:00\"),\n Timestamp(\"2013-01-03 00:00:00\"))])).astype(CDT(ordered=True))\n tm.assert_series_equal(Series(result), expected)\n\n\[email protected](\"bins\", [\n 3, [Timestamp(\"2013-01-01 04:57:07.200000\"),\n Timestamp(\"2013-01-01 21:00:00\"),\n Timestamp(\"2013-01-02 13:00:00\"),\n Timestamp(\"2013-01-03 05:00:00\")]])\[email protected](\"box\", [list, np.array, Index, Series])\ndef test_datetime_tz_cut(bins, box):\n # see gh-19872\n tz = \"US/Eastern\"\n s = Series(date_range(\"20130101\", periods=3, tz=tz))\n\n if not isinstance(bins, int):\n bins = box(bins)\n\n result = cut(s, bins)\n expected = Series(IntervalIndex([\n Interval(Timestamp(\"2012-12-31 23:57:07.200000\", tz=tz),\n Timestamp(\"2013-01-01 16:00:00\", tz=tz)),\n Interval(Timestamp(\"2013-01-01 16:00:00\", tz=tz),\n Timestamp(\"2013-01-02 08:00:00\", tz=tz)),\n Interval(Timestamp(\"2013-01-02 08:00:00\", tz=tz),\n Timestamp(\"2013-01-03 00:00:00\", tz=tz))])).astype(\n CDT(ordered=True))\n tm.assert_series_equal(result, expected)\n\n\ndef test_datetime_nan_error():\n msg = \"bins must be of datetime64 dtype\"\n\n with pytest.raises(ValueError, match=msg):\n cut(date_range(\"20130101\", periods=3), bins=[0, 2, 4])\n\n\ndef test_datetime_nan_mask():\n result = cut(date_range(\"20130102\", periods=5),\n bins=date_range(\"20130101\", periods=2))\n\n mask = result.categories.isna()\n tm.assert_numpy_array_equal(mask, np.array([False]))\n\n mask = result.isna()\n tm.assert_numpy_array_equal(mask, np.array([False, True, True,\n True, True]))\n\n\[email protected](\"tz\", [None, \"UTC\", \"US/Pacific\"])\ndef test_datetime_cut_roundtrip(tz):\n # see gh-19891\n ser 
= Series(date_range(\"20180101\", periods=3, tz=tz))\n result, result_bins = cut(ser, 2, retbins=True)\n\n expected = cut(ser, result_bins)\n tm.assert_series_equal(result, expected)\n\n expected_bins = DatetimeIndex([\"2017-12-31 23:57:07.200000\",\n \"2018-01-02 00:00:00\",\n \"2018-01-03 00:00:00\"])\n expected_bins = expected_bins.tz_localize(tz)\n tm.assert_index_equal(result_bins, expected_bins)\n\n\ndef test_timedelta_cut_roundtrip():\n # see gh-19891\n ser = Series(timedelta_range(\"1day\", periods=3))\n result, result_bins = cut(ser, 2, retbins=True)\n\n expected = cut(ser, result_bins)\n tm.assert_series_equal(result, expected)\n\n expected_bins = TimedeltaIndex([\"0 days 23:57:07.200000\",\n \"2 days 00:00:00\",\n \"3 days 00:00:00\"])\n tm.assert_index_equal(result_bins, expected_bins)\n",
"from collections import OrderedDict\nfrom datetime import date, datetime\nfrom distutils.version import LooseVersion\nimport itertools\nimport operator\nimport re\nimport sys\n\nimport numpy as np\nimport pytest\n\nfrom pandas._libs.internals import BlockPlacement\nfrom pandas.compat import lrange\n\nimport pandas as pd\nfrom pandas import (\n Categorical, DataFrame, DatetimeIndex, Index, MultiIndex, Series,\n SparseArray)\nimport pandas.core.algorithms as algos\nfrom pandas.core.arrays import DatetimeArray, TimedeltaArray\nfrom pandas.core.internals import BlockManager, SingleBlockManager, make_block\nimport pandas.util.testing as tm\nfrom pandas.util.testing import (\n assert_almost_equal, assert_frame_equal, assert_series_equal, randn)\n\n# in 3.6.1 a c-api slicing function changed, see src/compat_helper.h\nPY361 = LooseVersion(sys.version) >= LooseVersion('3.6.1')\n\n\[email protected]\ndef mgr():\n return create_mgr(\n 'a: f8; b: object; c: f8; d: object; e: f8;'\n 'f: bool; g: i8; h: complex; i: datetime-1; j: datetime-2;'\n 'k: M8[ns, US/Eastern]; l: M8[ns, CET];')\n\n\ndef assert_block_equal(left, right):\n tm.assert_numpy_array_equal(left.values, right.values)\n assert left.dtype == right.dtype\n assert isinstance(left.mgr_locs, BlockPlacement)\n assert isinstance(right.mgr_locs, BlockPlacement)\n tm.assert_numpy_array_equal(left.mgr_locs.as_array,\n right.mgr_locs.as_array)\n\n\ndef get_numeric_mat(shape):\n arr = np.arange(shape[0])\n return np.lib.stride_tricks.as_strided(x=arr, shape=shape, strides=(\n arr.itemsize, ) + (0, ) * (len(shape) - 1)).copy()\n\n\nN = 10\n\n\ndef create_block(typestr, placement, item_shape=None, num_offset=0):\n \"\"\"\n Supported typestr:\n\n * float, f8, f4, f2\n * int, i8, i4, i2, i1\n * uint, u8, u4, u2, u1\n * complex, c16, c8\n * bool\n * object, string, O\n * datetime, dt, M8[ns], M8[ns, tz]\n * timedelta, td, m8[ns]\n * sparse (SparseArray with fill_value=0.0)\n * sparse_na (SparseArray with fill_value=np.nan)\n * category, category2\n\n \"\"\"\n placement = BlockPlacement(placement)\n num_items = len(placement)\n\n if item_shape is None:\n item_shape = (N, )\n\n shape = (num_items, ) + item_shape\n\n mat = get_numeric_mat(shape)\n\n if typestr in ('float', 'f8', 'f4', 'f2', 'int', 'i8', 'i4', 'i2', 'i1',\n 'uint', 'u8', 'u4', 'u2', 'u1'):\n values = mat.astype(typestr) + num_offset\n elif typestr in ('complex', 'c16', 'c8'):\n values = 1.j * (mat.astype(typestr) + num_offset)\n elif typestr in ('object', 'string', 'O'):\n values = np.reshape(['A%d' % i for i in mat.ravel() + num_offset],\n shape)\n elif typestr in ('b', 'bool', ):\n values = np.ones(shape, dtype=np.bool_)\n elif typestr in ('datetime', 'dt', 'M8[ns]'):\n values = (mat * 1e9).astype('M8[ns]')\n elif typestr.startswith('M8[ns'):\n # datetime with tz\n m = re.search(r'M8\\[ns,\\s*(\\w+\\/?\\w*)\\]', typestr)\n assert m is not None, \"incompatible typestr -> {0}\".format(typestr)\n tz = m.groups()[0]\n assert num_items == 1, \"must have only 1 num items for a tz-aware\"\n values = DatetimeIndex(np.arange(N) * 1e9, tz=tz)\n elif typestr in ('timedelta', 'td', 'm8[ns]'):\n values = (mat * 1).astype('m8[ns]')\n elif typestr in ('category', ):\n values = Categorical([1, 1, 2, 2, 3, 3, 3, 3, 4, 4])\n elif typestr in ('category2', ):\n values = Categorical(['a', 'a', 'a', 'a', 'b', 'b', 'c', 'c', 'c', 'd'\n ])\n elif typestr in ('sparse', 'sparse_na'):\n # FIXME: doesn't support num_rows != 10\n assert shape[-1] == 10\n assert all(s == 1 for s in shape[:-1])\n if 
typestr.endswith('_na'):\n fill_value = np.nan\n else:\n fill_value = 0.0\n values = SparseArray([fill_value, fill_value, 1, 2, 3, fill_value,\n 4, 5, fill_value, 6], fill_value=fill_value)\n arr = values.sp_values.view()\n arr += (num_offset - 1)\n else:\n raise ValueError('Unsupported typestr: \"%s\"' % typestr)\n\n return make_block(values, placement=placement, ndim=len(shape))\n\n\ndef create_single_mgr(typestr, num_rows=None):\n if num_rows is None:\n num_rows = N\n\n return SingleBlockManager(\n create_block(typestr, placement=slice(0, num_rows), item_shape=()),\n np.arange(num_rows))\n\n\ndef create_mgr(descr, item_shape=None):\n \"\"\"\n Construct BlockManager from string description.\n\n String description syntax looks similar to np.matrix initializer. It looks\n like this::\n\n a,b,c: f8; d,e,f: i8\n\n Rules are rather simple:\n\n * see list of supported datatypes in `create_block` method\n * components are semicolon-separated\n * each component is `NAME,NAME,NAME: DTYPE_ID`\n * whitespace around colons & semicolons are removed\n * components with same DTYPE_ID are combined into single block\n * to force multiple blocks with same dtype, use '-SUFFIX'::\n\n 'a:f8-1; b:f8-2; c:f8-foobar'\n\n \"\"\"\n if item_shape is None:\n item_shape = (N, )\n\n offset = 0\n mgr_items = []\n block_placements = OrderedDict()\n for d in descr.split(';'):\n d = d.strip()\n if not len(d):\n continue\n names, blockstr = d.partition(':')[::2]\n blockstr = blockstr.strip()\n names = names.strip().split(',')\n\n mgr_items.extend(names)\n placement = list(np.arange(len(names)) + offset)\n try:\n block_placements[blockstr].extend(placement)\n except KeyError:\n block_placements[blockstr] = placement\n offset += len(names)\n\n mgr_items = Index(mgr_items)\n\n blocks = []\n num_offset = 0\n for blockstr, placement in block_placements.items():\n typestr = blockstr.split('-')[0]\n blocks.append(create_block(typestr,\n placement,\n item_shape=item_shape,\n num_offset=num_offset, ))\n num_offset += len(placement)\n\n return BlockManager(sorted(blocks, key=lambda b: b.mgr_locs[0]),\n [mgr_items] + [np.arange(n) for n in item_shape])\n\n\nclass TestBlock:\n\n def setup_method(self, method):\n # self.fblock = get_float_ex() # a,c,e\n # self.cblock = get_complex_ex() #\n # self.oblock = get_obj_ex()\n # self.bool_block = get_bool_ex()\n # self.int_block = get_int_ex()\n\n self.fblock = create_block('float', [0, 2, 4])\n self.cblock = create_block('complex', [7])\n self.oblock = create_block('object', [1, 3])\n self.bool_block = create_block('bool', [5])\n self.int_block = create_block('int', [6])\n\n def test_constructor(self):\n int32block = create_block('i4', [0])\n assert int32block.dtype == np.int32\n\n def test_pickle(self):\n def _check(blk):\n assert_block_equal(tm.round_trip_pickle(blk), blk)\n\n _check(self.fblock)\n _check(self.cblock)\n _check(self.oblock)\n _check(self.bool_block)\n\n def test_mgr_locs(self):\n assert isinstance(self.fblock.mgr_locs, BlockPlacement)\n tm.assert_numpy_array_equal(self.fblock.mgr_locs.as_array,\n np.array([0, 2, 4], dtype=np.int64))\n\n def test_attrs(self):\n assert self.fblock.shape == self.fblock.values.shape\n assert self.fblock.dtype == self.fblock.values.dtype\n assert len(self.fblock) == len(self.fblock.values)\n\n def test_merge(self):\n avals = randn(2, 10)\n bvals = randn(2, 10)\n\n ref_cols = Index(['e', 'a', 'b', 'd', 'f'])\n\n ablock = make_block(avals, ref_cols.get_indexer(['e', 'b']))\n bblock = make_block(bvals, ref_cols.get_indexer(['a', 'd']))\n merged = 
ablock.merge(bblock)\n tm.assert_numpy_array_equal(merged.mgr_locs.as_array,\n np.array([0, 1, 2, 3], dtype=np.int64))\n tm.assert_numpy_array_equal(merged.values[[0, 2]], np.array(avals))\n tm.assert_numpy_array_equal(merged.values[[1, 3]], np.array(bvals))\n\n # TODO: merge with mixed type?\n\n def test_copy(self):\n cop = self.fblock.copy()\n assert cop is not self.fblock\n assert_block_equal(self.fblock, cop)\n\n def test_reindex_index(self):\n pass\n\n def test_reindex_cast(self):\n pass\n\n def test_insert(self):\n pass\n\n def test_delete(self):\n newb = self.fblock.copy()\n newb.delete(0)\n assert isinstance(newb.mgr_locs, BlockPlacement)\n tm.assert_numpy_array_equal(newb.mgr_locs.as_array,\n np.array([2, 4], dtype=np.int64))\n assert (newb.values[0] == 1).all()\n\n newb = self.fblock.copy()\n newb.delete(1)\n assert isinstance(newb.mgr_locs, BlockPlacement)\n tm.assert_numpy_array_equal(newb.mgr_locs.as_array,\n np.array([0, 4], dtype=np.int64))\n assert (newb.values[1] == 2).all()\n\n newb = self.fblock.copy()\n newb.delete(2)\n tm.assert_numpy_array_equal(newb.mgr_locs.as_array,\n np.array([0, 2], dtype=np.int64))\n assert (newb.values[1] == 1).all()\n\n newb = self.fblock.copy()\n with pytest.raises(Exception):\n newb.delete(3)\n\n def test_make_block_same_class(self):\n # issue 19431\n block = create_block('M8[ns, US/Eastern]', [3])\n with tm.assert_produces_warning(DeprecationWarning,\n check_stacklevel=False):\n block.make_block_same_class(block.values,\n dtype=block.values.dtype)\n\n\nclass TestDatetimeBlock:\n\n def test_try_coerce_arg(self):\n block = create_block('datetime', [0])\n\n # coerce None\n none_coerced = block._try_coerce_args(block.values, None)[1]\n assert pd.Timestamp(none_coerced) is pd.NaT\n\n # coerce different types of date bojects\n vals = (np.datetime64('2010-10-10'), datetime(2010, 10, 10),\n date(2010, 10, 10))\n for val in vals:\n coerced = block._try_coerce_args(block.values, val)[1]\n assert np.int64 == type(coerced)\n assert pd.Timestamp('2010-10-10') == pd.Timestamp(coerced)\n\n\nclass TestBlockManager:\n\n def test_constructor_corner(self):\n pass\n\n def test_attrs(self):\n mgr = create_mgr('a,b,c: f8-1; d,e,f: f8-2')\n assert mgr.nblocks == 2\n assert len(mgr) == 6\n\n def test_is_mixed_dtype(self):\n assert not create_mgr('a,b:f8').is_mixed_type\n assert not create_mgr('a:f8-1; b:f8-2').is_mixed_type\n\n assert create_mgr('a,b:f8; c,d: f4').is_mixed_type\n assert create_mgr('a,b:f8; c,d: object').is_mixed_type\n\n def test_duplicate_ref_loc_failure(self):\n tmp_mgr = create_mgr('a:bool; a: f8')\n\n axes, blocks = tmp_mgr.axes, tmp_mgr.blocks\n\n blocks[0].mgr_locs = np.array([0])\n blocks[1].mgr_locs = np.array([0])\n\n # test trying to create block manager with overlapping ref locs\n with pytest.raises(AssertionError):\n BlockManager(blocks, axes)\n\n blocks[0].mgr_locs = np.array([0])\n blocks[1].mgr_locs = np.array([1])\n mgr = BlockManager(blocks, axes)\n mgr.iget(1)\n\n def test_contains(self, mgr):\n assert 'a' in mgr\n assert 'baz' not in mgr\n\n def test_pickle(self, mgr):\n\n mgr2 = tm.round_trip_pickle(mgr)\n assert_frame_equal(DataFrame(mgr), DataFrame(mgr2))\n\n # share ref_items\n # assert mgr2.blocks[0].ref_items is mgr2.blocks[1].ref_items\n\n # GH2431\n assert hasattr(mgr2, \"_is_consolidated\")\n assert hasattr(mgr2, \"_known_consolidated\")\n\n # reset to False on load\n assert not mgr2._is_consolidated\n assert not mgr2._known_consolidated\n\n def test_non_unique_pickle(self):\n\n mgr = create_mgr('a,a,a:f8')\n mgr2 = 
tm.round_trip_pickle(mgr)\n assert_frame_equal(DataFrame(mgr), DataFrame(mgr2))\n\n mgr = create_mgr('a: f8; a: i8')\n mgr2 = tm.round_trip_pickle(mgr)\n assert_frame_equal(DataFrame(mgr), DataFrame(mgr2))\n\n def test_categorical_block_pickle(self):\n mgr = create_mgr('a: category')\n mgr2 = tm.round_trip_pickle(mgr)\n assert_frame_equal(DataFrame(mgr), DataFrame(mgr2))\n\n smgr = create_single_mgr('category')\n smgr2 = tm.round_trip_pickle(smgr)\n assert_series_equal(Series(smgr), Series(smgr2))\n\n def test_get(self):\n cols = Index(list('abc'))\n values = np.random.rand(3, 3)\n block = make_block(values=values.copy(), placement=np.arange(3))\n mgr = BlockManager(blocks=[block], axes=[cols, np.arange(3)])\n\n assert_almost_equal(mgr.get('a', fastpath=False), values[0])\n assert_almost_equal(mgr.get('b', fastpath=False), values[1])\n assert_almost_equal(mgr.get('c', fastpath=False), values[2])\n assert_almost_equal(mgr.get('a').internal_values(), values[0])\n assert_almost_equal(mgr.get('b').internal_values(), values[1])\n assert_almost_equal(mgr.get('c').internal_values(), values[2])\n\n def test_set(self):\n mgr = create_mgr('a,b,c: int', item_shape=(3, ))\n\n mgr.set('d', np.array(['foo'] * 3))\n mgr.set('b', np.array(['bar'] * 3))\n tm.assert_numpy_array_equal(mgr.get('a').internal_values(),\n np.array([0] * 3))\n tm.assert_numpy_array_equal(mgr.get('b').internal_values(),\n np.array(['bar'] * 3, dtype=np.object_))\n tm.assert_numpy_array_equal(mgr.get('c').internal_values(),\n np.array([2] * 3))\n tm.assert_numpy_array_equal(mgr.get('d').internal_values(),\n np.array(['foo'] * 3, dtype=np.object_))\n\n def test_set_change_dtype(self, mgr):\n mgr.set('baz', np.zeros(N, dtype=bool))\n\n mgr.set('baz', np.repeat('foo', N))\n assert mgr.get('baz').dtype == np.object_\n\n mgr2 = mgr.consolidate()\n mgr2.set('baz', np.repeat('foo', N))\n assert mgr2.get('baz').dtype == np.object_\n\n mgr2.set('quux', randn(N).astype(int))\n assert mgr2.get('quux').dtype == np.int_\n\n mgr2.set('quux', randn(N))\n assert mgr2.get('quux').dtype == np.float_\n\n def test_set_change_dtype_slice(self): # GH8850\n cols = MultiIndex.from_tuples([('1st', 'a'), ('2nd', 'b'), ('3rd', 'c')\n ])\n df = DataFrame([[1.0, 2, 3], [4.0, 5, 6]], columns=cols)\n df['2nd'] = df['2nd'] * 2.0\n\n blocks = df._to_dict_of_blocks()\n assert sorted(blocks.keys()) == ['float64', 'int64']\n assert_frame_equal(blocks['float64'], DataFrame(\n [[1.0, 4.0], [4.0, 10.0]], columns=cols[:2]))\n assert_frame_equal(blocks['int64'], DataFrame(\n [[3], [6]], columns=cols[2:]))\n\n def test_copy(self, mgr):\n cp = mgr.copy(deep=False)\n for blk, cp_blk in zip(mgr.blocks, cp.blocks):\n\n # view assertion\n assert cp_blk.equals(blk)\n if isinstance(blk.values, np.ndarray):\n assert cp_blk.values.base is blk.values.base\n else:\n # DatetimeTZBlock has DatetimeIndex values\n assert cp_blk.values._data.base is blk.values._data.base\n\n cp = mgr.copy(deep=True)\n for blk, cp_blk in zip(mgr.blocks, cp.blocks):\n\n # copy assertion we either have a None for a base or in case of\n # some blocks it is an array (e.g. 
datetimetz), but was copied\n assert cp_blk.equals(blk)\n if not isinstance(cp_blk.values, np.ndarray):\n assert cp_blk.values._data.base is not blk.values._data.base\n else:\n assert cp_blk.values.base is None and blk.values.base is None\n\n def test_sparse(self):\n mgr = create_mgr('a: sparse-1; b: sparse-2')\n # what to test here?\n assert mgr.as_array().dtype == np.float64\n\n def test_sparse_mixed(self):\n mgr = create_mgr('a: sparse-1; b: sparse-2; c: f8')\n assert len(mgr.blocks) == 3\n assert isinstance(mgr, BlockManager)\n\n # what to test here?\n\n def test_as_array_float(self):\n mgr = create_mgr('c: f4; d: f2; e: f8')\n assert mgr.as_array().dtype == np.float64\n\n mgr = create_mgr('c: f4; d: f2')\n assert mgr.as_array().dtype == np.float32\n\n def test_as_array_int_bool(self):\n mgr = create_mgr('a: bool-1; b: bool-2')\n assert mgr.as_array().dtype == np.bool_\n\n mgr = create_mgr('a: i8-1; b: i8-2; c: i4; d: i2; e: u1')\n assert mgr.as_array().dtype == np.int64\n\n mgr = create_mgr('c: i4; d: i2; e: u1')\n assert mgr.as_array().dtype == np.int32\n\n def test_as_array_datetime(self):\n mgr = create_mgr('h: datetime-1; g: datetime-2')\n assert mgr.as_array().dtype == 'M8[ns]'\n\n def test_as_array_datetime_tz(self):\n mgr = create_mgr('h: M8[ns, US/Eastern]; g: M8[ns, CET]')\n assert mgr.get('h').dtype == 'datetime64[ns, US/Eastern]'\n assert mgr.get('g').dtype == 'datetime64[ns, CET]'\n assert mgr.as_array().dtype == 'object'\n\n def test_astype(self):\n # coerce all\n mgr = create_mgr('c: f4; d: f2; e: f8')\n for t in ['float16', 'float32', 'float64', 'int32', 'int64']:\n t = np.dtype(t)\n tmgr = mgr.astype(t)\n assert tmgr.get('c').dtype.type == t\n assert tmgr.get('d').dtype.type == t\n assert tmgr.get('e').dtype.type == t\n\n # mixed\n mgr = create_mgr('a,b: object; c: bool; d: datetime;'\n 'e: f4; f: f2; g: f8')\n for t in ['float16', 'float32', 'float64', 'int32', 'int64']:\n t = np.dtype(t)\n tmgr = mgr.astype(t, errors='ignore')\n assert tmgr.get('c').dtype.type == t\n assert tmgr.get('e').dtype.type == t\n assert tmgr.get('f').dtype.type == t\n assert tmgr.get('g').dtype.type == t\n\n assert tmgr.get('a').dtype.type == np.object_\n assert tmgr.get('b').dtype.type == np.object_\n if t != np.int64:\n assert tmgr.get('d').dtype.type == np.datetime64\n else:\n assert tmgr.get('d').dtype.type == t\n\n def test_convert(self):\n def _compare(old_mgr, new_mgr):\n \"\"\" compare the blocks, numeric compare ==, object don't \"\"\"\n old_blocks = set(old_mgr.blocks)\n new_blocks = set(new_mgr.blocks)\n assert len(old_blocks) == len(new_blocks)\n\n # compare non-numeric\n for b in old_blocks:\n found = False\n for nb in new_blocks:\n if (b.values == nb.values).all():\n found = True\n break\n assert found\n\n for b in new_blocks:\n found = False\n for ob in old_blocks:\n if (b.values == ob.values).all():\n found = True\n break\n assert found\n\n # noops\n mgr = create_mgr('f: i8; g: f8')\n new_mgr = mgr.convert()\n _compare(mgr, new_mgr)\n\n mgr = create_mgr('a, b: object; f: i8; g: f8')\n new_mgr = mgr.convert()\n _compare(mgr, new_mgr)\n\n # convert\n mgr = create_mgr('a,b,foo: object; f: i8; g: f8')\n mgr.set('a', np.array(['1'] * N, dtype=np.object_))\n mgr.set('b', np.array(['2.'] * N, dtype=np.object_))\n mgr.set('foo', np.array(['foo.'] * N, dtype=np.object_))\n new_mgr = mgr.convert(numeric=True)\n assert new_mgr.get('a').dtype == np.int64\n assert new_mgr.get('b').dtype == np.float64\n assert new_mgr.get('foo').dtype == np.object_\n assert new_mgr.get('f').dtype == 
np.int64\n assert new_mgr.get('g').dtype == np.float64\n\n mgr = create_mgr('a,b,foo: object; f: i4; bool: bool; dt: datetime;'\n 'i: i8; g: f8; h: f2')\n mgr.set('a', np.array(['1'] * N, dtype=np.object_))\n mgr.set('b', np.array(['2.'] * N, dtype=np.object_))\n mgr.set('foo', np.array(['foo.'] * N, dtype=np.object_))\n new_mgr = mgr.convert(numeric=True)\n assert new_mgr.get('a').dtype == np.int64\n assert new_mgr.get('b').dtype == np.float64\n assert new_mgr.get('foo').dtype == np.object_\n assert new_mgr.get('f').dtype == np.int32\n assert new_mgr.get('bool').dtype == np.bool_\n assert new_mgr.get('dt').dtype.type, np.datetime64\n assert new_mgr.get('i').dtype == np.int64\n assert new_mgr.get('g').dtype == np.float64\n assert new_mgr.get('h').dtype == np.float16\n\n def test_interleave(self):\n\n # self\n for dtype in ['f8', 'i8', 'object', 'bool', 'complex', 'M8[ns]',\n 'm8[ns]']:\n mgr = create_mgr('a: {0}'.format(dtype))\n assert mgr.as_array().dtype == dtype\n mgr = create_mgr('a: {0}; b: {0}'.format(dtype))\n assert mgr.as_array().dtype == dtype\n\n # will be converted according the actual dtype of the underlying\n mgr = create_mgr('a: category')\n assert mgr.as_array().dtype == 'i8'\n mgr = create_mgr('a: category; b: category')\n assert mgr.as_array().dtype == 'i8'\n mgr = create_mgr('a: category; b: category2')\n assert mgr.as_array().dtype == 'object'\n mgr = create_mgr('a: category2')\n assert mgr.as_array().dtype == 'object'\n mgr = create_mgr('a: category2; b: category2')\n assert mgr.as_array().dtype == 'object'\n\n # combinations\n mgr = create_mgr('a: f8')\n assert mgr.as_array().dtype == 'f8'\n mgr = create_mgr('a: f8; b: i8')\n assert mgr.as_array().dtype == 'f8'\n mgr = create_mgr('a: f4; b: i8')\n assert mgr.as_array().dtype == 'f8'\n mgr = create_mgr('a: f4; b: i8; d: object')\n assert mgr.as_array().dtype == 'object'\n mgr = create_mgr('a: bool; b: i8')\n assert mgr.as_array().dtype == 'object'\n mgr = create_mgr('a: complex')\n assert mgr.as_array().dtype == 'complex'\n mgr = create_mgr('a: f8; b: category')\n assert mgr.as_array().dtype == 'object'\n mgr = create_mgr('a: M8[ns]; b: category')\n assert mgr.as_array().dtype == 'object'\n mgr = create_mgr('a: M8[ns]; b: bool')\n assert mgr.as_array().dtype == 'object'\n mgr = create_mgr('a: M8[ns]; b: i8')\n assert mgr.as_array().dtype == 'object'\n mgr = create_mgr('a: m8[ns]; b: bool')\n assert mgr.as_array().dtype == 'object'\n mgr = create_mgr('a: m8[ns]; b: i8')\n assert mgr.as_array().dtype == 'object'\n mgr = create_mgr('a: M8[ns]; b: m8[ns]')\n assert mgr.as_array().dtype == 'object'\n\n def test_interleave_non_unique_cols(self):\n df = DataFrame([\n [pd.Timestamp('20130101'), 3.5],\n [pd.Timestamp('20130102'), 4.5]],\n columns=['x', 'x'],\n index=[1, 2])\n\n df_unique = df.copy()\n df_unique.columns = ['x', 'y']\n assert df_unique.values.shape == df.values.shape\n tm.assert_numpy_array_equal(df_unique.values[0], df.values[0])\n tm.assert_numpy_array_equal(df_unique.values[1], df.values[1])\n\n def test_consolidate(self):\n pass\n\n def test_consolidate_ordering_issues(self, mgr):\n mgr.set('f', randn(N))\n mgr.set('d', randn(N))\n mgr.set('b', randn(N))\n mgr.set('g', randn(N))\n mgr.set('h', randn(N))\n\n # we have datetime/tz blocks in mgr\n cons = mgr.consolidate()\n assert cons.nblocks == 4\n cons = mgr.consolidate().get_numeric_data()\n assert cons.nblocks == 1\n assert isinstance(cons.blocks[0].mgr_locs, BlockPlacement)\n tm.assert_numpy_array_equal(cons.blocks[0].mgr_locs.as_array,\n 
np.arange(len(cons.items), dtype=np.int64))\n\n def test_reindex_index(self):\n pass\n\n def test_reindex_items(self):\n # mgr is not consolidated, f8 & f8-2 blocks\n mgr = create_mgr('a: f8; b: i8; c: f8; d: i8; e: f8;'\n 'f: bool; g: f8-2')\n\n reindexed = mgr.reindex_axis(['g', 'c', 'a', 'd'], axis=0)\n assert reindexed.nblocks == 2\n tm.assert_index_equal(reindexed.items, pd.Index(['g', 'c', 'a', 'd']))\n assert_almost_equal(\n mgr.get('g', fastpath=False), reindexed.get('g', fastpath=False))\n assert_almost_equal(\n mgr.get('c', fastpath=False), reindexed.get('c', fastpath=False))\n assert_almost_equal(\n mgr.get('a', fastpath=False), reindexed.get('a', fastpath=False))\n assert_almost_equal(\n mgr.get('d', fastpath=False), reindexed.get('d', fastpath=False))\n assert_almost_equal(\n mgr.get('g').internal_values(),\n reindexed.get('g').internal_values())\n assert_almost_equal(\n mgr.get('c').internal_values(),\n reindexed.get('c').internal_values())\n assert_almost_equal(\n mgr.get('a').internal_values(),\n reindexed.get('a').internal_values())\n assert_almost_equal(\n mgr.get('d').internal_values(),\n reindexed.get('d').internal_values())\n\n def test_multiindex_xs(self):\n mgr = create_mgr('a,b,c: f8; d,e,f: i8')\n\n index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two',\n 'three']],\n codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],\n [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],\n names=['first', 'second'])\n\n mgr.set_axis(1, index)\n result = mgr.xs('bar', axis=1)\n assert result.shape == (6, 2)\n assert result.axes[1][0] == ('bar', 'one')\n assert result.axes[1][1] == ('bar', 'two')\n\n def test_get_numeric_data(self):\n mgr = create_mgr('int: int; float: float; complex: complex;'\n 'str: object; bool: bool; obj: object; dt: datetime',\n item_shape=(3, ))\n mgr.set('obj', np.array([1, 2, 3], dtype=np.object_))\n\n numeric = mgr.get_numeric_data()\n tm.assert_index_equal(numeric.items,\n pd.Index(['int', 'float', 'complex', 'bool']))\n assert_almost_equal(\n mgr.get('float', fastpath=False), numeric.get('float',\n fastpath=False))\n assert_almost_equal(\n mgr.get('float').internal_values(),\n numeric.get('float').internal_values())\n\n # Check sharing\n numeric.set('float', np.array([100., 200., 300.]))\n assert_almost_equal(\n mgr.get('float', fastpath=False), np.array([100., 200., 300.]))\n assert_almost_equal(\n mgr.get('float').internal_values(), np.array([100., 200., 300.]))\n\n numeric2 = mgr.get_numeric_data(copy=True)\n tm.assert_index_equal(numeric.items,\n pd.Index(['int', 'float', 'complex', 'bool']))\n numeric2.set('float', np.array([1000., 2000., 3000.]))\n assert_almost_equal(\n mgr.get('float', fastpath=False), np.array([100., 200., 300.]))\n assert_almost_equal(\n mgr.get('float').internal_values(), np.array([100., 200., 300.]))\n\n def test_get_bool_data(self):\n mgr = create_mgr('int: int; float: float; complex: complex;'\n 'str: object; bool: bool; obj: object; dt: datetime',\n item_shape=(3, ))\n mgr.set('obj', np.array([True, False, True], dtype=np.object_))\n\n bools = mgr.get_bool_data()\n tm.assert_index_equal(bools.items, pd.Index(['bool']))\n assert_almost_equal(mgr.get('bool', fastpath=False),\n bools.get('bool', fastpath=False))\n assert_almost_equal(\n mgr.get('bool').internal_values(),\n bools.get('bool').internal_values())\n\n bools.set('bool', np.array([True, False, True]))\n tm.assert_numpy_array_equal(mgr.get('bool', fastpath=False),\n np.array([True, False, True]))\n tm.assert_numpy_array_equal(mgr.get('bool').internal_values(),\n np.array([True, 
False, True]))\n\n # Check sharing\n bools2 = mgr.get_bool_data(copy=True)\n bools2.set('bool', np.array([False, True, False]))\n tm.assert_numpy_array_equal(mgr.get('bool', fastpath=False),\n np.array([True, False, True]))\n tm.assert_numpy_array_equal(mgr.get('bool').internal_values(),\n np.array([True, False, True]))\n\n def test_unicode_repr_doesnt_raise(self):\n repr(create_mgr('b,\\u05d0: object'))\n\n def test_missing_unicode_key(self):\n df = DataFrame({\"a\": [1]})\n try:\n df.loc[:, \"\\u05d0\"] # should not raise UnicodeEncodeError\n except KeyError:\n pass # this is the expected exception\n\n def test_equals(self):\n # unique items\n bm1 = create_mgr('a,b,c: i8-1; d,e,f: i8-2')\n bm2 = BlockManager(bm1.blocks[::-1], bm1.axes)\n assert bm1.equals(bm2)\n\n bm1 = create_mgr('a,a,a: i8-1; b,b,b: i8-2')\n bm2 = BlockManager(bm1.blocks[::-1], bm1.axes)\n assert bm1.equals(bm2)\n\n def test_equals_block_order_different_dtypes(self):\n # GH 9330\n\n mgr_strings = [\n \"a:i8;b:f8\", # basic case\n \"a:i8;b:f8;c:c8;d:b\", # many types\n \"a:i8;e:dt;f:td;g:string\", # more types\n \"a:i8;b:category;c:category2;d:category2\", # categories\n \"c:sparse;d:sparse_na;b:f8\", # sparse\n ]\n\n for mgr_string in mgr_strings:\n bm = create_mgr(mgr_string)\n block_perms = itertools.permutations(bm.blocks)\n for bm_perm in block_perms:\n bm_this = BlockManager(bm_perm, bm.axes)\n assert bm.equals(bm_this)\n assert bm_this.equals(bm)\n\n def test_single_mgr_ctor(self):\n mgr = create_single_mgr('f8', num_rows=5)\n assert mgr.as_array().tolist() == [0., 1., 2., 3., 4.]\n\n def test_validate_bool_args(self):\n invalid_values = [1, \"True\", [1, 2, 3], 5.0]\n bm1 = create_mgr('a,b,c: i8-1; d,e,f: i8-2')\n\n for value in invalid_values:\n with pytest.raises(ValueError):\n bm1.replace_list([1], [2], inplace=value)\n\n\nclass TestIndexing:\n # Nosetests-style data-driven tests.\n #\n # This test applies different indexing routines to block managers and\n # compares the outcome to the result of same operations on np.ndarray.\n #\n # NOTE: sparse (SparseBlock with fill_value != np.nan) fail a lot of tests\n # and are disabled.\n\n MANAGERS = [\n create_single_mgr('f8', N),\n create_single_mgr('i8', N),\n\n # 2-dim\n create_mgr('a,b,c,d,e,f: f8', item_shape=(N,)),\n create_mgr('a,b,c,d,e,f: i8', item_shape=(N,)),\n create_mgr('a,b: f8; c,d: i8; e,f: string', item_shape=(N,)),\n create_mgr('a,b: f8; c,d: i8; e,f: f8', item_shape=(N,)),\n\n # 3-dim\n create_mgr('a,b,c,d,e,f: f8', item_shape=(N, N)),\n create_mgr('a,b,c,d,e,f: i8', item_shape=(N, N)),\n create_mgr('a,b: f8; c,d: i8; e,f: string', item_shape=(N, N)),\n create_mgr('a,b: f8; c,d: i8; e,f: f8', item_shape=(N, N)),\n ]\n\n # MANAGERS = [MANAGERS[6]]\n\n def test_get_slice(self):\n def assert_slice_ok(mgr, axis, slobj):\n mat = mgr.as_array()\n\n # we maybe using an ndarray to test slicing and\n # might not be the full length of the axis\n if isinstance(slobj, np.ndarray):\n ax = mgr.axes[axis]\n if len(ax) and len(slobj) and len(slobj) != len(ax):\n slobj = np.concatenate([slobj, np.zeros(\n len(ax) - len(slobj), dtype=bool)])\n sliced = mgr.get_slice(slobj, axis=axis)\n mat_slobj = (slice(None), ) * axis + (slobj, )\n tm.assert_numpy_array_equal(mat[mat_slobj], sliced.as_array(),\n check_dtype=False)\n tm.assert_index_equal(mgr.axes[axis][slobj], sliced.axes[axis])\n\n for mgr in self.MANAGERS:\n for ax in range(mgr.ndim):\n # slice\n assert_slice_ok(mgr, ax, slice(None))\n assert_slice_ok(mgr, ax, slice(3))\n assert_slice_ok(mgr, ax, slice(100))\n 
assert_slice_ok(mgr, ax, slice(1, 4))\n assert_slice_ok(mgr, ax, slice(3, 0, -2))\n\n # boolean mask\n assert_slice_ok(\n mgr, ax, np.array([], dtype=np.bool_))\n assert_slice_ok(\n mgr, ax,\n np.ones(mgr.shape[ax], dtype=np.bool_))\n assert_slice_ok(\n mgr, ax,\n np.zeros(mgr.shape[ax], dtype=np.bool_))\n\n if mgr.shape[ax] >= 3:\n assert_slice_ok(\n mgr, ax,\n np.arange(mgr.shape[ax]) % 3 == 0)\n assert_slice_ok(\n mgr, ax, np.array(\n [True, True, False], dtype=np.bool_))\n\n # fancy indexer\n assert_slice_ok(mgr, ax, [])\n assert_slice_ok(mgr, ax, lrange(mgr.shape[ax]))\n\n if mgr.shape[ax] >= 3:\n assert_slice_ok(mgr, ax, [0, 1, 2])\n assert_slice_ok(mgr, ax, [-1, -2, -3])\n\n def test_take(self):\n def assert_take_ok(mgr, axis, indexer):\n mat = mgr.as_array()\n taken = mgr.take(indexer, axis)\n tm.assert_numpy_array_equal(np.take(mat, indexer, axis),\n taken.as_array(), check_dtype=False)\n tm.assert_index_equal(mgr.axes[axis].take(indexer),\n taken.axes[axis])\n\n for mgr in self.MANAGERS:\n for ax in range(mgr.ndim):\n # take/fancy indexer\n assert_take_ok(mgr, ax, [])\n assert_take_ok(mgr, ax, [0, 0, 0])\n assert_take_ok(mgr, ax, lrange(mgr.shape[ax]))\n\n if mgr.shape[ax] >= 3:\n assert_take_ok(mgr, ax, [0, 1, 2])\n assert_take_ok(mgr, ax, [-1, -2, -3])\n\n def test_reindex_axis(self):\n def assert_reindex_axis_is_ok(mgr, axis, new_labels, fill_value):\n mat = mgr.as_array()\n indexer = mgr.axes[axis].get_indexer_for(new_labels)\n\n reindexed = mgr.reindex_axis(new_labels, axis,\n fill_value=fill_value)\n tm.assert_numpy_array_equal(algos.take_nd(mat, indexer, axis,\n fill_value=fill_value),\n reindexed.as_array(),\n check_dtype=False)\n tm.assert_index_equal(reindexed.axes[axis], new_labels)\n\n for mgr in self.MANAGERS:\n for ax in range(mgr.ndim):\n for fill_value in (None, np.nan, 100.):\n assert_reindex_axis_is_ok(\n mgr, ax,\n pd.Index([]), fill_value)\n assert_reindex_axis_is_ok(\n mgr, ax, mgr.axes[ax],\n fill_value)\n assert_reindex_axis_is_ok(\n mgr, ax,\n mgr.axes[ax][[0, 0, 0]], fill_value)\n assert_reindex_axis_is_ok(\n mgr, ax,\n pd.Index(['foo', 'bar', 'baz']), fill_value)\n assert_reindex_axis_is_ok(\n mgr, ax,\n pd.Index(['foo', mgr.axes[ax][0], 'baz']),\n fill_value)\n\n if mgr.shape[ax] >= 3:\n assert_reindex_axis_is_ok(\n mgr, ax,\n mgr.axes[ax][:-3], fill_value)\n assert_reindex_axis_is_ok(\n mgr, ax,\n mgr.axes[ax][-3::-1], fill_value)\n assert_reindex_axis_is_ok(\n mgr, ax,\n mgr.axes[ax][[0, 1, 2, 0, 1, 2]], fill_value)\n\n def test_reindex_indexer(self):\n\n def assert_reindex_indexer_is_ok(mgr, axis, new_labels, indexer,\n fill_value):\n mat = mgr.as_array()\n reindexed_mat = algos.take_nd(mat, indexer, axis,\n fill_value=fill_value)\n reindexed = mgr.reindex_indexer(new_labels, indexer, axis,\n fill_value=fill_value)\n tm.assert_numpy_array_equal(reindexed_mat,\n reindexed.as_array(),\n check_dtype=False)\n tm.assert_index_equal(reindexed.axes[axis], new_labels)\n\n for mgr in self.MANAGERS:\n for ax in range(mgr.ndim):\n for fill_value in (None, np.nan, 100.):\n assert_reindex_indexer_is_ok(\n mgr, ax,\n pd.Index([]), [], fill_value)\n assert_reindex_indexer_is_ok(\n mgr, ax,\n mgr.axes[ax], np.arange(mgr.shape[ax]), fill_value)\n assert_reindex_indexer_is_ok(\n mgr, ax,\n pd.Index(['foo'] * mgr.shape[ax]),\n np.arange(mgr.shape[ax]), fill_value)\n assert_reindex_indexer_is_ok(\n mgr, ax,\n mgr.axes[ax][::-1], np.arange(mgr.shape[ax]),\n fill_value)\n assert_reindex_indexer_is_ok(\n mgr, ax, mgr.axes[ax],\n np.arange(mgr.shape[ax])[::-1], 
fill_value)\n assert_reindex_indexer_is_ok(\n mgr, ax,\n pd.Index(['foo', 'bar', 'baz']),\n [0, 0, 0], fill_value)\n assert_reindex_indexer_is_ok(\n mgr, ax,\n pd.Index(['foo', 'bar', 'baz']),\n [-1, 0, -1], fill_value)\n assert_reindex_indexer_is_ok(\n mgr, ax,\n pd.Index(['foo', mgr.axes[ax][0], 'baz']),\n [-1, -1, -1], fill_value)\n\n if mgr.shape[ax] >= 3:\n assert_reindex_indexer_is_ok(\n mgr, ax,\n pd.Index(['foo', 'bar', 'baz']),\n [0, 1, 2], fill_value)\n\n # test_get_slice(slice_like, axis)\n # take(indexer, axis)\n # reindex_axis(new_labels, axis)\n # reindex_indexer(new_labels, indexer, axis)\n\n\nclass TestBlockPlacement:\n\n def test_slice_len(self):\n assert len(BlockPlacement(slice(0, 4))) == 4\n assert len(BlockPlacement(slice(0, 4, 2))) == 2\n assert len(BlockPlacement(slice(0, 3, 2))) == 2\n\n assert len(BlockPlacement(slice(0, 1, 2))) == 1\n assert len(BlockPlacement(slice(1, 0, -1))) == 1\n\n def test_zero_step_raises(self):\n with pytest.raises(ValueError):\n BlockPlacement(slice(1, 1, 0))\n with pytest.raises(ValueError):\n BlockPlacement(slice(1, 2, 0))\n\n def test_unbounded_slice_raises(self):\n def assert_unbounded_slice_error(slc):\n with pytest.raises(ValueError, match=\"unbounded slice\"):\n BlockPlacement(slc)\n\n assert_unbounded_slice_error(slice(None, None))\n assert_unbounded_slice_error(slice(10, None))\n assert_unbounded_slice_error(slice(None, None, -1))\n assert_unbounded_slice_error(slice(None, 10, -1))\n\n # These are \"unbounded\" because negative index will change depending on\n # container shape.\n assert_unbounded_slice_error(slice(-1, None))\n assert_unbounded_slice_error(slice(None, -1))\n assert_unbounded_slice_error(slice(-1, -1))\n assert_unbounded_slice_error(slice(-1, None, -1))\n assert_unbounded_slice_error(slice(None, -1, -1))\n assert_unbounded_slice_error(slice(-1, -1, -1))\n\n def test_not_slice_like_slices(self):\n def assert_not_slice_like(slc):\n assert not BlockPlacement(slc).is_slice_like\n\n assert_not_slice_like(slice(0, 0))\n assert_not_slice_like(slice(100, 0))\n\n assert_not_slice_like(slice(100, 100, -1))\n assert_not_slice_like(slice(0, 100, -1))\n\n assert not BlockPlacement(slice(0, 0)).is_slice_like\n assert not BlockPlacement(slice(100, 100)).is_slice_like\n\n def test_array_to_slice_conversion(self):\n def assert_as_slice_equals(arr, slc):\n assert BlockPlacement(arr).as_slice == slc\n\n assert_as_slice_equals([0], slice(0, 1, 1))\n assert_as_slice_equals([100], slice(100, 101, 1))\n\n assert_as_slice_equals([0, 1, 2], slice(0, 3, 1))\n assert_as_slice_equals([0, 5, 10], slice(0, 15, 5))\n assert_as_slice_equals([0, 100], slice(0, 200, 100))\n\n assert_as_slice_equals([2, 1], slice(2, 0, -1))\n\n if not PY361:\n assert_as_slice_equals([2, 1, 0], slice(2, None, -1))\n assert_as_slice_equals([100, 0], slice(100, None, -100))\n\n def test_not_slice_like_arrays(self):\n def assert_not_slice_like(arr):\n assert not BlockPlacement(arr).is_slice_like\n\n assert_not_slice_like([])\n assert_not_slice_like([-1])\n assert_not_slice_like([-1, -2, -3])\n assert_not_slice_like([-10])\n assert_not_slice_like([-1])\n assert_not_slice_like([-1, 0, 1, 2])\n assert_not_slice_like([-2, 0, 2, 4])\n assert_not_slice_like([1, 0, -1])\n assert_not_slice_like([1, 1, 1])\n\n def test_slice_iter(self):\n assert list(BlockPlacement(slice(0, 3))) == [0, 1, 2]\n assert list(BlockPlacement(slice(0, 0))) == []\n assert list(BlockPlacement(slice(3, 0))) == []\n\n if not PY361:\n assert list(BlockPlacement(slice(3, 0, -1))) == [3, 2, 1]\n assert 
list(BlockPlacement(slice(3, None, -1))) == [3, 2, 1, 0]\n\n def test_slice_to_array_conversion(self):\n def assert_as_array_equals(slc, asarray):\n tm.assert_numpy_array_equal(\n BlockPlacement(slc).as_array,\n np.asarray(asarray, dtype=np.int64))\n\n assert_as_array_equals(slice(0, 3), [0, 1, 2])\n assert_as_array_equals(slice(0, 0), [])\n assert_as_array_equals(slice(3, 0), [])\n\n assert_as_array_equals(slice(3, 0, -1), [3, 2, 1])\n\n if not PY361:\n assert_as_array_equals(slice(3, None, -1), [3, 2, 1, 0])\n assert_as_array_equals(slice(31, None, -10), [31, 21, 11, 1])\n\n def test_blockplacement_add(self):\n bpl = BlockPlacement(slice(0, 5))\n assert bpl.add(1).as_slice == slice(1, 6, 1)\n assert bpl.add(np.arange(5)).as_slice == slice(0, 10, 2)\n assert list(bpl.add(np.arange(5, 0, -1))) == [5, 5, 5, 5, 5]\n\n def test_blockplacement_add_int(self):\n def assert_add_equals(val, inc, result):\n assert list(BlockPlacement(val).add(inc)) == result\n\n assert_add_equals(slice(0, 0), 0, [])\n assert_add_equals(slice(1, 4), 0, [1, 2, 3])\n assert_add_equals(slice(3, 0, -1), 0, [3, 2, 1])\n assert_add_equals([1, 2, 4], 0, [1, 2, 4])\n\n assert_add_equals(slice(0, 0), 10, [])\n assert_add_equals(slice(1, 4), 10, [11, 12, 13])\n assert_add_equals(slice(3, 0, -1), 10, [13, 12, 11])\n assert_add_equals([1, 2, 4], 10, [11, 12, 14])\n\n assert_add_equals(slice(0, 0), -1, [])\n assert_add_equals(slice(1, 4), -1, [0, 1, 2])\n assert_add_equals([1, 2, 4], -1, [0, 1, 3])\n\n with pytest.raises(ValueError):\n BlockPlacement(slice(1, 4)).add(-10)\n with pytest.raises(ValueError):\n BlockPlacement([1, 2, 4]).add(-10)\n\n if not PY361:\n assert_add_equals(slice(3, 0, -1), -1, [2, 1, 0])\n assert_add_equals(slice(2, None, -1), 0, [2, 1, 0])\n assert_add_equals(slice(2, None, -1), 10, [12, 11, 10])\n\n with pytest.raises(ValueError):\n BlockPlacement(slice(2, None, -1)).add(-1)\n\n\nclass DummyElement:\n def __init__(self, value, dtype):\n self.value = value\n self.dtype = np.dtype(dtype)\n\n def __array__(self):\n return np.array(self.value, dtype=self.dtype)\n\n def __str__(self):\n return \"DummyElement({}, {})\".format(self.value, self.dtype)\n\n def __repr__(self):\n return str(self)\n\n def astype(self, dtype, copy=False):\n self.dtype = dtype\n return self\n\n def view(self, dtype):\n return type(self)(self.value.view(dtype), dtype)\n\n def any(self, axis=None):\n return bool(self.value)\n\n\nclass TestCanHoldElement:\n @pytest.mark.parametrize('value, dtype', [\n (1, 'i8'),\n (1.0, 'f8'),\n (2**63, 'f8'),\n (1j, 'complex128'),\n (2**63, 'complex128'),\n (True, 'bool'),\n (np.timedelta64(20, 'ns'), '<m8[ns]'),\n (np.datetime64(20, 'ns'), '<M8[ns]'),\n ])\n @pytest.mark.parametrize('op', [\n operator.add,\n operator.sub,\n operator.mul,\n operator.truediv,\n operator.mod,\n operator.pow,\n ], ids=lambda x: x.__name__)\n def test_binop_other(self, op, value, dtype):\n skip = {(operator.add, 'bool'),\n (operator.sub, 'bool'),\n (operator.mul, 'bool'),\n (operator.truediv, 'bool'),\n (operator.mod, 'i8'),\n (operator.mod, 'complex128'),\n (operator.pow, 'bool')}\n if (op, dtype) in skip:\n pytest.skip(\"Invalid combination {},{}\".format(op, dtype))\n\n e = DummyElement(value, dtype)\n s = pd.DataFrame({\"A\": [e.value, e.value]}, dtype=e.dtype)\n\n invalid = {(operator.pow, '<M8[ns]'),\n (operator.mod, '<M8[ns]'),\n (operator.truediv, '<M8[ns]'),\n (operator.mul, '<M8[ns]'),\n (operator.add, '<M8[ns]'),\n (operator.pow, '<m8[ns]'),\n (operator.mul, '<m8[ns]')}\n\n if (op, dtype) in invalid:\n with 
pytest.raises(TypeError):\n op(s, e.value)\n else:\n # FIXME: Since dispatching to Series, this test no longer\n # asserts anything meaningful\n result = op(s, e.value).dtypes\n expected = op(s, value).dtypes\n assert_series_equal(result, expected)\n\n\[email protected]('typestr, holder', [\n ('category', Categorical),\n ('M8[ns]', DatetimeArray),\n ('M8[ns, US/Central]', DatetimeArray),\n ('m8[ns]', TimedeltaArray),\n ('sparse', SparseArray),\n])\ndef test_holder(typestr, holder):\n blk = create_block(typestr, [1])\n assert blk._holder is holder\n\n\ndef test_deprecated_fastpath():\n # GH#19265\n values = np.random.rand(3, 3)\n with tm.assert_produces_warning(DeprecationWarning,\n check_stacklevel=False):\n make_block(values, placement=np.arange(3), fastpath=True)\n\n\ndef test_validate_ndim():\n values = np.array([1.0, 2.0])\n placement = slice(2)\n msg = r\"Wrong number of dimensions. values.ndim != ndim \\[1 != 2\\]\"\n\n with pytest.raises(ValueError, match=msg):\n make_block(values, placement, ndim=2)\n\n\ndef test_block_shape():\n idx = pd.Index([0, 1, 2, 3, 4])\n a = pd.Series([1, 2, 3]).reindex(idx)\n b = pd.Series(pd.Categorical([1, 2, 3])).reindex(idx)\n\n assert (a._data.blocks[0].mgr_locs.indexer ==\n b._data.blocks[0].mgr_locs.indexer)\n\n\ndef test_make_block_no_pandas_array():\n # https://github.com/pandas-dev/pandas/pull/24866\n arr = pd.array([1, 2])\n\n # PandasArray, no dtype\n result = make_block(arr, slice(len(arr)))\n assert result.is_integer is True\n assert result.is_extension is False\n\n # PandasArray, PandasDtype\n result = make_block(arr, slice(len(arr)), dtype=arr.dtype)\n assert result.is_integer is True\n assert result.is_extension is False\n\n # ndarray, PandasDtype\n result = make_block(arr.to_numpy(), slice(len(arr)), dtype=arr.dtype)\n assert result.is_integer is True\n assert result.is_extension is False\n",
"import codecs\nimport re\nimport textwrap\nfrom typing import Dict\nimport warnings\n\nimport numpy as np\n\nimport pandas._libs.lib as lib\nimport pandas._libs.ops as libops\nfrom pandas.util._decorators import Appender, deprecate_kwarg\n\nfrom pandas.core.dtypes.common import (\n ensure_object, is_bool_dtype, is_categorical_dtype, is_integer,\n is_list_like, is_object_dtype, is_re, is_scalar, is_string_like)\nfrom pandas.core.dtypes.generic import ABCIndexClass, ABCSeries\nfrom pandas.core.dtypes.missing import isna\n\nfrom pandas.core.algorithms import take_1d\nfrom pandas.core.base import NoNewAttributesMixin\nimport pandas.core.common as com\n\n_cpython_optimized_encoders = (\n \"utf-8\", \"utf8\", \"latin-1\", \"latin1\", \"iso-8859-1\", \"mbcs\", \"ascii\"\n)\n_cpython_optimized_decoders = _cpython_optimized_encoders + (\n \"utf-16\", \"utf-32\"\n)\n\n_shared_docs = dict() # type: Dict[str, str]\n\n\ndef cat_core(list_of_columns, sep):\n \"\"\"\n Auxiliary function for :meth:`str.cat`\n\n Parameters\n ----------\n list_of_columns : list of numpy arrays\n List of arrays to be concatenated with sep;\n these arrays may not contain NaNs!\n sep : string\n The separator string for concatenating the columns\n\n Returns\n -------\n nd.array\n The concatenation of list_of_columns with sep\n \"\"\"\n list_with_sep = [sep] * (2 * len(list_of_columns) - 1)\n list_with_sep[::2] = list_of_columns\n return np.sum(list_with_sep, axis=0)\n\n\ndef _na_map(f, arr, na_result=np.nan, dtype=object):\n # should really _check_ for NA\n return _map(f, arr, na_mask=True, na_value=na_result, dtype=dtype)\n\n\ndef _map(f, arr, na_mask=False, na_value=np.nan, dtype=object):\n if not len(arr):\n return np.ndarray(0, dtype=dtype)\n\n if isinstance(arr, ABCSeries):\n arr = arr.values\n if not isinstance(arr, np.ndarray):\n arr = np.asarray(arr, dtype=object)\n if na_mask:\n mask = isna(arr)\n try:\n convert = not all(mask)\n result = lib.map_infer_mask(arr, f, mask.view(np.uint8), convert)\n except (TypeError, AttributeError) as e:\n # Reraise the exception if callable `f` got wrong number of args.\n # The user may want to be warned by this, instead of getting NaN\n p_err = (r'((takes)|(missing)) (?(2)from \\d+ to )?\\d+ '\n r'(?(3)required )positional arguments?')\n\n if len(e.args) >= 1 and re.search(p_err, e.args[0]):\n raise e\n\n def g(x):\n try:\n return f(x)\n except (TypeError, AttributeError):\n return na_value\n\n return _map(g, arr, dtype=dtype)\n if na_value is not np.nan:\n np.putmask(result, mask, na_value)\n if result.dtype == object:\n result = lib.maybe_convert_objects(result)\n return result\n else:\n return lib.map_infer(arr, f)\n\n\ndef str_count(arr, pat, flags=0):\n \"\"\"\n Count occurrences of pattern in each string of the Series/Index.\n\n This function is used to count the number of times a particular regex\n pattern is repeated in each of the string elements of the\n :class:`~pandas.Series`.\n\n Parameters\n ----------\n pat : str\n Valid regular expression.\n flags : int, default 0, meaning no flags\n Flags for the `re` module. For a complete list, `see here\n <https://docs.python.org/3/howto/regex.html#compilation-flags>`_.\n **kwargs\n For compatibility with other string methods. 
Not used.\n\n Returns\n -------\n Series or Index\n Same type as the calling object containing the integer counts.\n\n See Also\n --------\n re : Standard library module for regular expressions.\n str.count : Standard library version, without regular expression support.\n\n Notes\n -----\n Some characters need to be escaped when passing in `pat`.\n eg. ``'$'`` has a special meaning in regex and must be escaped when\n finding this literal character.\n\n Examples\n --------\n >>> s = pd.Series(['A', 'B', 'Aaba', 'Baca', np.nan, 'CABA', 'cat'])\n >>> s.str.count('a')\n 0 0.0\n 1 0.0\n 2 2.0\n 3 2.0\n 4 NaN\n 5 0.0\n 6 1.0\n dtype: float64\n\n Escape ``'$'`` to find the literal dollar sign.\n\n >>> s = pd.Series(['$', 'B', 'Aab$', '$$ca', 'C$B$', 'cat'])\n >>> s.str.count('\\\\$')\n 0 1\n 1 0\n 2 1\n 3 2\n 4 2\n 5 0\n dtype: int64\n\n This is also available on Index\n\n >>> pd.Index(['A', 'A', 'Aaba', 'cat']).str.count('a')\n Int64Index([0, 0, 2, 1], dtype='int64')\n \"\"\"\n regex = re.compile(pat, flags=flags)\n f = lambda x: len(regex.findall(x))\n return _na_map(f, arr, dtype=int)\n\n\ndef str_contains(arr, pat, case=True, flags=0, na=np.nan, regex=True):\n \"\"\"\n Test if pattern or regex is contained within a string of a Series or Index.\n\n Return boolean Series or Index based on whether a given pattern or regex is\n contained within a string of a Series or Index.\n\n Parameters\n ----------\n pat : str\n Character sequence or regular expression.\n case : bool, default True\n If True, case sensitive.\n flags : int, default 0 (no flags)\n Flags to pass through to the re module, e.g. re.IGNORECASE.\n na : default NaN\n Fill value for missing values.\n regex : bool, default True\n If True, assumes the pat is a regular expression.\n\n If False, treats the pat as a literal string.\n\n Returns\n -------\n Series or Index of boolean values\n A Series or Index of boolean values indicating whether the\n given pattern is contained within the string of each element\n of the Series or Index.\n\n See Also\n --------\n match : Analogous, but stricter, relying on re.match instead of re.search.\n Series.str.startswith : Test if the start of each string element matches a\n pattern.\n Series.str.endswith : Same as startswith, but tests the end of string.\n\n Examples\n --------\n\n Returning a Series of booleans using only a literal pattern.\n\n >>> s1 = pd.Series(['Mouse', 'dog', 'house and parrot', '23', np.NaN])\n >>> s1.str.contains('og', regex=False)\n 0 False\n 1 True\n 2 False\n 3 False\n 4 NaN\n dtype: object\n\n Returning an Index of booleans using only a literal pattern.\n\n >>> ind = pd.Index(['Mouse', 'dog', 'house and parrot', '23.0', np.NaN])\n >>> ind.str.contains('23', regex=False)\n Index([False, False, False, True, nan], dtype='object')\n\n Specifying case sensitivity using `case`.\n\n >>> s1.str.contains('oG', case=True, regex=True)\n 0 False\n 1 False\n 2 False\n 3 False\n 4 NaN\n dtype: object\n\n Specifying `na` to be `False` instead of `NaN` replaces NaN values\n with `False`. 
If Series or Index does not contain NaN values\n the resultant dtype will be `bool`, otherwise, an `object` dtype.\n\n >>> s1.str.contains('og', na=False, regex=True)\n 0 False\n 1 True\n 2 False\n 3 False\n 4 False\n dtype: bool\n\n Returning 'house' or 'dog' when either expression occurs in a string.\n\n >>> s1.str.contains('house|dog', regex=True)\n 0 False\n 1 True\n 2 True\n 3 False\n 4 NaN\n dtype: object\n\n Ignoring case sensitivity using `flags` with regex.\n\n >>> import re\n >>> s1.str.contains('PARROT', flags=re.IGNORECASE, regex=True)\n 0 False\n 1 False\n 2 True\n 3 False\n 4 NaN\n dtype: object\n\n Returning any digit using regular expression.\n\n >>> s1.str.contains('\\\\d', regex=True)\n 0 False\n 1 False\n 2 False\n 3 True\n 4 NaN\n dtype: object\n\n Ensure `pat` is a not a literal pattern when `regex` is set to True.\n Note in the following example one might expect only `s2[1]` and `s2[3]` to\n return `True`. However, '.0' as a regex matches any character\n followed by a 0.\n\n >>> s2 = pd.Series(['40', '40.0', '41', '41.0', '35'])\n >>> s2.str.contains('.0', regex=True)\n 0 True\n 1 True\n 2 False\n 3 True\n 4 False\n dtype: bool\n \"\"\"\n if regex:\n if not case:\n flags |= re.IGNORECASE\n\n regex = re.compile(pat, flags=flags)\n\n if regex.groups > 0:\n warnings.warn(\"This pattern has match groups. To actually get the\"\n \" groups, use str.extract.\", UserWarning,\n stacklevel=3)\n\n f = lambda x: bool(regex.search(x))\n else:\n if case:\n f = lambda x: pat in x\n else:\n upper_pat = pat.upper()\n f = lambda x: upper_pat in x\n uppered = _na_map(lambda x: x.upper(), arr)\n return _na_map(f, uppered, na, dtype=bool)\n return _na_map(f, arr, na, dtype=bool)\n\n\ndef str_startswith(arr, pat, na=np.nan):\n \"\"\"\n Test if the start of each string element matches a pattern.\n\n Equivalent to :meth:`str.startswith`.\n\n Parameters\n ----------\n pat : str\n Character sequence. Regular expressions are not accepted.\n na : object, default NaN\n Object shown if element tested is not a string.\n\n Returns\n -------\n Series or Index of bool\n A Series of booleans indicating whether the given pattern matches\n the start of each string element.\n\n See Also\n --------\n str.startswith : Python standard library string method.\n Series.str.endswith : Same as startswith, but tests the end of string.\n Series.str.contains : Tests if string element contains a pattern.\n\n Examples\n --------\n >>> s = pd.Series(['bat', 'Bear', 'cat', np.nan])\n >>> s\n 0 bat\n 1 Bear\n 2 cat\n 3 NaN\n dtype: object\n\n >>> s.str.startswith('b')\n 0 True\n 1 False\n 2 False\n 3 NaN\n dtype: object\n\n Specifying `na` to be `False` instead of `NaN`.\n\n >>> s.str.startswith('b', na=False)\n 0 True\n 1 False\n 2 False\n 3 False\n dtype: bool\n \"\"\"\n f = lambda x: x.startswith(pat)\n return _na_map(f, arr, na, dtype=bool)\n\n\ndef str_endswith(arr, pat, na=np.nan):\n \"\"\"\n Test if the end of each string element matches a pattern.\n\n Equivalent to :meth:`str.endswith`.\n\n Parameters\n ----------\n pat : str\n Character sequence. 
Regular expressions are not accepted.\n na : object, default NaN\n Object shown if element tested is not a string.\n\n Returns\n -------\n Series or Index of bool\n A Series of booleans indicating whether the given pattern matches\n the end of each string element.\n\n See Also\n --------\n str.endswith : Python standard library string method.\n Series.str.startswith : Same as endswith, but tests the start of string.\n Series.str.contains : Tests if string element contains a pattern.\n\n Examples\n --------\n >>> s = pd.Series(['bat', 'bear', 'caT', np.nan])\n >>> s\n 0 bat\n 1 bear\n 2 caT\n 3 NaN\n dtype: object\n\n >>> s.str.endswith('t')\n 0 True\n 1 False\n 2 False\n 3 NaN\n dtype: object\n\n Specifying `na` to be `False` instead of `NaN`.\n\n >>> s.str.endswith('t', na=False)\n 0 True\n 1 False\n 2 False\n 3 False\n dtype: bool\n \"\"\"\n f = lambda x: x.endswith(pat)\n return _na_map(f, arr, na, dtype=bool)\n\n\ndef str_replace(arr, pat, repl, n=-1, case=None, flags=0, regex=True):\n r\"\"\"\n Replace occurrences of pattern/regex in the Series/Index with\n some other string. Equivalent to :meth:`str.replace` or\n :func:`re.sub`.\n\n Parameters\n ----------\n pat : str or compiled regex\n String can be a character sequence or regular expression.\n\n .. versionadded:: 0.20.0\n `pat` also accepts a compiled regex.\n\n repl : str or callable\n Replacement string or a callable. The callable is passed the regex\n match object and must return a replacement string to be used.\n See :func:`re.sub`.\n\n .. versionadded:: 0.20.0\n `repl` also accepts a callable.\n\n n : int, default -1 (all)\n Number of replacements to make from start.\n case : bool, default None\n - If True, case sensitive (the default if `pat` is a string)\n - Set to False for case insensitive\n - Cannot be set if `pat` is a compiled regex\n flags : int, default 0 (no flags)\n - re module flags, e.g. re.IGNORECASE\n - Cannot be set if `pat` is a compiled regex\n regex : bool, default True\n - If True, assumes the passed-in pattern is a regular expression.\n - If False, treats the pattern as a literal string\n - Cannot be set to False if `pat` is a compiled regex or `repl` is\n a callable.\n\n .. versionadded:: 0.23.0\n\n Returns\n -------\n Series or Index of object\n A copy of the object with all matching occurrences of `pat` replaced by\n `repl`.\n\n Raises\n ------\n ValueError\n * if `regex` is False and `repl` is a callable or `pat` is a compiled\n regex\n * if `pat` is a compiled regex and `case` or `flags` is set\n\n Notes\n -----\n When `pat` is a compiled regex, all flags should be included in the\n compiled regex. Use of `case`, `flags`, or `regex=False` with a compiled\n regex will raise an error.\n\n Examples\n --------\n When `pat` is a string and `regex` is True (the default), the given `pat`\n is compiled as a regex. When `repl` is a string, it replaces matching\n regex patterns as with :meth:`re.sub`. NaN value(s) in the Series are\n left as is:\n\n >>> pd.Series(['foo', 'fuz', np.nan]).str.replace('f.', 'ba', regex=True)\n 0 bao\n 1 baz\n 2 NaN\n dtype: object\n\n When `pat` is a string and `regex` is False, every `pat` is replaced with\n `repl` as with :meth:`str.replace`:\n\n >>> pd.Series(['f.o', 'fuz', np.nan]).str.replace('f.', 'ba', regex=False)\n 0 bao\n 1 fuz\n 2 NaN\n dtype: object\n\n When `repl` is a callable, it is called on every `pat` using\n :func:`re.sub`. 
The callable should expect one positional argument\n (a regex object) and return a string.\n\n To get the idea:\n\n >>> pd.Series(['foo', 'fuz', np.nan]).str.replace('f', repr)\n 0 <_sre.SRE_Match object; span=(0, 1), match='f'>oo\n 1 <_sre.SRE_Match object; span=(0, 1), match='f'>uz\n 2 NaN\n dtype: object\n\n Reverse every lowercase alphabetic word:\n\n >>> repl = lambda m: m.group(0)[::-1]\n >>> pd.Series(['foo 123', 'bar baz', np.nan]).str.replace(r'[a-z]+', repl)\n 0 oof 123\n 1 rab zab\n 2 NaN\n dtype: object\n\n Using regex groups (extract second group and swap case):\n\n >>> pat = r\"(?P<one>\\w+) (?P<two>\\w+) (?P<three>\\w+)\"\n >>> repl = lambda m: m.group('two').swapcase()\n >>> pd.Series(['One Two Three', 'Foo Bar Baz']).str.replace(pat, repl)\n 0 tWO\n 1 bAR\n dtype: object\n\n Using a compiled regex with flags\n\n >>> import re\n >>> regex_pat = re.compile(r'FUZ', flags=re.IGNORECASE)\n >>> pd.Series(['foo', 'fuz', np.nan]).str.replace(regex_pat, 'bar')\n 0 foo\n 1 bar\n 2 NaN\n dtype: object\n \"\"\"\n\n # Check whether repl is valid (GH 13438, GH 15055)\n if not (is_string_like(repl) or callable(repl)):\n raise TypeError(\"repl must be a string or callable\")\n\n is_compiled_re = is_re(pat)\n if regex:\n if is_compiled_re:\n if (case is not None) or (flags != 0):\n raise ValueError(\"case and flags cannot be set\"\n \" when pat is a compiled regex\")\n else:\n # not a compiled regex\n # set default case\n if case is None:\n case = True\n\n # add case flag, if provided\n if case is False:\n flags |= re.IGNORECASE\n if is_compiled_re or len(pat) > 1 or flags or callable(repl):\n n = n if n >= 0 else 0\n compiled = re.compile(pat, flags=flags)\n f = lambda x: compiled.sub(repl=repl, string=x, count=n)\n else:\n f = lambda x: x.replace(pat, repl, n)\n else:\n if is_compiled_re:\n raise ValueError(\"Cannot use a compiled regex as replacement \"\n \"pattern with regex=False\")\n if callable(repl):\n raise ValueError(\"Cannot use a callable replacement when \"\n \"regex=False\")\n f = lambda x: x.replace(pat, repl, n)\n\n return _na_map(f, arr)\n\n\ndef str_repeat(arr, repeats):\n \"\"\"\n Duplicate each string in the Series or Index.\n\n Parameters\n ----------\n repeats : int or sequence of int\n Same value for all (int) or different value per (sequence).\n\n Returns\n -------\n Series or Index of object\n Series or Index of repeated string objects specified by\n input parameter repeats.\n\n Examples\n --------\n >>> s = pd.Series(['a', 'b', 'c'])\n >>> s\n 0 a\n 1 b\n 2 c\n dtype: object\n\n Single int repeats string in Series\n\n >>> s.str.repeat(repeats=2)\n 0 aa\n 1 bb\n 2 cc\n dtype: object\n\n Sequence of int repeats corresponding string in Series\n\n >>> s.str.repeat(repeats=[1, 2, 3])\n 0 a\n 1 bb\n 2 ccc\n dtype: object\n \"\"\"\n if is_scalar(repeats):\n def scalar_rep(x):\n try:\n return bytes.__mul__(x, repeats)\n except TypeError:\n return str.__mul__(x, repeats)\n\n return _na_map(scalar_rep, arr)\n else:\n\n def rep(x, r):\n try:\n return bytes.__mul__(x, r)\n except TypeError:\n return str.__mul__(x, r)\n\n repeats = np.asarray(repeats, dtype=object)\n result = libops.vec_binop(com.values_from_object(arr), repeats, rep)\n return result\n\n\ndef str_match(arr, pat, case=True, flags=0, na=np.nan):\n \"\"\"\n Determine if each string matches a regular expression.\n\n Parameters\n ----------\n pat : str\n Character sequence or regular expression.\n case : bool, default True\n If True, case sensitive.\n flags : int, default 0 (no flags)\n re module flags, e.g. 
re.IGNORECASE.\n na : default NaN\n Fill value for missing values.\n\n Returns\n -------\n Series/array of boolean values\n\n See Also\n --------\n contains : Analogous, but less strict, relying on re.search instead of\n re.match.\n extract : Extract matched groups.\n \"\"\"\n if not case:\n flags |= re.IGNORECASE\n\n regex = re.compile(pat, flags=flags)\n\n dtype = bool\n f = lambda x: bool(regex.match(x))\n\n return _na_map(f, arr, na, dtype=dtype)\n\n\ndef _get_single_group_name(rx):\n try:\n return list(rx.groupindex.keys()).pop()\n except IndexError:\n return None\n\n\ndef _groups_or_na_fun(regex):\n \"\"\"Used in both extract_noexpand and extract_frame\"\"\"\n if regex.groups == 0:\n raise ValueError(\"pattern contains no capture groups\")\n empty_row = [np.nan] * regex.groups\n\n def f(x):\n if not isinstance(x, str):\n return empty_row\n m = regex.search(x)\n if m:\n return [np.nan if item is None else item for item in m.groups()]\n else:\n return empty_row\n return f\n\n\ndef _str_extract_noexpand(arr, pat, flags=0):\n \"\"\"\n Find groups in each string in the Series using passed regular\n expression. This function is called from\n str_extract(expand=False), and can return Series, DataFrame, or\n Index.\n\n \"\"\"\n from pandas import DataFrame, Index\n\n regex = re.compile(pat, flags=flags)\n groups_or_na = _groups_or_na_fun(regex)\n\n if regex.groups == 1:\n result = np.array([groups_or_na(val)[0] for val in arr], dtype=object)\n name = _get_single_group_name(regex)\n else:\n if isinstance(arr, Index):\n raise ValueError(\"only one regex group is supported with Index\")\n name = None\n names = dict(zip(regex.groupindex.values(), regex.groupindex.keys()))\n columns = [names.get(1 + i, i) for i in range(regex.groups)]\n if arr.empty:\n result = DataFrame(columns=columns, dtype=object)\n else:\n result = DataFrame(\n [groups_or_na(val) for val in arr],\n columns=columns,\n index=arr.index,\n dtype=object)\n return result, name\n\n\ndef _str_extract_frame(arr, pat, flags=0):\n \"\"\"\n For each subject string in the Series, extract groups from the\n first match of regular expression pat. This function is called from\n str_extract(expand=True), and always returns a DataFrame.\n\n \"\"\"\n from pandas import DataFrame\n\n regex = re.compile(pat, flags=flags)\n groups_or_na = _groups_or_na_fun(regex)\n names = dict(zip(regex.groupindex.values(), regex.groupindex.keys()))\n columns = [names.get(1 + i, i) for i in range(regex.groups)]\n\n if len(arr) == 0:\n return DataFrame(columns=columns, dtype=object)\n try:\n result_index = arr.index\n except AttributeError:\n result_index = None\n return DataFrame(\n [groups_or_na(val) for val in arr],\n columns=columns,\n index=result_index,\n dtype=object)\n\n\ndef str_extract(arr, pat, flags=0, expand=True):\n r\"\"\"\n Extract capture groups in the regex `pat` as columns in a DataFrame.\n\n For each subject string in the Series, extract groups from the\n first match of regular expression `pat`.\n\n Parameters\n ----------\n pat : str\n Regular expression pattern with capturing groups.\n flags : int, default 0 (no flags)\n Flags from the ``re`` module, e.g. ``re.IGNORECASE``, that\n modify regular expression matching for things like case,\n spaces, etc. For more details, see :mod:`re`.\n expand : bool, default True\n If True, return DataFrame with one column per capture group.\n If False, return a Series/Index if there is one capture group\n or DataFrame if there are multiple capture groups.\n\n .. 
versionadded:: 0.18.0\n\n Returns\n -------\n DataFrame or Series or Index\n A DataFrame with one row for each subject string, and one\n column for each group. Any capture group names in regular\n expression pat will be used for column names; otherwise\n capture group numbers will be used. The dtype of each result\n column is always object, even when no match is found. If\n ``expand=False`` and pat has only one capture group, then\n return a Series (if subject is a Series) or Index (if subject\n is an Index).\n\n See Also\n --------\n extractall : Returns all matches (not just the first match).\n\n Examples\n --------\n A pattern with two groups will return a DataFrame with two columns.\n Non-matches will be NaN.\n\n >>> s = pd.Series(['a1', 'b2', 'c3'])\n >>> s.str.extract(r'([ab])(\\d)')\n 0 1\n 0 a 1\n 1 b 2\n 2 NaN NaN\n\n A pattern may contain optional groups.\n\n >>> s.str.extract(r'([ab])?(\\d)')\n 0 1\n 0 a 1\n 1 b 2\n 2 NaN 3\n\n Named groups will become column names in the result.\n\n >>> s.str.extract(r'(?P<letter>[ab])(?P<digit>\\d)')\n letter digit\n 0 a 1\n 1 b 2\n 2 NaN NaN\n\n A pattern with one group will return a DataFrame with one column\n if expand=True.\n\n >>> s.str.extract(r'[ab](\\d)', expand=True)\n 0\n 0 1\n 1 2\n 2 NaN\n\n A pattern with one group will return a Series if expand=False.\n\n >>> s.str.extract(r'[ab](\\d)', expand=False)\n 0 1\n 1 2\n 2 NaN\n dtype: object\n \"\"\"\n if not isinstance(expand, bool):\n raise ValueError(\"expand must be True or False\")\n if expand:\n return _str_extract_frame(arr._orig, pat, flags=flags)\n else:\n result, name = _str_extract_noexpand(arr._parent, pat, flags=flags)\n return arr._wrap_result(result, name=name, expand=expand)\n\n\ndef str_extractall(arr, pat, flags=0):\n r\"\"\"\n For each subject string in the Series, extract groups from all\n matches of regular expression pat. When each subject string in the\n Series has exactly one match, extractall(pat).xs(0, level='match')\n is the same as extract(pat).\n\n .. versionadded:: 0.18.0\n\n Parameters\n ----------\n pat : str\n Regular expression pattern with capturing groups.\n flags : int, default 0 (no flags)\n A ``re`` module flag, for example ``re.IGNORECASE``. These allow\n to modify regular expression matching for things like case, spaces,\n etc. Multiple flags can be combined with the bitwise OR operator,\n for example ``re.IGNORECASE | re.MULTILINE``.\n\n Returns\n -------\n DataFrame\n A ``DataFrame`` with one row for each match, and one column for each\n group. Its rows have a ``MultiIndex`` with first levels that come from\n the subject ``Series``. The last level is named 'match' and indexes the\n matches in each item of the ``Series``. 
Any capture group names in\n regular expression pat will be used for column names; otherwise capture\n group numbers will be used.\n\n See Also\n --------\n extract : Returns first match only (not all matches).\n\n Examples\n --------\n A pattern with one group will return a DataFrame with one column.\n Indices with no matches will not appear in the result.\n\n >>> s = pd.Series([\"a1a2\", \"b1\", \"c1\"], index=[\"A\", \"B\", \"C\"])\n >>> s.str.extractall(r\"[ab](\\d)\")\n 0\n match\n A 0 1\n 1 2\n B 0 1\n\n Capture group names are used for column names of the result.\n\n >>> s.str.extractall(r\"[ab](?P<digit>\\d)\")\n digit\n match\n A 0 1\n 1 2\n B 0 1\n\n A pattern with two groups will return a DataFrame with two columns.\n\n >>> s.str.extractall(r\"(?P<letter>[ab])(?P<digit>\\d)\")\n letter digit\n match\n A 0 a 1\n 1 a 2\n B 0 b 1\n\n Optional groups that do not match are NaN in the result.\n\n >>> s.str.extractall(r\"(?P<letter>[ab])?(?P<digit>\\d)\")\n letter digit\n match\n A 0 a 1\n 1 a 2\n B 0 b 1\n C 0 NaN 1\n \"\"\"\n\n regex = re.compile(pat, flags=flags)\n # the regex must contain capture groups.\n if regex.groups == 0:\n raise ValueError(\"pattern contains no capture groups\")\n\n if isinstance(arr, ABCIndexClass):\n arr = arr.to_series().reset_index(drop=True)\n\n names = dict(zip(regex.groupindex.values(), regex.groupindex.keys()))\n columns = [names.get(1 + i, i) for i in range(regex.groups)]\n match_list = []\n index_list = []\n is_mi = arr.index.nlevels > 1\n\n for subject_key, subject in arr.iteritems():\n if isinstance(subject, str):\n\n if not is_mi:\n subject_key = (subject_key, )\n\n for match_i, match_tuple in enumerate(regex.findall(subject)):\n if isinstance(match_tuple, str):\n match_tuple = (match_tuple,)\n na_tuple = [np.NaN if group == \"\" else group\n for group in match_tuple]\n match_list.append(na_tuple)\n result_key = tuple(subject_key + (match_i, ))\n index_list.append(result_key)\n\n from pandas import MultiIndex\n index = MultiIndex.from_tuples(\n index_list, names=arr.index.names + [\"match\"])\n\n result = arr._constructor_expanddim(match_list, index=index,\n columns=columns)\n return result\n\n\ndef str_get_dummies(arr, sep='|'):\n \"\"\"\n Split each string in the Series by sep and return a DataFrame\n of dummy/indicator variables.\n\n Parameters\n ----------\n sep : str, default \"|\"\n String to split on.\n\n Returns\n -------\n DataFrame\n Dummy variables corresponding to values of the Series.\n\n See Also\n --------\n get_dummies : Convert categorical variable into dummy/indicator\n variables.\n\n Examples\n --------\n >>> pd.Series(['a|b', 'a', 'a|c']).str.get_dummies()\n a b c\n 0 1 1 0\n 1 1 0 0\n 2 1 0 1\n\n >>> pd.Series(['a|b', np.nan, 'a|c']).str.get_dummies()\n a b c\n 0 1 1 0\n 1 0 0 0\n 2 1 0 1\n \"\"\"\n arr = arr.fillna('')\n try:\n arr = sep + arr + sep\n except TypeError:\n arr = sep + arr.astype(str) + sep\n\n tags = set()\n for ts in arr.str.split(sep):\n tags.update(ts)\n tags = sorted(tags - {\"\"})\n\n dummies = np.empty((len(arr), len(tags)), dtype=np.int64)\n\n for i, t in enumerate(tags):\n pat = sep + t + sep\n dummies[:, i] = lib.map_infer(arr.values, lambda x: pat in x)\n return dummies, tags\n\n\ndef str_join(arr, sep):\n \"\"\"\n Join lists contained as elements in the Series/Index with passed delimiter.\n\n If the elements of a Series are lists themselves, join the content of these\n lists using the delimiter passed to the function.\n This function is an equivalent to :meth:`str.join`.\n\n Parameters\n 
----------\n sep : str\n Delimiter to use between list entries.\n\n Returns\n -------\n Series/Index: object\n The list entries concatenated by intervening occurrences of the\n delimiter.\n\n Raises\n -------\n AttributeError\n If the supplied Series contains neither strings nor lists.\n\n See Also\n --------\n str.join : Standard library version of this method.\n Series.str.split : Split strings around given separator/delimiter.\n\n Notes\n -----\n If any of the list items is not a string object, the result of the join\n will be `NaN`.\n\n Examples\n --------\n Example with a list that contains non-string elements.\n\n >>> s = pd.Series([['lion', 'elephant', 'zebra'],\n ... [1.1, 2.2, 3.3],\n ... ['cat', np.nan, 'dog'],\n ... ['cow', 4.5, 'goat'],\n ... ['duck', ['swan', 'fish'], 'guppy']])\n >>> s\n 0 [lion, elephant, zebra]\n 1 [1.1, 2.2, 3.3]\n 2 [cat, nan, dog]\n 3 [cow, 4.5, goat]\n 4 [duck, [swan, fish], guppy]\n dtype: object\n\n Join all lists using a '-'. The lists containing object(s) of types other\n than str will produce a NaN.\n\n >>> s.str.join('-')\n 0 lion-elephant-zebra\n 1 NaN\n 2 NaN\n 3 NaN\n 4 NaN\n dtype: object\n \"\"\"\n return _na_map(sep.join, arr)\n\n\ndef str_findall(arr, pat, flags=0):\n \"\"\"\n Find all occurrences of pattern or regular expression in the Series/Index.\n\n Equivalent to applying :func:`re.findall` to all the elements in the\n Series/Index.\n\n Parameters\n ----------\n pat : str\n Pattern or regular expression.\n flags : int, default 0\n Flags from ``re`` module, e.g. `re.IGNORECASE` (default is 0, which\n means no flags).\n\n Returns\n -------\n Series/Index of lists of strings\n All non-overlapping matches of pattern or regular expression in each\n string of this Series/Index.\n\n See Also\n --------\n count : Count occurrences of pattern or regular expression in each string\n of the Series/Index.\n extractall : For each string in the Series, extract groups from all matches\n of regular expression and return a DataFrame with one row for each\n match and one column for each group.\n re.findall : The equivalent ``re`` function to all non-overlapping matches\n of pattern or regular expression in string, as a list of strings.\n\n Examples\n --------\n\n >>> s = pd.Series(['Lion', 'Monkey', 'Rabbit'])\n\n The search for the pattern 'Monkey' returns one match:\n\n >>> s.str.findall('Monkey')\n 0 []\n 1 [Monkey]\n 2 []\n dtype: object\n\n On the other hand, the search for the pattern 'MONKEY' doesn't return any\n match:\n\n >>> s.str.findall('MONKEY')\n 0 []\n 1 []\n 2 []\n dtype: object\n\n Flags can be added to the pattern or regular expression. For instance,\n to find the pattern 'MONKEY' ignoring the case:\n\n >>> import re\n >>> s.str.findall('MONKEY', flags=re.IGNORECASE)\n 0 []\n 1 [Monkey]\n 2 []\n dtype: object\n\n When the pattern matches more than one string in the Series, all matches\n are returned:\n\n >>> s.str.findall('on')\n 0 [on]\n 1 [on]\n 2 []\n dtype: object\n\n Regular expressions are supported too. 
For instance, the search for all the\n strings ending with the word 'on' is shown next:\n\n >>> s.str.findall('on$')\n 0 [on]\n 1 []\n 2 []\n dtype: object\n\n If the pattern is found more than once in the same string, then a list of\n multiple strings is returned:\n\n >>> s.str.findall('b')\n 0 []\n 1 []\n 2 [b, b]\n dtype: object\n \"\"\"\n regex = re.compile(pat, flags=flags)\n return _na_map(regex.findall, arr)\n\n\ndef str_find(arr, sub, start=0, end=None, side='left'):\n \"\"\"\n Return indexes in each strings in the Series/Index where the\n substring is fully contained between [start:end]. Return -1 on failure.\n\n Parameters\n ----------\n sub : str\n Substring being searched.\n start : int\n Left edge index.\n end : int\n Right edge index.\n side : {'left', 'right'}, default 'left'\n Specifies a starting side, equivalent to ``find`` or ``rfind``.\n\n Returns\n -------\n Series or Index\n Indexes where substring is found.\n \"\"\"\n\n if not isinstance(sub, str):\n msg = 'expected a string object, not {0}'\n raise TypeError(msg.format(type(sub).__name__))\n\n if side == 'left':\n method = 'find'\n elif side == 'right':\n method = 'rfind'\n else: # pragma: no cover\n raise ValueError('Invalid side')\n\n if end is None:\n f = lambda x: getattr(x, method)(sub, start)\n else:\n f = lambda x: getattr(x, method)(sub, start, end)\n\n return _na_map(f, arr, dtype=int)\n\n\ndef str_index(arr, sub, start=0, end=None, side='left'):\n if not isinstance(sub, str):\n msg = 'expected a string object, not {0}'\n raise TypeError(msg.format(type(sub).__name__))\n\n if side == 'left':\n method = 'index'\n elif side == 'right':\n method = 'rindex'\n else: # pragma: no cover\n raise ValueError('Invalid side')\n\n if end is None:\n f = lambda x: getattr(x, method)(sub, start)\n else:\n f = lambda x: getattr(x, method)(sub, start, end)\n\n return _na_map(f, arr, dtype=int)\n\n\ndef str_pad(arr, width, side='left', fillchar=' '):\n \"\"\"\n Pad strings in the Series/Index up to width.\n\n Parameters\n ----------\n width : int\n Minimum width of resulting string; additional characters will be filled\n with character defined in `fillchar`.\n side : {'left', 'right', 'both'}, default 'left'\n Side from which to fill resulting string.\n fillchar : str, default ' '\n Additional character for filling, default is whitespace.\n\n Returns\n -------\n Series or Index of object\n Returns Series or Index with minimum number of char in object.\n\n See Also\n --------\n Series.str.rjust : Fills the left side of strings with an arbitrary\n character. Equivalent to ``Series.str.pad(side='left')``.\n Series.str.ljust : Fills the right side of strings with an arbitrary\n character. Equivalent to ``Series.str.pad(side='right')``.\n Series.str.center : Fills boths sides of strings with an arbitrary\n character. Equivalent to ``Series.str.pad(side='both')``.\n Series.str.zfill : Pad strings in the Series/Index by prepending '0'\n character. 
Equivalent to ``Series.str.pad(side='left', fillchar='0')``.\n\n Examples\n --------\n >>> s = pd.Series([\"caribou\", \"tiger\"])\n >>> s\n 0 caribou\n 1 tiger\n dtype: object\n\n >>> s.str.pad(width=10)\n 0 caribou\n 1 tiger\n dtype: object\n\n >>> s.str.pad(width=10, side='right', fillchar='-')\n 0 caribou---\n 1 tiger-----\n dtype: object\n\n >>> s.str.pad(width=10, side='both', fillchar='-')\n 0 -caribou--\n 1 --tiger---\n dtype: object\n \"\"\"\n if not isinstance(fillchar, str):\n msg = 'fillchar must be a character, not {0}'\n raise TypeError(msg.format(type(fillchar).__name__))\n\n if len(fillchar) != 1:\n raise TypeError('fillchar must be a character, not str')\n\n if not is_integer(width):\n msg = 'width must be of integer type, not {0}'\n raise TypeError(msg.format(type(width).__name__))\n\n if side == 'left':\n f = lambda x: x.rjust(width, fillchar)\n elif side == 'right':\n f = lambda x: x.ljust(width, fillchar)\n elif side == 'both':\n f = lambda x: x.center(width, fillchar)\n else: # pragma: no cover\n raise ValueError('Invalid side')\n\n return _na_map(f, arr)\n\n\ndef str_split(arr, pat=None, n=None):\n\n if pat is None:\n if n is None or n == 0:\n n = -1\n f = lambda x: x.split(pat, n)\n else:\n if len(pat) == 1:\n if n is None or n == 0:\n n = -1\n f = lambda x: x.split(pat, n)\n else:\n if n is None or n == -1:\n n = 0\n regex = re.compile(pat)\n f = lambda x: regex.split(x, maxsplit=n)\n res = _na_map(f, arr)\n return res\n\n\ndef str_rsplit(arr, pat=None, n=None):\n\n if n is None or n == 0:\n n = -1\n f = lambda x: x.rsplit(pat, n)\n res = _na_map(f, arr)\n return res\n\n\ndef str_slice(arr, start=None, stop=None, step=None):\n \"\"\"\n Slice substrings from each element in the Series or Index.\n\n Parameters\n ----------\n start : int, optional\n Start position for slice operation.\n stop : int, optional\n Stop position for slice operation.\n step : int, optional\n Step size for slice operation.\n\n Returns\n -------\n Series or Index of object\n Series or Index from sliced substring from original string object.\n\n See Also\n --------\n Series.str.slice_replace : Replace a slice with a string.\n Series.str.get : Return element at position.\n Equivalent to `Series.str.slice(start=i, stop=i+1)` with `i`\n being the position.\n\n Examples\n --------\n >>> s = pd.Series([\"koala\", \"fox\", \"chameleon\"])\n >>> s\n 0 koala\n 1 fox\n 2 chameleon\n dtype: object\n\n >>> s.str.slice(start=1)\n 0 oala\n 1 ox\n 2 hameleon\n dtype: object\n\n >>> s.str.slice(stop=2)\n 0 ko\n 1 fo\n 2 ch\n dtype: object\n\n >>> s.str.slice(step=2)\n 0 kaa\n 1 fx\n 2 caeen\n dtype: object\n\n >>> s.str.slice(start=0, stop=5, step=3)\n 0 kl\n 1 f\n 2 cm\n dtype: object\n\n Equivalent behaviour to:\n\n >>> s.str[0:5:3]\n 0 kl\n 1 f\n 2 cm\n dtype: object\n \"\"\"\n obj = slice(start, stop, step)\n f = lambda x: x[obj]\n return _na_map(f, arr)\n\n\ndef str_slice_replace(arr, start=None, stop=None, repl=None):\n \"\"\"\n Replace a positional slice of a string with another value.\n\n Parameters\n ----------\n start : int, optional\n Left index position to use for the slice. If not specified (None),\n the slice is unbounded on the left, i.e. slice from the start\n of the string.\n stop : int, optional\n Right index position to use for the slice. If not specified (None),\n the slice is unbounded on the right, i.e. slice until the\n end of the string.\n repl : str, optional\n String for replacement. 
If not specified (None), the sliced region\n is replaced with an empty string.\n\n Returns\n -------\n Series or Index\n Same type as the original object.\n\n See Also\n --------\n Series.str.slice : Just slicing without replacement.\n\n Examples\n --------\n >>> s = pd.Series(['a', 'ab', 'abc', 'abdc', 'abcde'])\n >>> s\n 0 a\n 1 ab\n 2 abc\n 3 abdc\n 4 abcde\n dtype: object\n\n Specify just `start`, meaning replace `start` until the end of the\n string with `repl`.\n\n >>> s.str.slice_replace(1, repl='X')\n 0 aX\n 1 aX\n 2 aX\n 3 aX\n 4 aX\n dtype: object\n\n Specify just `stop`, meaning the start of the string to `stop` is replaced\n with `repl`, and the rest of the string is included.\n\n >>> s.str.slice_replace(stop=2, repl='X')\n 0 X\n 1 X\n 2 Xc\n 3 Xdc\n 4 Xcde\n dtype: object\n\n Specify `start` and `stop`, meaning the slice from `start` to `stop` is\n replaced with `repl`. Everything before or after `start` and `stop` is\n included as is.\n\n >>> s.str.slice_replace(start=1, stop=3, repl='X')\n 0 aX\n 1 aX\n 2 aX\n 3 aXc\n 4 aXde\n dtype: object\n \"\"\"\n if repl is None:\n repl = ''\n\n def f(x):\n if x[start:stop] == '':\n local_stop = start\n else:\n local_stop = stop\n y = ''\n if start is not None:\n y += x[:start]\n y += repl\n if stop is not None:\n y += x[local_stop:]\n return y\n\n return _na_map(f, arr)\n\n\ndef str_strip(arr, to_strip=None, side='both'):\n \"\"\"\n Strip whitespace (including newlines) from each string in the\n Series/Index.\n\n Parameters\n ----------\n to_strip : str or unicode\n side : {'left', 'right', 'both'}, default 'both'\n\n Returns\n -------\n Series or Index\n \"\"\"\n if side == 'both':\n f = lambda x: x.strip(to_strip)\n elif side == 'left':\n f = lambda x: x.lstrip(to_strip)\n elif side == 'right':\n f = lambda x: x.rstrip(to_strip)\n else: # pragma: no cover\n raise ValueError('Invalid side')\n return _na_map(f, arr)\n\n\ndef str_wrap(arr, width, **kwargs):\n r\"\"\"\n Wrap long strings in the Series/Index to be formatted in\n paragraphs with length less than a given width.\n\n This method has the same keyword parameters and defaults as\n :class:`textwrap.TextWrapper`.\n\n Parameters\n ----------\n width : int\n Maximum line width.\n expand_tabs : bool, optional\n If True, tab characters will be expanded to spaces (default: True).\n replace_whitespace : bool, optional\n If True, each whitespace character (as defined by string.whitespace)\n remaining after tab expansion will be replaced by a single space\n (default: True).\n drop_whitespace : bool, optional\n If True, whitespace that, after wrapping, happens to end up at the\n beginning or end of a line is dropped (default: True).\n break_long_words : bool, optional\n If True, then words longer than width will be broken in order to ensure\n that no lines are longer than width. If it is false, long words will\n not be broken, and some lines may be longer than width (default: True).\n break_on_hyphens : bool, optional\n If True, wrapping will occur preferably on whitespace and right after\n hyphens in compound words, as it is customary in English. If false,\n only whitespaces will be considered as potentially good places for line\n breaks, but you need to set break_long_words to false if you want truly\n insecable words (default: True).\n\n Returns\n -------\n Series or Index\n\n Notes\n -----\n Internally, this method uses a :class:`textwrap.TextWrapper` instance with\n default settings. 
To achieve behavior matching R's stringr library str_wrap\n function, use the arguments:\n\n - expand_tabs = False\n - replace_whitespace = True\n - drop_whitespace = True\n - break_long_words = False\n - break_on_hyphens = False\n\n Examples\n --------\n\n >>> s = pd.Series(['line to be wrapped', 'another line to be wrapped'])\n >>> s.str.wrap(12)\n 0 line to be\\nwrapped\n 1 another line\\nto be\\nwrapped\n dtype: object\n \"\"\"\n kwargs['width'] = width\n\n tw = textwrap.TextWrapper(**kwargs)\n\n return _na_map(lambda s: '\\n'.join(tw.wrap(s)), arr)\n\n\ndef str_translate(arr, table):\n \"\"\"\n Map all characters in the string through the given mapping table.\n Equivalent to standard :meth:`str.translate`.\n\n Parameters\n ----------\n table : dict\n table is a mapping of Unicode ordinals to Unicode ordinals, strings, or\n None. Unmapped characters are left untouched.\n Characters mapped to None are deleted. :meth:`str.maketrans` is a\n helper function for making translation tables.\n\n Returns\n -------\n Series or Index\n \"\"\"\n return _na_map(lambda x: x.translate(table), arr)\n\n\ndef str_get(arr, i):\n \"\"\"\n Extract element from each component at specified position.\n\n Extract element from lists, tuples, or strings in each element in the\n Series/Index.\n\n Parameters\n ----------\n i : int\n Position of element to extract.\n\n Returns\n -------\n Series or Index\n\n Examples\n --------\n >>> s = pd.Series([\"String\",\n ... (1, 2, 3),\n ... [\"a\", \"b\", \"c\"],\n ... 123,\n ... -456,\n ... {1: \"Hello\", \"2\": \"World\"}])\n >>> s\n 0 String\n 1 (1, 2, 3)\n 2 [a, b, c]\n 3 123\n 4 -456\n 5 {1: 'Hello', '2': 'World'}\n dtype: object\n\n >>> s.str.get(1)\n 0 t\n 1 2\n 2 b\n 3 NaN\n 4 NaN\n 5 Hello\n dtype: object\n\n >>> s.str.get(-1)\n 0 g\n 1 3\n 2 c\n 3 NaN\n 4 NaN\n 5 None\n dtype: object\n \"\"\"\n def f(x):\n if isinstance(x, dict):\n return x.get(i)\n elif len(x) > i >= -len(x):\n return x[i]\n return np.nan\n return _na_map(f, arr)\n\n\ndef str_decode(arr, encoding, errors=\"strict\"):\n \"\"\"\n Decode character string in the Series/Index using indicated encoding.\n Equivalent to :meth:`str.decode` in python2 and :meth:`bytes.decode` in\n python3.\n\n Parameters\n ----------\n encoding : str\n errors : str, optional\n\n Returns\n -------\n Series or Index\n \"\"\"\n if encoding in _cpython_optimized_decoders:\n # CPython optimized implementation\n f = lambda x: x.decode(encoding, errors)\n else:\n decoder = codecs.getdecoder(encoding)\n f = lambda x: decoder(x, errors)[0]\n return _na_map(f, arr)\n\n\ndef str_encode(arr, encoding, errors=\"strict\"):\n \"\"\"\n Encode character string in the Series/Index using indicated encoding.\n Equivalent to :meth:`str.encode`.\n\n Parameters\n ----------\n encoding : str\n errors : str, optional\n\n Returns\n -------\n encoded : Series/Index of objects\n \"\"\"\n if encoding in _cpython_optimized_encoders:\n # CPython optimized implementation\n f = lambda x: x.encode(encoding, errors)\n else:\n encoder = codecs.getencoder(encoding)\n f = lambda x: encoder(x, errors)[0]\n return _na_map(f, arr)\n\n\ndef _noarg_wrapper(f, docstring=None, **kargs):\n def wrapper(self):\n result = _na_map(f, self._parent, **kargs)\n return self._wrap_result(result)\n\n wrapper.__name__ = f.__name__\n if docstring is not None:\n wrapper.__doc__ = docstring\n else:\n raise ValueError('Provide docstring')\n\n return wrapper\n\n\ndef _pat_wrapper(f, flags=False, na=False, **kwargs):\n def wrapper1(self, pat):\n result = f(self._parent, pat)\n return 
self._wrap_result(result)\n\n def wrapper2(self, pat, flags=0, **kwargs):\n result = f(self._parent, pat, flags=flags, **kwargs)\n return self._wrap_result(result)\n\n def wrapper3(self, pat, na=np.nan):\n result = f(self._parent, pat, na=na)\n return self._wrap_result(result)\n\n wrapper = wrapper3 if na else wrapper2 if flags else wrapper1\n\n wrapper.__name__ = f.__name__\n if f.__doc__:\n wrapper.__doc__ = f.__doc__\n\n return wrapper\n\n\ndef copy(source):\n \"Copy a docstring from another source function (if present)\"\n\n def do_copy(target):\n if source.__doc__:\n target.__doc__ = source.__doc__\n return target\n\n return do_copy\n\n\nclass StringMethods(NoNewAttributesMixin):\n \"\"\"\n Vectorized string functions for Series and Index. NAs stay NA unless\n handled otherwise by a particular method. Patterned after Python's string\n methods, with some inspiration from R's stringr package.\n\n Examples\n --------\n >>> s.str.split('_')\n >>> s.str.replace('_', '')\n \"\"\"\n\n def __init__(self, data):\n self._validate(data)\n self._is_categorical = is_categorical_dtype(data)\n\n # .values.categories works for both Series/Index\n self._parent = data.values.categories if self._is_categorical else data\n # save orig to blow up categoricals to the right type\n self._orig = data\n self._freeze()\n\n @staticmethod\n def _validate(data):\n from pandas.core.index import Index\n\n if (isinstance(data, ABCSeries) and\n not ((is_categorical_dtype(data.dtype) and\n is_object_dtype(data.values.categories)) or\n (is_object_dtype(data.dtype)))):\n # it's neither a string series not a categorical series with\n # strings inside the categories.\n # this really should exclude all series with any non-string values\n # (instead of test for object dtype), but that isn't practical for\n # performance reasons until we have a str dtype (GH 9343)\n raise AttributeError(\"Can only use .str accessor with string \"\n \"values, which use np.object_ dtype in \"\n \"pandas\")\n elif isinstance(data, Index):\n # can't use ABCIndex to exclude non-str\n\n # see src/inference.pyx which can contain string values\n allowed_types = ('string', 'unicode', 'mixed', 'mixed-integer')\n if is_categorical_dtype(data.dtype):\n inf_type = data.categories.inferred_type\n else:\n inf_type = data.inferred_type\n if inf_type not in allowed_types:\n message = (\"Can only use .str accessor with string values \"\n \"(i.e. 
inferred_type is 'string', 'unicode' or \"\n \"'mixed')\")\n raise AttributeError(message)\n if data.nlevels > 1:\n message = (\"Can only use .str accessor with Index, not \"\n \"MultiIndex\")\n raise AttributeError(message)\n\n def __getitem__(self, key):\n if isinstance(key, slice):\n return self.slice(start=key.start, stop=key.stop, step=key.step)\n else:\n return self.get(key)\n\n def __iter__(self):\n i = 0\n g = self.get(i)\n while g.notna().any():\n yield g\n i += 1\n g = self.get(i)\n\n def _wrap_result(self, result, use_codes=True,\n name=None, expand=None, fill_value=np.nan):\n\n from pandas import Index, Series, MultiIndex\n\n # for category, we do the stuff on the categories, so blow it up\n # to the full series again\n # But for some operations, we have to do the stuff on the full values,\n # so make it possible to skip this step as the method already did this\n # before the transformation...\n if use_codes and self._is_categorical:\n # if self._orig is a CategoricalIndex, there is no .cat-accessor\n result = take_1d(result, Series(self._orig, copy=False).cat.codes,\n fill_value=fill_value)\n\n if not hasattr(result, 'ndim') or not hasattr(result, 'dtype'):\n return result\n assert result.ndim < 3\n\n if expand is None:\n # infer from ndim if expand is not specified\n expand = result.ndim != 1\n\n elif expand is True and not isinstance(self._orig, Index):\n # required when expand=True is explicitly specified\n # not needed when inferred\n\n def cons_row(x):\n if is_list_like(x):\n return x\n else:\n return [x]\n\n result = [cons_row(x) for x in result]\n if result:\n # propagate nan values to match longest sequence (GH 18450)\n max_len = max(len(x) for x in result)\n result = [x * max_len if len(x) == 0 or x[0] is np.nan\n else x for x in result]\n\n if not isinstance(expand, bool):\n raise ValueError(\"expand must be True or False\")\n\n if expand is False:\n # if expand is False, result should have the same name\n # as the original otherwise specified\n if name is None:\n name = getattr(result, 'name', None)\n if name is None:\n # do not use logical or, _orig may be a DataFrame\n # which has \"name\" column\n name = self._orig.name\n\n # Wait until we are sure result is a Series or Index before\n # checking attributes (GH 12180)\n if isinstance(self._orig, Index):\n # if result is a boolean np.array, return the np.array\n # instead of wrapping it into a boolean Index (GH 8875)\n if is_bool_dtype(result):\n return result\n\n if expand:\n result = list(result)\n out = MultiIndex.from_tuples(result, names=name)\n if out.nlevels == 1:\n # We had all tuples of length-one, which are\n # better represented as a regular Index.\n out = out.get_level_values(0)\n return out\n else:\n return Index(result, name=name)\n else:\n index = self._orig.index\n if expand:\n cons = self._orig._constructor_expanddim\n return cons(result, columns=name, index=index)\n else:\n # Must be a Series\n cons = self._orig._constructor\n return cons(result, name=name, index=index)\n\n def _get_series_list(self, others, ignore_index=False):\n \"\"\"\n Auxiliary function for :meth:`str.cat`. 
Turn potentially mixed input\n into a list of Series (elements without an index must match the length\n of the calling Series/Index).\n\n Parameters\n ----------\n others : Series, Index, DataFrame, np.ndarray, list-like or list-like\n of objects that are Series, Index or np.ndarray (1-dim)\n ignore_index : boolean, default False\n Determines whether to forcefully align others with index of caller\n\n Returns\n -------\n tuple : (others transformed into list of Series,\n boolean whether FutureWarning should be raised)\n \"\"\"\n\n # Once str.cat defaults to alignment, this function can be simplified;\n # will not need `ignore_index` and the second boolean output anymore\n\n from pandas import Index, Series, DataFrame\n\n # self._orig is either Series or Index\n idx = self._orig if isinstance(self._orig, Index) else self._orig.index\n\n err_msg = ('others must be Series, Index, DataFrame, np.ndarrary or '\n 'list-like (either containing only strings or containing '\n 'only objects of type Series/Index/list-like/np.ndarray)')\n\n # Generally speaking, all objects without an index inherit the index\n # `idx` of the calling Series/Index - i.e. must have matching length.\n # Objects with an index (i.e. Series/Index/DataFrame) keep their own\n # index, *unless* ignore_index is set to True.\n if isinstance(others, Series):\n warn = not others.index.equals(idx)\n # only reconstruct Series when absolutely necessary\n los = [Series(others.values, index=idx)\n if ignore_index and warn else others]\n return (los, warn)\n elif isinstance(others, Index):\n warn = not others.equals(idx)\n los = [Series(others.values,\n index=(idx if ignore_index else others))]\n return (los, warn)\n elif isinstance(others, DataFrame):\n warn = not others.index.equals(idx)\n if ignore_index and warn:\n # without copy, this could change \"others\"\n # that was passed to str.cat\n others = others.copy()\n others.index = idx\n return ([others[x] for x in others], warn)\n elif isinstance(others, np.ndarray) and others.ndim == 2:\n others = DataFrame(others, index=idx)\n return ([others[x] for x in others], False)\n elif is_list_like(others, allow_sets=False):\n others = list(others) # ensure iterators do not get read twice etc\n\n # in case of list-like `others`, all elements must be\n # either one-dimensional list-likes or scalars\n if all(is_list_like(x, allow_sets=False) for x in others):\n los = []\n join_warn = False\n depr_warn = False\n # iterate through list and append list of series for each\n # element (which we check to be one-dimensional and non-nested)\n while others:\n nxt = others.pop(0) # nxt is guaranteed list-like by above\n\n # GH 21950 - DeprecationWarning\n # only allowing Series/Index/np.ndarray[1-dim] will greatly\n # simply this function post-deprecation.\n if not (isinstance(nxt, (Series, Index)) or\n (isinstance(nxt, np.ndarray) and nxt.ndim == 1)):\n depr_warn = True\n\n if not isinstance(nxt, (DataFrame, Series,\n Index, np.ndarray)):\n # safety for non-persistent list-likes (e.g. 
iterators)\n # do not map indexed/typed objects; info needed below\n nxt = list(nxt)\n\n # known types for which we can avoid deep inspection\n no_deep = ((isinstance(nxt, np.ndarray) and nxt.ndim == 1)\n or isinstance(nxt, (Series, Index)))\n # nested list-likes are forbidden:\n # -> elements of nxt must not be list-like\n is_legal = ((no_deep and nxt.dtype == object)\n or all(not is_list_like(x) for x in nxt))\n\n # DataFrame is false positive of is_legal\n # because \"x in df\" returns column names\n if not is_legal or isinstance(nxt, DataFrame):\n raise TypeError(err_msg)\n\n nxt, wnx = self._get_series_list(nxt,\n ignore_index=ignore_index)\n los = los + nxt\n join_warn = join_warn or wnx\n\n if depr_warn:\n warnings.warn('list-likes other than Series, Index, or '\n 'np.ndarray WITHIN another list-like are '\n 'deprecated and will be removed in a future '\n 'version.', FutureWarning, stacklevel=3)\n return (los, join_warn)\n elif all(not is_list_like(x) for x in others):\n return ([Series(others, index=idx)], False)\n raise TypeError(err_msg)\n\n def cat(self, others=None, sep=None, na_rep=None, join=None):\n \"\"\"\n Concatenate strings in the Series/Index with given separator.\n\n If `others` is specified, this function concatenates the Series/Index\n and elements of `others` element-wise.\n If `others` is not passed, then all values in the Series/Index are\n concatenated into a single string with a given `sep`.\n\n Parameters\n ----------\n others : Series, Index, DataFrame, np.ndarrary or list-like\n Series, Index, DataFrame, np.ndarray (one- or two-dimensional) and\n other list-likes of strings must have the same length as the\n calling Series/Index, with the exception of indexed objects (i.e.\n Series/Index/DataFrame) if `join` is not None.\n\n If others is a list-like that contains a combination of Series,\n Index or np.ndarray (1-dim), then all elements will be unpacked and\n must satisfy the above criteria individually.\n\n If others is None, the method returns the concatenation of all\n strings in the calling Series/Index.\n sep : str, default ''\n The separator between the different elements/columns. By default\n the empty string `''` is used.\n na_rep : str or None, default None\n Representation that is inserted for all missing values:\n\n - If `na_rep` is None, and `others` is None, missing values in the\n Series/Index are omitted from the result.\n - If `na_rep` is None, and `others` is not None, a row containing a\n missing value in any of the columns (before concatenation) will\n have a missing value in the result.\n join : {'left', 'right', 'outer', 'inner'}, default None\n Determines the join-style between the calling Series/Index and any\n Series/Index/DataFrame in `others` (objects without an index need\n to match the length of the calling Series/Index). If None,\n alignment is disabled, but this option will be removed in a future\n version of pandas and replaced with a default of `'left'`. To\n disable alignment, use `.values` on any Series/Index/DataFrame in\n `others`.\n\n .. 
versionadded:: 0.23.0\n\n Returns\n -------\n str, Series or Index\n If `others` is None, `str` is returned, otherwise a `Series/Index`\n (same type as caller) of objects is returned.\n\n See Also\n --------\n split : Split each string in the Series/Index.\n join : Join lists contained as elements in the Series/Index.\n\n Examples\n --------\n When not passing `others`, all values are concatenated into a single\n string:\n\n >>> s = pd.Series(['a', 'b', np.nan, 'd'])\n >>> s.str.cat(sep=' ')\n 'a b d'\n\n By default, NA values in the Series are ignored. Using `na_rep`, they\n can be given a representation:\n\n >>> s.str.cat(sep=' ', na_rep='?')\n 'a b ? d'\n\n If `others` is specified, corresponding values are concatenated with\n the separator. Result will be a Series of strings.\n\n >>> s.str.cat(['A', 'B', 'C', 'D'], sep=',')\n 0 a,A\n 1 b,B\n 2 NaN\n 3 d,D\n dtype: object\n\n Missing values will remain missing in the result, but can again be\n represented using `na_rep`\n\n >>> s.str.cat(['A', 'B', 'C', 'D'], sep=',', na_rep='-')\n 0 a,A\n 1 b,B\n 2 -,C\n 3 d,D\n dtype: object\n\n If `sep` is not specified, the values are concatenated without\n separation.\n\n >>> s.str.cat(['A', 'B', 'C', 'D'], na_rep='-')\n 0 aA\n 1 bB\n 2 -C\n 3 dD\n dtype: object\n\n Series with different indexes can be aligned before concatenation. The\n `join`-keyword works as in other methods.\n\n >>> t = pd.Series(['d', 'a', 'e', 'c'], index=[3, 0, 4, 2])\n >>> s.str.cat(t, join='left', na_rep='-')\n 0 aa\n 1 b-\n 2 -c\n 3 dd\n dtype: object\n >>>\n >>> s.str.cat(t, join='outer', na_rep='-')\n 0 aa\n 1 b-\n 2 -c\n 3 dd\n 4 -e\n dtype: object\n >>>\n >>> s.str.cat(t, join='inner', na_rep='-')\n 0 aa\n 2 -c\n 3 dd\n dtype: object\n >>>\n >>> s.str.cat(t, join='right', na_rep='-')\n 3 dd\n 0 aa\n 4 -e\n 2 -c\n dtype: object\n\n For more examples, see :ref:`here <text.concatenate>`.\n \"\"\"\n from pandas import Index, Series, concat\n\n if isinstance(others, str):\n raise ValueError(\"Did you mean to supply a `sep` keyword?\")\n if sep is None:\n sep = ''\n\n if isinstance(self._orig, Index):\n data = Series(self._orig, index=self._orig)\n else: # Series\n data = self._orig\n\n # concatenate Series/Index with itself if no \"others\"\n if others is None:\n data = ensure_object(data)\n na_mask = isna(data)\n if na_rep is None and na_mask.any():\n data = data[~na_mask]\n elif na_rep is not None and na_mask.any():\n data = np.where(na_mask, na_rep, data)\n return sep.join(data)\n\n try:\n # turn anything in \"others\" into lists of Series\n others, warn = self._get_series_list(others,\n ignore_index=(join is None))\n except ValueError: # do not catch TypeError raised by _get_series_list\n if join is None:\n raise ValueError('All arrays must be same length, except '\n 'those having an index if `join` is not None')\n else:\n raise ValueError('If `others` contains arrays or lists (or '\n 'other list-likes without an index), these '\n 'must all be of the same length as the '\n 'calling Series/Index.')\n\n if join is None and warn:\n warnings.warn(\"A future version of pandas will perform index \"\n \"alignment when `others` is a Series/Index/\"\n \"DataFrame (or a list-like containing one). To \"\n \"disable alignment (the behavior before v.0.23) and \"\n \"silence this warning, use `.values` on any Series/\"\n \"Index/DataFrame in `others`. To enable alignment \"\n \"and silence this warning, pass `join='left'|\"\n \"'outer'|'inner'|'right'`. 
The future default will \"\n \"be `join='left'`.\", FutureWarning, stacklevel=2)\n\n # if join is None, _get_series_list already force-aligned indexes\n join = 'left' if join is None else join\n\n # align if required\n if any(not data.index.equals(x.index) for x in others):\n # Need to add keys for uniqueness in case of duplicate columns\n others = concat(others, axis=1,\n join=(join if join == 'inner' else 'outer'),\n keys=range(len(others)), sort=False, copy=False)\n data, others = data.align(others, join=join)\n others = [others[x] for x in others] # again list of Series\n\n all_cols = [ensure_object(x) for x in [data] + others]\n na_masks = np.array([isna(x) for x in all_cols])\n union_mask = np.logical_or.reduce(na_masks, axis=0)\n\n if na_rep is None and union_mask.any():\n # no na_rep means NaNs for all rows where any column has a NaN\n # only necessary if there are actually any NaNs\n result = np.empty(len(data), dtype=object)\n np.putmask(result, union_mask, np.nan)\n\n not_masked = ~union_mask\n result[not_masked] = cat_core([x[not_masked] for x in all_cols],\n sep)\n elif na_rep is not None and union_mask.any():\n # fill NaNs with na_rep in case there are actually any NaNs\n all_cols = [np.where(nm, na_rep, col)\n for nm, col in zip(na_masks, all_cols)]\n result = cat_core(all_cols, sep)\n else:\n # no NaNs - can just concatenate\n result = cat_core(all_cols, sep)\n\n if isinstance(self._orig, Index):\n # add dtype for case that result is all-NA\n result = Index(result, dtype=object, name=self._orig.name)\n else: # Series\n result = Series(result, dtype=object, index=data.index,\n name=self._orig.name)\n return result\n\n _shared_docs['str_split'] = (\"\"\"\n Split strings around given separator/delimiter.\n\n Splits the string in the Series/Index from the %(side)s,\n at the specified delimiter string. 
Equivalent to :meth:`str.%(method)s`.\n\n Parameters\n ----------\n pat : str, optional\n String or regular expression to split on.\n If not specified, split on whitespace.\n n : int, default -1 (all)\n Limit number of splits in output.\n ``None``, 0 and -1 will be interpreted as return all splits.\n expand : bool, default False\n Expand the splitted strings into separate columns.\n\n * If ``True``, return DataFrame/MultiIndex expanding dimensionality.\n * If ``False``, return Series/Index, containing lists of strings.\n\n Returns\n -------\n Series, Index, DataFrame or MultiIndex\n Type matches caller unless ``expand=True`` (see Notes).\n\n See Also\n --------\n Series.str.split : Split strings around given separator/delimiter.\n Series.str.rsplit : Splits string around given separator/delimiter,\n starting from the right.\n Series.str.join : Join lists contained as elements in the Series/Index\n with passed delimiter.\n str.split : Standard library version for split.\n str.rsplit : Standard library version for rsplit.\n\n Notes\n -----\n The handling of the `n` keyword depends on the number of found splits:\n\n - If found splits > `n`, make first `n` splits only\n - If found splits <= `n`, make all splits\n - If for a certain row the number of found splits < `n`,\n append `None` for padding up to `n` if ``expand=True``\n\n If using ``expand=True``, Series and Index callers return DataFrame and\n MultiIndex objects, respectively.\n\n Examples\n --------\n >>> s = pd.Series([\"this is a regular sentence\",\n \"https://docs.python.org/3/tutorial/index.html\", np.nan])\n\n In the default setting, the string is split by whitespace.\n\n >>> s.str.split()\n 0 [this, is, a, regular, sentence]\n 1 [https://docs.python.org/3/tutorial/index.html]\n 2 NaN\n dtype: object\n\n Without the `n` parameter, the outputs of `rsplit` and `split`\n are identical.\n\n >>> s.str.rsplit()\n 0 [this, is, a, regular, sentence]\n 1 [https://docs.python.org/3/tutorial/index.html]\n 2 NaN\n dtype: object\n\n The `n` parameter can be used to limit the number of splits on the\n delimiter. The outputs of `split` and `rsplit` are different.\n\n >>> s.str.split(n=2)\n 0 [this, is, a regular sentence]\n 1 [https://docs.python.org/3/tutorial/index.html]\n 2 NaN\n dtype: object\n\n >>> s.str.rsplit(n=2)\n 0 [this is a, regular, sentence]\n 1 [https://docs.python.org/3/tutorial/index.html]\n 2 NaN\n dtype: object\n\n The `pat` parameter can be used to split by other characters.\n\n >>> s.str.split(pat = \"/\")\n 0 [this is a regular sentence]\n 1 [https:, , docs.python.org, 3, tutorial, index...\n 2 NaN\n dtype: object\n\n When using ``expand=True``, the split elements will expand out into\n separate columns. 
If NaN is present, it is propagated throughout\n the columns during the split.\n\n >>> s.str.split(expand=True)\n 0 1 2 3\n 0 this is a regular\n 1 https://docs.python.org/3/tutorial/index.html None None None\n 2 NaN NaN NaN NaN \\\n\n 4\n 0 sentence\n 1 None\n 2 NaN\n\n For slightly more complex use cases like splitting the html document name\n from a url, a combination of parameter settings can be used.\n\n >>> s.str.rsplit(\"/\", n=1, expand=True)\n 0 1\n 0 this is a regular sentence None\n 1 https://docs.python.org/3/tutorial index.html\n 2 NaN NaN\n \"\"\")\n\n @Appender(_shared_docs['str_split'] % {\n 'side': 'beginning',\n 'method': 'split'})\n def split(self, pat=None, n=-1, expand=False):\n result = str_split(self._parent, pat, n=n)\n return self._wrap_result(result, expand=expand)\n\n @Appender(_shared_docs['str_split'] % {\n 'side': 'end',\n 'method': 'rsplit'})\n def rsplit(self, pat=None, n=-1, expand=False):\n result = str_rsplit(self._parent, pat, n=n)\n return self._wrap_result(result, expand=expand)\n\n _shared_docs['str_partition'] = (\"\"\"\n Split the string at the %(side)s occurrence of `sep`.\n\n This method splits the string at the %(side)s occurrence of `sep`,\n and returns 3 elements containing the part before the separator,\n the separator itself, and the part after the separator.\n If the separator is not found, return %(return)s.\n\n Parameters\n ----------\n sep : str, default whitespace\n String to split on.\n pat : str, default whitespace\n .. deprecated:: 0.24.0\n Use ``sep`` instead\n expand : bool, default True\n If True, return DataFrame/MultiIndex expanding dimensionality.\n If False, return Series/Index.\n\n Returns\n -------\n DataFrame/MultiIndex or Series/Index of objects\n\n See Also\n --------\n %(also)s\n Series.str.split : Split strings around given separators.\n str.partition : Standard library version.\n\n Examples\n --------\n\n >>> s = pd.Series(['Linda van der Berg', 'George Pitt-Rivers'])\n >>> s\n 0 Linda van der Berg\n 1 George Pitt-Rivers\n dtype: object\n\n >>> s.str.partition()\n 0 1 2\n 0 Linda van der Berg\n 1 George Pitt-Rivers\n\n To partition by the last space instead of the first one:\n\n >>> s.str.rpartition()\n 0 1 2\n 0 Linda van der Berg\n 1 George Pitt-Rivers\n\n To partition by something different than a space:\n\n >>> s.str.partition('-')\n 0 1 2\n 0 Linda van der Berg\n 1 George Pitt - Rivers\n\n To return a Series containining tuples instead of a DataFrame:\n\n >>> s.str.partition('-', expand=False)\n 0 (Linda van der Berg, , )\n 1 (George Pitt, -, Rivers)\n dtype: object\n\n Also available on indices:\n\n >>> idx = pd.Index(['X 123', 'Y 999'])\n >>> idx\n Index(['X 123', 'Y 999'], dtype='object')\n\n Which will create a MultiIndex:\n\n >>> idx.str.partition()\n MultiIndex(levels=[['X', 'Y'], [' '], ['123', '999']],\n codes=[[0, 1], [0, 0], [0, 1]])\n\n Or an index with tuples with ``expand=False``:\n\n >>> idx.str.partition(expand=False)\n Index([('X', ' ', '123'), ('Y', ' ', '999')], dtype='object')\n \"\"\")\n\n @Appender(_shared_docs['str_partition'] % {\n 'side': 'first',\n 'return': '3 elements containing the string itself, followed by two '\n 'empty strings',\n 'also': 'rpartition : Split the string at the last occurrence of '\n '`sep`.'\n })\n @deprecate_kwarg(old_arg_name='pat', new_arg_name='sep')\n def partition(self, sep=' ', expand=True):\n f = lambda x: x.partition(sep)\n result = _na_map(f, self._parent)\n return self._wrap_result(result, expand=expand)\n\n @Appender(_shared_docs['str_partition'] % {\n 
'side': 'last',\n 'return': '3 elements containing two empty strings, followed by the '\n 'string itself',\n 'also': 'partition : Split the string at the first occurrence of '\n '`sep`.'\n })\n @deprecate_kwarg(old_arg_name='pat', new_arg_name='sep')\n def rpartition(self, sep=' ', expand=True):\n f = lambda x: x.rpartition(sep)\n result = _na_map(f, self._parent)\n return self._wrap_result(result, expand=expand)\n\n @copy(str_get)\n def get(self, i):\n result = str_get(self._parent, i)\n return self._wrap_result(result)\n\n @copy(str_join)\n def join(self, sep):\n result = str_join(self._parent, sep)\n return self._wrap_result(result)\n\n @copy(str_contains)\n def contains(self, pat, case=True, flags=0, na=np.nan, regex=True):\n result = str_contains(self._parent, pat, case=case, flags=flags, na=na,\n regex=regex)\n return self._wrap_result(result, fill_value=na)\n\n @copy(str_match)\n def match(self, pat, case=True, flags=0, na=np.nan):\n result = str_match(self._parent, pat, case=case, flags=flags, na=na)\n return self._wrap_result(result, fill_value=na)\n\n @copy(str_replace)\n def replace(self, pat, repl, n=-1, case=None, flags=0, regex=True):\n result = str_replace(self._parent, pat, repl, n=n, case=case,\n flags=flags, regex=regex)\n return self._wrap_result(result)\n\n @copy(str_repeat)\n def repeat(self, repeats):\n result = str_repeat(self._parent, repeats)\n return self._wrap_result(result)\n\n @copy(str_pad)\n def pad(self, width, side='left', fillchar=' '):\n result = str_pad(self._parent, width, side=side, fillchar=fillchar)\n return self._wrap_result(result)\n\n _shared_docs['str_pad'] = (\"\"\"\n Filling %(side)s side of strings in the Series/Index with an\n additional character. Equivalent to :meth:`str.%(method)s`.\n\n Parameters\n ----------\n width : int\n Minimum width of resulting string; additional characters will be filled\n with ``fillchar``\n fillchar : str\n Additional character for filling, default is whitespace\n\n Returns\n -------\n filled : Series/Index of objects\n \"\"\")\n\n @Appender(_shared_docs['str_pad'] % dict(side='left and right',\n method='center'))\n def center(self, width, fillchar=' '):\n return self.pad(width, side='both', fillchar=fillchar)\n\n @Appender(_shared_docs['str_pad'] % dict(side='right', method='ljust'))\n def ljust(self, width, fillchar=' '):\n return self.pad(width, side='right', fillchar=fillchar)\n\n @Appender(_shared_docs['str_pad'] % dict(side='left', method='rjust'))\n def rjust(self, width, fillchar=' '):\n return self.pad(width, side='left', fillchar=fillchar)\n\n def zfill(self, width):\n \"\"\"\n Pad strings in the Series/Index by prepending '0' characters.\n\n Strings in the Series/Index are padded with '0' characters on the\n left of the string to reach a total string length `width`. 
Strings\n in the Series/Index with length greater or equal to `width` are\n unchanged.\n\n Parameters\n ----------\n width : int\n Minimum length of resulting string; strings with length less\n than `width` be prepended with '0' characters.\n\n Returns\n -------\n Series/Index of objects\n\n See Also\n --------\n Series.str.rjust : Fills the left side of strings with an arbitrary\n character.\n Series.str.ljust : Fills the right side of strings with an arbitrary\n character.\n Series.str.pad : Fills the specified sides of strings with an arbitrary\n character.\n Series.str.center : Fills boths sides of strings with an arbitrary\n character.\n\n Notes\n -----\n Differs from :meth:`str.zfill` which has special handling\n for '+'/'-' in the string.\n\n Examples\n --------\n >>> s = pd.Series(['-1', '1', '1000', 10, np.nan])\n >>> s\n 0 -1\n 1 1\n 2 1000\n 3 10\n 4 NaN\n dtype: object\n\n Note that ``10`` and ``NaN`` are not strings, therefore they are\n converted to ``NaN``. The minus sign in ``'-1'`` is treated as a\n regular character and the zero is added to the left of it\n (:meth:`str.zfill` would have moved it to the left). ``1000``\n remains unchanged as it is longer than `width`.\n\n >>> s.str.zfill(3)\n 0 0-1\n 1 001\n 2 1000\n 3 NaN\n 4 NaN\n dtype: object\n \"\"\"\n result = str_pad(self._parent, width, side='left', fillchar='0')\n return self._wrap_result(result)\n\n @copy(str_slice)\n def slice(self, start=None, stop=None, step=None):\n result = str_slice(self._parent, start, stop, step)\n return self._wrap_result(result)\n\n @copy(str_slice_replace)\n def slice_replace(self, start=None, stop=None, repl=None):\n result = str_slice_replace(self._parent, start, stop, repl)\n return self._wrap_result(result)\n\n @copy(str_decode)\n def decode(self, encoding, errors=\"strict\"):\n result = str_decode(self._parent, encoding, errors)\n return self._wrap_result(result)\n\n @copy(str_encode)\n def encode(self, encoding, errors=\"strict\"):\n result = str_encode(self._parent, encoding, errors)\n return self._wrap_result(result)\n\n _shared_docs['str_strip'] = (r\"\"\"\n Remove leading and trailing characters.\n\n Strip whitespaces (including newlines) or a set of specified characters\n from each string in the Series/Index from %(side)s.\n Equivalent to :meth:`str.%(method)s`.\n\n Parameters\n ----------\n to_strip : str or None, default None\n Specifying the set of characters to be removed.\n All combinations of this set of characters will be stripped.\n If None then whitespaces are removed.\n\n Returns\n -------\n Series/Index of objects\n\n See Also\n --------\n Series.str.strip : Remove leading and trailing characters in Series/Index.\n Series.str.lstrip : Remove leading characters in Series/Index.\n Series.str.rstrip : Remove trailing characters in Series/Index.\n\n Examples\n --------\n >>> s = pd.Series(['1. Ant. ', '2. Bee!\\n', '3. Cat?\\t', np.nan])\n >>> s\n 0 1. Ant.\n 1 2. Bee!\\n\n 2 3. Cat?\\t\n 3 NaN\n dtype: object\n\n >>> s.str.strip()\n 0 1. Ant.\n 1 2. Bee!\n 2 3. Cat?\n 3 NaN\n dtype: object\n\n >>> s.str.lstrip('123.')\n 0 Ant.\n 1 Bee!\\n\n 2 Cat?\\t\n 3 NaN\n dtype: object\n\n >>> s.str.rstrip('.!? \\n\\t')\n 0 1. Ant\n 1 2. Bee\n 2 3. Cat\n 3 NaN\n dtype: object\n\n >>> s.str.strip('123.!? 
\\n\\t')\n 0 Ant\n 1 Bee\n 2 Cat\n 3 NaN\n dtype: object\n \"\"\")\n\n @Appender(_shared_docs['str_strip'] % dict(side='left and right sides',\n method='strip'))\n def strip(self, to_strip=None):\n result = str_strip(self._parent, to_strip, side='both')\n return self._wrap_result(result)\n\n @Appender(_shared_docs['str_strip'] % dict(side='left side',\n method='lstrip'))\n def lstrip(self, to_strip=None):\n result = str_strip(self._parent, to_strip, side='left')\n return self._wrap_result(result)\n\n @Appender(_shared_docs['str_strip'] % dict(side='right side',\n method='rstrip'))\n def rstrip(self, to_strip=None):\n result = str_strip(self._parent, to_strip, side='right')\n return self._wrap_result(result)\n\n @copy(str_wrap)\n def wrap(self, width, **kwargs):\n result = str_wrap(self._parent, width, **kwargs)\n return self._wrap_result(result)\n\n @copy(str_get_dummies)\n def get_dummies(self, sep='|'):\n # we need to cast to Series of strings as only that has all\n # methods available for making the dummies...\n data = self._orig.astype(str) if self._is_categorical else self._parent\n result, name = str_get_dummies(data, sep)\n return self._wrap_result(result, use_codes=(not self._is_categorical),\n name=name, expand=True)\n\n @copy(str_translate)\n def translate(self, table):\n result = str_translate(self._parent, table)\n return self._wrap_result(result)\n\n count = _pat_wrapper(str_count, flags=True)\n startswith = _pat_wrapper(str_startswith, na=True)\n endswith = _pat_wrapper(str_endswith, na=True)\n findall = _pat_wrapper(str_findall, flags=True)\n\n @copy(str_extract)\n def extract(self, pat, flags=0, expand=True):\n return str_extract(self, pat, flags=flags, expand=expand)\n\n @copy(str_extractall)\n def extractall(self, pat, flags=0):\n return str_extractall(self._orig, pat, flags=flags)\n\n _shared_docs['find'] = (\"\"\"\n Return %(side)s indexes in each strings in the Series/Index\n where the substring is fully contained between [start:end].\n Return -1 on failure. Equivalent to standard :meth:`str.%(method)s`.\n\n Parameters\n ----------\n sub : str\n Substring being searched\n start : int\n Left edge index\n end : int\n Right edge index\n\n Returns\n -------\n found : Series/Index of integer values\n\n See Also\n --------\n %(also)s\n \"\"\")\n\n @Appender(_shared_docs['find'] %\n dict(side='lowest', method='find',\n also='rfind : Return highest indexes in each strings.'))\n def find(self, sub, start=0, end=None):\n result = str_find(self._parent, sub, start=start, end=end, side='left')\n return self._wrap_result(result)\n\n @Appender(_shared_docs['find'] %\n dict(side='highest', method='rfind',\n also='find : Return lowest indexes in each strings.'))\n def rfind(self, sub, start=0, end=None):\n result = str_find(self._parent, sub,\n start=start, end=end, side='right')\n return self._wrap_result(result)\n\n def normalize(self, form):\n \"\"\"\n Return the Unicode normal form for the strings in the Series/Index.\n For more information on the forms, see the\n :func:`unicodedata.normalize`.\n\n Parameters\n ----------\n form : {'NFC', 'NFKC', 'NFD', 'NFKD'}\n Unicode form\n\n Returns\n -------\n normalized : Series/Index of objects\n \"\"\"\n import unicodedata\n f = lambda x: unicodedata.normalize(form, x)\n result = _na_map(f, self._parent)\n return self._wrap_result(result)\n\n _shared_docs['index'] = (\"\"\"\n Return %(side)s indexes in each strings where the substring is\n fully contained between [start:end]. 
This is the same as\n ``str.%(similar)s`` except instead of returning -1, it raises a ValueError\n when the substring is not found. Equivalent to standard ``str.%(method)s``.\n\n Parameters\n ----------\n sub : str\n Substring being searched\n start : int\n Left edge index\n end : int\n Right edge index\n\n Returns\n -------\n found : Series/Index of objects\n\n See Also\n --------\n %(also)s\n \"\"\")\n\n @Appender(_shared_docs['index'] %\n dict(side='lowest', similar='find', method='index',\n also='rindex : Return highest indexes in each strings.'))\n def index(self, sub, start=0, end=None):\n result = str_index(self._parent, sub,\n start=start, end=end, side='left')\n return self._wrap_result(result)\n\n @Appender(_shared_docs['index'] %\n dict(side='highest', similar='rfind', method='rindex',\n also='index : Return lowest indexes in each strings.'))\n def rindex(self, sub, start=0, end=None):\n result = str_index(self._parent, sub,\n start=start, end=end, side='right')\n return self._wrap_result(result)\n\n _shared_docs['len'] = (\"\"\"\n Compute the length of each element in the Series/Index. The element may be\n a sequence (such as a string, tuple or list) or a collection\n (such as a dictionary).\n\n Returns\n -------\n Series or Index of int\n A Series or Index of integer values indicating the length of each\n element in the Series or Index.\n\n See Also\n --------\n str.len : Python built-in function returning the length of an object.\n Series.size : Returns the length of the Series.\n\n Examples\n --------\n Returns the length (number of characters) in a string. Returns the\n number of entries for dictionaries, lists or tuples.\n\n >>> s = pd.Series(['dog',\n ... '',\n ... 5,\n ... {'foo' : 'bar'},\n ... [2, 3, 5, 7],\n ... ('one', 'two', 'three')])\n >>> s\n 0 dog\n 1\n 2 5\n 3 {'foo': 'bar'}\n 4 [2, 3, 5, 7]\n 5 (one, two, three)\n dtype: object\n >>> s.str.len()\n 0 3.0\n 1 0.0\n 2 NaN\n 3 1.0\n 4 4.0\n 5 3.0\n dtype: float64\n \"\"\")\n len = _noarg_wrapper(len, docstring=_shared_docs['len'], dtype=int)\n\n _shared_docs['casemethods'] = (\"\"\"\n Convert strings in the Series/Index to %(type)s.\n %(version)s\n Equivalent to :meth:`str.%(method)s`.\n\n Returns\n -------\n Series/Index of objects\n\n See Also\n --------\n Series.str.lower : Converts all characters to lowercase.\n Series.str.upper : Converts all characters to uppercase.\n Series.str.title : Converts first character of each word to uppercase and\n remaining to lowercase.\n Series.str.capitalize : Converts first character to uppercase and\n remaining to lowercase.\n Series.str.swapcase : Converts uppercase to lowercase and lowercase to\n uppercase.\n Series.str.casefold: Removes all case distinctions in the string.\n\n Examples\n --------\n >>> s = pd.Series(['lower', 'CAPITALS', 'this is a sentence', 'SwApCaSe'])\n >>> s\n 0 lower\n 1 CAPITALS\n 2 this is a sentence\n 3 SwApCaSe\n dtype: object\n\n >>> s.str.lower()\n 0 lower\n 1 capitals\n 2 this is a sentence\n 3 swapcase\n dtype: object\n\n >>> s.str.upper()\n 0 LOWER\n 1 CAPITALS\n 2 THIS IS A SENTENCE\n 3 SWAPCASE\n dtype: object\n\n >>> s.str.title()\n 0 Lower\n 1 Capitals\n 2 This Is A Sentence\n 3 Swapcase\n dtype: object\n\n >>> s.str.capitalize()\n 0 Lower\n 1 Capitals\n 2 This is a sentence\n 3 Swapcase\n dtype: object\n\n >>> s.str.swapcase()\n 0 LOWER\n 1 capitals\n 2 THIS IS A SENTENCE\n 3 sWaPcAsE\n dtype: object\n \"\"\")\n\n # _doc_args holds dict of strings to use in substituting casemethod docs\n _doc_args = {} # type: Dict[str, Dict[str, 
str]]\n _doc_args['lower'] = dict(type='lowercase', method='lower', version='')\n _doc_args['upper'] = dict(type='uppercase', method='upper', version='')\n _doc_args['title'] = dict(type='titlecase', method='title', version='')\n _doc_args['capitalize'] = dict(type='be capitalized', method='capitalize',\n version='')\n _doc_args['swapcase'] = dict(type='be swapcased', method='swapcase',\n version='')\n _doc_args['casefold'] = dict(type='be casefolded', method='casefold',\n version='\\n .. versionadded:: 0.25.0\\n')\n lower = _noarg_wrapper(lambda x: x.lower(),\n docstring=_shared_docs['casemethods'] %\n _doc_args['lower'])\n upper = _noarg_wrapper(lambda x: x.upper(),\n docstring=_shared_docs['casemethods'] %\n _doc_args['upper'])\n title = _noarg_wrapper(lambda x: x.title(),\n docstring=_shared_docs['casemethods'] %\n _doc_args['title'])\n capitalize = _noarg_wrapper(lambda x: x.capitalize(),\n docstring=_shared_docs['casemethods'] %\n _doc_args['capitalize'])\n swapcase = _noarg_wrapper(lambda x: x.swapcase(),\n docstring=_shared_docs['casemethods'] %\n _doc_args['swapcase'])\n casefold = _noarg_wrapper(lambda x: x.casefold(),\n docstring=_shared_docs['casemethods'] %\n _doc_args['casefold'])\n\n _shared_docs['ismethods'] = (\"\"\"\n Check whether all characters in each string are %(type)s.\n\n This is equivalent to running the Python string method\n :meth:`str.%(method)s` for each element of the Series/Index. If a string\n has zero characters, ``False`` is returned for that check.\n\n Returns\n -------\n Series or Index of bool\n Series or Index of boolean values with the same length as the original\n Series/Index.\n\n See Also\n --------\n Series.str.isalpha : Check whether all characters are alphabetic.\n Series.str.isnumeric : Check whether all characters are numeric.\n Series.str.isalnum : Check whether all characters are alphanumeric.\n Series.str.isdigit : Check whether all characters are digits.\n Series.str.isdecimal : Check whether all characters are decimal.\n Series.str.isspace : Check whether all characters are whitespace.\n Series.str.islower : Check whether all characters are lowercase.\n Series.str.isupper : Check whether all characters are uppercase.\n Series.str.istitle : Check whether all characters are titlecase.\n\n Examples\n --------\n **Checks for Alphabetic and Numeric Characters**\n\n >>> s1 = pd.Series(['one', 'one1', '1', ''])\n\n >>> s1.str.isalpha()\n 0 True\n 1 False\n 2 False\n 3 False\n dtype: bool\n\n >>> s1.str.isnumeric()\n 0 False\n 1 False\n 2 True\n 3 False\n dtype: bool\n\n >>> s1.str.isalnum()\n 0 True\n 1 True\n 2 True\n 3 False\n dtype: bool\n\n Note that checks against characters mixed with any additional punctuation\n or whitespace will evaluate to false for an alphanumeric check.\n\n >>> s2 = pd.Series(['A B', '1.5', '3,000'])\n >>> s2.str.isalnum()\n 0 False\n 1 False\n 2 False\n dtype: bool\n\n **More Detailed Checks for Numeric Characters**\n\n There are several different but overlapping sets of numeric characters that\n can be checked for.\n\n >>> s3 = pd.Series(['23', '³', '⅕', ''])\n\n The ``s3.str.isdecimal`` method checks for characters used to form numbers\n in base 10.\n\n >>> s3.str.isdecimal()\n 0 True\n 1 False\n 2 False\n 3 False\n dtype: bool\n\n The ``s.str.isdigit`` method is the same as ``s3.str.isdecimal`` but also\n includes special digits, like superscripted and subscripted digits in\n unicode.\n\n >>> s3.str.isdigit()\n 0 True\n 1 True\n 2 False\n 3 False\n dtype: bool\n\n The ``s.str.isnumeric`` method is the same as 
``s3.str.isdigit`` but also\n includes other characters that can represent quantities such as unicode\n fractions.\n\n >>> s3.str.isnumeric()\n 0 True\n 1 True\n 2 True\n 3 False\n dtype: bool\n\n **Checks for Whitespace**\n\n >>> s4 = pd.Series([' ', '\\\\t\\\\r\\\\n ', ''])\n >>> s4.str.isspace()\n 0 True\n 1 True\n 2 False\n dtype: bool\n\n **Checks for Character Case**\n\n >>> s5 = pd.Series(['leopard', 'Golden Eagle', 'SNAKE', ''])\n\n >>> s5.str.islower()\n 0 True\n 1 False\n 2 False\n 3 False\n dtype: bool\n\n >>> s5.str.isupper()\n 0 False\n 1 False\n 2 True\n 3 False\n dtype: bool\n\n The ``s5.str.istitle`` method checks for whether all words are in title\n case (whether only the first letter of each word is capitalized). Words are\n assumed to be as any sequence of non-numeric characters seperated by\n whitespace characters.\n\n >>> s5.str.istitle()\n 0 False\n 1 True\n 2 False\n 3 False\n dtype: bool\n \"\"\")\n _doc_args['isalnum'] = dict(type='alphanumeric', method='isalnum')\n _doc_args['isalpha'] = dict(type='alphabetic', method='isalpha')\n _doc_args['isdigit'] = dict(type='digits', method='isdigit')\n _doc_args['isspace'] = dict(type='whitespace', method='isspace')\n _doc_args['islower'] = dict(type='lowercase', method='islower')\n _doc_args['isupper'] = dict(type='uppercase', method='isupper')\n _doc_args['istitle'] = dict(type='titlecase', method='istitle')\n _doc_args['isnumeric'] = dict(type='numeric', method='isnumeric')\n _doc_args['isdecimal'] = dict(type='decimal', method='isdecimal')\n isalnum = _noarg_wrapper(lambda x: x.isalnum(),\n docstring=_shared_docs['ismethods'] %\n _doc_args['isalnum'])\n isalpha = _noarg_wrapper(lambda x: x.isalpha(),\n docstring=_shared_docs['ismethods'] %\n _doc_args['isalpha'])\n isdigit = _noarg_wrapper(lambda x: x.isdigit(),\n docstring=_shared_docs['ismethods'] %\n _doc_args['isdigit'])\n isspace = _noarg_wrapper(lambda x: x.isspace(),\n docstring=_shared_docs['ismethods'] %\n _doc_args['isspace'])\n islower = _noarg_wrapper(lambda x: x.islower(),\n docstring=_shared_docs['ismethods'] %\n _doc_args['islower'])\n isupper = _noarg_wrapper(lambda x: x.isupper(),\n docstring=_shared_docs['ismethods'] %\n _doc_args['isupper'])\n istitle = _noarg_wrapper(lambda x: x.istitle(),\n docstring=_shared_docs['ismethods'] %\n _doc_args['istitle'])\n isnumeric = _noarg_wrapper(lambda x: x.isnumeric(),\n docstring=_shared_docs['ismethods'] %\n _doc_args['isnumeric'])\n isdecimal = _noarg_wrapper(lambda x: x.isdecimal(),\n docstring=_shared_docs['ismethods'] %\n _doc_args['isdecimal'])\n\n @classmethod\n def _make_accessor(cls, data):\n cls._validate(data)\n return cls(data)\n"
] | [
[
"pandas.timedelta_range",
"numpy.ones",
"pandas.Series",
"numpy.asarray",
"pandas.Categorical",
"pandas.util.testing.assert_series_equal",
"pandas.util.testing.assert_almost_equal",
"numpy.datetime64",
"pandas.unique",
"pandas.util.testing.assert_categorical_equal",
"numpy.append",
"numpy.timedelta64",
"pandas.to_datetime",
"pandas.core.reshape.tile._round_frac",
"numpy.linspace",
"pandas.Timestamp",
"numpy.eye",
"pandas.util.testing.assert_numpy_array_equal",
"pandas.api.types.CategoricalDtype",
"pandas.date_range",
"pandas.IntervalIndex.from_breaks",
"pandas.TimedeltaIndex",
"numpy.arange",
"pandas.Categorical.from_codes",
"pandas.cut",
"pandas.util.testing.assert_index_equal",
"pandas.DatetimeIndex",
"numpy.random.randn",
"pandas.IntervalIndex.from_tuples",
"numpy.iinfo",
"numpy.array",
"pandas.isna",
"pandas.Interval"
],
[
"numpy.ones",
"pandas.Series",
"pandas.array",
"pandas.util.testing.round_trip_pickle",
"pandas.util.testing.randn",
"numpy.dtype",
"numpy.take",
"numpy.asarray",
"pandas.Categorical",
"pandas.util.testing.assert_produces_warning",
"pandas.util.testing.assert_series_equal",
"numpy.datetime64",
"numpy.timedelta64",
"pandas._libs.internals.BlockPlacement",
"numpy.random.rand",
"pandas.Timestamp",
"pandas.SparseArray",
"pandas.util.testing.assert_numpy_array_equal",
"numpy.zeros",
"pandas.core.internals.make_block",
"pandas.core.algorithms.take_nd",
"numpy.repeat",
"numpy.arange",
"pandas.MultiIndex.from_tuples",
"pandas.util.testing.assert_index_equal",
"pandas.Index",
"numpy.array",
"pandas.DataFrame",
"pandas.core.internals.BlockManager",
"pandas.compat.lrange",
"pandas.MultiIndex"
],
[
"numpy.sum",
"pandas.core.common.values_from_object",
"pandas.Series",
"pandas.core.dtypes.common.is_re",
"numpy.asarray",
"pandas.core.dtypes.common.is_integer",
"numpy.logical_or.reduce",
"pandas._libs.lib.maybe_convert_objects",
"pandas.core.dtypes.common.is_list_like",
"pandas._libs.lib.map_infer",
"pandas.core.dtypes.common.is_categorical_dtype",
"pandas.util._decorators.deprecate_kwarg",
"numpy.ndarray",
"numpy.where",
"pandas.core.dtypes.common.ensure_object",
"pandas.core.dtypes.common.is_bool_dtype",
"pandas.MultiIndex.from_tuples",
"numpy.putmask",
"pandas.core.dtypes.common.is_scalar",
"pandas.core.dtypes.missing.isna",
"pandas.Index",
"pandas.util._decorators.Appender",
"pandas.DataFrame",
"pandas.core.dtypes.common.is_object_dtype",
"pandas.core.dtypes.common.is_string_like"
]
] |
rlaplaza/Deep-Reinforcement-Learning-for-Automated-Stock-Trading-Ensemble-Strategy-ICAIF-2020 | [
"5fe6b8554587320bc6044164270635166c93616d"
] | [
"env/EnvMultipleStock_train.py"
] | [
"import numpy as np\nimport pandas as pd\nfrom gym.utils import seeding\nimport gym\nfrom gym import spaces\nimport matplotlib\n\nmatplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\n\n# shares normalization factor\n# 100 shares per trade\nHMAX_NORMALIZE = 100\n# initial amount of money we have in our account\nINITIAL_ACCOUNT_BALANCE = 1000000\n# total number of stocks in our portfolio\nSTOCK_DIM = 30\n# transaction fee: 1/1000 reasonable percentage\nTRANSACTION_FEE_PERCENT = 0.001\nREWARD_SCALING = 1e-4\n\n\nclass StockEnvTrain(gym.Env):\n \"\"\"A stock trading environment for OpenAI gym\"\"\"\n\n metadata = {\"render.modes\": [\"human\"]}\n\n def __init__(self, df, day=0):\n # super(StockEnv, self).__init__()\n # money = 10 , scope = 1\n self.day = day\n self.df = df\n\n # action_space normalization and shape is STOCK_DIM\n self.action_space = spaces.Box(low=-1, high=1, shape=(STOCK_DIM,))\n # Shape = 181: [Current Balance]+[prices 1-30]+[owned shares 1-30]\n # +[macd 1-30]+ [rsi 1-30] + [cci 1-30] + [adx 1-30]\n self.observation_space = spaces.Box(low=0, high=np.inf, shape=(181,))\n # load data from a pandas dataframe\n self.data = self.df.loc[self.day, :]\n self.terminal = False\n # initalize state\n self.state = (\n [INITIAL_ACCOUNT_BALANCE]\n + self.data.adjcp.values.tolist()\n + [0] * STOCK_DIM\n + self.data.macd.values.tolist()\n + self.data.rsi.values.tolist()\n + self.data.cci.values.tolist()\n + self.data.adx.values.tolist()\n )\n # initialize reward\n self.reward = 0\n self.cost = 0\n # memorize all the total balance change\n self.asset_memory = [INITIAL_ACCOUNT_BALANCE]\n self.rewards_memory = []\n self.trades = 0\n # self.reset()\n self._seed()\n\n def _sell_stock(self, index, action):\n # perform sell action based on the sign of the action\n if self.state[index + STOCK_DIM + 1] > 0:\n # update balance\n self.state[0] += (\n self.state[index + 1]\n * min(abs(action), self.state[index + STOCK_DIM + 1])\n * (1 - TRANSACTION_FEE_PERCENT)\n )\n\n self.state[index + STOCK_DIM + 1] -= min(\n abs(action), self.state[index + STOCK_DIM + 1]\n )\n self.cost += (\n self.state[index + 1]\n * min(abs(action), self.state[index + STOCK_DIM + 1])\n * TRANSACTION_FEE_PERCENT\n )\n self.trades += 1\n else:\n pass\n\n def _buy_stock(self, index, action):\n # perform buy action based on the sign of the action\n available_amount = self.state[0] // self.state[index + 1]\n # print('available_amount:{}'.format(available_amount))\n\n # update balance\n self.state[0] -= (\n self.state[index + 1]\n * min(available_amount, action)\n * (1 + TRANSACTION_FEE_PERCENT)\n )\n\n self.state[index + STOCK_DIM + 1] += min(available_amount, action)\n\n self.cost += (\n self.state[index + 1]\n * min(available_amount, action)\n * TRANSACTION_FEE_PERCENT\n )\n self.trades += 1\n\n def step(self, actions):\n # print(self.day)\n self.terminal = self.day >= len(self.df.index.unique()) - 1\n # print(actions)\n\n if self.terminal:\n plt.plot(self.asset_memory, \"r\")\n plt.savefig(\"results/account_value_train.png\")\n plt.close()\n end_total_asset = self.state[0] + sum(\n np.array(self.state[1 : (STOCK_DIM + 1)])\n * np.array(self.state[(STOCK_DIM + 1) : (STOCK_DIM * 2 + 1)])\n )\n\n # print(\"end_total_asset:{}\".format(end_total_asset))\n df_total_value = pd.DataFrame(self.asset_memory)\n df_total_value.to_csv(\"results/account_value_train.csv\")\n # print(\"total_reward:{}\".format(self.state[0]+sum(np.array(self.state[1:(STOCK_DIM+1)])*np.array(self.state[(STOCK_DIM+1):61]))- INITIAL_ACCOUNT_BALANCE ))\n # 
print(\"total_cost: \", self.cost)\n # print(\"total_trades: \", self.trades)\n df_total_value.columns = [\"account_value\"]\n df_total_value[\"daily_return\"] = df_total_value.pct_change(1)\n sharpe = (\n (252 ** 0.5)\n * df_total_value[\"daily_return\"].mean()\n / df_total_value[\"daily_return\"].std()\n )\n # print(\"Sharpe: \",sharpe)\n # print(\"=================================\")\n df_rewards = pd.DataFrame(self.rewards_memory)\n # df_rewards.to_csv('results/account_rewards_train.csv')\n\n # print('total asset: {}'.format(self.state[0]+ sum(np.array(self.state[1:29])*np.array(self.state[29:]))))\n # with open('obs.pkl', 'wb') as f:\n # pickle.dump(self.state, f)\n\n return self.state, self.reward, self.terminal, {}\n\n else:\n # print(np.array(self.state[1:29]))\n\n actions = actions * HMAX_NORMALIZE\n # actions = (actions.astype(int))\n\n begin_total_asset = self.state[0] + sum(\n np.array(self.state[1 : (STOCK_DIM + 1)])\n * np.array(self.state[(STOCK_DIM + 1) : (STOCK_DIM * 2 + 1)])\n )\n # print(\"begin_total_asset:{}\".format(begin_total_asset))\n\n argsort_actions = np.argsort(actions)\n\n sell_index = argsort_actions[: np.where(actions < 0)[0].shape[0]]\n buy_index = argsort_actions[::-1][: np.where(actions > 0)[0].shape[0]]\n\n for index in sell_index:\n # print('take sell action'.format(actions[index]))\n self._sell_stock(index, actions[index])\n\n for index in buy_index:\n # print('take buy action: {}'.format(actions[index]))\n self._buy_stock(index, actions[index])\n\n self.day += 1\n self.data = self.df.loc[self.day, :]\n # load next state\n # print(\"stock_shares:{}\".format(self.state[29:]))\n self.state = (\n [self.state[0]]\n + self.data.adjcp.values.tolist()\n + list(self.state[(STOCK_DIM + 1) : (STOCK_DIM * 2 + 1)])\n + self.data.macd.values.tolist()\n + self.data.rsi.values.tolist()\n + self.data.cci.values.tolist()\n + self.data.adx.values.tolist()\n )\n\n end_total_asset = self.state[0] + sum(\n np.array(self.state[1 : (STOCK_DIM + 1)])\n * np.array(self.state[(STOCK_DIM + 1) : (STOCK_DIM * 2 + 1)])\n )\n self.asset_memory.append(end_total_asset)\n # print(\"end_total_asset:{}\".format(end_total_asset))\n\n self.reward = end_total_asset - begin_total_asset\n # print(\"step_reward:{}\".format(self.reward))\n self.rewards_memory.append(self.reward)\n\n self.reward = self.reward * REWARD_SCALING\n\n return self.state, self.reward, self.terminal, {}\n\n def reset(self):\n self.asset_memory = [INITIAL_ACCOUNT_BALANCE]\n self.day = 0\n self.data = self.df.loc[self.day, :]\n self.cost = 0\n self.trades = 0\n self.terminal = False\n self.rewards_memory = []\n # initiate state\n self.state = (\n [INITIAL_ACCOUNT_BALANCE]\n + self.data.adjcp.values.tolist()\n + [0] * STOCK_DIM\n + self.data.macd.values.tolist()\n + self.data.rsi.values.tolist()\n + self.data.cci.values.tolist()\n + self.data.adx.values.tolist()\n )\n # iteration += 1\n return self.state\n\n def render(self, mode=\"human\"):\n return self.state\n\n def _seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n"
] | [
[
"matplotlib.pyplot.savefig",
"pandas.DataFrame",
"numpy.argsort",
"matplotlib.pyplot.close",
"matplotlib.use",
"matplotlib.pyplot.plot",
"numpy.where",
"numpy.array"
]
] |
Ravnit202/PYJAC | [
"65987f8afd2e54e1b308b09f45f291e374e79bd2"
] | [
"Game/finger.py"
] | [
"import cv2\nimport mediapipe\nimport numpy\nimport pydirectinput\nclass FingerDetector:\n\n\n wScr, hScr = pydirectinput.size() #Get the current screen resolution\n pX, pY = 0, 0 \n cX, cY = 0, 0 \n\n def __init__(self):\n \"\"\"\n Initialize all objects\n \"\"\"\n #Load the mediapipe libraries/solutions\n self.initHand = mediapipe.solutions.hands\n self.mainHand = self.initHand.Hands(min_detection_confidence=0.7, min_tracking_confidence=0.7)\n self.draw = mediapipe.solutions.drawing_utils\n\n self.fingerTips = []\n self.img = None\n\n def handLandmarks(self, colorImg):\n \"\"\"\n Detect the hand landmarks\n \"\"\"\n landmarkList = []\n\n landmarkPositions = self.mainHand.process(colorImg) # Process the given image\n landmarkCheck = landmarkPositions.multi_hand_landmarks \n\n if landmarkCheck: # Checks if landmarks exist\n for index, hand in enumerate(landmarkCheck): # differentiate by hand\n for index, landmark in enumerate(hand.landmark): \n self.draw.draw_landmarks(self.img, hand, self.initHand.HAND_CONNECTIONS) \n h, w, c = self.img.shape \n centerX, centerY = int(landmark.x * w), int(landmark.y * h) \n landmarkList.append([index, centerX, centerY]) \n \n return landmarkList\n\n def fingers(self, landmarks):\n \"\"\"\n Check the action of the fingers\n \"\"\"\n fingerTips = []\n tipIds = [4, 8, 12, 16, 20] #Values for each fingertip\n \n #Check if the thumb is up\n if landmarks[tipIds[0]][1] > self.lmList[tipIds[0] - 1][1]:\n fingerTips.append(1)\n else:\n fingerTips.append(0)\n \n #Check if fingers are up and the thumb is down\n for id in range(1, 5):\n if landmarks[tipIds[id]][2] < landmarks[tipIds[id] - 3][2]: # Checks to see if the tip of the finger is higher than the joint\n fingerTips.append(1)\n else:\n fingerTips.append(0)\n\n return fingerTips\n\n\n def fingerDetection(self, frame):\n \"\"\"\n Detect the fingers positions through the frame\n \"\"\"\n frame = cv2.flip(frame, 1)\n self.img = frame\n imgRGB = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) # Changes the format of the frames from BGR to RGB\n \n self.lmList = self.handLandmarks(imgRGB)\n\n if len(self.lmList) > 12:\n x1, y1 = self.lmList[8][1:] \n finger = self.fingers(self.lmList) \n if finger[1] == 1 and finger[2] == 0: \n x3 = numpy.interp(x1, (75, 720 - 75), (75, self.wScr)) # Converts the width of the window relative to the screen width\n y3 = numpy.interp(y1, (75, 560 - 75), (75, self.hScr)) # Converts the height of the window relative to the screen height\n \n cX = self.pX + (x3 - self.pX) /2 # Smooth out the mouse x movement\n cY = self.pY + (y3 - self.pY) /2 # Smooth out the mouse y movement\n\n pydirectinput.moveTo(int(cX), int(cY)) #Move the mouse using pydirectinput\n self.pX, self.pY = cX, cY # Save the current x and y values\n\n if finger[1] == 0 and finger[0] == 1: # Check if the pointer finger is down and the thumb finger is up\n pydirectinput.rightClick()\n \n return\n"
] | [
[
"numpy.interp"
]
] |
LiPengze97/oneflow | [
"1c1d2d3faa1c02d20e009046a290cf1095ee12e0"
] | [
"python/oneflow/test/modules/test_ne.py"
] | [
"\"\"\"\nCopyright 2020 The OneFlow Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport unittest\nfrom collections import OrderedDict\n\nimport numpy as np\nfrom test_util import GenArgList\n\nimport oneflow as flow\nimport oneflow.unittest\n\nfrom oneflow.test_utils.automated_test_util import *\n\n\ndef _test_ne(test_case, shape, device):\n arr1 = np.random.randn(*shape)\n arr2 = np.random.randn(*shape)\n input = flow.tensor(arr1, dtype=flow.float32, device=flow.device(device))\n other = flow.tensor(arr2, dtype=flow.float32, device=flow.device(device))\n of_out = flow.ne(input, other)\n of_out2 = flow.not_equal(input, other)\n np_out = np.not_equal(arr1, arr2)\n test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))\n test_case.assertTrue(np.array_equal(of_out2.numpy(), np_out))\n\n\ndef _test_tensor_ne_operator(test_case, shape, device):\n arr1 = np.random.randn(*shape)\n arr2 = np.random.randn(*shape)\n input = flow.tensor(arr1, dtype=flow.float32, device=flow.device(device))\n other = flow.tensor(arr2, dtype=flow.float32, device=flow.device(device))\n of_out = input.ne(other)\n np_out = np.not_equal(arr1, arr2)\n test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))\n\n\ndef _test_ne_int(test_case, shape, device):\n arr = np.random.randn(*shape)\n input = flow.tensor(arr, dtype=flow.float32, device=flow.device(device))\n num = 1\n of_out = flow.ne(input, num)\n np_out = np.not_equal(arr, num)\n test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))\n\n\ndef _test_tensor_ne_operator_int(test_case, shape, device):\n arr = np.random.randn(*shape)\n input = flow.tensor(arr, dtype=flow.float32, device=flow.device(device))\n num = 1\n of_out = input.ne(num)\n np_out = np.not_equal(arr, num)\n test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))\n\n\ndef _test_ne_float(test_case, shape, device):\n arr = np.random.randn(*shape)\n input = flow.tensor(arr, dtype=flow.float32, device=flow.device(device))\n num = 1.0\n of_out = flow.ne(input, num)\n np_out = np.not_equal(arr, num)\n test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))\n\n\ndef _test_tensor_ne_operator_float(test_case, shape, device):\n arr = np.random.randn(*shape)\n input = flow.tensor(arr, dtype=flow.float32, device=flow.device(device))\n num = 1.0\n of_out = input.ne(num)\n np_out = np.not_equal(arr, num)\n test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))\n\n\[email protected]_unless_1n1d()\nclass TestNe(flow.unittest.TestCase):\n def test_ne(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"test_func\"] = [\n _test_ne,\n _test_tensor_ne_operator,\n _test_ne_int,\n _test_tensor_ne_operator_int,\n _test_ne_float,\n _test_tensor_ne_operator_float,\n ]\n arg_dict[\"shape\"] = [(2, 3), (2, 3, 4), (2, 4, 5, 6)]\n arg_dict[\"device\"] = [\"cpu\", \"cuda\"]\n for arg in GenArgList(arg_dict):\n arg[0](test_case, *arg[1:])\n\n @autotest(auto_backward=False, check_graph=False)\n def test_ne_with_0shape_data(test_case):\n device = random_device()\n x1 = 
random_pytorch_tensor(4, 2, 3, 0, 5).to(device)\n x2 = random_pytorch_tensor(4, 2, 3, 0, 5).to(device)\n y1 = torch.ne(x1, x2)\n y2 = torch.ne(x1, 2)\n y3 = torch.ne(x1, 2.0)\n return (y1, y2, y3)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] | [
[
"numpy.not_equal",
"numpy.random.randn"
]
] |
Ray0089/PSGMN | [
"0363d558add24034e035d26121e2e1b61d97c198"
] | [
"utils/utils.py"
] | [
"# import PIL\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\nimport cv2\nimport torch\nfrom torch_geometric.data import Data\n\n\ndef load_ply(path):\n \"\"\"\n Loads a 3D mesh model from a PLY file.\n\n :param path: Path to a PLY file.\n :return: The loaded model given by a dictionary with items:\n 'pts' (nx3 ndarray), 'normals' (nx3 ndarray), 'colors' (nx3 ndarray),\n 'faces' (mx3 ndarray) - the latter three are optional.\n \"\"\"\n f = open(path, 'r')\n\n n_pts = 0\n n_faces = 0\n face_n_corners = 3 # Only triangular faces are supported\n pt_props = []\n face_props = []\n is_binary = False\n header_vertex_section = False\n header_face_section = False\n\n # Read header\n while True:\n line = f.readline().rstrip('\\n').rstrip('\\r') # Strip the newline character(s)\n if line.startswith('element vertex'):\n n_pts = int(line.split()[-1])\n header_vertex_section = True\n header_face_section = False\n elif line.startswith('element face'):\n n_faces = int(line.split()[-1])\n header_vertex_section = False\n header_face_section = True\n elif line.startswith('element'): # Some other element\n header_vertex_section = False\n header_face_section = False\n elif line.startswith('property') and header_vertex_section:\n # (name of the property, data type)\n pt_props.append((line.split()[-1], line.split()[-2]))\n elif line.startswith('property list') and header_face_section:\n elems = line.split()\n if elems[-1] == 'vertex_indices':\n # (name of the property, data type)\n face_props.append(('n_corners', elems[2]))\n for i in range(face_n_corners):\n face_props.append(('ind_' + str(i), elems[3]))\n else:\n print('Warning: Not supported face property: ' + elems[-1])\n elif line.startswith('format'):\n if 'binary' in line:\n is_binary = True\n elif line.startswith('end_header'):\n break\n\n # Prepare data structures\n model = {}\n model['pts'] = np.zeros((n_pts, 3), np.float)\n if n_faces > 0:\n model['faces'] = np.zeros((n_faces, face_n_corners), np.float)\n\n pt_props_names = [p[0] for p in pt_props]\n is_normal = False\n if {'nx', 'ny', 'nz'}.issubset(set(pt_props_names)):\n is_normal = True\n model['normals'] = np.zeros((n_pts, 3), np.float)\n\n is_color = False\n if {'red', 'green', 'blue'}.issubset(set(pt_props_names)):\n is_color = True\n model['colors'] = np.zeros((n_pts, 3), np.float)\n\n is_texture = False\n if {'texture_u', 'texture_v'}.issubset(set(pt_props_names)):\n is_texture = True\n model['texture_uv'] = np.zeros((n_pts, 2), np.float)\n\n formats = { # For binary format\n 'float': ('f', 4),\n 'double': ('d', 8),\n 'int': ('i', 4),\n 'uchar': ('B', 1)\n }\n\n # Load vertices\n for pt_id in range(n_pts):\n prop_vals = {}\n load_props = ['x', 'y', 'z', 'nx', 'ny', 'nz',\n 'red', 'green', 'blue', 'texture_u', 'texture_v']\n if is_binary:\n for prop in pt_props:\n format = formats[prop[1]]\n val = struct.unpack(format[0], f.read(format[1]))[0]\n if prop[0] in load_props:\n prop_vals[prop[0]] = val\n else:\n elems = f.readline().rstrip('\\n').rstrip('\\r').split()\n for prop_id, prop in enumerate(pt_props):\n if prop[0] in load_props:\n prop_vals[prop[0]] = elems[prop_id]\n\n model['pts'][pt_id, 0] = float(prop_vals['x'])\n model['pts'][pt_id, 1] = float(prop_vals['y'])\n model['pts'][pt_id, 2] = float(prop_vals['z'])\n\n if is_normal:\n model['normals'][pt_id, 0] = float(prop_vals['nx'])\n model['normals'][pt_id, 1] = float(prop_vals['ny'])\n model['normals'][pt_id, 2] = float(prop_vals['nz'])\n\n if is_color:\n model['colors'][pt_id, 0] = float(prop_vals['red'])\n 
model['colors'][pt_id, 1] = float(prop_vals['green'])\n model['colors'][pt_id, 2] = float(prop_vals['blue'])\n\n if is_texture:\n model['texture_uv'][pt_id, 0] = float(prop_vals['texture_u'])\n model['texture_uv'][pt_id, 1] = float(prop_vals['texture_v'])\n\n # Load faces\n for face_id in range(n_faces):\n prop_vals = {}\n if is_binary:\n for prop in face_props:\n format = formats[prop[1]]\n val = struct.unpack(format[0], f.read(format[1]))[0]\n if prop[0] == 'n_corners':\n if val != face_n_corners:\n print('Error: Only triangular faces are supported.')\n print('Number of face corners: ' + str(val))\n exit(-1)\n else:\n prop_vals[prop[0]] = val\n else:\n elems = f.readline().rstrip('\\n').rstrip('\\r').split()\n for prop_id, prop in enumerate(face_props):\n if prop[0] == 'n_corners':\n if int(elems[prop_id]) != face_n_corners:\n print('Error: Only triangular faces are supported.')\n print('Number of face corners: ' + str(int(elems[prop_id])))\n exit(-1)\n else:\n prop_vals[prop[0]] = elems[prop_id]\n\n model['faces'][face_id, 0] = int(prop_vals['ind_0'])\n model['faces'][face_id, 1] = int(prop_vals['ind_1'])\n model['faces'][face_id, 2] = int(prop_vals['ind_2'])\n\n f.close()\n\n return model\n\ndef read_ply_to_data(path):\n\n model = load_ply(path)\n mean=[0.485, 0.456, 0.406] \n std=[0.229, 0.224, 0.225]\n x = model['colors']\n\n x = x / 255.0\n x -= mean\n x /= std\n x = np.concatenate([x,model['pts'],model['normals']],axis=-1)\n x = torch.tensor(x,dtype=torch.float32)\n \n pos = torch.tensor(model['pts'],dtype=torch.float32)\n face = torch.tensor(model['faces'],dtype=torch.long).transpose(1,0)\n data = Data(x = x, pos=pos,face = face)\n return data\n\ndef read_mask(path, split, cls_idx=1):\n if split == \"train\" or split == \"test\":\n return (np.asarray(Image.open(path))[:, :, 0] != 0).astype(np.uint8)\n elif split == \"fuse\":\n return (np.asarray(Image.open(path)) == cls_idx).astype(np.uint8)\n elif split == \"render\":\n return (np.asarray(Image.open(path))).astype(np.uint8)\n\n\ndef mask_iou(self, output, batch):\n mask_pred = torch.argmax(output[\"seg\"], dim=1)[0].detach().cpu().numpy()\n mask_gt = batch[\"mask\"][0].detach().cpu().numpy()\n iou = (mask_pred & mask_gt).sum() / (mask_pred | mask_gt).sum()\n self.mask_ap.append(iou > 0.7)\n\n\ndef softmax(x):\n \"\"\"Compute softmax values for each sets of scores in x.\"\"\"\n return np.exp(x) / np.sum(np.exp(x), axis=0)\n\ndef cal_error(S, y, img_shape=(480, 640)):\n S = S[:, y[0, :, 0], :]\n S = S.detach().cpu().numpy()\n y = y.detach().cpu().numpy()\n S = np.argmax(S, axis=-1)\n S = S.reshape(-1)\n y = y[:, :, 1].reshape(-1)\n\n gt_pos = []\n for idx in y:\n v = math.floor(idx / img_shape[1])\n u = idx - img_shape[1] * v\n gt_pos.append([u, v])\n\n est_pos = []\n for idx in S:\n v = math.floor(idx / (img_shape[1] / 2)) * 2\n u = (idx - img_shape[1] / 2 * (v / 2)) * 2\n est_pos.append([u, v])\n \n gt_pos = np.array(gt_pos, dtype=np.float32)\n est_pos = np.array(est_pos, dtype=np.float32)\n error = np.abs(gt_pos - est_pos)\n dist = np.sqrt(error[0] ** 2 + error[1] ** 2)\n avg_error = np.mean(dist)\n sigma = np.std(dist)\n\n return avg_error, sigma\n\n\ndef project(xyz, K, RT):\n \"\"\"\n xyz: [N, 3]\n K: [3, 3]\n RT: [3, 4]\n \"\"\"\n xyz = np.dot(xyz, RT[:, :3].T) + RT[:, 3:].T\n xyz = np.dot(xyz, K.T)\n xy = xyz[:, :2] / xyz[:, 2:]\n return xy\n\n\ndef mesh_project(xyz, K, RT):\n \"\"\"\n xyz: [N, 3]\n K: [3, 3]\n RT: [3, 4]\n \"\"\"\n xyz = xyz.astype(np.float32)\n K = K.astype(np.float32)\n RT = RT.astype(np.float32)\n xyz = 
np.dot(xyz, RT[:, :3].T) + RT[:, 3:].T\n z = xyz[:, 2].copy()\n xyz = np.dot(xyz, K.astype(np.float32).T)\n xyz = xyz / xyz[:, 2:]\n\n xyz[:, 2] = z\n return xyz\n\ndef find_neighborhold_node(model):\n pts = model[\"pts\"]\n faces = model[\"faces\"]\n neighbors = [[] for i in range(pts.shape[0])]\n for i in range(pts.shape[0]):\n dim0, dim1 = np.where(faces == i)\n for idx in faces[dim0]:\n for id in idx:\n if id not in neighbors[i] and id != i:\n neighbors[i].append(id)\n\n return neighbors\n\n\ndef bbox_from_mask(mask_img, stride=0):\n\n mask_img = np.array(mask_img)\n mask = mask_img[:, :, 0]\n img_shape = mask.shape\n coor = np.nonzero(mask)\n coor[0].sort()\n xmin = coor[0][0]\n xmax = coor[0][-1]\n coor[1].sort()\n ymin = coor[1][0]\n ymax = coor[1][-1]\n\n if xmin >= stride:\n xmin -= stride\n else:\n xmin = 0\n if xmax + stride <= img_shape[0]:\n xmax += stride\n else:\n xmax = img_shape[0]\n\n if ymin >= stride:\n ymin -= stride\n else:\n ymin = 0\n\n if ymax + stride <= img_shape[1]:\n ymax += stride\n else:\n ymax = img_shape[1]\n\n return xmax, ymax, xmin, ymin\n\n\ndef concate_graph(x, edge, attribute):\n\n batch_size = x.shape[0]\n x_num = 0\n if x.ndim == 3:\n x_num = x.shape[1]\n elif x.ndim == 4:\n x_num = x.shape[1] * x.shape[2]\n x = x.reshape(-1, x.shape[-1])\n for i in range(batch_size):\n edge[i, :, :] += i * x_num\n\n edge = edge.permute(0, 2, 1)\n edge = edge.reshape(-1, 2)\n edge = edge.permute(1, 0)\n attribute = attribute.reshape(-1, attribute.shape[-1])\n\n return [x, edge, attribute]\n\n\ndef adjust_learning_rate(optimizer, epoch, init_lr):\n \"\"\"Sets the learning rate to the initial LR decayed by 10 every 30 epochs\"\"\"\n lr = init_lr * (0.5 ** (epoch // 20))\n print(\"LR:{}\".format(lr))\n for param_group in optimizer.param_groups:\n param_group[\"lr\"] = lr\n\n\ndef draw_error(S, y, image):\n\n S = S[:, y[0, :, 0], :]\n S = S.detach().cpu().numpy()\n batch_size = S.shape[0]\n y = y.detach().cpu().numpy()\n img = image.detach().cpu().numpy()[0]\n\n S = np.argmax(S, axis=-1)\n S = S.reshape(-1)\n y = y[:, :, 1].reshape(-1)\n gt_pos = []\n for idx in y:\n v = math.floor(idx / img.shape[1])\n u = idx - img.shape[1] * v\n gt_pos.append([u, v])\n est_pos = []\n for idx in S:\n v = math.floor(idx / (img.shape[1] / 2)) * 2\n u = (idx - img.shape[1] / 2 * (v / 2)) * 2\n est_pos.append([u, v])\n gt_pos = np.array(gt_pos, dtype=np.float32)\n est_pos = np.array(est_pos, dtype=np.float32)\n\n\nif __name__ == \"__main__\":\n\n img = plt.imread(\"/home/ray/data/LINEMOD/ape/mask/0000.png\")\n img = np.array(img)\n bbox_from_mask(img)\n"
] | [
[
"numpy.sqrt",
"matplotlib.pyplot.imread",
"numpy.zeros",
"numpy.dot",
"torch.argmax",
"torch.tensor",
"numpy.abs",
"numpy.argmax",
"numpy.exp",
"numpy.where",
"numpy.array",
"numpy.std",
"numpy.concatenate",
"numpy.nonzero",
"numpy.mean"
]
] |
vumichien/hummingbird | [
"8981e11ce2536167c329a5d9d20e81125a792fe4"
] | [
"tests/test_sklearn_pipeline.py"
] | [
"import unittest\nimport numpy as np\nfrom sklearn import datasets\n\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.datasets import load_iris, load_diabetes\nfrom sklearn.svm import LinearSVC, LinearSVR\nfrom sklearn.datasets import make_regression\nfrom sklearn.decomposition import PCA\nfrom sklearn.linear_model import LogisticRegression, RidgeCV\nfrom sklearn.ensemble import RandomForestRegressor, RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline\nfrom sklearn.preprocessing import OneHotEncoder, StandardScaler, MinMaxScaler\n\nimport hummingbird.ml\nfrom hummingbird.ml._utils import pandas_installed, onnx_runtime_installed\nfrom hummingbird.ml import constants\n\nfrom onnxconverter_common.data_types import (\n FloatTensorType,\n Int64TensorType,\n StringTensorType,\n)\n\ntry:\n from sklearn.impute import SimpleImputer\nexcept ImportError:\n from sklearn.preprocessing import Imputer as SimpleImputer\n\ntry:\n from sklearn.ensemble import StackingClassifier, StackingRegressor\nexcept ImportError:\n StackingClassifier = None\n\nif pandas_installed():\n import pandas\n\n\nclass TestSklearnPipeline(unittest.TestCase):\n def test_pipeline(self):\n data = np.array([[0, 0], [0, 0], [1, 1], [1, 1]], dtype=np.float32)\n scaler = StandardScaler()\n scaler.fit(data)\n model = Pipeline([(\"scaler1\", scaler), (\"scaler2\", scaler)])\n\n torch_model = hummingbird.ml.convert(model, \"torch\")\n\n self.assertTrue(torch_model is not None)\n\n np.testing.assert_allclose(\n model.transform(data), torch_model.transform(data), rtol=1e-06, atol=1e-06,\n )\n\n def test_pipeline2(self):\n data = np.array([[0.0, 0.0], [0.0, 0.0], [1.0, 1.0], [1.0, 1.0]], dtype=np.float32)\n scaler = StandardScaler()\n scaler.fit(data)\n model = Pipeline([(\"scaler1\", scaler), (\"scaler2\", scaler)])\n\n torch_model = hummingbird.ml.convert(model, \"torch\")\n\n self.assertTrue(torch_model is not None)\n\n np.testing.assert_allclose(\n model.transform(data), torch_model.transform(data), rtol=1e-06, atol=1e-06,\n )\n\n def test_combine_inputs_union_in_pipeline(self):\n from sklearn.preprocessing import StandardScaler\n from sklearn.pipeline import Pipeline\n\n data = np.array([[0.0, 0.0], [0.0, 0.0], [1.0, 1.0], [1.0, 1.0]], dtype=np.float32)\n model = Pipeline(\n [\n (\"scaler1\", StandardScaler()),\n (\"union\", FeatureUnion([(\"scaler2\", StandardScaler()), (\"scaler3\", MinMaxScaler())])),\n ]\n )\n model.fit(data)\n\n torch_model = hummingbird.ml.convert(model, \"torch\")\n\n self.assertTrue(torch_model is not None)\n\n np.testing.assert_allclose(\n model.transform(data), torch_model.transform(data), rtol=1e-06, atol=1e-06,\n )\n\n def test_combine_inputs_floats_ints(self):\n data = [[0, 0.0], [0, 0.0], [1, 1.0], [1, 1.0]]\n scaler = StandardScaler()\n scaler.fit(data)\n model = Pipeline([(\"scaler1\", scaler), (\"scaler2\", scaler)])\n\n torch_model = hummingbird.ml.convert(model, \"torch\")\n\n self.assertTrue(torch_model is not None)\n\n np.testing.assert_allclose(\n model.transform(data), torch_model.transform(data), rtol=1e-06, atol=1e-06,\n )\n\n @unittest.skipIf(not pandas_installed(), reason=\"Test requires pandas installed\")\n def test_pipeline_column_transformer_1(self):\n iris = datasets.load_iris()\n X = iris.data[:, :3]\n y = iris.target\n X_train = pandas.DataFrame(X, columns=[\"vA\", \"vB\", \"vC\"])\n X_train[\"vcat\"] = X_train[\"vA\"].apply(lambda x: 1 if x > 0.5 else 2)\n X_train[\"vcat2\"] = 
X_train[\"vB\"].apply(lambda x: 3 if x > 0.5 else 4)\n y_train = y % 2\n numeric_features = [0, 1, 2] # [\"vA\", \"vB\", \"vC\"]\n\n classifier = LogisticRegression(\n C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver=\"liblinear\", tol=1e-3,\n )\n\n numeric_transformer = Pipeline(steps=[(\"scaler\", StandardScaler())])\n\n preprocessor = ColumnTransformer(transformers=[(\"num\", numeric_transformer, numeric_features)])\n\n model = Pipeline(steps=[(\"precprocessor\", preprocessor), (\"classifier\", classifier)])\n\n model.fit(X_train, y_train)\n\n X_test = X_train[:11]\n\n torch_model = hummingbird.ml.convert(model, \"torch\")\n\n self.assertTrue(torch_model is not None)\n\n np.testing.assert_allclose(\n model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,\n )\n\n @unittest.skipIf(not pandas_installed(), reason=\"Test requires pandas installed\")\n def test_pipeline_column_transformer_string(self):\n \"\"\"\n TODO: Hummingbird does not yet support strings in this context. Should raise error.\n When this feature is complete, change this test.\n \"\"\"\n # fit\n titanic_url = \"https://raw.githubusercontent.com/amueller/scipy-2017-sklearn/091d371/notebooks/datasets/titanic3.csv\"\n data = pandas.read_csv(titanic_url)\n X = data.drop(\"survived\", axis=1)\n y = data[\"survived\"]\n # SimpleImputer on string is not available for string\n # in ONNX-ML specifications.\n # So we do it beforehand.\n X[\"pclass\"].fillna(\"missing\", inplace=True)\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n\n numeric_features = [\"age\", \"fare\"]\n numeric_transformer = Pipeline(steps=[(\"imputer\", SimpleImputer(strategy=\"median\")), (\"scaler\", StandardScaler())])\n\n categorical_features = [\"pclass\"]\n categorical_transformer = Pipeline(steps=[(\"onehot\", OneHotEncoder(handle_unknown=\"ignore\"))])\n\n preprocessor = ColumnTransformer(\n transformers=[\n (\"num\", numeric_transformer, numeric_features),\n (\"cat\", categorical_transformer, categorical_features),\n ]\n )\n\n clf = Pipeline(steps=[(\"preprocessor\", preprocessor), (\"classifier\", LogisticRegression(solver=\"liblinear\"))])\n\n to_drop = {\"parch\", \"sibsp\", \"cabin\", \"ticket\", \"name\", \"body\", \"home.dest\", \"boat\", \"sex\", \"embarked\"}\n\n X_train = X_train.copy()\n X_test = X_test.copy()\n X_train[\"pclass\"] = X_train[\"pclass\"].astype(np.int64)\n X_test[\"pclass\"] = X_test[\"pclass\"].astype(np.int64)\n X_train = X_train.drop(to_drop, axis=1)\n X_test = X_test.drop(to_drop, axis=1)\n\n clf.fit(X_train, y_train)\n\n torch_model = hummingbird.ml.convert(clf, \"torch\", X_test)\n\n self.assertTrue(torch_model is not None)\n\n np.testing.assert_allclose(\n clf.predict(X_test), torch_model.predict(X_test), rtol=1e-06, atol=1e-06,\n )\n\n @unittest.skipIf(not pandas_installed(), reason=\"Test requires pandas installed\")\n def test_pipeline_column_transformer(self):\n iris = datasets.load_iris()\n X = iris.data[:, :3]\n y = iris.target\n X_train = pandas.DataFrame(X, columns=[\"vA\", \"vB\", \"vC\"])\n X_train[\"vcat\"] = X_train[\"vA\"].apply(lambda x: 1 if x > 0.5 else 2)\n X_train[\"vcat2\"] = X_train[\"vB\"].apply(lambda x: 3 if x > 0.5 else 4)\n y_train = y % 2\n numeric_features = [0, 1, 2] # [\"vA\", \"vB\", \"vC\"]\n categorical_features = [3, 4] # [\"vcat\", \"vcat2\"]\n\n classifier = LogisticRegression(\n C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, 
solver=\"liblinear\", tol=1e-3,\n )\n\n numeric_transformer = Pipeline(steps=[(\"scaler\", StandardScaler())])\n\n categorical_transformer = Pipeline(steps=[(\"onehot\", OneHotEncoder(sparse=True, handle_unknown=\"ignore\"))])\n\n preprocessor = ColumnTransformer(\n transformers=[\n (\"num\", numeric_transformer, numeric_features),\n (\"cat\", categorical_transformer, categorical_features),\n ]\n )\n\n model = Pipeline(steps=[(\"precprocessor\", preprocessor), (\"classifier\", classifier)])\n\n model.fit(X_train, y_train)\n\n X_test = X_train[:11]\n\n torch_model = hummingbird.ml.convert(model, \"torch\")\n\n self.assertTrue(torch_model is not None)\n\n np.testing.assert_allclose(\n model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,\n )\n\n @unittest.skipIf(not pandas_installed(), reason=\"Test requires pandas installed\")\n def test_pipeline_column_transformer_pandas(self):\n iris = datasets.load_iris()\n X = iris.data[:, :3]\n y = iris.target\n X_train = pandas.DataFrame(X, columns=[\"vA\", \"vB\", \"vC\"])\n X_train[\"vcat\"] = X_train[\"vA\"].apply(lambda x: 1 if x > 0.5 else 2)\n X_train[\"vcat2\"] = X_train[\"vB\"].apply(lambda x: 3 if x > 0.5 else 4)\n y_train = y % 2\n numeric_features = [0, 1, 2] # [\"vA\", \"vB\", \"vC\"]\n categorical_features = [3, 4] # [\"vcat\", \"vcat2\"]\n\n classifier = LogisticRegression(\n C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver=\"liblinear\", tol=1e-3,\n )\n\n numeric_transformer = Pipeline(steps=[(\"scaler\", StandardScaler())])\n\n categorical_transformer = Pipeline(steps=[(\"onehot\", OneHotEncoder(sparse=True, handle_unknown=\"ignore\"))])\n\n preprocessor = ColumnTransformer(\n transformers=[\n (\"num\", numeric_transformer, numeric_features),\n (\"cat\", categorical_transformer, categorical_features),\n ]\n )\n\n model = Pipeline(steps=[(\"precprocessor\", preprocessor), (\"classifier\", classifier)])\n\n model.fit(X_train, y_train)\n\n X_test = X_train[:11]\n\n torch_model = hummingbird.ml.convert(model, \"torch\", X_test)\n\n self.assertTrue(torch_model is not None)\n\n np.testing.assert_allclose(\n model.predict_proba(X_test), torch_model.predict_proba(X_test), rtol=1e-06, atol=1e-06,\n )\n\n @unittest.skipIf(not pandas_installed(), reason=\"Test requires pandas installed\")\n def test_pipeline_column_transformer_pandas_ts(self):\n iris = datasets.load_iris()\n X = np.array(iris.data[:, :3], np.float32) # If we don't use float32 here, with python 3.5 and torch 1.5.1 will fail.\n y = iris.target\n X_train = pandas.DataFrame(X, columns=[\"vA\", \"vB\", \"vC\"])\n X_train[\"vcat\"] = X_train[\"vA\"].apply(lambda x: 1 if x > 0.5 else 2)\n X_train[\"vcat2\"] = X_train[\"vB\"].apply(lambda x: 3 if x > 0.5 else 4)\n y_train = y % 2\n numeric_features = [0, 1, 2] # [\"vA\", \"vB\", \"vC\"]\n categorical_features = [3, 4] # [\"vcat\", \"vcat2\"]\n\n classifier = LogisticRegression(\n C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver=\"liblinear\", tol=1e-3,\n )\n\n numeric_transformer = Pipeline(steps=[(\"scaler\", StandardScaler())])\n\n categorical_transformer = Pipeline(steps=[(\"onehot\", OneHotEncoder(sparse=True, handle_unknown=\"ignore\"))])\n\n preprocessor = ColumnTransformer(\n transformers=[\n (\"num\", numeric_transformer, numeric_features),\n (\"cat\", categorical_transformer, categorical_features),\n ]\n )\n\n model = Pipeline(steps=[(\"preprocessor\", preprocessor), (\"classifier\", classifier)])\n\n 
model.fit(X_train, y_train)\n\n X_test = X_train[:11]\n\n torch_model = hummingbird.ml.convert(model, \"torch.jit\", X_test)\n\n self.assertTrue(torch_model is not None)\n\n np.testing.assert_allclose(\n model.predict_proba(X_test), torch_model.predict_proba(X_test), rtol=1e-06, atol=1e-06,\n )\n\n @unittest.skipIf(not pandas_installed(), reason=\"Test requires pandas installed\")\n def test_pipeline_column_transformer_weights(self):\n iris = datasets.load_iris()\n X = iris.data[:, :3]\n y = iris.target\n X_train = pandas.DataFrame(X, columns=[\"vA\", \"vB\", \"vC\"])\n X_train[\"vcat\"] = X_train[\"vA\"].apply(lambda x: 1 if x > 0.5 else 2)\n X_train[\"vcat2\"] = X_train[\"vB\"].apply(lambda x: 3 if x > 0.5 else 4)\n y_train = y % 2\n numeric_features = [0, 1, 2] # [\"vA\", \"vB\", \"vC\"]\n categorical_features = [3, 4] # [\"vcat\", \"vcat2\"]\n\n classifier = LogisticRegression(\n C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver=\"liblinear\", tol=1e-3,\n )\n\n numeric_transformer = Pipeline(steps=[(\"scaler\", StandardScaler())])\n\n categorical_transformer = Pipeline(steps=[(\"onehot\", OneHotEncoder(sparse=True, handle_unknown=\"ignore\"))])\n\n preprocessor = ColumnTransformer(\n transformers=[\n (\"num\", numeric_transformer, numeric_features),\n (\"cat\", categorical_transformer, categorical_features),\n ],\n transformer_weights={\"num\": 2, \"cat\": 3},\n )\n\n model = Pipeline(steps=[(\"preprocessor\", preprocessor), (\"classifier\", classifier)])\n\n model.fit(X_train, y_train)\n\n X_test = X_train[:11]\n\n torch_model = hummingbird.ml.convert(model, \"torch\")\n\n self.assertTrue(torch_model is not None)\n\n np.testing.assert_allclose(\n model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,\n )\n\n @unittest.skipIf(not pandas_installed(), reason=\"Test requires pandas installed\")\n def test_pipeline_column_transformer_weights_pandas(self):\n iris = datasets.load_iris()\n X = iris.data[:, :3]\n y = iris.target\n X_train = pandas.DataFrame(X, columns=[\"vA\", \"vB\", \"vC\"])\n X_train[\"vcat\"] = X_train[\"vA\"].apply(lambda x: 1 if x > 0.5 else 2)\n X_train[\"vcat2\"] = X_train[\"vB\"].apply(lambda x: 3 if x > 0.5 else 4)\n y_train = y % 2\n numeric_features = [0, 1, 2] # [\"vA\", \"vB\", \"vC\"]\n categorical_features = [3, 4] # [\"vcat\", \"vcat2\"]\n\n classifier = LogisticRegression(\n C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver=\"liblinear\", tol=1e-3,\n )\n\n numeric_transformer = Pipeline(steps=[(\"scaler\", StandardScaler())])\n\n categorical_transformer = Pipeline(steps=[(\"onehot\", OneHotEncoder(sparse=True, handle_unknown=\"ignore\"))])\n\n preprocessor = ColumnTransformer(\n transformers=[\n (\"num\", numeric_transformer, numeric_features),\n (\"cat\", categorical_transformer, categorical_features),\n ],\n transformer_weights={\"num\": 2, \"cat\": 3},\n )\n\n model = Pipeline(steps=[(\"precprocessor\", preprocessor), (\"classifier\", classifier)])\n\n model.fit(X_train, y_train)\n\n X_test = X_train[:11]\n\n torch_model = hummingbird.ml.convert(model, \"torch\", X_test)\n\n self.assertTrue(torch_model is not None)\n\n np.testing.assert_allclose(\n model.predict_proba(X_test), torch_model.predict_proba(X_test), rtol=1e-06, atol=1e-06,\n )\n\n @unittest.skipIf(not pandas_installed(), reason=\"Test requires pandas installed\")\n def test_pipeline_column_transformer_drop(self):\n iris = datasets.load_iris()\n X = iris.data[:, :3]\n y = 
iris.target\n X_train = pandas.DataFrame(X, columns=[\"vA\", \"vB\", \"vC\"])\n X_train[\"vcat\"] = X_train[\"vA\"].apply(lambda x: 1 if x > 0.5 else 2)\n X_train[\"vcat2\"] = X_train[\"vB\"].apply(lambda x: 3 if x > 0.5 else 4)\n y_train = y % 2\n numeric_features = [0, 1] # [\"vA\", \"vB\"]\n categorical_features = [3, 4] # [\"vcat\", \"vcat2\"]\n\n classifier = LogisticRegression(\n C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver=\"liblinear\", tol=1e-3,\n )\n\n numeric_transformer = Pipeline(steps=[(\"scaler\", StandardScaler())])\n\n categorical_transformer = Pipeline(steps=[(\"onehot\", OneHotEncoder(sparse=True, handle_unknown=\"ignore\"))])\n\n preprocessor = ColumnTransformer(\n transformers=[\n (\"num\", numeric_transformer, numeric_features),\n (\"cat\", categorical_transformer, categorical_features),\n ],\n transformer_weights={\"num\": 2, \"cat\": 3},\n remainder=\"drop\",\n )\n\n model = Pipeline(steps=[(\"precprocessor\", preprocessor), (\"classifier\", classifier)])\n\n model.fit(X_train, y_train)\n\n X_test = X_train[:11]\n\n torch_model = hummingbird.ml.convert(model, \"torch\")\n\n self.assertTrue(torch_model is not None)\n\n np.testing.assert_allclose(\n model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,\n )\n\n @unittest.skipIf(not pandas_installed(), reason=\"Test requires pandas installed\")\n def test_pipeline_column_transformer_drop_noweights(self):\n iris = datasets.load_iris()\n X = iris.data[:, :3]\n y = iris.target\n X_train = pandas.DataFrame(X, columns=[\"vA\", \"vB\", \"vC\"])\n X_train[\"vcat\"] = X_train[\"vA\"].apply(lambda x: 1 if x > 0.5 else 2)\n X_train[\"vcat2\"] = X_train[\"vB\"].apply(lambda x: 3 if x > 0.5 else 4)\n y_train = y % 2\n numeric_features = [0, 1] # [\"vA\", \"vB\"]\n categorical_features = [3, 4] # [\"vcat\", \"vcat2\"]\n\n classifier = LogisticRegression(\n C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver=\"liblinear\", tol=1e-3,\n )\n\n numeric_transformer = Pipeline(steps=[(\"scaler\", StandardScaler())])\n\n categorical_transformer = Pipeline(steps=[(\"onehot\", OneHotEncoder(sparse=True, handle_unknown=\"ignore\"))])\n\n preprocessor = ColumnTransformer(\n transformers=[\n (\"num\", numeric_transformer, numeric_features),\n (\"cat\", categorical_transformer, categorical_features),\n ],\n remainder=\"drop\",\n )\n\n model = Pipeline(steps=[(\"precprocessor\", preprocessor), (\"classifier\", classifier)])\n\n model.fit(X_train, y_train)\n\n X_test = X_train[:11]\n\n torch_model = hummingbird.ml.convert(model, \"torch\")\n\n self.assertTrue(torch_model is not None)\n\n np.testing.assert_allclose(\n model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,\n )\n\n @unittest.skipIf(not pandas_installed(), reason=\"Test requires pandas installed\")\n def test_pipeline_column_transformer_passthrough(self):\n iris = datasets.load_iris()\n X = iris.data[:, :3]\n y = iris.target\n X_train = pandas.DataFrame(X, columns=[\"vA\", \"vB\", \"vC\"])\n X_train[\"vcat\"] = X_train[\"vA\"].apply(lambda x: 1 if x > 0.5 else 2)\n X_train[\"vcat2\"] = X_train[\"vB\"].apply(lambda x: 3 if x > 0.5 else 4)\n y_train = y % 2\n numeric_features = [0, 1] # [\"vA\", \"vB\"]\n categorical_features = [3, 4] # [\"vcat\", \"vcat2\"]\n\n classifier = LogisticRegression(\n C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver=\"liblinear\", tol=1e-3,\n )\n\n 
numeric_transformer = Pipeline(steps=[(\"scaler\", StandardScaler())])\n\n categorical_transformer = Pipeline(steps=[(\"onehot\", OneHotEncoder(sparse=True, handle_unknown=\"ignore\"))])\n\n preprocessor = ColumnTransformer(\n transformers=[\n (\"num\", numeric_transformer, numeric_features),\n (\"cat\", categorical_transformer, categorical_features),\n ],\n transformer_weights={\"num\": 2, \"cat\": 3},\n remainder=\"passthrough\",\n )\n\n model = Pipeline(steps=[(\"precprocessor\", preprocessor), (\"classifier\", classifier)])\n\n model.fit(X_train, y_train)\n\n X_test = X_train[:11]\n\n torch_model = hummingbird.ml.convert(model, \"torch\")\n\n self.assertTrue(torch_model is not None)\n\n np.testing.assert_allclose(\n model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,\n )\n\n @unittest.skipIf(not pandas_installed(), reason=\"Test requires pandas installed\")\n def test_pipeline_column_transformer_passthrough_noweights(self):\n iris = datasets.load_iris()\n X = iris.data[:, :3]\n y = iris.target\n X_train = pandas.DataFrame(X, columns=[\"vA\", \"vB\", \"vC\"])\n X_train[\"vcat\"] = X_train[\"vA\"].apply(lambda x: 1 if x > 0.5 else 2)\n X_train[\"vcat2\"] = X_train[\"vB\"].apply(lambda x: 3 if x > 0.5 else 4)\n y_train = y % 2\n numeric_features = [0, 1] # [\"vA\", \"vB\"]\n categorical_features = [3, 4] # [\"vcat\", \"vcat2\"]\n\n classifier = LogisticRegression(\n C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver=\"liblinear\", tol=1e-3,\n )\n\n numeric_transformer = Pipeline(steps=[(\"scaler\", StandardScaler())])\n\n categorical_transformer = Pipeline(steps=[(\"onehot\", OneHotEncoder(sparse=True, handle_unknown=\"ignore\"))])\n\n preprocessor = ColumnTransformer(\n transformers=[\n (\"num\", numeric_transformer, numeric_features),\n (\"cat\", categorical_transformer, categorical_features),\n ],\n remainder=\"passthrough\",\n )\n\n model = Pipeline(steps=[(\"precprocessor\", preprocessor), (\"classifier\", classifier)])\n\n model.fit(X_train, y_train)\n\n X_test = X_train[:11]\n\n torch_model = hummingbird.ml.convert(model, \"torch\")\n\n self.assertTrue(torch_model is not None)\n\n np.testing.assert_allclose(\n model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,\n )\n\n @unittest.skipIf(not pandas_installed(), reason=\"Test requires pandas installed\")\n def test_pipeline_column_transformer_passthrough_slice(self):\n iris = datasets.load_iris()\n X = iris.data[:, :3]\n y = iris.target\n X_train = pandas.DataFrame(X, columns=[\"vA\", \"vB\", \"vC\"])\n X_train[\"vcat\"] = X_train[\"vA\"].apply(lambda x: 1 if x > 0.5 else 2)\n X_train[\"vcat2\"] = X_train[\"vB\"].apply(lambda x: 3 if x > 0.5 else 4)\n y_train = y % 2\n numeric_features = slice(0, 1) # [\"vA\", \"vB\"]\n categorical_features = slice(3, 4) # [\"vcat\", \"vcat2\"]\n\n classifier = LogisticRegression(\n C=0.01, class_weight=dict(zip([False, True], [0.2, 0.8])), n_jobs=1, max_iter=10, solver=\"liblinear\", tol=1e-3,\n )\n\n numeric_transformer = Pipeline(steps=[(\"scaler\", StandardScaler())])\n\n categorical_transformer = Pipeline(steps=[(\"onehot\", OneHotEncoder(sparse=True, handle_unknown=\"ignore\"))])\n\n preprocessor = ColumnTransformer(\n transformers=[\n (\"num\", numeric_transformer, numeric_features),\n (\"cat\", categorical_transformer, categorical_features),\n ],\n transformer_weights={\"num\": 2, \"cat\": 3},\n remainder=\"passthrough\",\n )\n\n model = 
Pipeline(steps=[(\"precprocessor\", preprocessor), (\"classifier\", classifier)])\n\n model.fit(X_train, y_train)\n\n X_test = X_train[:11]\n\n torch_model = hummingbird.ml.convert(model, \"torch\")\n\n self.assertTrue(torch_model is not None)\n\n np.testing.assert_allclose(\n model.predict_proba(X_test), torch_model.predict_proba(X_test.values), rtol=1e-06, atol=1e-06,\n )\n\n # Taken from https://github.com/microsoft/hummingbird/issues/388https://github.com/microsoft/hummingbird/issues/388\n def test_pipeline_pca_rf(self):\n X, y = make_regression(n_samples=1000, n_features=8, n_informative=5, n_targets=1, random_state=0, shuffle=True)\n pca = PCA(n_components=8, svd_solver=\"randomized\", whiten=True)\n clf = make_pipeline(StandardScaler(), pca, RandomForestRegressor(n_estimators=10, max_depth=30, random_state=0))\n clf.fit(X, y)\n\n model = hummingbird.ml.convert(clf, \"pytorch\")\n\n prediction_sk = clf.predict([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])\n\n prediction_hb = model.predict([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])\n\n np.testing.assert_allclose(prediction_sk, prediction_hb, rtol=1e-06, atol=1e-06)\n\n @unittest.skipIf(not onnx_runtime_installed(), reason=\"Test requires ORT installed\")\n def test_pipeline_many_inputs(self):\n n_features = 18\n X = np.random.rand(100, n_features)\n y = np.random.randint(1000, size=100)\n\n scaler_transformer = Pipeline(steps=[(\"scaler\", StandardScaler())])\n preprocessor = ColumnTransformer(transformers=[(\"scaling\", scaler_transformer, list(range(n_features)))])\n model = RandomForestRegressor(n_estimators=10, max_depth=9)\n pipeline = Pipeline(steps=[(\"preprocessor\", preprocessor), (\"model\", model)])\n\n pipeline.fit(X, y)\n\n X_test = tuple(np.split(X, n_features, axis=1))\n\n hb_model = hummingbird.ml.convert(pipeline, \"onnx\", X_test)\n\n assert len(hb_model.model.graph.input) == n_features\n\n np.testing.assert_allclose(\n pipeline.predict(X), np.array(hb_model.predict(X_test)).flatten(), rtol=1e-06, atol=1e-06,\n )\n\n @unittest.skipIf(not onnx_runtime_installed(), reason=\"Test requires ORT installed\")\n def test_pipeline_many_inputs_with_schema(self):\n n_features = 5\n X = np.random.rand(100, n_features)\n y = np.random.randint(1000, size=100)\n input_column_names = [\"A\", \"B\", \"C\", \"D\", \"E\"]\n output_column_names = [\"score\"]\n\n scaler_transformer = Pipeline(steps=[(\"scaler\", StandardScaler())])\n preprocessor = ColumnTransformer(transformers=[(\"scaling\", scaler_transformer, list(range(n_features)))])\n model = RandomForestRegressor(n_estimators=10, max_depth=9)\n pipeline = Pipeline(steps=[(\"preprocessor\", preprocessor), (\"model\", model)])\n\n pipeline.fit(X, y)\n\n X_test = tuple(np.split(X, n_features, axis=1))\n extra_config = {constants.INPUT_NAMES: input_column_names, constants.OUTPUT_NAMES: output_column_names}\n\n hb_model = hummingbird.ml.convert(pipeline, \"onnx\", X_test, extra_config=extra_config)\n\n graph_inputs = [input.name for input in hb_model.model.graph.input]\n graph_outputs = [output.name for output in hb_model.model.graph.output]\n\n assert len(hb_model.model.graph.input) == n_features\n assert graph_inputs == input_column_names\n assert graph_outputs == output_column_names\n\n @unittest.skipIf(StackingClassifier is None, reason=\"StackingClassifier not available in scikit-learn < 0.22\")\n def test_stacking_classifier(self):\n X, y = load_iris(return_X_y=True)\n estimators = [\n (\"rf\", RandomForestClassifier(n_estimators=10, random_state=42)),\n (\"svr\", 
make_pipeline(StandardScaler(), LogisticRegression(random_state=42))),\n ]\n clf = StackingClassifier(estimators=estimators, final_estimator=LogisticRegression())\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=42)\n clf.fit(X_train, y_train)\n\n hb_model = hummingbird.ml.convert(clf, \"torch\")\n\n np.testing.assert_allclose(\n clf.predict(X_test), hb_model.predict(X_test), rtol=1e-06, atol=1e-06,\n )\n\n @unittest.skipIf(StackingClassifier is None, reason=\"StackingClassifier not available in scikit-learn < 0.22\")\n def test_stacking_classifier_passthrough(self):\n X, y = load_iris(return_X_y=True)\n estimators = [\n (\"rf\", RandomForestClassifier(n_estimators=10, random_state=42)),\n (\"svr\", make_pipeline(StandardScaler(), LogisticRegression(random_state=42))),\n ]\n clf = StackingClassifier(estimators=estimators, final_estimator=LogisticRegression(), passthrough=True)\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=42)\n clf.fit(X_train, y_train)\n\n hb_model = hummingbird.ml.convert(clf, \"torch\")\n\n np.testing.assert_allclose(\n clf.predict(X_test), hb_model.predict(X_test), rtol=1e-06, atol=1e-06,\n )\n\n @unittest.skipIf(StackingClassifier is None, reason=\"StackingClassifier not available in scikit-learn < 0.22\")\n def test_stacking_classifier_decision_function(self):\n X, y = load_iris(return_X_y=True)\n estimators = [\n (\"rf\", RandomForestClassifier(n_estimators=10, random_state=42)),\n (\"svr\", make_pipeline(StandardScaler(), LinearSVC(random_state=42))),\n ]\n clf = StackingClassifier(estimators=estimators, final_estimator=LogisticRegression())\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=42)\n clf.fit(X_train, y_train)\n\n self.assertRaises(ValueError, hummingbird.ml.convert, clf, \"torch\")\n\n @unittest.skipIf(StackingClassifier is None, reason=\"StackingRegressor not available in scikit-learn < 0.22\")\n def test_stacking_regressor(self):\n X, y = load_diabetes(return_X_y=True)\n estimators = [(\"lr\", RidgeCV()), (\"svr\", LinearSVR(random_state=42))]\n reg = StackingRegressor(estimators=estimators, final_estimator=RandomForestRegressor(n_estimators=10, random_state=42))\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)\n reg.fit(X_train, y_train)\n\n hb_model = hummingbird.ml.convert(reg, \"torch\")\n\n np.testing.assert_allclose(\n reg.predict(X_test), hb_model.predict(X_test), rtol=1e-06, atol=1e-06,\n )\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] | [
[
"sklearn.compose.ColumnTransformer",
"sklearn.datasets.make_regression",
"sklearn.ensemble.RandomForestRegressor",
"sklearn.linear_model.LogisticRegression",
"sklearn.pipeline.Pipeline",
"sklearn.datasets.load_iris",
"sklearn.preprocessing.Imputer",
"numpy.random.rand",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.svm.LinearSVR",
"sklearn.preprocessing.OneHotEncoder",
"pandas.read_csv",
"sklearn.preprocessing.MinMaxScaler",
"sklearn.svm.LinearSVC",
"sklearn.linear_model.RidgeCV",
"sklearn.preprocessing.StandardScaler",
"sklearn.model_selection.train_test_split",
"sklearn.decomposition.PCA",
"sklearn.datasets.load_diabetes",
"pandas.DataFrame",
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.random.randint",
"numpy.split"
]
] |
BumagniyPacket/ocr | [
"f2651f3a23cf835a689b35a658ef3443086fd72a"
] | [
"ocr/paint.py"
] | [
"# -*- coding: utf-8 -*-\nimport matplotlib.pyplot as plt\n\n\ndef show_image(image):\n plt.imshow(-image, cmap='Greys')\n plt.show()\n\n\ndef show_two(image1, image2):\n plt.subplot(121)\n plt.imshow(-image1, cmap='Greys')\n\n plt.subplot(122)\n plt.imshow(-image2, cmap='Greys')\n\n plt.show()\n\n\ndef plot_hist(img):\n plt.hist(img.ravel(), 256, range=(0., 1.), color='red')\n plt.show()\n\n\ndef plot_2img_2hist(image1, image2):\n\n plt.subplot(221)\n plt.imshow(-image1, cmap='Greys')\n\n plt.subplot(223)\n plt.hist(image1.ravel(), 256, range=(0., 1.), color='red')\n\n plt.subplot(222)\n plt.imshow(-image2, cmap='Greys')\n\n plt.subplot(224)\n plt.hist(image2.ravel(), 256, range=(0., 1.), color='red')\n\n plt.show()\n"
] | [
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplot"
]
] |
RoyalTS/stochatreat | [
"6e638e748b8638b64a185229f78967cf864cd45e"
] | [
"tests/test_stochatreat_assignment.py"
] | [
"import pytest\n\nfrom math import gcd\n\nimport numpy as np\nimport pandas as pd\n\nfrom stochatreat import stochatreat\nfrom stochatreat import get_lcm_prob_denominators\n\n\n################################################################################\n# fixtures\n################################################################################\n\[email protected](params=[10_000, 100_000])\ndef df(request):\n N = request.param\n df = pd.DataFrame(\n data={\n \"id\": np.arange(N),\n \"dummy\": [1] * N,\n \"stratum1\": np.random.randint(1, 100, size=N),\n \"stratum2\": np.random.randint(0, 2, size=N),\n }\n )\n\n return df\n\n# a set of treatment assignment probabilities to throw at many tests\nstandard_probs = [[0.1, 0.9],\n [1/3, 2/3],\n [0.5, 0.5],\n [2/3, 1/3],\n [0.9, 0.1]]\n\n# a set of stratum column combinations from the above df fixture to throw at\n# many tests\nstandard_stratum_cols = [\n [\"dummy\"],\n [\"stratum1\"],\n [\"stratum1\", \"stratum2\"],\n]\n\n\n# a DataFrame and treatment assignment probabilities under which there will be\n# no misfits\[email protected]\ndef df_no_misfits():\n N = 1_000\n stratum_size = 10\n df = pd.DataFrame(\n data={\n \"id\": np.arange(N),\n \"stratum\": np.repeat(\n np.arange(N / stratum_size),\n repeats=stratum_size\n )\n }\n )\n\n return df\n\nprobs_no_misfits =[\n [0.1, 0.9],\n [0.5, 0.5],\n [0.9, 0.1],\n]\n\n\n################################################################################\n# overall treatment assignment proportions\n################################################################################\n\[email protected](\"n_treats\", [2, 3, 4, 5, 10])\[email protected](\"stratum_cols\", standard_stratum_cols)\ndef test_stochatreat_no_probs(n_treats, stratum_cols, df):\n \"\"\"\n Tests that overall treatment assignment proportions across all strata are as\n intended with equal treatment assignment probabilities -- relies on the Law\n of Large Numbers, not deterministic\n \"\"\"\n treats = stochatreat(\n data=df,\n stratum_cols=stratum_cols,\n treats=n_treats,\n idx_col=\"id\",\n random_state=42\n )\n\n treatment_shares = treats.groupby('treat')['id'].size() / treats.shape[0]\n\n np.testing.assert_almost_equal(\n treatment_shares, np.array([1 / n_treats] * n_treats), decimal=2\n )\n\n\[email protected](\"probs\", standard_probs)\[email protected](\"stratum_cols\", standard_stratum_cols)\ndef test_stochatreat_probs(probs, stratum_cols, df):\n \"\"\"\n Tests that overall treatment assignment proportions across all strata are as\n intended with unequal treatment assignment probabilities -- relies on the\n Law of Large Numbers, not deterministic\n \"\"\"\n treats = stochatreat(\n data=df,\n stratum_cols=stratum_cols,\n treats=len(probs),\n idx_col=\"id\",\n probs=probs,\n random_state=42,\n )\n treatment_shares = treats.groupby('treat')['id'].size() / treats.shape[0]\n\n np.testing.assert_almost_equal(\n treatment_shares, np.array(probs), decimal=2\n )\n\n\[email protected](\"probs\", probs_no_misfits)\ndef test_stochatreat_no_misfits(probs, df_no_misfits):\n \"\"\"\n Tests that overall treatment assignment proportions across all strata are as\n intended when strata are such that there are no misfits\n \"\"\"\n treats = stochatreat(\n data=df_no_misfits,\n stratum_cols=[\"stratum\"],\n treats=len(probs),\n idx_col=\"id\",\n probs=probs,\n random_state=42,\n )\n treatment_shares = treats.groupby('treat')['id'].size() / treats.shape[0]\n\n np.testing.assert_almost_equal(\n treatment_shares, np.array(probs), decimal=2\n )\n\n\[email 
protected](\"probs\", standard_probs)\ndef test_stochatreat_only_misfits(probs):\n \"\"\"\n Tests that overall treatment assignment proportions across all strata are as\n intended when strata are such that there are only misfits and the number of\n units is sufficiently large -- relies on the Law of Large Numbers, not\n deterministic\n \"\"\"\n N = 10_000\n df = pd.DataFrame(\n data={\n \"id\": np.arange(N),\n \"stratum\": np.arange(N),\n }\n )\n treats = stochatreat(\n data=df,\n stratum_cols=[\"stratum\"],\n treats=len(probs),\n idx_col=\"id\",\n probs=probs,\n random_state=42,\n )\n treatment_shares = treats.groupby('treat')['id'].size() / treats.shape[0]\n\n np.testing.assert_almost_equal(\n treatment_shares, np.array(probs), decimal=2\n )\n\n\n################################################################################\n# within-stratum treatment assignments\n################################################################################\n\ndef get_within_strata_counts(treats):\n \"\"\"Helper function to compute the treatment shares within strata\"\"\"\n treatment_counts = (treats\n .groupby([\"stratum_id\", \"treat\"])[[\"id\"]]\n .count()\n .rename(columns={\"id\": \"treat_count\"})\n .reset_index()\n )\n\n stratum_counts = (treats\n .groupby([\"stratum_id\"])[[\"id\"]]\n .count()\n .rename(columns={\"id\": \"stratum_count\"})\n .reset_index()\n )\n\n counts = pd.merge(\n treatment_counts, stratum_counts, on=\"stratum_id\", how=\"left\"\n )\n\n return counts\n\n\ndef compute_count_diff(treats, probs):\n \"\"\"\n Helper function to compute the treatment counts within strata and line them\n up with required counts, and returns the different treatment counts\n aggregated at the stratum level as well as the dataframe with the different\n counts used in some tests\n \"\"\"\n counts = get_within_strata_counts(treats)\n\n required_props = pd.DataFrame(\n {\"required_prop\": probs, \"treat\": range(len(probs))}\n )\n comp = pd.merge(\n counts, required_props, on=\"treat\", how=\"left\"\n )\n comp[\"desired_counts\"] = comp[\"stratum_count\"] * comp[\"required_prop\"]\n\n comp[\"count_diff\"] = (comp[\"treat_count\"] - comp[\"desired_counts\"]).abs()\n\n return comp\n\n\[email protected](\"n_treats\", [2, 3, 4, 5, 10])\[email protected](\"stratum_cols\", standard_stratum_cols)\ndef test_stochatreat_within_strata_no_probs(n_treats, stratum_cols, df):\n \"\"\"\n Tests that within strata treatment assignment counts are only as far from\n the required counts as misfit assignment randomization allows with equal\n treatment assignment probabilities but a differing number of treatments\n \"\"\"\n probs = n_treats * [1 / n_treats]\n lcm_prob_denominators = n_treats\n treats = stochatreat(\n data=df, \n stratum_cols=stratum_cols, \n treats=n_treats, \n idx_col=\"id\", \n random_state=42\n )\n comp = compute_count_diff(treats, probs)\n\n assert_msg = \"\"\"The counts differences exceed the bound that misfit \n allocation should not exceed\"\"\"\n assert (comp[\"count_diff\"] < lcm_prob_denominators).all(), assert_msg\n\n\[email protected](\"probs\", standard_probs)\[email protected](\"stratum_cols\", standard_stratum_cols)\ndef test_stochatreat_within_strata_probs(probs, stratum_cols, df):\n \"\"\"\n Tests that within strata treatment assignment counts are only as far from\n the required counts as misfit assignment randomization allows with two\n treatments but unequal treatment assignment probabilities\n \"\"\"\n lcm_prob_denominators = get_lcm_prob_denominators(probs)\n treats = stochatreat(\n 
data=df,\n stratum_cols=stratum_cols,\n treats=len(probs),\n idx_col=\"id\",\n probs=probs,\n random_state=42,\n )\n comp = compute_count_diff(treats, probs)\n\n assert_msg = \"\"\"The counts differences exceed the bound that misfit \n allocation should not exceed\"\"\"\n assert (comp[\"count_diff\"] < lcm_prob_denominators).all(), assert_msg\n\n\[email protected](\"probs\", probs_no_misfits)\ndef test_stochatreat_within_strata_no_misfits(probs, df_no_misfits):\n \"\"\"\n Tests that within strata treatment assignment counts are exactly equal to\n the required counts when strata are such that there are no misfits\n \"\"\"\n treats = stochatreat(\n data=df_no_misfits,\n stratum_cols=[\"stratum\"],\n treats=len(probs),\n idx_col=\"id\",\n probs=probs,\n random_state=42,\n )\n comp = compute_count_diff(treats, probs)\n\n assert_msg = \"The required proportions are not reached without misfits\"\n assert (comp[\"count_diff\"] == 0).all(), assert_msg\n\n\[email protected](\"probs\", standard_probs)\[email protected](\"stratum_cols\", standard_stratum_cols)\ndef test_stochatreat_global_strategy(probs, stratum_cols, df):\n treats = stochatreat(\n data=df,\n stratum_cols=stratum_cols,\n treats=len(probs),\n idx_col=\"id\",\n probs=probs,\n random_state=42,\n misfit_strategy=\"global\"\n )\n comp = compute_count_diff(treats, probs)\n\n stratum_count_diff = comp.groupby([\"stratum_id\"])[\"count_diff\"].sum()\n\n assert_msg = \"There is more than one stratum with misfits\"\n assert (stratum_count_diff != 0).sum() <= 1, assert_msg\n\n\[email protected](\"misfit_strategy\", [\"global\", \"stratum\"])\[email protected](\"stratum_cols\", standard_stratum_cols)\ndef test_stochatreat_stratum_ids(df, misfit_strategy, stratum_cols):\n \"\"\"Tests that the function returns the right number of stratum ids\"\"\"\n treats = stochatreat(\n data=df,\n stratum_cols=stratum_cols,\n treats=2,\n idx_col=\"id\",\n random_state=42,\n misfit_strategy=misfit_strategy,\n )\n\n n_unique_strata = len(df[stratum_cols].drop_duplicates())\n\n n_unique_stratum_ids = len(treats[\"stratum_id\"].drop_duplicates())\n\n if misfit_strategy == \"global\":\n # depending on whether there are misfits\n assert (\n (n_unique_stratum_ids == n_unique_strata) or\n (n_unique_stratum_ids - 1 == n_unique_strata)\n )\n else:\n assert n_unique_stratum_ids == n_unique_strata\n\n\[email protected](\"stratum_cols\", standard_stratum_cols)\[email protected](\"misfit_strategy\", [\"global\", \"stratum\"])\ndef test_stochatreat_random_state(df, stratum_cols, misfit_strategy):\n \"\"\"\n Tests that the results are the same on two consecutive calls with the same\n random state\n \"\"\"\n random_state = 42\n treats = []\n for _ in range(2):\n treatments_i = stochatreat(\n data=df,\n stratum_cols=stratum_cols,\n treats=2,\n idx_col=\"id\",\n random_state=random_state,\n misfit_strategy=misfit_strategy,\n )\n treats.append(treatments_i)\n \n pd.testing.assert_series_equal(\n treats[0][\"treat\"], treats[1][\"treat\"]\n )\n\n \[email protected](\"stratum_cols\", standard_stratum_cols)\[email protected](\"misfit_strategy\", [\"global\", \"stratum\"])\ndef test_stochatreat_shuffle_data(df, stratum_cols, misfit_strategy):\n \"\"\"\n Tests that the mapping between idx_col and the assignments is the same on\n two consecutive calls with the same random state and shuffled data points\n \"\"\"\n random_state = 42\n treats = []\n for _ in range(2):\n treatments_i = stochatreat(\n data=df,\n stratum_cols=stratum_cols,\n treats=2,\n idx_col=\"id\",\n 
random_state=random_state,\n misfit_strategy=misfit_strategy,\n )\n treatments_i = treatments_i.sort_values(\"id\")\n treats.append(treatments_i)\n\n df = df.sample(len(df), random_state=random_state)\n \n pd.testing.assert_series_equal(\n treats[0][\"treat\"], treats[1][\"treat\"]\n )\n\n\n\n\n \n\n\n\n"
] | [
[
"numpy.arange",
"pandas.merge",
"numpy.array",
"pandas.testing.assert_series_equal",
"numpy.random.randint"
]
] |
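The record above pairs the `stochatreat` test-suite code with the pandas/numpy calls it exercises. As a quick orientation, here is a minimal, hedged sketch of the call pattern those tests rely on; the toy data frame is hypothetical, while the parameter names (`stratum_cols`, `treats`, `probs`, `idx_col`, `random_state`, `misfit_strategy`) and the returned `treat`/`stratum_id` columns all appear in the recorded code.

```python
# Minimal sketch of the stochatreat call pattern exercised by the tests above.
# Assumes the `stochatreat` package plus numpy/pandas are installed; the toy
# frame below is hypothetical.
import numpy as np
import pandas as pd
from stochatreat import stochatreat

df = pd.DataFrame({"id": np.arange(1_000), "stratum": np.arange(1_000) % 10})

assignments = stochatreat(
    data=df,
    stratum_cols=["stratum"],
    treats=2,                   # two treatment arms
    probs=[0.5, 0.5],
    idx_col="id",
    random_state=42,
    misfit_strategy="stratum",  # the tests also exercise "global"
)

# Overall treatment shares should be close to the requested probabilities,
# which is what the proportion tests in the record assert.
print(assignments.groupby("treat")["id"].size() / len(assignments))
```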
mutazag/misc | [
"dfef362cdd835ef4efd1f2d02e13ff5297ccfc0f"
] | [
"py_merge/mergeexample.py"
] | [
"#%% \n\nimport pandas as pd\n\n#%%\n\ndf1 = pd.read_csv('df1.csv', index_col=0)\n# %%\ndf2 = pd.read_csv('df2.csv', index_col=0)\n# %%\ndf3 = pd.read_csv('df3.csv', index_col=0)\n# %%\ndf1.merge(df2, on='proj_id').merge(df3, on='doc_id')\n# %%\ndf1.merge(df2, on='proj_id', how='left').merge(df3, on='doc_id', how='left')\n# %%\n"
] | [
[
"pandas.read_csv"
]
] |
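The `mergeexample.py` record reads three CSV files and chains `DataFrame.merge` calls. A self-contained variant is sketched below so the pattern can be run without those files; the frames and their non-key columns are hypothetical, and only the `proj_id`/`doc_id` join keys come from the record.

```python
# Self-contained sketch of the merge chain from the record above; the CSV
# inputs are replaced with small hypothetical frames so this runs as-is.
import pandas as pd

df1 = pd.DataFrame({"proj_id": [1, 2], "proj_name": ["alpha", "beta"]})    # assumed columns
df2 = pd.DataFrame({"proj_id": [1, 1, 3], "doc_id": [10, 11, 12]})         # assumed columns
df3 = pd.DataFrame({"doc_id": [10, 12], "doc_title": ["intro", "specs"]})  # assumed columns

# Inner joins keep only rows that match in every frame ...
inner = df1.merge(df2, on="proj_id").merge(df3, on="doc_id")

# ... while left joins keep every row of the left-hand frame and fill NaN.
left = df1.merge(df2, on="proj_id", how="left").merge(df3, on="doc_id", how="left")

print(inner)
print(left)
```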
mohammadrezabk/eo-learn | [
"8de3cfd64e74c1e4832e585954cdbf0ee9676eb3"
] | [
"features/eolearn/features/radiometric_normalization.py"
] | [
"\"\"\"\nModule for radiometric normalization\n\nCredits:\nCopyright (c) 2018-2019 Johannes Schmid (GeoVille)\nCopyright (c) 2017-2019 Matej Aleksandrov, Matic Lubej, Devis Peresutti (Sinergise)\n\nThis source code is licensed under the MIT license found in the LICENSE\nfile in the root directory of this source tree.\n\"\"\"\n\nimport numpy as np\n\nfrom eolearn.core import EOTask, FeatureType\n\n\nclass ReferenceScenes(EOTask):\n \"\"\" Creates a layer of reference scenes which have the highest fraction of valid pixels.\n\n The number of reference scenes is limited to a definable number.\n\n Contributor: Johannes Schmid, GeoVille Information Systems GmbH, 2018\n\n :param feature: Name of the eopatch data layer. Needs to be of the FeatureType \"DATA\".\n :type feature: (FeatureType, str) or (FeatureType, str, str)\n :param valid_fraction_feature: Name of the layer containing the valid fraction obtained with the EOTask\n 'AddValidDataFraction'. Needs to be of the FeatureType \"SCALAR\".\n :type valid_fraction_feature: (FeatureType, str)\n :param max_scene_number: Maximum number of reference scenes taken for the creation of the composite. By default,\n the maximum number of scenes equals the number of time frames\n :type max_scene_number: int\n\n \"\"\"\n def __init__(self, feature, valid_fraction_feature, max_scene_number=None):\n self.feature = self._parse_features(feature, new_names=True,\n default_feature_type=FeatureType.DATA,\n rename_function='{}_REFERENCE'.format)\n self.valid_fraction_feature = self._parse_features(valid_fraction_feature,\n default_feature_type=FeatureType.SCALAR)\n self.number = max_scene_number\n\n def execute(self, eopatch):\n feature_type, feature_name, new_feature_name = next(self.feature(eopatch))\n valid_fraction_feature_type, valid_fraction_feature_name = next(self.valid_fraction_feature(eopatch))\n\n valid_frac = list(eopatch[valid_fraction_feature_type][valid_fraction_feature_name].flatten())\n data = eopatch[feature_type][feature_name]\n\n number = data.shape[0] if self.number is None else self.number\n\n eopatch[feature_type][new_feature_name] = np.array([data[x] for _, x in\n sorted(zip(valid_frac, range(data.shape[0])), reverse=True)\n if x <= number-1])\n\n return eopatch\n\n\nclass BaseCompositing(EOTask):\n \"\"\" Base class to create a composite of reference scenes\n\n Contributor: Johannes Schmid, GeoVille Information Systems GmbH, 2018\n\n :param feature: Feature holding the input time-series. Default type is FeatureType.DATA\n :type feature: (FeatureType, str)\n :param feature_composite: Type and name of output composite image. Default type is FeatureType.DATA_TIMELESS\n :type feature_composite: (FeatureType, str)\n :param percentile: Percentile along the time dimension used for compositing. Methods use different percentiles\n :type percentile: int or list\n :param max_index: Value used to flag indices with NaNs. Could be integer or NaN. Default is 255\n :type max_index: int or NaN\n :param interpolation: Method used to compute percentile. Allowed values are {'geoville', 'linear', 'lower',\n 'higher', 'midpoint', 'nearest'}. 'geoville' interpolation performs a custom\n implementation, while the other methods use the numpy `percentile` function. Default is\n 'lower'\n :type interpolation: str\n :param no_data_value: Value in the composite assigned to non valid data points. 
Default is NaN\n :type no_data_value: float or NaN\n \"\"\"\n\n def __init__(self, feature, feature_composite, percentile=None, max_index=255, interpolation='lower',\n no_data_value=np.nan):\n self.feature = self._parse_features(feature,\n default_feature_type=FeatureType.DATA,\n rename_function='{}_COMPOSITE'.format)\n self.composite_type, self.composite_name = next(\n self._parse_features(feature_composite, default_feature_type=FeatureType.DATA_TIMELESS)())\n self.percentile = percentile\n self.max_index = max_index\n self.interpolation = interpolation\n self._index_by_percentile = self._geoville_index_by_percentile \\\n if self.interpolation.lower() == 'geoville' else self._numpy_index_by_percentile\n self.no_data_value = no_data_value\n\n def _numpy_index_by_percentile(self, data, percentile):\n \"\"\" Calculate percentile of numpy stack and return the index of the chosen pixel.\n\n numpy percentile function is used with one of the following interpolations {'linear', 'lower', 'higher',\n 'midpoint', 'nearest'}\n \"\"\"\n data_perc_low = np.nanpercentile(data, percentile, axis=0, interpolation=self.interpolation)\n\n indices = np.empty(data_perc_low.shape, dtype=np.uint8)\n indices[:] = np.nan\n\n abs_diff = np.where(np.isnan(data_perc_low), np.inf, abs(data - data_perc_low))\n\n indices = np.where(np.isnan(data_perc_low), self.max_index, np.nanargmin(abs_diff, axis=0))\n\n return indices\n\n def _geoville_index_by_percentile(self, data, percentile):\n \"\"\" Calculate percentile of numpy stack and return the index of the chosen pixel. \"\"\"\n # no_obs = bn.allnan(arr_tmp[\"data\"], axis=0)\n data_tmp = np.array(data, copy=True)\n valid_obs = np.sum(np.isfinite(data_tmp), axis=0)\n # replace NaN with maximum\n max_val = np.nanmax(data_tmp) + 1\n data_tmp[np.isnan(data_tmp)] = max_val\n # sort - former NaNs will move to the end\n ind_tmp = np.argsort(data_tmp, kind=\"mergesort\", axis=0)\n # desired position as well as floor and ceiling of it\n k_arr = (valid_obs - 1) * (percentile / 100.0)\n k_arr = np.where(k_arr < 0, 0, k_arr)\n f_arr = np.floor(k_arr + 0.5)\n f_arr = f_arr.astype(int)\n # get floor value of reference band and index band\n ind = f_arr.astype(\"int16\")\n y_val, x_val = ind_tmp.shape[1], ind_tmp.shape[2]\n y_val, x_val = np.ogrid[0:y_val, 0:x_val]\n idx = np.where(valid_obs == 0, self.max_index, ind_tmp[ind, y_val, x_val])\n return idx\n\n def _get_reference_band(self, data):\n \"\"\" Extract reference band from input 4D data according to compositing method\n\n :param data: 4D array from which to extract reference band (e.g. blue, maxNDVI, ..)\n :type data: numpy array\n :return: 3D array containing reference band according to compositing method\n \"\"\"\n raise NotImplementedError\n\n def _get_indices(self, data):\n \"\"\" Compute indices along temporal dimension corresponding to the sought percentile\n\n :param data: Input 3D array holding the reference band\n :type data: numpy array\n :return: 2D array holding the temporal index corresponding to percentile\n \"\"\"\n indices = self._index_by_percentile(data, self.percentile)\n return indices\n\n def execute(self, eopatch):\n \"\"\" Compute composite array merging temporal frames according to the compositing method\n\n :param eopatch: eopatch holding time-series\n :return: eopatch with composite image of time-series\n \"\"\"\n feature_type, feature_name = next(self.feature(eopatch))\n data = eopatch[feature_type][feature_name].copy()\n\n # compute band according to compositing method (e.g. 
blue, maxNDVI, maxNDWI)\n reference_bands = self._get_reference_band(data)\n\n # find temporal indices corresponding to pre-defined percentile\n indices = self._get_indices(reference_bands)\n\n # compute composite image selecting values along temporal dimension corresponding to percentile indices\n composite_image = np.empty((data.shape[1:]), np.float32)\n composite_image[:] = self.no_data_value\n for scene_id, scene in enumerate(data):\n composite_image = np.where(np.dstack([indices]) == scene_id, scene, composite_image)\n\n eopatch[self.composite_type][self.composite_name] = composite_image\n\n return eopatch\n\n\nclass BlueCompositing(BaseCompositing):\n \"\"\" Blue band compositing method\n\n - blue (25th percentile of the blue band)\n\n :param blue_idx: Index of blue band in `feature` array\n :type blue_idx: int\n \"\"\"\n def __init__(self, feature, feature_composite, blue_idx, interpolation='lower'):\n super().__init__(feature, feature_composite, percentile=25, interpolation=interpolation)\n self.blue_idx = blue_idx\n if not isinstance(blue_idx, int):\n raise ValueError('Incorrect value of blue band index specified')\n\n def _get_reference_band(self, data):\n \"\"\" Extract the blue band from time-series\n\n :param data: 4D array from which to extract the blue reference band\n :type data: numpy array\n :return: 3D array containing the blue reference band\n \"\"\"\n return data[..., self.blue_idx].astype(\"float32\")\n\n\nclass HOTCompositing(BaseCompositing):\n \"\"\" HOT compositing method\n\n - HOT (Index using bands blue and red)\n\n The HOT index is defined as per\n Zhu, Z., & Woodcock, C. E. (2012). \"Object-based cloud and cloud shadow detection in Landsat imagery.\"\n Remote Sensing of Environment, 118, 83-94.\n\n :param blue_idx: Index of blue band in `feature` array\n :type blue_idx: int\n :param red_idx: Index of red band in `feature` array\n :type red_idx: int\n \"\"\"\n def __init__(self, feature, feature_composite, blue_idx, red_idx, interpolation='lower'):\n super().__init__(feature, feature_composite, percentile=25, interpolation=interpolation)\n self.blue_idx = blue_idx\n self.red_idx = red_idx\n if not isinstance(blue_idx, int) or not isinstance(red_idx, int):\n raise ValueError('Incorrect values of blue and red band indices specified')\n\n def _get_reference_band(self, data):\n \"\"\" Extract the HOT band from time-series\n\n :param data: 4D array from which to extract the HOT reference band\n :type data: numpy array\n :return: 3D array containing the HOT reference band\n \"\"\"\n return data[..., self.blue_idx] - 0.5 * data[..., self.red_idx] - 0.08\n\n\nclass MaxNDVICompositing(BaseCompositing):\n \"\"\" maxNDVI compositing method\n\n - maxNDVI (temporal maximum of NDVI)\n\n :param red_idx: Index of red band in `feature` array\n :type red_idx: int\n :param nir_idx: Index of NIR band in `feature` array\n :type nir_idx: int\n \"\"\"\n def __init__(self, feature, feature_composite, red_idx, nir_idx, interpolation='lower'):\n super().__init__(feature, feature_composite, percentile=[0, 100], interpolation=interpolation)\n self.red_idx = red_idx\n self.nir_idx = nir_idx\n if not isinstance(nir_idx, int) or not isinstance(red_idx, int):\n raise ValueError('Incorrect values of red and NIR band indices specified')\n\n def _get_reference_band(self, data):\n \"\"\" Extract the NDVI band from time-series\n\n :param data: 4D array from which to compute the NDVI reference band\n :type data: numpy array\n :return: 3D array containing the NDVI reference band\n \"\"\"\n nir = 
data[..., self.nir_idx].astype(\"float32\")\n red = data[..., self.red_idx].astype(\"float32\")\n return (nir - red) / (nir + red)\n\n def _get_indices(self, data):\n median = np.nanmedian(data, axis=0)\n indices_min = self._index_by_percentile(data, self.percentile[0])\n indices_max = self._index_by_percentile(data, self.percentile[1])\n indices = np.where(median < -0.05, indices_min, indices_max)\n return indices\n\n\nclass MaxNDWICompositing(BaseCompositing):\n \"\"\" maxNDWI compositing method\n\n - maxNDWI (temporal maximum of NDWI)\n\n :param nir_idx: Index of NIR band in `feature` array\n :type nir_idx: int\n :param swir1_idx: Index of SWIR1 band in `feature` array\n :type swir1_idx: int\n \"\"\"\n def __init__(self, feature, feature_composite, nir_idx, swir1_idx, interpolation='lower'):\n super().__init__(feature, feature_composite, percentile=100, interpolation=interpolation)\n self.nir_idx = nir_idx\n self.swir1_idx = swir1_idx\n if not isinstance(nir_idx, int) or not isinstance(swir1_idx, int):\n raise ValueError('Incorrect values of NIR and SWIR1 band indices specified')\n\n def _get_reference_band(self, data):\n \"\"\" Extract the NDWI band from time-series\n\n :param data: 4D array from which to compute the NDWI reference band\n :type data: numpy array\n :return: 3D array containing the NDWI reference band\n \"\"\"\n nir = data[..., self.nir_idx].astype(\"float32\")\n swir1 = data[..., self.swir1_idx].astype(\"float32\")\n return (nir - swir1) / (nir + swir1)\n\n\nclass MaxRatioCompositing(BaseCompositing):\n \"\"\" maxRatio compositing method\n\n - maxRatio (temporal maximum of a ratio using bands blue, NIR and SWIR)\n\n :param blue_idx: Index of blue band in `feature` array\n :type blue_idx: int\n :param nir_idx: Index of NIR band in `feature` array\n :type nir_idx: int\n :param swir1_idx: Index of SWIR1 band in `feature` array\n :type swir1_idx: int\n \"\"\"\n def __init__(self, feature, feature_composite, blue_idx, nir_idx, swir1_idx, interpolation='lower'):\n super().__init__(feature, feature_composite, percentile=100, interpolation=interpolation)\n self.blue_idx = blue_idx\n self.nir_idx = nir_idx\n self.swir1_idx = swir1_idx\n if not isinstance(blue_idx, int) or not isinstance(nir_idx, int) or not isinstance(swir1_idx, int):\n raise ValueError('Incorrect values for either blue, NIR or SWIR1 band indices specified')\n\n def _get_reference_band(self, data):\n \"\"\" Extract the max-ratio band from time-series\n\n The max-ratio is defined as max(NIR,SWIR1)/BLUE\n\n :param data: 4D array from which to compute the max-ratio reference band\n :type data: numpy array\n :return: 3D array containing the max-ratio reference band\n \"\"\"\n blue = data[..., self.blue_idx].astype(\"float32\")\n nir = data[..., self.nir_idx].astype(\"float32\")\n swir1 = data[..., self.swir1_idx].astype(\"float32\")\n return np.nanmax(np.array([nir, swir1]), axis=0) / blue\n\n\nclass HistogramMatching(EOTask):\n \"\"\" Histogram match of each band of each scene within a time-series with respect to the corresponding band of a\n reference composite.\n\n Contributor: Johannes Schmid, GeoVille Information Systems GmbH, 2018\n\n :param feature: Name of the eopatch data layer that will undergo a histogram match.\n Should be of the FeatureType \"DATA\".\n :type feature: (FeatureType, str) or (FeatureType, str, str)\n :param reference: Name of the eopatch data layer that represents the reference for the histogram match.\n Should be of the FeatureType \"DATA_TIMELESS\".\n :type reference: (FeatureType, 
str)\n \"\"\"\n\n def __init__(self, feature, reference):\n self.feature = self._parse_features(feature, new_names=True,\n default_feature_type=FeatureType.DATA,\n rename_function='{}_NORMALISED'.format)\n self.reference = self._parse_features(reference, default_feature_type=FeatureType.DATA_TIMELESS)\n\n def execute(self, eopatch):\n \"\"\" Perform histogram matching of the time-series with respect to a reference scene\n\n :param eopatch: eopatch holding the time-series and reference data\n :type eopatch: EOPatch\n :return: The same eopatch instance with the normalised time-series\n \"\"\"\n feature_type, feature_name, new_feature_name = next(self.feature(eopatch))\n reference_type, reference_name = next(self.reference(eopatch))\n\n reference_scene = eopatch[reference_type][reference_name]\n # check if band dimension matches\n if reference_scene.shape[-1] != eopatch[feature_type][feature_name].shape[-1]:\n raise ValueError('Time-series and reference scene must have corresponding bands')\n\n eopatch[feature_type][new_feature_name] = np.zeros_like(eopatch[feature_type][feature_name])\n for source_id, source in enumerate(eopatch[feature_type][feature_name]):\n # mask-out same invalid pixels\n src_masked = np.where(np.isnan(reference_scene), np.nan, source)\n ref_masked = np.where(np.isnan(source), np.nan, reference_scene)\n # compute statistics\n std_ref = np.nanstd(ref_masked, axis=(0, 1), dtype=np.float64)\n std_src = np.nanstd(src_masked, axis=(0, 1), dtype=np.float64)\n mean_ref = np.nanmean(ref_masked, axis=(0, 1), dtype=np.float64)\n mean_src = np.nanmean(src_masked, axis=(0, 1), dtype=np.float64)\n # normalise values\n eopatch[feature_type][new_feature_name][source_id] = \\\n source * (std_ref / std_src) + (mean_ref - (mean_src * (std_ref / std_src)))\n\n return eopatch\n"
] | [
[
"numpy.zeros_like",
"numpy.empty",
"numpy.nanmax",
"numpy.nanmedian",
"numpy.nanmean",
"numpy.argsort",
"numpy.nanpercentile",
"numpy.floor",
"numpy.nanstd",
"numpy.dstack",
"numpy.nanargmin",
"numpy.isnan",
"numpy.array",
"numpy.where",
"numpy.isfinite"
]
] |
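The compositing tasks in the record above all reduce to the same core step: for every pixel, pick the temporal index whose reference-band value is closest to a chosen percentile, flagging pixels with no valid observations. Below is a standalone numpy sketch of that index-by-percentile step, loosely mirroring `_numpy_index_by_percentile`; the random stack and the 255 flag value are illustrative only.

```python
# Standalone sketch of the index-by-percentile step used by the compositing
# tasks above; the (time, height, width) stack below is hypothetical.
import numpy as np

rng = np.random.default_rng(0)
stack = rng.random((6, 4, 5))                    # reference band: 6 dates, 4x5 pixels
stack[rng.random(stack.shape) < 0.2] = np.nan    # simulate missing observations

percentile = 25
target = np.nanpercentile(stack, percentile, axis=0)             # per-pixel percentile value
abs_diff = np.where(np.isnan(target), np.inf, np.abs(stack - target))
indices = np.where(np.isnan(target), 255, np.nanargmin(abs_diff, axis=0))

print(indices.shape)   # (4, 5): one temporal index per pixel, 255 where all observations are NaN
```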
MohamedAli1995/Cifar-100-Classifier | [
"924704a81ce13062825a88b90b80e8ac2ba45d63"
] | [
"src/base/base_train.py"
] | [
"import tensorflow as tf\n\n\nclass BaseTrain:\n \"\"\"Standard base_train-class for easy multiple-inheritance.\n It is responsible for defining the functions to be implemented with any child.\n\n Attributes:\n sess: Tensorflow session to use.\n model: Model to be trained.\n data: Data_loader object to interact with dataset.\n config: Config object to store data related to training, testing and validation.\n logger: Logger object to use tensorboard.\n \"\"\"\n\n def __init__(self, sess, model, data, config, logger):\n self.model = model\n self.config = config\n self.sess = sess\n self.data = data\n self.logger = logger\n if not self.config.pretrain: # If not pretrain then initialize variables.\n self.init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())\n self.sess.run(self.init)\n\n def train(self):\n \"\"\"Train the model for the number of epochs in config.num_epochs.\n Calls validate_epoch if config.use_val is set to true and per config.val_per_epoch.\n Returns:\n\n \"\"\"\n for cur_epoch in range(self.model.cur_epoch_tensor.eval(self.sess), self.config.num_epochs + 1, 1):\n self.data.prepare_new_epoch_data()\n self.train_epoch()\n if self.config.use_val and (\n cur_epoch % self.config.val_per_epoch == 0 or cur_epoch == self.config.num_epochs):\n self.validate_epoch()\n\n self.sess.run(self.model.increment_cur_epoch_tensor)\n\n def train_epoch(self):\n \"\"\"Implements the logic of training_epoch:\n -Loop over the batches of the training data and call the train step for each.\n -Add any summaries you want using the summary\n \"\"\"\n raise NotImplemented\n\n def train_step(self):\n \"\"\"Implements the logic of the train step:\n -Run the tensorflow session\n -Returns:\n Any of the metrics needs to be summarized.\n \"\"\"\n\n raise NotImplementedError\n\n def validate_epoch(self):\n \"\"\"Implements the logic of validation_epoch:\n -Loop over the batches of the validation data and call the validate step for each.\n -Add any summaries you want using the summary\n \"\"\"\n raise NotImplemented\n\n def validate_step(self):\n \"\"\"Implements the logic of the validate step:\n -Run the tensorflow session\n -Returns:\n Any of the metrics needs to be summarized.\n \"\"\"\n raise NotImplemented\n"
] | [
[
"tensorflow.local_variables_initializer",
"tensorflow.global_variables_initializer"
]
] |
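`BaseTrain` above only wires up the session and the epoch loop; concrete trainers are expected to override `train_epoch`/`train_step` (and optionally the validation hooks). The sketch below shows roughly what such a subclass could look like; the batch-loading and model attributes (`num_iter_per_epoch`, `next_batch`, `train_op`, `loss`, ...) are assumed conventions of that project, not taken from the record.

```python
# Hedged sketch of a concrete trainer built on BaseTrain; every attribute of
# config/data/model used below is an assumed project convention.
from base.base_train import BaseTrain  # assumed import path inside the repo


class ExampleTrainer(BaseTrain):  # hypothetical subclass
    def train_epoch(self):
        # Run a fixed number of steps per epoch (assumed config field).
        losses = [self.train_step() for _ in range(self.config.num_iter_per_epoch)]
        return sum(losses) / len(losses)

    def train_step(self):
        # Fetch a batch (assumed data-loader API) and run one optimizer step.
        batch_x, batch_y = self.data.next_batch(self.config.batch_size)
        feed = {self.model.x: batch_x, self.model.y: batch_y, self.model.is_training: True}
        _, loss = self.sess.run([self.model.train_op, self.model.loss], feed_dict=feed)
        return loss
```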
sparkma/arrow | [
"62fd703a4ef0abbecb02397a06a630a9dee382d9"
] | [
"python/pyarrow/tests/test_parquet.py"
] | [
"# -*- coding: utf-8 -*-\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom collections import OrderedDict\nimport datetime\nimport decimal\nimport io\nimport json\nimport os\nimport six\nimport pickle\nimport pytest\n\nimport numpy as np\n\nimport pyarrow as pa\nfrom pyarrow.compat import guid, u, BytesIO, unichar, PY2\nfrom pyarrow.pandas_compat import _pandas_api\nfrom pyarrow.tests import util\nfrom pyarrow.filesystem import LocalFileSystem, FileSystem\n\ntry:\n import pyarrow.parquet as pq\nexcept ImportError:\n pq = None\n\n\ntry:\n import pandas as pd\n import pandas.util.testing as tm\n from .pandas_examples import dataframe_with_arrays, dataframe_with_lists\nexcept ImportError:\n pd = tm = None\n\n\n# Marks all of the tests in this module\n# Ignore these with pytest ... -m 'not parquet'\npytestmark = pytest.mark.parquet\n\n\[email protected](scope='module')\ndef datadir(datadir):\n return datadir / 'parquet'\n\n\ndef _write_table(table, path, **kwargs):\n # So we see the ImportError somewhere\n import pyarrow.parquet as pq\n\n if _pandas_api.is_data_frame(table):\n table = pa.Table.from_pandas(table)\n\n pq.write_table(table, path, **kwargs)\n return table\n\n\ndef _read_table(*args, **kwargs):\n return pq.read_table(*args, **kwargs)\n\n\ndef _roundtrip_table(table, read_table_kwargs=None,\n write_table_kwargs=None):\n read_table_kwargs = read_table_kwargs or {}\n write_table_kwargs = write_table_kwargs or {}\n\n buf = io.BytesIO()\n _write_table(table, buf, **write_table_kwargs)\n buf.seek(0)\n return _read_table(buf, **read_table_kwargs)\n\n\ndef _check_roundtrip(table, expected=None, read_table_kwargs=None,\n **write_table_kwargs):\n if expected is None:\n expected = table\n\n read_table_kwargs = read_table_kwargs or {}\n\n # intentionally check twice\n result = _roundtrip_table(table, read_table_kwargs=read_table_kwargs,\n write_table_kwargs=write_table_kwargs)\n assert result.equals(expected)\n result = _roundtrip_table(result, read_table_kwargs=read_table_kwargs,\n write_table_kwargs=write_table_kwargs)\n assert result.equals(expected)\n\n\ndef _roundtrip_pandas_dataframe(df, write_kwargs):\n table = pa.Table.from_pandas(df)\n\n buf = io.BytesIO()\n _write_table(table, buf, **write_kwargs)\n\n buf.seek(0)\n table1 = _read_table(buf)\n return table1.to_pandas()\n\n\[email protected]('dtype', [int, float])\ndef test_single_pylist_column_roundtrip(tempdir, dtype):\n filename = tempdir / 'single_{}_column.parquet'.format(dtype.__name__)\n data = [pa.array(list(map(dtype, range(5))))]\n table = pa.Table.from_arrays(data, names=['a'])\n _write_table(table, filename)\n table_read = _read_table(filename)\n for i in range(table.num_columns):\n col_written = table[i]\n col_read = table_read[i]\n assert table.field(i).name == 
table_read.field(i).name\n assert col_read.num_chunks == 1\n data_written = col_written.chunk(0)\n data_read = col_read.chunk(0)\n assert data_written.equals(data_read)\n\n\ndef alltypes_sample(size=10000, seed=0, categorical=False):\n np.random.seed(seed)\n arrays = {\n 'uint8': np.arange(size, dtype=np.uint8),\n 'uint16': np.arange(size, dtype=np.uint16),\n 'uint32': np.arange(size, dtype=np.uint32),\n 'uint64': np.arange(size, dtype=np.uint64),\n 'int8': np.arange(size, dtype=np.int16),\n 'int16': np.arange(size, dtype=np.int16),\n 'int32': np.arange(size, dtype=np.int32),\n 'int64': np.arange(size, dtype=np.int64),\n 'float32': np.arange(size, dtype=np.float32),\n 'float64': np.arange(size, dtype=np.float64),\n 'bool': np.random.randn(size) > 0,\n # TODO(wesm): Test other timestamp resolutions now that arrow supports\n # them\n 'datetime': np.arange(\"2016-01-01T00:00:00.001\", size,\n dtype='datetime64[ms]'),\n 'str': pd.Series([str(x) for x in range(size)]),\n 'empty_str': [''] * size,\n 'str_with_nulls': [None] + [str(x) for x in range(size - 2)] + [None],\n 'null': [None] * size,\n 'null_list': [None] * 2 + [[None] * (x % 4) for x in range(size - 2)],\n }\n if categorical:\n arrays['str_category'] = arrays['str'].astype('category')\n return pd.DataFrame(arrays)\n\n\[email protected]\[email protected]('chunk_size', [None, 1000])\ndef test_pandas_parquet_2_0_roundtrip(tempdir, chunk_size):\n df = alltypes_sample(size=10000, categorical=True)\n\n filename = tempdir / 'pandas_roundtrip.parquet'\n arrow_table = pa.Table.from_pandas(df)\n assert arrow_table.schema.pandas_metadata is not None\n\n _write_table(arrow_table, filename, version=\"2.0\",\n coerce_timestamps='ms', chunk_size=chunk_size)\n table_read = pq.read_pandas(filename)\n assert table_read.schema.pandas_metadata is not None\n\n assert arrow_table.schema.metadata == table_read.schema.metadata\n\n df_read = table_read.to_pandas()\n tm.assert_frame_equal(df, df_read)\n\n\ndef test_set_data_page_size():\n arr = pa.array([1, 2, 3] * 1000000)\n t = pa.Table.from_arrays([arr], names=['f0'])\n\n # 128K, 256K, 512K\n page_sizes = [2 << 16, 2 << 17, 2 << 18]\n for target_page_size in page_sizes:\n _check_roundtrip(t, data_page_size=target_page_size)\n\n\[email protected]\ndef test_chunked_table_write():\n # ARROW-232\n df = alltypes_sample(size=10)\n\n batch = pa.RecordBatch.from_pandas(df)\n table = pa.Table.from_batches([batch] * 3)\n _check_roundtrip(table, version='2.0')\n\n df, _ = dataframe_with_lists()\n batch = pa.RecordBatch.from_pandas(df)\n table = pa.Table.from_batches([batch] * 3)\n _check_roundtrip(table, version='2.0')\n\n\[email protected]\ndef test_no_memory_map(tempdir):\n df = alltypes_sample(size=10)\n\n table = pa.Table.from_pandas(df)\n _check_roundtrip(table, read_table_kwargs={'memory_map': False},\n version='2.0')\n\n filename = str(tempdir / 'tmp_file')\n with open(filename, 'wb') as f:\n _write_table(table, f, version='2.0')\n table_read = pq.read_pandas(filename, memory_map=False)\n assert table_read.equals(table)\n\n\ndef test_special_chars_filename(tempdir):\n table = pa.Table.from_arrays([pa.array([42])], [\"ints\"])\n filename = \"foo # bar\"\n path = tempdir / filename\n assert not path.exists()\n _write_table(table, str(path))\n assert path.exists()\n table_read = _read_table(str(path))\n assert table_read.equals(table)\n\n\[email protected]\ndef test_empty_table_roundtrip():\n df = alltypes_sample(size=10)\n\n # Create a non-empty table to infer the types correctly, then slice to 0\n table = 
pa.Table.from_pandas(df)\n table = pa.Table.from_arrays(\n [col.chunk(0)[:0] for col in table.itercolumns()],\n names=table.schema.names)\n\n assert table.schema.field_by_name('null').type == pa.null()\n assert table.schema.field_by_name('null_list').type == pa.list_(pa.null())\n _check_roundtrip(table, version='2.0')\n\n\[email protected]\ndef test_empty_table_no_columns():\n df = pd.DataFrame()\n empty = pa.Table.from_pandas(df, preserve_index=False)\n _check_roundtrip(empty)\n\n\ndef test_empty_lists_table_roundtrip():\n # ARROW-2744: Shouldn't crash when writing an array of empty lists\n arr = pa.array([[], []], type=pa.list_(pa.int32()))\n table = pa.Table.from_arrays([arr], [\"A\"])\n _check_roundtrip(table)\n\n\[email protected]\ndef test_pandas_parquet_datetime_tz():\n s = pd.Series([datetime.datetime(2017, 9, 6)])\n s = s.dt.tz_localize('utc')\n\n s.index = s\n\n # Both a column and an index to hit both use cases\n df = pd.DataFrame({'tz_aware': s,\n 'tz_eastern': s.dt.tz_convert('US/Eastern')},\n index=s)\n\n f = BytesIO()\n\n arrow_table = pa.Table.from_pandas(df)\n\n _write_table(arrow_table, f, coerce_timestamps='ms')\n f.seek(0)\n\n table_read = pq.read_pandas(f)\n\n df_read = table_read.to_pandas()\n tm.assert_frame_equal(df, df_read)\n\n\[email protected]\[email protected](six.PY2, reason='datetime.timezone is available since '\n 'python version 3.2')\ndef test_datetime_timezone_tzinfo():\n value = datetime.datetime(2018, 1, 1, 1, 23, 45,\n tzinfo=datetime.timezone.utc)\n df = pd.DataFrame({'foo': [value]})\n\n _roundtrip_pandas_dataframe(df, write_kwargs={})\n\n\[email protected]\ndef test_pandas_parquet_custom_metadata(tempdir):\n df = alltypes_sample(size=10000)\n\n filename = tempdir / 'pandas_roundtrip.parquet'\n arrow_table = pa.Table.from_pandas(df)\n assert b'pandas' in arrow_table.schema.metadata\n\n _write_table(arrow_table, filename, version='2.0', coerce_timestamps='ms')\n\n metadata = pq.read_metadata(filename).metadata\n assert b'pandas' in metadata\n\n js = json.loads(metadata[b'pandas'].decode('utf8'))\n assert js['index_columns'] == [{'kind': 'range',\n 'name': None,\n 'start': 0, 'stop': 10000,\n 'step': 1}]\n\n\[email protected]\ndef test_pandas_parquet_column_multiindex(tempdir):\n df = alltypes_sample(size=10)\n df.columns = pd.MultiIndex.from_tuples(\n list(zip(df.columns, df.columns[::-1])),\n names=['level_1', 'level_2']\n )\n\n filename = tempdir / 'pandas_roundtrip.parquet'\n arrow_table = pa.Table.from_pandas(df)\n assert arrow_table.schema.pandas_metadata is not None\n\n _write_table(arrow_table, filename, version='2.0', coerce_timestamps='ms')\n\n table_read = pq.read_pandas(filename)\n df_read = table_read.to_pandas()\n tm.assert_frame_equal(df, df_read)\n\n\[email protected]\ndef test_pandas_parquet_2_0_roundtrip_read_pandas_no_index_written(tempdir):\n df = alltypes_sample(size=10000)\n\n filename = tempdir / 'pandas_roundtrip.parquet'\n arrow_table = pa.Table.from_pandas(df, preserve_index=False)\n js = arrow_table.schema.pandas_metadata\n assert not js['index_columns']\n # ARROW-2170\n # While index_columns should be empty, columns needs to be filled still.\n assert js['columns']\n\n _write_table(arrow_table, filename, version='2.0', coerce_timestamps='ms')\n table_read = pq.read_pandas(filename)\n\n js = table_read.schema.pandas_metadata\n assert not js['index_columns']\n\n assert arrow_table.schema.metadata == table_read.schema.metadata\n\n df_read = table_read.to_pandas()\n tm.assert_frame_equal(df, df_read)\n\n\[email protected]\ndef 
test_pandas_parquet_1_0_roundtrip(tempdir):\n size = 10000\n np.random.seed(0)\n df = pd.DataFrame({\n 'uint8': np.arange(size, dtype=np.uint8),\n 'uint16': np.arange(size, dtype=np.uint16),\n 'uint32': np.arange(size, dtype=np.uint32),\n 'uint64': np.arange(size, dtype=np.uint64),\n 'int8': np.arange(size, dtype=np.int16),\n 'int16': np.arange(size, dtype=np.int16),\n 'int32': np.arange(size, dtype=np.int32),\n 'int64': np.arange(size, dtype=np.int64),\n 'float32': np.arange(size, dtype=np.float32),\n 'float64': np.arange(size, dtype=np.float64),\n 'bool': np.random.randn(size) > 0,\n 'str': [str(x) for x in range(size)],\n 'str_with_nulls': [None] + [str(x) for x in range(size - 2)] + [None],\n 'empty_str': [''] * size\n })\n filename = tempdir / 'pandas_roundtrip.parquet'\n arrow_table = pa.Table.from_pandas(df)\n _write_table(arrow_table, filename, version='1.0')\n table_read = _read_table(filename)\n df_read = table_read.to_pandas()\n\n # We pass uint32_t as int64_t if we write Parquet version 1.0\n df['uint32'] = df['uint32'].values.astype(np.int64)\n\n tm.assert_frame_equal(df, df_read)\n\n\[email protected]\ndef test_multiple_path_types(tempdir):\n # Test compatibility with PEP 519 path-like objects\n path = tempdir / 'zzz.parquet'\n df = pd.DataFrame({'x': np.arange(10, dtype=np.int64)})\n _write_table(df, path)\n table_read = _read_table(path)\n df_read = table_read.to_pandas()\n tm.assert_frame_equal(df, df_read)\n\n # Test compatibility with plain string paths\n path = str(tempdir) + 'zzz.parquet'\n df = pd.DataFrame({'x': np.arange(10, dtype=np.int64)})\n _write_table(df, path)\n table_read = _read_table(path)\n df_read = table_read.to_pandas()\n tm.assert_frame_equal(df, df_read)\n\n\[email protected]\ndef test_pandas_column_selection(tempdir):\n size = 10000\n np.random.seed(0)\n df = pd.DataFrame({\n 'uint8': np.arange(size, dtype=np.uint8),\n 'uint16': np.arange(size, dtype=np.uint16)\n })\n filename = tempdir / 'pandas_roundtrip.parquet'\n arrow_table = pa.Table.from_pandas(df)\n _write_table(arrow_table, filename)\n table_read = _read_table(filename, columns=['uint8'])\n df_read = table_read.to_pandas()\n\n tm.assert_frame_equal(df[['uint8']], df_read)\n\n # ARROW-4267: Selection of duplicate columns still leads to these columns\n # being read uniquely.\n table_read = _read_table(filename, columns=['uint8', 'uint8'])\n df_read = table_read.to_pandas()\n\n tm.assert_frame_equal(df[['uint8']], df_read)\n\n\ndef _random_integers(size, dtype):\n # We do not generate integers outside the int64 range\n platform_int_info = np.iinfo('int_')\n iinfo = np.iinfo(dtype)\n return np.random.randint(max(iinfo.min, platform_int_info.min),\n min(iinfo.max, platform_int_info.max),\n size=size).astype(dtype)\n\n\ndef _test_dataframe(size=10000, seed=0):\n np.random.seed(seed)\n df = pd.DataFrame({\n 'uint8': _random_integers(size, np.uint8),\n 'uint16': _random_integers(size, np.uint16),\n 'uint32': _random_integers(size, np.uint32),\n 'uint64': _random_integers(size, np.uint64),\n 'int8': _random_integers(size, np.int8),\n 'int16': _random_integers(size, np.int16),\n 'int32': _random_integers(size, np.int32),\n 'int64': _random_integers(size, np.int64),\n 'float32': np.random.randn(size).astype(np.float32),\n 'float64': np.arange(size, dtype=np.float64),\n 'bool': np.random.randn(size) > 0,\n 'strings': [tm.rands(10) for i in range(size)],\n 'all_none': [None] * size,\n 'all_none_category': [None] * size\n })\n # TODO(PARQUET-1015)\n # df['all_none_category'] = 
df['all_none_category'].astype('category')\n return df\n\n\[email protected]\ndef test_pandas_parquet_native_file_roundtrip(tempdir):\n df = _test_dataframe(10000)\n arrow_table = pa.Table.from_pandas(df)\n imos = pa.BufferOutputStream()\n _write_table(arrow_table, imos, version=\"2.0\")\n buf = imos.getvalue()\n reader = pa.BufferReader(buf)\n df_read = _read_table(reader).to_pandas()\n tm.assert_frame_equal(df, df_read)\n\n\[email protected]\ndef test_parquet_incremental_file_build(tempdir):\n df = _test_dataframe(100)\n df['unique_id'] = 0\n\n arrow_table = pa.Table.from_pandas(df, preserve_index=False)\n out = pa.BufferOutputStream()\n\n writer = pq.ParquetWriter(out, arrow_table.schema, version='2.0')\n\n frames = []\n for i in range(10):\n df['unique_id'] = i\n arrow_table = pa.Table.from_pandas(df, preserve_index=False)\n writer.write_table(arrow_table)\n\n frames.append(df.copy())\n\n writer.close()\n\n buf = out.getvalue()\n result = _read_table(pa.BufferReader(buf))\n\n expected = pd.concat(frames, ignore_index=True)\n tm.assert_frame_equal(result.to_pandas(), expected)\n\n\[email protected]\ndef test_read_pandas_column_subset(tempdir):\n df = _test_dataframe(10000)\n arrow_table = pa.Table.from_pandas(df)\n imos = pa.BufferOutputStream()\n _write_table(arrow_table, imos, version=\"2.0\")\n buf = imos.getvalue()\n reader = pa.BufferReader(buf)\n df_read = pq.read_pandas(reader, columns=['strings', 'uint8']).to_pandas()\n tm.assert_frame_equal(df[['strings', 'uint8']], df_read)\n\n\[email protected]\ndef test_pandas_parquet_empty_roundtrip(tempdir):\n df = _test_dataframe(0)\n arrow_table = pa.Table.from_pandas(df)\n imos = pa.BufferOutputStream()\n _write_table(arrow_table, imos, version=\"2.0\")\n buf = imos.getvalue()\n reader = pa.BufferReader(buf)\n df_read = _read_table(reader).to_pandas()\n tm.assert_frame_equal(df, df_read)\n\n\[email protected]\ndef test_pandas_parquet_pyfile_roundtrip(tempdir):\n filename = tempdir / 'pandas_pyfile_roundtrip.parquet'\n size = 5\n df = pd.DataFrame({\n 'int64': np.arange(size, dtype=np.int64),\n 'float32': np.arange(size, dtype=np.float32),\n 'float64': np.arange(size, dtype=np.float64),\n 'bool': np.random.randn(size) > 0,\n 'strings': ['foo', 'bar', None, 'baz', 'qux']\n })\n\n arrow_table = pa.Table.from_pandas(df)\n\n with filename.open('wb') as f:\n _write_table(arrow_table, f, version=\"1.0\")\n\n data = io.BytesIO(filename.read_bytes())\n\n table_read = _read_table(data)\n df_read = table_read.to_pandas()\n tm.assert_frame_equal(df, df_read)\n\n\[email protected]\ndef test_pandas_parquet_configuration_options(tempdir):\n size = 10000\n np.random.seed(0)\n df = pd.DataFrame({\n 'uint8': np.arange(size, dtype=np.uint8),\n 'uint16': np.arange(size, dtype=np.uint16),\n 'uint32': np.arange(size, dtype=np.uint32),\n 'uint64': np.arange(size, dtype=np.uint64),\n 'int8': np.arange(size, dtype=np.int16),\n 'int16': np.arange(size, dtype=np.int16),\n 'int32': np.arange(size, dtype=np.int32),\n 'int64': np.arange(size, dtype=np.int64),\n 'float32': np.arange(size, dtype=np.float32),\n 'float64': np.arange(size, dtype=np.float64),\n 'bool': np.random.randn(size) > 0\n })\n filename = tempdir / 'pandas_roundtrip.parquet'\n arrow_table = pa.Table.from_pandas(df)\n\n for use_dictionary in [True, False]:\n _write_table(arrow_table, filename, version='2.0',\n use_dictionary=use_dictionary)\n table_read = _read_table(filename)\n df_read = table_read.to_pandas()\n tm.assert_frame_equal(df, df_read)\n\n for write_statistics in [True, False]:\n 
_write_table(arrow_table, filename, version='2.0',\n write_statistics=write_statistics)\n table_read = _read_table(filename)\n df_read = table_read.to_pandas()\n tm.assert_frame_equal(df, df_read)\n\n for compression in ['NONE', 'SNAPPY', 'GZIP', 'LZ4', 'ZSTD']:\n _write_table(arrow_table, filename, version='2.0',\n compression=compression)\n table_read = _read_table(filename)\n df_read = table_read.to_pandas()\n tm.assert_frame_equal(df, df_read)\n\n\ndef make_sample_file(table_or_df):\n if isinstance(table_or_df, pa.Table):\n a_table = table_or_df\n else:\n a_table = pa.Table.from_pandas(table_or_df)\n\n buf = io.BytesIO()\n _write_table(a_table, buf, compression='SNAPPY', version='2.0',\n coerce_timestamps='ms')\n\n buf.seek(0)\n return pq.ParquetFile(buf)\n\n\[email protected]\ndef test_parquet_metadata_api():\n df = alltypes_sample(size=10000)\n df = df.reindex(columns=sorted(df.columns))\n df.index = np.random.randint(0, 1000000, size=len(df))\n\n fileh = make_sample_file(df)\n ncols = len(df.columns)\n\n # Series of sniff tests\n meta = fileh.metadata\n repr(meta)\n assert meta.num_rows == len(df)\n assert meta.num_columns == ncols + 1 # +1 for index\n assert meta.num_row_groups == 1\n assert meta.format_version == '2.0'\n assert 'parquet-cpp' in meta.created_by\n assert isinstance(meta.serialized_size, int)\n assert isinstance(meta.metadata, dict)\n\n # Schema\n schema = fileh.schema\n assert meta.schema is schema\n assert len(schema) == ncols + 1 # +1 for index\n repr(schema)\n\n col = schema[0]\n repr(col)\n assert col.name == df.columns[0]\n assert col.max_definition_level == 1\n assert col.max_repetition_level == 0\n assert col.max_repetition_level == 0\n\n assert col.physical_type == 'BOOLEAN'\n assert col.converted_type == 'NONE'\n\n with pytest.raises(IndexError):\n schema[ncols + 1] # +1 for index\n\n with pytest.raises(IndexError):\n schema[-1]\n\n # Row group\n for rg in range(meta.num_row_groups):\n rg_meta = meta.row_group(rg)\n assert isinstance(rg_meta, pq.RowGroupMetaData)\n repr(rg_meta)\n\n for col in range(rg_meta.num_columns):\n col_meta = rg_meta.column(col)\n assert isinstance(col_meta, pq.ColumnChunkMetaData)\n repr(col_meta)\n\n with pytest.raises(IndexError):\n meta.row_group(-1)\n\n with pytest.raises(IndexError):\n meta.row_group(meta.num_row_groups + 1)\n\n rg_meta = meta.row_group(0)\n assert rg_meta.num_rows == len(df)\n assert rg_meta.num_columns == ncols + 1 # +1 for index\n assert rg_meta.total_byte_size > 0\n\n with pytest.raises(IndexError):\n col_meta = rg_meta.column(-1)\n\n with pytest.raises(IndexError):\n col_meta = rg_meta.column(ncols + 2)\n\n col_meta = rg_meta.column(0)\n assert col_meta.file_offset > 0\n assert col_meta.file_path == '' # created from BytesIO\n assert col_meta.physical_type == 'BOOLEAN'\n assert col_meta.num_values == 10000\n assert col_meta.path_in_schema == 'bool'\n assert col_meta.is_stats_set is True\n assert isinstance(col_meta.statistics, pq.Statistics)\n assert col_meta.compression == 'SNAPPY'\n assert col_meta.encodings == ('PLAIN', 'RLE')\n assert col_meta.has_dictionary_page is False\n assert col_meta.dictionary_page_offset is None\n assert col_meta.data_page_offset > 0\n assert col_meta.total_compressed_size > 0\n assert col_meta.total_uncompressed_size > 0\n with pytest.raises(NotImplementedError):\n col_meta.has_index_page\n with pytest.raises(NotImplementedError):\n col_meta.index_page_offset\n\n\[email protected]\[email protected](\n (\n 'data',\n 'type',\n 'physical_type',\n 'min_value',\n 'max_value',\n 
'null_count',\n 'num_values',\n 'distinct_count'\n ),\n [\n ([1, 2, 2, None, 4], pa.uint8(), 'INT32', 1, 4, 1, 4, 0),\n ([1, 2, 2, None, 4], pa.uint16(), 'INT32', 1, 4, 1, 4, 0),\n ([1, 2, 2, None, 4], pa.uint32(), 'INT32', 1, 4, 1, 4, 0),\n ([1, 2, 2, None, 4], pa.uint64(), 'INT64', 1, 4, 1, 4, 0),\n ([-1, 2, 2, None, 4], pa.int8(), 'INT32', -1, 4, 1, 4, 0),\n ([-1, 2, 2, None, 4], pa.int16(), 'INT32', -1, 4, 1, 4, 0),\n ([-1, 2, 2, None, 4], pa.int32(), 'INT32', -1, 4, 1, 4, 0),\n ([-1, 2, 2, None, 4], pa.int64(), 'INT64', -1, 4, 1, 4, 0),\n (\n [-1.1, 2.2, 2.3, None, 4.4], pa.float32(),\n 'FLOAT', -1.1, 4.4, 1, 4, 0\n ),\n (\n [-1.1, 2.2, 2.3, None, 4.4], pa.float64(),\n 'DOUBLE', -1.1, 4.4, 1, 4, 0\n ),\n (\n [u'', u'b', unichar(1000), None, u'aaa'], pa.binary(),\n 'BYTE_ARRAY', b'', unichar(1000).encode('utf-8'), 1, 4, 0\n ),\n (\n [True, False, False, True, True], pa.bool_(),\n 'BOOLEAN', False, True, 0, 5, 0\n ),\n (\n [b'\\x00', b'b', b'12', None, b'aaa'], pa.binary(),\n 'BYTE_ARRAY', b'\\x00', b'b', 1, 4, 0\n ),\n ]\n)\ndef test_parquet_column_statistics_api(data, type, physical_type, min_value,\n max_value, null_count, num_values,\n distinct_count):\n df = pd.DataFrame({'data': data})\n schema = pa.schema([pa.field('data', type)])\n table = pa.Table.from_pandas(df, schema=schema, safe=False)\n fileh = make_sample_file(table)\n\n meta = fileh.metadata\n\n rg_meta = meta.row_group(0)\n col_meta = rg_meta.column(0)\n\n stat = col_meta.statistics\n assert stat.has_min_max\n assert _close(type, stat.min, min_value)\n assert _close(type, stat.max, max_value)\n assert stat.null_count == null_count\n assert stat.num_values == num_values\n # TODO(kszucs) until parquet-cpp API doesn't expose HasDistinctCount\n # method, missing distinct_count is represented as zero instead of None\n assert stat.distinct_count == distinct_count\n assert stat.physical_type == physical_type\n\n\ndef _close(type, left, right):\n if type == pa.float32():\n return abs(left - right) < 1E-7\n elif type == pa.float64():\n return abs(left - right) < 1E-13\n else:\n return left == right\n\n\ndef test_statistics_convert_logical_types(tempdir):\n # ARROW-5166, ARROW-4139\n\n # (min, max, type)\n cases = [(10, 11164359321221007157, pa.uint64()),\n (10, 4294967295, pa.uint32()),\n (u\"ähnlich\", u\"öffentlich\", pa.utf8()),\n (datetime.time(10, 30, 0, 1000), datetime.time(15, 30, 0, 1000),\n pa.time32('ms')),\n (datetime.time(10, 30, 0, 1000), datetime.time(15, 30, 0, 1000),\n pa.time64('us')),\n (datetime.datetime(2019, 6, 24, 0, 0, 0, 1000),\n datetime.datetime(2019, 6, 25, 0, 0, 0, 1000),\n pa.timestamp('ms')),\n (datetime.datetime(2019, 6, 24, 0, 0, 0, 1000),\n datetime.datetime(2019, 6, 25, 0, 0, 0, 1000),\n pa.timestamp('us'))]\n\n for i, (min_val, max_val, typ) in enumerate(cases):\n t = pa.Table.from_arrays([pa.array([min_val, max_val], type=typ)],\n ['col'])\n path = str(tempdir / ('example{}.parquet'.format(i)))\n pq.write_table(t, path, version='2.0')\n pf = pq.ParquetFile(path)\n stats = pf.metadata.row_group(0).column(0).statistics\n assert stats.min == min_val\n assert stats.max == max_val\n\n\ndef test_parquet_write_disable_statistics(tempdir):\n table = pa.Table.from_pydict(\n {'a': pa.array([1, 2, 3]), 'b': pa.array(['a', 'b', 'c'])})\n _write_table(table, tempdir / 'data.parquet')\n meta = pq.read_metadata(tempdir / 'data.parquet')\n for col in [0, 1]:\n cc = meta.row_group(0).column(col)\n assert cc.is_stats_set is True\n assert cc.statistics is not None\n\n _write_table(table, tempdir / 
'data2.parquet', write_statistics=False)\n meta = pq.read_metadata(tempdir / 'data2.parquet')\n for col in [0, 1]:\n cc = meta.row_group(0).column(col)\n assert cc.is_stats_set is False\n assert cc.statistics is None\n\n _write_table(table, tempdir / 'data3.parquet', write_statistics=['a'])\n meta = pq.read_metadata(tempdir / 'data3.parquet')\n cc_a = meta.row_group(0).column(0)\n assert cc_a.is_stats_set is True\n assert cc_a.statistics is not None\n cc_b = meta.row_group(0).column(1)\n assert cc_b.is_stats_set is False\n assert cc_b.statistics is None\n\n\[email protected]\ndef test_compare_schemas():\n df = alltypes_sample(size=10000)\n\n fileh = make_sample_file(df)\n fileh2 = make_sample_file(df)\n fileh3 = make_sample_file(df[df.columns[::2]])\n\n # ParquetSchema\n assert isinstance(fileh.schema, pq.ParquetSchema)\n assert fileh.schema.equals(fileh.schema)\n assert fileh.schema == fileh.schema\n assert fileh.schema.equals(fileh2.schema)\n assert fileh.schema == fileh2.schema\n assert fileh.schema != 'arbitrary object'\n assert not fileh.schema.equals(fileh3.schema)\n assert fileh.schema != fileh3.schema\n\n # ColumnSchema\n assert isinstance(fileh.schema[0], pq.ColumnSchema)\n assert fileh.schema[0].equals(fileh.schema[0])\n assert fileh.schema[0] == fileh.schema[0]\n assert not fileh.schema[0].equals(fileh.schema[1])\n assert fileh.schema[0] != fileh.schema[1]\n assert fileh.schema[0] != 'arbitrary object'\n\n\ndef test_validate_schema_write_table(tempdir):\n # ARROW-2926\n simple_fields = [\n pa.field('POS', pa.uint32()),\n pa.field('desc', pa.string())\n ]\n\n simple_schema = pa.schema(simple_fields)\n\n # simple_table schema does not match simple_schema\n simple_from_array = [pa.array([1]), pa.array(['bla'])]\n simple_table = pa.Table.from_arrays(simple_from_array, ['POS', 'desc'])\n\n path = tempdir / 'simple_validate_schema.parquet'\n\n with pq.ParquetWriter(path, simple_schema,\n version='2.0',\n compression='snappy', flavor='spark') as w:\n with pytest.raises(ValueError):\n w.write_table(simple_table)\n\n\[email protected]\ndef test_column_of_arrays(tempdir):\n df, schema = dataframe_with_arrays()\n\n filename = tempdir / 'pandas_roundtrip.parquet'\n arrow_table = pa.Table.from_pandas(df, schema=schema)\n _write_table(arrow_table, filename, version=\"2.0\", coerce_timestamps='ms')\n table_read = _read_table(filename)\n df_read = table_read.to_pandas()\n tm.assert_frame_equal(df, df_read)\n\n\[email protected]\ndef test_coerce_timestamps(tempdir):\n from collections import OrderedDict\n # ARROW-622\n arrays = OrderedDict()\n fields = [pa.field('datetime64',\n pa.list_(pa.timestamp('ms')))]\n arrays['datetime64'] = [\n np.array(['2007-07-13T01:23:34.123456789',\n None,\n '2010-08-13T05:46:57.437699912'],\n dtype='datetime64[ms]'),\n None,\n None,\n np.array(['2007-07-13T02',\n None,\n '2010-08-13T05:46:57.437699912'],\n dtype='datetime64[ms]'),\n ]\n\n df = pd.DataFrame(arrays)\n schema = pa.schema(fields)\n\n filename = tempdir / 'pandas_roundtrip.parquet'\n arrow_table = pa.Table.from_pandas(df, schema=schema)\n\n _write_table(arrow_table, filename, version=\"2.0\", coerce_timestamps='us')\n table_read = _read_table(filename)\n df_read = table_read.to_pandas()\n\n df_expected = df.copy()\n for i, x in enumerate(df_expected['datetime64']):\n if isinstance(x, np.ndarray):\n df_expected['datetime64'][i] = x.astype('M8[us]')\n\n tm.assert_frame_equal(df_expected, df_read)\n\n with pytest.raises(ValueError):\n _write_table(arrow_table, filename, version='2.0',\n 
coerce_timestamps='unknown')\n\n\[email protected]\ndef test_coerce_timestamps_truncated(tempdir):\n \"\"\"\n ARROW-2555: Test that we can truncate timestamps when coercing if\n explicitly allowed.\n \"\"\"\n dt_us = datetime.datetime(year=2017, month=1, day=1, hour=1, minute=1,\n second=1, microsecond=1)\n dt_ms = datetime.datetime(year=2017, month=1, day=1, hour=1, minute=1,\n second=1)\n\n fields_us = [pa.field('datetime64', pa.timestamp('us'))]\n arrays_us = {'datetime64': [dt_us, dt_ms]}\n\n df_us = pd.DataFrame(arrays_us)\n schema_us = pa.schema(fields_us)\n\n filename = tempdir / 'pandas_truncated.parquet'\n table_us = pa.Table.from_pandas(df_us, schema=schema_us)\n\n _write_table(table_us, filename, version=\"2.0\", coerce_timestamps='ms',\n allow_truncated_timestamps=True)\n table_ms = _read_table(filename)\n df_ms = table_ms.to_pandas()\n\n arrays_expected = {'datetime64': [dt_ms, dt_ms]}\n df_expected = pd.DataFrame(arrays_expected)\n tm.assert_frame_equal(df_expected, df_ms)\n\n\[email protected]\ndef test_column_of_lists(tempdir):\n df, schema = dataframe_with_lists(parquet_compatible=True)\n\n filename = tempdir / 'pandas_roundtrip.parquet'\n arrow_table = pa.Table.from_pandas(df, schema=schema)\n _write_table(arrow_table, filename, version='2.0')\n table_read = _read_table(filename)\n df_read = table_read.to_pandas()\n\n if PY2:\n # assert_frame_equal fails when comparing datetime.date and\n # np.datetime64, even with check_datetimelike_compat=True so\n # convert the values to np.datetime64 instead\n for col in ['date32[day]_list', 'date64[ms]_list']:\n df[col] = df[col].apply(\n lambda x: list(map(np.datetime64, x)) if x else x\n )\n\n tm.assert_frame_equal(df, df_read)\n\n\[email protected]\ndef test_date_time_types(tempdir):\n t1 = pa.date32()\n data1 = np.array([17259, 17260, 17261], dtype='int32')\n a1 = pa.array(data1, type=t1)\n\n t2 = pa.date64()\n data2 = data1.astype('int64') * 86400000\n a2 = pa.array(data2, type=t2)\n\n t3 = pa.timestamp('us')\n start = pd.Timestamp('2001-01-01').value / 1000\n data3 = np.array([start, start + 1, start + 2], dtype='int64')\n a3 = pa.array(data3, type=t3)\n\n t4 = pa.time32('ms')\n data4 = np.arange(3, dtype='i4')\n a4 = pa.array(data4, type=t4)\n\n t5 = pa.time64('us')\n a5 = pa.array(data4.astype('int64'), type=t5)\n\n t6 = pa.time32('s')\n a6 = pa.array(data4, type=t6)\n\n ex_t6 = pa.time32('ms')\n ex_a6 = pa.array(data4 * 1000, type=ex_t6)\n\n t7 = pa.timestamp('ns')\n start = pd.Timestamp('2001-01-01').value\n data7 = np.array([start, start + 1000, start + 2000],\n dtype='int64')\n a7 = pa.array(data7, type=t7)\n\n table = pa.Table.from_arrays([a1, a2, a3, a4, a5, a6, a7],\n ['date32', 'date64', 'timestamp[us]',\n 'time32[s]', 'time64[us]',\n 'time32_from64[s]',\n 'timestamp[ns]'])\n\n # date64 as date32\n # time32[s] to time32[ms]\n expected = pa.Table.from_arrays([a1, a1, a3, a4, a5, ex_a6, a7],\n ['date32', 'date64', 'timestamp[us]',\n 'time32[s]', 'time64[us]',\n 'time32_from64[s]',\n 'timestamp[ns]'])\n\n _check_roundtrip(table, expected=expected, version='2.0')\n\n t0 = pa.timestamp('ms')\n data0 = np.arange(4, dtype='int64')\n a0 = pa.array(data0, type=t0)\n\n t1 = pa.timestamp('us')\n data1 = np.arange(4, dtype='int64')\n a1 = pa.array(data1, type=t1)\n\n t2 = pa.timestamp('ns')\n data2 = np.arange(4, dtype='int64')\n a2 = pa.array(data2, type=t2)\n\n table = pa.Table.from_arrays([a0, a1, a2],\n ['ts[ms]', 'ts[us]', 'ts[ns]'])\n expected = pa.Table.from_arrays([a0, a1, a2],\n ['ts[ms]', 'ts[us]', 'ts[ns]'])\n\n # 
int64 for all timestamps supported by default\n filename = tempdir / 'int64_timestamps.parquet'\n _write_table(table, filename, version='2.0')\n parquet_schema = pq.ParquetFile(filename).schema\n for i in range(3):\n assert parquet_schema.column(i).physical_type == 'INT64'\n read_table = _read_table(filename)\n assert read_table.equals(expected)\n\n t0_ns = pa.timestamp('ns')\n data0_ns = np.array(data0 * 1000000, dtype='int64')\n a0_ns = pa.array(data0_ns, type=t0_ns)\n\n t1_ns = pa.timestamp('ns')\n data1_ns = np.array(data1 * 1000, dtype='int64')\n a1_ns = pa.array(data1_ns, type=t1_ns)\n\n expected = pa.Table.from_arrays([a0_ns, a1_ns, a2],\n ['ts[ms]', 'ts[us]', 'ts[ns]'])\n\n # int96 nanosecond timestamps produced upon request\n filename = tempdir / 'explicit_int96_timestamps.parquet'\n _write_table(table, filename, version='2.0',\n use_deprecated_int96_timestamps=True)\n parquet_schema = pq.ParquetFile(filename).schema\n for i in range(3):\n assert parquet_schema.column(i).physical_type == 'INT96'\n read_table = _read_table(filename)\n assert read_table.equals(expected)\n\n # int96 nanosecond timestamps implied by flavor 'spark'\n filename = tempdir / 'spark_int96_timestamps.parquet'\n _write_table(table, filename, version='2.0',\n flavor='spark')\n parquet_schema = pq.ParquetFile(filename).schema\n for i in range(3):\n assert parquet_schema.column(i).physical_type == 'INT96'\n read_table = _read_table(filename)\n assert read_table.equals(expected)\n\n\ndef test_timestamp_restore_timezone():\n # ARROW-5888, restore timezone from serialized metadata\n ty = pa.timestamp('ms', tz='America/New_York')\n arr = pa.array([1, 2, 3], type=ty)\n t = pa.table([arr], names=['f0'])\n _check_roundtrip(t)\n\n\[email protected]\ndef test_list_of_datetime_time_roundtrip():\n # ARROW-4135\n times = pd.to_datetime(['09:00', '09:30', '10:00', '10:30', '11:00',\n '11:30', '12:00'])\n df = pd.DataFrame({'time': [times.time]})\n _roundtrip_pandas_dataframe(df, write_kwargs={})\n\n\[email protected]\ndef test_parquet_version_timestamp_differences():\n i_s = pd.Timestamp('2010-01-01').value / 1000000000 # := 1262304000\n\n d_s = np.arange(i_s, i_s + 10, 1, dtype='int64')\n d_ms = d_s * 1000\n d_us = d_ms * 1000\n d_ns = d_us * 1000\n\n a_s = pa.array(d_s, type=pa.timestamp('s'))\n a_ms = pa.array(d_ms, type=pa.timestamp('ms'))\n a_us = pa.array(d_us, type=pa.timestamp('us'))\n a_ns = pa.array(d_ns, type=pa.timestamp('ns'))\n\n names = ['ts:s', 'ts:ms', 'ts:us', 'ts:ns']\n table = pa.Table.from_arrays([a_s, a_ms, a_us, a_ns], names)\n\n # Using Parquet version 1.0, seconds should be coerced to milliseconds\n # and nanoseconds should be coerced to microseconds by default\n expected = pa.Table.from_arrays([a_ms, a_ms, a_us, a_us], names)\n _check_roundtrip(table, expected)\n\n # Using Parquet version 2.0, seconds should be coerced to milliseconds\n # and nanoseconds should be retained by default\n expected = pa.Table.from_arrays([a_ms, a_ms, a_us, a_ns], names)\n _check_roundtrip(table, expected, version='2.0')\n\n # Using Parquet version 1.0, coercing to milliseconds or microseconds\n # is allowed\n expected = pa.Table.from_arrays([a_ms, a_ms, a_ms, a_ms], names)\n _check_roundtrip(table, expected, coerce_timestamps='ms')\n\n # Using Parquet version 2.0, coercing to milliseconds or microseconds\n # is allowed\n expected = pa.Table.from_arrays([a_us, a_us, a_us, a_us], names)\n _check_roundtrip(table, expected, version='2.0', coerce_timestamps='us')\n\n # TODO: after pyarrow allows coerce_timestamps='ns', tests 
like the\n # following should pass ...\n\n # Using Parquet version 1.0, coercing to nanoseconds is not allowed\n # expected = None\n # with pytest.raises(NotImplementedError):\n # _roundtrip_table(table, coerce_timestamps='ns')\n\n # Using Parquet version 2.0, coercing to nanoseconds is allowed\n # expected = pa.Table.from_arrays([a_ns, a_ns, a_ns, a_ns], names)\n # _check_roundtrip(table, expected, version='2.0', coerce_timestamps='ns')\n\n # For either Parquet version, coercing to nanoseconds is allowed\n # if Int96 storage is used\n expected = pa.Table.from_arrays([a_ns, a_ns, a_ns, a_ns], names)\n _check_roundtrip(table, expected,\n use_deprecated_int96_timestamps=True)\n _check_roundtrip(table, expected, version='2.0',\n use_deprecated_int96_timestamps=True)\n\n\ndef test_large_list_records():\n # This was fixed in PARQUET-1100\n\n list_lengths = np.random.randint(0, 500, size=50)\n list_lengths[::10] = 0\n\n list_values = [list(map(int, np.random.randint(0, 100, size=x)))\n if i % 8 else None\n for i, x in enumerate(list_lengths)]\n\n a1 = pa.array(list_values)\n\n table = pa.Table.from_arrays([a1], ['int_lists'])\n _check_roundtrip(table)\n\n\ndef test_sanitized_spark_field_names():\n a0 = pa.array([0, 1, 2, 3, 4])\n name = 'prohib; ,\\t{}'\n table = pa.Table.from_arrays([a0], [name])\n\n result = _roundtrip_table(table, write_table_kwargs={'flavor': 'spark'})\n\n expected_name = 'prohib______'\n assert result.schema[0].name == expected_name\n\n\[email protected]\ndef test_spark_flavor_preserves_pandas_metadata():\n df = _test_dataframe(size=100)\n df.index = np.arange(0, 10 * len(df), 10)\n df.index.name = 'foo'\n\n result = _roundtrip_pandas_dataframe(df, {'version': '2.0',\n 'flavor': 'spark'})\n tm.assert_frame_equal(result, df)\n\n\ndef test_fixed_size_binary():\n t0 = pa.binary(10)\n data = [b'fooooooooo', None, b'barooooooo', b'quxooooooo']\n a0 = pa.array(data, type=t0)\n\n table = pa.Table.from_arrays([a0],\n ['binary[10]'])\n _check_roundtrip(table)\n\n\[email protected]\ndef test_multithreaded_read():\n df = alltypes_sample(size=10000)\n\n table = pa.Table.from_pandas(df)\n\n buf = io.BytesIO()\n _write_table(table, buf, compression='SNAPPY', version='2.0')\n\n buf.seek(0)\n table1 = _read_table(buf, use_threads=True)\n\n buf.seek(0)\n table2 = _read_table(buf, use_threads=False)\n\n assert table1.equals(table2)\n\n\[email protected]\ndef test_min_chunksize():\n data = pd.DataFrame([np.arange(4)], columns=['A', 'B', 'C', 'D'])\n table = pa.Table.from_pandas(data.reset_index())\n\n buf = io.BytesIO()\n _write_table(table, buf, chunk_size=-1)\n\n buf.seek(0)\n result = _read_table(buf)\n\n assert result.equals(table)\n\n with pytest.raises(ValueError):\n _write_table(table, buf, chunk_size=0)\n\n\[email protected]\ndef test_pass_separate_metadata():\n # ARROW-471\n df = alltypes_sample(size=10000)\n\n a_table = pa.Table.from_pandas(df)\n\n buf = io.BytesIO()\n _write_table(a_table, buf, compression='snappy', version='2.0')\n\n buf.seek(0)\n metadata = pq.read_metadata(buf)\n\n buf.seek(0)\n\n fileh = pq.ParquetFile(buf, metadata=metadata)\n\n tm.assert_frame_equal(df, fileh.read().to_pandas())\n\n\[email protected]\ndef test_read_single_row_group():\n # ARROW-471\n N, K = 10000, 4\n df = alltypes_sample(size=N)\n\n a_table = pa.Table.from_pandas(df)\n\n buf = io.BytesIO()\n _write_table(a_table, buf, row_group_size=N / K,\n compression='snappy', version='2.0')\n\n buf.seek(0)\n\n pf = pq.ParquetFile(buf)\n\n assert pf.num_row_groups == K\n\n row_groups = 
[pf.read_row_group(i) for i in range(K)]\n result = pa.concat_tables(row_groups)\n tm.assert_frame_equal(df, result.to_pandas())\n\n\[email protected]\ndef test_read_single_row_group_with_column_subset():\n N, K = 10000, 4\n df = alltypes_sample(size=N)\n a_table = pa.Table.from_pandas(df)\n\n buf = io.BytesIO()\n _write_table(a_table, buf, row_group_size=N / K,\n compression='snappy', version='2.0')\n\n buf.seek(0)\n pf = pq.ParquetFile(buf)\n\n cols = list(df.columns[:2])\n row_groups = [pf.read_row_group(i, columns=cols) for i in range(K)]\n result = pa.concat_tables(row_groups)\n tm.assert_frame_equal(df[cols], result.to_pandas())\n\n # ARROW-4267: Selection of duplicate columns still leads to these columns\n # being read uniquely.\n row_groups = [pf.read_row_group(i, columns=cols + cols) for i in range(K)]\n result = pa.concat_tables(row_groups)\n tm.assert_frame_equal(df[cols], result.to_pandas())\n\n\[email protected]\ndef test_scan_contents():\n N, K = 10000, 4\n df = alltypes_sample(size=N)\n a_table = pa.Table.from_pandas(df)\n\n buf = io.BytesIO()\n _write_table(a_table, buf, row_group_size=N / K,\n compression='snappy', version='2.0')\n\n buf.seek(0)\n pf = pq.ParquetFile(buf)\n\n assert pf.scan_contents() == 10000\n assert pf.scan_contents(df.columns[:4]) == 10000\n\n\[email protected]\ndef test_parquet_piece_read(tempdir):\n df = _test_dataframe(1000)\n table = pa.Table.from_pandas(df)\n\n path = tempdir / 'parquet_piece_read.parquet'\n _write_table(table, path, version='2.0')\n\n piece1 = pq.ParquetDatasetPiece(path)\n\n result = piece1.read()\n assert result.equals(table)\n\n\[email protected]\ndef test_parquet_piece_open_and_get_metadata(tempdir):\n df = _test_dataframe(100)\n table = pa.Table.from_pandas(df)\n\n path = tempdir / 'parquet_piece_read.parquet'\n _write_table(table, path, version='2.0')\n\n piece = pq.ParquetDatasetPiece(path)\n table1 = piece.read()\n assert isinstance(table1, pa.Table)\n meta1 = piece.get_metadata()\n assert isinstance(meta1, pq.FileMetaData)\n\n assert table == table1\n\n\ndef test_parquet_piece_basics():\n path = '/baz.parq'\n\n piece1 = pq.ParquetDatasetPiece(path)\n piece2 = pq.ParquetDatasetPiece(path, row_group=1)\n piece3 = pq.ParquetDatasetPiece(\n path, row_group=1, partition_keys=[('foo', 0), ('bar', 1)])\n\n assert str(piece1) == path\n assert str(piece2) == '/baz.parq | row_group=1'\n assert str(piece3) == 'partition[foo=0, bar=1] /baz.parq | row_group=1'\n\n assert piece1 == piece1\n assert piece2 == piece2\n assert piece3 == piece3\n assert piece1 != piece3\n\n\ndef test_partition_set_dictionary_type():\n set1 = pq.PartitionSet('key1', [u('foo'), u('bar'), u('baz')])\n set2 = pq.PartitionSet('key2', [2007, 2008, 2009])\n\n assert isinstance(set1.dictionary, pa.StringArray)\n assert isinstance(set2.dictionary, pa.IntegerArray)\n\n set3 = pq.PartitionSet('key2', [datetime.datetime(2007, 1, 1)])\n with pytest.raises(TypeError):\n set3.dictionary\n\n\[email protected]\ndef test_read_partitioned_directory(tempdir):\n fs = LocalFileSystem.get_instance()\n _partition_test_for_filesystem(fs, tempdir)\n\n\[email protected]\ndef test_create_parquet_dataset_multi_threaded(tempdir):\n fs = LocalFileSystem.get_instance()\n base_path = tempdir\n\n _partition_test_for_filesystem(fs, base_path)\n\n manifest = pq.ParquetManifest(base_path, filesystem=fs,\n metadata_nthreads=1)\n dataset = pq.ParquetDataset(base_path, filesystem=fs, metadata_nthreads=16)\n assert len(dataset.pieces) > 0\n partitions = dataset.partitions\n assert 
len(partitions.partition_names) > 0\n assert partitions.partition_names == manifest.partitions.partition_names\n assert len(partitions.levels) == len(manifest.partitions.levels)\n\n\[email protected]\ndef test_equivalency(tempdir):\n fs = LocalFileSystem.get_instance()\n base_path = tempdir\n\n integer_keys = [0, 1]\n string_keys = ['a', 'b', 'c']\n boolean_keys = [True, False]\n partition_spec = [\n ['integer', integer_keys],\n ['string', string_keys],\n ['boolean', boolean_keys]\n ]\n\n df = pd.DataFrame({\n 'integer': np.array(integer_keys, dtype='i4').repeat(15),\n 'string': np.tile(np.tile(np.array(string_keys, dtype=object), 5), 2),\n 'boolean': np.tile(np.tile(np.array(boolean_keys, dtype='bool'), 5),\n 3),\n }, columns=['integer', 'string', 'boolean'])\n\n _generate_partition_directories(fs, base_path, partition_spec, df)\n\n # Old filters syntax:\n # integer == 1 AND string != b AND boolean == True\n dataset = pq.ParquetDataset(\n base_path, filesystem=fs,\n filters=[('integer', '=', 1), ('string', '!=', 'b'),\n ('boolean', '==', True)]\n )\n table = dataset.read()\n result_df = (table.to_pandas().reset_index(drop=True))\n\n assert 0 not in result_df['integer'].values\n assert 'b' not in result_df['string'].values\n assert False not in result_df['boolean'].values\n\n # filters in disjunctive normal form:\n # (integer == 1 AND string != b AND boolean == True) OR\n # (integer == 2 AND boolean == False)\n # TODO(ARROW-3388): boolean columns are reconstructed as string\n filters = [\n [\n ('integer', '=', 1),\n ('string', '!=', 'b'),\n ('boolean', '==', 'True')\n ],\n [('integer', '=', 0), ('boolean', '==', 'False')]\n ]\n dataset = pq.ParquetDataset(base_path, filesystem=fs, filters=filters)\n table = dataset.read()\n result_df = table.to_pandas().reset_index(drop=True)\n\n # Check that all rows in the DF fulfill the filter\n # Pandas 0.23.x has problems with indexing constant memoryviews in\n # categoricals. Thus we need to make an explicity copy here with np.array.\n df_filter_1 = (np.array(result_df['integer']) == 1) \\\n & (np.array(result_df['string']) != 'b') \\\n & (np.array(result_df['boolean']) == 'True')\n df_filter_2 = (np.array(result_df['integer']) == 0) \\\n & (np.array(result_df['boolean']) == 'False')\n assert df_filter_1.sum() > 0\n assert df_filter_2.sum() > 0\n assert result_df.shape[0] == (df_filter_1.sum() + df_filter_2.sum())\n\n # Check for \\0 in predicate values. 
Until they are correctly implemented\n # in ARROW-3391, they would otherwise lead to weird results with the\n # current code.\n with pytest.raises(NotImplementedError):\n filters = [[('string', '==', b'1\\0a')]]\n pq.ParquetDataset(base_path, filesystem=fs, filters=filters)\n with pytest.raises(NotImplementedError):\n filters = [[('string', '==', u'1\\0a')]]\n pq.ParquetDataset(base_path, filesystem=fs, filters=filters)\n\n\[email protected]\ndef test_cutoff_exclusive_integer(tempdir):\n fs = LocalFileSystem.get_instance()\n base_path = tempdir\n\n integer_keys = [0, 1, 2, 3, 4]\n partition_spec = [\n ['integers', integer_keys],\n ]\n N = 5\n\n df = pd.DataFrame({\n 'index': np.arange(N),\n 'integers': np.array(integer_keys, dtype='i4'),\n }, columns=['index', 'integers'])\n\n _generate_partition_directories(fs, base_path, partition_spec, df)\n\n dataset = pq.ParquetDataset(\n base_path, filesystem=fs,\n filters=[\n ('integers', '<', 4),\n ('integers', '>', 1),\n ]\n )\n table = dataset.read()\n result_df = (table.to_pandas()\n .sort_values(by='index')\n .reset_index(drop=True))\n\n result_list = [x for x in map(int, result_df['integers'].values)]\n assert result_list == [2, 3]\n\n\[email protected]\[email protected](\n raises=TypeError,\n reason='Loss of type information in creation of categoricals.'\n)\ndef test_cutoff_exclusive_datetime(tempdir):\n fs = LocalFileSystem.get_instance()\n base_path = tempdir\n\n date_keys = [\n datetime.date(2018, 4, 9),\n datetime.date(2018, 4, 10),\n datetime.date(2018, 4, 11),\n datetime.date(2018, 4, 12),\n datetime.date(2018, 4, 13)\n ]\n partition_spec = [\n ['dates', date_keys]\n ]\n N = 5\n\n df = pd.DataFrame({\n 'index': np.arange(N),\n 'dates': np.array(date_keys, dtype='datetime64'),\n }, columns=['index', 'dates'])\n\n _generate_partition_directories(fs, base_path, partition_spec, df)\n\n dataset = pq.ParquetDataset(\n base_path, filesystem=fs,\n filters=[\n ('dates', '<', \"2018-04-12\"),\n ('dates', '>', \"2018-04-10\")\n ]\n )\n table = dataset.read()\n result_df = (table.to_pandas()\n .sort_values(by='index')\n .reset_index(drop=True))\n\n expected = pd.Categorical(\n np.array([datetime.date(2018, 4, 11)], dtype='datetime64'),\n categories=np.array(date_keys, dtype='datetime64'))\n\n assert result_df['dates'].values == expected\n\n\[email protected]\ndef test_inclusive_integer(tempdir):\n fs = LocalFileSystem.get_instance()\n base_path = tempdir\n\n integer_keys = [0, 1, 2, 3, 4]\n partition_spec = [\n ['integers', integer_keys],\n ]\n N = 5\n\n df = pd.DataFrame({\n 'index': np.arange(N),\n 'integers': np.array(integer_keys, dtype='i4'),\n }, columns=['index', 'integers'])\n\n _generate_partition_directories(fs, base_path, partition_spec, df)\n\n dataset = pq.ParquetDataset(\n base_path, filesystem=fs,\n filters=[\n ('integers', '<=', 3),\n ('integers', '>=', 2),\n ]\n )\n table = dataset.read()\n result_df = (table.to_pandas()\n .sort_values(by='index')\n .reset_index(drop=True))\n\n result_list = [int(x) for x in map(int, result_df['integers'].values)]\n assert result_list == [2, 3]\n\n\[email protected]\ndef test_inclusive_set(tempdir):\n fs = LocalFileSystem.get_instance()\n base_path = tempdir\n\n integer_keys = [0, 1]\n string_keys = ['a', 'b', 'c']\n boolean_keys = [True, False]\n partition_spec = [\n ['integer', integer_keys],\n ['string', string_keys],\n ['boolean', boolean_keys]\n ]\n\n df = pd.DataFrame({\n 'integer': np.array(integer_keys, dtype='i4').repeat(15),\n 'string': np.tile(np.tile(np.array(string_keys, dtype=object), 
5), 2),\n 'boolean': np.tile(np.tile(np.array(boolean_keys, dtype='bool'), 5),\n 3),\n }, columns=['integer', 'string', 'boolean'])\n\n _generate_partition_directories(fs, base_path, partition_spec, df)\n\n dataset = pq.ParquetDataset(\n base_path, filesystem=fs,\n filters=[('integer', 'in', {1}), ('string', 'in', {'a', 'b'}),\n ('boolean', 'in', {True})]\n )\n table = dataset.read()\n result_df = (table.to_pandas().reset_index(drop=True))\n\n assert 0 not in result_df['integer'].values\n assert 'c' not in result_df['string'].values\n assert False not in result_df['boolean'].values\n\n\[email protected]\ndef test_invalid_pred_op(tempdir):\n fs = LocalFileSystem.get_instance()\n base_path = tempdir\n\n integer_keys = [0, 1, 2, 3, 4]\n partition_spec = [\n ['integers', integer_keys],\n ]\n N = 5\n\n df = pd.DataFrame({\n 'index': np.arange(N),\n 'integers': np.array(integer_keys, dtype='i4'),\n }, columns=['index', 'integers'])\n\n _generate_partition_directories(fs, base_path, partition_spec, df)\n\n with pytest.raises(ValueError):\n pq.ParquetDataset(base_path,\n filesystem=fs,\n filters=[\n ('integers', '=<', 3),\n ])\n\n with pytest.raises(ValueError):\n pq.ParquetDataset(base_path,\n filesystem=fs,\n filters=[\n ('integers', 'in', set()),\n ])\n\n with pytest.raises(ValueError):\n pq.ParquetDataset(base_path,\n filesystem=fs,\n filters=[\n ('integers', '!=', {3}),\n ])\n\n\[email protected]\ndef test_filters_read_table(tempdir):\n # test that filters keyword is passed through in read_table\n fs = LocalFileSystem.get_instance()\n base_path = tempdir\n\n integer_keys = [0, 1, 2, 3, 4]\n partition_spec = [\n ['integers', integer_keys],\n ]\n N = 5\n\n df = pd.DataFrame({\n 'index': np.arange(N),\n 'integers': np.array(integer_keys, dtype='i4'),\n }, columns=['index', 'integers'])\n\n _generate_partition_directories(fs, base_path, partition_spec, df)\n\n table = pq.read_table(\n base_path, filesystem=fs, filters=[('integers', '<', 3)])\n assert table.num_rows == 3\n\n table = pq.read_table(\n base_path, filesystem=fs, filters=[[('integers', '<', 3)]])\n assert table.num_rows == 3\n\n table = pq.read_pandas(\n base_path, filters=[('integers', '<', 3)])\n assert table.num_rows == 3\n\n\[email protected]_fixture\ndef s3_example():\n access_key = os.environ['PYARROW_TEST_S3_ACCESS_KEY']\n secret_key = os.environ['PYARROW_TEST_S3_SECRET_KEY']\n bucket_name = os.environ['PYARROW_TEST_S3_BUCKET']\n\n import s3fs\n fs = s3fs.S3FileSystem(key=access_key, secret=secret_key)\n\n test_dir = guid()\n\n bucket_uri = 's3://{0}/{1}'.format(bucket_name, test_dir)\n fs.mkdir(bucket_uri)\n yield fs, bucket_uri\n fs.rm(bucket_uri, recursive=True)\n\n\[email protected]\[email protected]\ndef test_read_partitioned_directory_s3fs(s3_example):\n from pyarrow.filesystem import S3FSWrapper\n\n fs, bucket_uri = s3_example\n wrapper = S3FSWrapper(fs)\n _partition_test_for_filesystem(wrapper, bucket_uri)\n\n # Check that we can auto-wrap\n dataset = pq.ParquetDataset(bucket_uri, filesystem=fs)\n dataset.read()\n\n\ndef _partition_test_for_filesystem(fs, base_path):\n foo_keys = [0, 1]\n bar_keys = ['a', 'b', 'c']\n partition_spec = [\n ['foo', foo_keys],\n ['bar', bar_keys]\n ]\n N = 30\n\n df = pd.DataFrame({\n 'index': np.arange(N),\n 'foo': np.array(foo_keys, dtype='i4').repeat(15),\n 'bar': np.tile(np.tile(np.array(bar_keys, dtype=object), 5), 2),\n 'values': np.random.randn(N)\n }, columns=['index', 'foo', 'bar', 'values'])\n\n _generate_partition_directories(fs, base_path, partition_spec, df)\n\n dataset = 
pq.ParquetDataset(base_path, filesystem=fs)\n table = dataset.read()\n result_df = (table.to_pandas()\n .sort_values(by='index')\n .reset_index(drop=True))\n\n expected_df = (df.sort_values(by='index')\n .reset_index(drop=True)\n .reindex(columns=result_df.columns))\n expected_df['foo'] = pd.Categorical(df['foo'], categories=foo_keys)\n expected_df['bar'] = pd.Categorical(df['bar'], categories=bar_keys)\n\n assert (result_df.columns == ['index', 'values', 'foo', 'bar']).all()\n\n tm.assert_frame_equal(result_df, expected_df)\n\n\ndef _generate_partition_directories(fs, base_dir, partition_spec, df):\n # partition_spec : list of lists, e.g. [['foo', [0, 1, 2],\n # ['bar', ['a', 'b', 'c']]\n # part_table : a pyarrow.Table to write to each partition\n DEPTH = len(partition_spec)\n\n def _visit_level(base_dir, level, part_keys):\n name, values = partition_spec[level]\n for value in values:\n this_part_keys = part_keys + [(name, value)]\n\n level_dir = base_dir / '{0}={1}'.format(name, value)\n fs.mkdir(level_dir)\n\n if level == DEPTH - 1:\n # Generate example data\n file_path = level_dir / guid()\n\n filtered_df = _filter_partition(df, this_part_keys)\n part_table = pa.Table.from_pandas(filtered_df)\n with fs.open(file_path, 'wb') as f:\n _write_table(part_table, f)\n assert fs.exists(file_path)\n\n (level_dir / '_SUCCESS').touch()\n else:\n _visit_level(level_dir, level + 1, this_part_keys)\n (level_dir / '_SUCCESS').touch()\n\n _visit_level(base_dir, 0, [])\n\n\ndef _test_read_common_metadata_files(fs, base_path):\n N = 100\n df = pd.DataFrame({\n 'index': np.arange(N),\n 'values': np.random.randn(N)\n }, columns=['index', 'values'])\n\n base_path = str(base_path)\n data_path = os.path.join(base_path, 'data.parquet')\n\n table = pa.Table.from_pandas(df)\n\n with fs.open(data_path, 'wb') as f:\n _write_table(table, f)\n\n metadata_path = os.path.join(base_path, '_common_metadata')\n with fs.open(metadata_path, 'wb') as f:\n pq.write_metadata(table.schema, f)\n\n dataset = pq.ParquetDataset(base_path, filesystem=fs)\n assert dataset.common_metadata_path == str(metadata_path)\n\n with fs.open(data_path) as f:\n common_schema = pq.read_metadata(f).schema\n assert dataset.schema.equals(common_schema)\n\n # handle list of one directory\n dataset2 = pq.ParquetDataset([base_path], filesystem=fs)\n assert dataset2.schema.equals(dataset.schema)\n\n\[email protected]\ndef test_read_common_metadata_files(tempdir):\n fs = LocalFileSystem.get_instance()\n _test_read_common_metadata_files(fs, tempdir)\n\n\[email protected]\ndef test_read_metadata_files(tempdir):\n fs = LocalFileSystem.get_instance()\n\n N = 100\n df = pd.DataFrame({\n 'index': np.arange(N),\n 'values': np.random.randn(N)\n }, columns=['index', 'values'])\n\n data_path = tempdir / 'data.parquet'\n\n table = pa.Table.from_pandas(df)\n\n with fs.open(data_path, 'wb') as f:\n _write_table(table, f)\n\n metadata_path = tempdir / '_metadata'\n with fs.open(metadata_path, 'wb') as f:\n pq.write_metadata(table.schema, f)\n\n dataset = pq.ParquetDataset(tempdir, filesystem=fs)\n assert dataset.metadata_path == str(metadata_path)\n\n with fs.open(data_path) as f:\n metadata_schema = pq.read_metadata(f).schema\n assert dataset.schema.equals(metadata_schema)\n\n\[email protected]\ndef test_read_schema(tempdir):\n N = 100\n df = pd.DataFrame({\n 'index': np.arange(N),\n 'values': np.random.randn(N)\n }, columns=['index', 'values'])\n\n data_path = tempdir / 'test.parquet'\n\n table = pa.Table.from_pandas(df)\n _write_table(table, data_path)\n\n read1 
= pq.read_schema(data_path)\n read2 = pq.read_schema(data_path, memory_map=True)\n assert table.schema.equals(read1, check_metadata=False)\n assert table.schema.equals(read2, check_metadata=False)\n\n assert table.schema.metadata[b'pandas'] == read1.metadata[b'pandas']\n\n\ndef _filter_partition(df, part_keys):\n predicate = np.ones(len(df), dtype=bool)\n\n to_drop = []\n for name, value in part_keys:\n to_drop.append(name)\n\n # to avoid pandas warning\n if isinstance(value, (datetime.date, datetime.datetime)):\n value = pd.Timestamp(value)\n\n predicate &= df[name] == value\n\n return df[predicate].drop(to_drop, axis=1)\n\n\[email protected]\ndef test_read_multiple_files(tempdir):\n nfiles = 10\n size = 5\n\n dirpath = tempdir / guid()\n dirpath.mkdir()\n\n test_data = []\n paths = []\n for i in range(nfiles):\n df = _test_dataframe(size, seed=i)\n\n # Hack so that we don't have a dtype cast in v1 files\n df['uint32'] = df['uint32'].astype(np.int64)\n\n path = dirpath / '{}.parquet'.format(i)\n\n table = pa.Table.from_pandas(df)\n _write_table(table, path)\n\n test_data.append(table)\n paths.append(path)\n\n # Write a _SUCCESS.crc file\n (dirpath / '_SUCCESS.crc').touch()\n\n def read_multiple_files(paths, columns=None, use_threads=True, **kwargs):\n dataset = pq.ParquetDataset(paths, **kwargs)\n return dataset.read(columns=columns, use_threads=use_threads)\n\n result = read_multiple_files(paths)\n expected = pa.concat_tables(test_data)\n\n assert result.equals(expected)\n\n # Read with provided metadata\n metadata = pq.read_metadata(paths[0])\n\n result2 = read_multiple_files(paths, metadata=metadata)\n assert result2.equals(expected)\n\n result3 = pa.localfs.read_parquet(dirpath, schema=metadata.schema)\n assert result3.equals(expected)\n\n # Read column subset\n to_read = [0, 2, 6, result.num_columns - 1]\n\n col_names = [result.field(i).name for i in to_read]\n out = pa.localfs.read_parquet(dirpath, columns=col_names)\n expected = pa.Table.from_arrays([result.column(i) for i in to_read],\n names=col_names,\n metadata=result.schema.metadata)\n assert out.equals(expected)\n\n # Read with multiple threads\n pa.localfs.read_parquet(dirpath, use_threads=True)\n\n # Test failure modes with non-uniform metadata\n bad_apple = _test_dataframe(size, seed=i).iloc[:, :4]\n bad_apple_path = tempdir / '{}.parquet'.format(guid())\n\n t = pa.Table.from_pandas(bad_apple)\n _write_table(t, bad_apple_path)\n\n bad_meta = pq.read_metadata(bad_apple_path)\n\n with pytest.raises(ValueError):\n read_multiple_files(paths + [bad_apple_path])\n\n with pytest.raises(ValueError):\n read_multiple_files(paths, metadata=bad_meta)\n\n mixed_paths = [bad_apple_path, paths[0]]\n\n with pytest.raises(ValueError):\n read_multiple_files(mixed_paths, schema=bad_meta.schema)\n\n with pytest.raises(ValueError):\n read_multiple_files(mixed_paths)\n\n\[email protected]\ndef test_dataset_read_pandas(tempdir):\n nfiles = 5\n size = 5\n\n dirpath = tempdir / guid()\n dirpath.mkdir()\n\n test_data = []\n frames = []\n paths = []\n for i in range(nfiles):\n df = _test_dataframe(size, seed=i)\n df.index = np.arange(i * size, (i + 1) * size)\n df.index.name = 'index'\n\n path = dirpath / '{}.parquet'.format(i)\n\n table = pa.Table.from_pandas(df)\n _write_table(table, path)\n test_data.append(table)\n frames.append(df)\n paths.append(path)\n\n dataset = pq.ParquetDataset(dirpath)\n columns = ['uint8', 'strings']\n result = dataset.read_pandas(columns=columns).to_pandas()\n expected = pd.concat([x[columns] for x in frames])\n\n 
tm.assert_frame_equal(result, expected)\n\n\[email protected]\ndef test_dataset_no_memory_map(tempdir):\n # ARROW-2627: Check that we can use ParquetDataset without memory-mapping\n dirpath = tempdir / guid()\n dirpath.mkdir()\n\n df = _test_dataframe(10, seed=0)\n path = dirpath / '{}.parquet'.format(0)\n table = pa.Table.from_pandas(df)\n _write_table(table, path, version='2.0')\n\n # TODO(wesm): Not sure how to easily check that memory mapping is _not_\n # used. Mocking is not especially easy for pa.memory_map\n dataset = pq.ParquetDataset(dirpath, memory_map=False)\n assert dataset.pieces[0].read().equals(table)\n\n\[email protected]\[email protected]('preserve_index', [True, False, None])\ndef test_dataset_read_pandas_common_metadata(tempdir, preserve_index):\n # ARROW-1103\n nfiles = 5\n size = 5\n\n dirpath = tempdir / guid()\n dirpath.mkdir()\n\n test_data = []\n frames = []\n paths = []\n for i in range(nfiles):\n df = _test_dataframe(size, seed=i)\n df.index = pd.Index(np.arange(i * size, (i + 1) * size), name='index')\n\n path = dirpath / '{}.parquet'.format(i)\n\n table = pa.Table.from_pandas(df, preserve_index=preserve_index)\n\n # Obliterate metadata\n table = table.replace_schema_metadata(None)\n assert table.schema.metadata is None\n\n _write_table(table, path)\n test_data.append(table)\n frames.append(df)\n paths.append(path)\n\n # Write _metadata common file\n table_for_metadata = pa.Table.from_pandas(\n df, preserve_index=preserve_index\n )\n pq.write_metadata(table_for_metadata.schema, dirpath / '_metadata')\n\n dataset = pq.ParquetDataset(dirpath)\n columns = ['uint8', 'strings']\n result = dataset.read_pandas(columns=columns).to_pandas()\n expected = pd.concat([x[columns] for x in frames])\n expected.index.name = (\n df.index.name if preserve_index is not False else None)\n tm.assert_frame_equal(result, expected)\n\n\ndef _make_example_multifile_dataset(base_path, nfiles=10, file_nrows=5):\n test_data = []\n paths = []\n for i in range(nfiles):\n df = _test_dataframe(file_nrows, seed=i)\n path = base_path / '{}.parquet'.format(i)\n\n test_data.append(_write_table(df, path))\n paths.append(path)\n return paths\n\n\[email protected]\ndef test_ignore_private_directories(tempdir):\n dirpath = tempdir / guid()\n dirpath.mkdir()\n\n paths = _make_example_multifile_dataset(dirpath, nfiles=10,\n file_nrows=5)\n\n # private directory\n (dirpath / '_impala_staging').mkdir()\n\n dataset = pq.ParquetDataset(dirpath)\n assert set(map(str, paths)) == set(x.path for x in dataset.pieces)\n\n\[email protected]\ndef test_ignore_hidden_files_dot(tempdir):\n dirpath = tempdir / guid()\n dirpath.mkdir()\n\n paths = _make_example_multifile_dataset(dirpath, nfiles=10,\n file_nrows=5)\n\n with (dirpath / '.DS_Store').open('wb') as f:\n f.write(b'gibberish')\n\n with (dirpath / '.private').open('wb') as f:\n f.write(b'gibberish')\n\n dataset = pq.ParquetDataset(dirpath)\n assert set(map(str, paths)) == set(x.path for x in dataset.pieces)\n\n\[email protected]\ndef test_ignore_hidden_files_underscore(tempdir):\n dirpath = tempdir / guid()\n dirpath.mkdir()\n\n paths = _make_example_multifile_dataset(dirpath, nfiles=10,\n file_nrows=5)\n\n with (dirpath / '_committed_123').open('wb') as f:\n f.write(b'abcd')\n\n with (dirpath / '_started_321').open('wb') as f:\n f.write(b'abcd')\n\n dataset = pq.ParquetDataset(dirpath)\n assert set(map(str, paths)) == set(x.path for x in dataset.pieces)\n\n\[email protected]\ndef test_multiindex_duplicate_values(tempdir):\n num_rows = 3\n numbers = 
list(range(num_rows))\n index = pd.MultiIndex.from_arrays(\n [['foo', 'foo', 'bar'], numbers],\n names=['foobar', 'some_numbers'],\n )\n\n df = pd.DataFrame({'numbers': numbers}, index=index)\n table = pa.Table.from_pandas(df)\n\n filename = tempdir / 'dup_multi_index_levels.parquet'\n\n _write_table(table, filename)\n result_table = _read_table(filename)\n assert table.equals(result_table)\n\n result_df = result_table.to_pandas()\n tm.assert_frame_equal(result_df, df)\n\n\[email protected]\ndef test_write_error_deletes_incomplete_file(tempdir):\n # ARROW-1285\n df = pd.DataFrame({'a': list('abc'),\n 'b': list(range(1, 4)),\n 'c': np.arange(3, 6).astype('u1'),\n 'd': np.arange(4.0, 7.0, dtype='float64'),\n 'e': [True, False, True],\n 'f': pd.Categorical(list('abc')),\n 'g': pd.date_range('20130101', periods=3),\n 'h': pd.date_range('20130101', periods=3,\n tz='US/Eastern'),\n 'i': pd.date_range('20130101', periods=3, freq='ns')})\n\n pdf = pa.Table.from_pandas(df)\n\n filename = tempdir / 'tmp_file'\n try:\n _write_table(pdf, filename)\n except pa.ArrowException:\n pass\n\n assert not filename.exists()\n\n\[email protected]\ndef test_noncoerced_nanoseconds_written_without_exception(tempdir):\n # ARROW-1957: the Parquet version 2.0 writer preserves Arrow\n # nanosecond timestamps by default\n n = 9\n df = pd.DataFrame({'x': range(n)},\n index=pd.DatetimeIndex(start='2017-01-01',\n freq='1n',\n periods=n))\n tb = pa.Table.from_pandas(df)\n\n filename = tempdir / 'written.parquet'\n try:\n pq.write_table(tb, filename, version='2.0')\n except Exception:\n pass\n assert filename.exists()\n\n recovered_table = pq.read_table(filename)\n assert tb.equals(recovered_table)\n\n # Loss of data thru coercion (without explicit override) still an error\n filename = tempdir / 'not_written.parquet'\n with pytest.raises(ValueError):\n pq.write_table(tb, filename, coerce_timestamps='ms', version='2.0')\n\n\ndef test_read_non_existent_file(tempdir):\n path = 'non-existent-file.parquet'\n try:\n pq.read_table(path)\n except Exception as e:\n assert path in e.args[0]\n\n\ndef test_read_table_doesnt_warn(datadir):\n with pytest.warns(None) as record:\n pq.read_table(datadir / 'v0.7.1.parquet')\n\n assert len(record) == 0\n\n\ndef _test_write_to_dataset_with_partitions(base_path,\n filesystem=None,\n schema=None,\n index_name=None):\n # ARROW-1400\n output_df = pd.DataFrame({'group1': list('aaabbbbccc'),\n 'group2': list('eefeffgeee'),\n 'num': list(range(10)),\n 'nan': [pd.np.nan] * 10,\n 'date': np.arange('2017-01-01', '2017-01-11',\n dtype='datetime64[D]')})\n cols = output_df.columns.tolist()\n partition_by = ['group1', 'group2']\n output_table = pa.Table.from_pandas(output_df, schema=schema, safe=False,\n preserve_index=False)\n pq.write_to_dataset(output_table, base_path, partition_by,\n filesystem=filesystem)\n\n metadata_path = os.path.join(base_path, '_common_metadata')\n\n if filesystem is not None:\n with filesystem.open(metadata_path, 'wb') as f:\n pq.write_metadata(output_table.schema, f)\n else:\n pq.write_metadata(output_table.schema, metadata_path)\n\n # ARROW-2891: Ensure the output_schema is preserved when writing a\n # partitioned dataset\n dataset = pq.ParquetDataset(base_path,\n filesystem=filesystem,\n validate_schema=True)\n # ARROW-2209: Ensure the dataset schema also includes the partition columns\n dataset_cols = set(dataset.schema.to_arrow_schema().names)\n assert dataset_cols == set(output_table.schema.names)\n\n input_table = dataset.read()\n input_df = input_table.to_pandas()\n\n # 
Read data back in and compare with original DataFrame\n # Partitioned columns added to the end of the DataFrame when read\n input_df_cols = input_df.columns.tolist()\n assert partition_by == input_df_cols[-1 * len(partition_by):]\n\n # Partitioned columns become 'categorical' dtypes\n input_df = input_df[cols]\n for col in partition_by:\n output_df[col] = output_df[col].astype('category')\n assert output_df.equals(input_df)\n\n\ndef _test_write_to_dataset_no_partitions(base_path, filesystem=None):\n # ARROW-1400\n output_df = pd.DataFrame({'group1': list('aaabbbbccc'),\n 'group2': list('eefeffgeee'),\n 'num': list(range(10)),\n 'date': np.arange('2017-01-01', '2017-01-11',\n dtype='datetime64[D]')})\n cols = output_df.columns.tolist()\n output_table = pa.Table.from_pandas(output_df)\n\n if filesystem is None:\n filesystem = LocalFileSystem.get_instance()\n\n # Without partitions, append files to root_path\n n = 5\n for i in range(n):\n pq.write_to_dataset(output_table, base_path,\n filesystem=filesystem)\n output_files = [file for file in filesystem.ls(base_path)\n if file.endswith(\".parquet\")]\n assert len(output_files) == n\n\n # Deduplicated incoming DataFrame should match\n # original outgoing Dataframe\n input_table = pq.ParquetDataset(base_path,\n filesystem=filesystem).read()\n input_df = input_table.to_pandas()\n input_df = input_df.drop_duplicates()\n input_df = input_df[cols]\n assert output_df.equals(input_df)\n\n\[email protected]\ndef test_write_to_dataset_with_partitions(tempdir):\n _test_write_to_dataset_with_partitions(str(tempdir))\n\n\[email protected]\ndef test_write_to_dataset_with_partitions_and_schema(tempdir):\n schema = pa.schema([pa.field('group1', type=pa.string()),\n pa.field('group2', type=pa.string()),\n pa.field('num', type=pa.int64()),\n pa.field('nan', type=pa.int32()),\n pa.field('date', type=pa.timestamp(unit='us'))])\n _test_write_to_dataset_with_partitions(str(tempdir), schema=schema)\n\n\[email protected]\ndef test_write_to_dataset_with_partitions_and_index_name(tempdir):\n _test_write_to_dataset_with_partitions(str(tempdir),\n index_name='index_name')\n\n\[email protected]\ndef test_write_to_dataset_no_partitions(tempdir):\n _test_write_to_dataset_no_partitions(str(tempdir))\n\n\[email protected]\ndef test_write_to_dataset_with_partitions_and_custom_filenames(tempdir):\n output_df = pd.DataFrame({'group1': list('aaabbbbccc'),\n 'group2': list('eefeffgeee'),\n 'num': list(range(10)),\n 'nan': [pd.np.nan] * 10,\n 'date': np.arange('2017-01-01', '2017-01-11',\n dtype='datetime64[D]')})\n partition_by = ['group1', 'group2']\n output_table = pa.Table.from_pandas(output_df)\n path = str(tempdir)\n\n def partition_filename_callback(keys):\n return \"{0}-{1}.parquet\".format(*keys)\n\n pq.write_to_dataset(output_table, path,\n partition_by, partition_filename_callback)\n\n dataset = pq.ParquetDataset(path)\n\n # ARROW-3538: Ensure partition filenames match the given pattern\n # defined in the local function partition_filename_callback\n expected_basenames = [\n 'a-e.parquet', 'a-f.parquet',\n 'b-e.parquet', 'b-f.parquet',\n 'b-g.parquet', 'c-e.parquet'\n ]\n output_basenames = [os.path.basename(p.path) for p in dataset.pieces]\n\n assert sorted(expected_basenames) == sorted(output_basenames)\n\n\[email protected]_memory\ndef test_large_table_int32_overflow():\n size = np.iinfo('int32').max + 1\n\n arr = np.ones(size, dtype='uint8')\n\n parr = pa.array(arr, type=pa.uint8())\n\n table = pa.Table.from_arrays([parr], names=['one'])\n f = io.BytesIO()\n 
_write_table(table, f)\n\n\ndef _simple_table_roundtrip(table):\n stream = pa.BufferOutputStream()\n _write_table(table, stream)\n buf = stream.getvalue()\n return _read_table(buf)\n\n\[email protected]\[email protected]_memory\ndef test_binary_array_overflow_to_chunked():\n # ARROW-3762\n\n # 2^31 + 1 bytes\n values = [b'x'] + [\n b'x' * (1 << 20)\n ] * 2 * (1 << 10)\n df = pd.DataFrame({'byte_col': values})\n\n tbl = pa.Table.from_pandas(df, preserve_index=False)\n read_tbl = _simple_table_roundtrip(tbl)\n\n col0_data = read_tbl[0]\n assert isinstance(col0_data, pa.ChunkedArray)\n\n # Split up into 2GB chunks\n assert col0_data.num_chunks == 2\n\n assert tbl.equals(read_tbl)\n\n\[email protected]\[email protected]_memory\ndef test_list_of_binary_large_cell():\n # ARROW-4688\n data = []\n\n # TODO(wesm): handle chunked children\n # 2^31 - 1 bytes in a single cell\n # data.append([b'x' * (1 << 20)] * 2047 + [b'x' * ((1 << 20) - 1)])\n\n # A little under 2GB in cell each containing approximately 10MB each\n data.extend([[b'x' * 1000000] * 10] * 214)\n\n arr = pa.array(data)\n table = pa.Table.from_arrays([arr], ['chunky_cells'])\n read_table = _simple_table_roundtrip(table)\n assert table.equals(read_table)\n\n\[email protected]\ndef test_index_column_name_duplicate(tempdir):\n data = {\n 'close': {\n pd.Timestamp('2017-06-30 01:31:00'): 154.99958999999998,\n pd.Timestamp('2017-06-30 01:32:00'): 154.99958999999998,\n },\n 'time': {\n pd.Timestamp('2017-06-30 01:31:00'): pd.Timestamp(\n '2017-06-30 01:31:00'\n ),\n pd.Timestamp('2017-06-30 01:32:00'): pd.Timestamp(\n '2017-06-30 01:32:00'\n ),\n }\n }\n path = str(tempdir / 'data.parquet')\n dfx = pd.DataFrame(data).set_index('time', drop=False)\n tdfx = pa.Table.from_pandas(dfx)\n _write_table(tdfx, path)\n arrow_table = _read_table(path)\n result_df = arrow_table.to_pandas()\n tm.assert_frame_equal(result_df, dfx)\n\n\[email protected]\ndef test_parquet_nested_convenience(tempdir):\n # ARROW-1684\n df = pd.DataFrame({\n 'a': [[1, 2, 3], None, [4, 5], []],\n 'b': [[1.], None, None, [6., 7.]],\n })\n\n path = str(tempdir / 'nested_convenience.parquet')\n\n table = pa.Table.from_pandas(df, preserve_index=False)\n _write_table(table, path)\n\n read = pq.read_table(path, columns=['a'])\n tm.assert_frame_equal(read.to_pandas(), df[['a']])\n\n read = pq.read_table(path, columns=['a', 'b'])\n tm.assert_frame_equal(read.to_pandas(), df)\n\n\[email protected]\ndef test_backwards_compatible_index_naming(datadir):\n expected_string = b\"\"\"\\\ncarat cut color clarity depth table price x y z\n 0.23 Ideal E SI2 61.5 55.0 326 3.95 3.98 2.43\n 0.21 Premium E SI1 59.8 61.0 326 3.89 3.84 2.31\n 0.23 Good E VS1 56.9 65.0 327 4.05 4.07 2.31\n 0.29 Premium I VS2 62.4 58.0 334 4.20 4.23 2.63\n 0.31 Good J SI2 63.3 58.0 335 4.34 4.35 2.75\n 0.24 Very Good J VVS2 62.8 57.0 336 3.94 3.96 2.48\n 0.24 Very Good I VVS1 62.3 57.0 336 3.95 3.98 2.47\n 0.26 Very Good H SI1 61.9 55.0 337 4.07 4.11 2.53\n 0.22 Fair E VS2 65.1 61.0 337 3.87 3.78 2.49\n 0.23 Very Good H VS1 59.4 61.0 338 4.00 4.05 2.39\"\"\"\n expected = pd.read_csv(io.BytesIO(expected_string), sep=r'\\s{2,}',\n index_col=None, header=0, engine='python')\n table = _read_table(datadir / 'v0.7.1.parquet')\n result = table.to_pandas()\n tm.assert_frame_equal(result, expected)\n\n\[email protected]\ndef test_backwards_compatible_index_multi_level_named(datadir):\n expected_string = b\"\"\"\\\ncarat cut color clarity depth table price x y z\n 0.23 Ideal E SI2 61.5 55.0 326 3.95 3.98 2.43\n 0.21 Premium E SI1 
59.8 61.0 326 3.89 3.84 2.31\n 0.23 Good E VS1 56.9 65.0 327 4.05 4.07 2.31\n 0.29 Premium I VS2 62.4 58.0 334 4.20 4.23 2.63\n 0.31 Good J SI2 63.3 58.0 335 4.34 4.35 2.75\n 0.24 Very Good J VVS2 62.8 57.0 336 3.94 3.96 2.48\n 0.24 Very Good I VVS1 62.3 57.0 336 3.95 3.98 2.47\n 0.26 Very Good H SI1 61.9 55.0 337 4.07 4.11 2.53\n 0.22 Fair E VS2 65.1 61.0 337 3.87 3.78 2.49\n 0.23 Very Good H VS1 59.4 61.0 338 4.00 4.05 2.39\"\"\"\n expected = pd.read_csv(\n io.BytesIO(expected_string), sep=r'\\s{2,}',\n index_col=['cut', 'color', 'clarity'],\n header=0, engine='python'\n ).sort_index()\n\n table = _read_table(datadir / 'v0.7.1.all-named-index.parquet')\n result = table.to_pandas()\n tm.assert_frame_equal(result, expected)\n\n\[email protected]\ndef test_backwards_compatible_index_multi_level_some_named(datadir):\n expected_string = b\"\"\"\\\ncarat cut color clarity depth table price x y z\n 0.23 Ideal E SI2 61.5 55.0 326 3.95 3.98 2.43\n 0.21 Premium E SI1 59.8 61.0 326 3.89 3.84 2.31\n 0.23 Good E VS1 56.9 65.0 327 4.05 4.07 2.31\n 0.29 Premium I VS2 62.4 58.0 334 4.20 4.23 2.63\n 0.31 Good J SI2 63.3 58.0 335 4.34 4.35 2.75\n 0.24 Very Good J VVS2 62.8 57.0 336 3.94 3.96 2.48\n 0.24 Very Good I VVS1 62.3 57.0 336 3.95 3.98 2.47\n 0.26 Very Good H SI1 61.9 55.0 337 4.07 4.11 2.53\n 0.22 Fair E VS2 65.1 61.0 337 3.87 3.78 2.49\n 0.23 Very Good H VS1 59.4 61.0 338 4.00 4.05 2.39\"\"\"\n expected = pd.read_csv(\n io.BytesIO(expected_string),\n sep=r'\\s{2,}', index_col=['cut', 'color', 'clarity'],\n header=0, engine='python'\n ).sort_index()\n expected.index = expected.index.set_names(['cut', None, 'clarity'])\n\n table = _read_table(datadir / 'v0.7.1.some-named-index.parquet')\n result = table.to_pandas()\n tm.assert_frame_equal(result, expected)\n\n\[email protected]\ndef test_backwards_compatible_column_metadata_handling(datadir):\n expected = pd.DataFrame(\n {'a': [1, 2, 3], 'b': [.1, .2, .3],\n 'c': pd.date_range(\"2017-01-01\", periods=3, tz='Europe/Brussels')})\n expected.index = pd.MultiIndex.from_arrays(\n [['a', 'b', 'c'],\n pd.date_range(\"2017-01-01\", periods=3, tz='Europe/Brussels')],\n names=['index', None])\n\n path = datadir / 'v0.7.1.column-metadata-handling.parquet'\n table = _read_table(path)\n result = table.to_pandas()\n tm.assert_frame_equal(result, expected)\n\n table = _read_table(path, columns=['a'])\n result = table.to_pandas()\n tm.assert_frame_equal(result, expected[['a']].reset_index(drop=True))\n\n\ndef _make_dataset_for_pickling(tempdir, N=100):\n path = tempdir / 'data.parquet'\n fs = LocalFileSystem.get_instance()\n\n df = pd.DataFrame({\n 'index': np.arange(N),\n 'values': np.random.randn(N)\n }, columns=['index', 'values'])\n table = pa.Table.from_pandas(df)\n\n num_groups = 3\n with pq.ParquetWriter(path, table.schema) as writer:\n for i in range(num_groups):\n writer.write_table(table)\n\n reader = pq.ParquetFile(path)\n assert reader.metadata.num_row_groups == num_groups\n\n metadata_path = tempdir / '_metadata'\n with fs.open(metadata_path, 'wb') as f:\n pq.write_metadata(table.schema, f)\n\n dataset = pq.ParquetDataset(tempdir, filesystem=fs)\n assert dataset.metadata_path == str(metadata_path)\n\n return dataset\n\n\[email protected]\[email protected]('pickler', [\n pytest.param(pickle, id='builtin'),\n pytest.param(pytest.importorskip('cloudpickle'), id='cloudpickle')\n])\ndef test_pickle_dataset(tempdir, datadir, pickler):\n def is_pickleable(obj):\n return obj == pickler.loads(pickler.dumps(obj))\n\n dataset = 
_make_dataset_for_pickling(tempdir)\n\n assert is_pickleable(dataset)\n assert is_pickleable(dataset.metadata)\n assert is_pickleable(dataset.metadata.schema)\n assert len(dataset.metadata.schema)\n for column in dataset.metadata.schema:\n assert is_pickleable(column)\n\n for piece in dataset.pieces:\n assert is_pickleable(piece)\n metadata = piece.get_metadata()\n assert metadata.num_row_groups\n for i in range(metadata.num_row_groups):\n assert is_pickleable(metadata.row_group(i))\n\n\[email protected]\ndef test_decimal_roundtrip(tempdir):\n num_values = 10\n\n columns = {}\n for precision in range(1, 39):\n for scale in range(0, precision + 1):\n with util.random_seed(0):\n random_decimal_values = [\n util.randdecimal(precision, scale)\n for _ in range(num_values)\n ]\n column_name = ('dec_precision_{:d}_scale_{:d}'\n .format(precision, scale))\n columns[column_name] = random_decimal_values\n\n expected = pd.DataFrame(columns)\n filename = tempdir / 'decimals.parquet'\n string_filename = str(filename)\n table = pa.Table.from_pandas(expected)\n _write_table(table, string_filename)\n result_table = _read_table(string_filename)\n result = result_table.to_pandas()\n tm.assert_frame_equal(result, expected)\n\n\[email protected]\[email protected](\n raises=pa.ArrowException, reason='Parquet does not support negative scale'\n)\ndef test_decimal_roundtrip_negative_scale(tempdir):\n expected = pd.DataFrame({'decimal_num': [decimal.Decimal('1.23E4')]})\n filename = tempdir / 'decimals.parquet'\n string_filename = str(filename)\n t = pa.Table.from_pandas(expected)\n _write_table(t, string_filename)\n result_table = _read_table(string_filename)\n result = result_table.to_pandas()\n tm.assert_frame_equal(result, expected)\n\n\[email protected]\ndef test_parquet_writer_context_obj(tempdir):\n df = _test_dataframe(100)\n df['unique_id'] = 0\n\n arrow_table = pa.Table.from_pandas(df, preserve_index=False)\n out = pa.BufferOutputStream()\n\n with pq.ParquetWriter(out, arrow_table.schema, version='2.0') as writer:\n\n frames = []\n for i in range(10):\n df['unique_id'] = i\n arrow_table = pa.Table.from_pandas(df, preserve_index=False)\n writer.write_table(arrow_table)\n\n frames.append(df.copy())\n\n buf = out.getvalue()\n result = _read_table(pa.BufferReader(buf))\n\n expected = pd.concat(frames, ignore_index=True)\n tm.assert_frame_equal(result.to_pandas(), expected)\n\n\[email protected]\ndef test_parquet_writer_context_obj_with_exception(tempdir):\n df = _test_dataframe(100)\n df['unique_id'] = 0\n\n arrow_table = pa.Table.from_pandas(df, preserve_index=False)\n out = pa.BufferOutputStream()\n error_text = 'Artificial Error'\n\n try:\n with pq.ParquetWriter(out,\n arrow_table.schema,\n version='2.0') as writer:\n\n frames = []\n for i in range(10):\n df['unique_id'] = i\n arrow_table = pa.Table.from_pandas(df, preserve_index=False)\n writer.write_table(arrow_table)\n frames.append(df.copy())\n if i == 5:\n raise ValueError(error_text)\n except Exception as e:\n assert str(e) == error_text\n\n buf = out.getvalue()\n result = _read_table(pa.BufferReader(buf))\n\n expected = pd.concat(frames, ignore_index=True)\n tm.assert_frame_equal(result.to_pandas(), expected)\n\n\[email protected]\ndef test_zlib_compression_bug():\n # ARROW-3514: \"zlib deflate failed, output buffer too small\"\n table = pa.Table.from_arrays([pa.array(['abc', 'def'])], ['some_col'])\n f = io.BytesIO()\n pq.write_table(table, f, compression='gzip')\n\n f.seek(0)\n roundtrip = pq.read_table(f)\n 
tm.assert_frame_equal(roundtrip.to_pandas(), table.to_pandas())\n\n\[email protected]\ndef test_merging_parquet_tables_with_different_pandas_metadata(tempdir):\n # ARROW-3728: Merging Parquet Files - Pandas Meta in Schema Mismatch\n schema = pa.schema([\n pa.field('int', pa.int16()),\n pa.field('float', pa.float32()),\n pa.field('string', pa.string())\n ])\n df1 = pd.DataFrame({\n 'int': np.arange(3, dtype=np.uint8),\n 'float': np.arange(3, dtype=np.float32),\n 'string': ['ABBA', 'EDDA', 'ACDC']\n })\n df2 = pd.DataFrame({\n 'int': [4, 5],\n 'float': [1.1, None],\n 'string': [None, None]\n })\n table1 = pa.Table.from_pandas(df1, schema=schema, preserve_index=False)\n table2 = pa.Table.from_pandas(df2, schema=schema, preserve_index=False)\n\n assert not table1.schema.equals(table2.schema)\n assert table1.schema.equals(table2.schema, check_metadata=False)\n\n writer = pq.ParquetWriter(tempdir / 'merged.parquet', schema=schema)\n writer.write_table(table1)\n writer.write_table(table2)\n\n\ndef test_empty_row_groups(tempdir):\n # ARROW-3020\n table = pa.Table.from_arrays([pa.array([], type='int32')], ['f0'])\n\n path = tempdir / 'empty_row_groups.parquet'\n\n num_groups = 3\n with pq.ParquetWriter(path, table.schema) as writer:\n for i in range(num_groups):\n writer.write_table(table)\n\n reader = pq.ParquetFile(path)\n assert reader.metadata.num_row_groups == num_groups\n\n for i in range(num_groups):\n assert reader.read_row_group(i).equals(table)\n\n\[email protected]\ndef test_parquet_writer_with_caller_provided_filesystem():\n out = pa.BufferOutputStream()\n\n class CustomFS(FileSystem):\n def __init__(self):\n self.path = None\n self.mode = None\n\n def open(self, path, mode='rb'):\n self.path = path\n self.mode = mode\n return out\n\n fs = CustomFS()\n fname = 'expected_fname.parquet'\n df = _test_dataframe(100)\n table = pa.Table.from_pandas(df, preserve_index=False)\n\n with pq.ParquetWriter(fname, table.schema, filesystem=fs, version='2.0') \\\n as writer:\n writer.write_table(table)\n\n assert fs.path == fname\n assert fs.mode == 'wb'\n assert out.closed\n\n buf = out.getvalue()\n table_read = _read_table(pa.BufferReader(buf))\n df_read = table_read.to_pandas()\n tm.assert_frame_equal(df_read, df)\n\n # Should raise ValueError when filesystem is passed with file-like object\n with pytest.raises(ValueError) as err_info:\n pq.ParquetWriter(pa.BufferOutputStream(), table.schema, filesystem=fs)\n expected_msg = (\"filesystem passed but where is file-like, so\"\n \" there is nothing to open with filesystem.\")\n assert str(err_info) == expected_msg\n\n\ndef test_writing_empty_lists():\n # ARROW-2591: [Python] Segmentation fault issue in pq.write_table\n arr1 = pa.array([[], []], pa.list_(pa.int32()))\n table = pa.Table.from_arrays([arr1], ['list(int32)'])\n _check_roundtrip(table)\n\n\ndef test_write_nested_zero_length_array_chunk_failure():\n # Bug report in ARROW-3792\n cols = OrderedDict(\n int32=pa.int32(),\n list_string=pa.list_(pa.string())\n )\n data = [[], [OrderedDict(int32=1, list_string=('G',)), ]]\n\n # This produces a table with a column like\n # <Column name='list_string' type=ListType(list<item: string>)>\n # [\n # [],\n # [\n # [\n # \"G\"\n # ]\n # ]\n # ]\n #\n # Each column is a ChunkedArray with 2 elements\n my_arrays = [pa.array(batch, type=pa.struct(cols)).flatten()\n for batch in data]\n my_batches = [pa.RecordBatch.from_arrays(batch, pa.schema(cols))\n for batch in my_arrays]\n tbl = pa.Table.from_batches(my_batches, pa.schema(cols))\n 
_check_roundtrip(tbl)\n\n\[email protected]\ndef test_partitioned_dataset(tempdir):\n # ARROW-3208: Segmentation fault when reading a Parquet partitioned dataset\n # to a Parquet file\n path = tempdir / \"ARROW-3208\"\n df = pd.DataFrame({\n 'one': [-1, 10, 2.5, 100, 1000, 1, 29.2],\n 'two': [-1, 10, 2, 100, 1000, 1, 11],\n 'three': [0, 0, 0, 0, 0, 0, 0]\n })\n table = pa.Table.from_pandas(df)\n pq.write_to_dataset(table, root_path=str(path),\n partition_cols=['one', 'two'])\n table = pq.ParquetDataset(path).read()\n pq.write_table(table, path / \"output.parquet\")\n\n\ndef test_read_column_invalid_index():\n table = pa.table([pa.array([4, 5]), pa.array([\"foo\", \"bar\"])],\n names=['ints', 'strs'])\n bio = pa.BufferOutputStream()\n pq.write_table(table, bio)\n f = pq.ParquetFile(bio.getvalue())\n assert f.reader.read_column(0).to_pylist() == [4, 5]\n assert f.reader.read_column(1).to_pylist() == [\"foo\", \"bar\"]\n for index in (-1, 2):\n with pytest.raises((ValueError, IndexError)):\n f.reader.read_column(index)\n\n\ndef test_direct_read_dictionary():\n # ARROW-3325\n repeats = 10\n nunique = 5\n\n data = [\n [tm.rands(10) for i in range(nunique)] * repeats,\n\n ]\n table = pa.table(data, names=['f0'])\n\n bio = pa.BufferOutputStream()\n pq.write_table(table, bio)\n contents = bio.getvalue()\n\n result = pq.read_table(pa.BufferReader(contents),\n read_dictionary=['f0'])\n\n # Compute dictionary-encoded subfield\n expected = pa.table([table[0].dictionary_encode()], names=['f0'])\n assert result.equals(expected)\n\n\ndef test_dataset_read_dictionary(tempdir):\n path = tempdir / \"ARROW-3325-dataset\"\n t1 = pa.table([[tm.rands(10) for i in range(5)] * 10], names=['f0'])\n t2 = pa.table([[tm.rands(10) for i in range(5)] * 10], names=['f0'])\n pq.write_to_dataset(t1, root_path=str(path))\n pq.write_to_dataset(t2, root_path=str(path))\n\n result = pq.ParquetDataset(path, read_dictionary=['f0']).read()\n\n # The order of the chunks is non-deterministic\n ex_chunks = [t1[0].chunk(0).dictionary_encode(),\n t2[0].chunk(0).dictionary_encode()]\n\n assert result[0].num_chunks == 2\n c0, c1 = result[0].chunk(0), result[0].chunk(1)\n if c0.equals(ex_chunks[0]):\n assert c1.equals(ex_chunks[1])\n else:\n assert c0.equals(ex_chunks[1])\n assert c1.equals(ex_chunks[0])\n\n\ndef test_direct_read_dictionary_subfield():\n repeats = 10\n nunique = 5\n\n data = [\n [[tm.rands(10)] for i in range(nunique)] * repeats,\n ]\n table = pa.table(data, names=['f0'])\n\n bio = pa.BufferOutputStream()\n pq.write_table(table, bio)\n contents = bio.getvalue()\n result = pq.read_table(pa.BufferReader(contents),\n read_dictionary=['f0.list.item'])\n\n arr = pa.array(data[0])\n values_as_dict = arr.values.dictionary_encode()\n\n inner_indices = values_as_dict.indices.cast('int32')\n new_values = pa.DictionaryArray.from_arrays(inner_indices,\n values_as_dict.dictionary)\n\n offsets = pa.array(range(51), type='int32')\n expected_arr = pa.ListArray.from_arrays(offsets, new_values)\n expected = pa.table([expected_arr], names=['f0'])\n\n assert result.equals(expected)\n assert result[0].num_chunks == 1\n\n\[email protected]\ndef test_dataset_metadata(tempdir):\n path = tempdir / \"ARROW-1983-dataset\"\n\n # create and write a test dataset\n df = pd.DataFrame({\n 'one': [1, 2, 3],\n 'two': [-1, -2, -3],\n 'three': [[1, 2], [2, 3], [3, 4]],\n })\n table = pa.Table.from_pandas(df)\n\n metadata_list = []\n pq.write_to_dataset(table, root_path=str(path),\n partition_cols=['one', 'two'],\n metadata_collector=metadata_list)\n\n # 
open the dataset and collect metadata from pieces:\n dataset = pq.ParquetDataset(path)\n metadata_list2 = [p.get_metadata() for p in dataset.pieces]\n\n # compare metadata list content:\n assert len(metadata_list) == len(metadata_list2)\n for md, md2 in zip(metadata_list, metadata_list2):\n d = md.to_dict()\n d2 = md2.to_dict()\n # serialized_size is initialized in the reader:\n assert d.pop('serialized_size') == 0\n assert d2.pop('serialized_size') > 0\n assert d == d2\n\n\ndef test_parquet_file_too_small(tempdir):\n path = str(tempdir / \"test.parquet\")\n with pytest.raises(pa.ArrowIOError,\n match='size is 0 bytes'):\n with open(path, 'wb') as f:\n pass\n pq.read_table(path)\n\n with pytest.raises(pa.ArrowIOError,\n match='size is 4 bytes'):\n with open(path, 'wb') as f:\n f.write(b'ffff')\n pq.read_table(path)\n\n\[email protected]\ndef test_categorical_index_survives_roundtrip():\n # ARROW-3652, addressed by ARROW-3246\n df = pd.DataFrame([['a', 'b'], ['c', 'd']], columns=['c1', 'c2'])\n df['c1'] = df['c1'].astype('category')\n df = df.set_index(['c1'])\n\n table = pa.Table.from_pandas(df)\n bos = pa.BufferOutputStream()\n pq.write_table(table, bos)\n ref_df = pq.read_pandas(bos.getvalue()).to_pandas()\n assert isinstance(ref_df.index, pd.CategoricalIndex)\n assert ref_df.index.equals(df.index)\n\n\ndef test_dictionary_array_automatically_read():\n # ARROW-3246\n\n # Make a large dictionary, a little over 4MB of data\n dict_length = 4000\n dict_values = pa.array([('x' * 1000 + '_{}'.format(i))\n for i in range(dict_length)])\n\n num_chunks = 10\n chunk_size = 100\n chunks = []\n for i in range(num_chunks):\n indices = np.random.randint(0, dict_length,\n size=chunk_size).astype(np.int32)\n chunks.append(pa.DictionaryArray.from_arrays(pa.array(indices),\n dict_values))\n\n table = pa.table([pa.chunked_array(chunks)], names=['f0'])\n\n bio = pa.BufferOutputStream()\n pq.write_table(table, bio)\n contents = bio.getvalue()\n result = pq.read_table(pa.BufferReader(contents))\n\n assert result.equals(table)\n\n # The only key in the metadata was the Arrow schema key\n assert result.schema.metadata is None\n\n\[email protected]\ndef test_pandas_categorical_na_type_row_groups():\n # ARROW-5085\n df = pd.DataFrame({\"col\": [None] * 100, \"int\": [1.0] * 100})\n df_category = df.astype({\"col\": \"category\", \"int\": \"category\"})\n table = pa.Table.from_pandas(df)\n table_cat = pa.Table.from_pandas(df_category)\n buf = pa.BufferOutputStream()\n\n # it works\n pq.write_table(table_cat, buf, version=\"2.0\", chunk_size=10)\n result = pq.read_table(buf.getvalue())\n\n # Result is non-categorical\n assert result[0].equals(table[0])\n assert result[1].equals(table[1])\n\n\[email protected]\ndef test_pandas_categorical_roundtrip():\n # ARROW-5480, this was enabled by ARROW-3246\n\n # Have one of the categories unobserved and include a null (-1)\n codes = np.array([2, 0, 0, 2, 0, -1, 2], dtype='int32')\n categories = ['foo', 'bar', 'baz']\n df = pd.DataFrame({'x': pd.Categorical.from_codes(\n codes, categories=categories)})\n\n buf = pa.BufferOutputStream()\n pq.write_table(pa.table(df), buf)\n\n result = pq.read_table(buf.getvalue()).to_pandas()\n assert result.x.dtype == 'category'\n assert (result.x.cat.categories == categories).all()\n tm.assert_frame_equal(result, df)\n\n\[email protected]\ndef test_multi_dataset_metadata(tempdir):\n filenames = [\"ARROW-1983-dataset.0\", \"ARROW-1983-dataset.1\"]\n metapath = str(tempdir / \"_metadata\")\n\n # create a test dataset\n df = pd.DataFrame({\n 
'one': [1, 2, 3],\n 'two': [-1, -2, -3],\n 'three': [[1, 2], [2, 3], [3, 4]],\n })\n table = pa.Table.from_pandas(df)\n\n # write dataset twice and collect/merge metadata\n _meta = None\n for filename in filenames:\n meta = []\n pq.write_table(table, str(tempdir / filename),\n metadata_collector=meta)\n meta[0].set_file_path(filename)\n if _meta is None:\n _meta = meta[0]\n else:\n _meta.append_row_groups(meta[0])\n\n # Write merged metadata-only file\n with open(metapath, \"wb\") as f:\n _meta.write_metadata_file(f)\n\n # Read back the metadata\n meta = pq.read_metadata(metapath)\n md = meta.to_dict()\n _md = _meta.to_dict()\n for key in _md:\n if key != 'serialized_size':\n assert _md[key] == md[key]\n assert _md['num_columns'] == 3\n assert _md['num_rows'] == 6\n assert _md['num_row_groups'] == 2\n assert _md['serialized_size'] == 0\n assert md['serialized_size'] > 0\n\n\[email protected]\ndef test_filter_before_validate_schema(tempdir):\n # ARROW-4076 apply filter before schema validation\n # to avoid checking unneeded schemas\n\n # create partitioned dataset with mismatching schemas which would\n # otherwise raise if first validation all schemas\n dir1 = tempdir / 'A=0'\n dir1.mkdir()\n table1 = pa.Table.from_pandas(pd.DataFrame({'B': [1, 2, 3]}))\n pq.write_table(table1, dir1 / 'data.parquet')\n\n dir2 = tempdir / 'A=1'\n dir2.mkdir()\n table2 = pa.Table.from_pandas(pd.DataFrame({'B': ['a', 'b', 'c']}))\n pq.write_table(table2, dir2 / 'data.parquet')\n\n # read single file using filter\n table = pq.read_table(tempdir, filters=[[('A', '==', 0)]])\n assert table.column('B').equals(pa.chunked_array([[1, 2, 3]]))\n"
] | [
[
"numpy.ones",
"pandas.DatetimeIndex",
"pandas.date_range",
"pandas.MultiIndex.from_arrays",
"pandas.DataFrame",
"numpy.random.seed",
"numpy.random.randn",
"pandas.Categorical",
"pandas.util.testing.rands",
"numpy.arange",
"numpy.iinfo",
"pandas.to_datetime",
"pandas.Categorical.from_codes",
"pandas.concat",
"numpy.array",
"pandas.util.testing.assert_frame_equal",
"pandas.Timestamp",
"numpy.random.randint"
]
] |
drvinceknight/amwoss | [
"8b0bf80f0a06dc5cf9bfeef4b9f9e174ccadf06d"
] | [
"src/assets/sd_vaccine_plots/main.py"
] | [
"from scipy.integrate import solve_ivp\nimport matplotlib.pyplot as plt\nplt.style.use('seaborn-whitegrid')\n\ndef derivatives(t, y, vaccine_rate, birth_rate=0.01):\n \"\"\"Defines the system of differential equations that\n describe the epidemiology model.\n\n Args:\n t: a positive float\n y: a tuple of three integers\n vaccine_rate: a positive float <= 1\n birth_rate: a positive float <= 1\n\n Returns:\n A tuple containing dS, dI, and dR\n \"\"\"\n infection_rate = 0.3\n recovery_rate = 0.02\n death_rate = 0.01\n S, I, R = y\n N = S + I + R\n dSdt = (\n -((infection_rate * S * I) / N)\n + ((1 - vaccine_rate) * birth_rate * N)\n - (death_rate * S)\n )\n dIdt = (\n ((infection_rate * S * I) / N)\n - (recovery_rate * I)\n - (death_rate * I)\n )\n dRdt = (\n (recovery_rate * I)\n - (death_rate * R)\n + (vaccine_rate * birth_rate * N)\n )\n return dSdt, dIdt, dRdt\n\ndef integrate_ode(\n derivative_function,\n t_span,\n y0=(2999, 1, 0),\n vaccine_rate=0.85,\n birth_rate=0.01,\n):\n \"\"\"Numerically solve the system of differential equations.\n\n Args:\n derivative_function: a function returning a tuple\n of three floats\n t_span: endpoints oif the time range to integrate over\n y0: a tuple of three integers (default: (2999, 1, 0))\n vaccine_rate: a positive float <= 1 (default: 0.85)\n birth_rate: a positive float <= 1 (default: 0.01)\n\n Returns:\n A tuple of three arrays\n \"\"\"\n sol = solve_ivp(\n derivative_function,\n t_span,\n y0,\n args=(vaccine_rate, birth_rate),\n )\n ts, S, I, R = sol.t, sol.y[0], sol.y[1], sol.y[2]\n return ts, S, I, R\n\nt_span = [0, 730]\nt, S, I, R = integrate_ode(derivatives, t_span, vaccine_rate=0.0)\n\nfig, ax = plt.subplots(1, figsize=(10, 5))\nax.plot(t, S, label='Susceptible', c='black', linestyle='solid', linewidth=1.75)\nax.plot(t, I, label='Infected', c='black', linestyle='dotted', linewidth=1.75)\nax.plot(t, R, label='Recovered', c='black', linestyle='dashed', linewidth=1.75)\nax.legend(fontsize=14, frameon=True, ncol=3, bbox_to_anchor=(0.85, 1.13))\nax.set_xlabel('Time', fontsize=14)\nax.set_ylabel('People', fontsize=14)\nfig.savefig(\"plot_no_vaccine.pdf\")\n\nt, S, I, R = integrate_ode(derivatives, t_span, vaccine_rate=0.85)\n\nfig, ax = plt.subplots(1, figsize=(10, 5))\nax.plot(t, S, label='Susceptible', c='black', linestyle='solid', linewidth=1.75)\nax.plot(t, I, label='Infected', c='black', linestyle='dotted', linewidth=1.75)\nax.plot(t, R, label='Recovered', c='black', linestyle='dashed', linewidth=1.75)\nax.legend(fontsize=14, frameon=True, ncol=3, bbox_to_anchor=(0.85, 1.13))\nax.set_xlabel('Time', fontsize=14)\nax.set_ylabel('People', fontsize=14)\nfig.savefig(\"plot_with_vaccine.pdf\")"
] | [
[
"matplotlib.pyplot.style.use",
"scipy.integrate.solve_ivp",
"matplotlib.pyplot.subplots"
]
] |
agstub/sglake-detectability | [
"5556250a59d7f500bcee86899dd9a497a368faca"
] | [
"source/boundaryconds.py"
] | [
"#-------------------------------------------------------------------------------\n# This file contains functions that:\n# (1) define the boundaries (ice-air,ice-water,ice-bed) of the mesh, AND...\n# (2) mark the boundaries of the mesh\n#-------------------------------------------------------------------------------\nfrom params import tol,Lngth,Hght\nfrom geometry import bed\nimport numpy as np\nfrom dolfin import *\n\n#-------------------------------------------------------------------------------\n# Define SubDomains for ice-water boundary, ice-bed boundary, inflow (x=0) and\n# outflow (x=Length of domain). The parameter 'tol' is a minimal water depth\n# used to distinguish the ice-water and ice-bed surfaces.\n\nclass WaterBoundary(SubDomain):\n # Ice-water boundary.\n # This boundary is marked first and all of the irrelevant portions are\n # overwritten by the other boundary markers.\n def inside(self, x, on_boundary):\n return (on_boundary and (x[1]<0.5*Hght))\n\nclass BedBoundary(SubDomain):\n # Ice-bed boundary away from the lake; the portions near the lake are overwritten\n # by BasinBoundary.\n # Lifting of ice from the bed *is not* allowed on this boundary.\n def inside(self, x, on_boundary):\n return (on_boundary and ((x[1]-bed(x[0]))<=tol))\n\nclass LeftBoundary(SubDomain):\n # Left boundary\n def inside(self, x, on_boundary):\n return (on_boundary and np.abs(x[0])<tol)\n\nclass RightBoundary(SubDomain):\n # Right boundary\n def inside(self, x, on_boundary):\n return (on_boundary and np.abs(x[0]-Lngth)<tol)\n\n#-------------------------------------------------------------------------------\n\ndef mark_boundary(mesh):\n # Assign markers to each boundary segment (except the upper surface).\n # This is used at each time step to update the markers.\n #\n # Boundary marker numbering convention:\n # 1 - Left boundary\n # 2 - Right boundary\n # 3 - Ice-bed boundary\n # 4 - Ice-water boundary\n #\n # This function returns these markers, which are used to define the\n # boundary integrals and dirichlet conditions.\n\n boundary_markers = MeshFunction('size_t', mesh,dim=1)\n boundary_markers.set_all(0)\n\n # Mark ice-water boundary\n bdryWater = WaterBoundary()\n bdryWater.mark(boundary_markers, 4)\n\n # Mark ice-bed boundary away from lake\n bdryBed = BedBoundary()\n bdryBed.mark(boundary_markers, 3)\n\n # Mark inflow boundary\n bdryLeft = LeftBoundary()\n bdryLeft.mark(boundary_markers, 1)\n\n # Mark outflow boundary\n bdryRight = RightBoundary()\n bdryRight.mark(boundary_markers, 2)\n\n return boundary_markers\n"
] | [
[
"numpy.abs"
]
] |
caogao/param | [
"9de2602c894df264a004c352ee16abc14f93da76"
] | [
"train/comms/pt/comms.py"
] | [
"#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport argparse\nimport logging\nimport time\n\nimport comms_utils\nimport numpy as np\n\n# pytorch\nimport torch\nfrom comms_utils import paramCommsBench, ensureTensorFlush\n\n### TODO: add these to class variables?\nsupportedCollectives = [\n \"reduce\",\n \"all_reduce\",\n \"all_to_all\",\n \"all_to_allv\",\n \"all_gather\",\n \"broadcast\",\n \"reduce_scatter\",\n \"reduce_scatter_base\",\n \"all_gather_base\",\n \"incast\",\n \"multicast\",\n] # , \"scatter\", \"gather\"]\npt2ptPatterns = [\n \"one2one\",\n \"pairwise\",\n]\n\nlogger = logging.getLogger(__name__)\n\n\nclass MultilineFormatter(argparse.ArgumentDefaultsHelpFormatter):\n def _split_lines(self, text, width):\n if text.startswith(\"R|\"):\n return text[2:].splitlines()\n # this is the RawTextHelpFormatter._split_lines\n return argparse.ArgumentDefaultsHelpFormatter._split_lines(self, text, width)\n\n\n# define the collective benchmark\nclass commsCollBench(paramCommsBench):\n def __init__(self):\n super().__init__(supportedNwstacks=[\"pytorch-dist\", \"pytorch-xla-tpu\"])\n\n # def readCollArgs(self, parser):\n def readArgs(self, parser):\n # read the common/basic arguments\n super().readArgs(parser)\n parser.add_argument(\n \"--w\", type=int, default=5, help=\"number of warmup iterations\"\n ) # number of warmup-iterations\n parser.add_argument(\n \"--n\", type=int, default=5, help=\"number of iterations\"\n ) # number of iterations\n # experiment related parameters\n parser.add_argument(\n \"--mode\",\n type=str,\n default=\"comms\",\n help=\"benchmark mode\",\n choices=[\"comms\", \"compute\", \"dlrm\", \"comms-compute\"],\n ) # alternative is DLRM mode or comm-compute mode\n parser.add_argument(\n \"--b\", type=str, default=\"8\", help=\"minimum size, in bytes, to start with\"\n ) # COMMS mode, begin the sweep at.\n parser.add_argument(\n \"--e\", type=str, default=\"64\", help=\"maximum size, in bytes, to end at\"\n ) # COMMS mode, end the sweep at.\n parser.add_argument(\n \"--f\", type=int, default=2, help=\"multiplication factor between sizes\"\n ) # COMMS mode, multiplication factor.\n parser.add_argument(\n \"--collective\",\n type=str,\n default=\"all_reduce\",\n help=\"Collective operation to be evaluated\",\n choices=supportedCollectives,\n ) # collective op to benchmark\n # For comm-compute or compute mode\n parser.add_argument(\n \"--kernel\",\n type=str,\n default=\"gemm\",\n help=\"Compute kernel, used for comms-compute or compute mode\",\n choices=[\"gemm\", \"emb_lookup\"],\n ) # Compute kernel: \"gemm\"\n parser.add_argument(\n \"--num-compute\",\n type=int,\n default=100,\n help=\"one collective for every NUM_COMPUTE compute kernels\",\n ) # Launch one coll for every n compute kernels\n # For GEMM\n parser.add_argument(\n \"--mm-dim\",\n type=int,\n default=100,\n help=\"dimension size for GEMM compute kernel\",\n ) # Matrix multiplication dim n, A[n,n] * B [n,n]\n # For emb lookup\n parser.add_argument(\n \"--emb-dim\",\n type=int,\n default=128,\n help=\"dimension size for Embedding table compute kernel\",\n ) # Embedding table dimension\n parser.add_argument(\n \"--num-embs\",\n type=int,\n default=100000,\n help=\"Embedding table hash size for Embedding table compute kernel\",\n ) # Embedding table hash 
size\n parser.add_argument(\n \"--avg-len\",\n type=int,\n default=28,\n help=\"Average lookup operations per sample\",\n ) # Average #lookup per sample\n parser.add_argument(\n \"--batch-size\",\n type=int,\n default=512,\n help=\"number of samples reading the table concurrently\",\n ) # #Samples reading the table concurrently\n parser.add_argument(\n \"--root\", type=int, default=0, help=\"root process for reduce benchmark\"\n ) # root process for reduce and bcast (and gather, scatter, etc., if support in the future)\n # TODO: check the correctness of root, should be between 0 to [world_size -1]\n parser.add_argument(\n \"--src-ranks\",\n type=str,\n nargs=\"?\",\n help=\"R|src ranks for many-to-one incast pattern or pt2pt.\\n\"\n \"List of ranks separated by comma or a range specified by start:end.\\n\"\n \"Pt2pt one2one should set only one rank.\\n\"\n \"The default value of incast includes all ranks, pt2pt includes rank 0.\",\n ) # optional: group of src ranks in many-to-one incast or pt2pt\n parser.add_argument(\n \"--dst-ranks\",\n type=str,\n nargs=\"?\",\n help=\"R|dst ranks for one-to-many multicast pattern or pt2pt.\\n\"\n \"List of ranks separated by comma or a range specified by start:end.\\n\"\n \"Pt2pt one2one should set only one rank\\n\"\n \"The default value of multicast includes all ranks, pt2pt includes rank 1.\",\n ) # optional: group of dst ranks in one-to-many multicast or pt2pt\n parser.add_argument(\n \"--pair\",\n action=\"store_true\",\n default=False,\n help=\"Toggle to enable collective pair mode\",\n )\n parser.add_argument(\n \"--collective-pair\",\n type=str,\n default=\"all_reduce\",\n help=\"Collective pair operation to be evaluated\",\n choices=supportedCollectives,\n ) # collective op to pair with the other collective, --collective should be non-empty\n parser.add_argument(\n \"--overlap-pair-pgs\",\n action=\"store_true\",\n default=False,\n help=\"Toggle to enable overlapping collective pair with two pgs\",\n ) # overlap collective pair with two pgs\n parser.add_argument(\n \"--pt2pt\",\n type=str,\n default=None,\n help=\"point to point pattern\",\n choices=pt2ptPatterns,\n ) # point to point mode\n parser.add_argument(\n \"--window\",\n type=int,\n default=100,\n help=\"window size for pt2pt throughput test\",\n ) # optional: point to point throughput test window size\n\n return parser.parse_known_args()\n\n def checkArgs(self, args):\n super().checkArgs(args)\n\n if args.pt2pt is not None:\n args.collective = \"pt2pt\"\n if args.pt2pt not in pt2ptPatterns:\n logger.error(\n f\"Specified pt2pt pattern: {args.pt2pt} is not one of the supported pt2pt patterns: {str(pt2ptPatterns)}\"\n )\n comms_utils.gracefulExit()\n\n args.b = comms_utils.parsesize(args.b)\n args.e = comms_utils.parsesize(args.e)\n args.dtype = self.dtypeMap[args.data_type]\n\n if args.b < 1:\n logger.warning(\n f\"Starting size (--b {args.b}) should be greater than 1 byte...fix and continue\"\n )\n args.b = 1\n\n if args.e < args.b:\n logger.warning(\n f\"the begin-size (--b {args.b}) is larger than the end-size (--e {args.e})\"\n )\n\n if args.device == \"cpu\" and args.backend == \"nccl\":\n raise ValueError(f\"NCCL is not supported for device type {args.device}\")\n\n if args.c == 1 and args.z == 0 and args.collective in (\"all_reduce\", \"reduce\", \"reduce_scatter\"):\n logger.warning(\n f\"Data validation is not supported for {args.collective} in non-blocking mode, disabled and continue\"\n )\n args.c = 0\n\n # run a few sanity checks\n if args.bitwidth < 32:\n if args.device 
!= \"cuda\":\n logger.error(\n f\"collective quantization may not be fully supported for {args.device}\"\n )\n comms_utils.checkQuantArgs(\n args.collective,\n args.dtype,\n args.b,\n args.quant_a2a_embedding_dim,\n args.z,\n )\n\n def runColl(self, comm_fn=None, compute_fn=None, comm_fn_pair=None):\n self.backendFuncs.complete_accel_ops(self.collectiveArgs, initOp=True)\n self.backendFuncs.sync_barrier(self.collectiveArgs, desc=\"runColl_begin\")\n\n elapsedTimeNS = 0.0\n is_blocking = not self.collectiveArgs.asyncOp\n enable_comms = False if (comm_fn is None or comm_fn == self.backendFuncs.noop) else True\n enable_compute = False if (compute_fn is None or compute_fn == self.backendFuncs.noop) else True\n enable_comms_pair = False if (comm_fn_pair is None or comm_fn_pair == self.backendFuncs.noop) else True\n\n # for comms pair mode, force async comms for overlapping evaluation\n if enable_comms_pair:\n self.collectiveArgs.asyncOp = True\n for nIter in range(\n self.collectiveArgs.numWarmupIters + self.collectiveArgs.numIters\n ):\n if nIter == self.collectiveArgs.numWarmupIters:\n # Flush non-blocking ops to ensure warmup is really complete\n self.backendFuncs.complete_accel_ops(self.collectiveArgs)\n ensureTensorFlush(self.collectiveArgs.opTensor)\n if enable_comms_pair:\n ensureTensorFlush(self.collectiveArgs.opTensor_pair)\n # Start measuring time after warmup iterations\n elapsedTimeNS = 0.0\n self.collectiveArgs.quant_time.reset()\n self.collectiveArgs.dequant_time.reset()\n # reset tensor values for data validation check\n if enable_comms:\n self.setTensorVal(self.collectiveArgs.opTensor)\n # for blocking mode, do barrier before starting collective\n if is_blocking:\n self.backendFuncs.sync_barrier(self.collectiveArgs)\n\n start = time.monotonic() # available only in py3\n self.collectiveArgs.group = self.backendFuncs.get_next_group()\n comm_fn(self.collectiveArgs)\n # post another collecitve if on comms pair mode, otherwise it's noop\n self.collectiveArgs.group = self.backendFuncs.get_next_group()\n comm_fn_pair(self.collectiveArgs, pair=enable_comms_pair)\n\n if enable_compute:\n for _ in range(self.collectiveArgs.numComputePerColl):\n # TODO: investigate the cache effect\n # Flush the cache\n # _ = torch.rand(6 * 1024 * 1024 // 4).float() * 2 # V100 6MB L2 cache\n compute_fn(self.collectiveArgs)\n if is_blocking: # should be sychronous, wait for the collective\n self.backendFuncs.complete_accel_ops(self.collectiveArgs)\n # Measuring time.\n elapsedTimeNS += (\n time.monotonic() - start\n ) * 1e9 # keeping time in NS, helps in divising data by nanosecond\n\n start = time.monotonic() # available only in py3\n self.backendFuncs.complete_accel_ops(self.collectiveArgs)\n end = time.monotonic() # available only in py3\n\n ensureTensorFlush(self.collectiveArgs.opTensor)\n if enable_comms_pair:\n ensureTensorFlush(self.collectiveArgs.opTensor_pair)\n\n elapsedTimeNS += (\n end - start\n ) * 1e9 # keeping time in NS, helps in divising data by nanoseconds\n\n memSize = self.backendFuncs.get_mem_size(self.collectiveArgs)\n\n avgIterNS, algBW = comms_utils.getAlgBW(\n elapsedTimeNS, memSize, self.collectiveArgs.numIters\n )\n busBW = self.backendFuncs.getBusBW(\n self.collectiveArgs.collective,\n algBW,\n self.collectiveArgs,\n )\n if enable_comms_pair:\n memSize_pair = self.backendFuncs.get_mem_size(\n self.collectiveArgs, pair=enable_comms_pair\n )\n memSize += memSize_pair\n\n _, algBW_pair = comms_utils.getAlgBW(\n elapsedTimeNS, memSize_pair, self.collectiveArgs.numIters\n )\n algBW += 
algBW_pair\n\n busBW += self.backendFuncs.getBusBW(\n self.collectiveArgs.collective_pair,\n algBW_pair,\n self.collectiveArgs,\n )\n\n self.backendFuncs.sync_barrier(self.collectiveArgs, desc=\"runColl_end\")\n\n results = {\n \"timeUS\": avgIterNS / 1e3,\n \"algBW\": algBW,\n \"busBW\": busBW,\n \"memSize\": memSize,\n }\n return results\n\n def runPt2Pt(self):\n self.backendFuncs.complete_accel_ops(self.collectiveArgs, initOp=True)\n # warm-up\n memSize = self.backendFuncs.get_mem_size(self.collectiveArgs)\n self.getPingLatency(self.collectiveArgs.numWarmupIters)\n self.getPingPongLatency(self.collectiveArgs.numWarmupIters)\n self.getUniBW(self.collectiveArgs.numWarmupIters, memSize)\n self.getBiBW(self.collectiveArgs.numWarmupIters, memSize)\n self.backendFuncs.sync_barrier(self.collectiveArgs, \"runpt2pt_begin\")\n # pt2pt benchmark\n pingPerIterNS = self.getPingLatency(self.collectiveArgs.numIters)\n pingPongPerIterNS = self.getPingPongLatency(self.collectiveArgs.numIters)\n avgUniBW = self.getUniBW(self.collectiveArgs.numIters, memSize)\n avgBiBW = self.getBiBW(self.collectiveArgs.numIters, memSize)\n self.backendFuncs.sync_barrier(self.collectiveArgs, \"runpt2pt\")\n results = {\n \"pingPerIterNS\": pingPerIterNS,\n \"pingPongPerIterNS\": pingPongPerIterNS,\n \"avgUniBW\": avgUniBW,\n \"avgBiBW\": avgBiBW,\n \"memSize\": memSize,\n }\n return results\n\n def getPingLatency(self, numIters):\n logger.debug(\n \"STATUS: begin ping test with src_ranks=%s, dst_ranks=%s.\"\n % (self.collectiveArgs.src_ranks, self.collectiveArgs.dst_ranks)\n )\n self.collectiveArgs.asyncOp = False\n # get one-way latency\n pingLatencyNS = []\n for _ in range(numIters):\n self.backendFuncs.sync_barrier(self.collectiveArgs)\n start = time.monotonic()\n if self.collectiveArgs.global_rank in self.collectiveArgs.src_ranks:\n idx = self.collectiveArgs.src_ranks.index(\n self.collectiveArgs.global_rank\n )\n self.backendFuncs.send(\n self.collectiveArgs, self.collectiveArgs.dst_ranks[idx]\n )\n elif self.collectiveArgs.global_rank in self.collectiveArgs.dst_ranks:\n idx = self.collectiveArgs.dst_ranks.index(\n self.collectiveArgs.global_rank\n )\n self.backendFuncs.recv(\n self.collectiveArgs, self.collectiveArgs.src_ranks[idx]\n )\n self.backendFuncs.complete_accel_ops(self.collectiveArgs)\n pingLatencyNS.append(\n (time.monotonic() - start) * 1e9\n ) # keeping time in NS, helps in divising data by nanosecond\n logger.debug(\"STATUS: end ping test.\")\n return pingLatencyNS\n\n def getPingPongLatency(self, numIters):\n logger.debug(\n \"STATUS: begin ping-pong with src_ranks=%s, dst_ranks=%s.\"\n % (self.collectiveArgs.src_ranks, self.collectiveArgs.dst_ranks)\n )\n self.collectiveArgs.asyncOp = False\n # get round-trip latency\n pingPongLatencyNS = []\n for _ in range(numIters):\n self.backendFuncs.sync_barrier(self.collectiveArgs)\n start = time.monotonic()\n if self.collectiveArgs.global_rank in self.collectiveArgs.src_ranks:\n idx = self.collectiveArgs.src_ranks.index(\n self.collectiveArgs.global_rank\n )\n self.backendFuncs.send(\n self.collectiveArgs, self.collectiveArgs.dst_ranks[idx]\n )\n self.backendFuncs.recv(\n self.collectiveArgs, self.collectiveArgs.dst_ranks[idx]\n )\n elif self.collectiveArgs.global_rank in self.collectiveArgs.dst_ranks:\n idx = self.collectiveArgs.dst_ranks.index(\n self.collectiveArgs.global_rank\n )\n self.backendFuncs.recv(\n self.collectiveArgs, self.collectiveArgs.src_ranks[idx]\n )\n self.backendFuncs.send(\n self.collectiveArgs, self.collectiveArgs.src_ranks[idx]\n )\n 
self.backendFuncs.complete_accel_ops(self.collectiveArgs)\n pingPongLatencyNS.append(\n (time.monotonic() - start) * 1e9\n ) # keeping time in NS, helps in divising data by nanosecond\n logger.debug(\"STATUS: end ping-pong test.\")\n return pingPongLatencyNS\n\n def getUniBW(self, numIters, memSize):\n logger.debug(\n \"STATUS: begin UniBW test with src_ranks=%s, dst_ranks=%s.\"\n % (self.collectiveArgs.src_ranks, self.collectiveArgs.dst_ranks)\n )\n self.collectiveArgs.asyncOp = True\n # get unidirectional bandwidth\n uniLatencyNS = []\n for _ in range(numIters):\n self.backendFuncs.sync_barrier(self.collectiveArgs)\n start = time.monotonic()\n for w in range(self.collectiveArgs.window):\n if self.collectiveArgs.global_rank in self.collectiveArgs.src_ranks:\n idx = self.collectiveArgs.src_ranks.index(\n self.collectiveArgs.global_rank\n )\n self.backendFuncs.isend(\n self.collectiveArgs, self.collectiveArgs.dst_ranks[idx], tag=w\n )\n elif self.collectiveArgs.global_rank in self.collectiveArgs.dst_ranks:\n idx = self.collectiveArgs.dst_ranks.index(\n self.collectiveArgs.global_rank\n )\n self.backendFuncs.irecv(\n self.collectiveArgs, self.collectiveArgs.src_ranks[idx], tag=w\n )\n self.backendFuncs.complete_accel_ops(self.collectiveArgs)\n uniLatencyNS.append(\n (time.monotonic() - start) * 1e9\n ) # keeping time in NS, helps in divising data by nanosecond\n uniLatencyNS = [lat / self.collectiveArgs.window for lat in uniLatencyNS]\n uniLatencyNS = np.mean(np.array(uniLatencyNS))\n _, avgUniBW = comms_utils.getAlgBW(uniLatencyNS, memSize, 1)\n logger.debug(\"STATUS: end UniBW test.\")\n return avgUniBW\n\n def getBiBW(self, numIters, memSize):\n logger.debug(\n \"STATUS: begin BiBW test with src_ranks=%s, dst_ranks=%s.\"\n % (self.collectiveArgs.src_ranks, self.collectiveArgs.dst_ranks)\n )\n self.collectiveArgs.asyncOp = True\n # get bidirectional bandwidth\n biLatencyNS = []\n for _ in range(numIters):\n self.backendFuncs.sync_barrier(self.collectiveArgs)\n start = time.monotonic()\n for w in range(self.collectiveArgs.window):\n if self.collectiveArgs.global_rank in self.collectiveArgs.src_ranks:\n idx = self.collectiveArgs.src_ranks.index(\n self.collectiveArgs.global_rank\n )\n self.backendFuncs.isend(\n self.collectiveArgs, self.collectiveArgs.dst_ranks[idx], tag=w\n )\n self.backendFuncs.irecv(\n self.collectiveArgs,\n self.collectiveArgs.dst_ranks[idx],\n tag=w + self.collectiveArgs.window,\n )\n elif self.collectiveArgs.global_rank in self.collectiveArgs.dst_ranks:\n idx = self.collectiveArgs.dst_ranks.index(\n self.collectiveArgs.global_rank\n )\n self.backendFuncs.irecv(\n self.collectiveArgs, self.collectiveArgs.src_ranks[idx], tag=w\n )\n self.backendFuncs.isend(\n self.collectiveArgs,\n self.collectiveArgs.src_ranks[idx],\n tag=w + self.collectiveArgs.window,\n )\n self.backendFuncs.complete_accel_ops(self.collectiveArgs)\n biLatencyNS.append(\n (time.monotonic() - start) * 1e9\n ) # keeping time in NS, helps in divising data by nanosecond\n biLatencyNS = [lat / self.collectiveArgs.window for lat in biLatencyNS]\n biLatencyNS = np.mean(np.array(biLatencyNS))\n _, avgBiBW = comms_utils.getAlgBW(biLatencyNS, 2 * memSize, 1)\n logger.debug(\"STATUS: end UniBW test.\")\n return avgBiBW\n\n def checkPt2PtRanks(self):\n # set default values\n if not self.collectiveArgs.src_ranks:\n self.collectiveArgs.src_ranks = [0]\n if not self.collectiveArgs.dst_ranks:\n self.collectiveArgs.dst_ranks = [1]\n\n # sanity check\n if self.collectiveArgs.pt2pt == \"one2one\":\n if (\n 
len(self.collectiveArgs.src_ranks) > 1\n or len(self.collectiveArgs.dst_ranks) > 1\n ):\n if self.global_rank == 0:\n logger.error(\n \"One2one Pt2Pt requires only a single rank is specified in src_ranks and dst_ranks! \"\n )\n comms_utils.gracefulExit()\n elif self.collectiveArgs.pt2pt == \"pairwise\":\n # pairwise pt2pt requires identical number of ranks in src_ranks and dst_ranks.\n if len(self.collectiveArgs.src_ranks) != len(self.collectiveArgs.dst_ranks):\n if self.global_rank == 0:\n logger.error(\n \"Pairwise Pt2Pt requires identical number of members in src_ranks and dst_ranks! \"\n )\n comms_utils.gracefulExit()\n # pairwise pt2pt does not allow same rank to exist in both groups\n if bool(\n set(self.collectiveArgs.src_ranks).intersection(\n self.collectiveArgs.dst_ranks\n )\n ):\n if self.global_rank == 0:\n logger.error(\n \"Pairwise Pt2Pt requires distinct members in src_ranks and dst_ranks! \"\n )\n comms_utils.gracefulExit()\n\n if self.global_rank == 0:\n print(\n f\"\\t collective={self.collectiveArgs.collective}\\t{self.collectiveArgs.pt2pt}, src_ranks={self.collectiveArgs.src_ranks}, dst_ranks={self.collectiveArgs.dst_ranks}\"\n )\n\n def checkCollectiveRanks(self):\n if self.collectiveArgs.collective == \"incast\":\n # incast: set default value and exclude root\n if not self.collectiveArgs.src_ranks:\n self.collectiveArgs.src_ranks = [*range(self.comm_size)]\n if self.collectiveArgs.srcOrDst in self.collectiveArgs.src_ranks:\n self.collectiveArgs.src_ranks.remove(self.collectiveArgs.srcOrDst)\n elif self.collectiveArgs.collective == \"multicast\":\n # multicast: set default value and exclude root\n if not self.collectiveArgs.dst_ranks:\n self.collectiveArgs.dst_ranks = [*range(self.comm_size)]\n if self.collectiveArgs.srcOrDst in self.collectiveArgs.dst_ranks:\n self.collectiveArgs.dst_ranks.remove(self.collectiveArgs.srcOrDst)\n\n if self.global_rank == 0:\n print(\n f\"\\t collective={self.collectiveArgs.collective}, src_ranks={self.collectiveArgs.src_ranks}, dst_ranks={self.collectiveArgs.dst_ranks}\"\n )\n\n def initCollectiveArgs(self, commsParams):\n # lint was complaining that benchTime was too complex!\n (\n local_rank,\n global_rank,\n world_size,\n group,\n curDevice,\n curHwDevice,\n ) = comms_utils.get_rank_details(\n self.backendFuncs\n ) # Getting ranks from backednFuncs object, since we cannot use MPI (e.g.: TPU) to launch all the processes.\n self.backendFuncs.sayHello() # Informs us where each process is running.\n groups = self.backendFuncs.get_groups()\n num_pgs = len(groups)\n\n self.comm_size = world_size\n self.global_rank = global_rank\n\n comms_utils.fixBeginSize(\n commsParams, world_size\n ) # Ensuring that all-reduce and all-to-all has atleast one member per rank.\n allSizes = comms_utils.getSizes(\n commsParams.beginSize, commsParams.endSize, commsParams.stepFactor\n ) # Given the begin-size, end-size, step-factor what are the message sizes to iterate on.\n\n if global_rank == 0:\n print(\n f\"[Rank {global_rank:>3}] allSizes: {allSizes} local_rank: {local_rank} element_size: {commsParams.element_size}\"\n )\n\n self.collectiveArgs.group = group\n self.collectiveArgs.groups = groups\n self.collectiveArgs.num_pgs = num_pgs\n self.collectiveArgs.device = curDevice\n self.collectiveArgs.world_size = world_size\n self.collectiveArgs.numIters = commsParams.numIters\n self.collectiveArgs.numWarmupIters = commsParams.numWarmupIters\n self.collectiveArgs.global_rank = global_rank\n self.collectiveArgs.backendFuncs = self.backendFuncs\n 
self.collectiveArgs.collective = commsParams.collective\n op = self.backendFuncs.get_reduce_op(\"sum\")\n self.collectiveArgs.op = op\n self.collectiveArgs.srcOrDst = commsParams.srcOrDst\n self.collectiveArgs.src_ranks = commsParams.src_ranks\n self.collectiveArgs.dst_ranks = commsParams.dst_ranks\n self.collectiveArgs.pair = commsParams.pair\n self.collectiveArgs.collective_pair = commsParams.collective_pair\n self.collectiveArgs.pt2pt = commsParams.pt2pt\n self.collectiveArgs.window = commsParams.window\n self.collectiveArgs.asyncOp = False if commsParams.blockingFlag == 1 else True\n\n if commsParams.bitwidth < 32:\n comms_utils.initQuantCommCtx(self.collectiveArgs, commsParams)\n\n if self.collectiveArgs.collective == \"pt2pt\":\n self.checkPt2PtRanks()\n else:\n self.checkCollectiveRanks()\n\n computeFunc = self.backendFuncs.noop\n if (\n commsParams.mode != \"comms\"\n ): # Compute mode related initialization if not in comms-only mode\n if commsParams.kernel == \"gemm\":\n computeFunc = self.backendFuncs.gemm\n\n mm_dim = commsParams.mm_dim\n in1 = np.random.rand(mm_dim, mm_dim)\n MMin1 = torch.FloatTensor(in1).to(curDevice)\n in2 = np.random.rand(mm_dim, mm_dim)\n MMin2 = torch.FloatTensor(in2).to(curDevice)\n in3 = np.random.rand(mm_dim, mm_dim)\n MMin3 = torch.FloatTensor(in3).to(curDevice)\n MMout = self.backendFuncs.alloc_empty(\n [mm_dim, mm_dim], commsParams.dtype, curDevice\n )\n self.collectiveArgs.MMout = MMout\n self.collectiveArgs.MMin1 = MMin1\n self.collectiveArgs.MMin2 = MMin2\n self.collectiveArgs.MMin3 = MMin3\n self.collectiveArgs.numComputePerColl = commsParams.num_compute\n elif commsParams.kernel == \"emb_lookup\":\n computeFunc = self.backendFuncs.emb_lookup\n\n emb_dim = commsParams.emb_dim\n num_embeddings = commsParams.num_embs\n avg_length = commsParams.avg_len\n batch_size = commsParams.batch_size\n print(\n f\"emb_dim {emb_dim} num_embs {num_embeddings} avg_len {avg_length} bs {batch_size}\"\n )\n self.collectiveArgs.EmbWeights = self.backendFuncs.alloc_empty(\n [num_embeddings, emb_dim], torch.double, curDevice\n )\n self.collectiveArgs.TableOffsets = torch.LongTensor(\n [0, num_embeddings]\n ).to(curDevice)\n self.collectiveArgs.Indices = torch.LongTensor(\n np.random.randint(0, num_embeddings - 1, avg_length * batch_size)\n ).to(curDevice)\n lengths = np.ones((1, batch_size)) * avg_length\n flat_lengths = lengths.flatten()\n self.collectiveArgs.Offsets = torch.LongTensor(\n [0] + np.cumsum(flat_lengths).tolist()\n ).to(curDevice)\n self.collectiveArgs.LookupOut = self.backendFuncs.alloc_empty(\n [batch_size, emb_dim], torch.double, curDevice\n )\n self.collectiveArgs.AvgLengths = avg_length\n self.collectiveArgs.numComputePerColl = commsParams.num_compute\n\n return (\n local_rank,\n global_rank,\n world_size,\n group,\n curDevice,\n curHwDevice,\n allSizes,\n computeFunc,\n )\n\n def gatherBenchTime(self, collectiveArgs, commsParams, timeUsElapsedList):\n # Push the list to device, then do an all-gather.\n timeElapsedTensor = torch.tensor(\n timeUsElapsedList, device=self.backendFuncs.get_device()\n )\n collectiveArgs.opTensor = None\n if commsParams.backend != \"xla\":\n timeList = list(torch.ones(\n (self.comm_size,) + timeElapsedTensor.shape,\n dtype=timeElapsedTensor.dtype,\n device=timeElapsedTensor.device,\n ).unbind(0))\n collectiveArgs.opTensor = timeList\n\n collectiveArgs.ipTensor = timeElapsedTensor\n collectiveArgs.asyncOp = False\n collectiveArgs.dataSize = (\n timeElapsedTensor.nelement() * timeElapsedTensor.element_size()\n )\n 
collectiveArgs.numElements = timeElapsedTensor.nelement()\n\n # use allgather as all process group should support it\n self.backendFuncs.all_gather(collectiveArgs)\n self.backendFuncs.complete_accel_ops(collectiveArgs)\n\n return timeList\n\n def printPreamble(self, commsParams):\n logger.debug(f\"\\tcommsParams: {str(commsParams.__dict__)}\")\n header = \"\\n\\tCOMMS-RES\"\n if self.collectiveArgs.collective == \"pt2pt\":\n header += \"{:>15}{:>20}{:>10}{:>10}{:>25}{:>10}{:>10}{:>15}{:>15}{:>18}{:>18}\".format(\n \"size (B)\",\n \"pingLatency(us):p50\",\n \"p75\",\n \"p95\",\n \"pingPongLatency(us):p50\",\n \"p75\",\n \"p95\",\n \"avgUniBW(GB/s)\",\n \"avgBiBW(GB/s)\",\n \"totalUniBW(GB/s)\",\n \"totalBiBW(GB/s)\",\n )\n else:\n if commsParams.bitwidth < 32:\n header += \"-QUANT\\t{:>15}{:>18}{:>25}{:>15}{:>15}{:>15}\".format(\n \"size (B)\",\n \"nElementsPerRank\",\n \"P95 Latency(us): Quant\",\n \"Comms\",\n \"De-Quant\",\n \"Overall\",\n )\n elif not self.collectiveArgs.pair:\n header += (\n \"{:>15}{:>18}{:>18}{:>12}{:>12}{:>12}{:>12}{:>15}{:>12}\".format(\n \"size (B)\",\n \"nElementsPerRank\",\n \"Latency(us):p50\",\n \"p75\",\n \"p95\",\n \"Min\",\n \"Max\",\n \"AlgBW(GB/s)\",\n \"BusBW(GB/s)\",\n )\n )\n else:\n header += \"{:>15}{:>18}{:>22}{:>18}{:>12}{:>12}{:>12}{:>12}{:>15}{:>12}\".format(\n \"total-size (B)\",\n \"nElementsPerRank\",\n \"nElementsPairPerRank\",\n \"Latency(us):p50\",\n \"p75\",\n \"p95\",\n \"Min\",\n \"Max\",\n \"AlgBW(GB/s)\",\n \"BusBW(GB/s)\",\n )\n\n print(header)\n\n def reportBenchTimeCollWithQuant(\n self,\n commsParams,\n results,\n tensorList,\n quantTimeTensorList,\n dequantTimeTensorList,\n ):\n if commsParams.backend == \"xla\":\n latencyAcrossRanks = torch.transpose(tensorList.view(-1, 1), 0, 1)[0]\n latencyAcrossRanks = latencyAcrossRanks.cpu().detach().numpy()\n # quant tensor\n quantLatencyAcrossRanks = torch.transpose(\n quantTimeTensorList.view(-1, 1), 0, 1\n )[0]\n quantLatencyAcrossRanks = quantLatencyAcrossRanks.cpu().detach().numpy()\n # dequant tensor\n dequantLatencyAcrossRanks = torch.transpose(\n dequantTimeTensorList.view(-1, 1), 0, 1\n )[0]\n dequantLatencyAcrossRanks = dequantLatencyAcrossRanks.cpu().detach().numpy()\n else:\n if isinstance(tensorList, list):\n tensorList = [t.cpu().detach().numpy() for t in tensorList]\n latencyAcrossRanks = np.array(tensorList)\n # quant tensor\n quantLatencyAcrossRanks = np.array(quantTimeTensorList)\n # dequant tensor\n dequantLatencyAcrossRanks = np.array(dequantTimeTensorList)\n\n p95 = np.percentile(latencyAcrossRanks, 95)\n\n quant_p95 = np.percentile(quantLatencyAcrossRanks, 95)\n dequant_p95 = np.percentile(dequantLatencyAcrossRanks, 95)\n\n print(\n \"\\tCOMMS-RES-QUANT\\t{:>15}{:>18}{:>25}{:>15}{:>15}{:>15}\".format(\n results[\"memSize\"],\n str(\"%d\" % (results[\"numElements\"])),\n str(\"%.1f\" % (quant_p95)),\n str(\"%.1f\" % (p95 - quant_p95 - dequant_p95)),\n str(\"%.1f\" % (dequant_p95)),\n str(\"%.1f\" % (p95)),\n # str(\"%.3f\" % (algBW)),\n # str(\"%.3f\" % (busBW)),\n )\n )\n\n def reportBenchTime(\n self,\n commsParams,\n results,\n tensorList,\n quantTimeTensorList,\n dequantTimeTensorList,\n ):\n # convernt num_elements to # of elements per rank\n if commsParams.collective in (\"all_to_all\", \"all_to_allv\"):\n results[\"numElements\"] = int(\n results[\"numElements\"] // commsParams.comms_world_info.world_size\n )\n\n if commsParams.collective == \"pt2pt\":\n self.reportBenchTimePt2Pt(commsParams, tensorList, results)\n elif commsParams.bitwidth < 32:\n 
self.reportBenchTimeCollWithQuant(\n commsParams,\n results,\n tensorList,\n quantTimeTensorList,\n dequantTimeTensorList,\n )\n else:\n self.reportBenchTimeColl(commsParams, results, tensorList)\n\n def reportBenchTimeColl(self, commsParams, results, tensorList):\n if commsParams.backend == \"xla\":\n latencyAcrossRanks = torch.transpose(tensorList.view(-1, 1), 0, 1)[0]\n latencyAcrossRanks = latencyAcrossRanks.cpu().detach().numpy()\n else:\n if isinstance(tensorList, list):\n tensorList = [t.cpu().detach().numpy() for t in tensorList]\n latencyAcrossRanks = np.array(tensorList)\n\n logger.debug(f\"Latency across all ranks: {latencyAcrossRanks}\")\n\n # Include only communicating ranks\n if self.collectiveArgs.collective == \"multicast\":\n commRanks = [self.collectiveArgs.srcOrDst] + self.collectiveArgs.dst_ranks\n elif self.collectiveArgs.collective == \"incast\":\n commRanks = [self.collectiveArgs.srcOrDst] + self.collectiveArgs.src_ranks\n else:\n commRanks = range(self.collectiveArgs.world_size)\n\n latencyAcrossCommRanks = latencyAcrossRanks[commRanks]\n logger.debug(\n \"Latency across communicating ranks (%s): %s\"\n % (commRanks, latencyAcrossCommRanks)\n )\n\n p50 = np.percentile(latencyAcrossCommRanks, 50)\n p75 = np.percentile(latencyAcrossCommRanks, 75)\n p95 = np.percentile(latencyAcrossCommRanks, 95)\n minlat = np.amin(latencyAcrossCommRanks)\n maxlat = np.amax(latencyAcrossCommRanks)\n\n # adjust busBW\n busBW = results[\"busBW\"] * (commsParams.bitwidth / 32.0)\n\n if not self.collectiveArgs.pair:\n print(\n \"\\tCOMMS-RES{:>15}{:>18}{:>18}{:>12}{:>12}{:>12}{:>12}{:>15}{:>12}\".format(\n results[\"memSize\"],\n str(\"%d\" % (results[\"numElements\"])),\n str(\"%.1f\" % (p50)),\n str(\"%.1f\" % (p75)),\n str(\"%.1f\" % (p95)),\n str(\"%.1f\" % (minlat)),\n str(\"%.1f\" % (maxlat)),\n str(\"%.3f\" % (results[\"algBW\"])),\n str(\"%.3f\" % (busBW)),\n )\n )\n else:\n # convernt to # of elements per rank\n if commsParams.collective_pair in (\"all_to_all\", \"all_to_allv\"):\n results[\"numElements_pair\"] = int(\n results[\"numElements_pair\"]\n // commsParams.comms_world_info.world_size\n )\n print(\n \"\\tCOMMS-RES{:>15}{:>18}{:>22}{:>18}{:>12}{:>12}{:>12}{:>12}{:>15}{:>12}\".format(\n results[\"memSize\"],\n str(\"%d\" % (results[\"numElements\"])),\n str(\"%d\" % (results[\"numElements_pair\"])),\n str(\"%.1f\" % (p50)),\n str(\"%.1f\" % (p75)),\n str(\"%.1f\" % (p95)),\n str(\"%.1f\" % (minlat)),\n str(\"%.1f\" % (maxlat)),\n str(\"%.3f\" % (results[\"algBW\"])),\n str(\"%.3f\" % (busBW)),\n )\n )\n\n def reportBenchTimePt2Pt(self, commsParams, resultsAcrossRanks, results):\n pingLatencyAcrossRanks = []\n pingPongLatencyAcrossRanks = []\n uniBWAcrossRanks = []\n biBWAcrossRanks = []\n # idx = 0\n for curRankTensor in resultsAcrossRanks:\n pingLatencyAcrossRanks.append(curRankTensor[0].item())\n pingPongLatencyAcrossRanks.append(curRankTensor[1].item())\n uniBWAcrossRanks.append(curRankTensor[2].item())\n biBWAcrossRanks.append(curRankTensor[3].item())\n\n pingLatencyAcrossRanks = np.array(pingLatencyAcrossRanks)\n pingPongLatencyAcrossRanks = np.array(pingPongLatencyAcrossRanks)\n uniBWAcrossRanks = np.array(uniBWAcrossRanks)\n biBWAcrossRanks = np.array(biBWAcrossRanks)\n\n # Include only communicating ranks\n commRanks = self.collectiveArgs.src_ranks + self.collectiveArgs.dst_ranks\n pingLatencyAcrossCommRanks = pingLatencyAcrossRanks[commRanks]\n pingPongLatencyAcrossCommRanks = pingPongLatencyAcrossRanks[commRanks]\n uniBWAcrossCommRanks = 
uniBWAcrossRanks[commRanks]\n biBWAcrossCommRanks = biBWAcrossRanks[commRanks]\n\n logger.debug(\n \"Ping latency across communicating ranks (%s): %s\"\n % (commRanks, pingLatencyAcrossCommRanks)\n )\n logger.debug(\n \"PingPong latency across communicating ranks (%s): %s\"\n % (commRanks, pingPongLatencyAcrossCommRanks)\n )\n logger.debug(\n \"UniBW across all communicating ranks (%s): %s\"\n % (commRanks, uniBWAcrossCommRanks)\n )\n logger.debug(\n \"BiBW across all communicating ranks (%s): %s\"\n % (commRanks, biBWAcrossCommRanks)\n )\n\n avgUniBW = np.mean(uniBWAcrossCommRanks)\n avgBiBW = np.mean(biBWAcrossCommRanks)\n totalUniBW = np.sum(uniBWAcrossCommRanks) / 2\n totalBiBW = np.sum(biBWAcrossCommRanks) / 2\n\n ping_p50 = np.percentile(pingLatencyAcrossCommRanks, 50)\n ping_p75 = np.percentile(pingLatencyAcrossCommRanks, 75)\n ping_p95 = np.percentile(pingLatencyAcrossCommRanks, 95)\n\n ping_pong_p50 = np.percentile(pingPongLatencyAcrossCommRanks, 50)\n ping_pong_p75 = np.percentile(pingPongLatencyAcrossCommRanks, 75)\n ping_pong_p95 = np.percentile(pingPongLatencyAcrossCommRanks, 95)\n\n print(\n \"\\tCOMMS-RES{:>15}{:>20}{:>10}{:>10}{:>25}{:>10}{:>10}{:>15}{:>15}{:>18}{:>18}\".format(\n results[\"memSize\"],\n str(\"%.1f\" % (ping_p50)),\n str(\"%.1f\" % (ping_p75)),\n str(\"%.1f\" % (ping_p95)),\n str(\"%.1f\" % (ping_pong_p50)),\n str(\"%.1f\" % (ping_pong_p75)),\n str(\"%.1f\" % (ping_pong_p95)),\n str(\"%.3f\" % (avgUniBW)),\n str(\"%.3f\" % (avgBiBW)),\n str(\"%.3f\" % (totalUniBW)),\n str(\"%.3f\" % (totalBiBW)),\n )\n )\n\n def benchTime(self, index, commsParams, backendFuncs):\n # Get NW stack specific parameters\n (\n local_rank,\n global_rank,\n world_size,\n group,\n curDevice,\n curHwDevice,\n allSizes,\n computeFunc,\n ) = self.initCollectiveArgs(commsParams)\n\n backendFuncs.sync_barrier(self.collectiveArgs)\n if global_rank == 0:\n self.printPreamble(commsParams)\n\n for curSize in allSizes:\n results = {}\n timeUsElapsedList = []\n quantTimeElapsedList = []\n dequantTimeElapsedList = []\n numElements = int(curSize // commsParams.element_size)\n collectiveFunc = self.backendFuncs.noop\n collectiveFunc_pair = self.backendFuncs.noop\n\n if (\n commsParams.mode != \"compute\"\n ): # comms specific initializations if not in compute-only mode\n # set corresponding function pointers\n if commsParams.collective != \"pt2pt\":\n collectiveFunc = backendFuncs.collectiveFunc[commsParams.collective]\n\n (\n self.collectiveArgs.ipTensor,\n self.collectiveArgs.opTensor,\n ) = self.prepComm(\n curComm={\n \"in_msg_size\": numElements,\n \"out_msg_size\": numElements,\n \"world_size\": world_size,\n },\n commsParams=commsParams,\n )\n\n # Setup the arguments.\n self.collectiveArgs.dataSize = curSize\n self.collectiveArgs.numElements = numElements\n self.collectiveArgs.waitObj = []\n results[\"numElements\"] = numElements\n\n if (\n commsParams.pair and commsParams.mode != \"compute\"\n ): # comms-pair specific initializations if not in compute-only mode:\n # set corresponding function pointers\n collectiveFunc_pair = backendFuncs.collectiveFunc[\n commsParams.collective_pair\n ]\n # TODO: allow user to set specific size\n # Setup the arguments.\n self.collectiveArgs.dataSize_pair = curSize\n self.collectiveArgs.numElements_pair = int(\n self.collectiveArgs.dataSize_pair // commsParams.element_size\n )\n results[\"numElements_pair\"] = self.collectiveArgs.numElements_pair\n (\n self.collectiveArgs.ipTensor_pair,\n self.collectiveArgs.opTensor_pair,\n ) = self.prepComm(\n 
curComm={\n \"in_msg_size\": self.collectiveArgs.numElements_pair,\n \"out_msg_size\": self.collectiveArgs.numElements_pair,\n \"world_size\": world_size,\n },\n commsParams=commsParams,\n )\n\n # self.collectiveArgs has all the information on the experiment.\n if commsParams.collective == \"pt2pt\":\n results.update(self.runPt2Pt())\n\n timeUsElapsedList = [\n np.mean(np.array(results[\"pingPerIterNS\"])) / 1e3,\n np.mean(np.array(results[\"pingPongPerIterNS\"])) / 1e3,\n results[\"avgUniBW\"],\n results[\"avgBiBW\"],\n ] # time in US\n if (\n global_rank in self.collectiveArgs.src_ranks\n or global_rank in self.collectiveArgs.dst_ranks\n ):\n logger.debug(timeUsElapsedList)\n else:\n results.update(\n self.runColl(\n comm_fn=collectiveFunc,\n compute_fn=computeFunc,\n comm_fn_pair=collectiveFunc_pair,\n )\n )\n timeUsElapsedList = [results[\"timeUS\"]]\n\n # perfom data validation check on the final opTensor\n if commsParams.dcheck == 1:\n self.dcheck(commsParams, curSize, self.collectiveArgs.opTensor)\n\n backendFuncs.clear_memory(self.collectiveArgs)\n\n # gather quantization overhead if enabled\n if commsParams.bitwidth < 32:\n # calculate average (de-)quantization overhead\n results[\"quantTimeUS\"] = (\n self.collectiveArgs.quant_time.getTimeUS()\n / self.collectiveArgs.numIters\n )\n results[\"dequantTimeUS\"] = (\n self.collectiveArgs.dequant_time.getTimeUS()\n / self.collectiveArgs.numIters\n )\n quantTimeElapsedList.append(results[\"quantTimeUS\"])\n dequantTimeElapsedList.append(results[\"dequantTimeUS\"])\n\n logger.debug(quantTimeElapsedList)\n quantTimeElapsedList = self.gatherBenchTime(\n self.collectiveArgs, commsParams, quantTimeElapsedList\n )\n dequantTimeElapsedList = self.gatherBenchTime(\n self.collectiveArgs, commsParams, dequantTimeElapsedList\n )\n\n # gather and report performance to stdout\n tensorList = self.gatherBenchTime(\n self.collectiveArgs, commsParams, timeUsElapsedList\n )\n if global_rank == 0:\n self.reportBenchTime(\n commsParams,\n results,\n tensorList,\n quantTimeElapsedList,\n dequantTimeElapsedList,\n )\n\n self.backendFuncs.sync_barrier(\n self.collectiveArgs, desc=f\"curSize_{curSize}\"\n )\n\n comms_utils.clearQuantCommCtx(self.collectiveArgs)\n\n # wait rank 0 reports results to avoid other ranks mess up the output\n self.backendFuncs.sync_barrier(self.collectiveArgs, \"benchtime\")\n\n def runBench(self, comms_world_info, commsParams):\n # Init the desired backend\n if commsParams.nw_stack == \"pytorch-dist\":\n from pytorch_dist_backend import PyTorchDistBackend\n\n backendObj = PyTorchDistBackend(comms_world_info, commsParams)\n elif commsParams.nw_stack == \"pytorch-xla-tpu\":\n from pytorch_tpu_backend import PyTorchTPUBackend\n\n backendObj = PyTorchTPUBackend(comms_world_info, commsParams)\n else:\n logger.error(\"Unsupported NW stack! \")\n comms_utils.gracefulExit()\n\n self.backendFuncs = backendObj\n try:\n backendObj.benchmark_comms()\n except ValueError as ve:\n if commsParams.backend == \"ucc\":\n logger.critical(\"PyTorch UCC not implemented? 
{}\".format(repr(ve)))\n raise\n\n\ndef main():\n collBenchObj = commsCollBench()\n\n ### parse arguments ###\n parser = argparse.ArgumentParser(\n description=\"PARAM-Comm Benchmark\",\n formatter_class=MultilineFormatter,\n )\n args, leftovers = collBenchObj.readArgs(parser)\n\n collBenchObj.checkArgs(args)\n\n comms_env_params = comms_utils.read_comms_env_vars()\n if comms_env_params[\"global_rank\"] == 0:\n print(\"\\t MPI environment: %s \" % (str(comms_env_params)))\n print(\n \"\\t backend: %s nw-stack: %s mode: %s args.b: %d args.e: %d args.f: %d args.z: %s args.master_ip: %s \"\n % (\n args.backend,\n args.nw_stack,\n args.mode,\n args.b,\n args.e,\n args.f,\n args.z,\n args.master_ip,\n )\n )\n\n element_size = torch.ones([1], dtype=args.dtype).element_size()\n comms_world_info = comms_utils.comms_world_info_holder(\n args.master_ip, args.master_port, args.num_tpu_cores, comms_env_params\n )\n\n commsParams = comms_utils.commsParamsHolder(\n args, comms_world_info, element_size, collBenchObj.benchTime\n )\n\n if args.pair and args.overlap_pair_pgs:\n commsParams.num_pgs = 2\n collBenchObj.runBench(comms_world_info, commsParams)\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.sum",
"torch.ones",
"torch.FloatTensor",
"numpy.ones",
"numpy.cumsum",
"numpy.amin",
"numpy.random.rand",
"numpy.amax",
"torch.LongTensor",
"numpy.array",
"numpy.percentile",
"numpy.mean",
"numpy.random.randint"
]
] |
rjgpinel/mime-release | [
"26a850c4ba5b702b86d068995614163338fb01df"
] | [
"mime/agent/script_agent_augmented.py"
] | [
"import itertools\nimport types\nimport numpy as np\n\nimport torch\nimport click\nimport gym\nimport time\nimport yaml\n\nfrom robos2r.model import build_model\nfrom .agent import Agent\nfrom .script_agent import ScriptAgent, make_noised\nfrom .utils import Rate\nfrom PIL import Image\nfrom pathlib import Path\nfrom einops import rearrange\nfrom torchvision import transforms as T\n\n\[email protected](help=\"script_agent env_name [options]\")\[email protected](\"env_name\", type=str)\[email protected](\"-s\", \"--seed\", default=0, help=\"seed\")\[email protected](\"-t\", \"--times-repeat\", default=1, help=\"times to repeat the script\")\[email protected](\"-n\", \"--add-noise\", is_flag=True, help=\"adding noise to actions or not\")\[email protected](\n \"-sc\",\n \"--skill-collection/--no-skill-collection\",\n is_flag=True,\n help=\"whether to show the skills collection\",\n)\ndef main(env_name, seed, times_repeat, add_noise, skill_collection):\n print(\"Loading Augmentor model...\")\n diffaug_model_path = \"/home/rgarciap/Remote/models/diffs2r_new/resnet_adam_lr_1e-3_lraug0.01_bs_64_L8/\"\n diffaug_model_path = Path(diffaug_model_path)\n diffaug_cfg_path = diffaug_model_path / \"config.yml\"\n\n with open(str(diffaug_cfg_path), \"rb\") as f:\n diffaug_cfg = yaml.load(f, Loader=yaml.FullLoader)\n\n model_cfg = dict(\n name=\"diffaug\",\n reg_output_size=3,\n aug_pipeline=diffaug_cfg[\"aug_pipeline\"],\n multi=diffaug_cfg[\"multi_pipeline\"],\n num_layers=diffaug_cfg[\"num_layers\"],\n gumbel=diffaug_cfg[\"gumbel\"],\n backbone_name=diffaug_cfg[\"backbone_name\"],\n )\n diffaug_model = build_model(model_cfg)\n diffaug_ckp_path = diffaug_model_path / \"best_checkpoint.pth\"\n checkpoint = torch.load(str(diffaug_ckp_path), map_location=\"cpu\")\n diffaug_model.load_state_dict(checkpoint[\"model\"])\n augmentor = diffaug_model.augmentor\n augmentor.to(\"cpu\")\n augmentor.eval()\n print(\"Model loaded\")\n\n env = gym.make(env_name)\n scene = env.unwrapped.scene\n scene.renders(True)\n if skill_collection:\n scene.skill_data_collection = True\n env.seed(seed)\n for _ in range(times_repeat):\n obs = env.reset()\n\n agent = ScriptAgent(env)\n import matplotlib.pyplot as plt\n\n done = False\n i = 0\n rate = Rate(scene.dt)\n action = agent.get_action()\n if add_noise:\n make_noised(action)\n frames = []\n j = 0\n while not done and action is not None:\n obs, reward, done, info = env.step(action)\n\n im = T.ToTensor()(obs[\"rgb0\"]).unsqueeze(0)\n mask = torch.tensor(obs[\"mask0\"]).unsqueeze(0)\n\n im, mask = augmentor((im, mask))\n im = rearrange(im.detach().detach().squeeze(0).numpy(), \"c h w -> h w c\")\n im = Image.fromarray((im * 255).astype(np.uint8))\n im.save(f\"0/output{j}.jpeg\")\n j += 1\n action = agent.get_action()\n if add_noise and action is not None:\n make_noised(action)\n\n if action is None:\n info[\"failure_message\"] = \"End of Script.\"\n if not info[\"success\"]:\n click.secho(\n \"Failure Seed {}: {}\".format(seed, info[\"failure_message\"]), fg=\"red\"\n )\n\n print(\"Success\", info[\"success\"])\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"torch.tensor"
]
] |
zhengxiawu/Transformer | [
"8cad013913254ea4e06c4a8d460d9f2cf42df086"
] | [
"Embed.py"
] | [
"import torch\nimport torch.nn as nn\nimport math\nfrom torch.autograd import Variable\n\n\nclass Embedder(nn.Module):\n def __init__(self, vocab_size, d_model):\n super().__init__()\n self.d_model = d_model\n self.embed = nn.Embedding(vocab_size, d_model)\n\n def forward(self, x):\n return self.embed(x)\n\n\nclass PositionalEncoder(nn.Module):\n def __init__(self, d_model, max_seq_len=200, dropout=0.1):\n super().__init__()\n self.d_model = d_model\n self.dropout = nn.Dropout(dropout)\n # create constant 'pe' matrix with values dependant on\n # pos and i\n pe = torch.zeros(max_seq_len, d_model)\n for pos in range(max_seq_len):\n for i in range(0, d_model, 2):\n pe[pos, i] = \\\n math.sin(pos / (10000 ** ((2 * i)/d_model)))\n pe[pos, i + 1] = \\\n math.cos(pos / (10000 ** ((2 * (i + 1))/d_model)))\n pe = pe.unsqueeze(0)\n self.register_buffer('pe', pe)\n\n def forward(self, x):\n # make embeddings relatively larger\n x = x * math.sqrt(self.d_model)\n # add constant to embedding\n seq_len = x.size(1)\n pe = Variable(self.pe[:, :seq_len], requires_grad=False)\n if x.is_cuda:\n pe.cuda()\n x = x + pe\n return self.dropout(x)\n"
] | [
[
"torch.zeros",
"torch.autograd.Variable",
"torch.nn.Embedding",
"torch.nn.Dropout"
]
] |
ds4dm/GraphRL | [
"b5b1519f6dd92b401625d51add9ae5829004a30b"
] | [
"rl/train_a2c_mc.py"
] | [
"import torch\nimport torch.optim as optm\nimport torch.nn.functional as F\n\nimport numpy as np\nfrom torch.autograd import Variable\nfrom torch.utils.data import Dataset, DataLoader\nfrom data.graph import Graph\nfrom collections import namedtuple\n\nSavedAction = namedtuple('SavedAction', ['log_prob', 'value_current'])\n\n# Mont Carlo methods\nclass TrainModel_MC:\n\n def __init__(self, model, train_dataset, val_dataset, max_grad_norm=2, use_cuda=False):\n self.model = model\n self.train_dataset = train_dataset\n self.val_dataset = val_dataset\n self.max_grad_norm = max_grad_norm\n self.use_cuda = use_cuda\n\n self.train_loader = DataLoader(train_dataset, shuffle=True, num_workers=1, batch_size=1, collate_fn=lambda x: x)\n self.val_loader = DataLoader(val_dataset, shuffle=True, num_workers=1, batch_size=1, collate_fn=lambda x: x)\n\n self.epochs = 0\n self.beta = 0.9\n self.eps = np.finfo(np.float32).eps.item()\n\n\n def train_and_validate(self, n_epochs, lr_actor, lr_critic, gamma=0.99, use_critic=True):\n\n self.actor_optim = optm.Adam(self.model.actor.parameters(), lr=lr_actor)\n\n print(use_critic)\n if use_critic:\n self.critic_optim = optm.Adam(self.model.critic.parameters(), lr=lr_critic)\n self.critic_loss_criterion = torch.nn.MSELoss()\n else:\n baseline = torch.zeros(1)\n if self.use_cuda:\n baseline = baseline.cuda()\n\n for epoch in range(1):\n\n n_graphs_proceed = 0\n for X in self.train_loader:\n for x in X:\n\n self.model.train()\n ratio_gcn2mind = []\n ratio_gcn2rand = []\n\n for epoch in range(n_epochs):\n\n rewards_mindegree = 0 # number of added edges\n rewards_random = 0\n x_mind = Graph(x.M)\n x_rand = Graph(x.M)\n x_rl = Graph(x.M)\n\n # loop for training while eliminating a graph iteratively\n for i in range(x.n - 2):\n\n # baseline1: compute return of min degree\n if i % 100 == 0:\n print('iterations {}'.format(i))\n node_mind, d_min = x_mind.min_degree(x_mind.M)\n rewards_mindegree += x_mind.eliminate_node(node_mind, reduce=True)\n\n # baseline2: compute return of random\n rewards_random += x_rand.eliminate_node(np.random.randint(low=0, high=x_rand.n), reduce=True)\n\n # call actor-critic model\n\n action, log_prob, reward, value_current, value_next, x_rl = self.model(x_rl) # forward propagation,action: node selected, reward: nb edges added\n self.model.rewards.append(reward)\n self.model.actions.append(action)\n self.model.saved_actions.append(SavedAction(log_prob, value_current))\n\n R = 0\n actor_losses = []\n critic_losses = []\n returns = []\n\n # compute sampled return for each step\n for r in self.model.rewards[::-1]:\n R = r + gamma * R\n returns.insert(0, R)\n returns = torch.tensor(returns)\n returns = (returns - returns.mean()) / (returns.std() + self.eps)\n saved_actions = self.model.saved_actions\n # compute cummulated loss of actor and critic of one graph\n for (log_prob, value_current), R in zip(saved_actions, returns):\n if use_critic:\n advantage = R - value_current\n critic_losses.append(-value_current* advantage)\n # critic_losses.append(self.critic_loss_criterion(value_current, torch.Tensor([R.detach()])))\n else:\n advantage = R - baseline\n actor_losses.append(log_prob * advantage.detach()) # the return here is discounted nb of added edges,\n # hence, it actually represents loss\n # step update of actor\n self.actor_optim.zero_grad()\n actor_loss = torch.stack(actor_losses).sum()\n actor_loss.backward(retain_graph=True)\n self.actor_optim.step()\n\n # step update of critic\n if use_critic:\n self.critic_optim.zero_grad()\n critic_closs = 
torch.stack(critic_losses).sum()\n critic_closs.backward()\n self.critic_optim.step()\n else:\n baseline = baseline.detach()\n\n rewards_gcn = sum(self.model.rewards)\n\n _ratio_gcn2mind = rewards_gcn / rewards_mindegree\n _ratio_gcn2rand = rewards_gcn / rewards_random\n\n print('graph {:04d}'.format(n_graphs_proceed), 'epoch {:04d}'.format(epoch),\n 'gcn2mind ratio {}'.format(_ratio_gcn2mind),\n 'value {}'.format(saved_actions[0].value_current),\n 'R {}'.format(returns[0]))\n print('graph {:04d}'.format(n_graphs_proceed), 'epoch {:04d}'.format(epoch),\n 'gcn2rand ratio {}'.format(_ratio_gcn2rand))\n\n ratio_gcn2mind.append(_ratio_gcn2mind)\n ratio_gcn2rand.append(_ratio_gcn2rand)\n del self.model.rewards[:]\n del self.model.actions[:]\n del self.model.saved_actions[:]\n\n ratio_gcn2mind = np.array(ratio_gcn2mind).reshape(-1)\n ratio_gcn2rand = np.array(ratio_gcn2rand).reshape(-1)\n\n min_ratio_gcn2mind = np.min(ratio_gcn2mind)\n max_ratio_gcn2mind = np.max(ratio_gcn2mind)\n av_ratio_gcn2mind = np.sum(ratio_gcn2mind)/ n_epochs\n\n min_ratio_gcn2rand = np.min(ratio_gcn2rand)\n max_ratio_gcn2rand = np.max(ratio_gcn2rand)\n av_ratio_gcn2rand = np.sum(ratio_gcn2rand) / n_epochs\n\n print('graph {:04d}'.format(n_graphs_proceed), 'gcn2mind{:04d}',\n 'min_ratio {}'.format(min_ratio_gcn2mind),\n 'max_ratio {}'.format(max_ratio_gcn2mind),\n 'av_ratio {}'.format(av_ratio_gcn2mind))\n print('graph {:04d}'.format(n_graphs_proceed), 'gcn2rand{:04d}',\n 'min_ratio {}'.format(min_ratio_gcn2rand),\n 'max_ratio {}'.format(max_ratio_gcn2rand),\n 'av_ratio {}'.format(av_ratio_gcn2rand),\n 'nb graph proceeded {}'.format(n_graphs_proceed))\n\n n_graphs_proceed += len(X)\n\n # ratio_gcn2mind = np.array(ratio_gcn2mind).reshape(-1)\n # ratio_gcn2rand = np.array(ratio_gcn2rand).reshape(-1)\n #\n # total_ratio_gcn2mind = np.sum(ratio_gcn2mind)\n # total_ratio_gcn2rand = np.sum(ratio_gcn2rand)\n #\n # min_ratio_gcn2mind = np.min(ratio_gcn2mind)\n # max_ratio_gcn2mind = np.max(ratio_gcn2mind)\n # av_ratio_gcn2mind = total_ratio_gcn2mind / n_graphs_proceed\n #\n # min_ratio_gcn2rand = np.min(ratio_gcn2rand)\n # max_ratio_gcn2rand = np.max(ratio_gcn2rand)\n # av_ratio_gcn2rand = total_ratio_gcn2rand / n_graphs_proceed\n #\n # print('epoch {:04d}'.format(epoch), 'gcn2mind{:04d}',\n # 'min_ratio {}'.format(min_ratio_gcn2mind),\n # 'max_ratio {}'.format(max_ratio_gcn2mind),\n # 'av_ratio {}'.format(av_ratio_gcn2mind))\n # print('epoch {:04d}'.format(epoch), 'gcn2rand{:04d}',\n # 'min_ratio {}'.format(min_ratio_gcn2rand),\n # 'max_ratio {}'.format(max_ratio_gcn2rand),\n # 'av_ratio {}'.format(av_ratio_gcn2rand),\n # 'nb graph proceeded {}'.format(n_graphs_proceed))\n"
] | [
[
"torch.utils.data.DataLoader",
"numpy.sum",
"torch.stack",
"torch.nn.MSELoss",
"torch.tensor",
"numpy.max",
"numpy.min",
"torch.zeros",
"numpy.finfo",
"numpy.random.randint",
"numpy.array"
]
] |
JeremieMelo/ADEPT | [
"f79f518197798735cb684b373e11cdcc8a80d872"
] | [
"unitest/test_supermesh.py"
] | [
"'''\nDescription:\nAuthor: Jiaqi Gu ([email protected])\nDate: 2021-09-27 23:48:01\nLastEditors: Jiaqi Gu ([email protected])\nLastEditTime: 2022-02-26 02:22:52\n'''\nimport torch\nfrom core.models.layers.super_mesh import super_layer_name_dict\n\ndef test():\n device=torch.device(\"cuda:0\")\n p, q, k = 2, 2, 4\n x = torch.eye(k, dtype=torch.cfloat, device=device).unsqueeze(0).repeat(q,1,1).permute(1,0,2).contiguous()\n sigma = torch.ones(p,q,k, device=device)\n # x [bs, q, k]\n\n arch = dict(\n n_waveguides=k,\n n_front_share_waveguides=k,\n n_front_share_ops=k,\n n_blocks=4,\n n_layers_per_block=2,\n n_front_share_blocks=2,\n share_ps=\"row_col\",\n interleave_dc=True,\n )\n sample_arch = [\n k//3,1,\n k//2,1,\n k//2,1,\n k//2,1,\n 4\n ]\n layer = super_layer_name_dict[\"ps_dc_cr\"](arch, device=device)\n super_ps_layers = layer.build_ps_layser(grid_dim_x=q, grid_dim_y=p)\n for m in super_ps_layers:\n # m.reset_parameters(alg=\"identity\")\n m.reset_parameters(alg=\"uniform\")\n layer.set_sample_arch(sample_arch)\n print(layer)\n layer.set_identity_cr()\n layer.build_sampling_coefficients()\n layer.set_gumbel_temperature(0.1)\n layer.set_aux_skip_path(0)\n layer.build_arch_mask()\n U,V = layer.get_UV(super_ps_layers, q, p)\n print(U, U.size())\n print(U[0,0].conj().t().matmul(U[0,0]))\n print(V)\n print(V[0,0].conj().t().matmul(V[0,0]))\n weight = layer.get_weight_matrix(super_ps_layers, sigma)\n print(weight)\n weight.sum().backward()\n print(super_ps_layers[0].weight.grad.norm(p=2))\n print(layer.super_layers_all[0].weight.grad.norm(p=2))\n\n print(layer.super_layers_all[1].weight.grad.norm(p=2))\n\n\nif __name__ == \"__main__\":\n test()\n"
] | [
[
"torch.ones",
"torch.eye",
"torch.device"
]
] |
nbortolotti/tflite-tpu-experiences | [
"8f613e059335d1d90886282f005261917fd9cfd3"
] | [
"inference_exploration/cpu/main.py"
] | [
"import os\nimport numpy as np\nimport PIL.Image as Image\nimport matplotlib.pylab as plt\nimport time\n\nimport tensorflow as tf\nimport tensorflow_hub as hub\nfrom tensorflow.keras import layers\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\n\ndef image_analysis(classifier, image_shape, img_array):\n result = classifier.predict(img_array[np.newaxis, ...])\n # result.shape\n\n predicted_class = np.argmax(result[0], axis=-1)\n return predicted_class\n\n\ndef main():\n classifier_url = \"https://tfhub.dev/google/imagenet/mobilenet_v2_100_224/classification/4\"\n image_shape = (224, 224)\n classifier = tf.keras.Sequential([\n hub.KerasLayer(classifier_url, input_shape=image_shape + (3,))\n ])\n\n img_file = tf.keras.utils.get_file('image.jpg', 'https://storage.googleapis.com/demostration_images/2.jpg')\n img = Image.open(img_file).resize(image_shape)\n\n img_array = np.array(img) / 255.0\n # img_array.shape\n\n predicted_class = image_analysis(classifier, image_shape, img_array)\n\n labels_path = tf.keras.utils.get_file('ImageNetLabels.txt',\n 'https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt')\n imagenet_labels = np.array(open(labels_path).read().splitlines())\n #\n # plt.imshow(img_array)\n # plt.axis('off')\n # predicted_class_name = imagenet_labels[predicted_class]\n # _ = plt.title(\"Prediction: \" + predicted_class_name.title())\n # plt.show()\n for _ in range(5):\n inferenceTime(img_array, classifier)\n\n\n# explore time to do the inference\ndef inferenceTime(image, mClassifier):\n start = time.time()\n result = mClassifier.predict(image[np.newaxis, ...])\n end = time.time()\n print((end - start)*1000) #milliseconds\n\n # predicted_class = np.argmax(result[0], axis=-1)\n # predicted_class_name = mLabels[predicted_class]\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.array",
"tensorflow.keras.utils.get_file",
"numpy.argmax"
]
] |
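This row pairs a TF-Hub MobileNet inference-timing script with the three APIs listed for it (`numpy.array`, `tensorflow.keras.utils.get_file`, `numpy.argmax`). The snippet below is an editor-added, offline illustration of the `argmax` step from `image_analysis`; the dummy `result` array is an assumption standing in for `classifier.predict(...)` output of shape `(1, num_classes)`, so the example runs without downloading the model or labels.

```python
import numpy as np

# Editor-added sketch: `result` is a stand-in (assumed shape (1, num_classes)) for the
# output of classifier.predict(img_array[np.newaxis, ...]) in the stored script.
result = np.array([[0.05, 0.80, 0.15]])
predicted_class = np.argmax(result[0], axis=-1)
print(predicted_class)  # 1 -> index into the ImageNet label list fetched with get_file()
```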
FaceThePirate/pyspeckit | [
"734b9f81d440ca3a6db9bf68e9409dbddb52d08b"
] | [
"pyspeckit/spectrum/readers/read_class.py"
] | [
"\"\"\"\n------------------------\nGILDAS CLASS file reader\n------------------------\n\nRead a CLASS file into an :class:`pyspeckit.spectrum.ObsBlock`\n\"\"\"\nfrom __future__ import print_function\nfrom six.moves import xrange\nfrom six import iteritems\nimport six\nimport astropy.io.fits as pyfits\nimport numpy\nimport numpy as np\nfrom numpy import pi\nfrom astropy import log\n# from astropy.time import Time\nfrom astropy import units as u\nimport pyspeckit\nimport sys\nimport re\ntry:\n from astropy.utils.console import ProgressBar\nexcept ImportError:\n ProgressBar = lambda x: None\n ProgressBar.update = lambda x: None\nimport struct\n\nimport time\n\n# 'range' is needed as a keyword\nirange = range\n\ndef print_timing(func):\n \"\"\"\n Prints execution time of decorated function.\n Included here because CLASS files can take a little while to read;\n this should probably be replaced with a progressbar\n \"\"\"\n def wrapper(*arg,**kwargs):\n t1 = time.time()\n res = func(*arg,**kwargs)\n t2 = time.time()\n log.info('%s took %0.5g s' % (func.__name__, (t2-t1)))\n return res\n wrapper.__doc__ = func.__doc__\n return wrapper\n\ndef ensure_bytes(string):\n \"\"\"\n Ensure a given string is in byte form\n \"\"\"\n if six.PY3:\n return bytes(string, 'utf-8')\n else:\n return str(string)\n\n\"\"\" Specification: http://iram.fr/IRAMFR/GILDAS/doc/html/class-html/node58.html \"\"\"\nfiletype_dict = {'1A ':'Multiple_IEEE',\n '1 ':'Multiple_Vax',\n '1B ':'Multiple_EEEI',\n '2A ':'v2',\n '2 ':'v2',\n '2B ':'v2',\n '9A ':'Single_IEEE',\n '9 ':'Single_Vax',\n '9B ':'Single_EEEI'}\nfor key in list(filetype_dict.keys()):\n filetype_dict[ensure_bytes(key)] = filetype_dict[key]\n\nfileversion_dict = {'1A ':'v1',\n '2A ':'v2',\n '9A ':'v1', # untested\n }\nfor key in list(fileversion_dict.keys()):\n fileversion_dict[ensure_bytes(key)] = fileversion_dict[key]\n\nrecord_lengths = {'1A': 512,\n '2A': 1024*4}\n\nheader_id_numbers = {0: 'USER CODE',\n -1: 'COMMENT',\n -2: 'GENERAL',\n -3: 'POSITION',\n -4: 'SPECTRO',\n -5: 'BASELINE',\n -6: 'HISTORY',\n -7: 'UNKNOWN-APEX',\n # -8: 'SWITCH',\n -9: 'GAUSSFIT', # \"private\"; see class-interfaces-private.f90\n -10: 'DRIFT',\n -11: 'BEAMSWITCH', # \"private\"; see class-interfaces-private.f90\n -12: 'SHELLFIT', # \"private\"; see class-interfaces-private.f90\n -13: 'NH3FIT', # \"private\"; see class-interfaces-private.f90\n -14: 'CALIBRATION',\n -18: 'ABSFIT', # \"private\"; see class-interfaces-private.f90\n }\n\nheader_id_lengths = {-2: 9, # may really be 10?\n -3: 17,\n -4: 17,\n -5: None, # variable length\n -6: 3, # variable length\n -14: 25,\n }\n\n# from packages/classic/lib/classic_mod.f90\nfiledescv2_nw1=14\n\n\n\"\"\"\nGENERAL\n integer(kind=obsnum_length) :: num ! [ ] Observation number\n integer(kind=4) :: ver ! [ ] Version number\n integer(kind=4) :: teles(3) ! [ ] Telescope name\n integer(kind=4) :: dobs ! [MJD-60549] Date of observation\n integer(kind=4) :: dred ! [MJD-60549] Date of reduction\n integer(kind=4) :: typec ! [ code] Type of coordinates\n integer(kind=4) :: kind ! [ code] Type of data\n integer(kind=4) :: qual ! [ code] Quality of data\n integer(kind=4) :: subscan ! [ ] Subscan number\n integer(kind=obsnum_length) :: scan ! [ ] Scan number\n ! Written in the entry\n real(kind=8) :: ut ! 1-2 [ rad] UT of observation\n real(kind=8) :: st ! 3-4 [ rad] LST of observation\n real(kind=4) :: az ! 5 [ rad] Azimuth\n real(kind=4) :: el ! 6 [ rad] Elevation\n real(kind=4) :: tau ! 7 [neper] Opacity\n real(kind=4) :: tsys ! 
8 [ K] System temperature\n real(kind=4) :: time ! 9 [ s] Integration time\n ! Not in this section in file\n integer(kind=4) :: xunit ! [ code] X unit (if X coordinates section is present)\n ! NOT in data ---\n character(len=12) :: cdobs ! [string] Duplicate of dobs\n character(len=12) :: cdred ! [string] Duplicate of dred\n\n\"\"\"\n\nkeys_lengths = {\n 'unknown': [\n #('NUM' ,1,'int32'), # Observation number\n ('VER' ,1,'int32'), # Version number\n ('TELES' ,3,'|S12') , # Telescope name\n ('DOBS' ,1,'int32'), # Date of observation\n ('DRED' ,1,'int32'), # Date of reduction\n ('TYPEC' ,1,'int32'), # Type of coordinates\n ('KIND' ,1,'int32'), # Type of data\n ('QUAL' ,1,'int32'), # Quality of data\n ('SCAN' ,1,'int32'), # Scan number\n ('SUBSCAN' ,1,'int32'), # Subscan number\n ],\n\n 'COMMENT': [ # -1\n ('LTEXT',1,'int32'), # integer(kind=4) :: ltext ! Length of comment\n ('CTEXT',1024//4,'|S1024'), # character ctext*1024 ! Comment string\n ],\n\n 'GENERAL': [ # -2\n ('UT' ,2,'float64'), # rad UT of observation\n ('ST' ,2,'float64'), # rad LST of observation\n ('AZ' ,1,'float32'), # rad Azimuth\n ('EL' ,1,'float32'), # rad Elevation\n ('TAU' ,1,'float32'), # neper Opacity\n ('TSYS' ,1,'float32'), # K System temperature\n ('TIME' ,1,'float32'), # s Integration time\n # XUNIT should not be there?\n #( 'XUNIT' ,1,'int32'), # code X unit (if xcoord_sec is present)\n ] ,\n 'POSITION': [ # -3\n ('SOURC',3,'|S12') , # [ ] Source name\n ('EPOCH',1,'float32'), # [ ] Epoch of coordinates\n ('LAM' ,2,'float64'), #[rad] Lambda\n ('BET' ,2,'float64'), #[rad] Beta\n ('LAMOF',1,'float32'), # [rad] Offset in Lambda\n ('BETOF',1,'float32'), # [rad] Offset in Beta\n ('PROJ' ,1,'int32') , # [rad] Projection system\n ('SL0P' ,1,'float64'), # lambda of descriptive system # MAY NOT EXIST IN OLD CLASS\n ('SB0P' ,1,'float64'), # beta of descriptive system # MAY NOT EXIST IN OLD CLASS\n ('SK0P' ,1,'float64'), # angle of descriptive system # MAY NOT EXIST IN OLD CLASS\n ],\n 'SPECTRO': [ # -4\n #('align' ,1,'int32'), # [ ] Alignment padding\n ('LINE' ,3,'|S12'), # [ ] Line name\n ('RESTF' ,2,'float64'), # [ MHz] Rest frequency\n ('NCHAN' ,1,'int32'), # [ ] Number of channels\n ('RCHAN' ,1,'float32'), # [ ] Reference channels\n ('FRES' ,1,'float32'), # [ MHz] Frequency resolution\n ('FOFF' ,1,'float32'), # [ MHz] Frequency offset\n ('VRES' ,1,'float32'), # [km/s] Velocity resolution\n ('VOFF' ,1,'float32'), # [km/s] Velocity at reference channel\n ('BAD' ,1,'float32'), # [ ] Blanking value\n #('ALIGN_1',1,'int32'), # [ ] Alignment padding\n ('IMAGE' ,2,'float64'), # [ MHz] Image frequency\n #('ALIGN_2',1,'int32'), # [ ] Alignment padding\n ('VTYPE' ,1,'int32'), # [code] Type of velocity\n ('DOPPLER',2,'float64'), # [ ] Doppler factor = -V/c (CLASS convention)\n ],\n 'CALIBRATION': [ # -14\n ('ALIGN',1,'int32'), # BUFFER (it's a zero - it is not declared in the docs!!!!)\n ('BEEFF',1,'float32'), # [ ] Beam efficiency\n ('FOEFF',1,'float32'), # [ ] Forward efficiency\n ('GAINI',1,'float32'), # [ ] Image/Signal gain ratio\n ('H2OMM',1,'float32'), # [ mm] Water vapor content\n ('PAMB',1,'float32'), # [ hPa] Ambient pressure\n ('TAMB',1,'float32'), # [ K] Ambient temperature\n ('TATMS',1,'float32'), # [ K] Atmosphere temp. in signal band\n ('TCHOP',1,'float32'), # [ K] Chopper temperature\n ('TCOLD',1,'float32'), # [ K] Cold load temperature\n ('TAUS',1,'float32'), # [neper] Opacity in signal band\n ('TAUI',1,'float32'), # [neper] Opacity in image band\n ('TATMI',1,'float32'), # [ K] Atmosphere temp. 
in image band\n ('TREC',1,'float32'), # [ K] Receiver temperature\n ('CMODE',1,'int32'), # [ code] Calibration mode\n ('ATFAC',1,'float32'), # [ ] Applied calibration factor\n ('ALTI',1,'float32'), # [ m] Site elevation\n ('COUNT',3,'3float32'), # [count] Power of Atm., Chopp., Cold\n ('LCALOF',1,'float32'), # [ rad] Longitude offset for sky measurement\n ('BCALOF',1,'float32'), # [ rad] Latitude offset for sky measurement\n ('GEOLONG',1,'float64'), # [ rad] Geographic longitude of observatory # MAY NOT EXIST IN OLD CLASS\n ('GEOLAT',1,'float64'), # [ rad] Geographic latitude of observatory # MAY NOT EXIST IN OLD CLASS\n ],\n 'BASELINE':[\n ('DEG',1,'int32'), #! [ ] Degree of last baseline\n ('SIGFI',1,'float32'), #! [Int. unit] Sigma\n ('AIRE',1,'float32'), #! [Int. unit] Area under windows\n ('NWIND',1,'int32'), #! [ ] Number of line windows\n # WARNING: These should probably have 'n', the second digit, = NWIND\n # The docs are really unclear about this, they say \"W1(MWIND)\"\n ('W1MWIND',1,'float32'), #! [km/s] Lower limits of windows\n ('W2MWIND',1,'float32'), #! [km/s] Upper limits of windows\n ('SINUS',3,'float32'), #![] Sinus baseline results\n ],\n\n 'DRIFT':[ # 16?\n ('FREQ',1,'float64') , #! [ MHz] Rest frequency real(kind=8) ::\n ('WIDTH',1,'float32'), #! [ MHz] Bandwidth real(kind=4) ::\n ('NPOIN',1,'int32') , #! [ ] Number of data points integer(kind=4) ::\n ('RPOIN',1,'float32'), #! [ ] Reference point real(kind=4) ::\n ('TREF',1,'float32') , #! [ ?] Time at reference real(kind=4) ::\n ('AREF',1,'float32') , #! [ rad] Angular offset at ref. real(kind=4) ::\n ('APOS',1,'float32') , #! [ rad] Position angle of drift real(kind=4) ::\n ('TRES',1,'float32') , #! [ ?] Time resolution real(kind=4) ::\n ('ARES',1,'float32') , #! [ rad] Angular resolution real(kind=4) ::\n ('BAD',1,'float32') , #! [ ] Blanking value real(kind=4) ::\n ('CTYPE',1,'int32') , #! [code] Type of offsets integer(kind=4) ::\n ('CIMAG',1,'float64'), #! [ MHz] Image frequency real(kind=8) ::\n ('COLLA',1,'float32'), #! [ ?] Collimation error Az real(kind=4) ::\n ('COLLE',1,'float32'), #! [ ?] 
Collimation error El real(kind=4) ::\n ],\n\n }\n\ndef _read_bytes(f, n):\n '''Read the next `n` bytes (from idlsave)'''\n return f.read(n)\n\n\"\"\"\nWarning: UNCLEAR what endianness should be!\nNumpy seemed to get it right, and I think numpy assumes NATIVE endianness\n\"\"\"\n\ndef _read_byte(f):\n '''Read a single byte (from idlsave)'''\n return numpy.uint8(struct.unpack('=B', f.read(4)[:1])[0])\n\ndef _read_int16(f):\n '''Read a signed 16-bit integer (from idlsave)'''\n return numpy.int16(struct.unpack('=h', f.read(4)[2:4])[0])\n\ndef _read_int32(f):\n '''Read a signed 32-bit integer (from idlsave)'''\n return numpy.int32(struct.unpack('=i', f.read(4))[0])\n\ndef _read_int64(f):\n '''Read a signed 64-bit integer '''\n return numpy.int64(struct.unpack('=q', f.read(8))[0])\n\ndef _read_float32(f):\n '''Read a 32-bit float (from idlsave)'''\n return numpy.float32(struct.unpack('=f', f.read(4))[0])\n\ndef _align_32(f):\n '''Align to the next 32-bit position in a file (from idlsave)'''\n\n pos = f.tell()\n if pos % 4 != 0:\n f.seek(pos + 4 - pos % 4)\n return\n\ndef _read_word(f,length):\n if length > 0:\n chars = _read_bytes(f, length)\n _align_32(f)\n else:\n chars = None\n return chars\n\ndef _read_int(f):\n return struct.unpack('i',f.read(4))\n\ndef is_ascii(s):\n \"\"\"Check if there are non-ascii characters in Unicode string\n\n Parameters\n ----------\n s : str\n The string to be checked\n\n Returns\n -------\n is_ascii : bool\n Returns True if all characters in the string are ascii. False\n otherwise.\n \"\"\"\n return len(s) == len(s.decode('ascii').encode('utf-8'))\n\ndef is_all_null(s):\n return all(x=='\\x00' for x in s) or all(x==b'\\x00' for x in s)\n\n\n\"\"\"\nfrom clic_file.f90: v1, v2\n integer(kind=4) :: bloc ! 1 : observation address [records] integer(kind=8) :: bloc ! 1- 2: observation address [records] integer(kind=4) :: bloc ! 1 : block read from index\n integer(kind=4) :: num ! 2 : observation number integer(kind=4) :: word ! 3 : address offset [4-bytes] integer(kind=4) :: num ! 2 : number read\n integer(kind=4) :: ver ! 3 : observation version integer(kind=4) :: ver ! 4 : observation version integer(kind=4) :: ver ! 3 : version read from index\n integer(kind=4) :: sourc(3) ! 4- 6: source name integer(kind=8) :: num ! 5- 6: observation number character(len=12) :: csour ! 4- 6: source read from index\n integer(kind=4) :: line(3) ! 7- 9: line name integer(kind=4) :: sourc(3) ! 7- 9: source name character(len=12) :: cline ! 7- 9: line read from index\n integer(kind=4) :: teles(3) ! 10-12: telescope name integer(kind=4) :: line(3) ! 10-12: line name character(len=12) :: ctele ! 10-12: telescope read from index\n integer(kind=4) :: dobs ! 13 : observation date [class_date] integer(kind=4) :: teles(3) ! 13-15: telescope name integer(kind=4) :: dobs ! 13 : date obs. read from index\n integer(kind=4) :: dred ! 14 : reduction date [class_date] integer(kind=4) :: dobs ! 16 : observation date [class_date] integer(kind=4) :: dred ! 14 : date red. read from index\n real(kind=4) :: off1 ! 15 : lambda offset [radian] integer(kind=4) :: dred ! 17 : reduction date [class_date] real(kind=4) :: off1 ! 15 : read offset 1\n real(kind=4) :: off2 ! 16 : beta offset [radian] real(kind=4) :: off1 ! 18 : lambda offset [radian] real(kind=4) :: off2 ! 16 : read offset 2\n integer(kind=4) :: typec ! 17 : coordinates types real(kind=4) :: off2 ! 19 : beta offset [radian] integer(kind=4) :: type ! 17 : type of read offsets\n integer(kind=4) :: kind ! 18 : data kind integer(kind=4) :: typec ! 
20 : coordinates types integer(kind=4) :: kind ! 18 : type of observation\n integer(kind=4) :: qual ! 19 : data quality integer(kind=4) :: kind ! 21 : data kind integer(kind=4) :: qual ! 19 : Quality read from index\n integer(kind=4) :: scan ! 20 : scan number integer(kind=4) :: qual ! 22 : data quality integer(kind=4) :: scan ! 20 : Scan number read from index\n integer(kind=4) :: proc ! 21 : procedure type integer(kind=4) :: scan ! 23 : scan number real(kind=4) :: posa ! 21 : Position angle\n integer(kind=4) :: itype ! 22 : observation type integer(kind=4) :: proc ! 24 : procedure type integer(kind=4) :: subscan ! 22 : Subscan number\n real(kind=4) :: houra ! 23 : hour angle [radian] integer(kind=4) :: itype ! 25 : observation type integer(kind=4) :: pad(10) ! 23-32: Pad to 32 words\n integer(kind=4) :: project ! 24 : project name real(kind=4) :: houra ! 26 : hour angle [radian]\n integer(kind=4) :: pad1 ! 25 : unused word integer(kind=4) :: project(2) ! 27 : project name\n integer(kind=4) :: bpc ! 26 : baseline bandpass cal status integer(kind=4) :: bpc ! 29 : baseline bandpass cal status\n integer(kind=4) :: ic ! 27 : instrumental cal status integer(kind=4) :: ic ! 30 : instrumental cal status\n integer(kind=4) :: recei ! 28 : receiver number integer(kind=4) :: recei ! 31 : receiver number\n real(kind=4) :: ut ! 29 : UT [s] real(kind=4) :: ut ! 32 : UT [s]\n integer(kind=4) :: pad2(3) ! 30-32: padding to 32 4-bytes word\n\nequivalently\n\n integer(kind=obsnum_length) :: num ! [ ] Observation number\n integer(kind=4) :: ver ! [ ] Version number\n integer(kind=4) :: teles(3) ! [ ] Telescope name\n integer(kind=4) :: dobs ! [MJD-60549] Date of observation\n integer(kind=4) :: dred ! [MJD-60549] Date of reduction\n integer(kind=4) :: typec ! [ code] Type of coordinates\n integer(kind=4) :: kind ! [ code] Type of data\n integer(kind=4) :: qual ! [ code] Quality of data\n integer(kind=4) :: subscan ! [ ] Subscan number\n integer(kind=obsnum_length) :: scan ! [ ] Scan number\n\"\"\"\n\n\"\"\"\nindex.f90:\n\n call conv%read%i8(data(1), indl%bloc, 1) ! bloc\n call conv%read%i4(data(3), indl%word, 1) ! word\n call conv%read%i8(data(4), indl%num, 1) ! num\n call conv%read%i4(data(6), indl%ver, 1) ! ver\n call conv%read%cc(data(7), indl%csour, 3) ! csour\n call conv%read%cc(data(10),indl%cline, 3) ! cline\n call conv%read%cc(data(13),indl%ctele, 3) ! ctele\n call conv%read%i4(data(16),indl%dobs, 1) ! dobs\n call conv%read%i4(data(17),indl%dred, 1) ! dred\n call conv%read%r4(data(18),indl%off1, 1) ! off1\n call conv%read%r4(data(19),indl%off2, 1) ! off2\n call conv%read%i4(data(20),indl%type, 1) ! type\n call conv%read%i4(data(21),indl%kind, 1) ! kind\n call conv%read%i4(data(22),indl%qual, 1) ! qual\n call conv%read%r4(data(23),indl%posa, 1) ! posa\n call conv%read%i8(data(24),indl%scan, 1) ! scan\n call conv%read%i4(data(26),indl%subscan,1) ! subscan\n if (isv3) then\n call conv%read%r8(data(27),indl%ut, 1) ! 
ut\n else\n\"\"\"\n\ndef _read_indices(f, file_description):\n #if file_description['version'] in (1,2):\n # extension_positions = (file_description['aex']-1)*file_description['reclen']*4\n # all_indices = {extension:\n # [_read_index(f,\n # filetype=file_description['version'],\n # entry=ii,\n # #position=position,\n # )\n # for ii in range(file_description['lex1'])]\n # for extension,position in enumerate(extension_positions)\n # if position > 0\n # }\n\n #elif file_description['version'] == 1:\n extension_positions = ((file_description['aex'].astype('int64')-1)\n *file_description['reclen']*4)\n all_indices = [_read_index(f,\n filetype=file_description['version'],\n # 1-indexed files\n entry_number=ii+1,\n file_description=file_description,\n )\n for ii in range(file_description['xnext']-1)]\n #else:\n # raise ValueError(\"Invalid file version {0}\".format(file_description['version']))\n\n\n return all_indices\n\n\ndef _find_index(entry_number, file_description, return_position=False):\n if file_description['gex'] == 10:\n kex=(entry_number-1)//file_description['lex1'] + 1\n else:\n # exponential growth:\n #kex = gi8_dicho(file_description['nex'], file_description['lexn'], entry_number) - 1\n kex = len([xx for xx in file_description['lexn'] if xx<entry_number])\n\n ken = entry_number - file_description['lexn'][kex-1]\n #! Find ken (relative entry number in the extension, starts from 1)\n #ken = entry_num - file%desc%lexn(kex-1)\n\n kb = ((ken-1)*file_description['lind'])//file_description['reclen']\n #kb = ((ken-1)*file%desc%lind)/file%desc%reclen ! In the extension, the\n # ! relative record position (as an offset, starts from 0) where the\n # ! Entry Index starts. NB: there can be a non-integer number of Entry\n # ! Indexes per record\n\n # Subtract 1: 'aex' is 1-indexed\n kbl = (file_description['aex'][kex-1]+kb)-1\n # kbl = file%desc%aex(kex)+kb ! The absolute record number where the Entry Index goes\n\n k = ((ken-1)*file_description['lind']) % file_description['reclen']\n #k = mod((ken-1)*file%desc%lind,file%desc%reclen)+1 ! = in the record, the\n # ! 
first word of the Entry Index of the entry number 'entry_num'\n\n\n if return_position:\n return (kbl*file_description['reclen']+k)*4\n else:\n return kbl,k\n\n\ndef _read_index(f, filetype='v1', DEBUG=False, clic=False, position=None,\n entry_number=None, file_description=None):\n\n if position is not None:\n f.seek(position)\n if entry_number is not None:\n indpos = _find_index(entry_number, file_description, return_position=True)\n f.seek(indpos)\n\n x0 = f.tell()\n\n if filetype in ('1A ','v1', 1):\n log.debug('Index filetype 1A')\n index = {\n \"XBLOC\":_read_int32(f),\n \"XNUM\":_read_int32(f),\n \"XVER\":_read_int32(f),\n \"XSOURC\":_read_word(f,12),\n \"XLINE\":_read_word(f,12),\n \"XTEL\":_read_word(f,12),\n \"XDOBS\":_read_int32(f),\n \"XDRED\":_read_int32(f),\n \"XOFF1\":_read_float32(f),# \t first offset (real, radians)\n \"XOFF2\":_read_float32(f),# \t second offset (real, radians)\n \"XTYPE\":_read_int32(f),# \t coordinate system ('EQ'', 'GA', 'HO')\n \"XKIND\":_read_int32(f),# \t Kind of observation (0: spectral, 1: continuum, )\n \"XQUAL\":_read_int32(f),# \t Quality (0-9)\n \"XSCAN\":_read_int32(f),# \t Scan number\n }\n index['BLOC'] = index['XBLOC'] # v2 compatibility\n index['WORD'] = 1 # v2 compatibility\n index['SOURC'] = index['CSOUR'] = index['XSOURC']\n index['DOBS'] = index['CDOBS'] = index['XDOBS']\n index['CTELE'] = index['XTEL']\n index['LINE'] = index['XLINE']\n index['OFF1'] = index['XOFF1']\n index['OFF2'] = index['XOFF2']\n index['QUAL'] = index['XQUAL']\n index['SCAN'] = index['XSCAN']\n index['KIND'] = index['XKIND']\n if clic: # use header set up in clic\n nextchunk = {\n \"XPROC\":_read_int32(f),# \"procedure type\"\n \"XITYPE\":_read_int32(f),#\n \"XHOURANG\":_read_float32(f),#\n \"XPROJNAME\":_read_int32(f),#\n \"XPAD1\":_read_int32(f),\n \"XBPC\" :_read_int32(f),\n \"XIC\" :_read_int32(f),\n \"XRECEI\" :_read_int32(f),\n \"XUT\":_read_float32(f),\n \"XPAD2\":numpy.fromfile(f,count=3,dtype='int32') # BLANK is NOT ALLOWED!!! It is a special KW\n }\n else:\n nextchunk = {\"XPOSA\":_read_float32(f),\n \"XSUBSCAN\":_read_int32(f),\n 'XPAD2': numpy.fromfile(f,count=10,dtype='int32'),\n }\n nextchunk['SUBSCAN'] = nextchunk['XSUBSCAN']\n nextchunk['POSA'] = nextchunk['XPOSA']\n index.update(nextchunk)\n if (f.tell() - x0 != 128):\n missed_bits = (f.tell()-x0)\n X = f.read(128-missed_bits)\n if DEBUG: print(\"read_index missed %i bits: %s\" % (128-missed_bits,X))\n #raise IndexError(\"read_index did not successfully read 128 bytes at %i. Read %i bytes.\" % (x0,f.tell()-x0))\n if any(not is_ascii(index[x]) for x in ('XSOURC','XLINE','XTEL')):\n raise ValueError(\"Invalid index read from {0}.\".format(x0))\n elif filetype in ('2A ','v2', 2):\n log.debug('Index filetype 2A')\n index = {\n \"BLOC\" : _read_int64(f) , #(data(1), 1) ! bloc\n \"WORD\" : _read_int32(f) , #(data(3), 1) ! word\n \"NUM\" : _read_int64(f) , #(data(4), 1) ! num\n \"VER\" : _read_int32(f) , #(data(6), 1) ! ver\n \"CSOUR\" : _read_word(f,12), #(data(7), 3) ! csour\n \"CLINE\" : _read_word(f,12), #(data(10), 3) ! cline\n \"CTELE\" : _read_word(f,12), #(data(13), 3) ! ctele\n \"DOBS\" : _read_int32(f) , #(data(16), 1) ! dobs\n \"DRED\" : _read_int32(f) , #(data(17), 1) ! dred\n \"OFF1\" : _read_float32(f), #(data(18), 1) ! off1\n \"OFF2\" : _read_float32(f), #(data(19), 1) ! off2\n \"TYPE\" : _read_int32(f) , #(data(20), 1) ! type\n \"KIND\" : _read_int32(f) , #(data(21), 1) ! kind\n \"QUAL\" : _read_int32(f) , #(data(22), 1) ! qual\n \"POSA\" : _read_float32(f), #(data(23), 1) ! 
posa\n \"SCAN\" : _read_int64(f) , #(data(24), 1) ! scan\n \"SUBSCAN\": _read_int32(f) , #(data(26), 1) ! subscan\n }\n #last24bits = f.read(24)\n #log.debug(\"Read 24 bits: '{0}'\".format(last24bits))\n if any((is_all_null(index[x]) or not is_ascii(index[x]))\n for x in ('CSOUR','CLINE','CTELE')):\n raise ValueError(\"Invalid index read from {0}.\".format(x0))\n index['SOURC'] = index['XSOURC'] = index['CSOUR']\n index['LINE'] = index['XLINE'] = index['CLINE']\n index['XKIND'] = index['KIND']\n try:\n index['DOBS'] = index['XDOBS'] = index['CDOBS']\n except KeyError:\n index['CDOBS'] = index['XDOBS'] = index['DOBS']\n\n else:\n raise NotImplementedError(\"Filetype {0} not implemented.\".format(filetype))\n\n # from kernel/lib/gsys/date.f90: gag_julda\n index['MJD'] = index['DOBS'] + 60549\n class_dobs = index['DOBS']\n index['DOBS'] = ((class_dobs + 365*2025)/365.2425 + 1)\n # SLOW\n #index['DATEOBS'] = Time(index['DOBS'], format='jyear')\n #index['DATEOBSS'] = index['DATEOBS'].iso\n\n log.debug(\"Indexing finished at {0}\".format(f.tell()))\n return index\n\ndef _read_header(f, type=0, position=None):\n \"\"\"\n Read a header entry from a CLASS file\n (helper function)\n \"\"\"\n if position is not None:\n f.seek(position)\n if type in keys_lengths:\n hdrsec = [(x[0],numpy.fromfile(f,count=1,dtype=x[2])[0])\n for x in keys_lengths[type]]\n return dict(hdrsec)\n else:\n return {}\n raise ValueError(\"Unrecognized type {0}\".format(type))\n\ndef _read_first_record(f):\n f.seek(0)\n filetype = f.read(4)\n if fileversion_dict[filetype] == 'v1':\n return _read_first_record_v1(f)\n elif fileversion_dict[filetype] == 'v2':\n return _read_first_record_v2(f)\n else:\n raise ValueError(\"Unrecognized filetype {0}\".format(filetype))\n\ndef _read_first_record_v1(f, record_length_words=128):\n r\"\"\"\n Position & Parameter & Fortran Kind & Purpose \\\\\n \\hline\n 1 & {\\tt code} & Character*4 & File code \\\\\n 2 & {\\tt next} & Integer*4 & Next free record \\\\\n 3 & {\\tt lex} & Integer*4 & Length of first extension (number of entries) \\\\\n 4 & {\\tt nex} & Integer*4 & Number of extensions \\\\\n 5 & {\\tt xnext} & Integer*4 & Next available entry number \\\\\n 6:2*{\\tt reclen} & {\\tt ex(:)} & Integer*4 & Array of extension addresses\n\n from classic_mod.f90:\n integer(kind=4) :: code ! 1 File code\n integer(kind=4) :: next ! 2 Next free record\n integer(kind=4) :: lex ! 3 Extension length (number of entries)\n integer(kind=4) :: nex ! 4 Number of extensions\n integer(kind=4) :: xnext ! 5 Next available entry number\n integer(kind=4) :: aex(mex_v1) ! 
6:256 Extension addresses\n\n from old (<dec2013) class, file.f90:\n read(ilun,rec=1,err=11,iostat=ier) ibx%code,ibx%next, &\n & ibx%ilex,ibx%imex,ibx%xnext\n\n also uses filedesc_v1tov2 from classic/lib/file.f90\n \"\"\"\n\n# OLD NOTES\n# hdr = header\n# hdr.update(obshead) # re-overwrite things\n# hdr.update({'OBSNUM':obsnum,'RECNUM':spcount})\n# hdr.update({'RA':hdr['LAM']/pi*180,'DEC':hdr['BET']/pi*180})\n# hdr.update({'RAoff':hdr['LAMOF']/pi*180,'DECoff':hdr['BETOF']/pi*180})\n# hdr.update({'OBJECT':hdr['SOURC'].strip()})\n# hdr.update({'BUNIT':'Tastar'})\n# hdr.update({'EXPOSURE':hdr['TIME']})\n\n\n f.seek(0)\n file_description = {\n 'code': f.read(4),\n 'next': _read_int32(f),\n 'lex': _read_int32(f),\n 'nex': _read_int32(f),\n 'xnext': _read_int32(f),\n 'gex': 10.,\n 'vind': 1, # classic_vind_v1 packages/classic/lib/classic_mod.f90\n 'version': 1,\n 'nextrec': 3,\n 'nextword': 1,\n 'lind': 32, #classic_lind_v1 packages/classic/lib/classic_mod.f90\n 'kind': 'unknown',\n 'flags': 0,\n }\n file_description['reclen'] = record_length_words # should be 128w = 512 bytes\n ex = np.fromfile(f, count=(record_length_words*2-5), dtype='int32')\n file_description['ex'] = ex[ex!=0]\n file_description['nextrec'] = file_description['next'] # this can't be...\n file_description['lex1'] = file_description['lex'] # number of entries\n file_description['lexn'] = (np.arange(file_description['nex']+1) *\n file_description['lex1'])\n file_description['nentries'] = np.sum(file_description['lexn'])\n file_description['aex'] = file_description['ex'][:file_description['nex']]\n #file_description['version'] = fileversion_dict[file_description['code']]\n assert f.tell() == 1024\n # Something is not quite right with the 'ex' parsing\n #assert len(file_description['ex']) == file_description['nex']\n return file_description\n\ndef _read_first_record_v2(f):\n r\"\"\" packages/classic/lib/file.f90\n Position & Parameter & Fortran Kind & Purpose & Unit \\\\\n \\hline\n 1 & {\\tt code} & Character*4 & File code & - \\\\\n 2 & {\\tt reclen} & Integer*4 & Record length & words \\\\\n 3 & {\\tt kind} & Integer*4 & File kind & - \\\\\n 4 & {\\tt vind} & Integer*4 & Index version & - \\\\\n 5 & {\\tt lind} & Integer*4 & Index length & words \\\\\n 6 & {\\tt flags} & Integer*4 & Bit flags. \\#1: single or multiple, & - \\\\\n & & & \\#2-32: provision (0-filled) & \\\\\n \\hline\n 7:8 & {\\tt xnext} & Integer*8 & Next available entry number & - \\\\\n 9:10 & {\\tt nextrec} & Integer*8 & Next record which contains free space & record \\\\\n 11 & {\\tt nextword} & Integer*4 & Next free word in this record & word \\\\\n \\hline\n 12 & {\\tt lex1} & Integer*4 & Length of first extension index & entries \\\\\n 13 & {\\tt nex} & Integer*4 & Number of extensions & - \\\\\n 14 & {\\tt gex} & Integer*4 & Extension growth rule & - \\\\\n 15:{\\tt reclen} & {\\tt aex(:)} & Integer*8 & Array of extension addresses & record\n \"\"\"\n f.seek(0)\n file_description = {\n 'code': f.read(4),\n 'reclen': _read_int32(f),\n 'kind': _read_int32(f),\n 'vind': _read_int32(f),\n 'lind': _read_int32(f),\n 'flags': _read_int32(f),\n 'xnext': _read_int64(f),\n 'nextrec': _read_int64(f),\n 'nextword': _read_int32(f),\n 'lex1': _read_int32(f),\n 'nex': _read_int32(f),\n 'gex': _read_int32(f),\n }\n file_description['lexn'] = [0]\n if file_description['gex'] == 10:\n for ii in range(1, file_description['nex']+1):\n file_description['lexn'].append(file_description['lexn'][-1]+file_description['lex1'])\n else:\n #! Exponential growth. 
Only growth with mantissa 2.0 is supported\n for ii in range(1, file_description['nex']):\n # I don't know what the fortran does here!!!\n # ahh, maybe 2_8 means int(2, dtype='int64')\n nent = int(file_description['lex1'] * 2**(ii-1))\n #nent = int(file%desc%lex1,kind=8) * 2_8**(iex-1)\n file_description['lexn'].append(file_description['lexn'][-1]+nent)\n #file%desc%lexn(iex) = file%desc%lexn(iex-1) + nent\n file_description['nentries'] = np.sum(file_description['lexn'])\n record_length_words = file_description['reclen']\n aex = numpy.fromfile(f, count=(record_length_words-15)//2, dtype='int64')\n file_description['aex'] = aex[aex!=0]\n assert len(file_description['aex']) == file_description['nex']\n file_description['version'] = 2\n return file_description\n\ndef gi8_dicho(ninp,lexn,xval,ceil=True):\n \"\"\"\n ! @ public\n ! Find ival such as\n ! X(ival-1) < xval <= X(ival) (ceiling mode)\n ! or\n ! X(ival) <= xval < X(ival+1) (floor mode)\n ! for input data ordered. Use a dichotomic search for that.\n call gi8_dicho(nex,file%desc%lexn,entry_num,.true.,kex,error)\n \"\"\"\n #integer(kind=size_length), intent(in) :: np ! Number of input points\n #integer(kind=8), intent(in) :: x(np) ! Input ordered Values\n #integer(kind=8), intent(in) :: xval ! The value we search for\n #logical, intent(in) :: ceil ! Ceiling or floor mode?\n #integer(kind=size_length), intent(out) :: ival ! Position in the array\n #logical, intent(inout) :: error ! Logical error flag\n iinf = 1\n isup = ninp\n #! Ceiling mode\n while isup > (iinf+1):\n imid = int(np.floor((isup + iinf)/2.))\n if (lexn[imid-1] < xval):\n iinf = imid\n else:\n isup = imid\n ival = isup\n return ival\n\ndef _read_obshead(f, file_description, position=None, verbose=False):\n if file_description['version'] == 1:\n return _read_obshead_v1(f, position=position, verbose=verbose)\n if file_description['version'] == 2:\n return _read_obshead_v2(f, position=position)\n else:\n raise ValueError(\"Invalid file version {0}.\".\n format(file_description['version']))\n\ndef _read_obshead_v2(f, position=None):\n \"\"\"\n ! Version 2 (public)\n integer(kind=4), parameter :: entrydescv2_nw1=11 ! Number of words, in 1st part\n integer(kind=4), parameter :: entrydescv2_nw2=5 ! Number of words for 1 section in 2nd part\n type classic_entrydesc_t\n sequence\n integer(kind=4) :: code ! 1 : code observation icode\n integer(kind=4) :: version ! 2 : observation version\n integer(kind=4) :: nsec ! 3 : number of sections\n integer(kind=4) :: pad1 ! - : memory padding (not in data)\n integer(kind=8) :: nword ! 4- 5: number of words\n integer(kind=8) :: adata ! 6- 7: data address\n integer(kind=8) :: ldata ! 8- 9: data length\n integer(kind=8) :: xnum ! 10-11: entry number\n ! Out of the 'sequence' block:\n integer(kind=4) :: msec ! Not in data: maximum number of sections the\n ! Observation Index can hold\n integer(kind=4) :: pad2 ! Memory padding for 8 bytes alignment\n integer(kind=4) :: seciden(classic_maxsec) ! Section Numbers (on disk: 1 to ed%nsec)\n integer(kind=8) :: secleng(classic_maxsec) ! Section Lengths (on disk: 1 to ed%nsec)\n integer(kind=8) :: secaddr(classic_maxsec) ! Section Addresses (on disk: 1 to ed%nsec)\n end type classic_entrydesc_t\n \"\"\"\n if position is not None:\n f.seek(position)\n else:\n position = f.tell()\n IDcode = f.read(4)\n if IDcode.strip() != b'2':\n raise IndexError(\"Observation Header reading failure at {0}. 
\"\n \"Record does not appear to be an observation header.\".\n format(position))\n f.seek(position)\n\n entrydescv2_nw1 = 11\n entrydescv2_nw2 = 5\n obshead = {\n 'CODE': f.read(4),\n 'VERSION': _read_int32(f),\n 'NSEC': _read_int32(f),\n #'_blank': _read_int32(f),\n 'NWORD': _read_int64(f),\n 'ADATA': _read_int64(f),\n 'LDATA': _read_int64(f),\n 'XNUM': _read_int64(f),\n #'MSEC': _read_int32(f),\n #'_blank2': _read_int32(f),\n }\n section_numbers = np.fromfile(f, count=obshead['NSEC'], dtype='int32')\n section_lengths = np.fromfile(f, count=obshead['NSEC'], dtype='int64')\n section_addresses = np.fromfile(f, count=obshead['NSEC'], dtype='int64')\n\n return obshead['XNUM'],obshead,dict(zip(section_numbers,section_addresses))\n\ndef _read_obshead_v1(f, position=None, verbose=False):\n \"\"\"\n Read the observation header of a CLASS file\n (helper function for read_class; should not be used independently)\n \"\"\"\n if position is not None:\n f.seek(position)\n IDcode = f.read(4)\n if IDcode.strip() != b'2':\n raise IndexError(\"Observation Header reading failure at {0}. \"\n \"Record does not appear to be an observation header.\".\n format(f.tell() - 4))\n (nblocks, nbyteob, data_address, nheaders, data_length, obindex, nsec,\n obsnum) = numpy.fromfile(f, count=8, dtype='int32')\n if verbose:\n print(\"nblocks,nbyteob,data_address,data_length,nheaders,obindex,nsec,obsnum\",nblocks,nbyteob,data_address,data_length,nheaders,obindex,nsec,obsnum)\n print(\"DATA_LENGTH: \",data_length)\n\n seccodes = numpy.fromfile(f,count=nsec,dtype='int32')\n # Documentation says addresses then length: It is apparently wrong\n seclen = numpy.fromfile(f,count=nsec,dtype='int32')\n secaddr = numpy.fromfile(f,count=nsec,dtype='int32')\n if verbose:\n print(\"Section codes, addresses, lengths: \",seccodes,secaddr,seclen)\n\n hdr = {'NBLOCKS':nblocks, 'NBYTEOB':nbyteob, 'DATAADDR':data_address,\n 'DATALEN':data_length, 'NHEADERS':nheaders, 'OBINDEX':obindex,\n 'NSEC':nsec, 'OBSNUM':obsnum}\n\n #return obsnum,seccodes\n return obsnum,hdr,dict(zip(seccodes,secaddr))\n\n# THIS IS IN READ_OBSHEAD!!!\n# def _read_preheader(f):\n# \"\"\"\n# Not entirely clear what this is, but it is stuff that precedes the actual data\n#\n# Looks something like this:\n# array([ 1, -2, -3, -4, -14,\n# 9, 17, 18, 25, 55,\n# 64, 81, 99, -1179344801, 979657591,\n#\n# -2, -3, -4, -14 indicate the 4 header types\n# 9,17,18,25 *MAY* indicate the number of bytes in each\n#\n#\n# HOW is it indicated how many entries there are?\n# \"\"\"\n# # 13 comes from counting 1, -2,....99 above\n# numbers = np.fromfile(f, count=13, dtype='int32')\n# sections = [n for n in numbers if n in header_id_numbers]\n# return sections\n\ndef downsample_1d(myarr,factor,estimator=np.mean, weight=None):\n \"\"\"\n Downsample a 1D array by averaging over *factor* pixels.\n Crops right side if the shape is not a multiple of factor.\n\n This code is pure numpy and should be fast.\n\n keywords:\n estimator - default to mean. You can downsample by summing or\n something else if you want a different estimator\n (e.g., downsampling error: you want to sum & divide by sqrt(n))\n weight: np.ndarray\n An array of weights to use for the downsampling. If None,\n assumes uniform 1\n \"\"\"\n if myarr.ndim != 1:\n raise ValueError(\"Only works on 1d data. 
Says so in the title.\")\n xs = myarr.size\n crarr = myarr[:xs-(xs % int(factor))]\n if weight is None:\n dsarr = estimator(np.concatenate([[crarr[i::factor] for i in\n range(factor)]]),axis=0)\n else:\n dsarr = estimator(np.concatenate([[crarr[i::factor]*weight[i::factor] for i in\n range(factor)]]),axis=0)\n warr = estimator(np.concatenate([[weight[i::factor] for i in\n range(factor)]]),axis=0)\n dsarr = dsarr/warr\n return dsarr\n\n# unit test\ndef test_downsample1d():\n data = np.arange(10)\n weight = np.ones(10)\n weight[5]=0\n assert np.all(downsample_1d(data, 2, weight=weight, estimator=np.mean) ==\n np.array([0.5, 2.5, 4.0, 6.5, 8.5]))\n\ndef read_observation(f, obsid, file_description=None, indices=None,\n my_memmap=None, memmap=True, verbose=False):\n if isinstance(f, str):\n f = open(f,'rb')\n opened = True\n if memmap:\n my_memmap = numpy.memmap(f, offset=0, dtype='float32',\n mode='r')\n else:\n my_memmap = None\n elif my_memmap is None and memmap:\n raise ValueError(\"Must pass in a memmap object if passing in a file object.\")\n else:\n opened = False\n\n if file_description is None:\n file_description = _read_first_record(f)\n\n if indices is None:\n indices = _read_indices(f, file_description)\n\n index = indices[obsid]\n\n obs_position = (index['BLOC']-1)*file_description['reclen']*4 + (index['WORD']-1)*4\n log.debug(\"Reading observation at position {0}\".format(obs_position))\n obsnum,obshead,sections = _read_obshead(f, file_description,\n position=obs_position,\n verbose=verbose)\n header = obshead\n\n datastart = 0\n for section_id,section_address in iteritems(sections):\n # Section addresses are 1-indexed byte addresses\n # in the current \"block\"\n sec_position = obs_position + (section_address-1)*4\n temp_hdr = _read_header(f, type=header_id_numbers[section_id],\n position=sec_position)\n header.update(temp_hdr)\n datastart = max(datastart,f.tell())\n\n hdr = header\n hdr.update(obshead) # re-overwrite things\n hdr.update({'OBSNUM':obsnum,'RECNUM':obsid})\n hdr.update({'RA':hdr['LAM']/pi*180,'DEC':hdr['BET']/pi*180})\n hdr.update({'RAoff':hdr['LAMOF']/pi*180,'DECoff':hdr['BETOF']/pi*180})\n hdr.update({'OBJECT':hdr['SOURC'].strip()})\n hdr.update({'BUNIT':'Tastar'})\n hdr.update({'EXPOSURE':float(hdr['TIME'])})\n hdr['HDRSTART'] = obs_position\n hdr['DATASTART'] = datastart\n hdr.update(indices[obsid])\n # Define MJD as mid-exposure time in MJD\n hdr.update({'OBSDATE': hdr['MJD'] + hdr['UT']/2./pi})\n\n # Apparently the data are still valid in this case?\n #if hdr['XNUM'] != obsid+1:\n # log.error(\"The spectrum read was {0} but {1} was requested.\".\n # format(hdr['XNUM']-1, obsid))\n\n if hdr['KIND'] == 1: # continuum\n nchan = hdr['NPOIN']\n elif 'NCHAN' in hdr:\n nchan = hdr['NCHAN']\n else:\n log.error(\"No NCHAN in header. This is not a spectrum.\")\n import ipdb; ipdb.set_trace()\n # There may be a 1-channel offset? 
CHECK!!!\n # (changed by 1 pixel - October 14, 2014)\n # (changed back - October 21, 2014 - I think the ends are just bad, but not\n # zero.)\n f.seek(datastart-1)\n spec = _read_spectrum(f, position=datastart-1, nchan=nchan,\n memmap=memmap, my_memmap=my_memmap)\n\n if opened:\n f.close()\n\n return spec, hdr\n\ndef _read_spectrum(f, position, nchan, my_memmap=None, memmap=True):\n if position != f.tell():\n log.warning(\"Reading data from {0}, but the file is wound \"\n \"to {1}.\".format(position, f.tell()))\n if memmap:\n here = position\n #spectrum = numpy.memmap(filename, offset=here, dtype='float32',\n # mode='r', shape=(nchan,))\n spectrum = my_memmap[here//4:here//4+nchan]\n f.seek(here+nchan*4)\n else:\n f.seek(position)\n spectrum = numpy.fromfile(f,count=nchan,dtype='float32')\n\n return spectrum\n\ndef _spectrum_from_header(fileobj, header, memmap=None):\n return _read_spectrum(fileobj, position=header['DATASTART'],\n nchan=header['NCHAN'] if 'NCHAN' in hdr else hdr['NPOIN'],\n my_memmap=memmap)\n\ndef clean_header(header):\n newheader = {}\n for k in header:\n if not isinstance(header[k], (int, float, str)):\n if isinstance(header[k], np.ndarray) and header[k].size > 1:\n if header[k].size > 10:\n raise ValueError(\"Large array being put in header. That's no good. key={0}\".format(k))\n for ii,val in enumerate(header[k]):\n newheader[k[:7]+str(ii)] = val\n else:\n newheader[k[:8]] = str(header[k])\n else:\n newheader[k[:8]] = header[k]\n\n return newheader\n\nclass ClassObject(object):\n def __init__(self, filename, verbose=False):\n t0 = time.time()\n self._file = open(filename, 'rb')\n self.file_description = _read_first_record(self._file)\n self.allind = _read_indices(self._file, self.file_description)\n self._data = np.memmap(self._file, dtype='float32', mode='r')\n if verbose: log.info(\"Setting _spectra\")\n self._spectra = LazyItem(self)\n t1 = time.time()\n if verbose: log.info(\"Setting posang. t={0}\".format(t1-t0))\n self.set_posang()\n t2 = time.time()\n if verbose: log.info(\"Identifying otf scans. t={0}\".format(t2-t1))\n self._identify_otf_scans(verbose=verbose)\n t3 = time.time()\n #self._load_all_spectra()\n if verbose:\n log.info(\"Loaded CLASS object with {3} indices. 
Time breakdown:\"\n \" {0}s for indices, \"\n \"{1}s for posang, and {2}s for OTF scan identification\"\n .format(t1-t0, t2-t1, t3-t2, len(self.allind)))\n\n\n def __repr__(self):\n s = \"\\n\".join([\"{k}: {v}\".format(k=k,v=v)\n for k,v in iteritems(self.getinfo())])\n return \"ClassObject({id}) with {nspec} entries\\n\".format(id=id(self),\n nspec=len(self.allind)) + s\n\n def getinfo(self, allsources=False):\n info = dict(\n tels = self.tels,\n lines = self.lines,\n scans = self.scans,\n sources = self.sources if allsources else self.sci_sources,\n )\n return info\n\n def set_posang(self):\n h0 = self.headers[0]\n for h in self.headers:\n dx = h['OFF1'] - h0['OFF1']\n dy = h['OFF2'] - h0['OFF2']\n h['COMPPOSA'] = np.arctan2(dy,dx)*180/np.pi\n h0 = h\n\n\n def _identify_otf_scans(self, verbose=False):\n h0 = self.allind[0]\n st = 0\n otfscan = 0\n posangs = [h['COMPPOSA'] for h in self.allind]\n if verbose:\n pb = ProgressBar(len(self.allind))\n\n for ii,h in enumerate(self.allind):\n if (h['SCAN'] != h0['SCAN']\n or h['SOURC'] != h0['SOURC']):\n\n h0['FIRSTSCAN'] = st\n cpa = np.median(posangs[st:ii])\n for hh in self.allind[st:ii]:\n hh['SCANPOSA'] = cpa % 180\n st = ii\n if h['SCAN'] == h0['SCAN']:\n h0['OTFSCAN'] = otfscan\n otfscan += 1\n h['OTFSCAN'] = otfscan\n else:\n otfscan = 0\n h['OTFSCAN'] = otfscan\n else:\n h['OTFSCAN'] = otfscan\n\n if verbose:\n pb.update(ii)\n\n def listscans(self, source=None, telescope=None, out=sys.stdout):\n minid=0\n scan = -1\n sourc = \"\"\n #tel = ''\n minoff1,maxoff1 = np.inf,-np.inf\n minoff2,maxoff2 = np.inf,-np.inf\n ttlangle,nangle = 0.0,0\n print(\"{entries:15s} {SOURC:12s} {XTEL:12s} {SCAN:>8s} {SUBSCAN:>8s} \"\n \"[ {RAmin:>12s}, {RAmax:>12s} ] \"\n \"[ {DECmin:>12s}, {DECmax:>12s} ] \"\n \"{angle:>12s} {SCANPOSA:>12s} {OTFSCAN:>8s} {TSYS:>8s} {UTD:>12s}\"\n .format(entries='Scans', SOURC='Source', XTEL='Telescope',\n SCAN='Scan', SUBSCAN='Subscan',\n RAmin='min(RA)', RAmax='max(RA)',\n DECmin='min(DEC)', DECmax='max(DEC)',\n SCANPOSA='Scan PA',\n angle='Angle', OTFSCAN='OTFscan',\n TSYS='TSYS', UTD='UTD'),\n file=out)\n\n data_rows = []\n\n for ii,row in enumerate(self.headers):\n if (row['SCAN'] == scan\n and row['SOURC'] == sourc\n #and row['XTEL'] == tel\n ):\n minoff1 = min(minoff1, row['OFF1'])\n maxoff1 = max(maxoff1, row['OFF1'])\n minoff2 = min(minoff2, row['OFF2'])\n maxoff2 = max(maxoff2, row['OFF2'])\n ttlangle += np.arctan2(row['OFF2'] - prevrow['OFF2'],\n row['OFF1'] - prevrow['OFF1'])%np.pi\n nangle += 1\n prevrow = row\n\n else:\n if scan == -1:\n scan = row['SCAN']\n sourc = row['SOURC']\n #tel = row['XTEL']\n prevrow = row\n continue\n\n ok = True\n if source is not None:\n if isinstance(source, (list,tuple)):\n ok = ok and any(re.search((s), prevrow['SOURC'])\n for s in source)\n else:\n ok = ok and re.search((source), prevrow['SOURC'])\n if telescope is not None:\n ok = ok and re.search((telescope), prevrow['XTEL'])\n if ok:\n data = dict(RAmin=minoff1*180/np.pi*3600,\n RAmax=maxoff1*180/np.pi*3600,\n DECmin=minoff2*180/np.pi*3600,\n DECmax=maxoff2*180/np.pi*3600,\n angle=(ttlangle/nangle)*180/np.pi if nangle>0 else 0,\n e0=minid,\n e1=ii-1,\n #TSYS=row['TSYS'] if 'TSYS' in row else '--',\n UTD=row['DOBS']+row['UT'] if 'UT' in row else -99,\n **prevrow)\n print(\"{e0:7d}-{e1:7d} {SOURC:12s} {XTEL:12s} {SCAN:8d} {SUBSCAN:8d} \"\n \"[ {RAmin:12f}, {RAmax:12f} ] \"\n \"[ {DECmin:12f}, {DECmax:12f} ] \"\n \"{angle:12.1f} {SCANPOSA:12.1f} {OTFSCAN:8d}\"\n \" {TSYS:>8.1f} {UTD:12f}\".\n format(**data),\n file=out)\n\n 
data_rows.append(data)\n\n minoff1,maxoff1 = np.inf,-np.inf\n minoff2,maxoff2 = np.inf,-np.inf\n ttlangle,nangle = 0.0,0\n scan = row['SCAN']\n sourc = row['SOURC']\n #tel = row['XTEL']\n minid = ii\n\n return data\n\n @property\n def tels(self):\n if hasattr(self,'_tels'):\n return self._tels\n else:\n self._tels = set([h['CTELE'] for h in self.allind])\n #testing if CTELE even works\n return self._tels\n\n @property\n def sources(self):\n if hasattr(self,'_source'):\n return self._source\n else:\n self._source = set([h['SOURC'] for h in self.allind])\n return self._source\n\n @property\n def scans(self):\n if hasattr(self,'_scan'):\n return self._scan\n else:\n self._scan = set([h['SCAN'] for h in self.allind])\n return self._scan\n\n @property\n def sci_sources(self):\n return set([s for s in self.sources\n if s[:4] not in ('SKY-', 'TSYS', 'TCAL', 'TREC', 'HOT-',\n 'COLD')])\n\n @property\n def lines(self):\n if hasattr(self,'_lines'):\n return self._lines\n else:\n self._lines = set([h['LINE'] for h in self.allind])\n return self._lines\n\n def _load_all_spectra(self, indices=None):\n if indices is None:\n indices = range(self.file_description['xnext']-1)\n\n if hasattr(self, '_loaded_indices'):\n indices_set = set(indices)\n indices_to_load = (indices_set.difference(self._loaded_indices))\n self._loaded_indices = self._loaded_indices.union(indices_set)\n\n if any(indices_to_load):\n pb = ProgressBar(len(indices_to_load))\n for ii,k in enumerate(xrange(indices_to_load)):\n self._spectra[k]\n pb.update(ii)\n\n else:\n self._loaded_indices = set(indices)\n\n self._spectra.load_all()\n\n\n @property\n def spectra(self):\n return [x[0] for x in self._spectra]\n\n @property\n def headers(self):\n return [self._spectra[ii][1]\n if ii in self._spectra else x\n for ii,x in enumerate(self.allind)]\n\n def select_spectra(self,\n all=None,\n line=None,\n linere=None,\n linereflags=re.IGNORECASE,\n number=None,\n scan=None,\n offset=None,\n source=None,\n sourcere=None,\n sourcereflags=re.IGNORECASE,\n range=None,\n quality=None,\n telescope=None,\n telescopere=None,\n telescopereflags=re.IGNORECASE,\n subscan=None,\n entry=None,\n posang=None,\n #observed=None,\n #reduced=None,\n frequency=None,\n section=None,\n user=None,\n include_old_versions=False,\n ):\n \"\"\"\n Parameters\n ----------\n include_old_versions: bool\n Include spectra with XVER numbers <0? 
These are CLASS spectra that\n have been \"overwritten\" (re-reduced?)\n \"\"\"\n if entry is not None and len(entry)==2:\n return irange(entry[0], entry[1])\n\n if frequency is not None:\n self._load_all_spectra()\n\n sel = [(re.search(re.escape(ensure_bytes(line)), h['LINE'], re.IGNORECASE)\n if line is not None else True) and\n (re.search(ensure_bytes(linere), h['LINE'], linereflags)\n if linere is not None else True) and\n (h['SCAN'] == scan if scan is not None else True) and\n ((h['OFF1'] == offset or\n h['OFF2'] == offset) if offset is not None else True) and\n (re.search(re.escape(ensure_bytes(source)), h['CSOUR'], re.IGNORECASE)\n if source is not None else True) and\n (re.search(ensure_bytes(sourcere), h['CSOUR'], sourcereflags)\n if sourcere is not None else True) and\n (h['OFF1']>range[0] and h['OFF1'] < range[1] and\n h['OFF2']>range[2] and h['OFF2'] < range[3]\n if range is not None and len(range)==4 else True) and\n (h['QUAL'] == quality if quality is not None else True) and\n (re.search(re.escape(ensure_bytes(telescope)), h['CTELE'], re.IGNORECASE)\n if telescope is not None else True) and\n (re.search(ensure_bytes(telescopere), h['CTELE'], telescopereflags)\n if telescopere is not None else True) and\n (h['SUBSCAN']==subscan if subscan is not None else True) and\n ('RESTF' in h and # Need to check that it IS a spectrum: continuum data can't be accessed this way\n h['RESTF'] > frequency[0] and\n h['RESTF'] < frequency[1]\n if frequency is not None and len(frequency)==2\n else True) and\n (h['COMPPOSA']%180 > posang[0] and\n h['COMPPOSA']%180 < posang[1]\n if posang is not None and len(posang)==2\n else True) and\n # 1A uses XVER, 2A uses VER. If neither are present, it's\n # probably not a valid spectrum?\n (h.get('XVER', h.get('VER', -999)) > 0\n if not include_old_versions else True)\n for h in self.headers\n ]\n\n return [ii for ii,k in enumerate(sel) if k]\n\n def get_spectra(self, progressbar=True, **kwargs):\n selected_indices = self.select_spectra(**kwargs)\n\n if not any(selected_indices):\n raise ValueError(\"Selection yielded empty.\")\n\n self._spectra.load(selected_indices, progressbar=progressbar)\n return [self._spectra[ii] for ii in selected_indices]\n\n def get_pyspeckit_spectra(self, progressbar=True, **kwargs):\n\n spdata = self.get_spectra(progressbar=progressbar, **kwargs)\n\n spectra = [pyspeckit.Spectrum(data=data,\n xarr=make_axis(header),\n header=clean_header(header))\n for data,header in spdata]\n\n return spectra\n\n\n def read_observations(self, observation_indices, progressbar=True):\n self._spectra.load(observation_indices, progressbar=progressbar)\n return [self._spectra[ii] for ii in observation_indices]\n\n\n@print_timing\ndef read_class(filename, downsample_factor=None, sourcename=None,\n telescope=None, line=None, posang=None, verbose=False,\n flag_array=None):\n \"\"\"\n Read a binary class file.\n Based on the\n `GILDAS CLASS file type Specification\n <http://iram.fr/IRAMFR/GILDAS/doc/html/class-html/node58.html>`_\n\n Parameters\n ----------\n filename: str\n downsample_factor: None or int\n Factor by which to downsample data by averaging. Useful for\n overresolved data.\n sourcename: str or list of str\n Source names to match to the data (uses regex)\n telescope: str or list of str\n 'XTEL' or 'TELE' parameters: the telescope & instrument\n line: str or list of str\n The line name\n posang: tuple of 2 floats\n The first float is the minimum value for the position angle. 
The second\n float is the maximum value for the position angle.\n verbose: bool\n Log messages with severity INFO\n flag_array: np.ndarray\n An array with the same shape as the data used to flag out\n (remove) data when downsampling. True = flag out\n \"\"\"\n classobj = ClassObject(filename)\n\n if not isinstance(sourcename, (list,tuple)):\n sourcename = [sourcename]\n if not isinstance(telescope, (list,tuple)):\n telescope = [telescope]\n if not isinstance(line, (list,tuple)):\n line = [line]\n\n spectra,headers = [],[]\n if verbose:\n log.info(\"Reading...\")\n selection = [ii\n for source in sourcename\n for tel in telescope\n for li in line\n for ii in classobj.select_spectra(sourcere=source,\n telescope=tel,\n line=li,\n posang=posang)]\n\n sphdr = classobj.read_observations(selection)\n if len(sphdr) == 0:\n return None\n spec,hdr = zip(*sphdr)\n spectra += spec\n headers += hdr\n\n indexes = headers\n\n weight = ~flag_array if flag_array is not None else None\n\n if downsample_factor is not None:\n if verbose:\n log.info(\"Downsampling...\")\n spectra = [downsample_1d(spec, downsample_factor,\n weight=weight)\n for spec in ProgressBar(spectra)]\n headers = [downsample_header(h, downsample_factor)\n for h in ProgressBar(headers)]\n\n for hdr in headers:\n stringify_header(hdr)\n\n return spectra,headers,indexes\n\ndef stringify_header(header):\n from six import string_types, integer_types\n import string\n FITS_allowed_types = (string_types + integer_types +\n (float, complex, bool, np.floating, np.integer,\n np.complexfloating, np.bool_))\n bad_chars = string.printable[96:]\n badcharre = re.compile(\"[{0}]\".format(bad_chars))\n for key, value in header.items():\n if isinstance(value, bytes):\n header[key] = value.decode()\n elif not isinstance(value, FITS_allowed_types):\n header[key] = badcharre.sub(\"\", str(header[key]))\n\ndef downsample_header(hdr, downsample_factor):\n for k in ('NCHAN','NPOIN','DATALEN'):\n if k in hdr:\n hdr[k] = int((hdr[k] / downsample_factor))\n # maybe wrong? h['RCHAN'] = (h['RCHAN']-1) / downsample_factor + 1\n scalefactor = 1./downsample_factor\n hdr['RCHAN'] = (hdr['RCHAN']-1)*scalefactor + 0.5 + scalefactor/2.\n for kw in ['FRES','VRES']:\n if kw in hdr:\n hdr[kw] *= downsample_factor\n return hdr\n\ndef make_axis(header,imagfreq=False):\n \"\"\"\n Create a :class:`pyspeckit.spectrum.units.SpectroscopicAxis` from the CLASS \"header\"\n \"\"\"\n from .. 
import units\n\n rest_frequency = header.get('RESTF')\n xunits = 'MHz'\n nchan = header.get('NCHAN')\n voff = header.get('VOFF')\n foff = header.get('FOFF')\n doppler = header.get('DOPPLER')\n fres = header.get('FRES')\n refchan = header.get('RCHAN')\n imfreq = header.get('IMAGE')\n\n if foff in (None, 0.0) and voff not in (None, 0.0):\n # Radio convention\n foff = -voff/2.997924580e5 * rest_frequency\n\n if not imagfreq:\n xarr = rest_frequency + foff + (numpy.arange(1, nchan+1) - refchan) * fres\n XAxis = units.SpectroscopicAxis(xarr,unit='MHz',refX=rest_frequency*u.MHz)\n else:\n xarr = imfreq - (numpy.arange(1, nchan+1) - refchan) * fres\n XAxis = units.SpectroscopicAxis(xarr,unit='MHz',refX=imfreq*u.MHz)\n\n return XAxis\n\n@print_timing\ndef class_to_obsblocks(filename, telescope, line, datatuple=None, source=None,\n imagfreq=False, DEBUG=False, **kwargs):\n \"\"\"\n Load an entire CLASS observing session into a list of ObsBlocks based on\n matches to the 'telescope', 'line' and 'source' names\n\n Parameters\n ----------\n filename : string\n The Gildas CLASS data file to read the spectra from.\n telescope : list\n List of telescope names to be matched.\n line : list\n List of line names to be matched.\n source : list (optional)\n List of source names to be matched. Defaults to None.\n imagfreq : bool\n Create a SpectroscopicAxis with the image frequency.\n \"\"\"\n if datatuple is None:\n spectra,header,indexes = read_class(filename, **kwargs)\n else:\n spectra,header,indexes = datatuple\n\n obslist = []\n lastscannum = -1\n spectrumlist = None\n for sp,hdr,ind in zip(spectra,header,indexes):\n hdr.update(ind)\n # this is slow but necessary...\n H = pyfits.Header()\n for k,v in iteritems(hdr):\n if hasattr(v,\"__len__\") and not isinstance(v,str):\n # make an array of header entries, but this\n # supports only up to 10 of them...\n if len(v) > 1:\n if len(v) < 10:\n for ii,vv in enumerate(v):\n newkey = k[:7]+str(ii)\n H[newkey] = vv\n elif len(v) < 100:\n for ii,vv in enumerate(v):\n newkey = k[:6]+str(ii)\n H[newkey] = vv\n else:\n raise ValueError(\"Too many entries for {0}\".format(k))\n else:\n H[k] = v[0]\n #elif not any(x in str(v).lower() for x in ('comment', 'end', 'history')):\n # # do not try to add comments...\n # This commented out block used to attempt to reject comments\n # using a private regex in the old pyfits which no longer exists.\n # I don't know if it was necessary.\n else:\n H[k] = v\n scannum = hdr['SCAN']\n if 'XTEL' in hdr and hdr['XTEL'].strip() not in telescope:\n continue\n if hdr['LINE'].strip() not in line:\n continue\n if (source is not None) and (hdr['SOURC'].strip() not in source):\n continue\n hdr['RESTFREQ'] = hdr.get('RESTF')\n H['RESTFREQ'] = hdr.get('RESTF')\n\n #print \"Did not skip %s,%s. 
Scannum, last: %i,%i\" % (hdr['XTEL'],hdr['LINE'],scannum,lastscannum)\n\n if scannum != lastscannum:\n lastscannum = scannum\n if spectrumlist is not None:\n obslist.append(pyspeckit.ObsBlock(spectrumlist))\n xarr = make_axis(hdr,imagfreq=imagfreq)\n spectrumlist = [(\n pyspeckit.Spectrum(xarr=xarr,\n header=H,\n data=sp))]\n else:\n spectrumlist.append(\n pyspeckit.Spectrum(xarr=xarr,\n header=H,\n data=sp))\n\n return obslist\n\nclass LazyItem(object):\n \"\"\"\n Simple lazy spectrum-retriever wrapper\n \"\"\"\n def __init__(self, parent):\n self.parent = parent\n self.sphdr = {}\n self.nind = len(self.parent.allind)\n self.nloaded = 0\n\n def __repr__(self):\n return (\"Set of {0} spectra & headers, {1} loaded\"\n \" ({2:0.2f}%)\".format(self.nind, self.nloaded,\n (float(self.nloaded)/self.nind)*100))\n\n def load_all(self, progressbar=True):\n self.load(range(self.nind))\n\n def load(self, indices, progressbar=True):\n pb = ProgressBar(len(indices))\n counter = 0\n for k in indices:\n self[k]\n counter += 1\n pb.update(counter)\n\n def __getitem__(self, key):\n if key in self.sphdr:\n return self.sphdr[key]\n elif isinstance(key, slice):\n return [self[k] for k in xrange(key.start or 0,\n key.end or len(self.parent.allind),\n key.step or 1)]\n else:\n sphd = read_observation(self.parent._file, key,\n file_description=self.parent.file_description,\n indices=self.parent.allind,\n my_memmap=self.parent._data)\n # Update the header with OTFSCAN and POSANG info\n sphd[1].update(self.parent.allind[key])\n self.sphdr[key] = sphd\n self.nloaded += 1\n return sphd\n\n def __iter__(self):\n return self.next()\n\n def __next__(self):\n for k in self.spheader:\n yield self.spheader[k]\n\n def __contains__(self, key):\n return key in self.sphdr\n\n\n\n@print_timing\ndef class_to_spectra(filename, datatuple=None, **kwargs):\n \"\"\"\n Load each individual spectrum within a CLASS file into a list of Spectrum\n objects\n \"\"\"\n if datatuple is None:\n spectra,header,indexes = read_class(filename, **kwargs)\n else:\n spectra,header,indexes = datatuple\n\n spectrumlist = []\n for sp,hdr,ind in zip(spectra,header,indexes):\n hdr.update(ind)\n xarr = make_axis(hdr)\n spectrumlist.append(\n pyspeckit.Spectrum(xarr=xarr,\n header=hdr,\n data=sp))\n\n return pyspeckit.Spectra(spectrumlist)\n\ndef tests():\n \"\"\"\n Tests are specific to the machine on which this code was developed.\n \"\"\"\n fn1 = '/Users/adam/work/bolocam/hht/class_003.smt'\n #fn1 = '/Users/adam/work/bolocam/hht/class_001.smt'\n #fn1 = '/Users/adam/work/bolocam/hht/test_SMT-F1M-VU-20824-073.cls'\n #fn2 = '/Users/adam/work/bolocam/hht/test_SMT-F1M-VU-79472+203.cls'\n #F1 = read_class(fn1)#,DEBUG=True)\n #F2 = read_class(fn2)\n n2hp = class_to_obsblocks(fn1,telescope=['SMT-F1M-HU','SMT-F1M-VU'],line=['N2HP(3-2)','N2H+(3-2)'])\n hcop = class_to_obsblocks(fn1,telescope=['SMT-F1M-HL','SMT-F1M-VL'],line=['HCOP(3-2)','HCO+(3-2)'])\n"
] | [
[
"numpy.sum",
"numpy.ones",
"numpy.arctan2",
"numpy.fromfile",
"numpy.floor",
"numpy.median",
"numpy.arange",
"numpy.array",
"numpy.memmap"
]
] |
Tarpelite/BERT_self_training | [
"f50ff015f0d3669b5d927a6d28d8a08201c101b6"
] | [
"examples/ner/run_ner_strain.py"
] | [
"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" Fine-tuning the library models for named entity recognition on CoNLL-2003 (Bert or Roberta). \"\"\"\n\n\nimport argparse\nimport glob\nimport logging\nimport os\nimport random\n\nimport numpy as np\nimport torch\nfrom seqeval.metrics import f1_score, precision_score, recall_score\nfrom torch.nn import CrossEntropyLoss\nfrom torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset\nfrom torch.utils.data.distributed import DistributedSampler\nfrom tqdm import tqdm, trange\nimport pickle\n# from pudb import set_trace\n# set_trace()\n\nfrom transformers import (\n MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,\n WEIGHTS_NAME,\n AdamW,\n AutoConfig,\n AutoModelForTokenClassification,\n AutoTokenizer,\n get_linear_schedule_with_warmup,\n)\nfrom utils_ner import convert_examples_to_features, get_labels, read_examples_from_file\n\n\ntry:\n from torch.utils.tensorboard import SummaryWriter\nexcept ImportError:\n from tensorboardX import SummaryWriter\n\n\nlogger = logging.getLogger(__name__)\n\nMODEL_CONFIG_CLASSES = list(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.keys())\nMODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)\n\nALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in MODEL_CONFIG_CLASSES), ())\n\nTOKENIZER_ARGS = [\"do_lower_case\", \"strip_accents\", \"keep_accents\", \"use_fast\"]\n\n\ndef set_seed(args):\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if args.n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n\n\ndef train(args, train_dataset, model, tokenizer, labels, pad_token_label_id):\n \"\"\" Train the model \"\"\"\n if args.local_rank in [-1, 0]:\n tb_writer = SummaryWriter()\n\n args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)\n train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)\n train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)\n\n if args.max_steps > 0:\n t_total = args.max_steps\n args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1\n else:\n t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs\n\n if args.warmup_ratio > 0:\n args.warmup_steps = int(t_total * args.warmup_ratio)\n # Prepare optimizer and schedule (linear warmup and decay)\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": args.weight_decay,\n },\n {\"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], \"weight_decay\": 0.0},\n ]\n optimizer = AdamW(optimizer_grouped_parameters, 
lr=args.learning_rate, eps=args.adam_epsilon)\n scheduler = get_linear_schedule_with_warmup(\n optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total\n )\n\n # Check if saved optimizer or scheduler states exist\n if os.path.isfile(os.path.join(args.model_name_or_path, \"optimizer.pt\")) and os.path.isfile(\n os.path.join(args.model_name_or_path, \"scheduler.pt\")\n ):\n # Load in optimizer and scheduler states\n optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, \"optimizer.pt\")))\n scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, \"scheduler.pt\")))\n\n if args.fp16:\n try:\n from apex import amp\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.\")\n model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)\n\n # multi-gpu training (should be after apex fp16 initialization)\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Distributed training (should be after apex fp16 initialization)\n if args.local_rank != -1:\n model = torch.nn.parallel.DistributedDataParallel(\n model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True\n )\n\n # Train!\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_dataset))\n logger.info(\" Num Epochs = %d\", args.num_train_epochs)\n logger.info(\" Instantaneous batch size per GPU = %d\", args.per_gpu_train_batch_size)\n logger.info(\n \" Total train batch size (w. parallel, distributed & accumulation) = %d\",\n args.train_batch_size\n * args.gradient_accumulation_steps\n * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),\n )\n logger.info(\" Gradient Accumulation steps = %d\", args.gradient_accumulation_steps)\n logger.info(\" Total optimization steps = %d\", t_total)\n\n global_step = 0\n epochs_trained = 0\n steps_trained_in_current_epoch = 0\n # Check if continuing training from a checkpoint\n if os.path.exists(args.model_name_or_path):\n # set global_step to gobal_step of last saved checkpoint from model path\n try:\n global_step = int(args.model_name_or_path.split(\"-\")[-1].split(\"/\")[0])\n except ValueError:\n global_step = 0\n epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)\n steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)\n\n logger.info(\" Continuing training from checkpoint, will skip to saved global_step\")\n logger.info(\" Continuing training from epoch %d\", epochs_trained)\n logger.info(\" Continuing training from global step %d\", global_step)\n logger.info(\" Will skip the first %d steps in the first epoch\", steps_trained_in_current_epoch)\n\n tr_loss, logging_loss = 0.0, 0.0\n model.zero_grad()\n train_iterator = trange(\n epochs_trained, int(args.num_train_epochs), desc=\"Epoch\", disable=args.local_rank not in [-1, 0]\n )\n set_seed(args) # Added here for reproductibility\n for _ in train_iterator:\n epoch_iterator = tqdm(train_dataloader, desc=\"Iter(loss=X.XXX, lr=X.XXXXXXXX)\", disable=args.local_rank not in [-1, 0])\n for step, batch in enumerate(epoch_iterator):\n\n # Skip past any already trained steps if resuming training\n if steps_trained_in_current_epoch > 0:\n steps_trained_in_current_epoch -= 1\n continue\n\n model.train()\n batch = tuple(t.to(args.device) for t in batch)\n inputs = {\"input_ids\": batch[0], \"attention_mask\": batch[1], 
\"soft_labels\": batch[3]}\n if args.model_type != \"distilbert\":\n inputs[\"token_type_ids\"] = (\n batch[2] if args.model_type in [\"bert\", \"xlnet\"] else None\n ) # XLM and RoBERTa don\"t use segment_ids\n\n outputs = model(**inputs)\n loss = outputs[0] # model outputs are always tuple in pytorch-transformers (see doc)\n\n if args.n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu parallel training\n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gradient_accumulation_steps\n\n if args.fp16:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n tr_loss += loss.item()\n\n if (step + 1) % args.gradient_accumulation_steps == 0:\n epoch_iterator.set_description('Iter (loss=%5.3f) lr=%9.7f' % (loss.item(), scheduler.get_lr()[0]))\n if args.fp16:\n torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)\n else:\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n\n optimizer.step()\n scheduler.step() # Update learning rate schedule\n model.zero_grad()\n global_step += 1\n\n if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:\n # Log metrics\n if (\n args.local_rank == -1 and args.evaluate_during_training\n ): # Only evaluate when single GPU otherwise metrics may not average well\n results, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode=\"dev\")\n for key, value in results.items():\n tb_writer.add_scalar(\"eval_{}\".format(key), value, global_step)\n tb_writer.add_scalar(\"lr\", scheduler.get_lr()[0], global_step)\n tb_writer.add_scalar(\"loss\", (tr_loss - logging_loss) / args.logging_steps, global_step)\n logging_loss = tr_loss\n\n if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:\n # Save model checkpoint\n output_dir = os.path.join(args.output_dir, \"checkpoint-{}\".format(global_step))\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n model_to_save = (\n model.module if hasattr(model, \"module\") else model\n ) # Take care of distributed/parallel training\n model_to_save.save_pretrained(output_dir)\n tokenizer.save_pretrained(output_dir)\n\n torch.save(args, os.path.join(output_dir, \"training_args.bin\"))\n logger.info(\"Saving model checkpoint to %s\", output_dir)\n\n torch.save(optimizer.state_dict(), os.path.join(output_dir, \"optimizer.pt\"))\n torch.save(scheduler.state_dict(), os.path.join(output_dir, \"scheduler.pt\"))\n logger.info(\"Saving optimizer and scheduler states to %s\", output_dir)\n\n if args.max_steps > 0 and global_step > args.max_steps:\n epoch_iterator.close()\n break\n if args.max_steps > 0 and global_step > args.max_steps:\n train_iterator.close()\n break\n\n if args.local_rank in [-1, 0]:\n tb_writer.close()\n\n return global_step, tr_loss / global_step\n\n\ndef evaluate(args, model, tokenizer, labels, pad_token_label_id, mode, prefix=\"\"):\n eval_dataset = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode=mode)\n\n args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)\n # Note that DistributedSampler samples randomly\n eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)\n eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)\n\n # multi-gpu evaluate\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Eval!\n logger.info(\"***** Running 
evaluation %s *****\", prefix)\n logger.info(\" Num examples = %d\", len(eval_dataset))\n logger.info(\" Batch size = %d\", args.eval_batch_size)\n eval_loss = 0.0\n nb_eval_steps = 0\n preds = None\n out_label_ids = None\n model.eval()\n for batch in tqdm(eval_dataloader, desc=\"Evaluating\"):\n batch = tuple(t.to(args.device) for t in batch)\n\n with torch.no_grad():\n inputs = {\"input_ids\": batch[0], \"attention_mask\": batch[1], \"labels\": batch[3]}\n if args.model_type != \"distilbert\":\n inputs[\"token_type_ids\"] = (\n batch[2] if args.model_type in [\"bert\", \"xlnet\"] else None\n ) # XLM and RoBERTa don\"t use segment_ids\n outputs = model(**inputs)\n tmp_eval_loss, logits = outputs[:2]\n\n if args.n_gpu > 1:\n tmp_eval_loss = tmp_eval_loss.mean() # mean() to average on multi-gpu parallel evaluating\n\n eval_loss += tmp_eval_loss.item()\n nb_eval_steps += 1\n if preds is None:\n preds = logits.detach().cpu().numpy()\n out_label_ids = inputs[\"labels\"].detach().cpu().numpy()\n else:\n preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)\n out_label_ids = np.append(out_label_ids, inputs[\"labels\"].detach().cpu().numpy(), axis=0)\n\n eval_loss = eval_loss / nb_eval_steps\n preds = np.argmax(preds, axis=2)\n\n label_map = {i: label for i, label in enumerate(labels)}\n\n out_label_list = [[] for _ in range(out_label_ids.shape[0])]\n preds_list = [[] for _ in range(out_label_ids.shape[0])]\n\n for i in range(out_label_ids.shape[0]):\n for j in range(out_label_ids.shape[1]):\n if out_label_ids[i, j] != pad_token_label_id:\n out_label_list[i].append(label_map[out_label_ids[i][j]])\n preds_list[i].append(label_map[preds[i][j]])\n\n print(\"preds:\", preds_list[0])\n print(\"labels:\", out_label_list[0])\n results = {\n \"loss\": eval_loss,\n \"precision\": precision_score(out_label_list, preds_list),\n \"recall\": recall_score(out_label_list, preds_list),\n \"f1\": f1_score(out_label_list, preds_list),\n }\n\n logger.info(\"***** Eval results %s *****\", prefix)\n for key in sorted(results.keys()):\n logger.info(\" %s = %s\", key, str(results[key]))\n\n return results, preds_list\n\n\ndef load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode):\n if args.local_rank not in [-1, 0] and not evaluate:\n torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache\n\n # Load data features from cache or dataset file\n \n \n logger.info(\"Creating features from dataset file at %s\", args.data_dir)\n examples = read_examples_from_file(args.eval_file, mode)\n features = convert_examples_to_features(\n examples,\n labels,\n args.max_seq_length,\n tokenizer,\n cls_token_at_end=bool(args.model_type in [\"xlnet\"]),\n # xlnet has a cls token at the end\n cls_token=tokenizer.cls_token,\n cls_token_segment_id=2 if args.model_type in [\"xlnet\"] else 0,\n sep_token=tokenizer.sep_token,\n sep_token_extra=bool(args.model_type in [\"roberta\"]),\n # roberta uses an extra separator b/w pairs of sentences, cf. 
github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805\n pad_on_left=bool(args.model_type in [\"xlnet\"]),\n # pad on the left for xlnet\n pad_token=tokenizer.pad_token_id,\n pad_token_segment_id=tokenizer.pad_token_type_id,\n pad_token_label_id=pad_token_label_id,\n )\n \n\n if args.local_rank == 0 and not evaluate:\n torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache\n\n # Convert to Tensors and build dataset\n all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)\n all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)\n all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)\n all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)\n\n dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)\n return dataset\n\n\n\n\ndef main():\n parser = argparse.ArgumentParser()\n\n # Required parameters\n parser.add_argument(\n \"--data_dir\",\n default=None,\n type=str,\n help=\"The input data dir. Should contain the training files for the CoNLL-2003 NER task.\",\n )\n parser.add_argument(\n \"--model_type\",\n default=None,\n type=str,\n required=True,\n help=\"Model type selected in the list: \" + \", \".join(MODEL_TYPES),\n )\n parser.add_argument(\n \"--model_name_or_path\",\n default=None,\n type=str,\n required=True,\n help=\"Path to pre-trained model or shortcut name selected in the list: \" + \", \".join(ALL_MODELS),\n )\n parser.add_argument(\n \"--output_dir\",\n default=None,\n type=str,\n required=True,\n help=\"The output directory where the model predictions and checkpoints will be written.\",\n )\n\n # Other parameters\n parser.add_argument(\n \"--labels\",\n default=\"\",\n type=str,\n help=\"Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.\",\n )\n parser.add_argument(\n \"--config_name\", default=\"\", type=str, help=\"Pretrained config name or path if not the same as model_name\"\n )\n parser.add_argument(\n \"--tokenizer_name\",\n default=\"\",\n type=str,\n help=\"Pretrained tokenizer name or path if not the same as model_name\",\n )\n parser.add_argument(\n \"--cache_dir\",\n default=\"\",\n type=str,\n help=\"Where do you want to store the pre-trained models downloaded from s3\",\n )\n parser.add_argument(\n \"--max_seq_length\",\n default=128,\n type=int,\n help=\"The maximum total input sequence length after tokenization. 
Sequences longer \"\n \"than this will be truncated, sequences shorter will be padded.\",\n )\n parser.add_argument(\"--do_train\", action=\"store_true\", help=\"Whether to run training.\")\n parser.add_argument(\"--do_eval\", action=\"store_true\", help=\"Whether to run eval on the dev set.\")\n parser.add_argument(\"--do_predict\", action=\"store_true\", help=\"Whether to run predictions on the test set.\")\n parser.add_argument(\n \"--evaluate_during_training\",\n action=\"store_true\",\n help=\"Whether to run evaluation during training at each logging step.\",\n )\n parser.add_argument(\n \"--do_lower_case\", action=\"store_true\", help=\"Set this flag if you are using an uncased model.\"\n )\n parser.add_argument(\n \"--keep_accents\", action=\"store_const\", const=True, help=\"Set this flag if model is trained with accents.\"\n )\n parser.add_argument(\n \"--strip_accents\", action=\"store_const\", const=True, help=\"Set this flag if model is trained without accents.\"\n )\n parser.add_argument(\"--use_fast\", action=\"store_const\", const=True, help=\"Set this flag to use fast tokenization.\")\n parser.add_argument(\"--per_gpu_train_batch_size\", default=8, type=int, help=\"Batch size per GPU/CPU for training.\")\n parser.add_argument(\n \"--per_gpu_eval_batch_size\", default=8, type=int, help=\"Batch size per GPU/CPU for evaluation.\"\n )\n parser.add_argument(\n \"--gradient_accumulation_steps\",\n type=int,\n default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\",\n )\n parser.add_argument(\"--learning_rate\", default=5e-5, type=float, help=\"The initial learning rate for Adam.\")\n parser.add_argument(\"--weight_decay\", default=0.0, type=float, help=\"Weight decay if we apply some.\")\n parser.add_argument(\"--adam_epsilon\", default=1e-8, type=float, help=\"Epsilon for Adam optimizer.\")\n parser.add_argument(\"--max_grad_norm\", default=1.0, type=float, help=\"Max gradient norm.\")\n parser.add_argument(\n \"--num_train_epochs\", default=3.0, type=float, help=\"Total number of training epochs to perform.\"\n )\n parser.add_argument(\n \"--max_steps\",\n default=-1,\n type=int,\n help=\"If > 0: set total number of training steps to perform. 
Override num_train_epochs.\",\n )\n parser.add_argument(\"--warmup_steps\", default=0, type=int, help=\"Linear warmup over warmup_steps.\")\n\n parser.add_argument(\"--logging_steps\", type=int, default=500, help=\"Log every X updates steps.\")\n parser.add_argument(\"--save_steps\", type=int, default=500, help=\"Save checkpoint every X updates steps.\")\n\n parser.add_argument(\"--logits_file\", type=str, default=\"\")\n parser.add_argument(\"--eval_file\", type=str, default=\"\")\n parser.add_argument(\"--warmup_ratio\", type=float, default=0.1)\n\n parser.add_argument(\n \"--eval_all_checkpoints\",\n action=\"store_true\",\n help=\"Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number\",\n )\n parser.add_argument(\"--no_cuda\", action=\"store_true\", help=\"Avoid using CUDA when available\")\n parser.add_argument(\n \"--overwrite_output_dir\", action=\"store_true\", help=\"Overwrite the content of the output directory\"\n )\n parser.add_argument(\n \"--overwrite_cache\", action=\"store_true\", help=\"Overwrite the cached training and evaluation sets\"\n )\n parser.add_argument(\"--seed\", type=int, default=42, help=\"random seed for initialization\")\n\n \n\n parser.add_argument(\n \"--fp16\",\n action=\"store_true\",\n help=\"Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit\",\n )\n parser.add_argument(\n \"--fp16_opt_level\",\n type=str,\n default=\"O1\",\n help=\"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].\"\n \"See details at https://nvidia.github.io/apex/amp.html\",\n )\n parser.add_argument(\"--local_rank\", type=int, default=-1, help=\"For distributed training: local_rank\")\n parser.add_argument(\"--server_ip\", type=str, default=\"\", help=\"For distant debugging.\")\n parser.add_argument(\"--server_port\", type=str, default=\"\", help=\"For distant debugging.\")\n args = parser.parse_args()\n\n if (\n os.path.exists(args.output_dir)\n and os.listdir(args.output_dir)\n and args.do_train\n and not args.overwrite_output_dir\n ):\n raise ValueError(\n \"Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.\".format(\n args.output_dir\n )\n )\n\n # Setup distant debugging if needed\n if args.server_ip and args.server_port:\n # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script\n import ptvsd\n\n print(\"Waiting for debugger attach\")\n ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)\n ptvsd.wait_for_attach()\n\n # Setup CUDA, GPU & distributed training\n if args.local_rank == -1 or args.no_cuda:\n device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()\n else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs\n torch.cuda.set_device(args.local_rank)\n device = torch.device(\"cuda\", args.local_rank)\n torch.distributed.init_process_group(backend=\"nccl\")\n args.n_gpu = 1\n args.device = device\n\n # Setup logging\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,\n )\n logger.warning(\n \"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s\",\n args.local_rank,\n device,\n args.n_gpu,\n bool(args.local_rank != -1),\n args.fp16,\n )\n\n # Set seed\n set_seed(args)\n\n # Prepare CONLL-2003 task\n labels = get_labels(args.labels)\n num_labels = len(labels)\n # Use cross entropy ignore index as padding label id so that only real label ids contribute to the loss later\n pad_token_label_id = CrossEntropyLoss().ignore_index\n\n # Load pretrained model and tokenizer\n if args.local_rank not in [-1, 0]:\n torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab\n\n args.model_type = args.model_type.lower()\n config = AutoConfig.from_pretrained(\n args.config_name if args.config_name else args.model_name_or_path,\n num_labels=num_labels,\n id2label={str(i): label for i, label in enumerate(labels)},\n label2id={label: i for i, label in enumerate(labels)},\n cache_dir=args.cache_dir if args.cache_dir else None,\n )\n tokenizer_args = {k: v for k, v in vars(args).items() if v is not None and k in TOKENIZER_ARGS}\n logger.info(\"Tokenizer arguments: %s\", tokenizer_args)\n tokenizer = AutoTokenizer.from_pretrained(\n args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,\n cache_dir=args.cache_dir if args.cache_dir else None,\n **tokenizer_args,\n )\n model = AutoModelForTokenClassification.from_pretrained(\n args.model_name_or_path,\n from_tf=bool(\".ckpt\" in args.model_name_or_path),\n config=config,\n cache_dir=args.cache_dir if args.cache_dir else None,\n )\n\n if args.local_rank == 0:\n torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab\n\n model.to(args.device)\n\n logger.info(\"Training/evaluation parameters %s\", args)\n\n # Training\n if args.do_train:\n # train_dataset = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode=\"train\")\n with open(args.logits_file, \"rb\") as f:\n datasets = pickle.load(f)\n \n all_input_ids = torch.tensor(datasets[0], dtype=torch.long)\n all_input_mask = torch.tensor(datasets[1], dtype=torch.long)\n all_segment_ids = torch.tensor(datasets[2], dtype=torch.long)\n all_ner_logits = torch.tensor(datasets[3], dtype=torch.float)\n\n train_dataset = 
TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_ner_logits)\n\n global_step, tr_loss = train(args, train_dataset, model, tokenizer, labels, pad_token_label_id)\n logger.info(\" global_step = %s, average loss = %s\", global_step, tr_loss)\n\n # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()\n if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):\n # Create output directory if needed\n if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:\n os.makedirs(args.output_dir)\n\n logger.info(\"Saving model checkpoint to %s\", args.output_dir)\n # Save a trained model, configuration and tokenizer using `save_pretrained()`.\n # They can then be reloaded using `from_pretrained()`\n model_to_save = (\n model.module if hasattr(model, \"module\") else model\n ) # Take care of distributed/parallel training\n model_to_save.save_pretrained(args.output_dir)\n tokenizer.save_pretrained(args.output_dir)\n\n # Good practice: save your training arguments together with the trained model\n torch.save(args, os.path.join(args.output_dir, \"training_args.bin\"))\n\n # Evaluation\n results = {}\n if args.do_eval and args.local_rank in [-1, 0]:\n tokenizer = AutoTokenizer.from_pretrained(args.output_dir, **tokenizer_args)\n checkpoints = [args.output_dir]\n if args.eval_all_checkpoints:\n checkpoints = list(\n os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + \"/**/\" + WEIGHTS_NAME, recursive=True))\n )\n logging.getLogger(\"pytorch_transformers.modeling_utils\").setLevel(logging.WARN) # Reduce logging\n logger.info(\"Evaluate the following checkpoints: %s\", checkpoints)\n for checkpoint in checkpoints:\n global_step = checkpoint.split(\"-\")[-1] if len(checkpoints) > 1 else \"\"\n model = AutoModelForTokenClassification.from_pretrained(checkpoint)\n model.to(args.device)\n result, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode=\"dev\", prefix=global_step)\n if global_step:\n result = {\"{}_{}\".format(global_step, k): v for k, v in result.items()}\n results.update(result)\n output_eval_file = os.path.join(args.output_dir, \"eval_results.txt\")\n with open(output_eval_file, \"w\") as writer:\n for key in sorted(results.keys()):\n writer.write(\"{} = {}\\n\".format(key, str(results[key])))\n\n if args.do_predict and args.local_rank in [-1, 0]:\n tokenizer = AutoTokenizer.from_pretrained(args.output_dir, **tokenizer_args)\n model = AutoModelForTokenClassification.from_pretrained(args.output_dir)\n model.to(args.device)\n result, predictions = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode=\"test\")\n # Save results\n output_test_results_file = os.path.join(args.output_dir, \"test_results.txt\")\n with open(output_test_results_file, \"w\") as writer:\n for key in sorted(result.keys()):\n writer.write(\"{} = {}\\n\".format(key, str(result[key])))\n # Save predictions\n output_test_predictions_file = os.path.join(args.output_dir, \"test_predictions.txt\")\n with open(output_test_predictions_file, \"w\") as writer:\n with open(os.path.join(args.data_dir, \"test.txt\"), \"r\") as f:\n example_id = 0\n for line in f:\n if line.startswith(\"-DOCSTART-\") or line == \"\" or line == \"\\n\":\n writer.write(line)\n if not predictions[example_id]:\n example_id += 1\n elif predictions[example_id]:\n output_line = line.split()[0] + \" \" + predictions[example_id].pop(0) + \"\\n\"\n writer.write(output_line)\n else:\n logger.warning(\"Maximum sequence length 
exceeded: No prediction for '%s'.\", line.split()[0])\n\n return results\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"torch.utils.data.DataLoader",
"torch.cuda.manual_seed_all",
"torch.no_grad",
"numpy.random.seed",
"torch.cuda.is_available",
"torch.distributed.init_process_group",
"torch.cuda.device_count",
"torch.nn.DataParallel",
"torch.utils.data.RandomSampler",
"torch.device",
"torch.cuda.set_device",
"torch.distributed.get_world_size",
"torch.distributed.get_rank",
"torch.manual_seed",
"torch.utils.data.SequentialSampler",
"torch.tensor",
"numpy.argmax",
"torch.distributed.barrier",
"torch.nn.parallel.DistributedDataParallel",
"torch.utils.data.TensorDataset",
"torch.utils.data.distributed.DistributedSampler",
"torch.nn.CrossEntropyLoss"
]
] |
chipmuenk/acoustics | [
"c85ac95a10c09d7fa15d63b2bdb24acab89fec60"
] | [
"code/LTI/Demos/Tex_matplotlib.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 14 14:15:52 2012\n\nPlot mit TeX-Formatierung der Labels\n(LaTeX muss auf dem Rechner installiert sein)\n\"\"\"\n\nimport numpy as np\nfrom matplotlib import rc\nimport matplotlib.pyplot as plt\n\nrc('text', usetex=True)\nplt.figure(1)\nax = plt.axes([0.1, 0.1, 0.8, 0.7])\nt = np.arange(0.0, 1.0+0.01, 0.01)\ns = np.cos(2*2*np.pi*t)+2\nplt.plot(t, s)\n\nplt.xlabel(r'\\textbf{Time (s)}')\nplt.ylabel(r'\\textit{Voltage} (mV)',fontsize=16)\nplt.title(r\"\\TeX\\ is Number $\\displaystyle\\sum_{n=1}^\\infty\\frac{-e^{i\\pi}}{2^n}$!\",\n fontsize=16, color='r')\nplt.grid(True)\nplt.savefig('tex_demo')\n\nplt.show()"
] | [
[
"matplotlib.pyplot.figure",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig",
"numpy.cos",
"matplotlib.rc",
"matplotlib.pyplot.axes",
"numpy.arange",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel"
]
] |
kuldeepaman/tf-pose | [
"8050912c52a7b4f3c8a2656f267d47ba21d093f6"
] | [
"scripts/pyqtgraph-develop/examples/MultiPlotWidget.py"
] | [
"#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\n## Add path to library (just for examples; you do not need this)\r\nimport initExample\r\n\r\n\r\nfrom scipy import random\r\nfrom numpy import linspace\r\nfrom pyqtgraph.Qt import QtGui, QtCore\r\nimport pyqtgraph as pg\r\nfrom pyqtgraph import MultiPlotWidget\r\ntry:\r\n from pyqtgraph.metaarray import *\r\nexcept:\r\n print(\"MultiPlot is only used with MetaArray for now (and you do not have the metaarray package)\")\r\n exit()\r\n\r\napp = QtGui.QApplication([])\r\nmw = QtGui.QMainWindow()\r\nmw.resize(800,800)\r\npw = MultiPlotWidget()\r\nmw.setCentralWidget(pw)\r\nmw.show()\r\n\r\ndata = random.normal(size=(3, 1000)) * np.array([[0.1], [1e-5], [1]])\r\nma = MetaArray(data, info=[\r\n {'name': 'Signal', 'cols': [\r\n {'name': 'Col1', 'units': 'V'}, \r\n {'name': 'Col2', 'units': 'A'}, \r\n {'name': 'Col3'},\r\n ]}, \r\n {'name': 'Time', 'values': linspace(0., 1., 1000), 'units': 's'}\r\n ])\r\npw.plot(ma)\r\n\r\n## Start Qt event loop unless running in interactive mode.\r\nif __name__ == '__main__':\r\n import sys\r\n if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):\r\n QtGui.QApplication.instance().exec_()\r\n\r\n"
] | [
[
"numpy.linspace",
"scipy.random.normal"
]
] |
lilies/Cirq | [
"519b8b70ba4d2d92d1c034c398161ebdbd23e2e7"
] | [
"cirq/contrib/svg/svg_test.py"
] | [
"import pytest\nimport numpy as np\n\nimport cirq\nfrom cirq.contrib.svg import circuit_to_svg\n\n\ndef test_svg():\n a, b, c = cirq.LineQubit.range(3)\n\n svg_text = circuit_to_svg(\n cirq.Circuit(\n cirq.CNOT(a, b),\n cirq.CZ(b, c),\n cirq.SWAP(a, c),\n cirq.PhasedXPowGate(exponent=0.123, phase_exponent=0.456).on(c),\n cirq.Z(a),\n cirq.measure(a, b, c, key='z'),\n cirq.MatrixGate(np.eye(2)).on(a),\n ))\n assert '<svg' in svg_text\n assert '</svg>' in svg_text\n\n\ndef test_svg_noise():\n noise_model = cirq.ConstantQubitNoiseModel(cirq.DepolarizingChannel(p=1e-3))\n q = cirq.LineQubit(0)\n circuit = cirq.Circuit(cirq.X(q))\n circuit = cirq.Circuit(noise_model.noisy_moments(circuit.moments, [q]))\n svg = circuit_to_svg(circuit)\n assert '>D(0.001)</text>' in svg\n\n\ndef test_validation():\n with pytest.raises(ValueError):\n circuit_to_svg(cirq.Circuit())\n\n q0 = cirq.LineQubit(0)\n with pytest.raises(ValueError):\n circuit_to_svg(\n cirq.Circuit([cirq.Moment([cirq.X(q0)]),\n cirq.Moment([])]))\n"
] | [
[
"numpy.eye"
]
] |
playerkk/HoiTransformer | [
"b710216d6b338863ebe9d40a96765ab52780cefa"
] | [
"models/backbone.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\"\"\"\nBackbone modules.\n\"\"\"\nimport torch\nimport torch.nn.functional as F\nimport torchvision\nfrom torch import nn\nfrom torchvision.models._utils import IntermediateLayerGetter\nfrom typing import Dict, List\n\nfrom util.misc import NestedTensor, is_main_process\n\nfrom .position_encoding import build_position_encoding\n\n\nclass FrozenBatchNorm2d(torch.nn.Module):\n \"\"\"\n BatchNorm2d where the batch statistics and the affine parameters are fixed.\n\n Copy-paste from torchvision.misc.ops with added eps before rqsrt,\n without which any other models than torchvision.models.resnet[18,34,50,101]\n produce nans.\n \"\"\"\n\n def __init__(self, n):\n super(FrozenBatchNorm2d, self).__init__()\n self.register_buffer(\"weight\", torch.ones(n))\n self.register_buffer(\"bias\", torch.zeros(n))\n self.register_buffer(\"running_mean\", torch.zeros(n))\n self.register_buffer(\"running_var\", torch.ones(n))\n\n def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,\n missing_keys, unexpected_keys, error_msgs):\n num_batches_tracked_key = prefix + 'num_batches_tracked'\n if num_batches_tracked_key in state_dict:\n del state_dict[num_batches_tracked_key]\n\n super(FrozenBatchNorm2d, self)._load_from_state_dict(\n state_dict, prefix, local_metadata, strict,\n missing_keys, unexpected_keys, error_msgs)\n\n def forward(self, x):\n # move reshapes to the beginning\n # to make it fuser-friendly\n w = self.weight.reshape(1, -1, 1, 1)\n b = self.bias.reshape(1, -1, 1, 1)\n rv = self.running_var.reshape(1, -1, 1, 1)\n rm = self.running_mean.reshape(1, -1, 1, 1)\n eps = 1e-5\n scale = w * (rv + eps).rsqrt()\n bias = b - rm * scale\n return x * scale + bias\n\n\nclass BackboneBase(nn.Module):\n\n def __init__(self, backbone: nn.Module, train_backbone: bool, num_channels: int, return_interm_layers: bool):\n super().__init__()\n for name, parameter in backbone.named_parameters():\n if not train_backbone or 'layer2' not in name and 'layer3' not in name and 'layer4' not in name:\n parameter.requires_grad_(False)\n if return_interm_layers:\n return_layers = {\"layer1\": \"0\", \"layer2\": \"1\", \"layer3\": \"2\", \"layer4\": \"3\"}\n else:\n return_layers = {'layer4': \"0\"}\n self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)\n self.num_channels = num_channels\n\n def forward(self, tensor_list: NestedTensor):\n xs = self.body(tensor_list.tensors)\n out: Dict[str, NestedTensor] = {}\n for name, x in xs.items():\n m = tensor_list.mask\n assert m is not None\n mask = F.interpolate(m[None].float(), size=x.shape[-2:]).to(torch.bool)[0]\n out[name] = NestedTensor(x, mask)\n return out\n\n\nclass Backbone(BackboneBase):\n \"\"\"ResNet backbone with frozen BatchNorm.\"\"\"\n def __init__(self, name: str,\n train_backbone: bool,\n return_interm_layers: bool,\n dilation: bool):\n backbone = getattr(torchvision.models, name)(\n replace_stride_with_dilation=[False, False, dilation],\n pretrained=is_main_process(), norm_layer=FrozenBatchNorm2d)\n num_channels = 512 if name in ('resnet18', 'resnet34') else 2048\n super().__init__(backbone, train_backbone, num_channels, return_interm_layers)\n\n\nclass Joiner(nn.Sequential):\n def __init__(self, backbone, position_embedding):\n super().__init__(backbone, position_embedding)\n\n def forward(self, tensor_list: NestedTensor):\n xs = self[0](tensor_list)\n out: List[NestedTensor] = []\n pos = []\n for name, x in xs.items():\n out.append(x)\n # position 
encoding\n pos.append(self[1](x).to(x.tensors.dtype))\n\n return out, pos\n\n\ndef build_backbone(args):\n position_embedding = build_position_encoding(args)\n train_backbone = args.lr_backbone > 0\n return_interm_layers = False # args.masks\n backbone = Backbone(args.backbone, train_backbone, return_interm_layers, False)\n model = Joiner(backbone, position_embedding)\n model.num_channels = backbone.num_channels\n return model\n"
] | [
[
"torch.zeros",
"torch.ones"
]
] |
DonCammne/OpenSeesPyAssistant | [
"f380f0f2a2f3d1336320bd8d26fa5efe00a12134"
] | [
"DataManagement.py"
] | [
"\"\"\"\nModule with the parent abstract class DataManagement. \\n\nCarmine Schipani, 2021\n\"\"\"\n\nfrom abc import ABC, abstractmethod\nfrom OpenSeesPyAssistant.ErrorHandling import *\nimport numpy as np\n\n\nclass DataManagement(ABC):\n \"\"\"\n Abstract parent class for data management.\n Using the associated MATLAB class \\n\n LOAD_CLASS.m \\n\n for the postprocessing in MATLAB, allowing for simpler and more reliable data management because the parameters\n from the OpenSeesPy analysis are imported automatically. \n \"\"\"\n\n def SaveData(self, f):\n \"\"\"\n Function that lists in the command window and saves in a opened file text \"f\" the data from the \"self\" class that calls it. \n Example: call this function after this line: \\n \n with open(FileName, 'w') as f:\n\n @param f (io.TextIOWrapper): Opened file to write into\n\n @exception WrongDimension: The number of lists in the list self.data needs to be 2\n \"\"\"\n if len(self.data[0]) != 2: raise WrongDimension() \n \n delimiter = \"##############################\" # 30 times #\n col_delimiter = \"\\t\" # tab\n for data_line in self.data:\n f.write('\\n')\n for col in data_line:\n if type(col) == np.ndarray:\n tmp_str = np.array_str(col, max_line_width = np.inf)\n else:\n tmp_str = str(col)\n f.write(tmp_str)\n f.write(col_delimiter)\n f.write('\\n')\n f.write('NEW INFO SECTION DELIMITER \\t')\n f.write(delimiter)\n\n @abstractmethod\n def ShowInfo(self):\n \"\"\"\n Abstract method that shows the data stored in the class in the command window.\n In some cases, it's possible to plot some information (for example the curve of the material model).\n \"\"\"\n pass\n\n @abstractmethod\n def ReInit(self):\n \"\"\"\n Abstract method that computes the value of the parameters with respect of the arguments. \\n\n Use after changing the value of argument inside the class (to update the values accordingly). \\n\n This function can be very useful in combination with the function \"deepcopy()\" from the module \"copy\". \\n\n Be careful that the parameter self.Initialized is also copied, thus it is safer to copy the class before the method that calls the actual OpenSees commands (and initialise the object).\n \"\"\"\n pass\n\n @abstractmethod\n def UpdateStoredData(self):\n \"\"\"\n Abstract method used to define and update the self.data member variable. \\n\n This member variable (self.data) is a list of lists with 2 entries (info_name and info_value)\n and for each list is stored a different member variable of the class. \\n\n Useful to debug the model, export data, copy object.\n \"\"\"\n pass"
] | [
[
"numpy.array_str"
]
] |
lonelu/Metalprot_learning | [
"8edb2c3e4f6ba129a409d75fd4d15ceb3a9e307b"
] | [
"src/extractor/make_bb_info_mats.py"
] | [
"from numpy.core.numeric import full\nfrom numpy.lib.function_base import append\nimport prody as pr\nimport os\nimport numpy\nimport matplotlib as mpl\nimport pylab\nfrom itertools import combinations, combinations_with_replacement\nfrom docopt import docopt\nimport itertools\nimport pickle\nimport sys\nfrom scipy.linalg.basic import matrix_balance\nfrom scipy.spatial.distance import cdist\n\nfrom . import ligand_database as ld\nfrom . import features_pdb2dihe as fpdh\n\nmetal_sel = 'ion or name NI MN ZN CO CU MG FE' \n\n\n#TO DO: create artificial aa in the 4th aa.\ndef get_atg(full_pdb):\n\t'''\n\tprody atomgroup will be used to calc bb info.\n\tIf the contact aa is at terminal, then the shape of the dist matrix will be < 12. So contact aa will be copied and added.\n\t'''\n\n\tmetal = full_pdb.select(metal_sel)[0]\n\tcontact_aas = full_pdb.select('protein and not carbon and not hydrogen and within 2.83 of resindex ' + str(metal.getResindex()))\n\tcontact_aa_resinds = numpy.unique(contact_aas.getResindices()) \n\textention = 1 \n\n\tcoords = []\n\tresnames = []\n\tnames = []\n\tresnums = []\n\tresn = 1\n\tfor resind in contact_aa_resinds:\t\t\n\t\text_inds = ld.extend_res_indices([resind], full_pdb, extend =extention)\n\n\t\t#In some cases, the contact aa is at terminal. We can add more aa to match the shape.\n\t\tif len(ext_inds) == 2:\n\t\t\tif ext_inds[0] == resind:\n\t\t\t\text_inds.insert(0, resind)\n\t\t\telse:\n\t\t\t\text_inds.append(resind)\n\t\tif len(ext_inds) == 1:\n\t\t\text_inds.append(resind)\n\t\t\text_inds.append(resind)\n\n\t\tfor ind in ext_inds:\n\t\t\taa = full_pdb.select('resindex ' + str(ind))\n\t\t\tcoords.extend(aa.getCoords())\n\t\t\tresnames.extend(aa.getResnames())\n\t\t\tnames.extend(aa.getNames())\n\t\t\tresnums.extend([resn for _i in range(len(aa))])\n\t\t\tresn += 1\n\n\n\tif len(contact_aa_resinds) == 3:\n\t\tcoords.extend([])\n\t\tresnames.extend([])\n\t\tnames.extend([])\n\t\tresnums.extend([])\n\n\t#ag = pr.AtomGroup('-'.join([str(p) for p in per]))\n\tag = pr.AtomGroup('0-1-2-3')\n\tag.setCoords(coords)\n\tag.setResnums(resnums)\n\tag.setResnames(resnames)\n\tag.setNames(names)\n\n\treturn ag\n\n\ndef get_atgs(full_pdb, contain_metal = True):\n\t'''\n\tprody atomgroup will be used to calc bb info.\n\tIf the contact aa is at terminal, then the shape of the dist matrix will be < 12. So contact aa will be copied and added.\n\t'''\n\tif contain_metal:\n\t\tmetal = full_pdb.select(metal_sel)[0]\n\t\tcontact_aas = full_pdb.select('protein and not carbon and not hydrogen and within 2.83 of resindex ' + str(metal.getResindex()))\t\n\telse:\n\t\t#TO DO: it is not quite right here if the pdb happened to have more HIS-CYS-GLU-ASP. Skip now.\n\t\tcontact_aas = full_pdb.select('resname HIS CYS GLU ASP')\n\t\tif not contact_aas and len(numpy.unique(contact_aas.getResindices())) > 4: \n\t\t\treturn []\n\n\tcontact_aa_resinds = numpy.unique(contact_aas.getResindices()) \n\n\textention = 1 \n\t\n\t# TO DO: If the len of contact_ass is not 4...\n\tags = []\n\t#for per in itertools.permutations(range(len(contact_aa_resinds))):\n\tfor per in [range(len(contact_aa_resinds))]:\n\t\tprint(per)\n\n\t\tcoords = []\n\t\tresnames = []\n\t\tnames = []\n\t\tresnums = []\n\t\tresn = 1\n\t\tfor idx in per:\n\t\t\tresind = contact_aa_resinds[idx]\n\n\t\t\text_inds = ld.extend_res_indices([resind], full_pdb, extend =extention)\n\n\t\t\t#In some cases, the contact aa is at terminal. 
We can add more aa to match the shape.\n\t\t\tif len(ext_inds) == 2:\n\t\t\t\tif ext_inds[0] == resind:\n\t\t\t\t\text_inds.insert(0, resind)\n\t\t\t\telse:\n\t\t\t\t\text_inds.append(resind)\n\t\t\tif len(ext_inds) == 1:\n\t\t\t\text_inds.append(resind)\n\t\t\t\text_inds.append(resind)\n\n\t\t\tfor ind in ext_inds:\n\t\t\t\taa = full_pdb.select('resindex ' + str(ind))\n\t\t\t\tcoords.extend(aa.getCoords())\n\t\t\t\tresnames.extend(aa.getResnames())\n\t\t\t\tnames.extend(aa.getNames())\n\t\t\t\tresnums.extend([resn for _i in range(len(aa))])\n\t\t\t\tresn += 1\n\n\t\tag = pr.AtomGroup('-'.join([str(p) for p in per]))\n\t\tag.setCoords(coords)\n\t\tag.setResnums(resnums)\n\t\tag.setResnames(resnames)\n\t\tag.setNames(names)\n\n\t\tags.append(ag)\n\n\treturn ags\n\n\n\ndef get_bb_dist_seq(core):\n\t'''\n\tIf we know N CA C, The coords of CB could be calcualted. So we may not need CB coords.\n\n\t'''\n\n\tn_coords = core.select('name N').getCoords()\n\n\tc_coords = core.select('name C').getCoords()\n\n\tca_coords = core.select('name CA').getCoords()\n\n\n\tn_n = cdist(n_coords, n_coords)\n\n\tc_c = cdist(c_coords, c_coords)\n\n\tca_ca = cdist(ca_coords, ca_coords)\n\n\tcb_coords = []\n\n\tfor i in range(len(n_coords)):\n\t\tCa = ca_coords[i]\n\t\tC = c_coords[i]\n\t\tN = n_coords[i]\n\n\t\tb = Ca - N\n\t\tc = C - Ca\n\t\ta = numpy.cross(b, c)\n\t\tCb = -0.58273431*a + 0.56802827*b - 0.54067466*c + Ca\n\n\t\tcb_coords.append(Cb)\n\n\tcb_coords = core.select('name CB').getCoords()\n\n\tcb_cb = cdist(cb_coords, cb_coords)\n\n\treturn n_n, c_c, ca_ca, cb_cb\n\n\ndef get_dihe(ag):\n\t'''\n\tPlease check features_pdb2dihe.py.\n\tOnly the contact aa will be extracted.\n\t'''\n\t\n\tnres = len(ag.select('name CA'))\n\tprint(nres)\n\tdist, _omega, _theta_asym, _phi_asym = fpdh.get_neighbors(ag, nres, 20.0) \n\n\t#TO DO: extract info, only the contact aa matters?!\n\tomega = numpy.zeros((nres, nres))\n\ttheta_asym = numpy.zeros((nres, nres))\n\tphi_asym = numpy.zeros((nres, nres))\n\tfor i in range(1, nres, 3):\n\t\tfor j in range(1, nres, 3):\n\t\t\tomega[i, j] = _omega[i, j]\n\t\t\ttheta_asym[i, j] = _theta_asym[i, j]\n\t\t\tphi_asym[i, j] = _phi_asym[i, j]\n\n\treturn omega, theta_asym, phi_asym\n\n\ndef get_seq_mat(ag, matrix_size = 12):\n\n\tseq = ag.select('name CA').getResnames()\n\n\tthreelettercodes = ['ALA', 'ARG', 'ASN', 'ASP', 'CYS', 'GLU', 'GLN', 'GLY', 'HIS', 'ILE', 'LEU', 'LYS', 'MET',\\\n\t\t\t\t\t\t'PHE', 'PRO', 'SER', 'THR', 'TRP', 'TYR', 'VAL']\n\n\tseq_channels = numpy.zeros([40, matrix_size, matrix_size], dtype=int)\n\n\n\tfor i in range(len(seq)):\n\t\taa = seq[i]\n\t\ttry:\n\t\t\tidx = threelettercodes.index(aa)\n\t\texcept:\n\t\t\tprint('Resname of following atom not found: {}'.format(aa))\n\t\t\tcontinue\n\t\tfor j in range(len(seq)):\n\t\t\tseq_channels[idx][i][j] = 1 # horizontal rows of 1's in first 20 channels\n\t\t\tseq_channels[idx+20][j][i] = 1 # vertical columns of 1's in next 20 channels\n\t\t\n\treturn seq_channels\n\n\ndef mk_full_mats(ag, matrix_size = 12):\n\tnres = len(ag.select('name CA'))\n\n\tn_n, c_c, ca_ca, cb_cb = get_bb_dist_seq(ag)\n\n\tomega, theta_asym, phi_asym = get_dihe(ag)\n\n\tseq_mats = get_seq_mat(ag, matrix_size)\n\n\tfull_mat = numpy.zeros((47, matrix_size, matrix_size))\n\n\t# Make sure the shape of each matrix is smaller than the matrix_size.\n\n\tfull_mat[0,0:n_n.shape[0], 0:n_n.shape[1]] = n_n\n\tfull_mat[1,0:c_c.shape[0], 0:c_c.shape[1]] = c_c\n\tfull_mat[2,0:ca_ca.shape[0], 0:ca_ca.shape[1]] = ca_ca\n\tfull_mat[3,0:cb_cb.shape[0], 
0:cb_cb.shape[1]] = cb_cb\n\n\tfull_mat[4,0:omega.shape[0], 0:omega.shape[1]] = omega\n\tfull_mat[5,0:theta_asym.shape[0], 0:theta_asym.shape[1]] = theta_asym\n\tfull_mat[6,0:phi_asym.shape[0], 0:phi_asym.shape[1]] = phi_asym\n\n\tfor i in range(7, 47):\n\t\tfull_mat[i, :, :] = seq_mats[i - 7]\n\n\treturn full_mat\n\n\ndef write_pickle_file(full_mat, pdb, ag, out_folder, tag = ''):\n\t\"\"\"\n\tWrites a pickle file containing the input numpy array into the current permutation's folder.\n\tCurrently using this only to save the full matrix (all 46 channels).\n\t\"\"\"\n\tnumpy.set_printoptions(threshold=numpy.inf)\n\tpdb_name = pdb.split('.')[0]\n\t\n\tpkl_file = out_folder + pdb_name + '_full_mat_' + ag.getTitle() + tag + '.pkl'\n\n\twith open(pkl_file, 'wb') as f:\n\t\tprint(pkl_file)\n\t\tpickle.dump(full_mat, f)\n\n\treturn\n\n\ndef write_dist_mat_file(mat, pdb, ag, out_folder, tag = ''):\n\t\"\"\"\n\tWrites out a file containing the distance matrix\n\t\"\"\"\n\t# output_folder = 'core_contact_maps/dist_mat_txt_folder/'\n\n\tnumpy.set_printoptions(threshold=numpy.inf)\n\n\tdist_mat_file = pdb.split('.')[0]\n\n\tdist_mat_file = out_folder + dist_mat_file + '_full_mat_' + ag.getTitle() + tag + '.txt'\n\n\twith open(dist_mat_file, 'w') as open_file:\n\t\tfor i in mat:\n\t\t\topen_file.write(str(i) + '\\n')\n\n\treturn\n\n\ndef run_mk_bb_info_mats(workdir, out_path, mat_size = 12, top = 1000, contain_metal = True, opts = None):\n\n\tos.makedirs(out_path, exist_ok=True)\n\n\tcount = 0\n\n\terrors = ''\n\t\n\tfor pdb_name in os.listdir(workdir):\n\n\t\tif count >= top:\n\t\t\tbreak\n\n\t\tif '.pdb' not in pdb_name:\n\t\t\tcontinue\n\n\t\tpdb_file = workdir + pdb_name\n\n\t\tpdb = pr.parsePDB(pdb_file)\n\n\t\tags = get_atgs(pdb, contain_metal)\n\t\n\t\tfor ag in ags:\n\t\t\ttry:\n\t\t\t\t#TO DO: currently, only consider 3 or 4 aa binding.\n\t\t\t\tif len(ag.select('name CA'))> 12 or len(ag.select('name CA')) < 7:\n\t\t\t\t\tprint(pdb_name + ' not used. ')\n\t\t\t\t\tcontinue\n\t\t\t\tfull_mat = mk_full_mats(ag, mat_size)\n\t\t\t\twrite_dist_mat_file(full_mat, pdb_name, ag, out_path)\n\t\t\t\twrite_pickle_file(full_mat, pdb_name, ag, out_path)\n\n\t\t\t\tcount += 1\n\t\t\texcept:\n\t\t\t\tprint('error: ' + pdb_name)\n\t\t\t\terrors += pdb_name + '\\n'\n\n\t\t\tif count >= top:\n\t\t\t\tbreak\n\t\n\twith open(out_path + '_error.txt', 'w') as f:\n\t\tf.write(errors)\n\n\treturn\n\n\n\n"
] | [
[
"numpy.cross",
"numpy.set_printoptions",
"scipy.spatial.distance.cdist",
"numpy.zeros"
]
] |
s123600g/openfaceInstallscript | [
"962b4b89c5626318b5701d7297d49df3423b0fe4"
] | [
"InstallOpenface/fix_sklearn/label.py"
] | [
"# Authors: Alexandre Gramfort <[email protected]>\n# Mathieu Blondel <[email protected]>\n# Olivier Grisel <[email protected]>\n# Andreas Mueller <[email protected]>\n# Joel Nothman <[email protected]>\n# Hamzeh Alsalhi <[email protected]>\n# License: BSD 3 clause\n\nfrom collections import defaultdict\nimport itertools\nimport array\n\nimport numpy as np\nimport scipy.sparse as sp\n\nfrom ..base import BaseEstimator, TransformerMixin\n\nfrom ..utils.fixes import sparse_min_max\nfrom ..utils import column_or_1d\nfrom ..utils.validation import check_array\nfrom ..utils.validation import check_is_fitted\nfrom ..utils.validation import _num_samples\nfrom ..utils.multiclass import unique_labels\nfrom ..utils.multiclass import type_of_target\n\nfrom ..externals import six\n\nzip = six.moves.zip\nmap = six.moves.map\n\n__all__ = [\n 'label_binarize',\n 'LabelBinarizer',\n 'LabelEncoder',\n 'MultiLabelBinarizer',\n]\n\n\nclass LabelEncoder(BaseEstimator, TransformerMixin):\n \"\"\"Encode labels with value between 0 and n_classes-1.\n\n Read more in the :ref:`User Guide <preprocessing_targets>`.\n\n Attributes\n ----------\n classes_ : array of shape (n_class,)\n Holds the label for each class.\n\n Examples\n --------\n `LabelEncoder` can be used to normalize labels.\n\n >>> from sklearn import preprocessing\n >>> le = preprocessing.LabelEncoder()\n >>> le.fit([1, 2, 2, 6])\n LabelEncoder()\n >>> le.classes_\n array([1, 2, 6])\n >>> le.transform([1, 1, 2, 6]) #doctest: +ELLIPSIS\n array([0, 0, 1, 2]...)\n >>> le.inverse_transform([0, 0, 1, 2])\n array([1, 1, 2, 6])\n\n It can also be used to transform non-numerical labels (as long as they are\n hashable and comparable) to numerical labels.\n\n >>> le = preprocessing.LabelEncoder()\n >>> le.fit([\"paris\", \"paris\", \"tokyo\", \"amsterdam\"])\n LabelEncoder()\n >>> list(le.classes_)\n ['amsterdam', 'paris', 'tokyo']\n >>> le.transform([\"tokyo\", \"tokyo\", \"paris\"]) #doctest: +ELLIPSIS\n array([2, 2, 1]...)\n >>> list(le.inverse_transform([2, 2, 1]))\n ['tokyo', 'tokyo', 'paris']\n\n See also\n --------\n sklearn.preprocessing.OneHotEncoder : encode categorical integer features\n using a one-hot aka one-of-K scheme.\n \"\"\"\n\n def fit(self, y):\n \"\"\"Fit label encoder\n\n Parameters\n ----------\n y : array-like of shape (n_samples,)\n Target values.\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n y = column_or_1d(y, warn=True)\n self.classes_ = np.unique(y)\n return self\n\n def fit_transform(self, y):\n \"\"\"Fit label encoder and return encoded labels\n\n Parameters\n ----------\n y : array-like of shape [n_samples]\n Target values.\n\n Returns\n -------\n y : array-like of shape [n_samples]\n \"\"\"\n y = column_or_1d(y, warn=True)\n self.classes_, y = np.unique(y, return_inverse=True)\n return y\n\n def transform(self, y):\n \"\"\"Transform labels to normalized encoding.\n\n Parameters\n ----------\n y : array-like of shape [n_samples]\n Target values.\n\n Returns\n -------\n y : array-like of shape [n_samples]\n \"\"\"\n check_is_fitted(self, 'classes_')\n y = column_or_1d(y, warn=True)\n\n classes = np.unique(y)\n if len(np.intersect1d(classes, self.classes_)) < len(classes):\n diff = np.setdiff1d(classes, self.classes_)\n # raise ValueError(\"y contains new labels: %s\" % str(diff))\n raise ValueError(\"y contains previously unseen labels: % s\" % str(diff))\n return np.searchsorted(self.classes_, y)\n\n def inverse_transform(self, y):\n \"\"\"Transform labels back to original encoding.\n\n Parameters\n 
----------\n y : numpy array of shape [n_samples]\n Target values.\n\n Returns\n -------\n y : numpy array of shape [n_samples]\n \"\"\"\n check_is_fitted(self, 'classes_')\n\n diff = np.setdiff1d(y, np.arange(len(self.classes_)))\n # if diff:\n # raise ValueError(\"y contains new labels: %s\" % str(diff))\n if len(diff):\n raise ValueError(\"y contains previously unseen labels: %s\" % str(diff))\n y = np.asarray(y)\n return self.classes_[y]\n\n\nclass LabelBinarizer(BaseEstimator, TransformerMixin):\n \"\"\"Binarize labels in a one-vs-all fashion\n\n Several regression and binary classification algorithms are\n available in the scikit. A simple way to extend these algorithms\n to the multi-class classification case is to use the so-called\n one-vs-all scheme.\n\n At learning time, this simply consists in learning one regressor\n or binary classifier per class. In doing so, one needs to convert\n multi-class labels to binary labels (belong or does not belong\n to the class). LabelBinarizer makes this process easy with the\n transform method.\n\n At prediction time, one assigns the class for which the corresponding\n model gave the greatest confidence. LabelBinarizer makes this easy\n with the inverse_transform method.\n\n Read more in the :ref:`User Guide <preprocessing_targets>`.\n\n Parameters\n ----------\n\n neg_label : int (default: 0)\n Value with which negative labels must be encoded.\n\n pos_label : int (default: 1)\n Value with which positive labels must be encoded.\n\n sparse_output : boolean (default: False)\n True if the returned array from transform is desired to be in sparse\n CSR format.\n\n Attributes\n ----------\n\n classes_ : array of shape [n_class]\n Holds the label for each class.\n\n y_type_ : str,\n Represents the type of the target data as evaluated by\n utils.multiclass.type_of_target. 
Possible type are 'continuous',\n 'continuous-multioutput', 'binary', 'multiclass',\n 'multiclass-multioutput', 'multilabel-indicator', and 'unknown'.\n\n sparse_input_ : boolean,\n True if the input data to transform is given as a sparse matrix, False\n otherwise.\n\n Examples\n --------\n >>> from sklearn import preprocessing\n >>> lb = preprocessing.LabelBinarizer()\n >>> lb.fit([1, 2, 6, 4, 2])\n LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)\n >>> lb.classes_\n array([1, 2, 4, 6])\n >>> lb.transform([1, 6])\n array([[1, 0, 0, 0],\n [0, 0, 0, 1]])\n\n Binary targets transform to a column vector\n\n >>> lb = preprocessing.LabelBinarizer()\n >>> lb.fit_transform(['yes', 'no', 'no', 'yes'])\n array([[1],\n [0],\n [0],\n [1]])\n\n Passing a 2D matrix for multilabel classification\n\n >>> import numpy as np\n >>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]]))\n LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)\n >>> lb.classes_\n array([0, 1, 2])\n >>> lb.transform([0, 1, 2, 1])\n array([[1, 0, 0],\n [0, 1, 0],\n [0, 0, 1],\n [0, 1, 0]])\n\n See also\n --------\n label_binarize : function to perform the transform operation of\n LabelBinarizer with fixed classes.\n sklearn.preprocessing.OneHotEncoder : encode categorical integer features\n using a one-hot aka one-of-K scheme.\n \"\"\"\n\n def __init__(self, neg_label=0, pos_label=1, sparse_output=False):\n if neg_label >= pos_label:\n raise ValueError(\"neg_label={0} must be strictly less than \"\n \"pos_label={1}.\".format(neg_label, pos_label))\n\n if sparse_output and (pos_label == 0 or neg_label != 0):\n raise ValueError(\"Sparse binarization is only supported with non \"\n \"zero pos_label and zero neg_label, got \"\n \"pos_label={0} and neg_label={1}\"\n \"\".format(pos_label, neg_label))\n\n self.neg_label = neg_label\n self.pos_label = pos_label\n self.sparse_output = sparse_output\n\n def fit(self, y):\n \"\"\"Fit label binarizer\n\n Parameters\n ----------\n y : array of shape [n_samples,] or [n_samples, n_classes]\n Target values. The 2-d matrix should only contain 0 and 1,\n represents multilabel classification.\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n self.y_type_ = type_of_target(y)\n if 'multioutput' in self.y_type_:\n raise ValueError(\"Multioutput target data is not supported with \"\n \"label binarization\")\n if _num_samples(y) == 0:\n raise ValueError('y has 0 samples: %r' % y)\n\n self.sparse_input_ = sp.issparse(y)\n self.classes_ = unique_labels(y)\n return self\n\n def fit_transform(self, y):\n \"\"\"Fit label binarizer and transform multi-class labels to binary\n labels.\n\n The output of transform is sometimes referred to as\n the 1-of-K coding scheme.\n\n Parameters\n ----------\n y : array or sparse matrix of shape [n_samples,] or \\\n [n_samples, n_classes]\n Target values. The 2-d matrix should only contain 0 and 1,\n represents multilabel classification. Sparse matrix can be\n CSR, CSC, COO, DOK, or LIL.\n\n Returns\n -------\n Y : array or CSR matrix of shape [n_samples, n_classes]\n Shape will be [n_samples, 1] for binary problems.\n \"\"\"\n return self.fit(y).transform(y)\n\n def transform(self, y):\n \"\"\"Transform multi-class labels to binary labels\n\n The output of transform is sometimes referred to by some authors as\n the 1-of-K coding scheme.\n\n Parameters\n ----------\n y : array or sparse matrix of shape [n_samples,] or \\\n [n_samples, n_classes]\n Target values. The 2-d matrix should only contain 0 and 1,\n represents multilabel classification. 
Sparse matrix can be\n CSR, CSC, COO, DOK, or LIL.\n\n Returns\n -------\n Y : numpy array or CSR matrix of shape [n_samples, n_classes]\n Shape will be [n_samples, 1] for binary problems.\n \"\"\"\n check_is_fitted(self, 'classes_')\n\n y_is_multilabel = type_of_target(y).startswith('multilabel')\n if y_is_multilabel and not self.y_type_.startswith('multilabel'):\n raise ValueError(\"The object was not fitted with multilabel\"\n \" input.\")\n\n return label_binarize(y, self.classes_,\n pos_label=self.pos_label,\n neg_label=self.neg_label,\n sparse_output=self.sparse_output)\n\n def inverse_transform(self, Y, threshold=None):\n \"\"\"Transform binary labels back to multi-class labels\n\n Parameters\n ----------\n Y : numpy array or sparse matrix with shape [n_samples, n_classes]\n Target values. All sparse matrices are converted to CSR before\n inverse transformation.\n\n threshold : float or None\n Threshold used in the binary and multi-label cases.\n\n Use 0 when ``Y`` contains the output of decision_function\n (classifier).\n Use 0.5 when ``Y`` contains the output of predict_proba.\n\n If None, the threshold is assumed to be half way between\n neg_label and pos_label.\n\n Returns\n -------\n y : numpy array or CSR matrix of shape [n_samples] Target values.\n\n Notes\n -----\n In the case when the binary labels are fractional\n (probabilistic), inverse_transform chooses the class with the\n greatest value. Typically, this allows to use the output of a\n linear model's decision_function method directly as the input\n of inverse_transform.\n \"\"\"\n check_is_fitted(self, 'classes_')\n\n if threshold is None:\n threshold = (self.pos_label + self.neg_label) / 2.\n\n if self.y_type_ == \"multiclass\":\n y_inv = _inverse_binarize_multiclass(Y, self.classes_)\n else:\n y_inv = _inverse_binarize_thresholding(Y, self.y_type_,\n self.classes_, threshold)\n\n if self.sparse_input_:\n y_inv = sp.csr_matrix(y_inv)\n elif sp.issparse(y_inv):\n y_inv = y_inv.toarray()\n\n return y_inv\n\n\ndef label_binarize(y, classes, neg_label=0, pos_label=1, sparse_output=False):\n \"\"\"Binarize labels in a one-vs-all fashion\n\n Several regression and binary classification algorithms are\n available in the scikit. 
A simple way to extend these algorithms\n to the multi-class classification case is to use the so-called\n one-vs-all scheme.\n\n This function makes it possible to compute this transformation for a\n fixed set of class labels known ahead of time.\n\n Parameters\n ----------\n y : array-like\n Sequence of integer labels or multilabel data to encode.\n\n classes : array-like of shape [n_classes]\n Uniquely holds the label for each class.\n\n neg_label : int (default: 0)\n Value with which negative labels must be encoded.\n\n pos_label : int (default: 1)\n Value with which positive labels must be encoded.\n\n sparse_output : boolean (default: False),\n Set to true if output binary array is desired in CSR sparse format\n\n Returns\n -------\n Y : numpy array or CSR matrix of shape [n_samples, n_classes]\n Shape will be [n_samples, 1] for binary problems.\n\n Examples\n --------\n >>> from sklearn.preprocessing import label_binarize\n >>> label_binarize([1, 6], classes=[1, 2, 4, 6])\n array([[1, 0, 0, 0],\n [0, 0, 0, 1]])\n\n The class ordering is preserved:\n\n >>> label_binarize([1, 6], classes=[1, 6, 4, 2])\n array([[1, 0, 0, 0],\n [0, 1, 0, 0]])\n\n Binary targets transform to a column vector\n\n >>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes'])\n array([[1],\n [0],\n [0],\n [1]])\n\n See also\n --------\n LabelBinarizer : class used to wrap the functionality of label_binarize and\n allow for fitting to classes independently of the transform operation\n \"\"\"\n if not isinstance(y, list):\n # XXX Workaround that will be removed when list of list format is\n # dropped\n y = check_array(y, accept_sparse='csr', ensure_2d=False, dtype=None)\n else:\n if _num_samples(y) == 0:\n raise ValueError('y has 0 samples: %r' % y)\n if neg_label >= pos_label:\n raise ValueError(\"neg_label={0} must be strictly less than \"\n \"pos_label={1}.\".format(neg_label, pos_label))\n\n if (sparse_output and (pos_label == 0 or neg_label != 0)):\n raise ValueError(\"Sparse binarization is only supported with non \"\n \"zero pos_label and zero neg_label, got \"\n \"pos_label={0} and neg_label={1}\"\n \"\".format(pos_label, neg_label))\n\n # To account for pos_label == 0 in the dense case\n pos_switch = pos_label == 0\n if pos_switch:\n pos_label = -neg_label\n\n y_type = type_of_target(y)\n if 'multioutput' in y_type:\n raise ValueError(\"Multioutput target data is not supported with label \"\n \"binarization\")\n if y_type == 'unknown':\n raise ValueError(\"The type of target data is not known\")\n\n n_samples = y.shape[0] if sp.issparse(y) else len(y)\n n_classes = len(classes)\n classes = np.asarray(classes)\n\n if y_type == \"binary\":\n if n_classes == 1:\n if sparse_output:\n return sp.csr_matrix((n_samples, 1), dtype=int)\n else:\n Y = np.zeros((len(y), 1), dtype=np.int)\n Y += neg_label\n return Y\n elif len(classes) >= 3:\n y_type = \"multiclass\"\n\n sorted_class = np.sort(classes)\n if (y_type == \"multilabel-indicator\" and classes.size != y.shape[1]):\n raise ValueError(\"classes {0} missmatch with the labels {1}\"\n \"found in the data\".format(classes, unique_labels(y)))\n\n if y_type in (\"binary\", \"multiclass\"):\n y = column_or_1d(y)\n\n # pick out the known labels from y\n y_in_classes = np.in1d(y, classes)\n y_seen = y[y_in_classes]\n indices = np.searchsorted(sorted_class, y_seen)\n indptr = np.hstack((0, np.cumsum(y_in_classes)))\n\n data = np.empty_like(indices)\n data.fill(pos_label)\n Y = sp.csr_matrix((data, indices, indptr),\n shape=(n_samples, n_classes))\n elif 
y_type == \"multilabel-indicator\":\n Y = sp.csr_matrix(y)\n if pos_label != 1:\n data = np.empty_like(Y.data)\n data.fill(pos_label)\n Y.data = data\n else:\n raise ValueError(\"%s target data is not supported with label \"\n \"binarization\" % y_type)\n\n if not sparse_output:\n Y = Y.toarray()\n Y = Y.astype(int, copy=False)\n\n if neg_label != 0:\n Y[Y == 0] = neg_label\n\n if pos_switch:\n Y[Y == pos_label] = 0\n else:\n Y.data = Y.data.astype(int, copy=False)\n\n # preserve label ordering\n if np.any(classes != sorted_class):\n indices = np.searchsorted(sorted_class, classes)\n Y = Y[:, indices]\n\n if y_type == \"binary\":\n if sparse_output:\n Y = Y.getcol(-1)\n else:\n Y = Y[:, -1].reshape((-1, 1))\n\n return Y\n\n\ndef _inverse_binarize_multiclass(y, classes):\n \"\"\"Inverse label binarization transformation for multiclass.\n\n Multiclass uses the maximal score instead of a threshold.\n \"\"\"\n classes = np.asarray(classes)\n\n if sp.issparse(y):\n # Find the argmax for each row in y where y is a CSR matrix\n\n y = y.tocsr()\n n_samples, n_outputs = y.shape\n outputs = np.arange(n_outputs)\n row_max = sparse_min_max(y, 1)[1]\n row_nnz = np.diff(y.indptr)\n\n y_data_repeated_max = np.repeat(row_max, row_nnz)\n # picks out all indices obtaining the maximum per row\n y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data)\n\n # For corner case where last row has a max of 0\n if row_max[-1] == 0:\n y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)])\n\n # Gets the index of the first argmax in each row from y_i_all_argmax\n index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1])\n # first argmax of each row\n y_ind_ext = np.append(y.indices, [0])\n y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]]\n # Handle rows of all 0\n y_i_argmax[np.where(row_nnz == 0)[0]] = 0\n\n # Handles rows with max of 0 that contain negative numbers\n samples = np.arange(n_samples)[(row_nnz > 0) &\n (row_max.ravel() == 0)]\n for i in samples:\n ind = y.indices[y.indptr[i]:y.indptr[i + 1]]\n y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0]\n\n return classes[y_i_argmax]\n else:\n return classes.take(y.argmax(axis=1), mode=\"clip\")\n\n\ndef _inverse_binarize_thresholding(y, output_type, classes, threshold):\n \"\"\"Inverse label binarization transformation using thresholding.\"\"\"\n\n if output_type == \"binary\" and y.ndim == 2 and y.shape[1] > 2:\n raise ValueError(\"output_type='binary', but y.shape = {0}\".\n format(y.shape))\n\n if output_type != \"binary\" and y.shape[1] != len(classes):\n raise ValueError(\"The number of class is not equal to the number of \"\n \"dimension of y.\")\n\n classes = np.asarray(classes)\n\n # Perform thresholding\n if sp.issparse(y):\n if threshold > 0:\n if y.format not in ('csr', 'csc'):\n y = y.tocsr()\n y.data = np.array(y.data > threshold, dtype=np.int)\n y.eliminate_zeros()\n else:\n y = np.array(y.toarray() > threshold, dtype=np.int)\n else:\n y = np.array(y > threshold, dtype=np.int)\n\n # Inverse transform data\n if output_type == \"binary\":\n if sp.issparse(y):\n y = y.toarray()\n if y.ndim == 2 and y.shape[1] == 2:\n return classes[y[:, 1]]\n else:\n if len(classes) == 1:\n return np.repeat(classes[0], len(y))\n else:\n return classes[y.ravel()]\n\n elif output_type == \"multilabel-indicator\":\n return y\n\n else:\n raise ValueError(\"{0} format is not supported\".format(output_type))\n\n\nclass MultiLabelBinarizer(BaseEstimator, TransformerMixin):\n \"\"\"Transform between iterable of iterables and a multilabel 
format\n\n Although a list of sets or tuples is a very intuitive format for multilabel\n data, it is unwieldy to process. This transformer converts between this\n intuitive format and the supported multilabel format: a (samples x classes)\n binary matrix indicating the presence of a class label.\n\n Parameters\n ----------\n classes : array-like of shape [n_classes] (optional)\n Indicates an ordering for the class labels\n\n sparse_output : boolean (default: False),\n Set to true if output binary array is desired in CSR sparse format\n\n Attributes\n ----------\n classes_ : array of labels\n A copy of the `classes` parameter where provided,\n or otherwise, the sorted set of classes found when fitting.\n\n Examples\n --------\n >>> from sklearn.preprocessing import MultiLabelBinarizer\n >>> mlb = MultiLabelBinarizer()\n >>> mlb.fit_transform([(1, 2), (3,)])\n array([[1, 1, 0],\n [0, 0, 1]])\n >>> mlb.classes_\n array([1, 2, 3])\n\n >>> mlb.fit_transform([set(['sci-fi', 'thriller']), set(['comedy'])])\n array([[0, 1, 1],\n [1, 0, 0]])\n >>> list(mlb.classes_)\n ['comedy', 'sci-fi', 'thriller']\n\n See also\n --------\n sklearn.preprocessing.OneHotEncoder : encode categorical integer features\n using a one-hot aka one-of-K scheme.\n \"\"\"\n def __init__(self, classes=None, sparse_output=False):\n self.classes = classes\n self.sparse_output = sparse_output\n\n def fit(self, y):\n \"\"\"Fit the label sets binarizer, storing `classes_`\n\n Parameters\n ----------\n y : iterable of iterables\n A set of labels (any orderable and hashable object) for each\n sample. If the `classes` parameter is set, `y` will not be\n iterated.\n\n Returns\n -------\n self : returns this MultiLabelBinarizer instance\n \"\"\"\n if self.classes is None:\n classes = sorted(set(itertools.chain.from_iterable(y)))\n else:\n classes = self.classes\n dtype = np.int if all(isinstance(c, int) for c in classes) else object\n self.classes_ = np.empty(len(classes), dtype=dtype)\n self.classes_[:] = classes\n return self\n\n def fit_transform(self, y):\n \"\"\"Fit the label sets binarizer and transform the given label sets\n\n Parameters\n ----------\n y : iterable of iterables\n A set of labels (any orderable and hashable object) for each\n sample. If the `classes` parameter is set, `y` will not be\n iterated.\n\n Returns\n -------\n y_indicator : array or CSR matrix, shape (n_samples, n_classes)\n A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in\n `y[i]`, and 0 otherwise.\n \"\"\"\n if self.classes is not None:\n return self.fit(y).transform(y)\n\n # Automatically increment on new class\n class_mapping = defaultdict(int)\n class_mapping.default_factory = class_mapping.__len__\n yt = self._transform(y, class_mapping)\n\n # sort classes and reorder columns\n tmp = sorted(class_mapping, key=class_mapping.get)\n\n # (make safe for tuples)\n dtype = np.int if all(isinstance(c, int) for c in tmp) else object\n class_mapping = np.empty(len(tmp), dtype=dtype)\n class_mapping[:] = tmp\n self.classes_, inverse = np.unique(class_mapping, return_inverse=True)\n # ensure yt.indices keeps its current dtype\n yt.indices = np.array(inverse[yt.indices], dtype=yt.indices.dtype,\n copy=False)\n\n if not self.sparse_output:\n yt = yt.toarray()\n\n return yt\n\n def transform(self, y):\n \"\"\"Transform the given label sets\n\n Parameters\n ----------\n y : iterable of iterables\n A set of labels (any orderable and hashable object) for each\n sample. 
If the `classes` parameter is set, `y` will not be\n iterated.\n\n Returns\n -------\n y_indicator : array or CSR matrix, shape (n_samples, n_classes)\n A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in\n `y[i]`, and 0 otherwise.\n \"\"\"\n check_is_fitted(self, 'classes_')\n\n class_to_index = dict(zip(self.classes_, range(len(self.classes_))))\n yt = self._transform(y, class_to_index)\n\n if not self.sparse_output:\n yt = yt.toarray()\n\n return yt\n\n def _transform(self, y, class_mapping):\n \"\"\"Transforms the label sets with a given mapping\n\n Parameters\n ----------\n y : iterable of iterables\n class_mapping : Mapping\n Maps from label to column index in label indicator matrix\n\n Returns\n -------\n y_indicator : sparse CSR matrix, shape (n_samples, n_classes)\n Label indicator matrix\n \"\"\"\n indices = array.array('i')\n indptr = array.array('i', [0])\n for labels in y:\n indices.extend(set(class_mapping[label] for label in labels))\n indptr.append(len(indices))\n data = np.ones(len(indices), dtype=int)\n\n return sp.csr_matrix((data, indices, indptr),\n shape=(len(indptr) - 1, len(class_mapping)))\n\n def inverse_transform(self, yt):\n \"\"\"Transform the given indicator matrix into label sets\n\n Parameters\n ----------\n yt : array or sparse matrix of shape (n_samples, n_classes)\n A matrix containing only 1s ands 0s.\n\n Returns\n -------\n y : list of tuples\n The set of labels for each sample such that `y[i]` consists of\n `classes_[j]` for each `yt[i, j] == 1`.\n \"\"\"\n check_is_fitted(self, 'classes_')\n\n if yt.shape[1] != len(self.classes_):\n raise ValueError('Expected indicator for {0} classes, but got {1}'\n .format(len(self.classes_), yt.shape[1]))\n\n if sp.issparse(yt):\n yt = yt.tocsr()\n if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0:\n raise ValueError('Expected only 0s and 1s in label indicator.')\n return [tuple(self.classes_.take(yt.indices[start:end]))\n for start, end in zip(yt.indptr[:-1], yt.indptr[1:])]\n else:\n unexpected = np.setdiff1d(yt, [0, 1])\n if len(unexpected) > 0:\n raise ValueError('Expected only 0s and 1s in label indicator. '\n 'Also got {0}'.format(unexpected))\n return [tuple(self.classes_.compress(indicators)) for indicators\n in yt]\n"
] | [
[
"numpy.diff",
"numpy.intersect1d",
"numpy.any",
"numpy.asarray",
"numpy.append",
"numpy.in1d",
"numpy.empty_like",
"numpy.where",
"numpy.flatnonzero",
"numpy.unique",
"numpy.searchsorted",
"numpy.setdiff1d",
"numpy.repeat",
"numpy.arange",
"numpy.sort",
"numpy.cumsum",
"scipy.sparse.issparse",
"scipy.sparse.csr_matrix",
"numpy.array"
]
] |
yangfengKAUST/cnn-text-classification-tf | [
"5f552df9887e57a4bc5638b3d36d7393254d2644"
] | [
"generate_embeddings.py"
] | [
"import numpy as np\nimport pickle\nimport argparse\nimport re\n\n\"\"\"\nConvert pre-trained Glove embeddings into npy file\nRun using:\npython3 generate_embeddings.py -d data/glove.6B.300d.txt --npy_output data/embeddings.npy --dict_output data/vocab.pckl --dict_whitelist data/polaritydata.vocab\n\"\"\"\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--dataset', '-d', type=str, required=True)\n parser.add_argument('--npy_output', type=str, required=True)\n parser.add_argument('--dict_output', type=str, required=True)\n parser.add_argument('--dict_whitelist', type=str, required=True)\n parser.add_argument('--dump_frequency', type=int, default=10000)\n return parser.parse_args()\n\n\ndef main():\n args = parse_args()\n\n # reserve 0 for unknown words\n data = {\n '': 0\n }\n embeddings = [\n np.zeros((300), dtype=np.float32)\n ]\n\n float_re = re.compile(' [-+]?[0-9]*\\.?[0-9]+([eE][-+]?[0-9]+)?')\n\n with open(args.dict_whitelist) as wfile:\n whitelist = [line.strip() for line in wfile]\n\n print(\"Building vocabulary ...\")\n\n with open(args.dataset) as ofile, \\\n open(args.dict_output, 'wb') as dfile, \\\n open(args.npy_output, 'wb') as nfile:\n idx = 1\n for line in ofile:\n pos = next(re.finditer(float_re, line)).start()\n word, vector = line[:pos], line[pos + 1:].split()\n\n if word not in whitelist:\n continue\n\n if word in data:\n print('Possible duplicate at {} in {}'.format(idx, line))\n continue\n\n embedding = np.fromiter([float(d) for d in vector], np.float32)\n\n if embedding.shape != (300,):\n print('Shape is {}'.format(embedding.shape))\n print(line)\n embeddings.append(embedding)\n data[word] = idx\n\n idx += 1\n\n if not idx % args.dump_frequency:\n np.save(nfile, np.array(embeddings))\n embeddings.clear()\n\n np.save(nfile, np.array(embeddings))\n pickle.dump(data, dfile)\n\n print(\"Vocabulary saved, size is {} words\".format(idx))\n\n\nif __name__ == '__main__':\n main()"
] | [
[
"numpy.array",
"numpy.zeros"
]
] |
renmengye/inc-few-shot-attractor-public | [
"c560d5a81480cb22d903fa746ab0cfc2eb964e4c"
] | [
"fewshot/data/compress_tiered_imagenet.py"
] | [
"import cv2\nimport numpy as np\nimport six\nimport sys\nimport pickle as pkl\n\nfrom tqdm import tqdm\n\n\ndef compress(path, output):\n with np.load(path, mmap_mode=\"r\", encoding='latin1') as data:\n images = data[\"images\"]\n array = []\n for ii in tqdm(six.moves.xrange(images.shape[0]), desc='compress'):\n im = images[ii]\n im_str = cv2.imencode('.png', im)[1]\n array.append(im_str)\n with open(output, 'wb') as f:\n pkl.dump(array, f, protocol=pkl.HIGHEST_PROTOCOL)\n\n\ndef decompress(path, output):\n try:\n with open(output, 'rb') as f:\n array = pkl.load(f, encoding='bytes')\n except:\n with open(output, 'rb') as f:\n array = pkl.load(f)\n images = np.zeros([len(array), 84, 84, 3], dtype=np.uint8)\n for ii, item in tqdm(enumerate(array), desc='decompress'):\n im = cv2.imdecode(item, 1)\n images[ii] = im\n np.savez(path, images=images)\n\n\ndef main():\n if sys.argv[1] == 'compress':\n compress(sys.argv[2], sys.argv[3])\n elif sys.argv[1] == 'decompress':\n decompress(sys.argv[2], sys.argv[3])\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.load",
"numpy.savez"
]
] |
bupt-ipcr/RL4Net | [
"b1b694361c688f5e0055148a0cdcb4c6253cd7bd"
] | [
"rl4net/envs/power_allocation/test_pa_rb_env.py"
] | [
"from .pa_rb_env import (\n PAEnv,\n Node\n)\nimport numpy as np\nfrom pathlib import Path\n\nlog2 = np.log2\n\ncues = {\n 0: Node(0.1, 0, 'cue'),\n 1: Node(-0.1, 0, 'cue'),\n}\ndevices = {\n 0: {\n 't_device': Node(0, 0.5, 't_device'),\n 'r_devices': {\n 0: Node(0, 0.6, 'r_device')\n }\n },\n 1: {\n 't_device': Node(0, -0.5, 't_device'),\n 'r_devices': {\n 0: Node(0, -0.6, 'r_device')\n }\n }\n}\n\n\ndef equal(unit, target):\n tolerance = 1e-6 * np.ones_like(target)\n return (np.abs(unit - target) < tolerance).all()\n\n\ndef test_init_pos():\n \"\"\"test position constraint\"\"\"\n env = PAEnv(n_level=4)\n\n def dis(node, target):\n return np.sqrt(\n (node.x - target.x) ** 2 +\n (node.y - target.y) ** 2\n )\n # test bs cues\n assert all(\n env.r_bs <= dis(usr, env.station) <= env.R_bs\n for usr in env.cues.values()\n )\n\n # test devices\n for cluster in env.devices.values():\n t_device, r_devices = cluster['t_device'], cluster['r_devices']\n\n assert env.r_bs <= dis(t_device, env.station) <= (\n env.R_bs - env.R_dev)\n assert all(\n env.r_dev <= dis(r_device, t_device) <= env.R_dev\n for r_device in r_devices.values()\n )\n\n\ndef test_jakes():\n # TODO test stastic features of jakes\n # target_std, target_mean = 0.429, 1.253 # Rayleigh Distribution\n\n # x_len, y_len, Ns = H_set.shape\n # h_std = np.mean([\n # H_set[x, y, :].std()\n # for x in range(x_len)\n # for y in range(y_len)\n # ])\n # assert (h_std - target_std) / target_std < 0.1\n\n # h_mean = np.mean([\n # H_set[x, y, :].mean()\n # for x in range(x_len)\n # for y in range(y_len)\n # ])\n # assert (h_mean - target_mean) / target_mean < 0.05\n pass\n\n\ndef test_init_path_loss():\n \"\"\"test distance, since lognormal is random\"\"\"\n env = PAEnv(n_level=4, n_pair=2, m_cue=2)\n env.cues = cues\n env.devices = devices\n env.init_path_loss()\n distance_matrix = env.distance_matrix\n target_dis = np.array(\n [\n [0.1, 1.1, np.sqrt(0.26), np.sqrt(0.26), 0.5],\n [1.1, 0.1, np.sqrt(0.26), np.sqrt(0.26), 0.5],\n [0.6, 0.6, 0.1, 0.1, 0.503],\n [np.sqrt(0.37), np.sqrt(0.37), 0.503, 0.2, 0.1],\n [np.sqrt(0.37), np.sqrt(0.37), 0.2, 0.503, 0.1],\n ]\n )\n assert equal(distance_matrix, target_dis)\n\n\ndef test_get_recv_powers():\n \"\"\"test get_recv_powers\"\"\"\n env = PAEnv(n_level=4, n_pair=2, m_cue=1)\n power = np.array([\n [0.01, 0],\n [0, 0.01],\n [0.1, 0],\n [0, 0.1],\n ])\n emit_powers = np.tile(np.expand_dims(power, axis=1),\n (1, env.n_channel, 1))\n fading = np.array([\n [1.1e-2, 1.2e-2, 1.3e-2, 1.4e-2],\n [2.1e-2, 2.2e-2, 2.3e-2, 2.4e-2],\n [3.1e-2, 3.2e-2, 3.3e-2, 3.4e-2],\n [4.1e-2, 4.2e-2, 4.3e-2, 4.4e-2],\n ])\n recv_powers = env.get_recv_powers(emit_powers, fading)\n target_recv_powers = np.array([\n [[1.1e-4, 0], [1.2e-4, 0], [1.3e-4, 0], [1.4e-4, 0]],\n [[0, 2.1e-4], [0, 2.2e-4], [0, 2.3e-4], [0, 2.4e-4]],\n [[3.1e-3, 0], [3.2e-3, 0], [3.3e-3, 0], [3.4e-3, 0]],\n [[0, 4.1e-3], [0, 4.2e-3], [0, 4.3e-3], [0, 4.4e-3]],\n ])\n assert equal(recv_powers, target_recv_powers)\n\n\ndef test_get_rates():\n \"\"\"test get_rates\"\"\"\n env = PAEnv(n_level=4, n_pair=2, m_cue=1)\n recv_powers = np.array([\n [[1.1e-4, 0], [1.2e-4, 0], [1.3e-4, 0], [1.4e-4, 0]],\n [[0, 2.1e-4], [0, 2.2e-4], [0, 2.3e-4], [0, 2.4e-4]],\n [[3.1e-3, 0], [3.2e-3, 0], [3.3e-3, 0], [3.4e-3, 0]],\n [[0, 4.1e-3], [0, 4.2e-3], [0, 4.3e-3], [0, 4.4e-3]],\n ])\n rates = env.get_rates(recv_powers)\n _rate = np.array([\n log2(1+1.1/31), log2(1+2.2/42), log2(1+33/1.3), log2(1+44/2.4)\n ])\n target_rates = (_rate * np.ones((env.n_channel, env.n_channel))).T\n assert 
equal(rates, target_rates)\n\n\ndef test_get_indices():\n \"\"\"test get_indices\"\"\"\n env = PAEnv(n_level=4, n_pair=2, m_cue=1, sorter=\"recv_power\",\n m_state=2)\n power = np.array([\n [0.01, 0],\n [0, 0.01],\n [0.1, 0],\n [0, 0.1],\n ])\n emit_powers = np.tile(np.expand_dims(power, axis=1),\n (1, env.n_channel, 1))\n fading = np.array([\n [1.1e-2, 1.2e-2, 1.3e-2, 1.4e-2],\n [2.1e-2, 2.2e-2, 2.3e-2, 2.4e-2],\n [3.1e-2, 3.2e-2, 3.3e-2, 3.4e-2],\n [4.1e-2, 4.2e-2, 4.3e-2, 4.4e-2],\n ])\n recv_powers = env.get_recv_powers(emit_powers, fading)\n rates = env.get_rates(recv_powers)\n metrics = emit_powers, recv_powers, rates, fading\n # rx_indice don't need test\n tx_indice, rx_indice = env.get_indices(*metrics)\n target_tx_indice = np.array([\n [3, 3, 3, 2],\n [0, 1, 2, 3]\n ])\n assert equal(tx_indice, target_tx_indice)\n\n\ndef test_get_rewards():\n env = PAEnv(n_level=4, n_pair=2, m_cue=1, sorter=\"recv_power\",\n m_state=2)\n power = np.array([\n [0.01, 0],\n [0, 0.01],\n [0.1, 0],\n [0, 0.1],\n ])\n emit_powers = np.tile(np.expand_dims(power, axis=1),\n (1, env.n_channel, 1))\n fading = np.array([\n [1.1e-2, 1.2e-2, 1.3e-2, 1.4e-2],\n [2.1e-2, 2.2e-2, 2.3e-2, 2.4e-2],\n [3.1e-2, 3.2e-2, 3.3e-2, 3.4e-2],\n [4.1e-2, 4.2e-2, 4.3e-2, 4.4e-2],\n ])\n recv_powers = env.get_recv_powers(emit_powers, fading)\n rates = env.get_rates(recv_powers)\n\n metrics = emit_powers, recv_powers, rates, fading\n indices = env.get_indices(*metrics)\n rewards = env.get_rewards(rates, indices)\n target_rewards = np.array([\n log2(1+1.1/31) + log2(1+44/2.4),\n log2(1+2.2/42) + log2(1+44/2.4),\n log2(1+33/1.3) + log2(1+44/2.4),\n log2(1+44/2.4) + log2(1+33/1.3),\n ])[:2]\n\n assert equal(rewards, target_rewards)\n\n\ndef test_get_states():\n # test m_state\n env = PAEnv(n_level=4, n_pair=2, m_cue=1,\n m_state=8, metrics=['emit_power', 'recv_power', 'rate'],\n sorter='recv_power')\n assert env.m_state == 4\n\n env = PAEnv(n_level=4, n_pair=2, m_cue=1,\n m_state=2, metrics=['emit_power', 'recv_power', 'rate'],\n sorter='recv_power')\n power = np.array([\n [0.01, 0],\n [0, 0.01],\n [0.1, 0],\n [0, 0.1],\n ])\n emit_powers = np.tile(np.expand_dims(power, axis=1),\n (1, env.n_channel, 1))\n fading = np.array([\n [1.1e-2, 1.2e-2, 1.3e-2, 1.4e-2],\n [2.1e-2, 2.2e-2, 2.3e-2, 2.4e-2],\n [3.1e-2, 3.2e-2, 3.3e-2, 3.4e-2],\n [4.1e-2, 4.2e-2, 4.3e-2, 4.4e-2],\n ])\n recv_powers = env.get_recv_powers(emit_powers, fading)\n rates = env.get_rates(recv_powers)\n\n metrics = emit_powers, recv_powers, rates, fading\n indices = env.get_indices(*metrics)\n states = env.get_states(*metrics, indices=indices)\n _recv = np.array([\n [[1.1e-4, 0], [1.2e-4, 0], [1.3e-4, 0], [1.4e-4, 0]],\n [[0, 2.1e-4], [0, 2.2e-4], [0, 2.3e-4], [0, 2.4e-4]],\n [[3.1e-3, 0], [3.2e-3, 0], [3.3e-3, 0], [3.4e-3, 0]],\n [[0, 4.1e-3], [0, 4.2e-3], [0, 4.3e-3], [0, 4.4e-3]],\n ])\n _rate = np.array([\n log2(1+1.1/31), log2(1+2.2/42), log2(1+33/1.3), log2(1+44/2.4)\n ])\n target_states = np.array([\n np.concatenate([power[3],power[0],_recv[3][0],_recv[0][0],[_rate[3], _rate[0]]]),\n np.concatenate([power[3],power[1],_recv[3][1],_recv[1][1],[_rate[3], _rate[1]]]),\n np.concatenate([power[3],power[2],_recv[3][2],_recv[2][2],[_rate[3], _rate[2]]]),\n np.concatenate([power[2],power[3],_recv[2][3],_recv[3][3],[_rate[2], _rate[3]]]),\n ])[:2]\n assert equal(states, target_states)\n\n\ndef test_sorter():\n # now only recv_power can be sorter\n pass\n\n\ndef test_seed():\n env = PAEnv(n_level=4, m_cue=1, seed=123)\n # this is func in PAEnv to random pos\n\n def 
random_point(min_r, radius, ox=0, oy=0):\n theta = np.random.random() * 2 * np.pi\n r = np.random.uniform(min_r, radius**2)\n x, y = np.cos(theta) * np.sqrt(r), np.sin(theta) * np.sqrt(r)\n return ox + x, oy + y\n np.random.seed(123)\n target_x, target_y = random_point(env.r_bs, env.R_bs)\n usr = env.cues[0]\n assert all((target_x == usr.x, target_y == usr.y))\n\n\ndef test_action():\n env = PAEnv(n_level=10, seed=799345)\n n_actions = env.n_actions\n n_channel, n_pair = env.n_channel, env.n_pair\n # normal\n env.reset()\n np.random.seed(799345)\n action = np.random.randint(0, n_actions, (n_channel, ))\n s_, r, d, i = env.step(action, unit='dBm')\n assert i['rate'] == 3.4741923099965257\n # only D2D actions is enough\n env.reset()\n np.random.seed(799345)\n action = np.random.randint(0, n_actions, (n_pair, ))\n s_, r, d, i = env.step(action, unit='dBm')\n assert i['rate'] == 3.4741923099965257 \n # other action dim raises error\n env.reset()\n np.random.seed(799345)\n action = np.random.randint(0, n_actions, (n_pair - 1, ))\n try:\n s_, r, d, i = env.step(action, unit='dBm')\n except ValueError as e:\n msg = f\"length of action should be n_channel({env.n_channel})\" \\\n f\" or n_pair({n_pair}), but is {len(action)}\"\n assert e.args[0] == msg\n\n env.reset()\n np.random.seed(799345)\n action = np.random.randint(0, n_actions, (n_channel, ))\n s_, r, d, i = env.step(action, unit='mW')\n assert i['rate'] == 3.4928823957853856\n # TODO add test of continuous action\n\n\ndef test_step():\n env = PAEnv(n_level=10)\n n_actions, n_states = env.n_actions, env.n_states\n assert n_actions == 40\n assert n_states == 304\n env.reset()\n action = env.sample()\n env.step(action, unit='dBm')\n # action = env.sample()\n action = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0])\n env.step(action, unit='mW')\n action = env.sample()\n try:\n env.step(action, unit='xx')\n except ValueError as e:\n msg = f\"unit should in ['dBm', 'mW'], but is xx\"\n assert e.args[0] == msg\n fig: Path() = env.render()\n if fig.exists():\n fig.unlink()\n\n\nif __name__ == '__main__':\n test_action()\n"
] | [
[
"numpy.sqrt",
"numpy.random.uniform",
"numpy.ones",
"numpy.ones_like",
"numpy.random.seed",
"numpy.abs",
"numpy.cos",
"numpy.random.random",
"numpy.expand_dims",
"numpy.array",
"numpy.sin",
"numpy.concatenate",
"numpy.random.randint"
]
] |
Chicco94/crypto-bot | [
"edbc22477544a25d8eb0c90cdd5f03345f11db68"
] | [
"src/trainer.py"
] | [
"from sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\nimport sqlalchemy\nfrom config.config import symbol,backward_steps\nimport joblib\nfrom df_functions import *\n\ndef prepare_single_dataset(df,remove_from_heads:int,remove_from_tails:int,label:int):\n df_copy = df.copy()\n for _ in range(remove_from_tails):\n remove_row_from_tail(df_copy)\n for _ in range(remove_from_heads):\n remove_row_from_head(df_copy)\n add_id(df_copy)\n df_copy.time = df_copy.time.apply(lambda x: x.value)\n df_copy.rename(columns={\"time\": \"time{}\".format(label)\n , \"price\": \"price{}\".format(label)\n , \"quantity\":\"quantity{}\".format(label)}\n ,inplace=True)\n df_copy.drop(columns=['symbol'],inplace=True)\n return df_copy\n \n\ndef prepare_dataset(df,steps:int):\n datasets = []\n for i in range(1,steps):\n datasets.append(prepare_single_dataset(df,steps-i,i-1,i))\n df_target = prepare_single_dataset(df,0,steps-1,steps)\n\n result = datasets.pop()\n while len(datasets)>0:\n result = pd.merge(result, datasets.pop(), on=\"ID\")\n\n target = df_target['price{}'.format(steps)]\n return result,target\n\ndef main():\n # open database\n engine = sqlalchemy.create_engine('sqlite:///data/{}_stream.db'.format(symbol))\n df = pd.read_sql(symbol,engine)\n # prepare dataset\n source,target = prepare_dataset(df,backward_steps)\n # train model\n model = LinearRegression()\n X_train,X_test,y_train,y_test = train_test_split(source,target,test_size=0.33)\n model.fit(X_train,y_train)\n # evaluate model\n score = model.score(X_test,y_test)\n print('score: ',score)\n # save model\n filename = 'models/model_{}.sav'.format(score)\n joblib.dump(model, filename)\n\n #model = joblib.load(filename)\n\nif __name__=='__main__':\n main() "
] | [
[
"pandas.read_sql",
"sklearn.model_selection.train_test_split",
"sklearn.linear_model.LinearRegression"
]
] |
robinzixuan/Dialog_Act_Bert_Classification | [
"014cc8df0545e5bf85a22127e63e8490f3aa9012"
] | [
"data & result/history.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jun 30 20:49:32 2019\n\n@author: rluo\n\"\"\"\n\nimport keras\nimport matplotlib.pyplot as plt\nfrom keras.models import load_model\nimport pickle\n\nhistory = pickle.load(open('history.p','rb'))\nplt.plot(history['loss'])\n#plt.plot(history['val_loss'])\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('Epoch')\nplt.legend(['train', 'test'], loc='upper left');\nplt.plot(history['acc'])\n#plt.plot(history['val_acc'])\nplt.title('model accuracy')\nplt.ylabel('acc')\nplt.xlabel('Epoch')\nplt.legend(['train', 'test'], loc='upper left');\n\n\n\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel"
]
] |
kareem1925/qiskit-aqua | [
"7056f9bdd9ece32c41e162faecdcd24cf483da6f"
] | [
"test/optimization/test_vertex_cover.py"
] | [
"# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2018, 2020.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\" Test Vertex Cover \"\"\"\n\nimport unittest\nfrom test.optimization import QiskitOptimizationTestCase\nimport numpy as np\nfrom qiskit import BasicAer\n\nfrom qiskit.aqua import aqua_globals, QuantumInstance\nfrom qiskit.optimization.applications.ising import vertex_cover\nfrom qiskit.optimization.applications.ising.common import random_graph, sample_most_likely\nfrom qiskit.aqua.algorithms import NumPyMinimumEigensolver, VQE\nfrom qiskit.aqua.components.variational_forms import RYRZ\nfrom qiskit.aqua.components.optimizers import SPSA\n\n\nclass TestVertexCover(QiskitOptimizationTestCase):\n \"\"\"Cplex Ising tests.\"\"\"\n\n def setUp(self):\n super().setUp()\n self.seed = 100\n aqua_globals.random_seed = self.seed\n self.num_nodes = 3\n self.w = random_graph(self.num_nodes, edge_prob=0.8, weight_range=10)\n self.qubit_op, self.offset = vertex_cover.get_operator(self.w)\n\n def _brute_force(self):\n # brute-force way\n def bitfield(n, length):\n result = np.binary_repr(n, length)\n return [int(digit) for digit in result] # [2:] to chop off the \"0b\" part\n\n nodes = self.num_nodes\n maximum = 2 ** nodes\n minimal_v = np.inf\n for i in range(maximum):\n cur = bitfield(i, nodes)\n\n cur_v = vertex_cover.check_full_edge_coverage(np.array(cur), self.w)\n if cur_v:\n nonzerocount = np.count_nonzero(cur)\n if nonzerocount < minimal_v:\n minimal_v = nonzerocount\n\n return minimal_v\n\n def test_vertex_cover(self):\n \"\"\" Vertex Cover test \"\"\"\n algo = NumPyMinimumEigensolver(self.qubit_op, aux_operators=[])\n result = algo.run()\n x = sample_most_likely(result.eigenstate)\n sol = vertex_cover.get_graph_solution(x)\n np.testing.assert_array_equal(sol, [0, 1, 1])\n oracle = self._brute_force()\n self.assertEqual(np.count_nonzero(sol), oracle)\n\n def test_vertex_cover_vqe(self):\n \"\"\" Vertex Cover VQE test \"\"\"\n aqua_globals.random_seed = self.seed\n\n result = VQE(self.qubit_op,\n RYRZ(self.qubit_op.num_qubits, depth=3),\n SPSA(max_trials=200),\n max_evals_grouped=2).run(\n QuantumInstance(BasicAer.get_backend('qasm_simulator'),\n seed_simulator=aqua_globals.random_seed,\n seed_transpiler=aqua_globals.random_seed))\n\n x = sample_most_likely(result['eigvecs'][0])\n sol = vertex_cover.get_graph_solution(x)\n oracle = self._brute_force()\n self.assertEqual(np.count_nonzero(sol), oracle)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"numpy.array",
"numpy.count_nonzero",
"numpy.testing.assert_array_equal",
"numpy.binary_repr"
]
] |
oliverfaustino/NRPG-DataManager | [
"71064cb79be304f712aabcceebd6647121d2cb6c"
] | [
"modulos/utils.py"
] | [
"import pyperclip\nimport pandas as pd\n\nfrom modulos.conecao import *\n\n\n\ndef copiar(objeto): # function to copy objects to the clipboard\n    global copiar # to work around the problem UnboundLocalError: local variable 'copiar' referenced before assignment\n    opcao = int(input('Deseja copiar para área de transferência? \"1\" para sim e qualquer tecla para não\\n\\nR: ')) \n    if opcao == 1:\n        copiar = pyperclip.copy(objeto)\n        print('\\nCopiado com sucesso!') \n    else:\n        pass\n    return copiar\n\n\n\n\ndef select(sql): # function that detects which type of action I want to perform\n    try:\n        df = pd.read_sql_query(sql, con=engine).to_string(index=False)\n    \n    finally:\n        pass\n\n    return df\n"
] | [
[
"pandas.read_sql_query"
]
] |
ljch2018/allennlp | [
"63ba3fb28897578d4798039d1713e2b7995eb753"
] | [
"allennlp/models/semantic_parsing/atis/atis_semantic_parser.py"
] | [
"import logging\nfrom typing import Any, Dict, List, Tuple\n\nimport difflib\nimport sqlparse\nfrom overrides import overrides\nimport torch\n\nfrom allennlp.common.util import pad_sequence_to_length\nfrom allennlp.data import Vocabulary\nfrom allennlp.data.fields.production_rule_field import ProductionRuleArray\nfrom allennlp.semparse.executors import SqlExecutor\nfrom allennlp.models.model import Model\nfrom allennlp.modules import Attention, Seq2SeqEncoder, TextFieldEmbedder, \\\n Embedding\nfrom allennlp.nn import util\nfrom allennlp.semparse.worlds import AtisWorld\nfrom allennlp.semparse.contexts.sql_context_utils import action_sequence_to_sql\nfrom allennlp.state_machines.states import GrammarBasedState\nfrom allennlp.state_machines.transition_functions.linking_transition_function import LinkingTransitionFunction\nfrom allennlp.state_machines import BeamSearch\nfrom allennlp.state_machines.trainers import MaximumMarginalLikelihood\nfrom allennlp.state_machines.states import GrammarStatelet, RnnStatelet\nfrom allennlp.training.metrics import Average\n\nlogger = logging.getLogger(__name__) # pylint: disable=invalid-name\n\n\[email protected](\"atis_parser\")\nclass AtisSemanticParser(Model):\n \"\"\"\n Parameters\n ----------\n vocab : ``Vocabulary``\n utterance_embedder : ``TextFieldEmbedder``\n Embedder for utterances.\n action_embedding_dim : ``int``\n Dimension to use for action embeddings.\n encoder : ``Seq2SeqEncoder``\n The encoder to use for the input utterance.\n decoder_beam_search : ``BeamSearch``\n Beam search used to retrieve best sequences after training.\n max_decoding_steps : ``int``\n When we're decoding with a beam search, what's the maximum number of steps we should take?\n This only applies at evaluation time, not during training.\n input_attention: ``Attention``\n We compute an attention over the input utterance at each step of the decoder, using the\n decoder hidden state as the query. Passed to the transition function.\n add_action_bias : ``bool``, optional (default=True)\n If ``True``, we will learn a bias weight for each action that gets used when predicting\n that action, in addition to its embedding.\n dropout : ``float``, optional (default=0)\n If greater than 0, we will apply dropout with this probability after all encoders (pytorch\n LSTMs do not apply dropout to their last layer).\n rule_namespace : ``str``, optional (default=rule_labels)\n The vocabulary namespace to use for production rules. The default corresponds to the\n default used in the dataset reader, so you likely don't need to modify this.\n database_file: ``str``, optional (default=/atis/atis.db)\n The path of the SQLite database when evaluating SQL queries. 
SQLite is disk based, so we need\n the file location to connect to it.\n \"\"\"\n def __init__(self,\n vocab: Vocabulary,\n utterance_embedder: TextFieldEmbedder,\n action_embedding_dim: int,\n encoder: Seq2SeqEncoder,\n decoder_beam_search: BeamSearch,\n max_decoding_steps: int,\n input_attention: Attention,\n add_action_bias: bool = True,\n training_beam_size: int = None,\n dropout: float = 0.0,\n rule_namespace: str = 'rule_labels',\n database_file='/atis/atis.db') -> None:\n # Atis semantic parser init\n super().__init__(vocab)\n self._utterance_embedder = utterance_embedder\n self._encoder = encoder\n self._max_decoding_steps = max_decoding_steps\n self._add_action_bias = add_action_bias\n if dropout > 0:\n self._dropout = torch.nn.Dropout(p=dropout)\n else:\n self._dropout = lambda x: x\n self._rule_namespace = rule_namespace\n self._exact_match = Average()\n self._valid_sql_query = Average()\n self._action_similarity = Average()\n self._denotation_accuracy = Average()\n\n self._executor = SqlExecutor(database_file)\n self._action_padding_index = -1 # the padding value used by IndexField\n num_actions = vocab.get_vocab_size(self._rule_namespace)\n if self._add_action_bias:\n input_action_dim = action_embedding_dim + 1\n else:\n input_action_dim = action_embedding_dim\n self._action_embedder = Embedding(num_embeddings=num_actions, embedding_dim=input_action_dim)\n self._output_action_embedder = Embedding(num_embeddings=num_actions, embedding_dim=action_embedding_dim)\n\n\n # This is what we pass as input in the first step of decoding, when we don't have a\n # previous action, or a previous utterance attention.\n self._first_action_embedding = torch.nn.Parameter(torch.FloatTensor(action_embedding_dim))\n self._first_attended_utterance = torch.nn.Parameter(torch.FloatTensor(encoder.get_output_dim()))\n torch.nn.init.normal_(self._first_action_embedding)\n torch.nn.init.normal_(self._first_attended_utterance)\n\n self._num_entity_types = 2 # TODO(kevin): get this in a more principled way somehow?\n self._entity_type_decoder_embedding = Embedding(self._num_entity_types, action_embedding_dim)\n\n self._beam_search = decoder_beam_search\n self._decoder_trainer = MaximumMarginalLikelihood(training_beam_size)\n self._transition_function = LinkingTransitionFunction(encoder_output_dim=self._encoder.get_output_dim(),\n action_embedding_dim=action_embedding_dim,\n input_attention=input_attention,\n predict_start_type_separately=False,\n add_action_bias=self._add_action_bias,\n dropout=dropout)\n\n @overrides\n def forward(self, # type: ignore\n utterance: Dict[str, torch.LongTensor],\n world: List[AtisWorld],\n actions: List[List[ProductionRuleArray]],\n linking_scores: torch.Tensor,\n target_action_sequence: torch.LongTensor = None,\n sql_queries: List[List[str]] = None) -> Dict[str, torch.Tensor]:\n # pylint: disable=arguments-differ\n \"\"\"\n We set up the initial state for the decoder, and pass that state off to either a DecoderTrainer,\n if we're training, or a BeamSearch for inference, if we're not.\n\n Parameters\n ----------\n utterance : Dict[str, torch.LongTensor]\n The output of ``TextField.as_array()`` applied on the utterance ``TextField``. This will\n be passed through a ``TextFieldEmbedder`` and then through an encoder.\n world : ``List[AtisWorld]``\n We use a ``MetadataField`` to get the ``World`` for each input instance. 
Because of\n how ``MetadataField`` works, this gets passed to us as a ``List[AtisWorld]``,\n actions : ``List[List[ProductionRuleArray]]``\n A list of all possible actions for each ``World`` in the batch, indexed into a\n ``ProductionRuleArray`` using a ``ProductionRuleField``. We will embed all of these\n and use the embeddings to determine which action to take at each timestep in the\n decoder.\n linking_scores: ``torch.Tensor``\n A matrix of the linking the utterance tokens and the entities. This is a binary matrix that\n is deterministically generated where each entry indicates whether a token generated an entity.\n This tensor has shape ``(batch_size, num_entities, num_utterance_tokens)``.\n target_action_sequence : torch.Tensor, optional (default=None)\n The action sequence for the correct action sequence, where each action is an index into the list\n of possible actions. This tensor has shape ``(batch_size, sequence_length, 1)``. We remove the\n trailing dimension.\n sql_queries : List[List[str]], optional (default=None)\n A list of the SQL queries that are given during training or validation.\n \"\"\"\n initial_state = self._get_initial_state(utterance, world, actions, linking_scores)\n batch_size = linking_scores.shape[0]\n if target_action_sequence is not None:\n # Remove the trailing dimension (from ListField[ListField[IndexField]]).\n target_action_sequence = target_action_sequence.squeeze(-1)\n target_mask = target_action_sequence != self._action_padding_index\n else:\n target_mask = None\n\n if self.training:\n # target_action_sequence is of shape (batch_size, 1, sequence_length) here after we unsqueeze it for\n # the MML trainer.\n return self._decoder_trainer.decode(initial_state,\n self._transition_function,\n (target_action_sequence.unsqueeze(1), target_mask.unsqueeze(1)))\n else:\n # TODO(kevin) Move some of this functionality to a separate method for computing validation outputs.\n action_mapping = {}\n for batch_index, batch_actions in enumerate(actions):\n for action_index, action in enumerate(batch_actions):\n action_mapping[(batch_index, action_index)] = action[0]\n outputs: Dict[str, Any] = {'action_mapping': action_mapping}\n outputs['linking_scores'] = linking_scores\n if target_action_sequence is not None:\n outputs['loss'] = self._decoder_trainer.decode(initial_state,\n self._transition_function,\n (target_action_sequence.unsqueeze(1),\n target_mask.unsqueeze(1)))['loss']\n num_steps = self._max_decoding_steps\n # This tells the state to start keeping track of debug info, which we'll pass along in\n # our output dictionary.\n initial_state.debug_info = [[] for _ in range(batch_size)]\n best_final_states = self._beam_search.search(num_steps,\n initial_state,\n self._transition_function,\n keep_final_unfinished_states=False)\n outputs['best_action_sequence'] = []\n outputs['debug_info'] = []\n outputs['entities'] = []\n outputs['predicted_sql_query'] = []\n outputs['sql_queries'] = []\n outputs['utterance'] = []\n outputs['tokenized_utterance'] = []\n\n for i in range(batch_size):\n # Decoding may not have terminated with any completed valid SQL queries, if `num_steps`\n # isn't long enough (or if the model is not trained enough and gets into an\n # infinite action loop).\n if i not in best_final_states:\n self._exact_match(0)\n self._denotation_accuracy(0)\n self._valid_sql_query(0)\n self._action_similarity(0)\n outputs['predicted_sql_query'].append('')\n continue\n\n best_action_indices = best_final_states[i][0].action_history[0]\n\n action_strings = 
[action_mapping[(i, action_index)]\n for action_index in best_action_indices]\n predicted_sql_query = action_sequence_to_sql(action_strings)\n\n if target_action_sequence is not None:\n # Use a Tensor, not a Variable, to avoid a memory leak.\n targets = target_action_sequence[i].data\n sequence_in_targets = 0\n sequence_in_targets = self._action_history_match(best_action_indices, targets)\n self._exact_match(sequence_in_targets)\n\n similarity = difflib.SequenceMatcher(None, best_action_indices, targets)\n self._action_similarity(similarity.ratio())\n\n if sql_queries and sql_queries[i]:\n denotation_correct = self._executor.evaluate_sql_query(predicted_sql_query, sql_queries[i])\n self._denotation_accuracy(denotation_correct)\n outputs['sql_queries'].append(sql_queries[i])\n\n outputs['utterance'].append(world[i].utterances[-1])\n outputs['tokenized_utterance'].append([token.text\n for token in world[i].tokenized_utterances[-1]])\n outputs['entities'].append(world[i].entities)\n outputs['best_action_sequence'].append(action_strings)\n outputs['predicted_sql_query'].append(sqlparse.format(predicted_sql_query, reindent=True))\n outputs['debug_info'].append(best_final_states[i][0].debug_info[0]) # type: ignore\n return outputs\n\n def _get_initial_state(self,\n utterance: Dict[str, torch.LongTensor],\n worlds: List[AtisWorld],\n actions: List[List[ProductionRuleArray]],\n linking_scores: torch.Tensor) -> GrammarBasedState:\n embedded_utterance = self._utterance_embedder(utterance)\n utterance_mask = util.get_text_field_mask(utterance).float()\n\n batch_size = embedded_utterance.size(0)\n num_entities = max([len(world.entities) for world in worlds])\n\n # entity_types: tensor with shape (batch_size, num_entities)\n entity_types, _ = self._get_type_vector(worlds, num_entities, embedded_utterance)\n\n # (batch_size, num_utterance_tokens, embedding_dim)\n encoder_input = embedded_utterance\n\n # (batch_size, utterance_length, encoder_output_dim)\n encoder_outputs = self._dropout(self._encoder(encoder_input, utterance_mask))\n\n # This will be our initial hidden state and memory cell for the decoder LSTM.\n final_encoder_output = util.get_final_encoder_states(encoder_outputs,\n utterance_mask,\n self._encoder.is_bidirectional())\n memory_cell = encoder_outputs.new_zeros(batch_size, self._encoder.get_output_dim())\n initial_score = embedded_utterance.data.new_zeros(batch_size)\n\n # To make grouping states together in the decoder easier, we convert the batch dimension in\n # all of our tensors into an outer list. For instance, the encoder outputs have shape\n # `(batch_size, utterance_length, encoder_output_dim)`. We need to convert this into a list\n # of `batch_size` tensors, each of shape `(utterance_length, encoder_output_dim)`. 
Then we\n # won't have to do any index selects, or anything, we'll just do some `torch.cat()`s.\n initial_score_list = [initial_score[i] for i in range(batch_size)]\n encoder_output_list = [encoder_outputs[i] for i in range(batch_size)]\n utterance_mask_list = [utterance_mask[i] for i in range(batch_size)]\n initial_rnn_state = []\n for i in range(batch_size):\n initial_rnn_state.append(RnnStatelet(final_encoder_output[i],\n memory_cell[i],\n self._first_action_embedding,\n self._first_attended_utterance,\n encoder_output_list,\n utterance_mask_list))\n\n initial_grammar_state = [self._create_grammar_state(worlds[i],\n actions[i],\n linking_scores[i],\n entity_types[i])\n for i in range(batch_size)]\n\n initial_state = GrammarBasedState(batch_indices=list(range(batch_size)),\n action_history=[[] for _ in range(batch_size)],\n score=initial_score_list,\n rnn_state=initial_rnn_state,\n grammar_state=initial_grammar_state,\n possible_actions=actions,\n debug_info=None)\n return initial_state\n\n @staticmethod\n def _get_type_vector(worlds: List[AtisWorld],\n num_entities: int,\n tensor: torch.Tensor = None) -> Tuple[torch.LongTensor, Dict[int, int]]:\n \"\"\"\n Produces the encoding for each entity's type. In addition, a map from a flattened entity\n index to type is returned to combine entity type operations into one method.\n\n Parameters\n ----------\n worlds : ``List[AtisWorld]``\n num_entities : ``int``\n tensor : ``torch.Tensor``\n Used for copying the constructed list onto the right device.\n\n Returns\n -------\n A ``torch.LongTensor`` with shape ``(batch_size, num_entities, num_types)``.\n entity_types : ``Dict[int, int]``\n This is a mapping from ((batch_index * num_entities) + entity_index) to entity type id.\n \"\"\"\n entity_types = {}\n batch_types = []\n\n for batch_index, world in enumerate(worlds):\n types = []\n entities = [('number', entity)\n if 'number' or 'time_range' in entity\n else ('string', entity)\n for entity in world.entities]\n\n for entity_index, entity in enumerate(entities):\n # We need numbers to be first, then strings, since our entities are going to be\n # sorted. 
We do a split by type and then a merge later, and it relies on this sorting.\n if entity[0] == 'number':\n entity_type = 1\n else:\n entity_type = 0\n types.append(entity_type)\n\n # For easier lookups later, we're actually using a _flattened_ version\n # of (batch_index, entity_index) for the key, because this is how the\n # linking scores are stored.\n flattened_entity_index = batch_index * num_entities + entity_index\n entity_types[flattened_entity_index] = entity_type\n padded = pad_sequence_to_length(types, num_entities, lambda: 0)\n batch_types.append(padded)\n\n return tensor.new_tensor(batch_types, dtype=torch.long), entity_types\n\n @staticmethod\n def _action_history_match(predicted: List[int], targets: torch.LongTensor) -> int:\n # TODO(mattg): this could probably be moved into a FullSequenceMatch metric, or something.\n # Check if target is big enough to cover prediction (including start/end symbols)\n if len(predicted) > targets.size(0):\n return 0\n predicted_tensor = targets.new_tensor(predicted)\n targets_trimmed = targets[:len(predicted)]\n # Return 1 if the predicted sequence is anywhere in the list of targets.\n return predicted_tensor.equal(targets_trimmed)\n\n @staticmethod\n def is_nonterminal(token: str):\n if token[0] == '\"' and token[-1] == '\"':\n return False\n return True\n\n\n @overrides\n def get_metrics(self, reset: bool = False) -> Dict[str, float]:\n \"\"\"\n We track four metrics here:\n\n 1. exact_match, which is the percentage of the time that our best output action sequence\n matches the SQL query exactly.\n\n 2. denotation_acc, which is the percentage of examples where we get the correct\n denotation. This is the typical \"accuracy\" metric, and it is what you should usually\n report in an experimental result. You need to be careful, though, that you're\n computing this on the full data, and not just the subset that can be parsed. (make sure\n you pass \"keep_if_unparseable=True\" to the dataset reader, which we do for validation data,\n but not training data).\n\n 3. valid_sql_query, which is the percentage of time that decoding actually produces a\n valid SQL query. We might not produce a valid SQL query if the decoder gets\n into a repetitive loop, or we're trying to produce a super long SQL query and run\n out of time steps, or something.\n\n 4. action_similarity, which is how similar the action sequence predicted is to the actual\n action sequence. This is basically a soft measure of exact_match.\n \"\"\"\n return {\n 'exact_match': self._exact_match.get_metric(reset),\n 'denotation_acc': self._denotation_accuracy.get_metric(reset),\n 'valid_sql_query': self._valid_sql_query.get_metric(reset),\n 'action_similarity': self._action_similarity.get_metric(reset)\n }\n\n def _create_grammar_state(self,\n world: AtisWorld,\n possible_actions: List[ProductionRuleArray],\n linking_scores: torch.Tensor,\n entity_types: torch.Tensor) -> GrammarStatelet:\n \"\"\"\n This method creates the GrammarStatelet object that's used for decoding. Part of creating\n that is creating the `valid_actions` dictionary, which contains embedded representations of\n all of the valid actions. So, we create that here as well.\n\n The inputs to this method are for a `single instance in the batch`; none of the tensors we\n create here are batched. We grab the global action ids from the input\n ``ProductionRuleArrays``, and we use those to embed the valid actions for every\n non-terminal type. 
We use the input ``linking_scores`` for non-global actions.\n\n Parameters\n ----------\n world : ``AtisWorld``\n From the input to ``forward`` for a single batch instance.\n possible_actions : ``List[ProductionRuleArray]``\n From the input to ``forward`` for a single batch instance.\n linking_scores : ``torch.Tensor``\n Assumed to have shape ``(num_entities, num_utterance_tokens)`` (i.e., there is no batch\n dimension).\n entity_types : ``torch.Tensor``\n Assumed to have shape ``(num_entities,)`` (i.e., there is no batch dimension).\n \"\"\"\n action_map = {}\n for action_index, action in enumerate(possible_actions):\n action_string = action[0]\n action_map[action_string] = action_index\n\n valid_actions = world.valid_actions\n entity_map = {}\n entities = world.entities\n\n for entity_index, entity in enumerate(entities):\n entity_map[entity] = entity_index\n\n translated_valid_actions: Dict[str, Dict[str, Tuple[torch.Tensor, torch.Tensor, List[int]]]] = {}\n for key, action_strings in valid_actions.items():\n translated_valid_actions[key] = {}\n # `key` here is a non-terminal from the grammar, and `action_strings` are all the valid\n # productions of that non-terminal. We'll first split those productions by global vs.\n # linked action.\n\n action_indices = [action_map[action_string] for action_string in action_strings]\n production_rule_arrays = [(possible_actions[index], index) for index in action_indices]\n global_actions = []\n linked_actions = []\n for production_rule_array, action_index in production_rule_arrays:\n if production_rule_array[1]:\n global_actions.append((production_rule_array[2], action_index))\n else:\n linked_actions.append((production_rule_array[0], action_index))\n\n if global_actions:\n global_action_tensors, global_action_ids = zip(*global_actions)\n global_action_tensor = entity_types.new_tensor(torch.cat(global_action_tensors, dim=0),\n dtype=torch.long)\n global_input_embeddings = self._action_embedder(global_action_tensor)\n global_output_embeddings = self._output_action_embedder(global_action_tensor)\n translated_valid_actions[key]['global'] = (global_input_embeddings,\n global_output_embeddings,\n list(global_action_ids))\n if linked_actions:\n linked_rules, linked_action_ids = zip(*linked_actions)\n entities = linked_rules\n entity_ids = [entity_map[entity] for entity in entities]\n entity_linking_scores = linking_scores[entity_ids]\n entity_type_tensor = entity_types[entity_ids]\n entity_type_embeddings = self._entity_type_decoder_embedding(entity_type_tensor)\n entity_type_embeddings = entity_types.new_tensor(entity_type_embeddings, dtype=torch.float)\n translated_valid_actions[key]['linked'] = (entity_linking_scores,\n entity_type_embeddings,\n list(linked_action_ids))\n\n return GrammarStatelet(['statement'],\n {},\n translated_valid_actions,\n {},\n self.is_nonterminal,\n reverse_productions=False)\n\n @overrides\n def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:\n \"\"\"\n This method overrides ``Model.decode``, which gets called after ``Model.forward``, at test\n time, to finalize predictions. 
This is (confusingly) a separate notion from the \"decoder\"\n in \"encoder/decoder\", where that decoder logic lives in ``TransitionFunction``.\n\n This method trims the output predictions to the first end symbol, replaces indices with\n corresponding tokens, and adds a field called ``predicted_actions`` to the ``output_dict``.\n \"\"\"\n action_mapping = output_dict['action_mapping']\n best_actions = output_dict[\"best_action_sequence\"]\n debug_infos = output_dict['debug_info']\n batch_action_info = []\n for batch_index, (predicted_actions, debug_info) in enumerate(zip(best_actions, debug_infos)):\n instance_action_info = []\n for predicted_action, action_debug_info in zip(predicted_actions, debug_info):\n action_info = {}\n action_info['predicted_action'] = predicted_action\n considered_actions = action_debug_info['considered_actions']\n probabilities = action_debug_info['probabilities']\n actions = []\n for action, probability in zip(considered_actions, probabilities):\n if action != -1:\n actions.append((action_mapping[(batch_index, action)], probability))\n actions.sort()\n considered_actions, probabilities = zip(*actions)\n action_info['considered_actions'] = considered_actions\n action_info['action_probabilities'] = probabilities\n action_info['utterance_attention'] = action_debug_info.get('question_attention', [])\n instance_action_info.append(action_info)\n batch_action_info.append(instance_action_info)\n output_dict[\"predicted_actions\"] = batch_action_info\n return output_dict\n"
] | [
[
"torch.FloatTensor",
"torch.cat",
"torch.nn.init.normal_",
"torch.nn.Dropout"
]
] |
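
The row above lists `torch.FloatTensor`, `torch.cat`, `torch.nn.init.normal_`, and `torch.nn.Dropout` as the APIs used by the ATIS parser file, whose `_create_grammar_state` method concatenates per-production id tensors, looks them up in input/output action embedders, and indexes `linking_scores` rows for linked (non-global) actions. A minimal, self-contained sketch of that pattern — with toy sizes and hypothetical names, not values taken from the dataset row — might look like:

```python
import torch
import torch.nn as nn

# Toy sizes; the real model derives these from its vocabulary and config.
num_actions, action_dim = 10, 8

action_embedder = nn.Embedding(num_actions, action_dim)
output_action_embedder = nn.Embedding(num_actions, action_dim)
nn.init.normal_(action_embedder.weight)   # torch.nn.init.normal_
dropout = nn.Dropout(p=0.1)               # torch.nn.Dropout

# Per-production id tensors are stacked with torch.cat before the lookup.
ids_a = torch.LongTensor([1, 3])
ids_b = torch.LongTensor([4])
global_action_tensor = torch.cat([ids_a, ids_b], dim=0)   # shape (3,)

global_input_embeddings = dropout(action_embedder(global_action_tensor))
global_output_embeddings = output_action_embedder(global_action_tensor)

# Linked actions instead index rows of a (num_entities, num_tokens) score
# tensor, analogous to the model's linking_scores.
linking_scores = torch.FloatTensor(5, 7).uniform_()       # torch.FloatTensor
entity_linking_scores = linking_scores[[0, 2]]            # rows for two entities

print(global_input_embeddings.shape, entity_linking_scores.shape)
```

This only illustrates how the four listed APIs combine; the actual grammar-state bookkeeping (action maps, entity maps, `GrammarStatelet`) lives in the file reproduced above.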
jdammers/mne-python | [
"1dc1502215a53385cda15c6c336fcc4341dc4d3b"
] | [
"mne/bem.py"
] | [
"# Authors: Alexandre Gramfort <[email protected]>\n# Matti Hamalainen <[email protected]>\n# Eric Larson <[email protected]>\n# Lorenzo De Santis <[email protected]>\n#\n# License: BSD (3-clause)\n\nfrom functools import partial\nimport glob\nimport os\nimport os.path as op\nimport shutil\nfrom copy import deepcopy\n\nimport numpy as np\nfrom scipy import linalg\n\nfrom .transforms import _ensure_trans, apply_trans\nfrom .io import Info\nfrom .io.constants import FIFF\nfrom .io.write import (start_file, start_block, write_float, write_int,\n write_float_matrix, write_int_matrix, end_block,\n end_file)\nfrom .io.tag import find_tag\nfrom .io.tree import dir_tree_find\nfrom .io.open import fiff_open\nfrom .surface import (read_surface, write_surface, complete_surface_info,\n _compute_nearest, _get_ico_surface, read_tri,\n _fast_cross_nd_sum, _get_solids)\nfrom .utils import verbose, logger, run_subprocess, get_subjects_dir, warn, _pl\nfrom .fixes import einsum\nfrom .externals.six import string_types\n\n\n# ############################################################################\n# Compute BEM solution\n\n# The following approach is based on:\n#\n# de Munck JC: \"A linear discretization of the volume conductor boundary\n# integral equation using analytically integrated elements\",\n# IEEE Trans Biomed Eng. 1992 39(9) : 986 - 990\n#\n\n\nclass ConductorModel(dict):\n \"\"\"BEM or sphere model.\"\"\"\n\n def __repr__(self): # noqa: D105\n if self['is_sphere']:\n center = ', '.join('%0.1f' % (x * 1000.) for x in self['r0'])\n rad = self.radius\n if rad is None: # no radius / MEG only\n extra = 'Sphere (no layers): r0=[%s] mm' % center\n else:\n extra = ('Sphere (%s layer%s): r0=[%s] R=%1.f mm'\n % (len(self['layers']) - 1, _pl(self['layers']),\n center, rad * 1000.))\n else:\n extra = ('BEM (%s layer%s)' % (len(self['surfs']),\n _pl(self['surfs'])))\n return '<ConductorModel | %s>' % extra\n\n def copy(self):\n \"\"\"Return copy of ConductorModel instance.\"\"\"\n return deepcopy(self)\n\n @property\n def radius(self):\n \"\"\"Sphere radius if an EEG sphere model.\"\"\"\n if not self['is_sphere']:\n raise RuntimeError('radius undefined for BEM')\n return None if len(self['layers']) == 0 else self['layers'][-1]['rad']\n\n\ndef _calc_beta(rk, rk_norm, rk1, rk1_norm):\n \"\"\"Compute coefficients for calculating the magic vector omega.\"\"\"\n rkk1 = rk1[0] - rk[0]\n size = np.linalg.norm(rkk1)\n rkk1 /= size\n num = rk_norm + np.dot(rk, rkk1)\n den = rk1_norm + np.dot(rk1, rkk1)\n res = np.log(num / den) / size\n return res\n\n\ndef _lin_pot_coeff(fros, tri_rr, tri_nn, tri_area):\n \"\"\"Compute the linear potential matrix element computations.\"\"\"\n omega = np.zeros((len(fros), 3))\n\n # we replicate a little bit of the _get_solids code here for speed\n # (we need some of the intermediate values later)\n v1 = tri_rr[np.newaxis, 0, :] - fros\n v2 = tri_rr[np.newaxis, 1, :] - fros\n v3 = tri_rr[np.newaxis, 2, :] - fros\n triples = _fast_cross_nd_sum(v1, v2, v3)\n l1 = np.linalg.norm(v1, axis=1)\n l2 = np.linalg.norm(v2, axis=1)\n l3 = np.linalg.norm(v3, axis=1)\n ss = l1 * l2 * l3\n ss += einsum('ij,ij,i->i', v1, v2, l3)\n ss += einsum('ij,ij,i->i', v1, v3, l2)\n ss += einsum('ij,ij,i->i', v2, v3, l1)\n solids = np.arctan2(triples, ss)\n\n # We *could* subselect the good points from v1, v2, v3, triples, solids,\n # l1, l2, and l3, but there are *very* few bad points. So instead we do\n # some unnecessary calculations, and then omit them from the final\n # solution. 
These three lines ensure we don't get invalid values in\n # _calc_beta.\n bad_mask = np.abs(solids) < np.pi / 1e6\n l1[bad_mask] = 1.\n l2[bad_mask] = 1.\n l3[bad_mask] = 1.\n\n # Calculate the magic vector vec_omega\n beta = [_calc_beta(v1, l1, v2, l2)[:, np.newaxis],\n _calc_beta(v2, l2, v3, l3)[:, np.newaxis],\n _calc_beta(v3, l3, v1, l1)[:, np.newaxis]]\n vec_omega = (beta[2] - beta[0]) * v1\n vec_omega += (beta[0] - beta[1]) * v2\n vec_omega += (beta[1] - beta[2]) * v3\n\n area2 = 2.0 * tri_area\n n2 = 1.0 / (area2 * area2)\n # leave omega = 0 otherwise\n # Put it all together...\n yys = [v1, v2, v3]\n idx = [0, 1, 2, 0, 2]\n for k in range(3):\n diff = yys[idx[k - 1]] - yys[idx[k + 1]]\n zdots = _fast_cross_nd_sum(yys[idx[k + 1]], yys[idx[k - 1]], tri_nn)\n omega[:, k] = -n2 * (area2 * zdots * 2. * solids -\n triples * (diff * vec_omega).sum(axis=-1))\n # omit the bad points from the solution\n omega[bad_mask] = 0.\n return omega\n\n\ndef _correct_auto_elements(surf, mat):\n \"\"\"Improve auto-element approximation.\"\"\"\n pi2 = 2.0 * np.pi\n tris_flat = surf['tris'].ravel()\n misses = pi2 - mat.sum(axis=1)\n for j, miss in enumerate(misses):\n # How much is missing?\n n_memb = len(surf['neighbor_tri'][j])\n # The node itself receives one half\n mat[j, j] = miss / 2.0\n # The rest is divided evenly among the member nodes...\n miss /= (4.0 * n_memb)\n members = np.where(j == tris_flat)[0]\n mods = members % 3\n offsets = np.array([[1, 2], [-1, 1], [-1, -2]])\n tri_1 = members + offsets[mods, 0]\n tri_2 = members + offsets[mods, 1]\n for t1, t2 in zip(tri_1, tri_2):\n mat[j, tris_flat[t1]] += miss\n mat[j, tris_flat[t2]] += miss\n return\n\n\ndef _fwd_bem_lin_pot_coeff(surfs):\n \"\"\"Calculate the coefficients for linear collocation approach.\"\"\"\n # taken from fwd_bem_linear_collocation.c\n nps = [surf['np'] for surf in surfs]\n np_tot = sum(nps)\n coeff = np.zeros((np_tot, np_tot))\n offsets = np.cumsum(np.concatenate(([0], nps)))\n for si_1, surf1 in enumerate(surfs):\n rr_ord = np.arange(nps[si_1])\n for si_2, surf2 in enumerate(surfs):\n logger.info(\" %s (%d) -> %s (%d) ...\" %\n (_bem_explain_surface(surf1['id']), nps[si_1],\n _bem_explain_surface(surf2['id']), nps[si_2]))\n tri_rr = surf2['rr'][surf2['tris']]\n tri_nn = surf2['tri_nn']\n tri_area = surf2['tri_area']\n submat = coeff[offsets[si_1]:offsets[si_1 + 1],\n offsets[si_2]:offsets[si_2 + 1]] # view\n for k in range(surf2['ntri']):\n tri = surf2['tris'][k]\n if si_1 == si_2:\n skip_idx = ((rr_ord == tri[0]) |\n (rr_ord == tri[1]) |\n (rr_ord == tri[2]))\n else:\n skip_idx = list()\n # No contribution from a triangle that\n # this vertex belongs to\n # if sidx1 == sidx2 and (tri == j).any():\n # continue\n # Otherwise do the hard job\n coeffs = _lin_pot_coeff(surf1['rr'], tri_rr[k], tri_nn[k],\n tri_area[k])\n coeffs[skip_idx] = 0.\n submat[:, tri] -= coeffs\n if si_1 == si_2:\n _correct_auto_elements(surf1, submat)\n return coeff\n\n\ndef _fwd_bem_multi_solution(solids, gamma, nps):\n \"\"\"Do multi surface solution.\n\n * Invert I - solids/(2*M_PI)\n * Take deflation into account\n * The matrix is destroyed after inversion\n * This is the general multilayer case\n \"\"\"\n pi2 = 1.0 / (2 * np.pi)\n n_tot = np.sum(nps)\n assert solids.shape == (n_tot, n_tot)\n nsurf = len(nps)\n defl = 1.0 / n_tot\n # Modify the matrix\n offsets = np.cumsum(np.concatenate(([0], nps)))\n for si_1 in range(nsurf):\n for si_2 in range(nsurf):\n mult = pi2 if gamma is None else pi2 * gamma[si_1, si_2]\n slice_j = slice(offsets[si_1], 
offsets[si_1 + 1])\n slice_k = slice(offsets[si_2], offsets[si_2 + 1])\n solids[slice_j, slice_k] = defl - solids[slice_j, slice_k] * mult\n solids += np.eye(n_tot)\n return linalg.inv(solids, overwrite_a=True)\n\n\ndef _fwd_bem_homog_solution(solids, nps):\n \"\"\"Make a homogeneous solution.\"\"\"\n return _fwd_bem_multi_solution(solids, None, nps)\n\n\ndef _fwd_bem_ip_modify_solution(solution, ip_solution, ip_mult, n_tri):\n \"\"\"Modify the solution according to the IP approach.\"\"\"\n n_last = n_tri[-1]\n mult = (1.0 + ip_mult) / ip_mult\n\n logger.info(' Combining...')\n offsets = np.cumsum(np.concatenate(([0], n_tri)))\n for si in range(len(n_tri)):\n # Pick the correct submatrix (right column) and multiply\n sub = solution[offsets[si]:offsets[si + 1], np.sum(n_tri[:-1]):]\n # Multiply\n sub -= 2 * np.dot(sub, ip_solution)\n\n # The lower right corner is a special case\n sub[-n_last:, -n_last:] += mult * ip_solution\n\n # Final scaling\n logger.info(' Scaling...')\n solution *= ip_mult\n return\n\n\ndef _fwd_bem_linear_collocation_solution(m):\n \"\"\"Compute the linear collocation potential solution.\"\"\"\n # first, add surface geometries\n for surf in m['surfs']:\n complete_surface_info(surf, copy=False, verbose=False)\n\n logger.info('Computing the linear collocation solution...')\n logger.info(' Matrix coefficients...')\n coeff = _fwd_bem_lin_pot_coeff(m['surfs'])\n m['nsol'] = len(coeff)\n logger.info(\" Inverting the coefficient matrix...\")\n nps = [surf['np'] for surf in m['surfs']]\n m['solution'] = _fwd_bem_multi_solution(coeff, m['gamma'], nps)\n if len(m['surfs']) == 3:\n ip_mult = m['sigma'][1] / m['sigma'][2]\n if ip_mult <= FIFF.FWD_BEM_IP_APPROACH_LIMIT:\n logger.info('IP approach required...')\n logger.info(' Matrix coefficients (homog)...')\n coeff = _fwd_bem_lin_pot_coeff([m['surfs'][-1]])\n logger.info(' Inverting the coefficient matrix (homog)...')\n ip_solution = _fwd_bem_homog_solution(coeff,\n [m['surfs'][-1]['np']])\n logger.info(' Modify the original solution to incorporate '\n 'IP approach...')\n _fwd_bem_ip_modify_solution(m['solution'], ip_solution, ip_mult,\n nps)\n m['bem_method'] = FIFF.FWD_BEM_LINEAR_COLL\n logger.info(\"Solution ready.\")\n\n\n@verbose\ndef make_bem_solution(surfs, verbose=None):\n \"\"\"Create a BEM solution using the linear collocation approach.\n\n Parameters\n ----------\n surfs : list of dict\n The BEM surfaces to use (`from make_bem_model`)\n verbose : bool, str, int, or None\n If not None, override default verbose level (see :func:`mne.verbose`\n and :ref:`Logging documentation <tut_logging>` for more).\n\n Returns\n -------\n bem : instance of ConductorModel\n The BEM solution.\n\n Notes\n -----\n .. 
versionadded:: 0.10.0\n\n See Also\n --------\n make_bem_model\n read_bem_surfaces\n write_bem_surfaces\n read_bem_solution\n write_bem_solution\n \"\"\"\n logger.info('Approximation method : Linear collocation\\n')\n if isinstance(surfs, string_types):\n # Load the surfaces\n logger.info('Loading surfaces...')\n surfs = read_bem_surfaces(surfs)\n bem = ConductorModel(is_sphere=False, surfs=surfs)\n _add_gamma_multipliers(bem)\n if len(bem['surfs']) == 3:\n logger.info('Three-layer model surfaces loaded.')\n elif len(bem['surfs']) == 1:\n logger.info('Homogeneous model surface loaded.')\n else:\n raise RuntimeError('Only 1- or 3-layer BEM computations supported')\n _check_bem_size(bem['surfs'])\n _fwd_bem_linear_collocation_solution(bem)\n logger.info('BEM geometry computations complete.')\n return bem\n\n\n# ############################################################################\n# Make BEM model\n\ndef _ico_downsample(surf, dest_grade):\n \"\"\"Downsample the surface if isomorphic to a subdivided icosahedron.\"\"\"\n n_tri = len(surf['tris'])\n found = -1\n bad_msg = (\"A surface with %d triangles cannot be isomorphic with a \"\n \"subdivided icosahedron.\" % n_tri)\n if n_tri % 20 != 0:\n raise RuntimeError(bad_msg)\n n_tri = n_tri // 20\n found = int(round(np.log(n_tri) / np.log(4)))\n if n_tri != 4 ** found:\n raise RuntimeError(bad_msg)\n del n_tri\n\n if dest_grade > found:\n raise RuntimeError('For this surface, decimation grade should be %d '\n 'or less, not %s.' % (found, dest_grade))\n\n source = _get_ico_surface(found)\n dest = _get_ico_surface(dest_grade, patch_stats=True)\n del dest['tri_cent']\n del dest['tri_nn']\n del dest['neighbor_tri']\n del dest['tri_area']\n if not np.array_equal(source['tris'], surf['tris']):\n raise RuntimeError('The source surface has a matching number of '\n 'triangles but ordering is wrong')\n logger.info('Going from %dth to %dth subdivision of an icosahedron '\n '(n_tri: %d -> %d)' % (found, dest_grade, len(surf['tris']),\n len(dest['tris'])))\n # Find the mapping\n dest['rr'] = surf['rr'][_get_ico_map(source, dest)]\n return dest\n\n\ndef _get_ico_map(fro, to):\n \"\"\"Get a mapping between ico surfaces.\"\"\"\n nearest, dists = _compute_nearest(fro['rr'], to['rr'], return_dists=True)\n n_bads = (dists > 5e-3).sum()\n if n_bads > 0:\n raise RuntimeError('No matching vertex for %d destination vertices'\n % (n_bads))\n return nearest\n\n\ndef _order_surfaces(surfs):\n \"\"\"Reorder the surfaces.\"\"\"\n if len(surfs) != 3:\n return surfs\n # we have three surfaces\n surf_order = [FIFF.FIFFV_BEM_SURF_ID_HEAD,\n FIFF.FIFFV_BEM_SURF_ID_SKULL,\n FIFF.FIFFV_BEM_SURF_ID_BRAIN]\n ids = np.array([surf['id'] for surf in surfs])\n if set(ids) != set(surf_order):\n raise RuntimeError('bad surface ids: %s' % ids)\n order = [np.where(ids == id_)[0][0] for id_ in surf_order]\n surfs = [surfs[idx] for idx in order]\n return surfs\n\n\ndef _assert_complete_surface(surf, incomplete='raise'):\n \"\"\"Check the sum of solid angles as seen from inside.\"\"\"\n # from surface_checks.c\n tot_angle = 0.\n # Center of mass....\n cm = surf['rr'].mean(axis=0)\n logger.info('%s CM is %6.2f %6.2f %6.2f mm' %\n (_surf_name[surf['id']],\n 1000 * cm[0], 1000 * cm[1], 1000 * cm[2]))\n tot_angle = _get_solids(surf['rr'][surf['tris']], cm[np.newaxis, :])[0]\n prop = tot_angle / (2 * np.pi)\n if np.abs(prop - 1.0) > 1e-5:\n msg = ('Surface %s is not complete (sum of solid angles '\n 'yielded %g, should be 1.)'\n % (_surf_name[surf['id']], prop))\n if incomplete == 'raise':\n 
raise RuntimeError(msg)\n else:\n warn(msg)\n\n\n_surf_name = {\n FIFF.FIFFV_BEM_SURF_ID_HEAD: 'outer skin ',\n FIFF.FIFFV_BEM_SURF_ID_SKULL: 'outer skull',\n FIFF.FIFFV_BEM_SURF_ID_BRAIN: 'inner skull',\n FIFF.FIFFV_BEM_SURF_ID_UNKNOWN: 'unknown ',\n}\n\n\ndef _assert_inside(fro, to):\n \"\"\"Check one set of points is inside a surface.\"\"\"\n # this is \"is_inside\" in surface_checks.c\n tot_angle = _get_solids(to['rr'][to['tris']], fro['rr'])\n if (np.abs(tot_angle / (2 * np.pi) - 1.0) > 1e-5).any():\n raise RuntimeError('Surface %s is not completely inside surface %s'\n % (_surf_name[fro['id']], _surf_name[to['id']]))\n\n\ndef _check_surfaces(surfs, incomplete='raise'):\n \"\"\"Check that the surfaces are complete and non-intersecting.\"\"\"\n for surf in surfs:\n _assert_complete_surface(surf, incomplete=incomplete)\n # Then check the topology\n for surf_1, surf_2 in zip(surfs[:-1], surfs[1:]):\n logger.info('Checking that %s surface is inside %s surface...' %\n (_surf_name[surf_2['id']], _surf_name[surf_1['id']]))\n _assert_inside(surf_2, surf_1)\n\n\ndef _check_surface_size(surf):\n \"\"\"Check that the coordinate limits are reasonable.\"\"\"\n sizes = surf['rr'].max(axis=0) - surf['rr'].min(axis=0)\n if (sizes < 0.05).any():\n raise RuntimeError('Dimensions of the surface %s seem too small '\n '(%9.5f mm). Maybe the the unit of measure is '\n 'meters instead of mm' %\n (_surf_name[surf['id']], 1000 * sizes.min()))\n\n\ndef _check_thicknesses(surfs):\n \"\"\"Compute how close we are.\"\"\"\n for surf_1, surf_2 in zip(surfs[:-1], surfs[1:]):\n min_dist = _compute_nearest(surf_1['rr'], surf_2['rr'],\n return_dists=True)[0]\n min_dist = min_dist.min()\n logger.info('Checking distance between %s and %s surfaces...' %\n (_surf_name[surf_1['id']], _surf_name[surf_2['id']]))\n logger.info('Minimum distance between the %s and %s surfaces is '\n 'approximately %6.1f mm' %\n (_surf_name[surf_1['id']], _surf_name[surf_2['id']],\n 1000 * min_dist))\n\n\ndef _surfaces_to_bem(surfs, ids, sigmas, ico=None, rescale=True,\n incomplete='raise'):\n \"\"\"Convert surfaces to a BEM.\"\"\"\n # equivalent of mne_surf2bem\n # surfs can be strings (filenames) or surface dicts\n if len(surfs) not in (1, 3) or not (len(surfs) == len(ids) ==\n len(sigmas)):\n raise ValueError('surfs, ids, and sigmas must all have the same '\n 'number of elements (1 or 3)')\n surf = list(surfs)\n for si, surf in enumerate(surfs):\n if isinstance(surf, string_types):\n surfs[si] = read_surface(surf, return_dict=True)[-1]\n # Downsampling if the surface is isomorphic with a subdivided icosahedron\n if ico is not None:\n for si, surf in enumerate(surfs):\n surfs[si] = _ico_downsample(surf, ico)\n for surf, id_ in zip(surfs, ids):\n surf['id'] = id_\n surf['coord_frame'] = surf.get('coord_frame', FIFF.FIFFV_COORD_MRI)\n surf.update(np=len(surf['rr']), ntri=len(surf['tris']))\n if rescale:\n surf['rr'] /= 1000. 
# convert to meters\n\n # Shifting surfaces is not implemented here...\n\n # Order the surfaces for the benefit of the topology checks\n for surf, sigma in zip(surfs, sigmas):\n surf['sigma'] = sigma\n surfs = _order_surfaces(surfs)\n\n # Check topology as best we can\n _check_surfaces(surfs, incomplete=incomplete)\n for surf in surfs:\n _check_surface_size(surf)\n _check_thicknesses(surfs)\n logger.info('Surfaces passed the basic topology checks.')\n return surfs\n\n\n@verbose\ndef make_bem_model(subject, ico=4, conductivity=(0.3, 0.006, 0.3),\n subjects_dir=None, verbose=None):\n \"\"\"Create a BEM model for a subject.\n\n .. note:: To get a single layer bem corresponding to the --homog flag in\n the command line tool set the ``conductivity`` parameter\n to a list/tuple with a single value (e.g. [0.3]).\n\n Parameters\n ----------\n subject : str\n The subject.\n ico : int | None\n The surface ico downsampling to use, e.g. 5=20484, 4=5120, 3=1280.\n If None, no subsampling is applied.\n conductivity : array of int, shape (3,) or (1,)\n The conductivities to use for each shell. Should be a single element\n for a one-layer model, or three elements for a three-layer model.\n Defaults to ``[0.3, 0.006, 0.3]``. The MNE-C default for a\n single-layer model would be ``[0.3]``.\n subjects_dir : string, or None\n Path to SUBJECTS_DIR if it is not set in the environment.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see :func:`mne.verbose`\n and :ref:`Logging documentation <tut_logging>` for more).\n\n Returns\n -------\n surfaces : list of dict\n The BEM surfaces. Use `make_bem_solution` to turn these into a\n `ConductorModel` suitable for forward calculation.\n\n Notes\n -----\n .. versionadded:: 0.10.0\n\n See Also\n --------\n make_bem_solution\n make_sphere_model\n read_bem_surfaces\n write_bem_surfaces\n \"\"\"\n conductivity = np.array(conductivity, float)\n if conductivity.ndim != 1 or conductivity.size not in (1, 3):\n raise ValueError('conductivity must be 1D array-like with 1 or 3 '\n 'elements')\n subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)\n subject_dir = op.join(subjects_dir, subject)\n bem_dir = op.join(subject_dir, 'bem')\n inner_skull = op.join(bem_dir, 'inner_skull.surf')\n outer_skull = op.join(bem_dir, 'outer_skull.surf')\n outer_skin = op.join(bem_dir, 'outer_skin.surf')\n surfaces = [inner_skull, outer_skull, outer_skin]\n ids = [FIFF.FIFFV_BEM_SURF_ID_BRAIN,\n FIFF.FIFFV_BEM_SURF_ID_SKULL,\n FIFF.FIFFV_BEM_SURF_ID_HEAD]\n logger.info('Creating the BEM geometry...')\n if len(conductivity) == 1:\n surfaces = surfaces[:1]\n ids = ids[:1]\n surfaces = _surfaces_to_bem(surfaces, ids, conductivity, ico)\n _check_bem_size(surfaces)\n logger.info('Complete.\\n')\n return surfaces\n\n\n# ############################################################################\n# Compute EEG sphere model\n\ndef _fwd_eeg_get_multi_sphere_model_coeffs(m, n_terms):\n \"\"\"Get the model depended weighting factor for n.\"\"\"\n nlayer = len(m['layers'])\n if nlayer in (0, 1):\n return 1.\n\n # Initialize the arrays\n c1 = np.zeros(nlayer - 1)\n c2 = np.zeros(nlayer - 1)\n cr = np.zeros(nlayer - 1)\n cr_mult = np.zeros(nlayer - 1)\n for k in range(nlayer - 1):\n c1[k] = m['layers'][k]['sigma'] / m['layers'][k + 1]['sigma']\n c2[k] = c1[k] - 1.0\n cr_mult[k] = m['layers'][k]['rel_rad']\n cr[k] = cr_mult[k]\n cr_mult[k] *= cr_mult[k]\n\n coeffs = np.zeros(n_terms - 1)\n for n in range(1, n_terms):\n # Increment the radius coefficients\n for k in 
range(nlayer - 1):\n cr[k] *= cr_mult[k]\n\n # Multiply the matrices\n M = np.eye(2)\n n1 = n + 1.0\n for k in range(nlayer - 2, -1, -1):\n M = np.dot([[n + n1 * c1[k], n1 * c2[k] / cr[k]],\n [n * c2[k] * cr[k], n1 + n * c1[k]]], M)\n num = n * (2.0 * n + 1.0) ** (nlayer - 1)\n coeffs[n - 1] = num / (n * M[1, 1] + n1 * M[1, 0])\n return coeffs\n\n\ndef _compose_linear_fitting_data(mu, u):\n \"\"\"Get the linear fitting data.\"\"\"\n # y is the data to be fitted (nterms-1 x 1)\n # M is the model matrix (nterms-1 x nfit-1)\n for k in range(u['nterms'] - 1):\n k1 = k + 1\n mu1n = np.power(mu[0], k1)\n u['y'][k] = u['w'][k] * (u['fn'][k1] - mu1n * u['fn'][0])\n for p in range(u['nfit'] - 1):\n u['M'][k][p] = u['w'][k] * (np.power(mu[p + 1], k1) - mu1n)\n\n\ndef _compute_linear_parameters(mu, u):\n \"\"\"Compute the best-fitting linear parameters.\"\"\"\n _compose_linear_fitting_data(mu, u)\n uu, sing, vv = linalg.svd(u['M'], full_matrices=False)\n\n # Compute the residuals\n u['resi'] = u['y'].copy()\n\n vec = np.empty(u['nfit'] - 1)\n for p in range(u['nfit'] - 1):\n vec[p] = np.dot(uu[:, p], u['y'])\n for k in range(u['nterms'] - 1):\n u['resi'][k] -= uu[k, p] * vec[p]\n vec[p] = vec[p] / sing[p]\n\n lambda_ = np.zeros(u['nfit'])\n for p in range(u['nfit'] - 1):\n sum_ = 0.\n for q in range(u['nfit'] - 1):\n sum_ += vv[q, p] * vec[q]\n lambda_[p + 1] = sum_\n lambda_[0] = u['fn'][0] - np.sum(lambda_[1:])\n rv = np.dot(u['resi'], u['resi']) / np.dot(u['y'], u['y'])\n return rv, lambda_\n\n\ndef _one_step(mu, u):\n \"\"\"Evaluate the residual sum of squares fit for one set of mu values.\"\"\"\n if np.abs(mu).max() > 1.0:\n return 1.0\n\n # Compose the data for the linear fitting, compute SVD, then residuals\n _compose_linear_fitting_data(mu, u)\n u['uu'], u['sing'], u['vv'] = linalg.svd(u['M'])\n u['resi'][:] = u['y'][:]\n for p in range(u['nfit'] - 1):\n dot = np.dot(u['uu'][p], u['y'])\n for k in range(u['nterms'] - 1):\n u['resi'][k] = u['resi'][k] - u['uu'][p, k] * dot\n\n # Return their sum of squares\n return np.dot(u['resi'], u['resi'])\n\n\ndef _fwd_eeg_fit_berg_scherg(m, nterms, nfit):\n \"\"\"Fit the Berg-Scherg equivalent spherical model dipole parameters.\"\"\"\n from scipy.optimize import fmin_cobyla\n assert nfit >= 2\n u = dict(y=np.zeros(nterms - 1), resi=np.zeros(nterms - 1),\n nfit=nfit, nterms=nterms, M=np.zeros((nterms - 1, nfit - 1)))\n\n # (1) Calculate the coefficients of the true expansion\n u['fn'] = _fwd_eeg_get_multi_sphere_model_coeffs(m, nterms + 1)\n\n # (2) Calculate the weighting\n f = (min([layer['rad'] for layer in m['layers']]) /\n max([layer['rad'] for layer in m['layers']]))\n\n # correct weighting\n k = np.arange(1, nterms + 1)\n u['w'] = np.sqrt((2.0 * k + 1) * (3.0 * k + 1.0) /\n k) * np.power(f, (k - 1.0))\n u['w'][-1] = 0\n\n # Do the nonlinear minimization, constraining mu to the interval [-1, +1]\n mu_0 = np.random.RandomState(0).rand(nfit) * f\n fun = partial(_one_step, u=u)\n max_ = 1. 
- 2e-4 # adjust for fmin_cobyla \"catol\" that not all scipy have\n cons = [(lambda x: max_ - np.abs(x[ii])) for ii in range(nfit)]\n mu = fmin_cobyla(fun, mu_0, cons, rhobeg=0.5, rhoend=5e-3, disp=0)\n\n # (6) Do the final step: calculation of the linear parameters\n rv, lambda_ = _compute_linear_parameters(mu, u)\n order = np.argsort(mu)[::-1]\n mu, lambda_ = mu[order], lambda_[order] # sort: largest mu first\n\n m['mu'] = mu\n # This division takes into account the actual conductivities\n m['lambda'] = lambda_ / m['layers'][-1]['sigma']\n m['nfit'] = nfit\n return rv\n\n\n@verbose\ndef make_sphere_model(r0=(0., 0., 0.04), head_radius=0.09, info=None,\n relative_radii=(0.90, 0.92, 0.97, 1.0),\n sigmas=(0.33, 1.0, 0.004, 0.33), verbose=None):\n \"\"\"Create a spherical model for forward solution calculation.\n\n Parameters\n ----------\n r0 : array-like | str\n Head center to use (in head coordinates). If 'auto', the head\n center will be calculated from the digitization points in info.\n head_radius : float | str | None\n If float, compute spherical shells for EEG using the given radius.\n If 'auto', estimate an approriate radius from the dig points in Info,\n If None, exclude shells (single layer sphere model).\n info : instance of Info | None\n Measurement info. Only needed if ``r0`` or ``head_radius`` are\n ``'auto'``.\n relative_radii : array-like\n Relative radii for the spherical shells.\n sigmas : array-like\n Sigma values for the spherical shells.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see :func:`mne.verbose`\n and :ref:`Logging documentation <tut_logging>` for more).\n\n Returns\n -------\n sphere : instance of ConductorModel\n The resulting spherical conductor model.\n\n Notes\n -----\n .. versionadded:: 0.9.0\n\n See Also\n --------\n make_bem_model\n make_bem_solution\n \"\"\"\n for name in ('r0', 'head_radius'):\n param = locals()[name]\n if isinstance(param, string_types):\n if param != 'auto':\n raise ValueError('%s, if str, must be \"auto\" not \"%s\"'\n % (name, param))\n relative_radii = np.array(relative_radii, float).ravel()\n sigmas = np.array(sigmas, float).ravel()\n if len(relative_radii) != len(sigmas):\n raise ValueError('relative_radii length (%s) must match that of '\n 'sigmas (%s)' % (len(relative_radii),\n len(sigmas)))\n if len(sigmas) <= 1 and head_radius is not None:\n raise ValueError('at least 2 sigmas must be supplied if '\n 'head_radius is not None, got %s' % (len(sigmas),))\n if (isinstance(r0, string_types) and r0 == 'auto') or \\\n (isinstance(head_radius, string_types) and head_radius == 'auto'):\n if info is None:\n raise ValueError('Info must not be None for auto mode')\n head_radius_fit, r0_fit = fit_sphere_to_headshape(info, units='m')[:2]\n if isinstance(r0, string_types):\n r0 = r0_fit\n if isinstance(head_radius, string_types):\n head_radius = head_radius_fit\n sphere = ConductorModel(is_sphere=True, r0=np.array(r0),\n coord_frame=FIFF.FIFFV_COORD_HEAD)\n sphere['layers'] = list()\n if head_radius is not None:\n # Eventually these could be configurable...\n relative_radii = np.array(relative_radii, float)\n sigmas = np.array(sigmas, float)\n order = np.argsort(relative_radii)\n relative_radii = relative_radii[order]\n sigmas = sigmas[order]\n for rel_rad, sig in zip(relative_radii, sigmas):\n # sort layers by (relative) radius, and scale radii\n layer = dict(rad=rel_rad, sigma=sig)\n layer['rel_rad'] = layer['rad'] = rel_rad\n sphere['layers'].append(layer)\n\n # scale the radii\n R = 
sphere['layers'][-1]['rad']\n rR = sphere['layers'][-1]['rel_rad']\n for layer in sphere['layers']:\n layer['rad'] /= R\n layer['rel_rad'] /= rR\n\n #\n # Setup the EEG sphere model calculations\n #\n\n # Scale the relative radii\n for k in range(len(relative_radii)):\n sphere['layers'][k]['rad'] = (head_radius *\n sphere['layers'][k]['rel_rad'])\n rv = _fwd_eeg_fit_berg_scherg(sphere, 200, 3)\n logger.info('\\nEquiv. model fitting -> RV = %g %%' % (100 * rv))\n for k in range(3):\n logger.info('mu%d = %g lambda%d = %g'\n % (k + 1, sphere['mu'][k], k + 1,\n sphere['layers'][-1]['sigma'] *\n sphere['lambda'][k]))\n logger.info('Set up EEG sphere model with scalp radius %7.1f mm\\n'\n % (1000 * head_radius,))\n return sphere\n\n\n# #############################################################################\n# Sphere fitting\n\n_dig_kind_dict = {\n 'cardinal': FIFF.FIFFV_POINT_CARDINAL,\n 'hpi': FIFF.FIFFV_POINT_HPI,\n 'eeg': FIFF.FIFFV_POINT_EEG,\n 'extra': FIFF.FIFFV_POINT_EXTRA,\n}\n_dig_kind_rev = dict((val, key) for key, val in _dig_kind_dict.items())\n_dig_kind_ints = tuple(_dig_kind_dict.values())\n\n\n@verbose\ndef fit_sphere_to_headshape(info, dig_kinds='auto', units='m', verbose=None):\n \"\"\"Fit a sphere to the headshape points to determine head center.\n\n Parameters\n ----------\n info : instance of Info\n Measurement info.\n dig_kinds : list of str | str\n Kind of digitization points to use in the fitting. These can be any\n combination of ('cardinal', 'hpi', 'eeg', 'extra'). Can also\n be 'auto' (default), which will use only the 'extra' points if\n enough (more than 10) are available, and if not, uses 'extra' and\n 'eeg' points.\n units : str\n Can be \"m\" (default) or \"mm\".\n\n .. versionadded:: 0.12\n\n verbose : bool, str, int, or None\n If not None, override default verbose level (see :func:`mne.verbose`\n and :ref:`Logging documentation <tut_logging>` for more).\n\n Returns\n -------\n radius : float\n Sphere radius.\n origin_head: ndarray, shape (3,)\n Head center in head coordinates.\n origin_device: ndarray, shape (3,)\n Head center in device coordinates.\n\n Notes\n -----\n This function excludes any points that are low and frontal\n (``z < 0 and y > 0``) to improve the fit.\n \"\"\"\n if not isinstance(units, string_types) or units not in ('m', 'mm'):\n raise ValueError('units must be a \"m\" or \"mm\"')\n radius, origin_head, origin_device = _fit_sphere_to_headshape(\n info, dig_kinds)\n if units == 'mm':\n radius *= 1e3\n origin_head *= 1e3\n origin_device *= 1e3\n return radius, origin_head, origin_device\n\n\n@verbose\ndef get_fitting_dig(info, dig_kinds='auto', verbose=None):\n \"\"\"Get digitization points suitable for sphere fitting.\n\n Parameters\n ----------\n info : instance of Info\n The measurement info.\n dig_kinds : list of str | str\n Kind of digitization points to use in the fitting. These can be any\n combination of ('cardinal', 'hpi', 'eeg', 'extra'). Can also\n be 'auto' (default), which will use only the 'extra' points if\n enough (more than 10) are available, and if not, uses 'extra' and\n 'eeg' points.\n verbose : bool, str or None\n If not None, override default verbose level\n\n Returns\n -------\n dig : array, shape (n_pts, 3)\n The digitization points (in head coordinates) to use for fitting.\n\n Notes\n -----\n This will exclude digitization locations that have ``z < 0 and y > 0``,\n i.e. points on the nose and below the nose on the face.\n\n .. 
versionadded:: 0.14\n \"\"\"\n if not isinstance(info, Info):\n raise TypeError('info must be an instance of Info not %s' % type(info))\n if info['dig'] is None:\n raise RuntimeError('Cannot fit headshape without digitization '\n ', info[\"dig\"] is None')\n if isinstance(dig_kinds, string_types):\n if dig_kinds == 'auto':\n # try \"extra\" first\n try:\n return get_fitting_dig(info, 'extra')\n except ValueError:\n pass\n return get_fitting_dig(info, ('extra', 'eeg'))\n else:\n dig_kinds = (dig_kinds,)\n # convert string args to ints (first make dig_kinds mutable in case tuple)\n dig_kinds = list(dig_kinds)\n for di, d in enumerate(dig_kinds):\n dig_kinds[di] = _dig_kind_dict.get(d, d)\n if dig_kinds[di] not in _dig_kind_ints:\n raise ValueError('dig_kinds[#%d] (%s) must be one of %s'\n % (di, d, sorted(list(_dig_kind_dict.keys()))))\n\n # get head digization points of the specified kind(s)\n hsp = [p['r'] for p in info['dig'] if p['kind'] in dig_kinds]\n if any(p['coord_frame'] != FIFF.FIFFV_COORD_HEAD for p in info['dig']):\n raise RuntimeError('Digitization points not in head coordinates, '\n 'contact mne-python developers')\n\n # exclude some frontal points (nose etc.)\n hsp = np.array([p for p in hsp if not (p[2] < -1e-6 and p[1] > 1e-6)])\n\n if len(hsp) <= 10:\n kinds_str = ', '.join(['\"%s\"' % _dig_kind_rev[d]\n for d in sorted(dig_kinds)])\n msg = ('Only %s head digitization points of the specified kind%s (%s,)'\n % (len(hsp), _pl(dig_kinds), kinds_str))\n if len(hsp) < 4:\n raise ValueError(msg + ', at least 4 required')\n else:\n warn(msg + ', fitting may be inaccurate')\n return hsp\n\n\n@verbose\ndef _fit_sphere_to_headshape(info, dig_kinds, verbose=None):\n \"\"\"Fit a sphere to the given head shape.\"\"\"\n hsp = get_fitting_dig(info, dig_kinds)\n radius, origin_head = _fit_sphere(np.array(hsp), disp=False)\n # compute origin in device coordinates\n head_to_dev = _ensure_trans(info['dev_head_t'], 'head', 'meg')\n origin_device = apply_trans(head_to_dev, origin_head)\n logger.info('Fitted sphere radius:'.ljust(30) + '%0.1f mm'\n % (radius * 1e3,))\n # 99th percentile on Wikipedia for Giabella to back of head is 21.7cm,\n # i.e. 
108mm \"radius\", so let's go with 110mm\n # en.wikipedia.org/wiki/Human_head#/media/File:HeadAnthropometry.JPG\n if radius > 0.110:\n warn('Estimated head size (%0.1f mm) exceeded 99th '\n 'percentile for adult head size' % (1e3 * radius,))\n # > 2 cm away from head center in X or Y is strange\n if np.linalg.norm(origin_head[:2]) > 0.02:\n warn('(X, Y) fit (%0.1f, %0.1f) more than 20 mm from '\n 'head frame origin' % tuple(1e3 * origin_head[:2]))\n logger.info('Origin head coordinates:'.ljust(30) +\n '%0.1f %0.1f %0.1f mm' % tuple(1e3 * origin_head))\n logger.info('Origin device coordinates:'.ljust(30) +\n '%0.1f %0.1f %0.1f mm' % tuple(1e3 * origin_device))\n return radius, origin_head, origin_device\n\n\ndef _fit_sphere(points, disp='auto'):\n \"\"\"Fit a sphere to an arbitrary set of points.\"\"\"\n from scipy.optimize import fmin_cobyla\n if isinstance(disp, string_types) and disp == 'auto':\n disp = True if logger.level <= 20 else False\n # initial guess for center and radius\n radii = (np.max(points, axis=1) - np.min(points, axis=1)) / 2.\n radius_init = radii.mean()\n center_init = np.median(points, axis=0)\n\n # optimization\n x0 = np.concatenate([center_init, [radius_init]])\n\n def cost_fun(center_rad):\n d = np.linalg.norm(points - center_rad[:3], axis=1) - center_rad[3]\n d *= d\n return d.sum()\n\n def constraint(center_rad):\n return center_rad[3] # radius must be >= 0\n\n x_opt = fmin_cobyla(cost_fun, x0, constraint, rhobeg=radius_init,\n rhoend=radius_init * 1e-6, disp=disp)\n\n origin = x_opt[:3]\n radius = x_opt[3]\n return radius, origin\n\n\ndef _check_origin(origin, info, coord_frame='head', disp=False):\n \"\"\"Check or auto-determine the origin.\"\"\"\n if isinstance(origin, string_types):\n if origin != 'auto':\n raise ValueError('origin must be a numerical array, or \"auto\", '\n 'not %s' % (origin,))\n if coord_frame == 'head':\n R, origin = fit_sphere_to_headshape(info, verbose=False,\n units='m')[:2]\n logger.info(' Automatic origin fit: head of radius %0.1f mm'\n % (R * 1000.,))\n del R\n else:\n origin = (0., 0., 0.)\n origin = np.array(origin, float)\n if origin.shape != (3,):\n raise ValueError('origin must be a 3-element array')\n if disp:\n origin_str = ', '.join(['%0.1f' % (o * 1000) for o in origin])\n msg = (' Using origin %s mm in the %s frame'\n % (origin_str, coord_frame))\n if coord_frame == 'meg' and info['dev_head_t'] is not None:\n o_dev = apply_trans(info['dev_head_t'], origin)\n origin_str = ', '.join('%0.1f' % (o * 1000,) for o in o_dev)\n msg += ' (%s mm in the head frame)' % (origin_str,)\n logger.info(msg)\n return origin\n\n\n# ############################################################################\n# Create BEM surfaces\n\n@verbose\ndef make_watershed_bem(subject, subjects_dir=None, overwrite=False,\n volume='T1', atlas=False, gcaatlas=False, preflood=None,\n show=False, verbose=None):\n \"\"\"Create BEM surfaces using the FreeSurfer watershed algorithm.\n\n Parameters\n ----------\n subject : str\n Subject name (required)\n subjects_dir : str\n Directory containing subjects data. If None use\n the Freesurfer SUBJECTS_DIR environment variable.\n overwrite : bool\n Write over existing files\n volume : str\n Defaults to T1\n atlas : bool\n Specify the --atlas option for mri_watershed\n gcaatlas : bool\n Use the subcortical atlas\n preflood : int\n Change the preflood height\n show : bool\n Show surfaces to visually inspect all three BEM surfaces (recommended).\n\n .. 
versionadded:: 0.12\n\n verbose : bool, str or None\n If not None, override default verbose level\n\n Notes\n -----\n .. versionadded:: 0.10\n \"\"\"\n from .viz.misc import plot_bem\n env, mri_dir = _prepare_env(subject, subjects_dir,\n requires_freesurfer=True)[:2]\n\n subjects_dir = env['SUBJECTS_DIR']\n subject_dir = op.join(subjects_dir, subject)\n mri_dir = op.join(subject_dir, 'mri')\n T1_dir = op.join(mri_dir, volume)\n T1_mgz = op.join(mri_dir, volume + '.mgz')\n bem_dir = op.join(subject_dir, 'bem')\n ws_dir = op.join(subject_dir, 'bem', 'watershed')\n if not op.isdir(bem_dir):\n os.makedirs(bem_dir)\n if not op.isdir(T1_dir) and not op.isfile(T1_mgz):\n raise RuntimeError('Could not find the MRI data')\n if op.isdir(ws_dir):\n if not overwrite:\n raise RuntimeError('%s already exists. Use the --overwrite option'\n ' to recreate it.' % ws_dir)\n else:\n shutil.rmtree(ws_dir)\n # put together the command\n cmd = ['mri_watershed']\n if preflood:\n cmd += [\"-h\", \"%s\" % int(preflood)]\n\n if gcaatlas:\n cmd += ['-atlas', '-T1', '-brain_atlas', env['FREESURFER_HOME'] +\n '/average/RB_all_withskull_2007-08-08.gca',\n subject_dir + '/mri/transforms/talairach_with_skull.lta']\n elif atlas:\n cmd += ['-atlas']\n if op.exists(T1_mgz):\n cmd += ['-useSRAS', '-surf', op.join(ws_dir, subject), T1_mgz,\n op.join(ws_dir, 'ws')]\n else:\n cmd += ['-useSRAS', '-surf', op.join(ws_dir, subject), T1_dir,\n op.join(ws_dir, 'ws')]\n # report and run\n logger.info('\\nRunning mri_watershed for BEM segmentation with the '\n 'following parameters:\\n\\n'\n 'SUBJECTS_DIR = %s\\n'\n 'SUBJECT = %s\\n'\n 'Results dir = %s\\n' % (subjects_dir, subject, ws_dir))\n os.makedirs(op.join(ws_dir, 'ws'))\n run_subprocess(cmd, env=env)\n\n if op.isfile(T1_mgz):\n new_info = _extract_volume_info(T1_mgz)\n if new_info is None:\n warn('nibabel is required to replace the volume info. Volume info'\n 'not updated in the written surface.')\n new_info = dict()\n surfs = ['brain', 'inner_skull', 'outer_skull', 'outer_skin']\n for s in surfs:\n surf_ws_out = op.join(ws_dir, '%s_%s_surface' % (subject, s))\n\n rr, tris, volume_info = read_surface(surf_ws_out,\n read_metadata=True)\n volume_info.update(new_info) # replace volume info, 'head' stays\n\n write_surface(s, rr, tris, volume_info=volume_info)\n # Create symbolic links\n surf_out = op.join(bem_dir, '%s.surf' % s)\n if not overwrite and op.exists(surf_out):\n skip_symlink = True\n else:\n if op.exists(surf_out):\n os.remove(surf_out)\n _symlink(surf_ws_out, surf_out)\n skip_symlink = False\n\n if skip_symlink:\n logger.info(\"Unable to create all symbolic links to .surf files \"\n \"in bem folder. Use --overwrite option to recreate \"\n \"them.\")\n dest = op.join(bem_dir, 'watershed')\n else:\n logger.info(\"Symbolic links to .surf files created in bem folder\")\n dest = bem_dir\n\n logger.info(\"\\nThank you for waiting.\\nThe BEM triangulations for this \"\n \"subject are now available at:\\n%s.\" % dest)\n\n # Write a head file for coregistration\n fname_head = op.join(bem_dir, subject + '-head.fif')\n if op.isfile(fname_head):\n os.remove(fname_head)\n\n surf = _surfaces_to_bem([op.join(ws_dir, subject + '_outer_skin_surface')],\n [FIFF.FIFFV_BEM_SURF_ID_HEAD], sigmas=[1])\n write_bem_surfaces(fname_head, surf)\n\n # Show computed BEM surfaces\n if show:\n plot_bem(subject=subject, subjects_dir=subjects_dir,\n orientation='coronal', slices=None, show=True)\n\n logger.info('Created %s\\n\\nComplete.' 
% (fname_head,))\n\n\ndef _extract_volume_info(mgz, raise_error=True):\n \"\"\"Extract volume info from a mgz file.\"\"\"\n try:\n import nibabel as nib\n except ImportError:\n return # warning raised elsewhere\n header = nib.load(mgz).header\n vol_info = dict()\n version = header['version']\n if version == 1:\n version = '%s # volume info valid' % version\n else:\n raise ValueError('Volume info invalid.')\n vol_info['valid'] = version\n vol_info['filename'] = mgz\n vol_info['volume'] = header['dims'][:3]\n vol_info['voxelsize'] = header['delta']\n vol_info['xras'], vol_info['yras'], vol_info['zras'] = header['Mdc'].T\n vol_info['cras'] = header['Pxyz_c']\n return vol_info\n\n\n# ############################################################################\n# Read\n\n@verbose\ndef read_bem_surfaces(fname, patch_stats=False, s_id=None, verbose=None):\n \"\"\"Read the BEM surfaces from a FIF file.\n\n Parameters\n ----------\n fname : string\n The name of the file containing the surfaces.\n patch_stats : bool, optional (default False)\n Calculate and add cortical patch statistics to the surfaces.\n s_id : int | None\n If int, only read and return the surface with the given s_id.\n An error will be raised if it doesn't exist. If None, all\n surfaces are read and returned.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see :func:`mne.verbose`\n and :ref:`Logging documentation <tut_logging>` for more).\n\n Returns\n -------\n surf: list | dict\n A list of dictionaries that each contain a surface. If s_id\n is not None, only the requested surface will be returned.\n\n See Also\n --------\n write_bem_surfaces, write_bem_solution, make_bem_model\n \"\"\"\n # Default coordinate frame\n coord_frame = FIFF.FIFFV_COORD_MRI\n # Open the file, create directory\n f, tree, _ = fiff_open(fname)\n with f as fid:\n # Find BEM\n bem = dir_tree_find(tree, FIFF.FIFFB_BEM)\n if bem is None or len(bem) == 0:\n raise ValueError('BEM data not found')\n\n bem = bem[0]\n # Locate all surfaces\n bemsurf = dir_tree_find(bem, FIFF.FIFFB_BEM_SURF)\n if bemsurf is None:\n raise ValueError('BEM surface data not found')\n\n logger.info(' %d BEM surfaces found' % len(bemsurf))\n # Coordinate frame possibly at the top level\n tag = find_tag(fid, bem, FIFF.FIFF_BEM_COORD_FRAME)\n if tag is not None:\n coord_frame = tag.data\n # Read all surfaces\n if s_id is not None:\n surf = [_read_bem_surface(fid, bsurf, coord_frame, s_id)\n for bsurf in bemsurf]\n surf = [s for s in surf if s is not None]\n if not len(surf) == 1:\n raise ValueError('surface with id %d not found' % s_id)\n else:\n surf = list()\n for bsurf in bemsurf:\n logger.info(' Reading a surface...')\n this = _read_bem_surface(fid, bsurf, coord_frame)\n surf.append(this)\n logger.info('[done]')\n logger.info(' %d BEM surfaces read' % len(surf))\n for this in surf:\n if patch_stats or this['nn'] is None:\n complete_surface_info(this, copy=False)\n return surf[0] if s_id is not None else surf\n\n\ndef _read_bem_surface(fid, this, def_coord_frame, s_id=None):\n \"\"\"Read one bem surface.\"\"\"\n # fid should be open as a context manager here\n res = dict()\n # Read all the interesting stuff\n tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_ID)\n\n if tag is None:\n res['id'] = FIFF.FIFFV_BEM_SURF_ID_UNKNOWN\n else:\n res['id'] = int(tag.data)\n\n if s_id is not None and res['id'] != s_id:\n return None\n\n tag = find_tag(fid, this, FIFF.FIFF_BEM_SIGMA)\n res['sigma'] = 1.0 if tag is None else float(tag.data)\n\n tag = find_tag(fid, this, 
FIFF.FIFF_BEM_SURF_NNODE)\n if tag is None:\n raise ValueError('Number of vertices not found')\n\n res['np'] = int(tag.data)\n\n tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_NTRI)\n if tag is None:\n raise ValueError('Number of triangles not found')\n res['ntri'] = int(tag.data)\n\n tag = find_tag(fid, this, FIFF.FIFF_MNE_COORD_FRAME)\n if tag is None:\n tag = find_tag(fid, this, FIFF.FIFF_BEM_COORD_FRAME)\n if tag is None:\n res['coord_frame'] = def_coord_frame\n else:\n res['coord_frame'] = tag.data\n else:\n res['coord_frame'] = tag.data\n\n # Vertices, normals, and triangles\n tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_NODES)\n if tag is None:\n raise ValueError('Vertex data not found')\n\n res['rr'] = tag.data.astype(np.float) # XXX : double because of mayavi bug\n if res['rr'].shape[0] != res['np']:\n raise ValueError('Vertex information is incorrect')\n\n tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NORMALS)\n if tag is None:\n tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_NORMALS)\n if tag is None:\n res['nn'] = None\n else:\n res['nn'] = tag.data.copy()\n if res['nn'].shape[0] != res['np']:\n raise ValueError('Vertex normal information is incorrect')\n\n tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_TRIANGLES)\n if tag is None:\n raise ValueError('Triangulation not found')\n\n res['tris'] = tag.data - 1 # index start at 0 in Python\n if res['tris'].shape[0] != res['ntri']:\n raise ValueError('Triangulation information is incorrect')\n\n return res\n\n\n@verbose\ndef read_bem_solution(fname, verbose=None):\n \"\"\"Read the BEM solution from a file.\n\n Parameters\n ----------\n fname : string\n The file containing the BEM solution.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see :func:`mne.verbose`\n and :ref:`Logging documentation <tut_logging>` for more).\n\n Returns\n -------\n bem : instance of ConductorModel\n The BEM solution.\n\n See Also\n --------\n write_bem_solution, read_bem_surfaces, write_bem_surfaces,\n make_bem_solution\n \"\"\"\n # mirrors fwd_bem_load_surfaces from fwd_bem_model.c\n logger.info('Loading surfaces...')\n bem_surfs = read_bem_surfaces(fname, patch_stats=True, verbose=False)\n if len(bem_surfs) == 3:\n logger.info('Three-layer model surfaces loaded.')\n needed = np.array([FIFF.FIFFV_BEM_SURF_ID_HEAD,\n FIFF.FIFFV_BEM_SURF_ID_SKULL,\n FIFF.FIFFV_BEM_SURF_ID_BRAIN])\n if not all(x['id'] in needed for x in bem_surfs):\n raise RuntimeError('Could not find necessary BEM surfaces')\n # reorder surfaces as necessary (shouldn't need to?)\n reorder = [None] * 3\n for x in bem_surfs:\n reorder[np.where(x['id'] == needed)[0][0]] = x\n bem_surfs = reorder\n elif len(bem_surfs) == 1:\n if not bem_surfs[0]['id'] == FIFF.FIFFV_BEM_SURF_ID_BRAIN:\n raise RuntimeError('BEM Surfaces not found')\n logger.info('Homogeneous model surface loaded.')\n\n # convert from surfaces to solution\n bem = ConductorModel(is_sphere=False, surfs=bem_surfs)\n logger.info('\\nLoading the solution matrix...\\n')\n f, tree, _ = fiff_open(fname)\n with f as fid:\n # Find the BEM data\n nodes = dir_tree_find(tree, FIFF.FIFFB_BEM)\n if len(nodes) == 0:\n raise RuntimeError('No BEM data in %s' % fname)\n bem_node = nodes[0]\n\n # Approximation method\n tag = find_tag(f, bem_node, FIFF.FIFF_BEM_APPROX)\n if tag is None:\n raise RuntimeError('No BEM solution found in %s' % fname)\n method = tag.data[0]\n if method not in (FIFF.FIFFV_BEM_APPROX_CONST,\n FIFF.FIFFV_BEM_APPROX_LINEAR):\n raise RuntimeError('Cannot handle BEM approximation method : 
%d'\n % method)\n\n tag = find_tag(fid, bem_node, FIFF.FIFF_BEM_POT_SOLUTION)\n dims = tag.data.shape\n if len(dims) != 2:\n raise RuntimeError('Expected a two-dimensional solution matrix '\n 'instead of a %d dimensional one' % dims[0])\n\n dim = 0\n for surf in bem['surfs']:\n if method == FIFF.FIFFV_BEM_APPROX_LINEAR:\n dim += surf['np']\n else: # method == FIFF.FIFFV_BEM_APPROX_CONST\n dim += surf['ntri']\n\n if dims[0] != dim or dims[1] != dim:\n raise RuntimeError('Expected a %d x %d solution matrix instead of '\n 'a %d x %d one' % (dim, dim, dims[1], dims[0]))\n sol = tag.data\n nsol = dims[0]\n\n bem['solution'] = sol\n bem['nsol'] = nsol\n bem['bem_method'] = method\n\n # Gamma factors and multipliers\n _add_gamma_multipliers(bem)\n kind = {\n FIFF.FIFFV_BEM_APPROX_CONST: 'constant collocation',\n FIFF.FIFFV_BEM_APPROX_LINEAR: 'linear_collocation',\n }[bem['bem_method']]\n logger.info('Loaded %s BEM solution from %s', kind, fname)\n return bem\n\n\ndef _add_gamma_multipliers(bem):\n \"\"\"Add gamma and multipliers in-place.\"\"\"\n bem['sigma'] = np.array([surf['sigma'] for surf in bem['surfs']])\n # Dirty trick for the zero conductivity outside\n sigma = np.r_[0.0, bem['sigma']]\n bem['source_mult'] = 2.0 / (sigma[1:] + sigma[:-1])\n bem['field_mult'] = sigma[1:] - sigma[:-1]\n # make sure subsequent \"zip\"s work correctly\n assert len(bem['surfs']) == len(bem['field_mult'])\n bem['gamma'] = ((sigma[1:] - sigma[:-1])[np.newaxis, :] /\n (sigma[1:] + sigma[:-1])[:, np.newaxis])\n\n\n_surf_dict = {'inner_skull': FIFF.FIFFV_BEM_SURF_ID_BRAIN,\n 'outer_skull': FIFF.FIFFV_BEM_SURF_ID_SKULL,\n 'head': FIFF.FIFFV_BEM_SURF_ID_HEAD}\n\n\ndef _bem_find_surface(bem, id_):\n \"\"\"Find surface from already-loaded BEM.\"\"\"\n if isinstance(id_, string_types):\n name = id_\n id_ = _surf_dict[id_]\n else:\n name = _bem_explain_surface(id_)\n idx = np.where(np.array([s['id'] for s in bem['surfs']]) == id_)[0]\n if len(idx) != 1:\n raise RuntimeError('BEM model does not have the %s triangulation'\n % name.replace('_', ' '))\n return bem['surfs'][idx[0]]\n\n\ndef _bem_explain_surface(id_):\n \"\"\"Return a string corresponding to the given surface ID.\"\"\"\n _rev_dict = dict((val, key) for key, val in _surf_dict.items())\n return _rev_dict[id_]\n\n\n# ############################################################################\n# Write\n\ndef write_bem_surfaces(fname, surfs):\n \"\"\"Write BEM surfaces to a fiff file.\n\n Parameters\n ----------\n fname : str\n Filename to write.\n surfs : dict | list of dict\n The surfaces, or a single surface.\n \"\"\"\n if isinstance(surfs, dict):\n surfs = [surfs]\n with start_file(fname) as fid:\n start_block(fid, FIFF.FIFFB_BEM)\n write_int(fid, FIFF.FIFF_BEM_COORD_FRAME, surfs[0]['coord_frame'])\n _write_bem_surfaces_block(fid, surfs)\n end_block(fid, FIFF.FIFFB_BEM)\n end_file(fid)\n\n\ndef _write_bem_surfaces_block(fid, surfs):\n \"\"\"Write bem surfaces to open file handle.\"\"\"\n for surf in surfs:\n start_block(fid, FIFF.FIFFB_BEM_SURF)\n write_float(fid, FIFF.FIFF_BEM_SIGMA, surf['sigma'])\n write_int(fid, FIFF.FIFF_BEM_SURF_ID, surf['id'])\n write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, surf['coord_frame'])\n write_int(fid, FIFF.FIFF_BEM_SURF_NNODE, surf['np'])\n write_int(fid, FIFF.FIFF_BEM_SURF_NTRI, surf['ntri'])\n write_float_matrix(fid, FIFF.FIFF_BEM_SURF_NODES, surf['rr'])\n # index start at 0 in Python\n write_int_matrix(fid, FIFF.FIFF_BEM_SURF_TRIANGLES,\n surf['tris'] + 1)\n if 'nn' in surf and surf['nn'] is not None and len(surf['nn']) > 0:\n 
write_float_matrix(fid, FIFF.FIFF_BEM_SURF_NORMALS, surf['nn'])\n end_block(fid, FIFF.FIFFB_BEM_SURF)\n\n\ndef write_bem_solution(fname, bem):\n \"\"\"Write a BEM model with solution.\n\n Parameters\n ----------\n fname : str\n The filename to use.\n bem : instance of ConductorModel\n The BEM model with solution to save.\n\n See Also\n --------\n read_bem_solution\n \"\"\"\n _check_bem_size(bem['surfs'])\n with start_file(fname) as fid:\n start_block(fid, FIFF.FIFFB_BEM)\n # Coordinate frame (mainly for backward compatibility)\n write_int(fid, FIFF.FIFF_BEM_COORD_FRAME,\n bem['surfs'][0]['coord_frame'])\n # Surfaces\n _write_bem_surfaces_block(fid, bem['surfs'])\n # The potential solution\n if 'solution' in bem:\n if bem['bem_method'] != FIFF.FWD_BEM_LINEAR_COLL:\n raise RuntimeError('Only linear collocation supported')\n write_int(fid, FIFF.FIFF_BEM_APPROX, FIFF.FIFFV_BEM_APPROX_LINEAR)\n write_float_matrix(fid, FIFF.FIFF_BEM_POT_SOLUTION,\n bem['solution'])\n end_block(fid, FIFF.FIFFB_BEM)\n end_file(fid)\n\n\n# #############################################################################\n# Create 3-Layers BEM model from Flash MRI images\n\ndef _prepare_env(subject, subjects_dir, requires_freesurfer):\n \"\"\"Prepare an env object for subprocess calls.\"\"\"\n env = os.environ.copy()\n if requires_freesurfer and not os.environ.get('FREESURFER_HOME'):\n raise RuntimeError('I cannot find freesurfer. The FREESURFER_HOME '\n 'environment variable is not set.')\n\n if not isinstance(subject, string_types):\n raise TypeError('The subject argument must be set')\n\n subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)\n if not op.isdir(subjects_dir):\n raise RuntimeError('Could not find the MRI data directory \"%s\"'\n % subjects_dir)\n subject_dir = op.join(subjects_dir, subject)\n if not op.isdir(subject_dir):\n raise RuntimeError('Could not find the subject data directory \"%s\"'\n % (subject_dir,))\n env['SUBJECT'] = subject\n env['SUBJECTS_DIR'] = subjects_dir\n mri_dir = op.join(subject_dir, 'mri')\n bem_dir = op.join(subject_dir, 'bem')\n return env, mri_dir, bem_dir\n\n\n@verbose\ndef convert_flash_mris(subject, flash30=True, convert=True, unwarp=False,\n subjects_dir=None, verbose=None):\n \"\"\"Convert DICOM files for use with make_flash_bem.\n\n Parameters\n ----------\n subject : str\n Subject name.\n flash30 : bool\n Use 30-degree flip angle data.\n convert : bool\n Assume that the Flash MRI images have already been converted\n to mgz files.\n unwarp : bool\n Run grad_unwarp with -unwarp option on each of the converted\n data sets. It requires FreeSurfer's MATLAB toolbox to be properly\n installed.\n subjects_dir : string, or None\n Path to SUBJECTS_DIR if it is not set in the environment.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see :func:`mne.verbose`\n and :ref:`Logging documentation <tut_logging>` for more).\n\n Notes\n -----\n Before running this script do the following:\n (unless convert=False is specified)\n\n 1. Copy all of your FLASH images in a single directory <source> and\n create a directory <dest> to hold the output of mne_organize_dicom\n 2. cd to <dest> and run\n $ mne_organize_dicom <source>\n to create an appropriate directory structure\n 3. Create symbolic links to make flash05 and flash30 point to the\n appropriate series:\n $ ln -s <FLASH 5 series dir> flash05\n $ ln -s <FLASH 30 series dir> flash30\n Some partition formats (e.g. 
FAT32) do not support symbolic links.\n In this case, copy the file to the appropriate series:\n $ cp <FLASH 5 series dir> flash05\n $ cp <FLASH 30 series dir> flash30\n 4. cd to the directory where flash05 and flash30 links are\n 5. Set SUBJECTS_DIR and SUBJECT environment variables appropriately\n 6. Run this script\n\n This function assumes that the Freesurfer segmentation of the subject\n has been completed. In particular, the T1.mgz and brain.mgz MRI volumes\n should be, as usual, in the subject's mri directory.\n \"\"\"\n env, mri_dir = _prepare_env(subject, subjects_dir,\n requires_freesurfer=True)[:2]\n curdir = os.getcwd()\n # Step 1a : Data conversion to mgz format\n if not op.exists(op.join(mri_dir, 'flash', 'parameter_maps')):\n os.makedirs(op.join(mri_dir, 'flash', 'parameter_maps'))\n echos_done = 0\n if convert:\n logger.info(\"\\n---- Converting Flash images ----\")\n echos = ['001', '002', '003', '004', '005', '006', '007', '008']\n if flash30:\n flashes = ['05']\n else:\n flashes = ['05', '30']\n #\n missing = False\n for flash in flashes:\n for echo in echos:\n if not op.isdir(op.join('flash' + flash, echo)):\n missing = True\n if missing:\n echos = ['002', '003', '004', '005', '006', '007', '008', '009']\n for flash in flashes:\n for echo in echos:\n if not op.isdir(op.join('flash' + flash, echo)):\n raise RuntimeError(\"Directory %s is missing.\"\n % op.join('flash' + flash, echo))\n #\n for flash in flashes:\n for echo in echos:\n if not op.isdir(op.join('flash' + flash, echo)):\n raise RuntimeError(\"Directory %s is missing.\"\n % op.join('flash' + flash, echo))\n sample_file = glob.glob(op.join('flash' + flash, echo, '*'))[0]\n dest_file = op.join(mri_dir, 'flash',\n 'mef' + flash + '_' + echo + '.mgz')\n # do not redo if already present\n if op.isfile(dest_file):\n logger.info(\"The file %s is already there\")\n else:\n cmd = ['mri_convert', sample_file, dest_file]\n run_subprocess(cmd, env=env)\n echos_done += 1\n # Step 1b : Run grad_unwarp on converted files\n os.chdir(op.join(mri_dir, \"flash\"))\n files = glob.glob(\"mef*.mgz\")\n if unwarp:\n logger.info(\"\\n---- Unwarp mgz data sets ----\")\n for infile in files:\n outfile = infile.replace(\".mgz\", \"u.mgz\")\n cmd = ['grad_unwarp', '-i', infile, '-o', outfile, '-unwarp',\n 'true']\n run_subprocess(cmd, env=env)\n # Clear parameter maps if some of the data were reconverted\n if echos_done > 0 and op.exists(\"parameter_maps\"):\n shutil.rmtree(\"parameter_maps\")\n logger.info(\"\\nParameter maps directory cleared\")\n if not op.exists(\"parameter_maps\"):\n os.makedirs(\"parameter_maps\")\n # Step 2 : Create the parameter maps\n if flash30:\n logger.info(\"\\n---- Creating the parameter maps ----\")\n if unwarp:\n files = glob.glob(\"mef05*u.mgz\")\n if len(os.listdir('parameter_maps')) == 0:\n cmd = ['mri_ms_fitparms'] + files + ['parameter_maps']\n run_subprocess(cmd, env=env)\n else:\n logger.info(\"Parameter maps were already computed\")\n # Step 3 : Synthesize the flash 5 images\n logger.info(\"\\n---- Synthesizing flash 5 images ----\")\n os.chdir('parameter_maps')\n if not op.exists('flash5.mgz'):\n cmd = ['mri_synthesize', '20 5 5', 'T1.mgz', 'PD.mgz',\n 'flash5.mgz']\n run_subprocess(cmd, env=env)\n os.remove('flash5_reg.mgz')\n else:\n logger.info(\"Synthesized flash 5 volume is already there\")\n else:\n logger.info(\"\\n---- Averaging flash5 echoes ----\")\n os.chdir('parameter_maps')\n if unwarp:\n files = glob.glob(\"mef05*u.mgz\")\n else:\n files = glob.glob(\"mef05*.mgz\")\n cmd = 
['mri_average', '-noconform', files, 'flash5.mgz']\n run_subprocess(cmd, env=env)\n if op.exists('flash5_reg.mgz'):\n os.remove('flash5_reg.mgz')\n\n # Go back to initial directory\n os.chdir(curdir)\n\n\n@verbose\ndef make_flash_bem(subject, overwrite=False, show=True, subjects_dir=None,\n flash_path=None, verbose=None):\n \"\"\"Create 3-Layer BEM model from prepared flash MRI images.\n\n Parameters\n ----------\n subject : str\n Subject name.\n overwrite : bool\n Write over existing .surf files in bem folder.\n show : bool\n Show surfaces to visually inspect all three BEM surfaces (recommended).\n subjects_dir : string, or None\n Path to SUBJECTS_DIR if it is not set in the environment.\n flash_path : str | None\n Path to the flash images. If None (default), mri/flash/parameter_maps\n within the subject reconstruction is used.\n\n .. versionadded:: 0.13.0\n\n verbose : bool, str, int, or None\n If not None, override default verbose level (see :func:`mne.verbose`\n and :ref:`Logging documentation <tut_logging>` for more).\n\n Notes\n -----\n This program assumes that FreeSurfer is installed and sourced properly.\n\n This function extracts the BEM surfaces (outer skull, inner skull, and\n outer skin) from multiecho FLASH MRI data with spin angles of 5 and 30\n degrees, in mgz format.\n\n See Also\n --------\n convert_flash_mris\n \"\"\"\n from .viz.misc import plot_bem\n\n is_test = os.environ.get('MNE_SKIP_FS_FLASH_CALL', False)\n\n env, mri_dir, bem_dir = _prepare_env(subject, subjects_dir,\n requires_freesurfer=True)\n\n if flash_path is None:\n flash_path = op.join(mri_dir, 'flash', 'parameter_maps')\n else:\n flash_path = op.abspath(flash_path)\n curdir = os.getcwd()\n subjects_dir = env['SUBJECTS_DIR']\n\n logger.info('\\nProcessing the flash MRI data to produce BEM meshes with '\n 'the following parameters:\\n'\n 'SUBJECTS_DIR = %s\\n'\n 'SUBJECT = %s\\n'\n 'Result dir = %s\\n' % (subjects_dir, subject,\n op.join(bem_dir, 'flash')))\n # Step 4 : Register with MPRAGE\n logger.info(\"\\n---- Registering flash 5 with MPRAGE ----\")\n flash5 = op.join(flash_path, 'flash5.mgz')\n flash5_reg = op.join(flash_path, 'flash5_reg.mgz')\n if not op.exists(flash5_reg):\n if op.exists(op.join(mri_dir, 'T1.mgz')):\n ref_volume = op.join(mri_dir, 'T1.mgz')\n else:\n ref_volume = op.join(mri_dir, 'T1')\n cmd = ['fsl_rigid_register', '-r', ref_volume, '-i', flash5,\n '-o', flash5_reg]\n run_subprocess(cmd, env=env)\n else:\n logger.info(\"Registered flash 5 image is already there\")\n # Step 5a : Convert flash5 into COR\n logger.info(\"\\n---- Converting flash5 volume into COR format ----\")\n shutil.rmtree(op.join(mri_dir, 'flash5'), ignore_errors=True)\n os.makedirs(op.join(mri_dir, 'flash5'))\n if not is_test: # CIs don't have freesurfer, skipped when testing.\n cmd = ['mri_convert', flash5_reg, op.join(mri_dir, 'flash5')]\n run_subprocess(cmd, env=env)\n # Step 5b and c : Convert the mgz volumes into COR\n os.chdir(mri_dir)\n convert_T1 = False\n if not op.isdir('T1') or len(glob.glob(op.join('T1', 'COR*'))) == 0:\n convert_T1 = True\n convert_brain = False\n if not op.isdir('brain') or len(glob.glob(op.join('brain', 'COR*'))) == 0:\n convert_brain = True\n logger.info(\"\\n---- Converting T1 volume into COR format ----\")\n if convert_T1:\n if not op.isfile('T1.mgz'):\n raise RuntimeError(\"Both T1 mgz and T1 COR volumes missing.\")\n os.makedirs('T1')\n cmd = ['mri_convert', 'T1.mgz', 'T1']\n run_subprocess(cmd, env=env)\n else:\n logger.info(\"T1 volume is already in COR format\")\n 
logger.info(\"\\n---- Converting brain volume into COR format ----\")\n if convert_brain:\n if not op.isfile('brain.mgz'):\n raise RuntimeError(\"Both brain mgz and brain COR volumes missing.\")\n os.makedirs('brain')\n cmd = ['mri_convert', 'brain.mgz', 'brain']\n run_subprocess(cmd, env=env)\n else:\n logger.info(\"Brain volume is already in COR format\")\n # Finally ready to go\n if not is_test: # CIs don't have freesurfer, skipped when testing.\n logger.info(\"\\n---- Creating the BEM surfaces ----\")\n cmd = ['mri_make_bem_surfaces', subject]\n run_subprocess(cmd, env=env)\n\n logger.info(\"\\n---- Converting the tri files into surf files ----\")\n os.chdir(bem_dir)\n if not op.exists('flash'):\n os.makedirs('flash')\n os.chdir('flash')\n surfs = ['inner_skull', 'outer_skull', 'outer_skin']\n for surf in surfs:\n shutil.move(op.join(bem_dir, surf + '.tri'), surf + '.tri')\n\n nodes, tris = read_tri(surf + '.tri', swap=True)\n vol_info = _extract_volume_info(flash5_reg)\n if vol_info is None:\n warn('nibabel is required to update the volume info. Volume info '\n 'omitted from the written surface.')\n else:\n vol_info['head'] = np.array([20])\n write_surface(surf + '.surf', nodes, tris, volume_info=vol_info)\n\n # Cleanup section\n logger.info(\"\\n---- Cleaning up ----\")\n os.chdir(bem_dir)\n os.remove('inner_skull_tmp.tri')\n os.chdir(mri_dir)\n if convert_T1:\n shutil.rmtree('T1')\n logger.info(\"Deleted the T1 COR volume\")\n if convert_brain:\n shutil.rmtree('brain')\n logger.info(\"Deleted the brain COR volume\")\n shutil.rmtree('flash5')\n logger.info(\"Deleted the flash5 COR volume\")\n # Create symbolic links to the .surf files in the bem folder\n logger.info(\"\\n---- Creating symbolic links ----\")\n os.chdir(bem_dir)\n for surf in surfs:\n surf = surf + '.surf'\n if not overwrite and op.exists(surf):\n skip_symlink = True\n else:\n if op.exists(surf):\n os.remove(surf)\n _symlink(op.join('flash', surf), op.join(surf))\n skip_symlink = False\n if skip_symlink:\n logger.info(\"Unable to create all symbolic links to .surf files \"\n \"in bem folder. Use --overwrite option to recreate them.\")\n dest = op.join(bem_dir, 'flash')\n else:\n logger.info(\"Symbolic links to .surf files created in bem folder\")\n dest = bem_dir\n logger.info(\"\\nThank you for waiting.\\nThe BEM triangulations for this \"\n \"subject are now available at:\\n%s.\\nWe hope the BEM meshes \"\n \"created will facilitate your MEG and EEG data analyses.\"\n % dest)\n # Show computed BEM surfaces\n if show:\n plot_bem(subject=subject, subjects_dir=subjects_dir,\n orientation='coronal', slices=None, show=True)\n\n # Go back to initial directory\n os.chdir(curdir)\n\n\ndef _check_bem_size(surfs):\n \"\"\"Check bem surface sizes.\"\"\"\n if len(surfs) > 1 and surfs[0]['np'] > 10000:\n warn('The bem surfaces have %s data points. 5120 (ico grade=4) '\n 'should be enough. Dense 3-layer bems may not save properly.' %\n surfs[0]['np'])\n\n\ndef _symlink(src, dest):\n \"\"\"Create a symlink.\"\"\"\n try:\n os.symlink(src, dest)\n except OSError:\n warn('Could not create symbolic link %s. Check that your partition '\n 'handles symbolic links. The file will be copied instead.' % dest)\n shutil.copy(src, dest)\n"
] | [
[
"numpy.sum",
"numpy.argsort",
"numpy.random.RandomState",
"numpy.log",
"numpy.abs",
"numpy.where",
"numpy.eye",
"numpy.zeros",
"numpy.dot",
"numpy.median",
"scipy.optimize.fmin_cobyla",
"scipy.linalg.inv",
"numpy.arange",
"numpy.power",
"numpy.max",
"numpy.min",
"numpy.array",
"numpy.linalg.norm",
"numpy.arctan2",
"numpy.empty",
"numpy.array_equal",
"numpy.sqrt",
"numpy.concatenate",
"scipy.linalg.svd"
]
] |
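The file captured above implements MNE-Python's FLASH BEM pipeline: convert_flash_mris averages or synthesizes the multi-echo FLASH acquisitions into flash5.mgz, and make_flash_bem registers that volume to the T1, extracts the inner-skull/outer-skull/outer-skin surfaces with FreeSurfer's mri_make_bem_surfaces, and links the resulting .surf files into the subject's bem folder. A minimal driving sketch, assuming both functions are importable from mne.bem and that convert_flash_mris accepts the parameters its body uses (flash30, convert, unwarp, subjects_dir); the subject name and SUBJECTS_DIR below are placeholders, and FreeSurfer (plus fsl_rigid_register for the registration step) must be installed and sourced:

import os
from mne.bem import convert_flash_mris, make_flash_bem  # assumed import location

subjects_dir = "/path/to/subjects"          # hypothetical FreeSurfer SUBJECTS_DIR
os.environ["SUBJECTS_DIR"] = subjects_dir

# Steps 1-3 above: convert the echoes, fit parameter maps, produce flash5.mgz
convert_flash_mris("sample", flash30=True, convert=True, unwarp=False,
                   subjects_dir=subjects_dir)

# Step 4 onward: register flash5 to the T1, extract the BEM surfaces, write .surf files
make_flash_bem("sample", overwrite=True, show=False, subjects_dir=subjects_dir)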
TencentYoutuResearch/SelfSupervisedLearning-DSM | [
"655a0a23a47bf2559f3d435384ae59a8871a5ff5"
] | [
"src/augment/basic_augmentation/noise.py"
] | [
"import torch\nimport torch.nn as nn\n\n\n\"\"\"\nusage\n z_rand = generate_noise([1,nzx,nzy], device=opt.device)\n z_rand = z_rand.expand(1,3,Z_opt.shape[2],Z_opt.shape[3])\n z_prev1 = 0.95*Z_opt +0.05*z_rand\n\"\"\"\n\n\ndef upsampling(im, sx, sy):\n m = nn.Upsample(size=[round(sx), round(sy)], mode='bilinear', align_corners=True)\n return m(im)\n\n\ndef generate_noise(size, num_samp=1, device='cuda', type='gaussian', scale=1):\n if type == 'gaussian':\n noise = torch.randn(num_samp, size[0], round(size[1]/scale), round(size[2]/scale))\n noise = upsampling(noise, size[1], size[2])\n if type == 'gaussian_mixture':\n noise1 = torch.randn(num_samp, size[0], size[1], size[2]) + 5\n noise2 = torch.randn(num_samp, size[0], size[1], size[2])\n noise = noise1 + noise2\n if type == 'uniform':\n noise = torch.randn(num_samp, size[0], size[1], size[2])\n return noise\n"
] | [
[
"torch.randn"
]
] |
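The augmentation helper captured above wraps torch.randn behind a small generate_noise API; note that the device argument is accepted but unused in the code shown, and the 'uniform' branch also draws from torch.randn. A minimal call, in the spirit of the usage sketched in the module's own docstring (the bare import assumes src/augment/basic_augmentation is on the path):

import torch
from noise import generate_noise  # record's file: src/augment/basic_augmentation/noise.py

# Two samples of 3x64x64 noise; with scale=2 the noise is drawn at 32x32
# and bilinearly upsampled back to 64x64 by the helper.
z = generate_noise([3, 64, 64], num_samp=2, type='gaussian', scale=2)
print(z.shape)   # torch.Size([2, 3, 64, 64])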
NoeSamaille/medical-detection-toolkit | [
"232d3d1444ccaac04e15a00d8030390560236871"
] | [
"utils/exp_utils.py"
] | [
"#!/usr/bin/env python\n# Copyright 2018 Division of Medical Image Computing, German Cancer Research Center (DKFZ).\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nfrom typing import Iterable, Tuple, Any, Union\nimport os, sys\nimport subprocess\nfrom multiprocessing import Process\n\nimport importlib.util\nimport pickle\n\nimport logging\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom collections import OrderedDict\nimport numpy as np\nimport torch\nimport pandas as pd\n\ndef split_off_process(target, *args, daemon: bool=False, **kwargs):\n \"\"\"Start a process that won't block parent script.\n No join(), no return value. If daemon=False: before parent exits, it waits for this to finish.\n :param target: the target function of the process.\n :params *args: args to pass to target.\n :param daemon: if False: before parent exits, it waits for this process to finish.\n :params **kwargs: kwargs to pass to target.\n \"\"\"\n p = Process(target=target, args=tuple(args), kwargs=kwargs, daemon=daemon)\n p.start()\n return p\n\ndef get_formatted_duration(seconds: float, format: str=\"hms\") -> str:\n \"\"\"Format a time in seconds.\n :param format: \"hms\" for hours mins secs or \"ms\" for min secs.\n \"\"\"\n mins, secs = divmod(seconds, 60)\n if format == \"ms\":\n t = \"{:d}m:{:02d}s\".format(int(mins), int(secs))\n elif format == \"hms\":\n h, mins = divmod(mins, 60)\n t = \"{:d}h:{:02d}m:{:02d}s\".format(int(h), int(mins), int(secs))\n else:\n raise Exception(\"Format {} not available, only 'hms' or 'ms'\".format(format))\n return t\n\nclass CombinedLogger(object):\n \"\"\"Combine console and tensorboard logger and record system metrics.\n \"\"\"\n\n def __init__(self, name: str, log_dir: str, server_env: bool=True, fold: Union[int, str]=\"all\"):\n self.pylogger = logging.getLogger(name)\n self.tboard = SummaryWriter(log_dir=os.path.join(log_dir, \"tboard\"))\n self.log_dir = log_dir\n self.fold = str(fold)\n self.server_env = server_env\n\n self.pylogger.setLevel(logging.DEBUG)\n self.log_file = os.path.join(log_dir, \"fold_\"+self.fold, 'exec.log')\n os.makedirs(os.path.dirname(self.log_file), exist_ok=True)\n self.pylogger.addHandler(logging.FileHandler(self.log_file))\n if not server_env:\n self.pylogger.addHandler(ColorHandler())\n else:\n self.pylogger.addHandler(logging.StreamHandler())\n self.pylogger.propagate = False\n\n def __getattr__(self, attr):\n \"\"\"delegate all undefined method requests to objects of\n this class in order pylogger, tboard (first find first serve).\n E.g., combinedlogger.add_scalars(...) 
should trigger self.tboard.add_scalars(...)\n \"\"\"\n for obj in [self.pylogger, self.tboard]:\n if attr in dir(obj):\n return getattr(obj, attr)\n print(\"logger attr not found\")\n\n def set_logfile(self, fold: Union[int, str, None]=None, log_file: Union[str, None]=None):\n if fold is not None:\n self.fold = str(fold)\n if log_file is None:\n self.log_file = os.path.join(self.log_dir, \"fold_\"+self.fold, 'exec.log')\n else:\n self.log_file = log_file\n os.makedirs(os.path.dirname(self.log_file), exist_ok=True)\n for hdlr in self.pylogger.handlers:\n hdlr.close()\n self.pylogger.handlers = []\n self.pylogger.addHandler(logging.FileHandler(self.log_file))\n if not self.server_env:\n self.pylogger.addHandler(ColorHandler())\n else:\n self.pylogger.addHandler(logging.StreamHandler())\n\n def metrics2tboard(self, metrics, global_step=None, suptitle=None):\n \"\"\"\n :param metrics: {'train': dataframe, 'val':df}, df as produced in\n evaluator.py.evaluate_predictions\n \"\"\"\n # print(\"metrics\", metrics)\n if global_step is None:\n global_step = len(metrics['train'][list(metrics['train'].keys())[0]]) - 1\n if suptitle is not None:\n suptitle = str(suptitle)\n else:\n suptitle = \"Fold_\" + str(self.fold)\n\n for key in ['train', 'val']:\n # series = {k:np.array(v[-1]) for (k,v) in metrics[key].items() if not np.isnan(v[-1]) and not 'Bin_Stats' in k}\n loss_series = {}\n mon_met_series = {}\n for tag, val in metrics[key].items():\n val = val[-1] # maybe remove list wrapping, recording in evaluator?\n if 'loss' in tag.lower() and not np.isnan(val):\n loss_series[\"{}\".format(tag)] = val\n elif not np.isnan(val):\n mon_met_series[\"{}\".format(tag)] = val\n\n self.tboard.add_scalars(suptitle + \"/Losses/{}\".format(key), loss_series, global_step)\n self.tboard.add_scalars(suptitle + \"/Monitor_Metrics/{}\".format(key), mon_met_series, global_step)\n self.tboard.add_scalars(suptitle + \"/Learning_Rate\", metrics[\"lr\"], global_step)\n return\n\n def __del__(self): # otherwise might produce multiple prints e.g. in ipython console\n for hdlr in self.pylogger.handlers:\n hdlr.close()\n self.pylogger.handlers = []\n del self.pylogger\n self.tboard.flush()\n # close somehow prevents main script from exiting\n # maybe revise this issue in a later pytorch version\n #self.tboard.close()\n\n\ndef get_logger(exp_dir: str, server_env: bool=False) -> CombinedLogger:\n \"\"\"\n creates logger instance. writing out info to file, to terminal and to tensorboard.\n :param exp_dir: experiment directory, where exec.log file is stored.\n :param server_env: True if operating in server environment (e.g., gpu cluster)\n :return: custom CombinedLogger instance.\n \"\"\"\n log_dir = os.path.join(exp_dir, \"logs\")\n logger = CombinedLogger('medicaldetectiontoolkit', log_dir, server_env=server_env)\n print(\"Logging to {}\".format(logger.log_file))\n return logger\n\n\ndef prep_exp(dataset_path, exp_path, server_env, use_stored_settings=True, is_training=True):\n \"\"\"\n I/O handling, creating of experiment folder structure. Also creates a snapshot of configs/model scripts and copies them to the exp_dir.\n This way the exp_dir contains all info needed to conduct an experiment, independent to changes in actual source code. Thus, training/inference of this experiment can be started at anytime. Therefore, the model script is copied back to the source code dir as tmp_model (tmp_backbone).\n Provides robust structure for cloud deployment.\n :param dataset_path: path to source code for specific data set. (e.g. 
medicaldetectiontoolkit/lidc_exp)\n :param exp_path: path to experiment directory.\n :param server_env: boolean flag. pass to configs script for cloud deployment.\n :param use_stored_settings: boolean flag. When starting training: If True, starts training from snapshot in existing experiment directory, else creates experiment directory on the fly using configs/model scripts from source code.\n :param is_training: boolean flag. distinguishes train vs. inference mode.\n :return:\n \"\"\"\n\n if is_training:\n if use_stored_settings:\n cf_file = import_module('cf_file', os.path.join(exp_path, 'configs.py'))\n cf = cf_file.configs(server_env)\n # in this mode, previously saved model and backbone need to be found in exp dir.\n if not os.path.isfile(os.path.join(exp_path, 'mdt_model.py')) or \\\n not os.path.isfile(os.path.join(exp_path, 'backbone.py')):\n raise Exception(\n \"Selected use_stored_settings option but no model and/or backbone source files exist in exp dir.\")\n cf.model_path = os.path.join(exp_path, 'mdt_model.py')\n cf.backbone_path = os.path.join(exp_path, 'backbone.py')\n else:\n # this case overwrites settings files in exp dir, i.e., default_configs, configs, backbone, model\n os.makedirs(exp_path, exist_ok=True)\n # run training with source code info and copy snapshot of model to exp_dir for later testing (overwrite scripts if exp_dir already exists.)\n subprocess.call('cp {} {}'.format('default_configs.py', os.path.join(exp_path, 'default_configs.py')),\n shell=True)\n subprocess.call(\n 'cp {} {}'.format(os.path.join(dataset_path, 'configs.py'), os.path.join(exp_path, 'configs.py')),\n shell=True)\n cf_file = import_module('cf_file', os.path.join(dataset_path, 'configs.py'))\n cf = cf_file.configs(server_env)\n subprocess.call('cp {} {}'.format(cf.model_path, os.path.join(exp_path, 'mdt_model.py')), shell=True)\n subprocess.call('cp {} {}'.format(cf.backbone_path, os.path.join(exp_path, 'backbone.py')), shell=True)\n if os.path.isfile(os.path.join(exp_path, \"fold_ids.pickle\")):\n subprocess.call('rm {}'.format(os.path.join(exp_path, \"fold_ids.pickle\")), shell=True)\n\n else:\n # testing, use model and backbone stored in exp dir.\n cf_file = import_module('cf_file', os.path.join(exp_path, 'configs.py'))\n cf = cf_file.configs(server_env)\n cf.model_path = os.path.join(exp_path, 'mdt_model.py')\n cf.backbone_path = os.path.join(exp_path, 'backbone.py')\n\n\n cf.exp_dir = exp_path\n cf.test_dir = os.path.join(cf.exp_dir, 'test')\n cf.plot_dir = os.path.join(cf.exp_dir, 'plots')\n if not os.path.exists(cf.test_dir):\n os.mkdir(cf.test_dir)\n if not os.path.exists(cf.plot_dir):\n os.mkdir(cf.plot_dir)\n cf.experiment_name = exp_path.split(\"/\")[-1]\n cf.created_fold_id_pickle = False\n\n return cf\n\n\n\ndef import_module(name: str, path: str):\n \"\"\"\n correct way of importing a module dynamically in python 3.\n :param name: name given to module instance.\n :param path: path to module.\n :return: module: returned module instance.\n \"\"\"\n spec = importlib.util.spec_from_file_location(name, path)\n module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(module)\n return module\n\n\ndef set_params_flag(module: torch.nn.Module, flag: Tuple[str, Any], check_overwrite: bool = True) -> torch.nn.Module:\n \"\"\"Set an attribute for all passed module parameters.\n\n :param flag: tuple (str attribute name : attr value)\n :param check_overwrite: if True, assert that attribute not already exists.\n\n \"\"\"\n for param in module.parameters():\n if 
check_overwrite:\n assert not hasattr(param, flag[0]), \\\n \"param {} already has attr {} (w/ val {})\".format(param, flag[0], getattr(param, flag[0]))\n setattr(param, flag[0], flag[1])\n return module\n\ndef parse_params_for_optim(net: torch.nn.Module, weight_decay: float = 0., exclude_from_wd: Iterable = (\"norm\",)) -> list:\n \"\"\"Split network parameters into weight-decay dependent groups for the optimizer.\n :param net: network.\n :param weight_decay: weight decay value for the parameters that it is applied to. excluded parameters will have\n weight decay 0.\n :param exclude_from_wd: List of strings of parameter-group names to exclude from weight decay. Options: \"norm\", \"bias\".\n :return:\n \"\"\"\n if weight_decay is None:\n weight_decay = 0.\n # pytorch implements parameter groups as dicts {'params': ...} and\n # weight decay as p.data.mul_(1 - group['lr'] * group['weight_decay'])\n norm_types = [torch.nn.BatchNorm1d, torch.nn.BatchNorm2d, torch.nn.BatchNorm3d,\n torch.nn.InstanceNorm1d, torch.nn.InstanceNorm2d, torch.nn.InstanceNorm3d,\n torch.nn.LayerNorm, torch.nn.GroupNorm, torch.nn.SyncBatchNorm, torch.nn.LocalResponseNorm]\n level_map = {\"bias\": \"weight\",\n \"norm\": \"module\"}\n type_map = {\"norm\": norm_types}\n\n exclude_from_wd = [str(name).lower() for name in exclude_from_wd]\n exclude_weight_names = [k for k, v in level_map.items() if k in exclude_from_wd and v == \"weight\"]\n exclude_module_types = tuple([type_ for k, v in level_map.items() if (k in exclude_from_wd and v == \"module\")\n for type_ in type_map[k]])\n\n if exclude_from_wd:\n print(\"excluding {} from weight decay.\".format(exclude_from_wd))\n\n for module in net.modules():\n if isinstance(module, exclude_module_types):\n set_params_flag(module, (\"no_wd\", True))\n for param_name, param in net.named_parameters():\n if np.any([ename in param_name for ename in exclude_weight_names]):\n setattr(param, \"no_wd\", True)\n\n with_dec, no_dec = [], []\n for param in net.parameters():\n if hasattr(param, \"no_wd\") and param.no_wd == True:\n no_dec.append(param)\n else:\n with_dec.append(param)\n orig_ps = sum(p.numel() for p in net.parameters())\n with_ps = sum(p.numel() for p in with_dec)\n wo_ps = sum(p.numel() for p in no_dec)\n assert orig_ps == with_ps + wo_ps, \"orig n parameters {} unequals sum of with wd {} and w/o wd {}.\"\\\n .format(orig_ps, with_ps, wo_ps)\n\n groups = [{'params': gr, 'weight_decay': wd} for (gr, wd) in [(no_dec, 0.), (with_dec, weight_decay)] if len(gr)>0]\n return groups\n\n\nclass ModelSelector:\n '''\n saves a checkpoint after each epoch as 'last_state' (can be loaded to continue interrupted training).\n saves the top-k (k=cf.save_n_models) ranked epochs. 
In inference, predictions of multiple epochs can be ensembled to improve performance.\n '''\n\n def __init__(self, cf, logger):\n\n self.cf = cf\n self.saved_epochs = [-1] * cf.save_n_models\n self.logger = logger\n\n def run_model_selection(self, net: torch.nn.Module, optimizer: torch.optim.Optimizer,\n monitor_metrics: dict, epoch: int):\n\n # take the mean over all selection criteria in each epoch\n non_nan_scores = np.mean(np.array([[0 if (ii is None or np.isnan(ii)) else ii for ii in monitor_metrics['val'][sc]] for sc in self.cf.model_selection_criteria]), 0)\n epochs_scores = [ii for ii in non_nan_scores[1:]]\n # ranking of epochs according to model_selection_criterion\n epoch_ranking = np.argsort(epochs_scores, kind=\"stable\")[::-1] + 1 #epochs start at 1\n # if set in configs, epochs < min_save_thresh are discarded from saving process.\n epoch_ranking = epoch_ranking[epoch_ranking >= self.cf.min_save_thresh]\n\n # check if current epoch is among the top-k epochs.\n if epoch in epoch_ranking[:self.cf.save_n_models]:\n\n save_dir = os.path.join(self.cf.fold_dir, '{}_best_checkpoint'.format(epoch))\n if not os.path.exists(save_dir):\n os.mkdir(save_dir)\n\n torch.save(net.state_dict(), os.path.join(save_dir, 'params.pth'))\n with open(os.path.join(save_dir, 'monitor_metrics.pickle'), 'wb') as handle:\n pickle.dump(monitor_metrics, handle)\n # save epoch_ranking to keep info for inference.\n np.save(os.path.join(self.cf.fold_dir, 'epoch_ranking'), epoch_ranking[:self.cf.save_n_models])\n np.save(os.path.join(save_dir, 'epoch_ranking'), epoch_ranking[:self.cf.save_n_models])\n\n self.logger.info(\n \"saving current epoch {} at rank {}\".format(epoch, np.argwhere(epoch_ranking == epoch)))\n # delete params of the epoch that just fell out of the top-k epochs.\n for se in [int(ii.split('_')[0]) for ii in os.listdir(self.cf.fold_dir) if 'best_checkpoint' in ii]:\n if se in epoch_ranking[self.cf.save_n_models:]:\n subprocess.call('rm -rf {}'.format(os.path.join(self.cf.fold_dir, '{}_best_checkpoint'.format(se))), shell=True)\n self.logger.info('deleting epoch {} at rank {}'.format(se, np.argwhere(epoch_ranking == se)))\n\n state = {\n 'epoch': epoch,\n 'state_dict': net.state_dict(),\n 'optimizer': optimizer.state_dict(),\n }\n\n # save checkpoint of current epoch.\n save_dir = os.path.join(self.cf.fold_dir, 'last_checkpoint'.format(epoch))\n if not os.path.exists(save_dir):\n os.mkdir(save_dir)\n torch.save(state, os.path.join(save_dir, 'params.pth'))\n np.save(os.path.join(save_dir, 'epoch_ranking'), epoch_ranking[:self.cf.save_n_models])\n with open(os.path.join(save_dir, 'monitor_metrics.pickle'), 'wb') as handle:\n pickle.dump(monitor_metrics, handle)\n return os.path.join(os.path.join(self.cf.fold_dir, f'{epoch_ranking[0]}_best_checkpoint'))\n\n\n\ndef load_checkpoint(checkpoint_path: str, net: torch.nn.Module, optimizer: torch.optim.Optimizer) -> Tuple:\n\n checkpoint = torch.load(os.path.join(checkpoint_path, 'params.pth'))\n net.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n with open(os.path.join(checkpoint_path, 'monitor_metrics.pickle'), 'rb') as handle:\n monitor_metrics = pickle.load(handle)\n starting_epoch = checkpoint['epoch'] + 1\n return starting_epoch, net, optimizer, monitor_metrics\n\n\n\ndef prepare_monitoring(cf):\n \"\"\"\n creates dictionaries, where train/val metrics are stored.\n \"\"\"\n metrics = {}\n # first entry for loss dict accounts for epoch starting at 1.\n metrics['train'] = OrderedDict()\n 
metrics['val'] = OrderedDict()\n metric_classes = []\n if 'rois' in cf.report_score_level:\n metric_classes.extend([v for k, v in cf.class_dict.items()])\n if 'patient' in cf.report_score_level:\n metric_classes.extend(['patient'])\n for cl in metric_classes:\n metrics['train'][cl + '_ap'] = [np.nan]\n metrics['val'][cl + '_ap'] = [np.nan]\n if cl == 'patient':\n metrics['train'][cl + '_auc'] = [np.nan]\n metrics['val'][cl + '_auc'] = [np.nan]\n\n return metrics\n\n\n\ndef create_csv_output(results_list, cf, logger):\n \"\"\"\n Write out test set predictions to .csv file. output format is one line per prediction:\n PatientID | PredictionID | [y1 x1 y2 x2 (z1) (z2)] | score | pred_classID\n Note, that prediction coordinates correspond to images as loaded for training/testing and need to be adapted when\n plotted over raw data (before preprocessing/resampling).\n :param results_list: [[patient_results, patient_id], [patient_results, patient_id], ...]\n \"\"\"\n\n logger.info('creating csv output file at {}'.format(os.path.join(cf.test_dir, 'results.csv')))\n predictions_df = pd.DataFrame(columns = ['patientID', 'predictionID', 'coords', 'score', 'pred_classID'])\n for r in results_list:\n\n pid = r[1]\n\n #optionally load resampling info from preprocessing to match output predictions with raw data.\n #with open(os.path.join(cf.exp_dir, 'test_resampling_info', pid), 'rb') as handle:\n # resampling_info = pickle.load(handle)\n\n for bix, box in enumerate(r[0][0]):\n if box[\"box_type\"] == \"gt\":\n continue\n assert box['box_type'] == 'det', box['box_type']\n coords = box['box_coords']\n score = box['box_score']\n pred_class_id = box['box_pred_class_id']\n out_coords = []\n if score >= cf.min_det_thresh:\n out_coords.append(coords[0]) #* resampling_info['scale'][0])\n out_coords.append(coords[1]) #* resampling_info['scale'][1])\n out_coords.append(coords[2]) #* resampling_info['scale'][0])\n out_coords.append(coords[3]) #* resampling_info['scale'][1])\n if len(coords) > 4:\n out_coords.append(coords[4]) #* resampling_info['scale'][2] + resampling_info['z_crop'])\n out_coords.append(coords[5]) #* resampling_info['scale'][2] + resampling_info['z_crop'])\n\n predictions_df.loc[len(predictions_df)] = [pid, bix, out_coords, score, pred_class_id]\n try:\n fold = cf.fold\n except:\n fold = 'hold_out'\n predictions_df.to_csv(os.path.join(cf.exp_dir, 'results_{}.csv'.format(fold)), index=False)\n\n\n\nclass _AnsiColorizer(object):\n \"\"\"\n A colorizer is an object that loosely wraps around a stream, allowing\n callers to write text to the stream in a particular color.\n\n Colorizer classes must implement C{supported()} and C{write(text, color)}.\n \"\"\"\n _colors = dict(black=30, red=31, green=32, yellow=33,\n blue=34, magenta=35, cyan=36, white=37, default=39)\n\n def __init__(self, stream):\n self.stream = stream\n\n @classmethod\n def supported(cls, stream=sys.stdout):\n \"\"\"\n A class method that returns True if the current platform supports\n coloring terminal output using this method. 
Returns False otherwise.\n \"\"\"\n if not stream.isatty():\n return False # auto color only on TTYs\n try:\n import curses\n except ImportError:\n return False\n else:\n try:\n try:\n return curses.tigetnum(\"colors\") > 2\n except curses.error:\n curses.setupterm()\n return curses.tigetnum(\"colors\") > 2\n except:\n raise\n # guess false in case of error\n return False\n\n def write(self, text, color):\n \"\"\"\n Write the given text to the stream in the given color.\n\n @param text: Text to be written to the stream.\n\n @param color: A string label for a color. e.g. 'red', 'white'.\n \"\"\"\n color = self._colors[color]\n self.stream.write('\\x1b[%sm%s\\x1b[0m' % (color, text))\n\n\n\nclass ColorHandler(logging.StreamHandler):\n\n\n def __init__(self, stream=sys.stdout):\n super(ColorHandler, self).__init__(_AnsiColorizer(stream))\n\n def emit(self, record):\n msg_colors = {\n logging.DEBUG: \"green\",\n logging.INFO: \"default\",\n logging.WARNING: \"red\",\n logging.ERROR: \"red\"\n }\n color = msg_colors.get(record.levelno, \"blue\")\n self.stream.write(record.msg + \"\\n\", color)\n\n"
] | [
[
"numpy.argwhere",
"numpy.any",
"pandas.DataFrame",
"numpy.argsort",
"numpy.isnan"
]
] |
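Among the utilities captured above, parse_params_for_optim splits a network's parameters into a weight-decayed group and a zero-decay group (normalization-layer parameters and, optionally, biases are excluded), already in the {'params': ..., 'weight_decay': ...} form PyTorch optimizers accept. A short sketch, assuming the module is importable as utils.exp_utils; the tiny network is only an illustration:

import torch
import torch.nn as nn
from utils.exp_utils import parse_params_for_optim  # record's file path

net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
# BatchNorm and bias parameters land in the weight_decay=0 group,
# the convolution weight in the weight_decay=1e-5 group.
groups = parse_params_for_optim(net, weight_decay=1e-5, exclude_from_wd=("norm", "bias"))
optimizer = torch.optim.AdamW(groups, lr=1e-4)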
theBraindonor/chicago-crime-arrests | [
"64cdb82fbe828d1316cf945b67ddc205ef190293"
] | [
"model/experiment/gaussian_naive_bayes_model.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n Experiment with a gaussian naive bayes model with a variety of balancing techniques on the cleaned data set\n\"\"\"\n\n__author__ = \"John Hoff\"\n__email__ = \"[email protected]\"\n__copyright__ = \"Copyright 2019, John Hoff\"\n__license__ = \"Creative Commons Attribution-ShareAlike 4.0 International License\"\n__version__ = \"1.0\"\n\nfrom imblearn.combine import SMOTEENN\nfrom imblearn.over_sampling import SMOTE\nfrom imblearn.under_sampling import RandomUnderSampler\n\nfrom sklearn.naive_bayes import GaussianNB\n\nfrom utility import Runner\nfrom model import load_clean_sample_data_frame, binned_geo_one_hot_data_mapper\n\n\nsample = None\nfit_increment = 10000\n\n\ndef test_gaussian_naive_bayes():\n runner = Runner(\n 'model/experiment/output/gaussian_naive_bayes_basic',\n load_clean_sample_data_frame(),\n 'arrest',\n GaussianNB()\n )\n runner.run_classification_experiment(\n sample=sample,\n record_predict_proba=True,\n transformer=binned_geo_one_hot_data_mapper,\n fit_increment=fit_increment,\n n_jobs=1\n )\n\n runner = Runner(\n 'model/experiment/output/gaussian_naive_bayes_under_sampled',\n load_clean_sample_data_frame(),\n 'arrest',\n GaussianNB()\n )\n runner.run_classification_experiment(\n sample=sample,\n record_predict_proba=True,\n transformer=binned_geo_one_hot_data_mapper,\n fit_increment=fit_increment,\n n_jobs=1,\n sampling=RandomUnderSampler()\n )\n\n runner = Runner(\n 'model/experiment/output/gaussian_naive_bayes_over_sampled',\n load_clean_sample_data_frame(),\n 'arrest',\n GaussianNB()\n )\n runner.run_classification_experiment(\n sample=sample,\n record_predict_proba=True,\n transformer=binned_geo_one_hot_data_mapper,\n fit_increment=fit_increment,\n n_jobs=1,\n sampling=SMOTE()\n )\n\n runner = Runner(\n 'model/experiment/output/gaussian_naive_bayes_combine_sampled',\n load_clean_sample_data_frame(),\n 'arrest',\n GaussianNB()\n )\n runner.run_classification_experiment(\n sample=sample,\n record_predict_proba=True,\n transformer=binned_geo_one_hot_data_mapper,\n fit_increment=fit_increment,\n n_jobs=1,\n sampling=SMOTEENN()\n )\n\n\nif __name__ == '__main__':\n test_gaussian_naive_bayes()\n"
] | [
[
"sklearn.naive_bayes.GaussianNB"
]
] |
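The experiment above trains GaussianNB four ways -- unbalanced, random under-sampling, SMOTE over-sampling, and combined SMOTE+ENN -- through the project's Runner and data-mapper helpers, which are internal to that repository and not reproduced here. The resampling pattern it exercises reduces to something like the following on synthetic stand-in data (not the Chicago crime set):

import numpy as np
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import train_test_split
from imblearn.over_sampling import SMOTE

rng = np.random.RandomState(0)
X = rng.randn(5000, 10)
y = (rng.rand(5000) < 0.1).astype(int)    # ~10% positives, mimicking a rare arrest label

X_tr, X_te, y_tr, y_te = train_test_split(X, y, stratify=y, random_state=0)
X_bal, y_bal = SMOTE(random_state=0).fit_resample(X_tr, y_tr)  # oversample the minority class

clf = GaussianNB().fit(X_bal, y_bal)
print(clf.score(X_te, y_te))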
tejas-9er/SVM-vs-LSSVM | [
"e44f63458680c39df370ddfcdf22e8c450d23128"
] | [
"data/LSSVM.py"
] | [
"import numpy as np\nimport scipy\nfrom scipy.sparse import linalg\nfrom sklearn.metrics import accuracy_score\n\nclass LSSVM:\n def __init__(self, kernel = 'linear', C = 1.0,gamma = 1.0, d = 2.0):\n kernels = {\n 'rbf':self.rbf,\n 'poly':self.polynomial,\n 'linear':self.linear\n }\n \n self.kernel = kernels[kernel]\n self.C = C\n self.gamma = 1.0\n self.d = d\n \n #Build the gram matrix\n def build_kernel_matrix(self, X, y):\n instances, dimensions = X.shape\n\n gram_matrix = np.zeros((instances,instances))\n #computing the gram matrix, involves going over the dataset and computing pairwise kernel function\n for i in range(0, instances):\n for j in range(0, instances):\n \n gram_matrix[i, j] = self.kernel(X[i], X[j])\n return gram_matrix\n\n def fit(self, X, y):\n\n self.kernel_matrix = self.build_kernel_matrix(X,y)\n identity_matrix = np.identity(X.shape[0])\n #We wish to solve Ax = B, so we begin by defining the matrices A, B\n A = np.zeros((X.shape[0]+1, X.shape[0]+1))\n B = np.ones(((X.shape[0]+1,1)))\n\n A[0][0] = 0\n A[0,1:X.shape[0]+1] = np.hstack((np.ones(X.shape[0])))\n A[1:X.shape[0]+1,0] = np.ones(X.shape[0])\n A[1:X.shape[0]+1,1:X.shape[0]+1] = self.kernel_matrix + identity_matrix / self.C\n \n #B is a column vector. \n B[0][0] = 0\n B[1:X.shape[0]+1,0] = y\n\n solution = scipy.sparse.linalg.cg(A,B)\n\n self.bias = solution[:-1]\n \n solution = solution[:-1]\n self.support_vector_alphas = []\n self.support_vector_labels = []\n self.support_vectors = []\n for index,alpha in enumerate(solution[0]):\n if(alpha > 1e-3):\n self.support_vector_alphas.append(alpha)\n self.support_vector_labels.append(y[index])\n self.support_vectors.append(X[index])\n #define kernels\n def linear(self, x1, x2):\n return np.dot(x1, x2.T)\n\n def polynomial(self, x1, x2):\n return (np.dot(x1, x2.T) ** self.d)\n \n def rbf(self,xi,xj):\n return np.exp(-self.gamma * np.linalg.norm(xi-xj)**2)\n\n def predict(self,X_test):\n predictions = []\n \n for instance in X_test:\n\n for index, sv in enumerate(self.support_vectors):\n prediction = np.sum(self.support_vector_alphas[index] * self.support_vector_labels[index] * self.kernel(sv,instance) + self.bias)\n \n predictions.append(np.sign(prediction).astype(int))\n\n return np.array(predictions)"
] | [
[
"numpy.ones",
"scipy.sparse.linalg.cg",
"numpy.linalg.norm",
"numpy.sign",
"numpy.zeros",
"numpy.array",
"numpy.dot",
"numpy.identity"
]
] |
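The LSSVM class above solves the least-squares SVM linear system with scipy's conjugate-gradient solver and keeps the coefficients larger than 1e-3 as "support vectors". A sketch of the intended call pattern on toy data (labels must be encoded as +/-1; note that the constructor hard-codes self.gamma = 1.0 regardless of the gamma argument, so the RBF width cannot actually be tuned as written):

import numpy as np
from sklearn.datasets import make_blobs
from LSSVM import LSSVM   # record's file lives at data/LSSVM.py

X, y = make_blobs(n_samples=60, centers=2, random_state=0)
y = np.where(y == 0, -1.0, 1.0)        # +/-1 labels, as fit() assumes

model = LSSVM(kernel='rbf', C=10.0)
model.fit(X, y)
scores = model.predict(X)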
gcruchon/test-opencv | [
"fdf7cb7a86f5606ca6df6170107a0264fbc43e9c"
] | [
"chapter1-cam.py"
] | [
"import cv2\nimport numpy as np\ncap = cv2.VideoCapture(0)\nkernel = np.ones((5, 5), np.uint8)\n\nwhile True:\n success, img = cap.read()\n cv2.imshow(\"Cam\", cv2.Canny(img, 100, 100))\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n"
] | [
[
"numpy.ones"
]
] |
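The webcam snippet above streams Canny edges until 'q' is pressed; the kernel array it creates is never used, and the capture is not released on exit. If the loop were extended, the conventional teardown afterwards would be:

cap.release()              # free the webcam device
cv2.destroyAllWindows()    # close the preview window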
diable201/ComputerVision | [
"5ee153363fa6757d3cd8b1add3e5d48b01a499e2"
] | [
"Lectures/lec_05/genSymbolImg.py"
] | [
"import cv2\nimport numpy as np\nfrom random import randint, uniform\nimport string, random\n\n\ndef addNoise(image): \n row,col = image.shape\n s_vs_p = 0.4\n amount = 0.01\n out = np.copy(image)\n # Salt mode\n num_salt = np.ceil(amount * image.size * s_vs_p)\n coords = [np.random.randint(0, i - 1, int(num_salt))\n for i in image.shape]\n out[tuple(coords)] = 1\n\n # Pepper mode\n num_pepper = np.ceil(amount* image.size * (1. - s_vs_p))\n coords = [np.random.randint(0, i - 1, int(num_pepper)) for i in image.shape]\n out[tuple(coords)] = 0\n return out\n\n\n# def addLines(img):\n# for i in range(randint(0,2)):\n# y1 = randint(0, img.shape[0])\n# y2 = randint(0, img.shape[0])\n# cv2.line(img, (0, y1), (img.shape[1], y2), 0, 1)\n\n\ndef addBlur(img, kw, kh):\n return cv2.blur(img, (kw, kh))\n\n\ndef text_generator(chars, size = 8):\n return ''.join(random.choice(chars) for _ in range(size))\n\n\ndef addText(img, chars, font, size, line_size):\n\n text = text_generator(chars, 1) \n\n cv2.putText(img, text, (0, img.shape[0]-4), font, size, (0, 0, 255), line_size, cv2.LINE_AA)\n\n return text\n\nsizes = [(70,58),(40,35),(75,70),(70,70),(70,70),(50,50)]\n\ndef genSymbolImg(chars = string.ascii_uppercase + string.digits,\n font = None,\n line_size = None,\n blur = None,\n kw = None, \n kh = None):\n\n if font is None:\n font = randint(0, 5)\n\n # if size is None:\n # size = uniform(2.5, 3.5)\n\n if line_size is None:\n line_size = randint(1, 3)\n\n if blur is None:\n blur = randint(0, 1)\n\n if kw is None:\n kw = randint(3, 9)\n\n if kh is None:\n kh = randint(3, 9)\n\n\n genImg = np.full(sizes[font], 255, dtype= np.uint8)\n\n text = addText(genImg, chars, font, 3, line_size)\n\n if randint(0, 1):\n genImg = addNoise(genImg)\n \n # if lines:\n # addLines(genImg)\n\n if blur:\n genImg = addBlur(genImg, kw, kh)\n\n\n return genImg, text\n\n\n\nif __name__ == '__main__':\n\n for i in xrange(10000):\n img, text = genSymbolImg(kw = 5, kh = 5, blur = 1)\n print(text)\n\n cv2.imshow(\"W\", img)\n k = cv2.waitKey(0)\n if k == 27:\n break"
] | [
[
"numpy.full",
"numpy.ceil",
"numpy.copy"
]
] |
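genSymbolImg above renders a random character onto a blank image, optionally adds salt-and-pepper noise and blur, and returns the (image, text) pair; its __main__ demo still uses Python 2's xrange even though print() is already called as a function, so under Python 3 the loop needs range. A Python 3 variant of that demo, assuming the module is importable from its directory:

import cv2
from genSymbolImg import genSymbolImg  # record's file: Lectures/lec_05/genSymbolImg.py

for _ in range(5):
    img, text = genSymbolImg(blur=1, kw=5, kh=5)
    print(text)
    cv2.imshow("symbol", img)
    if cv2.waitKey(0) == 27:   # Esc stops the preview
        break
cv2.destroyAllWindows()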
cradesto/pystella | [
"f6f44ed12d9648585a52a09e15d494daa4c70c59"
] | [
"tau.py"
] | [
"#!/usr/bin/env python3\n\nimport argparse\nimport logging\n\nimport numpy as np\nimport pystella as ps\nfrom pystella.model.sn_tau import StellaTauDetail\n\nmpl_logger = logging.getLogger('matplotlib')\nmpl_logger.setLevel(logging.WARNING)\n\n__author__ = 'bakl'\n\n# todo Show filters\n# todo show valuse for filters\n# todo compute SED = 4 pi R^2 sig T^4\n\n\ndef plot_tau_moments(tau, moments=None, xlim=None):\n import matplotlib.pyplot as plt\n\n moments = moments or np.exp(np.linspace(np.log(0.5), np.log(400.), 40))\n\n fig, (axV, axT) = plt.subplots(2, figsize=(12, 12), sharex=True, gridspec_kw={'hspace': 0})\n axV.set_title(tau.Name)\n axV.set_xlabel('')\n axV.set_ylabel('Velocity [1000 km/s]')\n\n axT.set_xlabel('Radius [cm]')\n axT.set_ylabel('Temperature [K]')\n\n for i, time in enumerate(moments):\n b = tau.block_nearest(time)\n n = int(2 - np.log10(max(1e-03, abs(b.Time)))) # if b.Time >= 10. else 4 # label format\n p = axV.semilogx(b.R, b.V8, label=\"t= {:.{}f}\".format(b.Time, n))\n color = p[0].get_color()\n axT.loglog(b.R, b.T, label=\"t={:.2f}\".format(time), color=color)\n\n axV.legend(frameon=False)\n\n if xlim is not None:\n axT.set_xlim(xlim)\n axV.set_xlim(xlim)\n\n fig.tight_layout()\n return fig\n\n\ndef plot_bands(ax, bnames, amp=30, alpha=0.5):\n \"\"\"Plot the filter responses\"\"\"\n color_dic = ps.band.colors()\n res = {}\n for bname in bnames:\n b = ps.band.band_by_name(bname)\n wl = b.wl * ps.phys.cm_to_angs\n ax.plot(wl, b.resp_wl*amp, color_dic[bname], alpha=alpha)\n\n wl_eff = b.wl_eff_angs\n ax.axvline(x=wl_eff, ymin=0., ymax=0.99, linestyle='--', color=color_dic[bname], alpha=alpha)\n ax.text(wl_eff, 10, bname, fontsize=12)\n ax.text(wl_eff*.95, 3, \"{:.0f}\".format(wl_eff), fontsize=6)\n res[bname] = (wl_eff, color_dic[bname])\n return res\n\n\ndef plot_tau_phot(tau_data, pars, tau_ph, xlim=None, title='', bnames=None):\n \"\"\"\n Plot photosphere as Func(nu). 
Maybe: R, V, V8, T\n :param pars: the parameters of photosphere\n :param tau_data: the data at the optical depth tau_ph\n :param tau_ph: the photosphere location\n :param xlim: wave length interval [A]\n :param title: the plot title\n :param bnames: array of filter names to show the filter responses\n :return: figure\n \"\"\"\n import matplotlib.pyplot as plt\n\n def fr2wv(nu):\n return ps.phys.c / nu * ps.phys.cm_to_angs\n\n fig, axs = plt.subplots(len(pars)+1, figsize=(12, 12), sharex=True, gridspec_kw={'hspace': 0})\n\n # Setup\n ax = axs[0]\n ax.set_ylabel(r'Zone ($\\tau_{{ph}}= {:.2f}$)'.format(tau_ph))\n ax.set_title(title)\n ax.xaxis.set_ticks_position('top')\n # ax.xaxis.tick_top()\n # ax.tick_params(axis=\"x\", direction=\"in\", pad=-22)\n # ax.tick_params(direction='in')\n\n for i, p in enumerate(pars, 1):\n ax = axs[i]\n ax.set_ylabel(r'{}$_{{ph}}$'.format(p))\n if i < len(axs)-1:\n ax.set_xlabel('')\n ax.tick_params(which='both', top=False, bottom=False)\n else:\n ax.set_xlabel('Wavelength [A]')\n\n # Plot Zone_ph\n colors = []\n for j, (t, freq, y) in enumerate(tau_data[StellaTauDetail.col_zon]):\n axzon = axs[0]\n n = int(3 - np.log10(max(1e-03, abs(t)))) # label format\n lbl = \"t= {:.{}f} d\".format(t, n)\n\n ll = axzon.semilogx(fr2wv(freq), y, label=lbl)\n color = ll[0].get_color()\n colors.append(color)\n\n bnames_waves = None\n if bnames is not None:\n ylim = axzon.get_ylim()\n bnames_waves = plot_bands(axzon, bnames, amp=ylim[1]*0.25, alpha=0.5)\n\n # Plot other params\n for i, p in enumerate(pars, 1):\n is_log = p.startswith('log')\n p_data = p.replace('log', '') if is_log else p\n ax = axs[i]\n for j, (t, freq, y) in enumerate(tau_data[p_data]):\n x = fr2wv(freq)\n if is_log:\n ax.loglog(x, y, color=colors[j])\n else:\n ax.semilogx(x, y, color=colors[j])\n\n if bnames_waves is not None:\n for bn, (wl, col) in bnames_waves.items():\n ax.axvline(x=wl, ymin=0., ymax=0.99, linestyle='--', color=col, alpha=0.5)\n\n # Post-plotting\n for i, ax in enumerate(axs):\n ax.tick_params(which='both', left=True, right=True, direction=\"in\")\n # ax.grid(axis=\"x\", color=\"grey\", alpha=.5, linewidth=1, linestyle=\":\")\n\n if xlim is not None:\n ax.set_xlim(xlim)\n\n axs[0].legend(frameon=False)\n\n fig.tight_layout()\n return fig\n\n\ndef get_parser(times='0.1:1:10:25:65', bnames='U:B:V:R'):\n parser = argparse.ArgumentParser(description='Standard Candle Method.')\n print(\" Plot the tau-wave diagram for STELLA models\")\n parser.add_argument('-b', '--band',\n nargs='?',\n required=False,\n # default=bnames,\n const=bnames,\n type=str,\n dest=\"bnames\",\n help=\"-b <bands>: string. If set only -b BNAMES is {}\".format(bnames))\n parser.add_argument('-i', '--input',\n required=True,\n dest=\"input\",\n help=\"Model name, example: cat_R450_M15_Ni007\")\n parser.add_argument('-p', '--path',\n required=False,\n type=str,\n default=False,\n dest=\"path\",\n help=\"Model directory\")\n parser.add_argument('-ph', '--phot',\n required=False,\n type=str,\n default=False,\n dest=\"phot\",\n help='Plot photosphere parameter. Maybe: R, V, V8, T. Example: -ph R:V8:T ' \n 'You may use prefix log, e.g. logT or logV8')\n parser.add_argument('-s', '--save',\n action='store_const',\n const=True,\n dest=\"is_save\",\n help=\"To save the result plot to pdf-file. Format: tau_[name]_t[times].pdf.\")\n parser.add_argument('-t', '--time',\n required=False,\n type=str,\n default=times,\n dest=\"times\",\n help=\"Plot tau snap for selected time moments. 
Default: {0}\".format(times))\n parser.add_argument('--tau_ph',\n required=False,\n type=float,\n default=2./3.,\n dest=\"tau_ph\",\n help=\"The optical depth at the photosphere. Default: 2/3\")\n parser.add_argument('-x', '--xlim',\n required=False,\n type=str,\n default=None,\n dest=\"xlim\",\n help=\"wave length interval [A]. Example: 1.:25e3. Default: all waves in the tau-file\")\n parser.add_argument('-w', '--write',\n required=False,\n type=str,\n default=None,\n dest=\"write_prefix\",\n help=\"The prefix of file + -ParamName.dat\")\n\n return parser\n\n\ndef str2float(s):\n return list(map(float, s.split(':')))\n\n\ndef main():\n import os\n import sys\n try:\n import matplotlib.pyplot as plt\n except ImportError:\n plt = None\n\n ps.Band.load_settings()\n\n model_ext = '.tau'\n\n parser = get_parser()\n args, unknownargs = parser.parse_known_args()\n\n path = os.getcwd()\n if args.path:\n path = os.path.expanduser(args.path)\n\n # Set model names\n fname = None\n if args.input:\n fname = args.input.strip()\n fname = fname.replace(model_ext, '')\n\n if fname is None:\n parser.print_help()\n sys.exit(2)\n\n model = ps.Stella(fname, path=path)\n\n if not model.is_tau:\n print(\"No tau-data for: \" + str(model))\n return None\n\n fig = None\n xlim = None\n fplot = None\n print('\\n Arguments')\n times = str2float(args.times)\n print(' The time moments: ', args.times)\n print(' The optical depth ', args.tau_ph)\n if args.phot:\n print(' The photospheric parameters ', args.phot)\n if args.xlim is not None:\n xlim = str2float(args.xlim)\n print(\" xlim: \", xlim)\n # Set band names\n bnames = ('B',)\n ps.Band.load_settings()\n if args.bnames:\n bnames = []\n for bname in args.bnames.split('-'):\n if not ps.band.is_exist(bname):\n print('No such band: ' + bname)\n parser.print_help()\n sys.exit(2)\n bnames.append(bname)\n\n tau = model.get_tau().load(is_info=False)\n print('\\n Loaded data from {}'.format(tau.FName))\n print('Model has Nzone= {} Ntimes= {}'.format(tau.Nzon, tau.Ntimes))\n print(\"The model time interval: {:.3e} - {:3e} days\".format(min(tau.Times), max(tau.Times)))\n print(\"The bnames are {}\".format(', '.join(bnames)))\n # print(tau.Wl2angs)\n # tau = b.Tau\n # print(tau.shape)\n\n ###\n # Plot\n if args.phot:\n pars = args.phot.split(':')\n if isinstance(pars, str):\n pars = [pars]\n pars_data = [p.replace('log', '') for p in pars]\n tau_data = tau.params_ph(pars=pars_data, moments=times, tau_ph=args.tau_ph)\n\n if args.write_prefix:\n fwrite = os.path.expanduser(args.write_prefix)\n tau.data_save(fwrite, tau_data, pars_data)\n else:\n # Print parameters\n print('\\nPhotospheric parameters:')\n for ii, p in enumerate(pars_data):\n print('{:9s} {}'.format('t_real', ' '.join([f'{p}_{b:10s}' for b in bnames])))\n for i, (t, freq, y) in enumerate(tau_data[p]):\n s = '{:9.4f} '.format(t)\n for bname in bnames:\n b = ps.band.band_by_name(bname)\n fr_eff = b.freq_eff\n idx = (np.abs(freq - fr_eff)).argmin()\n s += ' {:10e}'.format( y[idx])\n print(s)\n # Plot\n fig = plot_tau_phot(tau_data, pars, tau_ph=args.tau_ph, xlim=xlim, title=tau.Name, bnames=bnames)\n fplot = os.path.expanduser(\"~/tau_{}_{}.pdf\".format(fname, str.replace(args.phot, ':', '-')))\n else:\n fig = plot_tau_moments(tau, moments=times, xlim=xlim)\n\n if args.is_save:\n if fplot is None:\n fplot = os.path.expanduser(\"~/tau_{0}_t{1}.pdf\".format(fname, str.replace(args.times, ':', '-')))\n print(\"Save plot to {0}\".format(fplot))\n fig.savefig(fplot, bbox_inches='tight')\n else:\n plt.show()\n\n\nif 
__name__ == '__main__':\n main()\n"
] | [
[
"numpy.log",
"matplotlib.pyplot.show",
"numpy.abs",
"matplotlib.pyplot.subplots"
]
] |
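tau.py above is a command-line plotting tool for STELLA tau files; get_parser() defines its interface. An invocation consistent with those argparse definitions (the model name comes from the -i help text, the paths are placeholders) would be:

python tau.py -i cat_R450_M15_Ni007 -p ~/stella/models -ph R:V8:T -t 1:5:25:65 --tau_ph 0.667 -x 1e3:25e3 -b U-B-V-R -s

Times and the wavelength window are colon-separated floats, and -s saves the figure under the home directory instead of opening a window. Note that although the -b help string shows colon-separated band names, main() splits the argument on '-', so the bands are passed dash-separated here.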
ares201005/qiskit-aer | [
"fb3bab00ab810e73ad333b0f538fa6c3c53f054e"
] | [
"test/terra/backends/qasm_simulator/qasm_snapshot.py"
] | [
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2018, 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\"\"\"\nQasmSimulator Integration Tests for Snapshot instructions\n\"\"\"\n\nimport logging\nimport itertools as it\nimport numpy as np\n\nfrom qiskit import QuantumCircuit\nfrom qiskit.compiler import assemble\nfrom qiskit.quantum_info import DensityMatrix, Pauli, Operator\nfrom qiskit.providers.aer import QasmSimulator\nfrom qiskit.providers.aer import AerError\n\nfrom test.terra.reference.ref_snapshot_state import (\n snapshot_state_circuits_deterministic, snapshot_state_counts_deterministic,\n snapshot_state_pre_measure_statevector_deterministic,\n snapshot_state_post_measure_statevector_deterministic,\n snapshot_state_circuits_nondeterministic,\n snapshot_state_counts_nondeterministic,\n snapshot_state_pre_measure_statevector_nondeterministic,\n snapshot_state_post_measure_statevector_nondeterministic)\nfrom test.terra.reference.ref_snapshot_probabilities import (\n snapshot_probabilities_circuits, snapshot_probabilities_counts,\n snapshot_probabilities_labels_qubits,\n snapshot_probabilities_post_meas_probs,\n snapshot_probabilities_pre_meas_probs)\nfrom test.terra.reference.ref_snapshot_expval import (\n snapshot_expval_circuits, snapshot_expval_counts, snapshot_expval_labels,\n snapshot_expval_post_meas_values, snapshot_expval_pre_meas_values)\n\n\nclass QasmSnapshotStatevectorTests:\n \"\"\"QasmSimulator snapshot statevector tests.\"\"\"\n\n SIMULATOR = QasmSimulator()\n SUPPORTED_QASM_METHODS = [\n 'automatic', 'statevector', 'statevector_gpu', 'statevector_thrust',\n 'matrix_product_state'\n ]\n BACKEND_OPTS = {}\n\n def statevector_snapshots(self, data, label):\n \"\"\"Format snapshots as list of Numpy arrays\"\"\"\n snaps = data.get(\"snapshots\", {}).get(\"statevector\", {}).get(label, [])\n statevecs = []\n for snap in snaps:\n self.assertIsInstance(snap, np.ndarray)\n statevecs.append(snap)\n return statevecs\n\n def test_snapshot_statevector_pre_measure_det(self):\n \"\"\"Test snapshot statevector before deterministic final measurement\"\"\"\n shots = 10\n label = \"snap\"\n counts_targets = snapshot_state_counts_deterministic(shots)\n statevec_targets = snapshot_state_pre_measure_statevector_deterministic(\n )\n circuits = snapshot_state_circuits_deterministic(label,\n 'statevector',\n post_measure=False)\n\n qobj = assemble(circuits, self.SIMULATOR, shots=shots)\n job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)\n result = job.result()\n success = getattr(result, 'success', False)\n method = self.BACKEND_OPTS.get('method', 'automatic')\n if method not in QasmSnapshotStatevectorTests.SUPPORTED_QASM_METHODS:\n self.assertFalse(success)\n else:\n self.assertTrue(success)\n self.compare_counts(result, circuits, counts_targets, delta=0)\n # Check snapshots\n for j, circuit in enumerate(circuits):\n data = result.data(circuit)\n snaps = self.statevector_snapshots(data, label)\n self.assertTrue(len(snaps), 1)\n target = statevec_targets[j]\n value = snaps[0]\n self.assertTrue(np.allclose(value, target))\n\n def test_snapshot_statevector_pre_measure_nondet(self):\n \"\"\"Test 
snapshot statevector before non-deterministic final measurement\"\"\"\n shots = 100\n label = \"snap\"\n counts_targets = snapshot_state_counts_nondeterministic(shots)\n statevec_targets = snapshot_state_pre_measure_statevector_nondeterministic(\n )\n circuits = snapshot_state_circuits_nondeterministic(label,\n 'statevector',\n post_measure=False)\n\n qobj = assemble(circuits, self.SIMULATOR, shots=shots)\n job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)\n result = job.result()\n success = getattr(result, 'success', False)\n method = self.BACKEND_OPTS.get('method', 'automatic')\n if method not in QasmSnapshotStatevectorTests.SUPPORTED_QASM_METHODS:\n self.assertFalse(success)\n else:\n self.assertTrue(success)\n self.compare_counts(result,\n circuits,\n counts_targets,\n delta=0.2 * shots)\n # Check snapshots\n for j, circuit in enumerate(circuits):\n data = result.data(circuit)\n snaps = self.statevector_snapshots(data, label)\n self.assertTrue(len(snaps), 1)\n target = statevec_targets[j]\n value = snaps[0]\n self.assertTrue(np.allclose(value, target))\n\n def test_snapshot_statevector_post_measure_det(self):\n \"\"\"Test snapshot statevector after deterministic final measurement\"\"\"\n shots = 10\n label = \"snap\"\n counts_targets = snapshot_state_counts_deterministic(shots)\n statevec_targets = snapshot_state_post_measure_statevector_deterministic(\n )\n circuits = snapshot_state_circuits_deterministic(label,\n 'statevector',\n post_measure=True)\n\n qobj = assemble(circuits, self.SIMULATOR, memory=True, shots=shots)\n job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)\n result = job.result()\n success = getattr(result, 'success', False)\n method = self.BACKEND_OPTS.get('method', 'automatic')\n if method not in QasmSnapshotStatevectorTests.SUPPORTED_QASM_METHODS:\n logging.getLogger().setLevel(logging.CRITICAL)\n self.assertFalse(success)\n else:\n self.assertTrue(success)\n self.compare_counts(result, circuits, counts_targets, delta=0)\n # Check snapshots\n for i, circuit in enumerate(circuits):\n data = result.data(circuit)\n snaps = self.statevector_snapshots(data, label)\n for j, mem in enumerate(data['memory']):\n target = statevec_targets[i].get(mem)\n self.assertTrue(np.allclose(snaps[j], target))\n\n def test_snapshot_statevector_post_measure_nondet(self):\n \"\"\"Test snapshot statevector after non-deterministic final measurement\"\"\"\n shots = 100\n label = \"snap\"\n counts_targets = snapshot_state_counts_nondeterministic(shots)\n statevec_targets = snapshot_state_post_measure_statevector_nondeterministic(\n )\n circuits = snapshot_state_circuits_nondeterministic(label,\n 'statevector',\n post_measure=True)\n\n qobj = assemble(circuits, self.SIMULATOR, memory=True, shots=shots)\n job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)\n result = job.result()\n success = getattr(result, 'success', False)\n method = self.BACKEND_OPTS.get('method', 'automatic')\n if method not in QasmSnapshotStatevectorTests.SUPPORTED_QASM_METHODS:\n self.assertFalse(success)\n else:\n self.assertTrue(success)\n self.compare_counts(result,\n circuits,\n counts_targets,\n delta=0.2 * shots)\n # Check snapshots\n for i, circuit in enumerate(circuits):\n data = result.data(circuit)\n snaps = self.statevector_snapshots(data, label)\n for j, mem in enumerate(data['memory']):\n target = statevec_targets[i].get(mem)\n self.assertTrue(np.allclose(snaps[j], target))\n\n\nclass QasmSnapshotStabilizerTests:\n \"\"\"QasmSimulator method snapshot stabilizer 
tests.\"\"\"\n\n SIMULATOR = QasmSimulator()\n SUPPORTED_QASM_METHODS = ['automatic', 'stabilizer']\n BACKEND_OPTS = {}\n\n @staticmethod\n def stabilizer_snapshots(data, label):\n \"\"\"Get stabilizer snapshots\"\"\"\n return data.get(\"snapshots\", {}).get(\"stabilizer\", {}).get(label, [])\n\n @staticmethod\n def stabilizes_statevector(stabilizer, statevector):\n \"\"\"Return True if two stabilizer states are equal.\"\"\"\n # Get stabilizer and destabilizers and convert to sets\n for stab in stabilizer:\n if stab[0] == '-':\n pauli_mat = -1 * Pauli.from_label(stab[1:]).to_matrix()\n else:\n pauli_mat = Pauli.from_label(stab).to_matrix()\n val = statevector.conj().dot(pauli_mat.dot(statevector))\n if not np.isclose(val, 1):\n return False\n return True\n\n def test_snapshot_stabilizer_pre_measure_det(self):\n \"\"\"Test snapshot stabilizer before deterministic final measurement\"\"\"\n shots = 10\n label = \"snap\"\n counts_targets = snapshot_state_counts_deterministic(shots)\n statevec_targets = snapshot_state_pre_measure_statevector_deterministic(\n )\n circuits = snapshot_state_circuits_deterministic(label,\n 'stabilizer',\n post_measure=False)\n\n qobj = assemble(circuits, self.SIMULATOR, shots=shots)\n job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)\n result = job.result()\n success = getattr(result, 'success', False)\n method = self.BACKEND_OPTS.get('method', 'automatic')\n if method not in QasmSnapshotStabilizerTests.SUPPORTED_QASM_METHODS:\n self.assertFalse(success)\n else:\n self.assertTrue(success)\n self.compare_counts(result, circuits, counts_targets, delta=0)\n # Check snapshots\n for j, circuit in enumerate(circuits):\n data = result.data(circuit)\n snaps = self.stabilizer_snapshots(data, label)\n self.assertEqual(len(snaps), 1)\n statevec = statevec_targets[j]\n stabilizer = snaps[0]\n self.assertTrue(\n self.stabilizes_statevector(stabilizer, statevec))\n\n def test_snapshot_stabilizer_pre_measure_nondet(self):\n \"\"\"Test snapshot stabilizer before non-deterministic final measurement\"\"\"\n shots = 100\n label = \"snap\"\n counts_targets = snapshot_state_counts_nondeterministic(shots)\n statevec_targets = snapshot_state_pre_measure_statevector_nondeterministic(\n )\n circuits = snapshot_state_circuits_nondeterministic(label,\n 'stabilizer',\n post_measure=False)\n\n qobj = assemble(circuits, self.SIMULATOR, shots=shots)\n job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)\n result = job.result()\n success = getattr(result, 'success', False)\n method = self.BACKEND_OPTS.get('method', 'automatic')\n if method not in QasmSnapshotStabilizerTests.SUPPORTED_QASM_METHODS:\n self.assertFalse(success)\n else:\n self.assertTrue(success)\n self.compare_counts(result,\n circuits,\n counts_targets,\n delta=0.2 * shots)\n # Check snapshots\n for j, circuit in enumerate(circuits):\n data = result.data(circuit)\n snaps = self.stabilizer_snapshots(data, label)\n self.assertEqual(len(snaps), 1)\n statevec = statevec_targets[j]\n stabilizer = snaps[0]\n self.assertTrue(\n self.stabilizes_statevector(stabilizer, statevec))\n\n def test_snapshot_stabilizer_post_measure_det(self):\n \"\"\"Test snapshot stabilizer after deterministic final measurement\"\"\"\n shots = 10\n label = \"snap\"\n counts_targets = snapshot_state_counts_deterministic(shots)\n statevec_targets = snapshot_state_post_measure_statevector_deterministic(\n )\n circuits = snapshot_state_circuits_deterministic(label,\n 'stabilizer',\n post_measure=True)\n\n qobj = assemble(circuits, 
self.SIMULATOR, memory=True, shots=shots)\n job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)\n result = job.result()\n success = getattr(result, 'success', False)\n method = self.BACKEND_OPTS.get('method', 'automatic')\n if method not in QasmSnapshotStabilizerTests.SUPPORTED_QASM_METHODS:\n self.assertFalse(success)\n else:\n self.assertTrue(success)\n self.compare_counts(result, circuits, counts_targets, delta=0)\n # Check snapshots\n for i, circuit in enumerate(circuits):\n data = result.data(circuit)\n snaps = self.stabilizer_snapshots(data, label)\n for j, mem in enumerate(data['memory']):\n statevec = statevec_targets[i].get(mem)\n stabilizer = snaps[j]\n self.assertTrue(\n self.stabilizes_statevector(stabilizer, statevec))\n\n def test_snapshot_stabilizer_post_measure_nondet(self):\n \"\"\"Test snapshot stabilizer after non-deterministic final measurement\"\"\"\n shots = 100\n label = \"snap\"\n counts_targets = snapshot_state_counts_nondeterministic(shots)\n statevec_targets = snapshot_state_post_measure_statevector_nondeterministic(\n )\n circuits = snapshot_state_circuits_nondeterministic(label,\n 'stabilizer',\n post_measure=True)\n\n qobj = assemble(circuits, self.SIMULATOR, memory=True, shots=shots)\n job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)\n result = job.result()\n success = getattr(result, 'success', False)\n method = self.BACKEND_OPTS.get('method', 'automatic')\n if method not in QasmSnapshotStabilizerTests.SUPPORTED_QASM_METHODS:\n self.assertFalse(success)\n else:\n self.assertTrue(success)\n self.compare_counts(result,\n circuits,\n counts_targets,\n delta=0.2 * shots)\n # Check snapshots\n for i, circuit in enumerate(circuits):\n data = result.data(circuit)\n snaps = self.stabilizer_snapshots(data, label)\n for j, mem in enumerate(data['memory']):\n statevec = statevec_targets[i].get(mem)\n stabilizer = snaps[j]\n self.assertTrue(\n self.stabilizes_statevector(stabilizer, statevec))\n\n\nclass QasmSnapshotDensityMatrixTests:\n \"\"\"QasmSimulator snapshot density matrix tests.\"\"\"\n\n SIMULATOR = QasmSimulator()\n SUPPORTED_QASM_METHODS = [\n 'automatic', 'density_matrix', 'density_matrix_gpu',\n 'density_matrix_thrust'\n ]\n BACKEND_OPTS = {}\n\n def density_snapshots(self, data, label):\n \"\"\"Format snapshots as list of Numpy arrays\"\"\"\n # Check snapshot entry exists in data\n snaps = data.get(\"snapshots\", {}).get(\"density_matrix\",\n {}).get(label, [])\n # Convert nested lists to numpy arrays\n output = {}\n for snap_dict in snaps:\n memory = snap_dict['memory']\n self.assertIsInstance(snap_dict['value'], np.ndarray)\n output[memory] = snap_dict['value']\n return output\n\n def test_snapshot_density_matrix_pre_measure_det(self):\n \"\"\"Test snapshot density matrix before deterministic final measurement\"\"\"\n shots = 10\n label = \"snap\"\n counts_targets = snapshot_state_counts_deterministic(shots)\n statevec_targets = snapshot_state_pre_measure_statevector_deterministic(\n )\n circuits = snapshot_state_circuits_deterministic(label,\n 'density_matrix',\n post_measure=False)\n\n qobj = assemble(circuits, self.SIMULATOR, shots=shots)\n job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)\n result = job.result()\n success = getattr(result, 'success', False)\n method = self.BACKEND_OPTS.get('method', 'automatic')\n if method not in QasmSnapshotDensityMatrixTests.SUPPORTED_QASM_METHODS:\n self.assertFalse(success)\n else:\n self.assertTrue(success)\n self.compare_counts(result, circuits, counts_targets, 
delta=0)\n # Check snapshots\n for j, circuit in enumerate(circuits):\n data = result.data(circuit)\n snaps = self.density_snapshots(data, label)\n self.assertTrue(len(snaps), 1)\n target = np.outer(statevec_targets[j],\n statevec_targets[j].conj())\n # Pre-measurement all memory bits should be 0\n value = snaps.get('0x0')\n self.assertTrue(np.allclose(value, target))\n\n def test_snapshot_density_matrix_pre_measure_nondet(self):\n \"\"\"Test snapshot density matrix before non-deterministic final measurement\"\"\"\n shots = 100\n label = \"snap\"\n counts_targets = snapshot_state_counts_nondeterministic(shots)\n statevec_targets = snapshot_state_pre_measure_statevector_nondeterministic(\n )\n circuits = snapshot_state_circuits_nondeterministic(label,\n 'density_matrix',\n post_measure=False)\n\n qobj = assemble(circuits, self.SIMULATOR, shots=shots)\n job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)\n result = job.result()\n success = getattr(result, 'success', False)\n method = self.BACKEND_OPTS.get('method', 'automatic')\n if method not in QasmSnapshotDensityMatrixTests.SUPPORTED_QASM_METHODS:\n self.assertFalse(success)\n else:\n self.assertTrue(success)\n self.compare_counts(result,\n circuits,\n counts_targets,\n delta=0.2 * shots)\n # Check snapshots\n for j, circuit in enumerate(circuits):\n data = result.data(circuit)\n snaps = self.density_snapshots(data, label)\n self.assertTrue(len(snaps), 1)\n target = np.outer(statevec_targets[j],\n statevec_targets[j].conj())\n value = snaps.get('0x0')\n self.assertTrue(np.allclose(value, target))\n\n def test_snapshot_density_matrix_post_measure_det(self):\n \"\"\"Test snapshot density matrix after deterministic final measurement\"\"\"\n shots = 10\n label = \"snap\"\n counts_targets = snapshot_state_counts_deterministic(shots)\n statevec_targets = snapshot_state_post_measure_statevector_deterministic(\n )\n circuits = snapshot_state_circuits_deterministic(label,\n 'density_matrix',\n post_measure=True)\n\n qobj = assemble(circuits, self.SIMULATOR, memory=True, shots=shots)\n job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)\n result = job.result()\n success = getattr(result, 'success', False)\n method = self.BACKEND_OPTS.get('method', 'automatic')\n if method not in QasmSnapshotDensityMatrixTests.SUPPORTED_QASM_METHODS:\n self.assertFalse(success)\n else:\n self.assertTrue(success)\n self.compare_counts(result, circuits, counts_targets, delta=0)\n # Check snapshots\n for i, circuit in enumerate(circuits):\n data = result.data(circuit)\n snaps = self.density_snapshots(data, label)\n for j, mem in enumerate(data['memory']):\n target = statevec_targets[i].get(mem)\n target = np.outer(target, target.conj())\n value = snaps.get(mem)\n self.assertTrue(np.allclose(value, target))\n\n def test_snapshot_density_matrix_post_measure_nondet(self):\n \"\"\"Test snapshot density matrix after non-deterministic final measurement\"\"\"\n shots = 100\n label = \"snap\"\n counts_targets = snapshot_state_counts_nondeterministic(shots)\n statevec_targets = snapshot_state_post_measure_statevector_nondeterministic(\n )\n circuits = snapshot_state_circuits_nondeterministic(label,\n 'density_matrix',\n post_measure=True)\n\n qobj = assemble(circuits, self.SIMULATOR, memory=True, shots=shots)\n job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)\n result = job.result()\n success = getattr(result, 'success', False)\n method = self.BACKEND_OPTS.get('method', 'automatic')\n if method not in 
QasmSnapshotDensityMatrixTests.SUPPORTED_QASM_METHODS:\n self.assertFalse(success)\n else:\n self.assertTrue(success)\n self.compare_counts(result,\n circuits,\n counts_targets,\n delta=0.2 * shots)\n # Check snapshots\n for i, circuit in enumerate(circuits):\n data = result.data(circuit)\n snaps = self.density_snapshots(data, label)\n for j, mem in enumerate(data['memory']):\n target = statevec_targets[i].get(mem)\n target = np.outer(target, target.conj())\n value = snaps.get(mem)\n self.assertTrue(np.allclose(value, target))\n\n\nclass QasmSnapshotProbabilitiesTests:\n \"\"\"QasmSimulator snapshot probabilities tests.\"\"\"\n\n SIMULATOR = QasmSimulator()\n SUPPORTED_QASM_METHODS = [\n 'automatic',\n 'statevector',\n 'statevector_gpu',\n 'statevector_thrust',\n 'stabilizer',\n 'density_matrix',\n 'density_matrix_gpu',\n 'density_matrix_thrust',\n 'matrix_product_state',\n ]\n BACKEND_OPTS = {}\n\n @staticmethod\n def probability_snapshots(data, labels):\n \"\"\"Format snapshots as nested dicts\"\"\"\n # Check snapshot entry exists in data\n output = {}\n for label in labels:\n snaps = data.get(\"snapshots\", {}).get(\"probabilities\",\n {}).get(label, [])\n output[label] = {\n snap_dict['memory']: snap_dict['value']\n for snap_dict in snaps\n }\n return output\n\n def test_snapshot_probabilities_pre_measure(self):\n \"\"\"Test snapshot probabilities before final measurement\"\"\"\n shots = 1000\n labels = list(snapshot_probabilities_labels_qubits().keys())\n counts_targets = snapshot_probabilities_counts(shots)\n prob_targets = snapshot_probabilities_pre_meas_probs()\n\n circuits = snapshot_probabilities_circuits(post_measure=False)\n\n qobj = assemble(circuits, self.SIMULATOR, shots=shots)\n job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)\n result = job.result()\n success = getattr(result, 'success', False)\n method = self.BACKEND_OPTS.get('method', 'automatic')\n if method not in QasmSnapshotProbabilitiesTests.SUPPORTED_QASM_METHODS:\n self.assertFalse(success)\n else:\n self.assertTrue(success)\n self.compare_counts(result,\n circuits,\n counts_targets,\n delta=0.1 * shots)\n # Check snapshots\n for j, circuit in enumerate(circuits):\n data = result.data(circuit)\n all_snapshots = self.probability_snapshots(data, labels)\n for label in labels:\n snaps = all_snapshots.get(label, {})\n self.assertTrue(len(snaps), 1)\n for memory, value in snaps.items():\n target = prob_targets[j].get(label, {}).get(memory, {})\n self.assertDictAlmostEqual(value, target, delta=1e-7)\n\n def test_snapshot_probabilities_post_measure(self):\n \"\"\"Test snapshot probabilities after final measurement\"\"\"\n shots = 1000\n labels = list(snapshot_probabilities_labels_qubits().keys())\n counts_targets = snapshot_probabilities_counts(shots)\n prob_targets = snapshot_probabilities_post_meas_probs()\n\n circuits = snapshot_probabilities_circuits(post_measure=True)\n\n qobj = assemble(circuits, self.SIMULATOR, shots=shots)\n job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)\n result = job.result()\n success = getattr(result, 'success', False)\n method = self.BACKEND_OPTS.get('method', 'automatic')\n if method not in QasmSnapshotProbabilitiesTests.SUPPORTED_QASM_METHODS:\n self.assertFalse(success)\n else:\n self.assertTrue(success)\n self.compare_counts(result,\n circuits,\n counts_targets,\n delta=0.1 * shots)\n # Check snapshots\n for j, circuit in enumerate(circuits):\n data = result.data(circuit)\n all_snapshots = self.probability_snapshots(data, labels)\n for label in 
labels:\n snaps = all_snapshots.get(label, {})\n for memory, value in snaps.items():\n target = prob_targets[j].get(label, {}).get(memory, {})\n self.assertDictAlmostEqual(value, target, delta=1e-7)\n\n\nclass QasmSnapshotExpValPauliTests:\n \"\"\"QasmSimulator snapshot pauli expectation value tests.\"\"\"\n\n SIMULATOR = QasmSimulator()\n SUPPORTED_QASM_METHODS = [\n 'automatic', 'statevector', 'statevector_gpu', 'statevector_thrust',\n 'density_matrix', 'density_matrix_gpu', 'density_matrix_thrust',\n 'matrix_product_state', 'stabilizer'\n ]\n BACKEND_OPTS = {}\n\n @staticmethod\n def expval_snapshots(data, labels):\n \"\"\"Format snapshots as nested dicts\"\"\"\n # Check snapshot entry exists in data\n output = {}\n for label in labels:\n snaps = data.get(\"snapshots\", {}).get(\"expectation_value\",\n {}).get(label, [])\n # Convert list into dict\n inner = {}\n for snap_dict in snaps:\n val = snap_dict['value']\n inner[snap_dict['memory']] = val\n output[label] = inner\n return output\n\n def test_snapshot_expval_pauli_pre_measure(self):\n \"\"\"Test snapshot expectation value (pauli) before final measurement\"\"\"\n shots = 1000\n labels = snapshot_expval_labels()\n counts_targets = snapshot_expval_counts(shots)\n value_targets = snapshot_expval_pre_meas_values()\n\n circuits = snapshot_expval_circuits(pauli=True, post_measure=False)\n\n qobj = assemble(circuits, self.SIMULATOR, shots=shots)\n job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)\n result = job.result()\n success = getattr(result, 'success', False)\n method = self.BACKEND_OPTS.get('method', 'automatic')\n if method not in QasmSnapshotExpValPauliTests.SUPPORTED_QASM_METHODS:\n self.assertFalse(success)\n else:\n self.assertTrue(success)\n self.compare_counts(result,\n circuits,\n counts_targets,\n delta=0.1 * shots)\n # Check snapshots\n for j, circuit in enumerate(circuits):\n data = result.data(circuit)\n all_snapshots = self.expval_snapshots(data, labels)\n for label in labels:\n snaps = all_snapshots.get(label, {})\n self.assertTrue(len(snaps), 1)\n for memory, value in snaps.items():\n target = value_targets[j].get(label,\n {}).get(memory, {})\n self.assertAlmostEqual(value, target, delta=1e-7)\n\n def test_snapshot_expval_pauli_post_measure(self):\n \"\"\"Test snapshot expectation value (pauli) after final measurement\"\"\"\n shots = 1000\n labels = snapshot_expval_labels()\n counts_targets = snapshot_expval_counts(shots)\n value_targets = snapshot_expval_post_meas_values()\n\n circuits = snapshot_expval_circuits(pauli=True, post_measure=True)\n\n qobj = assemble(circuits, self.SIMULATOR, shots=shots)\n job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)\n result = job.result()\n success = getattr(result, 'success', False)\n method = self.BACKEND_OPTS.get('method', 'automatic')\n if method not in QasmSnapshotExpValPauliTests.SUPPORTED_QASM_METHODS:\n self.assertFalse(success)\n else:\n self.assertTrue(success)\n self.compare_counts(result,\n circuits,\n counts_targets,\n delta=0.1 * shots)\n # Check snapshots\n for j, circuit in enumerate(circuits):\n data = result.data(circuit)\n all_snapshots = self.expval_snapshots(data, labels)\n for label in labels:\n snaps = all_snapshots.get(label, {})\n self.assertTrue(len(snaps), 1)\n for memory, value in snaps.items():\n target = value_targets[j].get(label,\n {}).get(memory, {})\n self.assertAlmostEqual(value, target, delta=1e-7)\n\n\nclass QasmSnapshotExpvalPauliNCTests:\n \"\"\"QasmSimulator snapshot pauli expectation value tests on random 
states.\"\"\"\n\n SIMULATOR = QasmSimulator()\n SUPPORTED_QASM_METHODS = [\n 'automatic', 'statevector', 'statevector_gpu', 'statevector_thrust',\n 'density_matrix', 'density_matrix_gpu', 'density_matrix_thrust',\n 'matrix_product_state',\n ]\n BACKEND_OPTS = {}\n\n def general_test(self, pauli, num_qubits=None, seed=None):\n \"\"\"General test case\"\"\"\n pauli_qubits = list(range(len(pauli)))\n if num_qubits is None:\n num_qubits = len(pauli_qubits)\n\n # Prepare random N-qubit product input state\n # from seed\n rng = np.random.default_rng(seed)\n params = rng.uniform(-1, 1, size=(num_qubits, 3))\n init_circ = QuantumCircuit(num_qubits)\n for i, par in enumerate(params):\n init_circ.u3(*par, i)\n\n # Compute the target expectation value\n rho = DensityMatrix.from_instruction(init_circ)\n op = Operator.from_label(pauli)\n target = np.trace(Operator(rho).compose(op, pauli_qubits).data)\n\n # Simulate expectation value\n qc = init_circ.copy()\n qc.snapshot_expectation_value('final', [(1, pauli)], pauli_qubits)\n qobj = assemble(qc)\n result = self.SIMULATOR.run(\n qobj, backend_options=self.BACKEND_OPTS).result()\n self.assertTrue(getattr(result, 'success', False))\n snapshots = result.data(0).get('snapshots', {})\n self.assertIn('expectation_value', snapshots)\n self.assertIn('final', snapshots['expectation_value'])\n expval = snapshots.get('expectation_value', {})['final'][0]['value']\n self.assertAlmostEqual(expval, target)\n\n def test_pauli1(self):\n \"\"\"Test all 1-qubit Pauli snapshots.\"\"\"\n seed = 100\n for tup in ['I', 'X', 'Y', 'Z']:\n pauli = ''.join(reversed(tup))\n with self.subTest(msg='Pauli {}'.format(pauli)):\n self.general_test(pauli, num_qubits=3, seed=seed)\n\n def test_pauli2(self):\n \"\"\"Test all 2-qubit Pauli snapshots.\"\"\"\n seed = 100\n for tup in it.product(['I', 'X', 'Y', 'Z'], repeat=2):\n pauli = ''.join(reversed(tup))\n with self.subTest(msg='Pauli {}'.format(pauli)):\n self.general_test(pauli, num_qubits=3, seed=seed)\n\n def test_pauli3(self):\n \"\"\"Test all 3-qubit Pauli snapshots.\"\"\"\n seed = 100\n for tup in it.product(['I', 'X', 'Y', 'Z'], repeat=3):\n pauli = ''.join(reversed(tup))\n with self.subTest(msg='Pauli {}'.format(pauli)):\n self.general_test(pauli, num_qubits=3, seed=seed)\n\n\nclass QasmSnapshotExpValMatrixTests:\n \"\"\"QasmSimulator snapshot pauli expectation value tests.\"\"\"\n\n SIMULATOR = QasmSimulator()\n SUPPORTED_QASM_METHODS = [\n 'automatic', 'statevector', 'statevector_gpu', 'statevector_thrust',\n 'matrix_product_state'\n ]\n BACKEND_OPTS = {}\n\n @staticmethod\n def expval_snapshots(data, labels):\n \"\"\"Format snapshots as nested dicts\"\"\"\n # Check snapshot entry exists in data\n output = {}\n for label in labels:\n snaps = data.get(\"snapshots\", {}).get(\"expectation_value\",\n {}).get(label, [])\n # Convert list into dict\n inner = {}\n for snap_dict in snaps:\n inner[snap_dict['memory']] = snap_dict['value']\n output[label] = inner\n return output\n\n def test_snapshot_expval_matrix_pre_measure(self):\n \"\"\"Test snapshot expectation value (matrix) before final measurement\"\"\"\n shots = 1000\n labels = snapshot_expval_labels()\n counts_targets = snapshot_expval_counts(shots)\n value_targets = snapshot_expval_pre_meas_values()\n\n circuits = snapshot_expval_circuits(pauli=False, post_measure=False)\n\n qobj = assemble(circuits, self.SIMULATOR, shots=shots)\n job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)\n result = job.result()\n success = getattr(result, 'success', False)\n method = 
self.BACKEND_OPTS.get('method', 'automatic')\n if method not in QasmSnapshotExpValMatrixTests.SUPPORTED_QASM_METHODS:\n self.assertFalse(success)\n else:\n self.assertTrue(success)\n self.compare_counts(result,\n circuits,\n counts_targets,\n delta=0.1 * shots)\n # Check snapshots\n for j, circuit in enumerate(circuits):\n data = result.data(circuit)\n all_snapshots = self.expval_snapshots(data, labels)\n for label in labels:\n snaps = all_snapshots.get(label, {})\n self.assertTrue(len(snaps), 1)\n for memory, value in snaps.items():\n target = value_targets[j].get(label,\n {}).get(memory, {})\n self.assertAlmostEqual(value, target, delta=1e-7)\n\n def test_snapshot_expval_matrix_post_measure(self):\n \"\"\"Test snapshot expectation value (matrix) after final measurement\"\"\"\n shots = 1000\n labels = snapshot_expval_labels()\n counts_targets = snapshot_expval_counts(shots)\n value_targets = snapshot_expval_post_meas_values()\n\n circuits = snapshot_expval_circuits(pauli=False, post_measure=True)\n\n qobj = assemble(circuits, self.SIMULATOR, shots=shots)\n job = self.SIMULATOR.run(qobj, backend_options=self.BACKEND_OPTS)\n result = job.result()\n success = getattr(result, 'success', False)\n method = self.BACKEND_OPTS.get('method', 'automatic')\n if method not in QasmSnapshotExpValMatrixTests.SUPPORTED_QASM_METHODS:\n self.assertFalse(success)\n else:\n self.assertTrue(success)\n self.compare_counts(result,\n circuits,\n counts_targets,\n delta=0.1 * shots)\n # Check snapshots\n for j, circuit in enumerate(circuits):\n data = result.data(circuit)\n all_snapshots = self.expval_snapshots(data, labels)\n for label in labels:\n snaps = all_snapshots.get(label, {})\n self.assertTrue(len(snaps), 1)\n for memory, value in snaps.items():\n target = value_targets[j].get(label,\n {}).get(memory, {})\n self.assertAlmostEqual(value, target, delta=1e-7)\n"
] | [
[
"numpy.allclose",
"numpy.isclose",
"numpy.random.default_rng"
]
] |
oshiooshi/cirneco | [
"f71f1cd583bf6e290d7b8e74f148f06cadd39d63"
] | [
"samoyed_ts/nmt.py"
] | [
"import torch\n# import torchtext\nimport torch.nn as nn\n# from torchtext.vocab import Vocab, build_vocab_from_iterator\n# from torchtext.utils import unicode_csv_reader\n# from torchtext.data.datasets_utils import _RawTextIterableDataset\nfrom torch import Tensor\nfrom typing import Iterable, List\n# import sentencepiece as spm\n# import io\nimport math\nimport vocab\n\nSEED = 1234\ntorch.manual_seed(SEED)\ntorch.cuda.manual_seed(SEED)\n\n# 特殊トークンの定義\nUNK_IDX, PAD_IDX, SOS_IDX, EOS_IDX = 0, 1, 2, 3\nspecial_symbols = ['<unk>', '<pad>', '<sos>', '<eos>', '<blk>', '</blk>', '<sep>']\n\nMAX_LEN=80\n# sp = spm.SentencePieceProcessor(model_file='corpus_Python-JPN/p3/p3.model')\n\n# def jpn_tokenizer(text):\n# ss = [tok.replace('▁', '') for tok in sp.encode(text, out_type=str)][:MAX_LEN]\n# return [s for s in ss if len(s) != 0]\n\n# def py_tokenizer(text):\n# return [tok for tok in text.split()][:MAX_LEN]\n\nfrom torch.nn.utils.rnn import pad_sequence\n\n# 連続した操作をまとめて行うためのヘルパー関数\ndef sequential_transforms(*transforms):\n def func(txt_input):\n for transform in transforms:\n txt_input = transform(txt_input)\n return txt_input\n return func\n\n# SOS/EOSトークンを追加し、入力配列のインデックス用のテンソルを作成\ndef tensor_transform(token_ids: List[int]):\n return torch.cat((torch.tensor([SOS_IDX]), \n torch.tensor(token_ids), \n torch.tensor([EOS_IDX])))\n\n## Transformer の定義\n\nfrom torch.nn import (TransformerEncoder, TransformerDecoder,\n TransformerEncoderLayer, TransformerDecoderLayer)\n\nclass PositionalEncoding(nn.Module):\n def __init__(self, \n emb_size: int, \n dropout: float, \n maxlen: int = 5000):\n super(PositionalEncoding, self).__init__()\n den = torch.exp(- torch.arange(0, emb_size, 2) * math.log(10000) / emb_size)\n pos = torch.arange(0, maxlen).reshape(maxlen, 1)\n pos_embedding = torch.zeros((maxlen, emb_size))\n pos_embedding[:, 0::2] = torch.sin(pos * den)\n pos_embedding[:, 1::2] = torch.cos(pos * den)\n pos_embedding = pos_embedding.unsqueeze(-2)\n\n self.dropout = nn.Dropout(dropout)\n self.register_buffer('pos_embedding', pos_embedding)\n\n def forward(self, token_embedding: Tensor):\n return self.dropout(token_embedding + \n self.pos_embedding[:token_embedding.size(0),:])\n\nclass TokenEmbedding(nn.Module):\n def __init__(self, vocab_size: int, emb_size):\n super(TokenEmbedding, self).__init__()\n self.embedding = nn.Embedding(vocab_size, emb_size)\n self.emb_size = emb_size\n def forward(self, tokens: Tensor):\n return self.embedding(tokens.long()) * math.sqrt(self.emb_size)\n\nclass Seq2SeqTransformer(nn.Module):\n def __init__(self, \n num_encoder_layers: int, \n num_decoder_layers: int,\n emb_size: int, \n nhead: int, \n src_vocab_size: int, \n tgt_vocab_size: int,\n dim_feedforward: int = 512, \n dropout: float = 0.1):\n super(Seq2SeqTransformer, self).__init__()\n encoder_layer = TransformerEncoderLayer(d_model=emb_size, nhead=nhead,\n dim_feedforward=dim_feedforward)\n self.transformer_encoder = TransformerEncoder(encoder_layer, num_layers=num_encoder_layers)\n decoder_layer = TransformerDecoderLayer(d_model=emb_size, nhead=nhead,\n dim_feedforward=dim_feedforward)\n self.transformer_decoder = TransformerDecoder(decoder_layer, num_layers=num_decoder_layers)\n \n self.generator = nn.Linear(emb_size, tgt_vocab_size)\n self.src_tok_emb = TokenEmbedding(src_vocab_size, emb_size)\n self.tgt_tok_emb = TokenEmbedding(tgt_vocab_size, emb_size)\n self.positional_encoding = PositionalEncoding(emb_size, dropout=dropout)\n\n def forward(self, \n src: Tensor, \n tgt: Tensor, \n src_mask: Tensor,\n 
tgt_mask: Tensor, \n src_padding_mask: Tensor,\n tgt_padding_mask: Tensor, \n memory_key_padding_mask: Tensor):\n src_emb = self.positional_encoding(self.src_tok_emb(src))\n tgt_emb = self.positional_encoding(self.tgt_tok_emb(tgt))\n memory = self.transformer_encoder(src_emb, src_mask, src_padding_mask)\n outs = self.transformer_decoder(tgt_emb, memory, tgt_mask, None,\n tgt_padding_mask, memory_key_padding_mask)\n return self.generator(outs)\n\n def encode(self, src: Tensor, src_mask: Tensor):\n return self.transformer_encoder(self.positional_encoding(\n self.src_tok_emb(src)), src_mask)\n\n def decode(self, tgt: Tensor, memory: Tensor, tgt_mask: Tensor):\n return self.transformer_decoder(self.positional_encoding(\n self.tgt_tok_emb(tgt)), memory,\n tgt_mask)\n\nDEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\n### Masking\n## 異なるマスク処理を行う2つの関数を定義\n\n# モデルが予測を行う際に、未来の単語を見ないようにするためのマスク\ndef generate_square_subsequent_mask(sz):\n mask = (torch.triu(torch.ones((sz, sz), device=DEVICE)) == 1).transpose(0, 1)\n mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))\n return mask\n\n# ソースとターゲットのパディングトークンを隠すためのマスク\ndef create_mask(src, tgt):\n src_seq_len = src.shape[0]\n tgt_seq_len = tgt.shape[0]\n\n tgt_mask = generate_square_subsequent_mask(tgt_seq_len)\n src_mask = torch.zeros((src_seq_len, src_seq_len), device=DEVICE).type(torch.bool)\n\n src_padding_mask = (src == PAD_IDX).transpose(0, 1)\n tgt_padding_mask = (tgt == PAD_IDX).transpose(0, 1)\n return src_mask, tgt_mask, src_padding_mask, tgt_padding_mask\n\ndef greedy_decode(model, src, src_mask, max_len, beamsize, start_symbol):\n src = src.to(DEVICE)\n src_mask = src_mask.to(DEVICE)\n\n memory = model.encode(src, src_mask)\n ys = torch.ones(1, 1).fill_(start_symbol).type(torch.long).to(DEVICE)\n for i in range(max_len-1):\n memory = memory.to(DEVICE)\n tgt_mask = (generate_square_subsequent_mask(ys.size(0))\n .type(torch.bool)).to(DEVICE)\n out = model.decode(ys, memory, tgt_mask)\n out = out.transpose(0, 1)\n prob = model.generator(out[:, -1]) # prob.size() の実行結果 : torch.Size([1, 1088]) => 1088 はTGT のVOCAV_SIZE\n next_prob, next_word = prob.topk(k=beamsize, dim=1)\n # print(next_word)\n # print(next_prob)\n\n next_word = next_word[:, 0] # greedy なので、もっとも確率が高いものを選ぶ\n next_word = next_word.item() # 要素の値を取得 (int に変換)\n\n ys = torch.cat([ys,\n torch.ones(1, 1).type_as(src.data).fill_(next_word)], dim=0)\n if next_word == EOS_IDX:\n break\n return ys\n\nclass NMT(object):\n src_vocab: object\n tgt_vocab: object\n\n def __init__(self, src_vocab='kujira', tgt_vocab='python'):\n self.src_vocab = vocab.load_vocab(src_vocab)\n self.tgt_vocab = vocab.load_vocab(tgt_vocab)\n tokenizer = vocab.tokenizer_from_vocab(self.src_vocab)\n self.src_transform = sequential_transforms(tokenizer, #Tokenization\n self.src_vocab, #Numericalization\n tensor_transform) # Add SOS/EOS and create tensor\n\n # パラメータの定義\n self.SRC_VOCAB_SIZE = len(self.src_vocab)\n self.TGT_VOCAB_SIZE = len(self.tgt_vocab)\n self.EMB_SIZE = 512 # BERT の次元に揃えれば良いよ\n self.NHEAD = 8\n self.FFN_HID_DIM = 512\n self.BATCH_SIZE = 128\n self.NUM_ENCODER_LAYERS = 3\n self.NUM_DECODER_LAYERS = 3\n\n # インスタンスの作成\n self.transformer = Seq2SeqTransformer(self.NUM_ENCODER_LAYERS, self.NUM_DECODER_LAYERS, \n self.EMB_SIZE, self.NHEAD, self.SRC_VOCAB_SIZE, self.TGT_VOCAB_SIZE,\n self.FFN_HID_DIM)\n\n # TODO: ?\n for p in self.transformer.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n\n # デバイスの設定\n self.transformer = 
self.transformer.to(DEVICE)\n\n # 損失関数の定義 (クロスエントロピー)\n self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=PAD_IDX)\n\n # オプティマイザの定義 (Adam)\n self.optimizer = torch.optim.Adam(self.transformer.parameters(), lr=0.0001, betas=(0.9, 0.98), eps=1e-9)\n\n\n def load(self, filename='all-model.pt'):\n self.transformer.load_state_dict(torch.load(filename, map_location=DEVICE)) \n\n def translate(self, src_sentence: str):\n self.transformer.eval()\n src = self.src_transform(src_sentence).view(-1, 1)\n num_tokens = src.shape[0]\n src_mask = (torch.zeros(num_tokens, num_tokens)).type(torch.bool)\n tgt_tokens = greedy_decode(\n self.transformer, src, src_mask, max_len=num_tokens + 5, beamsize=5, start_symbol=SOS_IDX).flatten()\n return \" \".join(self.tgt_vocab.lookup_tokens(list(tgt_tokens.cpu().numpy()))).replace(\"<sos>\", \"\").replace(\"<eos>\", \"\")\n\nif __name__ == '__main__':\n nmt = NMT()\n nmt.load('./all-model.pt')\n pred = nmt.translate('もし<A>が偶数のとき')\n print('pred:', pred)"
] | [
[
"torch.nn.init.xavier_uniform_",
"torch.cuda.manual_seed",
"torch.nn.TransformerEncoder",
"torch.cuda.is_available",
"torch.nn.Dropout",
"torch.cos",
"torch.sin",
"torch.arange",
"torch.ones",
"torch.nn.TransformerDecoderLayer",
"torch.nn.TransformerEncoderLayer",
"torch.load",
"torch.manual_seed",
"torch.tensor",
"torch.nn.Linear",
"torch.nn.Embedding",
"torch.nn.CrossEntropyLoss",
"torch.nn.TransformerDecoder",
"torch.zeros"
]
] |
lkoelman/python-neo | [
"58a207976fb33a50ea8e42b70d7da73b03474f42"
] | [
"neo/io/pynnio.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nModule for reading/writing data from/to legacy PyNN formats.\n\nPyNN is available at http://neuralensemble.org/PyNN\n\nClasses:\n PyNNNumpyIO\n PyNNTextIO\n\nSupported: Read/Write\n\nAuthors: Andrew Davison, Pierre Yger\n\"\"\"\n\nfrom itertools import chain\nimport numpy\nimport quantities as pq\nimport warnings\n\nfrom neo.io.baseio import BaseIO\nfrom neo.core import Segment, AnalogSignal, SpikeTrain\n\ntry:\n unicode\n PY2 = True\nexcept NameError:\n PY2 = False\n\nUNITS_MAP = {\n 'spikes': pq.ms,\n 'v': pq.mV,\n 'gsyn': pq.UnitQuantity('microsiemens', 1e-6 * pq.S, 'uS', 'µS'), # checked\n}\n\n\nclass BasePyNNIO(BaseIO):\n \"\"\"\n Base class for PyNN IO classes\n \"\"\"\n is_readable = True\n is_writable = True\n has_header = True\n is_streameable = False # TODO - correct spelling to \"is_streamable\"\n supported_objects = [Segment, AnalogSignal, SpikeTrain]\n readable_objects = supported_objects\n writeable_objects = supported_objects\n mode = 'file'\n\n def __init__(self, filename=None, **kargs):\n BaseIO.__init__(self, filename, *kargs)\n warnings.warn(\"PyNNTextIO and PyNNNumpyIO will be removed in Neo 0.7.0. \" +\n \"Please contact the Neo developers if this will cause you problems.\",\n DeprecationWarning)\n\n def _read_file_contents(self):\n raise NotImplementedError\n\n def _extract_array(self, data, channel_index):\n idx = numpy.where(data[:, 1] == channel_index)[0]\n return data[idx, 0]\n\n def _determine_units(self, metadata):\n if 'units' in metadata:\n return metadata['units']\n elif 'variable' in metadata and metadata['variable'] in UNITS_MAP:\n return UNITS_MAP[metadata['variable']]\n else:\n raise IOError(\"Cannot determine units\")\n\n def _extract_signals(self, data, metadata):\n\n arr = numpy.vstack(self._extract_array(data, channel_index)\n for channel_index in\n range(metadata['first_index'], metadata['last_index'] + 1))\n if len(arr) > 0:\n signal = AnalogSignal(arr.T,\n units=self._determine_units(metadata),\n sampling_period=metadata['dt'] * pq.ms)\n signal.annotate(label=metadata[\"label\"],\n variable=metadata[\"variable\"])\n return signal\n\n def _extract_spikes(self, data, metadata, channel_index):\n spiketrain = None\n spike_times = self._extract_array(data, channel_index)\n if len(spike_times) > 0:\n spiketrain = SpikeTrain(spike_times, units=pq.ms, t_stop=spike_times.max())\n spiketrain.annotate(label=metadata[\"label\"],\n channel_index=channel_index,\n dt=metadata[\"dt\"])\n return spiketrain\n\n def _write_file_contents(self, data, metadata):\n raise NotImplementedError\n\n def read_segment(self, lazy=False):\n assert not lazy, 'Do not support lazy'\n\n data, metadata = self._read_file_contents()\n annotations = dict((k, metadata.get(k, 'unknown'))\n for k in (\"label\", \"variable\", \"first_id\", \"last_id\"))\n seg = Segment(**annotations)\n if metadata['variable'] == 'spikes':\n for i in range(metadata['first_index'], metadata['last_index'] + 1):\n spiketrain = self._extract_spikes(data, metadata, i)\n if spiketrain is not None:\n seg.spiketrains.append(spiketrain)\n # store dt for SpikeTrains only, as can be retrieved from sampling_period for AnalogSignal\n seg.annotate(dt=metadata['dt'])\n else:\n signal = self._extract_signals(data, metadata)\n if signal is not None:\n seg.analogsignals.append(signal)\n seg.create_many_to_one_relationship()\n return seg\n\n def write_segment(self, segment):\n source = segment.analogsignals or segment.spiketrains\n assert len(source) > 0, \"Segment contains neither analog 
signals nor spike trains.\"\n metadata = segment.annotations.copy()\n s0 = source[0]\n if isinstance(s0, AnalogSignal):\n if len(source) > 1:\n warnings.warn(\"Cannot handle multiple analog signals. Writing only the first.\")\n source = s0.T\n metadata['size'] = s0.shape[1]\n n = source.size\n else:\n metadata['size'] = len(source)\n n = sum(s.size for s in source)\n metadata['first_index'] = 0\n metadata['last_index'] = metadata['size'] - 1\n if 'label' not in metadata:\n metadata['label'] = 'unknown'\n if 'dt' not in metadata: # dt not included in annotations if Segment contains only AnalogSignals\n metadata['dt'] = s0.sampling_period.rescale(pq.ms).magnitude\n metadata['n'] = n\n data = numpy.empty((n, 2))\n # if the 'variable' annotation is a standard one from PyNN, we rescale\n # to use standard PyNN units\n # we take the units from the first element of source and scale all\n # the signals to have the same units\n if 'variable' in segment.annotations:\n units = UNITS_MAP.get(segment.annotations['variable'], source[0].dimensionality)\n else:\n units = source[0].dimensionality\n metadata['variable'] = 'unknown'\n try:\n metadata['units'] = units.unicode\n except AttributeError:\n metadata['units'] = units.u_symbol\n\n start = 0\n for i, signal in enumerate(source): # here signal may be AnalogSignal or SpikeTrain\n end = start + signal.size\n data[start:end, 0] = numpy.array(signal.rescale(units))\n data[start:end, 1] = i * numpy.ones((signal.size,), dtype=float)\n start = end\n self._write_file_contents(data, metadata)\n\n def read_analogsignal(self, lazy=False):\n assert not lazy, 'Do not support lazy'\n\n data, metadata = self._read_file_contents()\n if metadata['variable'] == 'spikes':\n raise TypeError(\"File contains spike data, not analog signals\")\n else:\n signal = self._extract_signals(data, metadata)\n if signal is None:\n raise IndexError(\"File does not contain a signal\")\n else:\n return signal\n\n def read_spiketrain(self, lazy=False, channel_index=0):\n assert not lazy, 'Do not support lazy'\n data, metadata = self._read_file_contents()\n if metadata['variable'] != 'spikes':\n raise TypeError(\"File contains analog signals, not spike data\")\n else:\n spiketrain = self._extract_spikes(data, metadata, channel_index)\n if spiketrain is None:\n raise IndexError(\n \"File does not contain any spikes with channel index %d\" % channel_index)\n else:\n return spiketrain\n\n\nclass PyNNNumpyIO(BasePyNNIO):\n \"\"\"\n (DEPRECATED) Reads/writes data from/to PyNN NumpyBinaryFile format\n \"\"\"\n name = \"PyNN NumpyBinaryFile\"\n extensions = ['npz']\n\n def _read_file_contents(self):\n contents = numpy.load(self.filename)\n data = contents[\"data\"]\n metadata = {}\n for name, value in contents['metadata']:\n try:\n metadata[name] = eval(value)\n except Exception:\n metadata[name] = value\n return data, metadata\n\n def _write_file_contents(self, data, metadata):\n # we explicitly set the dtype to ensure roundtrips preserve file contents exactly\n max_metadata_length = max(chain([len(k) for k in metadata.keys()],\n [len(str(v)) for v in metadata.values()]))\n if PY2:\n dtype = \"S%d\" % max_metadata_length\n else:\n dtype = \"U%d\" % max_metadata_length\n metadata_array = numpy.array(sorted(metadata.items()), dtype)\n numpy.savez(self.filename, data=data, metadata=metadata_array)\n\n\nclass PyNNTextIO(BasePyNNIO):\n \"\"\"\n (DEPRECATED) Reads/writes data from/to PyNN StandardTextFile format\n \"\"\"\n name = \"PyNN StandardTextFile\"\n extensions = ['v', 'ras', 'gsyn']\n\n def 
_read_metadata(self):\n metadata = {}\n with open(self.filename) as f:\n for line in f:\n if line[0] == \"#\":\n name, value = line[1:].strip().split(\"=\")\n name = name.strip()\n try:\n metadata[name] = eval(value)\n except Exception:\n metadata[name] = value.strip()\n else:\n break\n return metadata\n\n def _read_file_contents(self):\n data = numpy.loadtxt(self.filename)\n metadata = self._read_metadata()\n return data, metadata\n\n def _write_file_contents(self, data, metadata):\n with open(self.filename, 'wb') as f:\n for item in sorted(metadata.items()):\n f.write((\"# %s = %s\\n\" % item).encode('utf8'))\n numpy.savetxt(f, data)\n"
] | [
[
"numpy.load",
"numpy.ones",
"numpy.empty",
"numpy.savetxt",
"numpy.savez",
"numpy.where",
"numpy.loadtxt"
]
] |
ilyasdc/pycro-manager | [
"5f0153e8a90104eb8715348c6eb22c4d8fdee477"
] | [
"pycromanager/zmq.py"
] | [
"import json\nimport re\nimport time\nimport typing\nimport warnings\nimport inspect\nimport numpy as np\nimport zmq\nfrom weakref import WeakSet\nimport threading\nimport copy\nimport sys\nfrom threading import Lock\n\n\nclass DataSocket:\n \"\"\"\n Wrapper for ZMQ socket that sends and recieves dictionaries\n Includes ZMQ client, push, and pull sockets\n \"\"\"\n\n def __init__(self, context, port, type, debug=False, ip_address=\"127.0.0.1\"):\n # request reply socket\n self._socket = context.socket(type)\n self._debug = debug\n # store these as wekrefs so that circular refs dont prevent garbage collection\n self._java_objects = set()\n self._port = port\n self._close_lock = Lock()\n self._closed = False\n if type == zmq.PUSH:\n if debug:\n print(\"binding {}\".format(port))\n self._socket.bind(\"tcp://{}:{}\".format(ip_address, port))\n else:\n if debug:\n print(\"connecting {}\".format(port))\n self._socket.connect(\"tcp://{}:{}\".format(ip_address, port))\n\n def _register_java_object(self, object):\n self._java_objects.add(object)\n\n def _convert_np_to_python(self, d):\n \"\"\"\n recursively search dictionary and convert any values from numpy floats/ints to\n python floats/ints so they can be json serialized\n :return:\n \"\"\"\n if type(d) != dict:\n return\n for k, v in d.items():\n if isinstance(v, dict):\n self._convert_np_to_python(v)\n elif type(v) == list:\n for e in v:\n self._convert_np_to_python(e)\n elif np.issubdtype(type(v), np.floating):\n d[k] = float(v)\n elif np.issubdtype(type(v), np.integer):\n d[k] = int(v)\n\n def _make_array_identifier(self, entry):\n \"\"\"\n make a string to replace bytes data or numpy array in message, which encode data type if numpy\n \"\"\"\n # make up a random 32 bit int as the identifier\n # TODO: change to simple counting\n identifier = np.random.randint(-(2 ** 31), 2 ** 31 - 1, 1, dtype=np.int32)[0]\n # '@{some_number}_{bytes_per_pixel}'\n # if its a numpy array, include bytes per pixel, otherwise just interpret it as raw byts\n # TODO : I thinkg its always raw binary and the argument deserialization types handles conversion to java arrays\n # This definitely could use some cleanup and simplification. 
Probably best to encode the data type here and remove\n # argument deserialization types\n return identifier, \"@\" + str(int(identifier)) + \"_\" + str(\n 0 if isinstance(entry, bytes) else entry.dtype.itemsize\n )\n\n def _remove_bytes(self, bytes_data, structure):\n if isinstance(structure, list):\n for i, entry in enumerate(structure):\n if isinstance(entry, bytes) or isinstance(entry, np.ndarray):\n int_id, str_id = self._make_array_identifier(entry)\n structure[i] = str_id\n bytes_data.append((int_id, entry))\n elif isinstance(entry, list) or isinstance(entry, dict):\n self._remove_bytes(bytes_data, entry)\n elif isinstance(structure, dict):\n for key in structure.keys():\n entry = structure[key]\n if isinstance(entry, bytes) or isinstance(entry, np.ndarray):\n int_id, str_id = self._make_array_identifier(entry)\n structure[key] = str_id\n bytes_data.append((int_id, entry))\n elif isinstance(entry, list) or isinstance(entry, dict):\n self._remove_bytes(bytes_data, structure[key])\n\n def send(self, message, timeout=0):\n if message is None:\n message = {}\n # make sure any np types convert to python types so they can be json serialized\n self._convert_np_to_python(message)\n # Send binary data in seperate messages so it doesnt need to be json serialized\n bytes_data = []\n self._remove_bytes(bytes_data, message)\n message_string = json.dumps(message)\n if self._debug:\n print(\"DEBUG, sending: {}\".format(message))\n # convert keys to byte array\n key_vals = [(identifier.tobytes(), value) for identifier, value in bytes_data]\n message_parts = [bytes(message_string, \"iso-8859-1\")] + [\n item for keyval in key_vals for item in keyval\n ]\n if timeout == 0:\n self._socket.send_multipart(message_parts)\n else:\n start = time.time()\n while 1000 * (time.time() - start) < timeout:\n try:\n self._socket.send_multipart(message_parts, flags=zmq.NOBLOCK)\n return True\n except zmq.ZMQError:\n pass # ignore, keep trying\n return False\n\n def _replace_bytes(self, dict_or_list, hash, value):\n \"\"\"\n Replace placeholders for byte arrays in JSON message with their actual values\n \"\"\"\n if isinstance(dict_or_list, dict):\n for key in dict_or_list:\n if isinstance(dict_or_list[key], str) and \"@\" in dict_or_list[key]:\n hash_in_message = int(\n dict_or_list[key].split(\"@\")[1], 16\n ) # interpret hex hash string\n if hash == hash_in_message:\n dict_or_list[key] = value\n return\n elif isinstance(dict_or_list[key], list) or isinstance(dict_or_list[key], dict):\n self._replace_bytes(dict_or_list[key], hash, value)\n elif isinstance(dict_or_list, list):\n for i, entry in enumerate(dict_or_list):\n if isinstance(entry, str) and \"@\" in dict_or_list[entry]:\n hash_in_message = int(entry.split(\"@\")[1], 16) # interpret hex hash string\n if hash == hash_in_message:\n dict_or_list[i] = value\n return\n elif isinstance(entry, list) or isinstance(entry, dict):\n self._replace_bytes(entry, hash, value)\n\n def receive(self, timeout=0):\n if timeout == 0:\n reply = self._socket.recv_multipart()\n else:\n start = time.time()\n reply = None\n while 1000 * (time.time() - start) < timeout:\n try:\n reply = self._socket.recv_multipart(flags=zmq.NOBLOCK)\n if reply is not None:\n break\n except zmq.ZMQError:\n pass # ignore, keep trying\n if reply is None:\n return reply\n message = json.loads(reply[0].decode(\"iso-8859-1\"))\n # replace any byte data placeholders with the byte data itself\n for i in np.arange(1, len(reply), 2):\n # messages come in pairs: first is hash, second it byte data\n 
identity_hash = int.from_bytes(reply[i], byteorder=sys.byteorder)\n value = reply[i + 1]\n self._replace_bytes(message, identity_hash, value)\n\n if self._debug:\n print(\"DEBUG, recieved: {}\".format(message))\n self._check_exception(message)\n return message\n\n def _check_exception(self, response):\n if \"type\" in response and response[\"type\"] == \"exception\":\n raise Exception(response[\"value\"])\n\n def __del__(self):\n self.close() # make sure it closes properly\n\n def close(self):\n with self._close_lock:\n if not self._closed:\n for java_object in self._java_objects:\n java_object._close()\n del java_object #potentially redundant, trying to fix closing race condition\n self._java_objects = None\n self._socket.close()\n while not self._socket.closed:\n time.sleep(0.01)\n self._socket = None\n if self._debug:\n print('closed socket {}'.format(self._port))\n self._closed = True\n\n\nclass Bridge:\n \"\"\"\n Create an object which acts as a client to a corresponding server (running in a Java process).\n This enables construction and interaction with arbitrary java objects. Each bridge object should\n be run using a context manager (i.e. `with Bridge() as b:`) or bridge.close() should be explicitly\n called when finished\n \"\"\"\n\n DEFAULT_PORT = 4827\n DEFAULT_TIMEOUT = 500\n _EXPECTED_ZMQ_SERVER_VERSION = \"4.2.0\"\n\n thread_local = threading.local()\n\n def __new__(cls, *args, **kwargs):\n \"\"\"\n Only one instance of Bridge per a thread\n \"\"\"\n port = kwargs.get('port', Bridge.DEFAULT_PORT)\n if hasattr(Bridge.thread_local, \"bridge\") and Bridge.thread_local.bridge is not None and port in Bridge.thread_local.bridge:\n Bridge.thread_local.bridge_count[port] += 1\n return Bridge.thread_local.bridge[port]\n else:\n if (not hasattr(Bridge.thread_local, \"bridge_count\")) or Bridge.thread_local.bridge_count is None:\n Bridge.thread_local.bridge_count = {}\n Bridge.thread_local.bridge_count[port] = 1\n return super(Bridge, cls).__new__(cls)\n\n def __init__(\n self, port: int=DEFAULT_PORT, convert_camel_case: bool=True,\n debug: bool=False, ip_address: str=\"127.0.0.1\", timeout: int=DEFAULT_TIMEOUT\n ):\n \"\"\"\n Parameters\n ----------\n port : int\n The port on which the bridge operates\n convert_camel_case : bool\n If True, methods for Java objects that are passed across the bridge\n will have their names converted from camel case to underscores. i.e. class.methodName()\n becomes class.method_name()\n debug : bool\n If True print helpful stuff for debugging\n \"\"\"\n self._ip_address = ip_address\n self._port = port\n self._closed = False\n if not hasattr(self, \"_context\"):\n Bridge._context = zmq.Context()\n # if hasattr(self.thread_local, \"bridge\") and port in self.thread_local.bridge:\n # return ### What was this supposed to do?\n if not hasattr(Bridge.thread_local, \"bridge\") or Bridge.thread_local.bridge is None:\n Bridge.thread_local.bridge = {}\n Bridge.thread_local.bridge[port] = self # cache a thread-local version of the bridge\n\n self._convert_camel_case = convert_camel_case\n self._debug = debug\n self._timeout = timeout\n self._master_socket = DataSocket(\n self._context, port, zmq.REQ, debug=debug, ip_address=self._ip_address\n )\n self._master_socket.send({\"command\": \"connect\", \"debug\": debug})\n self._class_factory = _JavaClassFactory()\n reply_json = self._master_socket.receive(timeout=timeout)\n if reply_json is None:\n raise TimeoutError(\n f\"Socket timed out after {timeout} milliseconds. 
Is Micro-Manager running and is the ZMQ server on {port} option enabled?\"\n )\n if reply_json[\"type\"] == \"exception\":\n raise Exception(reply_json[\"message\"])\n if \"version\" not in reply_json:\n reply_json[\"version\"] = \"2.0.0\" # before version was added\n if reply_json[\"version\"] != self._EXPECTED_ZMQ_SERVER_VERSION:\n warnings.warn(\n \"Version mistmatch between Java ZMQ server and Python client. \"\n \"\\nJava ZMQ server version: {}\\nPython client expected version: {}\"\n \"\\n To fix, update to BOTH latest pycromanager and latest micro-manager nightly build\".format(\n reply_json[\"version\"], self._EXPECTED_ZMQ_SERVER_VERSION\n )\n )\n\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.close()\n\n def close(self):\n Bridge.thread_local.bridge_count[self._port] -= 1\n if Bridge.thread_local.bridge_count[self._port] == 0:\n del Bridge.thread_local.bridge_count[self._port]\n del Bridge.thread_local.bridge[self._port]\n self._master_socket.close()\n self._master_socket = None\n self._closed = True\n\n if len(Bridge.thread_local.bridge) == 0:\n Bridge.thread_local.bridge = None\n Bridge.thread_local.bridge_count = None\n\n\n def get_class(self, serialized_object) -> typing.Type[\"JavaObjectShadow\"]:\n return self._class_factory.create(\n serialized_object, convert_camel_case=self._convert_camel_case\n )\n\n def construct_java_object(self, classpath: str, new_socket: bool=False, args: list=None):\n \"\"\"\n Create a new instance of a an object on the Java side. Returns a Python \"Shadow\" of the object, which behaves\n just like the object on the Java side (i.e. same methods, fields). Methods of the object can be inferred at\n runtime using iPython autocomplete\n\n Parameters\n ----------\n classpath : str\n Full classpath of the java object\n new_socket : bool\n If True, will create new java object on a new port so that blocking calls will not interfere\n with the bridges master port\n args : list\n list of arguments to the constructor, if applicable\n Returns\n -------\n\n Python \"Shadow\" to the Java object\n \"\"\"\n if args is None:\n args = []\n # classpath_minus_class = '.'.join(classpath.split('.')[:-1])\n # query the server for constructors matching this classpath\n message = {\"command\": \"get-constructors\", \"classpath\": classpath}\n self._master_socket.send(message)\n constructors = self._master_socket.receive()[\"api\"]\n\n methods_with_name = [m for m in constructors if m[\"name\"] == classpath]\n if len(methods_with_name) == 0:\n raise Exception(\"No valid java constructor found with classpath {}\".format(classpath))\n valid_method_spec, deserialize_types = _check_method_args(methods_with_name, args)\n\n # Calling a constructor, rather than getting return from method\n message = {\n \"command\": \"constructor\",\n \"classpath\": classpath,\n \"argument-types\": valid_method_spec[\"arguments\"],\n \"argument-deserialization-types\": deserialize_types,\n \"arguments\": _package_arguments(valid_method_spec, args),\n }\n if new_socket:\n message[\"new-port\"] = True\n self._master_socket.send(message)\n serialized_object = self._master_socket.receive()\n if new_socket:\n socket = DataSocket(\n self._context, serialized_object[\"port\"], zmq.REQ, ip_address=self._ip_address\n )\n else:\n socket = self._master_socket\n return self._class_factory.create(\n serialized_object, convert_camel_case=self._convert_camel_case\n )(socket=socket, serialized_object=serialized_object, bridge=self)\n\n def get_java_class(self, 
classpath: str, new_socket: bool=False):\n \"\"\"\n Get an an object corresponding to a java class, for example to be used\n when calling static methods on the class directly\n\n Parameters\n ----------\n classpath : str\n Full classpath of the java object\n new_socket : bool\n If True, will create new java object on a new port so that blocking calls will not interfere\n with the bridges master port\n Returns\n -------\n\n Python \"Shadow\" to the Java class\n \"\"\"\n message = {\"command\": \"get-class\", \"classpath\": classpath}\n if new_socket:\n message[\"new-port\"] = True\n self._master_socket.send(message)\n serialized_object = self._master_socket.receive()\n\n if new_socket:\n socket = DataSocket(\n self._context, serialized_object[\"port\"], zmq.REQ, ip_address=self._ip_address\n )\n else:\n socket = self._master_socket\n return self._class_factory.create(\n serialized_object, convert_camel_case=self._convert_camel_case\n )(socket=socket, serialized_object=serialized_object, bridge=self)\n\n def _connect_push(self, port):\n \"\"\"\n Connect a push socket on the given port\n :param port:\n :return:\n \"\"\"\n return DataSocket(\n self._context, port, zmq.PUSH, debug=self._debug, ip_address=self._ip_address\n )\n\n def _connect_pull(self, port):\n \"\"\"\n Connect to a pull socket on the given port\n :param port:\n :return:\n \"\"\"\n return DataSocket(\n self._context, port, zmq.PULL, debug=self._debug, ip_address=self._ip_address\n )\n\n def get_magellan(self):\n \"\"\"\n return an instance of the Micro-Magellan API\n \"\"\"\n return self.construct_java_object(\"org.micromanager.magellan.api.MagellanAPI\")\n\n def get_core(self):\n \"\"\"\n Connect to CMMCore and return object that has its methods\n\n :return: Python \"shadow\" object for micromanager core\n \"\"\"\n if hasattr(self, \"core\"):\n return getattr(self, \"core\")\n self.core = self.construct_java_object(\"mmcorej.CMMCore\")\n return self.core\n\n def get_studio(self):\n \"\"\"\n return an instance of the Studio object that provides access to micro-manager Java APIs\n \"\"\"\n return self.construct_java_object(\"org.micromanager.Studio\")\n\n\nclass _JavaClassFactory:\n \"\"\"\n This class is responsible for generating subclasses of JavaObjectShadow. 
Each generated class is kept in a `dict`.\n If a given class has already been generate once it will be returns from the cache rather than re-generating it.\n \"\"\"\n\n def __init__(self):\n self.classes = {}\n\n def create(\n self, serialized_obj: dict, convert_camel_case: bool = True\n ) -> typing.Type[\"JavaObjectShadow\"]:\n \"\"\"Create a class (or return a class from the cache) based on the contents of `serialized_object` message.\"\"\"\n if serialized_obj[\"class\"] in self.classes.keys(): # Return a cached class\n return self.classes[serialized_obj[\"class\"]]\n else: # Generate a new class since it wasn't found in the cache.\n _java_class: str = serialized_obj[\"class\"]\n python_class_name_translation = _java_class.replace(\n \".\", \"_\"\n ) # Having periods in the name would be problematic.\n _interfaces = serialized_obj[\"interfaces\"]\n static_attributes = {\"_java_class\": _java_class, \"_interfaces\": _interfaces}\n\n fields = {} # Create a dict of field names with getter and setter funcs.\n for field in serialized_obj[\"fields\"]:\n fields[field] = property(\n fget=lambda instance, Field=field: instance._access_field(Field),\n fset=lambda instance, val, Field=field: instance._set_field(Field, val),\n )\n\n methods = {} # Create a dict of methods for the class by name.\n methodSpecs = serialized_obj[\"api\"]\n method_names = set([m[\"name\"] for m in methodSpecs])\n # parse method descriptions to make python stand ins\n for method_name in method_names:\n params, methods_with_name, method_name_modified = _parse_arg_names(\n methodSpecs, method_name, convert_camel_case\n )\n return_type = methods_with_name[0][\"return-type\"]\n fn = lambda instance, *args, signatures_list=tuple(\n methods_with_name\n ): instance._translate_call(signatures_list, args, static = _java_class == 'java.lang.Class')\n fn.__name__ = method_name_modified\n fn.__doc__ = \"{}.{}: A dynamically generated Java method.\".format(\n _java_class, method_name_modified\n )\n sig = inspect.signature(fn)\n params = [\n inspect.Parameter(\"self\", inspect.Parameter.POSITIONAL_ONLY)\n ] + params # Add `self` as the first argument.\n return_type = (\n _JAVA_TYPE_NAME_TO_PYTHON_TYPE[return_type]\n if return_type in _JAVA_TYPE_NAME_TO_PYTHON_TYPE\n else return_type\n )\n fn.__signature__ = sig.replace(parameters=params, return_annotation=return_type)\n methods[method_name_modified] = fn\n\n newclass = type( # Dynamically create a class to shadow a java class.\n python_class_name_translation, # Name, based on the original java name\n (JavaObjectShadow,), # Inheritance\n {\n \"__init__\": lambda instance, socket, serialized_object, bridge: JavaObjectShadow.__init__(\n instance, socket, serialized_object, bridge\n ),\n **static_attributes,\n **fields,\n **methods,\n },\n )\n\n self.classes[_java_class] = newclass\n return newclass\n\n\nclass JavaObjectShadow:\n \"\"\"\n Generic class for serving as a python interface for a java class using a zmq server backend\n \"\"\"\n\n _interfaces = (\n None # Subclasses should fill these out. 
This class should never be directly instantiated.\n )\n _java_class = None\n\n def __init__(self, socket, serialized_object, bridge: Bridge):\n self._socket = socket\n self._hash_code = serialized_object[\"hash-code\"]\n self._bridge = bridge\n # register objects with bridge so it can tell Java side to release them before socket shuts down\n socket._register_java_object(self)\n self._closed = False\n # atexit.register(self._close)\n self._close_lock = Lock()\n\n def _close(self):\n with self._close_lock:\n if self._closed:\n return\n if not hasattr(self, \"_hash_code\"):\n return # constructor didnt properly finish, nothing to clean up on java side\n message = {\"command\": \"destructor\", \"hash-code\": self._hash_code}\n if self._bridge._debug:\n \"closing: {}\".format(self)\n self._socket.send(message)\n reply_json = self._socket.receive()\n if reply_json[\"type\"] == \"exception\":\n raise Exception(reply_json[\"value\"])\n self._closed = True\n\n def __del__(self):\n \"\"\"\n Tell java side this object is garbage collected so it can do the same if needed\n \"\"\"\n self._close()\n\n def _access_field(self, name):\n \"\"\"\n Return a python version of the field with a given name\n :return:\n \"\"\"\n message = {\"command\": \"get-field\", \"hash-code\": self._hash_code, \"name\": name}\n self._socket.send(message)\n return self._deserialize(self._socket.receive())\n\n def _set_field(self, name, value):\n \"\"\"\n Return a python version of the field with a given name\n :return:\n \"\"\"\n message = {\n \"command\": \"set-field\",\n \"hash-code\": self._hash_code,\n \"name\": name,\n \"value\": _serialize_arg(value),\n }\n self._socket.send(message)\n reply = self._deserialize(self._socket.receive())\n\n def _translate_call(self, method_specs, fn_args: tuple, static: bool):\n \"\"\"\n Translate to appropriate Java method, call it, and return converted python version of its result\n Parameters\n ----------\n args :\n args[0] is list of dictionaries of possible method specifications\n kwargs :\n hold possible polymorphic args, or none\n \"\"\"\n # args that are none are placeholders to allow for polymorphism and not considered part of the spec\n # fn_args = [a for a in fn_args if a is not None]\n valid_method_spec, deserialize_types = _check_method_args(method_specs, fn_args)\n # args are good, make call through socket, casting the correct type if needed (e.g. int to float)\n message = {\n \"command\": \"run-method\",\n \"static\": static,\n \"hash-code\": self._hash_code,\n \"name\": valid_method_spec[\"name\"],\n \"argument-types\": valid_method_spec[\"arguments\"],\n \"argument-deserialization-types\": deserialize_types,\n }\n message[\"arguments\"] = _package_arguments(valid_method_spec, fn_args)\n\n if self._bridge._closed:\n raise Exception('The Bridge used to create this has been closed. 
Are you trying to call it outside of a \"with\" block?')\n self._socket.send(message)\n recieved = self._socket.receive()\n return self._deserialize(recieved)\n\n def _deserialize(self, json_return):\n \"\"\"\n method_spec :\n info about the method that called it\n reply :\n bytes that represents return\n Returns\n -------\n An appropriate python type of the converted value\n \"\"\"\n if json_return[\"type\"] == \"exception\":\n raise Exception(json_return[\"value\"])\n elif json_return[\"type\"] == \"null\":\n return None\n elif json_return[\"type\"] == \"primitive\":\n return json_return[\"value\"]\n elif json_return[\"type\"] == \"string\":\n return json_return[\"value\"]\n elif json_return[\"type\"] == \"list\":\n return [self._deserialize(obj) for obj in json_return[\"value\"]]\n elif json_return[\"type\"] == \"object\":\n if json_return[\"class\"] == \"JSONObject\":\n return json.loads(json_return[\"value\"])\n else:\n raise Exception(\"Unrecognized return class\")\n elif json_return[\"type\"] == \"unserialized-object\":\n # inherit socket from parent object\n return self._bridge.get_class(json_return)(\n socket=self._socket, serialized_object=json_return, bridge=self._bridge\n )\n else:\n return deserialize_array(json_return)\n\n\ndef deserialize_array(json_return):\n \"\"\"\n Convert a serialized java array to the appropriate numpy type\n Parameters\n ----------\n json_return\n \"\"\"\n if json_return[\"type\"] in [\"byte-array\", \"int-array\", \"short-array\", \"float-array\"]:\n decoded = json_return[\"value\"]\n if json_return[\"type\"] == \"byte-array\":\n return np.frombuffer(decoded, dtype=\"=u1\").copy()\n elif json_return[\"type\"] == \"double-array\":\n return np.frombuffer(decoded, dtype=\"=f8\").copy()\n elif json_return[\"type\"] == \"int-array\":\n return np.frombuffer(decoded, dtype=\"=u4\").copy()\n elif json_return[\"type\"] == \"short-array\":\n return np.frombuffer(decoded, dtype=\"=u2\").copy()\n elif json_return[\"type\"] == \"float-array\":\n return np.frombuffer(decoded, dtype=\"=f4\").copy()\n\n\ndef _package_arguments(valid_method_spec, fn_args):\n \"\"\"\n Serialize function arguments and also include description of their Java types\n\n Parameters\n ----------\n valid_method_spec:\n fn_args :\n \"\"\"\n arguments = []\n for arg_type, arg_val in zip(valid_method_spec[\"arguments\"], fn_args):\n if isinstance(arg_val, JavaObjectShadow):\n arguments.append(_serialize_arg(arg_val))\n elif _JAVA_TYPE_NAME_TO_PYTHON_TYPE[arg_type] is object:\n arguments.append(_serialize_arg(arg_val))\n elif arg_val is None:\n arguments.append(_serialize_arg(arg_val))\n elif isinstance(arg_val, np.ndarray):\n arguments.append(_serialize_arg(arg_val))\n else:\n arguments.append(_serialize_arg(_JAVA_TYPE_NAME_TO_PYTHON_TYPE[arg_type](arg_val)))\n return arguments\n\n\ndef _serialize_arg(arg):\n if arg is None:\n return None\n if type(arg) in [bool, str, int, float]:\n return arg # json handles serialization\n elif type(arg) == np.ndarray:\n return arg.tobytes()\n elif isinstance(arg, JavaObjectShadow):\n return {\"hash-code\": arg._hash_code}\n else:\n raise Exception(\"Unknown argumetn type\")\n\n\ndef _check_single_method_spec(method_spec, fn_args):\n \"\"\"\n Check if a single method specificiation is compatible with the arguments the function recieved\n\n Parameters\n ----------\n method_spec :\n fn_args :\n \"\"\"\n if len(method_spec[\"arguments\"]) != len(fn_args):\n return False\n for arg_java_type, arg_val in zip(method_spec[\"arguments\"], fn_args):\n if 
isinstance(arg_val, JavaObjectShadow):\n if arg_java_type not in arg_val._interfaces:\n # check that it shadows object of the correct type\n return False\n elif type(arg_val) == np.ndarray:\n # For ND Arrays, need to make sure data types match\n if (\n arg_java_type != \"java.lang.Object\"\n and arg_val.dtype.type != _JAVA_ARRAY_TYPE_NUMPY_DTYPE[arg_java_type]\n ):\n return False\n elif not any(\n [\n isinstance(arg_val, acceptable_type)\n for acceptable_type in _JAVA_TYPE_NAME_TO_CASTABLE_PYTHON_TYPE[arg_java_type]\n ]\n ) and not (\n arg_val is None and arg_java_type in _JAVA_NON_PRIMITIVES\n ): # could be null if its an object\n # if a type that gets converted\n return False\n return True\n\n\ndef _check_method_args(method_specs, fn_args):\n \"\"\"\n Compare python arguments to java arguments to find correct function to call\n\n Parameters\n ----------\n method_specs :\n fn_args :\n\n Returns\n -------\n one of the method_specs that is valid\n \"\"\"\n valid_method_spec = None\n for method_spec in method_specs:\n if _check_single_method_spec(method_spec, fn_args):\n valid_method_spec = method_spec\n break\n\n if valid_method_spec is None:\n raise Exception(\n \"Incorrect arguments. \\nExpected {} \\nGot {}\".format(\n \" or \".join([\", \".join(method_spec[\"arguments\"]) for method_spec in method_specs]),\n \", \".join([str(type(a)) for a in fn_args]),\n )\n )\n\n # subclass NDArrays to the appropriate data type so they dont get incorrectly reconstructed as objects\n valid_method_spec = copy.deepcopy(valid_method_spec)\n deserialize_types = []\n for java_arg_class, python_arg_val in zip(valid_method_spec[\"arguments\"], fn_args):\n if isinstance(python_arg_val, np.ndarray):\n deserialize_types.append(\n [\n ja\n for ja, npdt in zip(\n _JAVA_ARRAY_TYPE_NUMPY_DTYPE.keys(), _JAVA_ARRAY_TYPE_NUMPY_DTYPE.values()\n )\n if python_arg_val.dtype.type == npdt\n ][0]\n )\n else:\n deserialize_types.append(java_arg_class)\n\n return valid_method_spec, deserialize_types\n\n\ndef _parse_arg_names(methods, method_name, convert_camel_case):\n method_name_modified = (\n _camel_case_2_snake_case(method_name) if convert_camel_case else method_name\n )\n # all methods with this name and different argument lists\n methods_with_name = [m for m in methods if m[\"name\"] == method_name]\n min_required_args = (\n 0\n if len(methods_with_name) == 1 and len(methods_with_name[0][\"arguments\"]) == 0\n else min([len(m[\"arguments\"]) for m in methods_with_name])\n )\n # sort with largest number of args last so lambda at end gets max num args\n methods_with_name.sort(key=lambda val: len(val[\"arguments\"]))\n method = methods_with_name[-1] # We only need to evaluate the overload with the most arguments.\n params = []\n unique_argument_names = []\n for arg_index, typ in enumerate(method[\"arguments\"]):\n hint = _CLASS_NAME_MAPPING[typ] if typ in _CLASS_NAME_MAPPING else \"object\"\n python_type = (\n _JAVA_TYPE_NAME_TO_PYTHON_TYPE[typ] if typ in _JAVA_TYPE_NAME_TO_PYTHON_TYPE else typ\n )\n if hint in unique_argument_names: # append numbers to end so arg hints have unique names\n i = 1\n while hint + str(i) in unique_argument_names:\n i += 1\n arg_name = hint + str(i)\n else:\n arg_name = hint\n unique_argument_names.append(arg_name)\n # this is how overloading is handled for now, by making default arguments as none, but\n # it might be better to explicitly compare argument types\n if arg_index >= min_required_args:\n default_arg_value = None\n else:\n default_arg_value = inspect.Parameter.empty\n params.append(\n 
inspect.Parameter(\n name=arg_name,\n kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,\n default=default_arg_value,\n annotation=python_type,\n )\n )\n return params, methods_with_name, method_name_modified\n\n\ndef _camel_case_2_snake_case(name):\n s1 = re.sub(\"(.)([A-Z][a-z]+)\", r\"\\1_\\2\", name)\n return re.sub(\"([a-z0-9])([A-Z])\", r\"\\1_\\2\", s1).lower()\n\n# Used for generating type hints in arguments\n_CLASS_NAME_MAPPING = {\n \"byte[]\": \"uint8array\",\n \"double[]\": \"float64_array\",\n \"int[]\": \"uint32_array\",\n \"short[]\": \"int16_array\",\n \"char[]\": \"int16_array\",\n \"float[]\": \"int16_array\",\n \"long[]\": \"int16_array\",\n \"java.lang.String\": \"string\",\n \"boolean\": \"boolean\",\n \"double\": \"float\",\n \"float\": \"float\",\n \"int\": \"int\",\n \"long\": \"int\",\n \"short\": \"int\",\n \"void\": \"void\",\n}\n#Used for deserializing java arrarys into numpy arrays\n_JAVA_ARRAY_TYPE_NUMPY_DTYPE = {\n \"boolean[]\": np.bool,\n \"byte[]\": np.uint8,\n \"short[]\": np.int16,\n \"char[]\": np.uint16,\n \"float[]\": np.float32,\n \"double[]\": np.float64,\n \"int[]\": np.int32,\n \"long[]\": np.int64,\n}\n#used for figuring our which java methods to call and if python args match\n_JAVA_TYPE_NAME_TO_PYTHON_TYPE = {\n \"boolean\": bool,\n \"double\": float,\n \"float\": float,\n #maybe could make these more specific to array type?\n \"byte[]\": np.ndarray,\n \"short[]\": np.ndarray,\n \"double[]\": np.ndarray,\n \"int[]\": np.ndarray,\n \"char[]\": np.ndarray,\n \"float[]\": np.ndarray,\n \"long[]\": np.ndarray,\n \"int\": int,\n \"java.lang.String\": str,\n \"long\": int,\n \"short\": int,\n \"char\": int,\n \"byte\": int,\n \"void\": None,\n \"java.lang.Object\": object,\n}\n# type conversions that allow for autocasting\n_JAVA_TYPE_NAME_TO_CASTABLE_PYTHON_TYPE = {\n \"boolean\": {bool},\n \"byte[]\": {np.ndarray},\n \"double\": {float, int},\n \"double[]\": {np.ndarray},\n \"float\": {float},\n \"int\": {int},\n \"int[]\": {np.ndarray},\n \"java.lang.String\": {str},\n \"long\": {int},\n \"short\": {int},\n \"char\": {int},\n \"byte\": {int},\n \"void\": {None},\n \"java.lang.Object\": {object},\n}\n_JAVA_NON_PRIMITIVES = {\"byte[]\", \"double[]\", \"int[]\", \"short[]\", \"char[]\", \"long[]\", \"boolean[]\",\n \"java.lang.String\", \"java.lang.Object\"}\n\nif __name__ == \"__main__\":\n # Test basic bridge operations\n import traceback\n\n b = Bridge()\n try:\n s = b.get_studio()\n except:\n traceback.print_exc()\n try:\n c = b.get_core()\n except:\n traceback.print_exc()\n a = 1\n"
] | [
[
"numpy.random.randint",
"numpy.frombuffer"
]
] |
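The bridge code ending in the row above resolves Java method overloads by checking each Python argument against the declared Java parameter types through lookup tables such as _JAVA_ARRAY_TYPE_NUMPY_DTYPE and _JAVA_TYPE_NAME_TO_CASTABLE_PYTHON_TYPE. The snippet below is a minimal, self-contained sketch of that matching idea only; the tables are abbreviated and every name in it is a hypothetical stand-in, not part of the module's actual API.

    import numpy as np

    # Abbreviated stand-ins for the lookup tables shown above (hypothetical subset).
    ARRAY_DTYPES = {"byte[]": np.uint8, "double[]": np.float64, "int[]": np.int32}
    CASTABLE = {"int": {int}, "double": {float, int}, "java.lang.String": {str}}

    def args_match(java_types, py_args):
        # True if every Python argument is acceptable for this Java signature.
        if len(java_types) != len(py_args):
            return False
        for jtype, val in zip(java_types, py_args):
            if isinstance(val, np.ndarray):
                # Arrays must carry the element dtype the Java array type expects.
                if ARRAY_DTYPES.get(jtype) is not val.dtype.type:
                    return False
            elif not any(isinstance(val, t) for t in CASTABLE.get(jtype, set())):
                return False
        return True

    # Pick the first overload whose declared parameter types accept the call,
    # mirroring the loop in _check_method_args above.
    overloads = [["int", "double[]"], ["java.lang.String"]]
    call_args = (3, np.zeros(4, dtype=np.float64))
    print(next((sig for sig in overloads if args_match(sig, call_args)), None))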
GFDRR/mobility_app | [
"27285a0691fabcc2cede6772a04bb98d29e636da"
] | [
"app2.py"
] | [
"import streamlit as st\nimport pandas as pd\nimport seaborn as sns\nimport pylab as plt\nimport datetime as dt\n#import geopandas as gpd\n\ndf = pd.read_csv('/Users/nicholasjones/Desktop/code/wbg-location-data/notebooks/nick/df_india_may9.csv')\ndf.ds = pd.to_datetime(df.ds)\ndf = df.set_index('ds')\ndf['datetime'] = df.index.copy()\n\n## Header\n\nst.title('Mobility trends of states in India')\nst.write('This app visualizes mobility trends for states in India, based on the Facebook movement range maps data.')\n\ndefault_states = ['Gujarat','NCT of Delhi','West Bengal','Rajasthan','Tamil Nadu','Maharashtra','Bihar']\nstates = st.multiselect('Select a state',df.polygon_name.unique())\n\n# Line plot\n\ncolors = 'rgbycmkrgbycmkrgbycmkrgbycmk'\n\nf, ax = plt.subplots(figsize = [9,9])\nfor background_state in df.polygon_name.unique():\n sns.lineplot(x=df.index[df.polygon_name == background_state], y=df[\"all_day_bing_tiles_visited_relative_change\"][df.polygon_name == background_state], color = 'grey', alpha = 0.3, linewidth = 1)\nfor n, state in enumerate(list(states)):\n\tcol = colors[n]\n\tax = sns.lineplot(x=df.index[df.polygon_name == state], y=\"all_day_bing_tiles_visited_relative_change\", color = col,data=df[df.polygon_name == state], linewidth = 4)\nplt.axvline(dt.datetime(2020, 3, 22),linestyle='--', alpha = 0.5)\nplt.axvline(dt.datetime(2020, 3, 24),linestyle='--', alpha = 0.5)\nplt.title('Percent users remaining in home grid cell all day', fontsize = 16);\n \nst.write(f)\n\ndf\n\n## Map\n\ngdf = gpd.read_file('/Users/nicholasjones/Desktop/code/data/FB/India/gadm36_IND_shp/gadm36_IND_1.shp')\ngdf = gdf[['NAME_1','geometry']]\n\nincome_data = pd.read_csv('/Users/nicholasjones/Desktop/code/data/FB/India/NSDP_per_capita.csv',names=['state','nsdp_USD'])\nincome_data = income_data.dropna()\nincome_data.nsdp_USD = [x[4:] for x in income_data.nsdp_USD]\nincome_data.nsdp_USD = income_data.nsdp_USD.str.replace(',','')\nincome_data.nsdp_USD = income_data.nsdp_USD.astype(int)\n\ngdf = gpd.GeoDataFrame(df.merge(gdf, left_on='polygon_name', right_on = 'NAME_1'))\ngdf = gdf[['NAME_1','all_day_bing_tiles_visited_relative_change','all_day_ratio_single_tile_users','geometry','datetime']]\ngdf.head(1)\n\nmydate = st.selectbox('Select a date',['2020-03-05','2020-03-22','2020-04-29'])\nf = gdf[gdf.datetime == mydate].plot(column = 'all_day_bing_tiles_visited_relative_change')\nst.pyplot()\n"
] | [
[
"pandas.read_csv",
"pandas.to_datetime"
]
] |
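The two APIs listed for app2.py above do the data handling for the whole Streamlit app: pandas.read_csv loads the Facebook movement range export and pandas.to_datetime turns the ds column into a datetime index before the per-state filtering and plotting. A minimal sketch of that loading pattern follows; the file name and the sample state are placeholders rather than values guaranteed to exist.

    import pandas as pd

    # Placeholder path; the app itself hard-codes a machine-specific CSV location.
    df = pd.read_csv("movement_range_india.csv")
    df["ds"] = pd.to_datetime(df["ds"])   # parse the date column
    df = df.set_index("ds")               # date index used for the line plots

    # Pull one state's mobility series, as the app's multiselect loop does.
    state = "Maharashtra"                 # sample polygon_name
    series = df.loc[df["polygon_name"] == state,
                    "all_day_bing_tiles_visited_relative_change"]
    print(series.head())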
ozacas/asxtrade | [
"a3645ae526bfc7a546fdf2a39520feda99e3390a"
] | [
"src/ingest_financials.py"
] | [
"#!/usr/bin/python3\n\"\"\"\nResponsible for ingesting data related to the business performance over time. Data is placed into the asx_company_financial_metric\ncollection, ready for the core viewer app to use. Stocks whose financial details have been retrieved in the past month are skipped.\n\"\"\"\nimport pymongo\nimport argparse\nimport yfinance as yf\nimport time\nfrom utils import read_config\nimport numpy as np\nimport pandas as pd\nfrom datetime import datetime, timedelta\nfrom bson.objectid import ObjectId\n\n\ndef melt_dataframes(dfs: tuple) -> pd.DataFrame:\n result = None\n for df in filter(lambda df: df is not None and len(df) > 0, dfs):\n df[\"metric\"] = df.index\n melted = pd.melt(df, id_vars=(\"metric\"), var_name=\"date\")\n melted = melted.dropna(axis=0, how=\"any\")\n if len(melted) == 0:\n continue\n # print(melted)\n # print(melted.shape)\n if result is None:\n result = melted\n else:\n result = result.append(melted)\n if result is not None and \"date\" in result.columns:\n # print(result)\n result[\"date\"] = pd.to_datetime(\n result[\"date\"], infer_datetime_format=True\n ) # format=\"%Y-%m-%d\")\n # print(result)\n return result\n\n\ndef desired_stocks():\n available_stocks = set(db.asx_company_details.distinct(\"asx_code\"))\n print(f\"Found {len(available_stocks)} available stocks.\")\n gen_time = datetime.today() - timedelta(days=30)\n month_ago = ObjectId.from_datetime(gen_time)\n recently_updated_stocks = set(\n [\n rec[\"asx_code\"]\n for rec in db.asx_company_financial_metrics.find(\n {\"_id\": {\"$gte\": month_ago}}\n )\n ]\n )\n\n ret = available_stocks.difference(recently_updated_stocks)\n print(f\"Found {len(ret)} desired stocks to process.\")\n return ret\n\n\ndef update_all_metrics(df: pd.DataFrame, asx_code: str) -> int:\n \"\"\"\n Add (or update) all financial metrics (ie. 
rows) for the specified asx_code in the specified dataframe\n :rtype: the number of records updated/created is returned\n \"\"\"\n print(f\"Updating {len(df)} financial metrics for {asx_code}\")\n n = 0\n for t in df.itertuples():\n d = {\n \"metric\": t.metric,\n \"date\": t.date,\n \"value\": t.value,\n \"asx_code\": t.asx_code,\n }\n assert t.asx_code == asx_code\n result = db.asx_company_financial_metrics.update_one(\n {\"asx_code\": asx_code, \"date\": t.date, \"metric\": t.metric},\n {\"$set\": d},\n upsert=True,\n )\n assert result is not None\n assert isinstance(result, pymongo.results.UpdateResult)\n assert result.matched_count == 1 or result.upserted_id is not None\n n += 1\n return n\n\n\ndef fetch_metrics(asx_code: str) -> pd.DataFrame:\n \"\"\"\n Using the excellent yfinance, we fetch all possible metrics of business performance for the specified stock code.\n Returns a dataframe (possibly empty or none) representing each metric and its datapoints as separate rows\n \"\"\"\n assert len(asx_code) >= 3\n ticker = yf.Ticker(asx_code + \".AX\")\n cashflow_df = ticker.cashflow\n financial_df = ticker.financials\n earnings_df = ticker.earnings\n if set(earnings_df.columns) == set([\"Earnings\", \"Revenue\"]):\n earnings_df.index = earnings_df.index.map(\n str\n ) # convert years to str (maybe int)\n earnings_df = earnings_df.transpose()\n\n # print(earnings_df)\n balance_sheet_df = ticker.balance_sheet\n melted_df = melt_dataframes(\n (cashflow_df, financial_df, earnings_df, balance_sheet_df)\n )\n return melted_df\n\n\ndef make_asx_prices_dict(new_quote: tuple, asx_code: str) -> dict:\n #print(new_quote)\n\n d = {\n \"asx_code\": asx_code,\n \"fetch_date\": new_quote.Index,\n \"volume\": new_quote.Volume,\n \"last_price\": new_quote.Close,\n \"day_low_price\": new_quote.Low,\n \"day_high_price\": new_quote.High,\n \"open_price\": new_quote.Open,\n \"error_code\": \"\",\n \"error_descr\": \"\",\n # we dont set nan fields so that existing values (if any) are used ie. 
merge with existing data\n # \"annual_dividend_yield\": np.nan, # no available data from yf.Ticker.history() although may be available elsewhere, but for now set to missing\n # \"annual_daily_volume\": np.nan,\n # \"bid_price\": np.nan,\n \"change_price\": new_quote.change_price,\n \"change_in_percent\": new_quote.change_in_percent,\n }\n return d\n\n\ndef fill_stock_quote_gaps(db, stock_to_fetch: str, force=False) -> int:\n assert db is not None\n assert len(stock_to_fetch) >= 3\n ticker = yf.Ticker(stock_to_fetch + \".AX\")\n df = ticker.history(period=\"max\")\n df.index = [d.strftime(\"%Y-%m-%d\") for d in df.index]\n # print(df)\n available_dates = set(df.index)\n available_quotes = list(db.asx_prices.find({\"asx_code\": stock_to_fetch}))\n quoted_dates = set(\n [q[\"fetch_date\"] for q in available_quotes if not np.isnan(q[\"last_price\"])]\n )\n assert set(df.columns) == set(\n [\"Open\", \"High\", \"Low\", \"Close\", \"Volume\", \"Dividends\", \"Stock Splits\"]\n )\n dates_to_fill = (\n available_dates.difference(quoted_dates) if not force else available_dates\n )\n print(\n \"Got {} existing daily quotes for {}, found {} yfinance daily quotes, gap filling for {} dates (force={})\".format(\n len(available_quotes), stock_to_fetch, len(df), len(dates_to_fill), force\n )\n )\n if len(dates_to_fill) < 1:\n return 0\n\n df[\"change_price\"] = df[\"Close\"].diff()\n df[\"change_in_percent\"] = df[\"Close\"].pct_change() * 100.0\n gap_quotes_df = df.filter(dates_to_fill, axis=0)\n # print(df)\n n = 0\n for new_quote in gap_quotes_df.itertuples():\n d = make_asx_prices_dict(new_quote, stock_to_fetch)\n result = db.asx_prices.update_one(\n {\"fetch_date\": d[\"fetch_date\"], \"asx_code\": d[\"asx_code\"]},\n {\"$set\": d},\n upsert=True,\n )\n assert result is not None\n\n # assert result.modified_count == 1 or result.upserted_id is not None\n n += 1\n assert n == len(gap_quotes_df)\n return n\n\n\nif __name__ == \"__main__\":\n args = argparse.ArgumentParser(\n description=\"Update financial performance metrics for ASX stocks using yfinance\"\n )\n args.add_argument(\n \"--config\",\n help=\"Configuration file to use [config.json]\",\n type=str,\n default=\"config.json\",\n )\n args.add_argument(\n \"--fill-gaps\",\n help=\"Fill dates with no existing quotes for each stock (use --debug for a particular stock)\",\n action=\"store_true\",\n )\n args.add_argument(\"--fail-fast\", help=\"Stop on first error\", action=\"store_true\")\n args.add_argument(\n \"--delay\", help=\"Delay between stocks in seconds [30]\", type=int, default=30\n )\n args.add_argument(\"--force\", help=\"Overwrite existing data (if any)\", action=\"store_true\")\n args.add_argument(\n \"--debug\",\n help=\"Try to fetch specified stock (for debugging)\",\n type=str,\n required=False,\n default=None,\n )\n a = args.parse_args()\n config, password = read_config(a.config)\n m = config.get(\"mongo\")\n mongo = pymongo.MongoClient(\n m.get(\"host\"), m.get(\"port\"), username=m.get(\"user\"), password=password\n )\n db = mongo[m.get(\"db\")]\n\n stock_codes = desired_stocks() if not a.debug else set([a.debug])\n print(f\"Updating financial metrics for {len(stock_codes)} stocks\")\n for asx_code in sorted(stock_codes):\n print(f\"Processing stock {asx_code}\")\n try:\n melted_df = fetch_metrics(asx_code)\n if melted_df is None or len(melted_df) < 1:\n raise ValueError(f\"No data available for {asx_code}... 
skipping\")\n melted_df[\"asx_code\"] = asx_code\n ret = update_all_metrics(melted_df, asx_code)\n assert ret == len(melted_df)\n if a.fill_gaps:\n fill_stock_quote_gaps(db, asx_code, force=a.force)\n # FALLTHRU...\n time.sleep(a.delay)\n except Exception as e:\n print(f\"WARNING: unable to download financials for {asx_code}\")\n print(str(e))\n if a.fail_fast:\n raise e\n\n exit(0)\n"
] | [
[
"pandas.to_datetime",
"pandas.melt",
"numpy.isnan"
]
] |
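melt_dataframes in ingest_financials.py above reshapes each wide yfinance statement (metrics as the index, report dates as the columns) into a long metric/date/value frame with pandas.melt, drops missing datapoints, and parses the date strings with pandas.to_datetime. The sketch below reproduces that reshaping on a toy frame with made-up numbers; it notes pd.concat for stacking several statements instead of the DataFrame.append call in the original, since append has been removed from recent pandas releases.

    import pandas as pd

    # Toy stand-in for a yfinance statement: metrics as rows, report dates as columns.
    wide = pd.DataFrame(
        {"2020-06-30": [10.0, 3.0], "2021-06-30": [12.0, None]},
        index=["Total Revenue", "Net Income"],
    )
    wide["metric"] = wide.index

    melted = pd.melt(wide, id_vars=("metric",), var_name="date")
    melted = melted.dropna(axis=0, how="any")         # discard missing datapoints
    melted["date"] = pd.to_datetime(melted["date"])   # date strings -> timestamps

    # Several statements would then be stacked with pd.concat([...]) before upserting.
    print(melted)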
f0k/scipy | [
"3145a226339b14bbc22f2e984848e05def7659c5",
"3145a226339b14bbc22f2e984848e05def7659c5"
] | [
"scipy/interpolate/polyint.py",
"scipy/io/idl.py"
] | [
"from __future__ import division, print_function, absolute_import\n\nimport numpy as np\nfrom scipy.misc import factorial\n\nfrom scipy.lib.six.moves import xrange\n\n__all__ = [\"KroghInterpolator\", \"krogh_interpolate\", \"BarycentricInterpolator\", \"barycentric_interpolate\", \"PiecewisePolynomial\", \"piecewise_polynomial_interpolate\",\"approximate_taylor_polynomial\", \"pchip\"]\n\nclass KroghInterpolator(object):\n \"\"\"\n The interpolating polynomial for a set of points\n\n Constructs a polynomial that passes through a given set of points,\n optionally with specified derivatives at those points.\n Allows evaluation of the polynomial and all its derivatives.\n For reasons of numerical stability, this function does not compute\n the coefficients of the polynomial, although they can be obtained\n by evaluating all the derivatives.\n\n Be aware that the algorithms implemented here are not necessarily\n the most numerically stable known. Moreover, even in a world of\n exact computation, unless the x coordinates are chosen very\n carefully - Chebyshev zeros (e.g. cos(i*pi/n)) are a good choice -\n polynomial interpolation itself is a very ill-conditioned process\n due to the Runge phenomenon. In general, even with well-chosen\n x values, degrees higher than about thirty cause problems with\n numerical instability in this code.\n\n Based on [1]_.\n\n Parameters\n ----------\n xi : array_like, length N\n Known x-coordinates\n yi : array_like, N by R\n Known y-coordinates, interpreted as vectors of length R,\n or scalars if R=1. When an xi occurs two or more times in\n a row, the corresponding yi's represent derivative values.\n\n References\n ----------\n .. [1] Krogh, \"Efficient Algorithms for Polynomial Interpolation\n and Numerical Differentiation\", 1970.\n\n \"\"\"\n def __init__(self, xi, yi):\n \"\"\"Construct an interpolator passing through the specified points\n\n The polynomial passes through all the pairs (xi,yi). One may additionally\n specify a number of derivatives at each point xi; this is done by\n repeating the value xi and specifying the derivatives as successive\n yi values.\n\n Parameters\n ----------\n xi : array-like, length N\n known x-coordinates\n yi : array-like, N by R\n known y-coordinates, interpreted as vectors of length R,\n or scalars if R=1. When an xi occurs two or more times in\n a row, the corresponding yi's represent derivative values.\n\n Examples\n --------\n To produce a polynomial that is zero at 0 and 1 and has\n derivative 2 at 0, call\n\n >>> KroghInterpolator([0,0,1],[0,2,0])\n\n This constructs the quadratic 2*X**2-2*X. 
The derivative condition\n is indicated by the repeated zero in the xi array; the corresponding\n yi values are 0, the function value, and 2, the derivative value.\n\n For another example, given xi, yi, and a derivative ypi for each\n point, appropriate arrays can be constructed as:\n\n >>> xi_k, yi_k = np.repeat(xi, 2), np.ravel(np.dstack((yi,ypi)))\n >>> KroghInterpolator(xi_k, yi_k)\n\n To produce a vector-valued polynomial, supply a higher-dimensional\n array for yi:\n\n >>> KroghInterpolator([0,1],[[2,3],[4,5]])\n\n This constructs a linear polynomial giving (2,3) at 0 and (4,5) at 1.\n\n \"\"\"\n self.xi = np.asarray(xi)\n self.yi = np.asarray(yi)\n if len(self.yi.shape)==1:\n self.vector_valued = False\n self.yi = self.yi[:,np.newaxis]\n elif len(self.yi.shape)>2:\n raise ValueError(\"y coordinates must be either scalars or vectors\")\n else:\n self.vector_valued = True\n\n n = len(xi)\n self.n = n\n nn, r = self.yi.shape\n if nn!=n:\n raise ValueError(\"%d x values provided and %d y values; must be equal\" % (n, nn))\n self.r = r\n\n c = np.zeros((n+1,r))\n c[0] = yi[0]\n Vk = np.zeros((n,r))\n for k in xrange(1,n):\n s = 0\n while s<=k and xi[k-s]==xi[k]:\n s += 1\n s -= 1\n Vk[0] = yi[k]/float(factorial(s))\n for i in xrange(k-s):\n if xi[i] == xi[k]:\n raise ValueError(\"Elements if `xi` can't be equal.\")\n if s==0:\n Vk[i+1] = (c[i]-Vk[i])/(xi[i]-xi[k])\n else:\n Vk[i+1] = (Vk[i+1]-Vk[i])/(xi[i]-xi[k])\n c[k] = Vk[k-s]\n self.c = c\n\n def __call__(self,x):\n \"\"\"Evaluate the polynomial at the point x\n\n Parameters\n ----------\n x : scalar or array-like of length N\n\n Returns\n -------\n y : scalar, array of length R, array of length N, or array of length N by R\n If x is a scalar, returns either a vector or a scalar depending on\n whether the interpolator is vector-valued or scalar-valued.\n If x is a vector, returns a vector of values.\n \"\"\"\n if _isscalar(x):\n scalar = True\n m = 1\n else:\n scalar = False\n m = len(x)\n x = np.asarray(x)\n\n n = self.n\n pi = 1\n p = np.zeros((m,self.r))\n p += self.c[0,np.newaxis,:]\n for k in xrange(1,n):\n w = x - self.xi[k-1]\n pi = w*pi\n p = p + np.multiply.outer(pi,self.c[k])\n if not self.vector_valued:\n if scalar:\n return p[0,0]\n else:\n return p[:,0]\n else:\n if scalar:\n return p[0]\n else:\n return p\n\n def derivatives(self,x,der=None):\n \"\"\"\n Evaluate many derivatives of the polynomial at the point x\n\n Produce an array of all derivative values at the point x.\n\n Parameters\n ----------\n x : scalar or array_like of length N\n Point or points at which to evaluate the derivatives\n\n der : None or integer\n How many derivatives to extract; None for all potentially\n nonzero derivatives (that is a number equal to the number\n of points). This number includes the function value as 0th\n derivative.\n\n Returns\n -------\n d : ndarray\n If the interpolator's values are R-dimensional then the\n returned array will be der by N by R. 
If x is a scalar,\n the middle dimension will be dropped; if R is 1 then the\n last dimension will be dropped.\n\n Examples\n --------\n >>> KroghInterpolator([0,0,0],[1,2,3]).derivatives(0)\n array([1.0,2.0,3.0])\n >>> KroghInterpolator([0,0,0],[1,2,3]).derivatives([0,0])\n array([[1.0,1.0],\n [2.0,2.0],\n [3.0,3.0]])\n\n \"\"\"\n if _isscalar(x):\n scalar = True\n m = 1\n else:\n scalar = False\n m = len(x)\n x = np.asarray(x)\n\n n = self.n\n r = self.r\n\n if der is None:\n der = self.n\n dern = min(self.n,der)\n pi = np.zeros((n,m))\n w = np.zeros((n,m))\n pi[0] = 1\n p = np.zeros((m,self.r))\n p += self.c[0,np.newaxis,:]\n\n for k in xrange(1,n):\n w[k-1] = x - self.xi[k-1]\n pi[k] = w[k-1]*pi[k-1]\n p += np.multiply.outer(pi[k],self.c[k])\n\n cn = np.zeros((max(der,n+1),m,r))\n cn[:n+1,...] += self.c[:n+1,np.newaxis,:]\n cn[0] = p\n for k in xrange(1,n):\n for i in xrange(1,n-k+1):\n pi[i] = w[k+i-1]*pi[i-1]+pi[i]\n cn[k] = cn[k]+pi[i,:,np.newaxis]*cn[k+i]\n cn[k]*=factorial(k)\n\n cn[n,...] = 0\n if not self.vector_valued:\n if scalar:\n return cn[:der,0,0]\n else:\n return cn[:der,:,0]\n else:\n if scalar:\n return cn[:der,0]\n else:\n return cn[:der]\n def derivative(self,x,der):\n \"\"\"\n Evaluate one derivative of the polynomial at the point x\n\n Parameters\n ----------\n x : scalar or array_like of length N\n Point or points at which to evaluate the derivatives\n\n der : None or integer\n Which derivative to extract. This number includes the\n function value as 0th derivative.\n\n Returns\n -------\n d : ndarray\n If the interpolator's values are R-dimensional then the\n returned array will be N by R. If x is a scalar,\n the middle dimension will be dropped; if R is 1 then the\n last dimension will be dropped.\n\n Notes\n -----\n This is computed by evaluating all derivatives up to the desired\n one (using self.derivatives()) and then discarding the rest.\n\n \"\"\"\n return self.derivatives(x,der=der+1)[der]\n\ndef krogh_interpolate(xi,yi,x,der=0):\n \"\"\"\n Convenience function for polynomial interpolation.\n\n Constructs a polynomial that passes through a given set of points,\n optionally with specified derivatives at those points.\n Evaluates the polynomial or some of its derivatives.\n For reasons of numerical stability, this function does not compute\n the coefficients of the polynomial, although they can be obtained\n by evaluating all the derivatives.\n\n Be aware that the algorithms implemented here are not necessarily\n the most numerically stable known. Moreover, even in a world of\n exact computation, unless the x coordinates are chosen very\n carefully - Chebyshev zeros (e.g. cos(i*pi/n)) are a good choice -\n polynomial interpolation itself is a very ill-conditioned process\n due to the Runge phenomenon. In general, even with well-chosen\n x values, degrees higher than about thirty cause problems with\n numerical instability in this code.\n\n Based on Krogh 1970, \"Efficient Algorithms for Polynomial Interpolation\n and Numerical Differentiation\"\n\n The polynomial passes through all the pairs (xi,yi). 
One may additionally\n specify a number of derivatives at each point xi; this is done by\n repeating the value xi and specifying the derivatives as successive\n yi values.\n\n Parameters\n ----------\n xi : array_like, length N\n known x-coordinates\n yi : array_like, N by R\n known y-coordinates, interpreted as vectors of length R,\n or scalars if R=1\n x : scalar or array_like of length N\n Point or points at which to evaluate the derivatives\n der : integer or list\n How many derivatives to extract; None for all potentially\n nonzero derivatives (that is a number equal to the number\n of points), or a list of derivatives to extract. This number\n includes the function value as 0th derivative.\n\n Returns\n -------\n d : ndarray\n If the interpolator's values are R-dimensional then the\n returned array will be the number of derivatives by N by R.\n If x is a scalar, the middle dimension will be dropped; if\n the yi are scalars then the last dimension will be dropped.\n\n Notes\n -----\n Construction of the interpolating polynomial is a relatively expensive\n process. If you want to evaluate it repeatedly consider using the class\n KroghInterpolator (which is what this function uses).\n\n \"\"\"\n P = KroghInterpolator(xi, yi)\n if der==0:\n return P(x)\n elif _isscalar(der):\n return P.derivative(x,der=der)\n else:\n return P.derivatives(x,der=np.amax(der)+1)[der]\n\n\n\n\ndef approximate_taylor_polynomial(f,x,degree,scale,order=None):\n \"\"\"\n Estimate the Taylor polynomial of f at x by polynomial fitting.\n\n Parameters\n ----------\n f : callable\n The function whose Taylor polynomial is sought. Should accept\n a vector of x values.\n x : scalar\n The point at which the polynomial is to be evaluated.\n degree : int\n The degree of the Taylor polynomial\n scale : scalar\n The width of the interval to use to evaluate the Taylor polynomial.\n Function values spread over a range this wide are used to fit the\n polynomial. Must be chosen carefully.\n order : int or None\n The order of the polynomial to be used in the fitting; f will be\n evaluated ``order+1`` times. If None, use `degree`.\n\n Returns\n -------\n p : poly1d instance\n The Taylor polynomial (translated to the origin, so that\n for example p(0)=f(x)).\n\n Notes\n -----\n The appropriate choice of \"scale\" is a trade-off; too large and the\n function differs from its Taylor polynomial too much to get a good\n answer, too small and round-off errors overwhelm the higher-order terms.\n The algorithm used becomes numerically unstable around order 30 even\n under ideal circumstances.\n\n Choosing order somewhat larger than degree may improve the higher-order\n terms.\n\n \"\"\"\n if order is None:\n order=degree\n\n n = order+1\n # Choose n points that cluster near the endpoints of the interval in\n # a way that avoids the Runge phenomenon. 
Ensure, by including the\n # endpoint or not as appropriate, that one point always falls at x\n # exactly.\n xs = scale*np.cos(np.linspace(0,np.pi,n,endpoint=n%1)) + x\n\n P = KroghInterpolator(xs, f(xs))\n d = P.derivatives(x,der=degree+1)\n\n return np.poly1d((d/factorial(np.arange(degree+1)))[::-1])\n\n\nclass BarycentricInterpolator(object):\n \"\"\"The interpolating polynomial for a set of points\n\n Constructs a polynomial that passes through a given set of points.\n Allows evaluation of the polynomial, efficient changing of the y\n values to be interpolated, and updating by adding more x values.\n For reasons of numerical stability, this function does not compute\n the coefficients of the polynomial.\n\n This class uses a \"barycentric interpolation\" method that treats\n the problem as a special case of rational function interpolation.\n This algorithm is quite stable, numerically, but even in a world of\n exact computation, unless the x coordinates are chosen very\n carefully - Chebyshev zeros (e.g. cos(i*pi/n)) are a good choice -\n polynomial interpolation itself is a very ill-conditioned process\n due to the Runge phenomenon.\n\n Based on Berrut and Trefethen 2004, \"Barycentric Lagrange Interpolation\".\n \"\"\"\n def __init__(self, xi, yi=None):\n \"\"\"Construct an object capable of interpolating functions sampled at xi\n\n The values yi need to be provided before the function is evaluated,\n but none of the preprocessing depends on them, so rapid updates\n are possible.\n\n Parameters\n ----------\n xi : array-like of length N\n The x coordinates of the points the polynomial should pass through\n yi : array-like N by R or None\n The y coordinates of the points the polynomial should pass through;\n if R>1 the polynomial is vector-valued. If None the y values\n will be supplied later.\n \"\"\"\n self.n = len(xi)\n self.xi = np.asarray(xi)\n if yi is not None and len(yi)!=len(self.xi):\n raise ValueError(\"yi dimensions do not match xi dimensions\")\n self.set_yi(yi)\n self.wi = np.zeros(self.n)\n self.wi[0] = 1\n for j in xrange(1,self.n):\n self.wi[:j]*=(self.xi[j]-self.xi[:j])\n self.wi[j] = np.multiply.reduce(self.xi[:j]-self.xi[j])\n self.wi**=-1\n\n def set_yi(self, yi):\n \"\"\"\n Update the y values to be interpolated\n\n The barycentric interpolation algorithm requires the calculation\n of weights, but these depend only on the xi. The yi can be changed\n at any time.\n\n Parameters\n ----------\n yi : array_like N by R\n The y coordinates of the points the polynomial should pass through;\n if R>1 the polynomial is vector-valued. 
If None the y values\n will be supplied later.\n\n \"\"\"\n if yi is None:\n self.yi = None\n return\n yi = np.asarray(yi)\n if len(yi.shape)==1:\n self.vector_valued = False\n yi = yi[:,np.newaxis]\n elif len(yi.shape)>2:\n raise ValueError(\"y coordinates must be either scalars or vectors\")\n else:\n self.vector_valued = True\n\n n, r = yi.shape\n if n!=len(self.xi):\n raise ValueError(\"yi dimensions do not match xi dimensions\")\n self.yi = yi\n self.r = r\n\n\n def add_xi(self, xi, yi=None):\n \"\"\"\n Add more x values to the set to be interpolated\n\n The barycentric interpolation algorithm allows easy updating by\n adding more points for the polynomial to pass through.\n\n Parameters\n ----------\n xi : array_like of length N1\n The x coordinates of the points the polynomial should pass through\n yi : array_like N1 by R or None\n The y coordinates of the points the polynomial should pass through;\n if R>1 the polynomial is vector-valued. If None the y values\n will be supplied later. The yi should be specified if and only if\n the interpolator has y values specified.\n\n \"\"\"\n if yi is not None:\n if self.yi is None:\n raise ValueError(\"No previous yi value to update!\")\n yi = np.asarray(yi)\n if len(yi.shape)==1:\n if self.vector_valued:\n raise ValueError(\"Cannot extend dimension %d y vectors with scalars\" % self.r)\n yi = yi[:,np.newaxis]\n elif len(yi.shape)>2:\n raise ValueError(\"y coordinates must be either scalars or vectors\")\n else:\n n, r = yi.shape\n if r!=self.r:\n raise ValueError(\"Cannot extend dimension %d y vectors with dimension %d y vectors\" % (self.r, r))\n\n self.yi = np.vstack((self.yi,yi))\n else:\n if self.yi is not None:\n raise ValueError(\"No update to yi provided!\")\n old_n = self.n\n self.xi = np.concatenate((self.xi,xi))\n self.n = len(self.xi)\n self.wi**=-1\n old_wi = self.wi\n self.wi = np.zeros(self.n)\n self.wi[:old_n] = old_wi\n for j in xrange(old_n,self.n):\n self.wi[:j]*=(self.xi[j]-self.xi[:j])\n self.wi[j] = np.multiply.reduce(self.xi[:j]-self.xi[j])\n self.wi**=-1\n\n def __call__(self, x):\n \"\"\"Evaluate the interpolating polynomial at the points x\n\n Parameters\n ----------\n x : scalar or array-like of length M\n\n Returns\n -------\n y : scalar or array-like of length R or length M or M by R\n The shape of y depends on the shape of x and whether the\n interpolator is vector-valued or scalar-valued.\n\n Notes\n -----\n Currently the code computes an outer product between x and the\n weights, that is, it constructs an intermediate array of size\n N by M, where N is the degree of the polynomial.\n \"\"\"\n scalar = _isscalar(x)\n x = np.atleast_1d(x)\n c = np.subtract.outer(x,self.xi)\n z = c==0\n c[z] = 1\n c = self.wi/c\n p = np.dot(c,self.yi)/np.sum(c,axis=-1)[:,np.newaxis]\n i, j = np.nonzero(z)\n p[i] = self.yi[j]\n if not self.vector_valued:\n if scalar:\n return p[0,0]\n else:\n return p[:,0]\n else:\n if scalar:\n return p[0]\n else:\n return p\ndef barycentric_interpolate(xi, yi, x):\n \"\"\"\n Convenience function for polynomial interpolation\n\n Constructs a polynomial that passes through a given set of points,\n then evaluates the polynomial. 
For reasons of numerical stability,\n this function does not compute the coefficients of the polynomial.\n\n This function uses a \"barycentric interpolation\" method that treats\n the problem as a special case of rational function interpolation.\n This algorithm is quite stable, numerically, but even in a world of\n exact computation, unless the x coordinates are chosen very\n carefully - Chebyshev zeros (e.g. cos(i*pi/n)) are a good choice -\n polynomial interpolation itself is a very ill-conditioned process\n due to the Runge phenomenon.\n\n Based on Berrut and Trefethen 2004, \"Barycentric Lagrange Interpolation\".\n\n\n Parameters\n ----------\n xi : array_like of length N\n The x coordinates of the points the polynomial should pass through\n yi : array_like N by R\n The y coordinates of the points the polynomial should pass through;\n if R>1 the polynomial is vector-valued.\n x : scalar or array_like of length M\n\n\n Returns\n -------\n y : scalar or array_like of length R or length M or M by R\n The shape of y depends on the shape of x and whether the\n interpolator is vector-valued or scalar-valued.\n\n\n Notes\n -----\n\n Construction of the interpolation weights is a relatively slow process.\n If you want to call this many times with the same xi (but possibly\n varying yi or x) you should use the class BarycentricInterpolator.\n This is what this function uses internally.\n\n \"\"\"\n return BarycentricInterpolator(xi, yi)(x)\n\n\nclass PiecewisePolynomial(object):\n \"\"\"Piecewise polynomial curve specified by points and derivatives\n\n This class represents a curve that is a piecewise polynomial. It\n passes through a list of points and has specified derivatives at\n each point. The degree of the polynomial may very from segment to\n segment, as may the number of derivatives available. The degree\n should not exceed about thirty.\n\n Appending points to the end of the curve is efficient.\n \"\"\"\n def __init__(self, xi, yi, orders=None, direction=None):\n \"\"\"Construct a piecewise polynomial\n\n Parameters\n ----------\n xi : array-like of length N\n a sorted list of x-coordinates\n yi : list of lists of length N\n yi[i] is the list of derivatives known at xi[i]\n orders : list of integers, or integer\n a list of polynomial orders, or a single universal order\n direction : {None, 1, -1}\n indicates whether the xi are increasing or decreasing\n +1 indicates increasing\n -1 indicates decreasing\n None indicates that it should be deduced from the first two xi\n\n Notes\n -----\n If orders is None, or orders[i] is None, then the degree of the\n polynomial segment is exactly the degree required to match all i\n available derivatives at both endpoints. If orders[i] is not None,\n then some derivatives will be ignored. The code will try to use an\n equal number of derivatives from each end; if the total number of\n derivatives needed is odd, it will prefer the rightmost endpoint. 
If\n not enough derivatives are available, an exception is raised.\n \"\"\"\n yi0 = np.asarray(yi[0])\n if len(yi0.shape)==2:\n self.vector_valued = True\n self.r = yi0.shape[1]\n elif len(yi0.shape)==1:\n self.vector_valued = False\n self.r = 1\n else:\n raise ValueError(\"Each derivative must be a vector, not a higher-rank array\")\n\n self.xi = [xi[0]]\n self.yi = [yi0]\n self.n = 1\n\n self.direction = direction\n self.orders = []\n self.polynomials = []\n self.extend(xi[1:],yi[1:],orders)\n\n def _make_polynomial(self,x1,y1,x2,y2,order,direction):\n \"\"\"Construct the interpolating polynomial object\n\n Deduces the number of derivatives to match at each end\n from order and the number of derivatives available. If\n possible it uses the same number of derivatives from\n each end; if the number is odd it tries to take the\n extra one from y2. In any case if not enough derivatives\n are available at one end or another it draws enough to\n make up the total from the other end.\n \"\"\"\n n = order+1\n n1 = min(n//2,len(y1))\n n2 = min(n-n1,len(y2))\n n1 = min(n-n2,len(y1))\n if n1+n2!=n:\n raise ValueError(\"Point %g has %d derivatives, point %g has %d derivatives, but order %d requested\" % (x1, len(y1), x2, len(y2), order))\n if not (n1 <= len(y1) and n2 <= len(y2)):\n raise ValueError(\"`order` input incompatible with length y1 or y2.\")\n\n xi = np.zeros(n)\n if self.vector_valued:\n yi = np.zeros((n,self.r))\n else:\n yi = np.zeros((n,))\n\n xi[:n1] = x1\n yi[:n1] = y1[:n1]\n xi[n1:] = x2\n yi[n1:] = y2[:n2]\n\n return KroghInterpolator(xi,yi)\n\n def append(self, xi, yi, order=None):\n \"\"\"\n Append a single point with derivatives to the PiecewisePolynomial\n\n Parameters\n ----------\n xi : float\n\n yi : array_like\n yi is the list of derivatives known at xi\n\n order : integer or None\n a polynomial order, or instructions to use the highest\n possible order\n\n \"\"\"\n\n yi = np.asarray(yi)\n if self.vector_valued:\n if (len(yi.shape)!=2 or yi.shape[1]!=self.r):\n raise ValueError(\"Each derivative must be a vector of length %d\" % self.r)\n else:\n if len(yi.shape)!=1:\n raise ValueError(\"Each derivative must be a scalar\")\n\n if self.direction is None:\n self.direction = np.sign(xi-self.xi[-1])\n elif (xi-self.xi[-1])*self.direction < 0:\n raise ValueError(\"x coordinates must be in the %d direction: %s\" % (self.direction, self.xi))\n\n self.xi.append(xi)\n self.yi.append(yi)\n\n\n if order is None:\n n1 = len(self.yi[-2])\n n2 = len(self.yi[-1])\n n = n1+n2\n order = n-1\n\n self.orders.append(order)\n self.polynomials.append(self._make_polynomial(\n self.xi[-2], self.yi[-2],\n self.xi[-1], self.yi[-1],\n order, self.direction))\n self.n += 1\n\n\n def extend(self, xi, yi, orders=None):\n \"\"\"\n Extend the PiecewisePolynomial by a list of points\n\n Parameters\n ----------\n xi : array_like of length N1\n a sorted list of x-coordinates\n yi : list of lists of length N1\n yi[i] is the list of derivatives known at xi[i]\n orders : list of integers, or integer\n a list of polynomial orders, or a single universal order\n direction : {None, 1, -1}\n indicates whether the xi are increasing or decreasing\n +1 indicates increasing\n -1 indicates decreasing\n None indicates that it should be deduced from the first two xi\n\n \"\"\"\n\n for i in xrange(len(xi)):\n if orders is None or _isscalar(orders):\n self.append(xi[i],yi[i],orders)\n else:\n self.append(xi[i],yi[i],orders[i])\n\n def __call__(self, x):\n \"\"\"Evaluate the piecewise polynomial\n\n Parameters\n ----------\n 
x : scalar or array-like of length N\n\n Returns\n -------\n y : scalar or array-like of length R or length N or N by R\n \"\"\"\n if _isscalar(x):\n pos = np.clip(np.searchsorted(self.xi, x) - 1, 0, self.n-2)\n y = self.polynomials[pos](x)\n else:\n x = np.asarray(x)\n m = len(x)\n pos = np.clip(np.searchsorted(self.xi, x) - 1, 0, self.n-2)\n if self.vector_valued:\n y = np.zeros((m,self.r))\n else:\n y = np.zeros(m)\n for i in xrange(self.n-1):\n c = pos==i\n y[c] = self.polynomials[i](x[c])\n return y\n\n def derivative(self, x, der):\n \"\"\"\n Evaluate a derivative of the piecewise polynomial\n\n Parameters\n ----------\n x : scalar or array_like of length N\n\n der : integer\n which single derivative to extract\n\n Returns\n -------\n y : scalar or array_like of length R or length N or N by R\n\n Notes\n -----\n This currently computes (using self.derivatives()) all derivatives\n of the curve segment containing each x but returns only one.\n\n \"\"\"\n return self.derivatives(x,der=der+1)[der]\n\n def derivatives(self, x, der):\n \"\"\"\n Evaluate a derivative of the piecewise polynomial\n\n Parameters\n ----------\n x : scalar or array_like of length N\n\n der : integer\n how many derivatives (including the function value as\n 0th derivative) to extract\n\n Returns\n -------\n y : array_like of shape der by R or der by N or der by N by R\n\n \"\"\"\n if _isscalar(x):\n pos = np.clip(np.searchsorted(self.xi, x) - 1, 0, self.n-2)\n y = self.polynomials[pos].derivatives(x,der=der)\n else:\n x = np.asarray(x)\n m = len(x)\n pos = np.clip(np.searchsorted(self.xi, x) - 1, 0, self.n-2)\n if self.vector_valued:\n y = np.zeros((der,m,self.r))\n else:\n y = np.zeros((der,m))\n for i in xrange(self.n-1):\n c = pos==i\n y[:,c] = self.polynomials[i].derivatives(x[c],der=der)\n return y\n\n\ndef piecewise_polynomial_interpolate(xi,yi,x,orders=None,der=0):\n \"\"\"\n Convenience function for piecewise polynomial interpolation\n\n Parameters\n ----------\n xi : array_like\n A sorted list of x-coordinates, of length N.\n yi : list of lists\n yi[i] is the list of derivatives known at xi[i]. Of length N.\n x : scalar or array_like\n Of length M.\n orders : int or list of ints\n a list of polynomial orders, or a single universal order\n der : int\n Which single derivative to extract.\n\n Returns\n -------\n y : scalar or array_like\n The result, of length R or length M or M by R,\n\n Notes\n -----\n If orders is None, or orders[i] is None, then the degree of the\n polynomial segment is exactly the degree required to match all i\n available derivatives at both endpoints. If orders[i] is not None,\n then some derivatives will be ignored. The code will try to use an\n equal number of derivatives from each end; if the total number of\n derivatives needed is odd, it will prefer the rightmost endpoint. 
If\n not enough derivatives are available, an exception is raised.\n\n Construction of these piecewise polynomials can be an expensive process;\n if you repeatedly evaluate the same polynomial, consider using the class\n PiecewisePolynomial (which is what this function does).\n\n \"\"\"\n\n P = PiecewisePolynomial(xi, yi, orders)\n if der==0:\n return P(x)\n elif _isscalar(der):\n return P.derivative(x,der=der)\n else:\n return P.derivatives(x,der=np.amax(der)+1)[der]\n\ndef _isscalar(x):\n \"\"\"Check whether x is if a scalar type, or 0-dim\"\"\"\n return np.isscalar(x) or hasattr(x, 'shape') and x.shape == ()\n\ndef _edge_case(m0, d1):\n return np.where((d1==0) | (m0==0), 0.0, 1.0/(1.0/m0+1.0/d1))\n\ndef _find_derivatives(x, y):\n # Determine the derivatives at the points y_k, d_k, by using\n # PCHIP algorithm is:\n # We choose the derivatives at the point x_k by\n # Let m_k be the slope of the kth segment (between k and k+1)\n # If m_k=0 or m_{k-1}=0 or sgn(m_k) != sgn(m_{k-1}) then d_k == 0\n # else use weighted harmonic mean:\n # w_1 = 2h_k + h_{k-1}, w_2 = h_k + 2h_{k-1}\n # 1/d_k = 1/(w_1 + w_2)*(w_1 / m_k + w_2 / m_{k-1})\n # where h_k is the spacing between x_k and x_{k+1}\n\n hk = x[1:] - x[:-1]\n mk = (y[1:] - y[:-1]) / hk\n smk = np.sign(mk)\n condition = ((smk[1:] != smk[:-1]) | (mk[1:]==0) | (mk[:-1]==0))\n\n w1 = 2*hk[1:] + hk[:-1]\n w2 = hk[1:] + 2*hk[:-1]\n whmean = 1.0/(w1+w2)*(w1/mk[1:] + w2/mk[:-1])\n\n dk = np.zeros_like(y)\n dk[1:-1][condition] = 0.0\n dk[1:-1][~condition] = 1.0/whmean[~condition]\n\n # For end-points choose d_0 so that 1/d_0 = 1/m_0 + 1/d_1 unless\n # one of d_1 or m_0 is 0, then choose d_0 = 0\n\n dk[0] = _edge_case(mk[0],dk[1])\n dk[-1] = _edge_case(mk[-1],dk[-2])\n return dk\n\n\ndef pchip(x, y):\n \"\"\"PCHIP 1-d monotonic cubic interpolation\n\n x and y are arrays of values used to approximate some function f, with\n ``y = f(x)``. This class factory function returns a callable class whose\n ``__call__`` method uses monotonic cubic, interpolation to find the value\n of new points.\n\n Parameters\n ----------\n x : array\n A 1D array of monotonically increasing real values. x cannot\n include duplicate values (otherwise f is overspecified)\n y : array\n A 1-D array of real values. y's length along the interpolation\n axis must be equal to the length of x.\n\n Assumes x is sorted in monotonic order (e.g. ``x[1] > x[0]``).\n\n Returns\n -------\n pchip : PiecewisePolynomial instance\n The result of the interpolation.\n\n \"\"\"\n derivs = _find_derivatives(x,y)\n return PiecewisePolynomial(x, list(zip(y, derivs)), orders=3, direction=None)\n",
"# IDLSave - a python module to read IDL 'save' files\n# Copyright (c) 2010 Thomas P. Robitaille\n\n# Many thanks to Craig Markwardt for publishing the Unofficial Format\n# Specification for IDL .sav files, without which this Python module would not\n# exist (http://cow.physics.wisc.edu/~craigm/idl/savefmt).\n\n# This code was developed by with permission from ITT Visual Information\n# Systems. IDL(r) is a registered trademark of ITT Visual Information Systems,\n# Inc. for their Interactive Data Language software.\n\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\nfrom __future__ import division, print_function, absolute_import\n\nimport struct\nimport numpy as np\nfrom numpy.compat import asstr\nimport tempfile\nimport zlib\nimport warnings\n\n# Define the different data types that can be found in an IDL save file\nDTYPE_DICT = {}\nDTYPE_DICT[1] = '>u1'\nDTYPE_DICT[2] = '>i2'\nDTYPE_DICT[3] = '>i4'\nDTYPE_DICT[4] = '>f4'\nDTYPE_DICT[5] = '>f8'\nDTYPE_DICT[6] = '>c8'\nDTYPE_DICT[7] = '|O'\nDTYPE_DICT[8] = '|O'\nDTYPE_DICT[9] = '>c16'\nDTYPE_DICT[10] = '|O'\nDTYPE_DICT[11] = '|O'\nDTYPE_DICT[12] = '>u2'\nDTYPE_DICT[13] = '>u4'\nDTYPE_DICT[14] = '>i8'\nDTYPE_DICT[15] = '>u8'\n\n# Define the different record types that can be found in an IDL save file\nRECTYPE_DICT = {}\nRECTYPE_DICT[0] = \"START_MARKER\"\nRECTYPE_DICT[1] = \"COMMON_VARIABLE\"\nRECTYPE_DICT[2] = \"VARIABLE\"\nRECTYPE_DICT[3] = \"SYSTEM_VARIABLE\"\nRECTYPE_DICT[6] = \"END_MARKER\"\nRECTYPE_DICT[10] = \"TIMESTAMP\"\nRECTYPE_DICT[12] = \"COMPILED\"\nRECTYPE_DICT[13] = \"IDENTIFICATION\"\nRECTYPE_DICT[14] = \"VERSION\"\nRECTYPE_DICT[15] = \"HEAP_HEADER\"\nRECTYPE_DICT[16] = \"HEAP_DATA\"\nRECTYPE_DICT[17] = \"PROMOTE64\"\nRECTYPE_DICT[19] = \"NOTICE\"\n\n# Define a dictionary to contain structure definitions\nSTRUCT_DICT = {}\n\n\ndef _align_32(f):\n '''Align to the next 32-bit position in a file'''\n\n pos = f.tell()\n if pos % 4 != 0:\n f.seek(pos + 4 - pos % 4)\n return\n\n\ndef _skip_bytes(f, n):\n '''Skip `n` bytes'''\n f.read(n)\n return\n\n\ndef _read_bytes(f, n):\n '''Read the next `n` bytes'''\n return f.read(n)\n\n\ndef _read_byte(f):\n '''Read a single byte'''\n return np.uint8(struct.unpack('>B', f.read(4)[:1])[0])\n\n\ndef _read_long(f):\n '''Read a signed 32-bit integer'''\n return np.int32(struct.unpack('>l', f.read(4))[0])\n\n\ndef _read_int16(f):\n '''Read a signed 16-bit integer'''\n return np.int16(struct.unpack('>h', f.read(4)[2:4])[0])\n\n\ndef _read_int32(f):\n '''Read a signed 32-bit integer'''\n 
return np.int32(struct.unpack('>i', f.read(4))[0])\n\n\ndef _read_int64(f):\n '''Read a signed 64-bit integer'''\n return np.int64(struct.unpack('>q', f.read(8))[0])\n\n\ndef _read_uint16(f):\n '''Read an unsigned 16-bit integer'''\n return np.uint16(struct.unpack('>H', f.read(4)[2:4])[0])\n\n\ndef _read_uint32(f):\n '''Read an unsigned 32-bit integer'''\n return np.uint32(struct.unpack('>I', f.read(4))[0])\n\n\ndef _read_uint64(f):\n '''Read an unsigned 64-bit integer'''\n return np.uint64(struct.unpack('>Q', f.read(8))[0])\n\n\ndef _read_float32(f):\n '''Read a 32-bit float'''\n return np.float32(struct.unpack('>f', f.read(4))[0])\n\n\ndef _read_float64(f):\n '''Read a 64-bit float'''\n return np.float64(struct.unpack('>d', f.read(8))[0])\n\n\nclass Pointer(object):\n '''Class used to define pointers'''\n\n def __init__(self, index):\n self.index = index\n return\n\n\nclass ObjectPointer(Pointer):\n '''Class used to define object pointers'''\n pass\n\n\ndef _read_string(f):\n '''Read a string'''\n length = _read_long(f)\n if length > 0:\n chars = _read_bytes(f, length)\n _align_32(f)\n chars = asstr(chars)\n else:\n warnings.warn(\"warning: empty strings are now set to '' instead of None\")\n chars = ''\n return chars\n\n\ndef _read_string_data(f):\n '''Read a data string (length is specified twice)'''\n length = _read_long(f)\n if length > 0:\n length = _read_long(f)\n string_data = _read_bytes(f, length)\n _align_32(f)\n else:\n warnings.warn(\"warning: empty strings are now set to '' instead of None\")\n string_data = ''\n return string_data\n\n\ndef _read_data(f, dtype):\n '''Read a variable with a specified data type'''\n if dtype==1:\n if _read_int32(f) != 1:\n raise Exception(\"Error occurred while reading byte variable\")\n return _read_byte(f)\n elif dtype==2:\n return _read_int16(f)\n elif dtype==3:\n return _read_int32(f)\n elif dtype==4:\n return _read_float32(f)\n elif dtype==5:\n return _read_float64(f)\n elif dtype==6:\n real = _read_float32(f)\n imag = _read_float32(f)\n return np.complex64(real + imag * 1j)\n elif dtype==7:\n return _read_string_data(f)\n elif dtype==8:\n raise Exception(\"Should not be here - please report this\")\n elif dtype==9:\n real = _read_float64(f)\n imag = _read_float64(f)\n return np.complex128(real + imag * 1j)\n elif dtype==10:\n return Pointer(_read_int32(f))\n elif dtype==11:\n return ObjectPointer(_read_int32(f))\n elif dtype==12:\n return _read_uint16(f)\n elif dtype==13:\n return _read_uint32(f)\n elif dtype==14:\n return _read_int64(f)\n elif dtype==15:\n return _read_uint64(f)\n else:\n raise Exception(\"Unknown IDL type: %i - please report this\" % dtype)\n\n\ndef _read_structure(f, array_desc, struct_desc):\n '''\n Read a structure, with the array and structure descriptors given as\n `array_desc` and `structure_desc` respectively.\n '''\n\n nrows = array_desc['nelements']\n ncols = struct_desc['ntags']\n columns = struct_desc['tagtable']\n\n dtype = []\n for col in columns:\n if col['structure'] or col['array']:\n dtype.append(((col['name'].lower(), col['name']), np.object_))\n else:\n if col['typecode'] in DTYPE_DICT:\n dtype.append(((col['name'].lower(), col['name']),\n DTYPE_DICT[col['typecode']]))\n else:\n raise Exception(\"Variable type %i not implemented\" %\n col['typecode'])\n\n structure = np.recarray((nrows, ), dtype=dtype)\n\n for i in range(nrows):\n for col in columns:\n dtype = col['typecode']\n if col['structure']:\n structure[col['name']][i] = _read_structure(f, \\\n struct_desc['arrtable'][col['name']], \\\n 
struct_desc['structtable'][col['name']])\n elif col['array']:\n structure[col['name']][i] = _read_array(f, dtype, \\\n struct_desc['arrtable'][col['name']])\n else:\n structure[col['name']][i] = _read_data(f, dtype)\n\n # Reshape structure if needed\n if array_desc['ndims'] > 1:\n warnings.warn(\"warning: multi-dimensional structures are now correctly reshaped\")\n dims = array_desc['dims'][:int(array_desc['ndims'])]\n dims.reverse()\n structure = structure.reshape(dims)\n\n return structure\n\n\ndef _read_array(f, typecode, array_desc):\n '''\n Read an array of type `typecode`, with the array descriptor given as\n `array_desc`.\n '''\n\n if typecode in [1, 3, 4, 5, 6, 9, 13, 14, 15]:\n\n if typecode == 1:\n nbytes = _read_int32(f)\n if nbytes != array_desc['nbytes']:\n raise Exception(\"Error occurred while reading byte array\")\n\n # Read bytes as numpy array\n array = np.fromstring(f.read(array_desc['nbytes']), \\\n dtype=DTYPE_DICT[typecode])\n\n elif typecode in [2, 12]:\n\n # These are 2 byte types, need to skip every two as they are not packed\n\n array = np.fromstring(f.read(array_desc['nbytes']*2), \\\n dtype=DTYPE_DICT[typecode])[1::2]\n\n else:\n\n # Read bytes into list\n array = []\n for i in range(array_desc['nelements']):\n dtype = typecode\n data = _read_data(f, dtype)\n array.append(data)\n\n array = np.array(array, dtype=np.object_)\n\n # Reshape array if needed\n if array_desc['ndims'] > 1:\n dims = array_desc['dims'][:int(array_desc['ndims'])]\n dims.reverse()\n array = array.reshape(dims)\n\n # Go to next alignment position\n _align_32(f)\n\n return array\n\n\ndef _read_record(f):\n '''Function to read in a full record'''\n\n record = {}\n\n recpos = f.tell()\n record['rectype'] = _read_long(f)\n\n nextrec = _read_uint32(f)\n nextrec += _read_uint32(f) * 2**32\n\n _skip_bytes(f, 4)\n\n if not record['rectype'] in RECTYPE_DICT:\n raise Exception(\"Unknown RECTYPE: %i\" % record['rectype'])\n\n record['rectype'] = RECTYPE_DICT[record['rectype']]\n\n if record['rectype'] in [\"VARIABLE\", \"HEAP_DATA\"]:\n\n if record['rectype'] == \"VARIABLE\":\n record['varname'] = _read_string(f)\n else:\n record['heap_index'] = _read_long(f)\n _skip_bytes(f, 4)\n\n rectypedesc = _read_typedesc(f)\n\n varstart = _read_long(f)\n if varstart != 7:\n raise Exception(\"VARSTART is not 7\")\n\n if rectypedesc['structure']:\n record['data'] = _read_structure(f, rectypedesc['array_desc'], \\\n rectypedesc['struct_desc'])\n elif rectypedesc['array']:\n record['data'] = _read_array(f, rectypedesc['typecode'], \\\n rectypedesc['array_desc'])\n else:\n dtype = rectypedesc['typecode']\n record['data'] = _read_data(f, dtype)\n\n elif record['rectype'] == \"TIMESTAMP\":\n\n _skip_bytes(f, 4*256)\n record['date'] = _read_string(f)\n record['user'] = _read_string(f)\n record['host'] = _read_string(f)\n\n elif record['rectype'] == \"VERSION\":\n\n record['format'] = _read_long(f)\n record['arch'] = _read_string(f)\n record['os'] = _read_string(f)\n record['release'] = _read_string(f)\n\n elif record['rectype'] == \"IDENTIFICATON\":\n\n record['author'] = _read_string(f)\n record['title'] = _read_string(f)\n record['idcode'] = _read_string(f)\n\n elif record['rectype'] == \"NOTICE\":\n\n record['notice'] = _read_string(f)\n\n elif record['rectype'] == \"HEAP_HEADER\":\n\n record['nvalues'] = _read_long(f)\n record['indices'] = []\n for i in range(record['nvalues']):\n record['indices'].append(_read_long(f))\n\n elif record['rectype'] == \"COMMONBLOCK\":\n\n record['nvars'] = _read_long(f)\n 
record['name'] = _read_string(f)\n record['varnames'] = []\n for i in range(record['nvars']):\n record['varnames'].append(_read_string(f))\n\n elif record['rectype'] == \"END_MARKER\":\n\n record['end'] = True\n\n elif record['rectype'] == \"UNKNOWN\":\n\n warnings.warn(\"Skipping UNKNOWN record\")\n\n elif record['rectype'] == \"SYSTEM_VARIABLE\":\n\n warnings.warn(\"Skipping SYSTEM_VARIABLE record\")\n\n else:\n\n raise Exception(\"record['rectype']=%s not implemented\" % \\\n record['rectype'])\n\n f.seek(nextrec)\n\n return record\n\n\ndef _read_typedesc(f):\n '''Function to read in a type descriptor'''\n\n typedesc = {}\n\n typedesc['typecode'] = _read_long(f)\n typedesc['varflags'] = _read_long(f)\n\n if typedesc['varflags'] & 2 == 2:\n raise Exception(\"System variables not implemented\")\n\n typedesc['array'] = typedesc['varflags'] & 4 == 4\n typedesc['structure'] = typedesc['varflags'] & 32 == 32\n\n if typedesc['structure']:\n typedesc['array_desc'] = _read_arraydesc(f)\n typedesc['struct_desc'] = _read_structdesc(f)\n elif typedesc['array']:\n typedesc['array_desc'] = _read_arraydesc(f)\n\n return typedesc\n\n\ndef _read_arraydesc(f):\n '''Function to read in an array descriptor'''\n\n arraydesc = {}\n\n arraydesc['arrstart'] = _read_long(f)\n\n if arraydesc['arrstart'] == 8:\n\n _skip_bytes(f, 4)\n\n arraydesc['nbytes'] = _read_long(f)\n arraydesc['nelements'] = _read_long(f)\n arraydesc['ndims'] = _read_long(f)\n\n _skip_bytes(f, 8)\n\n arraydesc['nmax'] = _read_long(f)\n\n arraydesc['dims'] = []\n for d in range(arraydesc['nmax']):\n arraydesc['dims'].append(_read_long(f))\n\n elif arraydesc['arrstart'] == 18:\n\n warnings.warn(\"Using experimental 64-bit array read\")\n\n _skip_bytes(f, 8)\n\n arraydesc['nbytes'] = _read_uint64(f)\n arraydesc['nelements'] = _read_uint64(f)\n arraydesc['ndims'] = _read_long(f)\n\n _skip_bytes(f, 8)\n\n arraydesc['nmax'] = 8\n\n arraydesc['dims'] = []\n for d in range(arraydesc['nmax']):\n v = _read_long(f)\n if v != 0:\n raise Exception(\"Expected a zero in ARRAY_DESC\")\n arraydesc['dims'].append(_read_long(f))\n\n else:\n\n raise Exception(\"Unknown ARRSTART: %i\" % arraydesc['arrstart'])\n\n return arraydesc\n\n\ndef _read_structdesc(f):\n '''Function to read in a structure descriptor'''\n\n structdesc = {}\n\n structstart = _read_long(f)\n if structstart != 9:\n raise Exception(\"STRUCTSTART should be 9\")\n\n structdesc['name'] = _read_string(f)\n predef = _read_long(f)\n structdesc['ntags'] = _read_long(f)\n structdesc['nbytes'] = _read_long(f)\n\n structdesc['predef'] = predef & 1\n structdesc['inherits'] = predef & 2\n structdesc['is_super'] = predef & 4\n\n if not structdesc['predef']:\n\n structdesc['tagtable'] = []\n for t in range(structdesc['ntags']):\n structdesc['tagtable'].append(_read_tagdesc(f))\n\n for tag in structdesc['tagtable']:\n tag['name'] = _read_string(f)\n\n structdesc['arrtable'] = {}\n for tag in structdesc['tagtable']:\n if tag['array']:\n structdesc['arrtable'][tag['name']] = _read_arraydesc(f)\n\n structdesc['structtable'] = {}\n for tag in structdesc['tagtable']:\n if tag['structure']:\n structdesc['structtable'][tag['name']] = _read_structdesc(f)\n\n if structdesc['inherits'] or structdesc['is_super']:\n structdesc['classname'] = _read_string(f)\n structdesc['nsupclasses'] = _read_long(f)\n structdesc['supclassnames'] = []\n for s in range(structdesc['nsupclasses']):\n structdesc['supclassnames'].append(_read_string(f))\n structdesc['supclasstable'] = []\n for s in range(structdesc['nsupclasses']):\n 
structdesc['supclasstable'].append(_read_structdesc(f))\n\n STRUCT_DICT[structdesc['name']] = structdesc\n\n else:\n\n if not structdesc['name'] in STRUCT_DICT:\n raise Exception(\"PREDEF=1 but can't find definition\")\n\n structdesc = STRUCT_DICT[structdesc['name']]\n\n return structdesc\n\n\ndef _read_tagdesc(f):\n '''Function to read in a tag descriptor'''\n\n tagdesc = {}\n\n tagdesc['offset'] = _read_long(f)\n\n if tagdesc['offset'] == -1:\n tagdesc['offset'] = _read_uint64(f)\n\n tagdesc['typecode'] = _read_long(f)\n tagflags = _read_long(f)\n\n tagdesc['array'] = tagflags & 4 == 4\n tagdesc['structure'] = tagflags & 32 == 32\n tagdesc['scalar'] = tagdesc['typecode'] in DTYPE_DICT\n # Assume '10'x is scalar\n\n return tagdesc\n\n\ndef _replace_heap(variable, heap):\n\n if isinstance(variable, Pointer):\n\n while isinstance(variable, Pointer):\n\n if variable.index == 0:\n variable = None\n else:\n variable = heap[variable.index]\n\n replace, new = _replace_heap(variable, heap)\n\n if replace:\n variable = new\n\n return True, variable\n\n elif isinstance(variable, np.core.records.recarray):\n\n # Loop over records\n for ir, record in enumerate(variable):\n\n replace, new = _replace_heap(record, heap)\n\n if replace:\n variable[ir] = new\n\n return False, variable\n\n elif isinstance(variable, np.core.records.record):\n\n # Loop over values\n for iv, value in enumerate(variable):\n\n replace, new = _replace_heap(value, heap)\n\n if replace:\n variable[iv] = new\n\n return False, variable\n\n elif isinstance(variable, np.ndarray):\n\n # Loop over values if type is np.object_\n if variable.dtype.type is np.object_:\n\n for iv in range(variable.size):\n\n replace, new = _replace_heap(variable.item(iv), heap)\n\n if replace:\n variable.itemset(iv, new)\n\n return False, variable\n\n else:\n\n return False, variable\n\n\nclass AttrDict(dict):\n '''\n A case-insensitive dictionary with access via item, attribute, and call\n notations:\n\n >>> d = AttrDict()\n >>> d['Variable'] = 123\n >>> d['Variable']\n 123\n >>> d.Variable\n 123\n >>> d.variable\n 123\n >>> d('VARIABLE')\n 123\n '''\n\n def __init__(self, init={}):\n dict.__init__(self, init)\n\n def __getitem__(self, name):\n return super(AttrDict, self).__getitem__(name.lower())\n\n def __setitem__(self, key, value):\n return super(AttrDict, self).__setitem__(key.lower(), value)\n\n __getattr__ = __getitem__\n __setattr__ = __setitem__\n __call__ = __getitem__\n\n\ndef readsav(file_name, idict=None, python_dict=False,\n uncompressed_file_name=None, verbose=False):\n '''\n Read an IDL .sav file\n\n Parameters\n ----------\n file_name : str\n Name of the IDL save file.\n idict : dict, optional\n Dictionary in which to insert .sav file variables\n python_dict : bool, optional\n By default, the object return is not a Python dictionary, but a\n case-insensitive dictionary with item, attribute, and call access\n to variables. To get a standard Python dictionary, set this option\n to True.\n uncompressed_file_name : str, optional\n This option only has an effect for .sav files written with the\n /compress option. If a file name is specified, compressed .sav\n files are uncompressed to this file. 
Otherwise, readsav will use\n the `tempfile` module to determine a temporary filename\n automatically, and will remove the temporary file upon successfully\n reading it in.\n verbose : bool, optional\n Whether to print out information about the save file, including\n the records read, and available variables.\n\n Returns\n ----------\n idl_dict : AttrDict or dict\n If `python_dict` is set to False (default), this function returns a\n case-insensitive dictionary with item, attribute, and call access\n to variables. If `python_dict` is set to True, this function\n returns a Python dictionary with all variable names in lowercase.\n If `idict` was specified, then variables are written to the\n dictionary specified, and the updated dictionary is returned.\n '''\n\n # Initialize record and variable holders\n records = []\n if python_dict or idict:\n variables = {}\n else:\n variables = AttrDict()\n\n # Open the IDL file\n f = open(file_name, 'rb')\n\n # Read the signature, which should be 'SR'\n signature = _read_bytes(f, 2)\n if signature != b'SR':\n raise Exception(\"Invalid SIGNATURE: %s\" % signature)\n\n # Next, the record format, which is '\\x00\\x04' for normal .sav\n # files, and '\\x00\\x06' for compressed .sav files.\n recfmt = _read_bytes(f, 2)\n\n if recfmt == b'\\x00\\x04':\n pass\n\n elif recfmt == b'\\x00\\x06':\n\n if verbose:\n print(\"IDL Save file is compressed\")\n\n if uncompressed_file_name:\n fout = open(uncompressed_file_name, 'w+b')\n else:\n fout = tempfile.NamedTemporaryFile(suffix='.sav')\n\n if verbose:\n print(\" -> expanding to %s\" % fout.name)\n\n # Write header\n fout.write(b'SR\\x00\\x04')\n\n # Cycle through records\n while True:\n\n # Read record type\n rectype = _read_long(f)\n fout.write(struct.pack('>l', int(rectype)))\n\n # Read position of next record and return as int\n nextrec = _read_uint32(f)\n nextrec += _read_uint32(f) * 2**32\n\n # Read the unknown 4 bytes\n unknown = f.read(4)\n\n # Check if the end of the file has been reached\n if RECTYPE_DICT[rectype] == 'END_MARKER':\n fout.write(struct.pack('>I', int(nextrec) % 2**32))\n fout.write(struct.pack('>I', int((nextrec - (nextrec % 2**32)) / 2**32)))\n fout.write(unknown)\n break\n\n # Find current position\n pos = f.tell()\n\n # Decompress record\n rec_string = zlib.decompress(f.read(nextrec-pos))\n\n # Find new position of next record\n nextrec = fout.tell() + len(rec_string) + 12\n\n # Write out record\n fout.write(struct.pack('>I', int(nextrec % 2**32)))\n fout.write(struct.pack('>I', int((nextrec - (nextrec % 2**32)) / 2**32)))\n fout.write(unknown)\n fout.write(rec_string)\n\n # Close the original compressed file\n f.close()\n\n # Set f to be the decompressed file, and skip the first four bytes\n f = fout\n f.seek(4)\n\n else:\n raise Exception(\"Invalid RECFMT: %s\" % recfmt)\n\n # Loop through records, and add them to the list\n while True:\n r = _read_record(f)\n records.append(r)\n if 'end' in r:\n if r['end']:\n break\n\n # Close the file\n f.close()\n\n # Find heap data variables\n heap = {}\n for r in records:\n if r['rectype'] == \"HEAP_DATA\":\n heap[r['heap_index']] = r['data']\n\n # Find all variables\n for r in records:\n if r['rectype'] == \"VARIABLE\":\n replace, new = _replace_heap(r['data'], heap)\n if replace:\n r['data'] = new\n variables[r['varname'].lower()] = r['data']\n\n if verbose:\n\n # Print out timestamp info about the file\n for record in records:\n if record['rectype'] == \"TIMESTAMP\":\n print(\"-\"*50)\n print(\"Date: %s\" % record['date'])\n print(\"User: %s\" 
% record['user'])\n print(\"Host: %s\" % record['host'])\n break\n\n # Print out version info about the file\n for record in records:\n if record['rectype'] == \"VERSION\":\n print(\"-\"*50)\n print(\"Format: %s\" % record['format'])\n print(\"Architecture: %s\" % record['arch'])\n print(\"Operating System: %s\" % record['os'])\n print(\"IDL Version: %s\" % record['release'])\n break\n\n # Print out identification info about the file\n for record in records:\n if record['rectype'] == \"IDENTIFICATON\":\n print(\"-\"*50)\n print(\"Author: %s\" % record['author'])\n print(\"Title: %s\" % record['title'])\n print(\"ID Code: %s\" % record['idcode'])\n break\n\n print(\"-\"*50)\n print(\"Successfully read %i records of which:\" % \\\n (len(records)))\n\n # Create convenience list of record types\n rectypes = [r['rectype'] for r in records]\n\n for rt in set(rectypes):\n if rt != 'END_MARKER':\n print(\" - %i are of type %s\" % (rectypes.count(rt), rt))\n print(\"-\"*50)\n\n if 'VARIABLE' in rectypes:\n print(\"Available variables:\")\n for var in variables:\n print(\" - %s [%s]\" % (var, type(variables[var])))\n print(\"-\"*50)\n\n if idict:\n for var in variables:\n idict[var] = variables[var]\n return idict\n else:\n return variables\n"
] | [
[
"numpy.sum",
"numpy.asarray",
"numpy.amax",
"numpy.isscalar",
"numpy.vstack",
"numpy.concatenate",
"numpy.multiply.reduce",
"numpy.multiply.outer",
"scipy.misc.factorial",
"numpy.where",
"numpy.linspace",
"numpy.nonzero",
"numpy.zeros",
"numpy.searchsorted",
"numpy.arange",
"numpy.subtract.outer",
"numpy.zeros_like",
"numpy.sign",
"scipy.lib.six.moves.xrange",
"numpy.atleast_1d",
"numpy.dot"
],
[
"numpy.complex64",
"numpy.complex128",
"numpy.compat.asstr",
"numpy.recarray",
"numpy.array"
]
] |
hendriksanta/probability | [
"6eedc0f01a539b3bee7be28ccd2a9cce15d92f7f",
"6eedc0f01a539b3bee7be28ccd2a9cce15d92f7f"
] | [
"tensorflow_probability/python/distributions/student_t_process.py",
"tensorflow_probability/python/distributions/joint_distribution_vmap_mixin.py"
] | [
"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"The StudentTProcess distribution class.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nimport warnings\n\n# Dependency imports\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python.bijectors import identity as identity_bijector\nfrom tensorflow_probability.python.distributions import distribution\nfrom tensorflow_probability.python.distributions import multivariate_student_t\nfrom tensorflow_probability.python.distributions import student_t\nfrom tensorflow_probability.python.internal import assert_util\nfrom tensorflow_probability.python.internal import distribution_util\nfrom tensorflow_probability.python.internal import dtype_util\nfrom tensorflow_probability.python.internal import reparameterization\nfrom tensorflow_probability.python.internal import tensor_util\nfrom tensorflow_probability.python.internal import tensorshape_util\nfrom tensorflow.python.util import deprecation # pylint: disable=g-direct-tensorflow-import\n\n__all__ = [\n 'StudentTProcess',\n]\n\n\ndef _add_diagonal_shift(matrix, shift):\n return tf.linalg.set_diag(\n matrix, tf.linalg.diag_part(matrix) + shift, name='add_diagonal_shift')\n\n\ndef make_cholesky_factored_marginal_fn(jitter):\n \"\"\"Construct a `marginal_fn` for use with `tfd.StudentTProcess`.\n\n The returned function computes the Cholesky factorization of the input\n covariance plus a diagonal jitter, and uses that for the `scale` of a\n `tfd.MultivariateNormalLinearOperator`.\n\n Args:\n jitter: `float` scalar `Tensor` added to the diagonal of the covariance\n matrix to ensure positive definiteness of the covariance matrix.\n\n Returns:\n marginal_fn: A Python function that takes a location, covariance matrix,\n optional `validate_args`, `allow_nan_stats` and `name` arguments, and\n returns a `tfd.MultivariateNormalLinearOperator`.\n \"\"\"\n def marginal_fn(\n df,\n loc,\n covariance,\n validate_args=False,\n allow_nan_stats=False,\n name='marginal_distribution'):\n squared_scale = ((df - 2.) / df)[\n ..., tf.newaxis, tf.newaxis] * covariance\n scale = tf.linalg.LinearOperatorLowerTriangular(\n tf.linalg.cholesky(_add_diagonal_shift(squared_scale, jitter)),\n is_non_singular=True,\n name='StudentTProcessScaleLinearOperator')\n return multivariate_student_t.MultivariateStudentTLinearOperator(\n df=df,\n loc=loc,\n scale=scale,\n validate_args=validate_args,\n allow_nan_stats=allow_nan_stats,\n name=name)\n\n return marginal_fn\n\n\nclass StudentTProcess(distribution.Distribution):\n \"\"\"Marginal distribution of a Student's T process at finitely many points.\n\n A Student's T process (TP) is an indexed collection of random variables, any\n finite collection of which are jointly Multivariate Student's T. 
While this\n definition applies to finite index sets, it is typically implicit that the\n index set is infinite; in applications, it is often some finite dimensional\n real or complex vector space. In such cases, the TP may be thought of as a\n distribution over (real- or complex-valued) functions defined over the index\n set.\n\n Just as Student's T distributions are fully specified by their degrees of\n freedom, location and scale, a Student's T process can be completely specified\n by a degrees of freedom parameter, mean function and covariance function.\n Let `S` denote the index set and `K` the space in\n which each indexed random variable takes its values (again, often R or C).\n The mean function is then a map `m: S -> K`, and the covariance function,\n or kernel, is a positive-definite function `k: (S x S) -> K`. The properties\n of functions drawn from a TP are entirely dictated (up to translation) by\n the form of the kernel function.\n\n This `Distribution` represents the marginal joint distribution over function\n values at a given finite collection of points `[x[1], ..., x[N]]` from the\n index set `S`. By definition, this marginal distribution is just a\n multivariate Student's T distribution, whose mean is given by the vector\n `[ m(x[1]), ..., m(x[N]) ]` and whose covariance matrix is constructed from\n pairwise applications of the kernel function to the given inputs:\n\n ```none\n | k(x[1], x[1]) k(x[1], x[2]) ... k(x[1], x[N]) |\n | k(x[2], x[1]) k(x[2], x[2]) ... k(x[2], x[N]) |\n | ... ... ... |\n | k(x[N], x[1]) k(x[N], x[2]) ... k(x[N], x[N]) |\n ```\n\n For this to be a valid covariance matrix, it must be symmetric and positive\n definite; hence the requirement that `k` be a positive definite function\n (which, by definition, says that the above procedure will yield PD matrices).\n\n Note also we use a parameterization as suggested in [1], which requires `df`\n to be greater than 2. This allows for the covariance for any finite\n dimensional marginal of the TP (a multivariate Student's T distribution) to\n just be the PD matrix generated by the kernel.\n\n\n #### Mathematical Details\n\n The probability density function (pdf) is a multivariate Student's T whose\n parameters are derived from the TP's properties:\n\n ```none\n pdf(x; df, index_points, mean_fn, kernel) = MultivariateStudentT(df, loc, K)\n K = (df - 2) / df * (kernel.matrix(index_points, index_points) +\n observation_noise_variance * eye(N))\n loc = (x - mean_fn(index_points))^T @ K @ (x - mean_fn(index_points))\n ```\n\n where:\n\n * `df` is the degrees of freedom parameter for the TP.\n * `index_points` are points in the index set over which the TP is defined,\n * `mean_fn` is a callable mapping the index set to the TP's mean values,\n * `kernel` is `PositiveSemidefiniteKernel`-like and represents the covariance\n function of the TP,\n * `observation_noise_variance` is a term added to the diagonal of the kernel\n matrix. In the limit of `df` to `inf`, this represents the observation noise\n of a gaussian likelihood.\n * `eye(N)` is an N-by-N identity matrix.\n\n #### Examples\n\n ##### Draw joint samples from a TP prior\n\n ```python\n import numpy as np\n import tensorflow.compat.v2 as tf\n import tensorflow_probability as tfp\n\n tf.enable_v2_behavior()\n\n tfd = tfp.distributions\n psd_kernels = tfp.math.psd_kernels\n\n num_points = 100\n # Index points should be a collection (100, here) of feature vectors. 
In this\n # example, we're using 1-d vectors, so we just need to reshape the output from\n # np.linspace, to give a shape of (100, 1).\n index_points = np.expand_dims(np.linspace(-1., 1., num_points), -1)\n\n # Define a kernel with default parameters.\n kernel = psd_kernels.ExponentiatedQuadratic()\n\n tp = tfd.StudentTProcess(3., kernel, index_points)\n\n samples = tp.sample(10)\n # ==> 10 independently drawn, joint samples at `index_points`\n\n noisy_tp = tfd.StudentTProcess(\n df=3.,\n kernel=kernel,\n index_points=index_points)\n noisy_samples = noisy_tp.sample(10)\n # ==> 10 independently drawn, noisy joint samples at `index_points`\n ```\n\n ##### Optimize kernel parameters via maximum marginal likelihood.\n\n ```python\n # Suppose we have some data from a known function. Note the index points in\n # general have shape `[b1, ..., bB, f1, ..., fF]` (here we assume `F == 1`),\n # so we need to explicitly consume the feature dimensions (just the last one\n # here).\n f = lambda x: np.sin(10*x[..., 0]) * np.exp(-x[..., 0]**2)\n observed_index_points = np.expand_dims(np.random.uniform(-1., 1., 50), -1)\n # Squeeze to take the shape from [50, 1] to [50].\n observed_values = f(observed_index_points)\n\n amplitude = tfp.util.TransformedVariable(\n 1., tfp.bijectors.Softplus(), dtype=np.float64, name='amplitude')\n length_scale = tfp.util.TransformedVariable(\n 1., tfp.bijectors.Softplus(), dtype=np.float64, name='length_scale')\n\n # Define a kernel with trainable parameters.\n kernel = psd_kernels.ExponentiatedQuadratic(\n amplitude=amplitude,\n length_scale=length_scale)\n\n tp = tfd.StudentTProcess(3., kernel, observed_index_points)\n\n optimizer = tf.optimizers.Adam()\n\n @tf.function\n def optimize():\n with tf.GradientTape() as tape:\n loss = -tp.log_prob(observed_values)\n grads = tape.gradient(loss, tp.trainable_variables)\n optimizer.apply_gradients(zip(grads, tp.trainable_variables))\n return loss\n\n for i in range(1000):\n nll = optimize()\n if i % 100 == 0:\n print(\"Step {}: NLL = {}\".format(i, nll))\n print(\"Final NLL = {}\".format(nll))\n ```\n\n #### References\n\n [1]: Amar Shah, Andrew Gordon Wilson, and Zoubin Ghahramani. Student-t\n Processes as Alternatives to Gaussian Processes. In _Artificial\n Intelligence and Statistics_, 2014.\n https://www.cs.cmu.edu/~andrewgw/tprocess.pdf\n \"\"\"\n\n @deprecation.deprecated_args(\n '2021-06-26',\n '`jitter` is deprecated; please use `marginal_fn` directly.',\n 'jitter')\n def __init__(self,\n df,\n kernel,\n index_points=None,\n mean_fn=None,\n observation_noise_variance=0.,\n marginal_fn=None,\n jitter=1e-6,\n validate_args=False,\n allow_nan_stats=False,\n name='StudentTProcess'):\n \"\"\"Instantiate a StudentTProcess Distribution.\n\n Args:\n df: Positive Floating-point `Tensor` representing the degrees of freedom.\n Must be greater than 2.\n kernel: `PositiveSemidefiniteKernel`-like instance representing the\n TP's covariance function.\n index_points: `float` `Tensor` representing finite (batch of) vector(s) of\n points in the index set over which the TP is defined. Shape has the form\n `[b1, ..., bB, e, f1, ..., fF]` where `F` is the number of feature\n dimensions and must equal `kernel.feature_ndims` and `e` is the number\n (size) of index points in each batch. Ultimately this distribution\n corresponds to a `e`-dimensional multivariate Student's T. 
The batch\n shape must be broadcastable with `kernel.batch_shape` and any batch dims\n yielded by `mean_fn`.\n mean_fn: Python `callable` that acts on `index_points` to produce a (batch\n of) vector(s) of mean values at `index_points`. Takes a `Tensor` of\n shape `[b1, ..., bB, f1, ..., fF]` and returns a `Tensor` whose shape is\n broadcastable with `[b1, ..., bB]`. Default value: `None` implies\n constant zero function.\n observation_noise_variance: `float` `Tensor` representing (batch of)\n scalar variance(s) of the noise in the Normal likelihood\n distribution of the model. If batched, the batch shape must be\n broadcastable with the shapes of all other batched parameters\n (`kernel.batch_shape`, `index_points`, etc.).\n Default value: `0.`\n marginal_fn: A Python callable that takes a location, covariance matrix,\n optional `validate_args`, `allow_nan_stats` and `name` arguments, and\n returns a multivariate normal subclass of `tfd.Distribution`.\n Default value: `None`, in which case a Cholesky-factorizing function is\n is created using `make_cholesky_factorizing_marginal_fn` and the\n `jitter` argument.\n jitter: `float` scalar `Tensor` added to the diagonal of the covariance\n matrix to ensure positive definiteness of the covariance matrix.\n Default value: `1e-6`.\n validate_args: Python `bool`, default `False`. When `True` distribution\n parameters are checked for validity despite possibly degrading runtime\n performance. When `False` invalid inputs may silently render incorrect\n outputs.\n Default value: `False`.\n allow_nan_stats: Python `bool`, default `True`. When `True`,\n statistics (e.g., mean, mode, variance) use the value \"`NaN`\" to\n indicate the result is undefined. When `False`, an exception is raised\n if one or more of the statistic's batch members are undefined.\n Default value: `False`.\n name: Python `str` name prefixed to Ops created by this class.\n Default value: \"StudentTProcess\".\n\n Raises:\n ValueError: if `mean_fn` is not `None` and is not callable.\n \"\"\"\n parameters = dict(locals())\n with tf.name_scope(name) as name:\n dtype = dtype_util.common_dtype(\n [df, index_points, observation_noise_variance, jitter], tf.float32)\n df = tensor_util.convert_nonref_to_tensor(df, dtype=dtype, name='df')\n observation_noise_variance = tensor_util.convert_nonref_to_tensor(\n observation_noise_variance,\n dtype=dtype,\n name='observation_noise_variance')\n index_points = tensor_util.convert_nonref_to_tensor(\n index_points, dtype=dtype, name='index_points')\n jitter = tensor_util.convert_nonref_to_tensor(\n jitter, dtype=dtype, name='jitter')\n\n self._kernel = kernel\n self._index_points = index_points\n # Default to a constant zero function, borrowing the dtype from\n # index_points to ensure consistency.\n if mean_fn is None:\n mean_fn = lambda x: tf.zeros([1], dtype=dtype)\n else:\n if not callable(mean_fn):\n raise ValueError('`mean_fn` must be a Python callable')\n self._df = df\n self._observation_noise_variance = observation_noise_variance\n self._mean_fn = mean_fn\n self._jitter = jitter\n if marginal_fn is None:\n self._marginal_fn = make_cholesky_factored_marginal_fn(jitter)\n else:\n self._marginal_fn = marginal_fn\n\n with tf.name_scope('init'):\n super(StudentTProcess, self).__init__(\n dtype=dtype,\n reparameterization_type=reparameterization.FULLY_REPARAMETERIZED,\n validate_args=validate_args,\n allow_nan_stats=allow_nan_stats,\n parameters=parameters,\n name=name)\n\n def _is_univariate_marginal(self, index_points):\n \"\"\"True if the given 
index_points would yield a univariate marginal.\n\n Args:\n index_points: the set of index set locations at which to compute the\n marginal Student T distribution. If this set is of size 1, the marginal is\n univariate.\n\n Returns:\n is_univariate: Boolean indicating whether the marginal is univariate or\n multivariate. In the case of dynamic shape in the number of index points,\n defaults to \"multivariate\" since that's the best we can do.\n \"\"\"\n num_index_points = tf.compat.dimension_value(\n index_points.shape[-(self.kernel.feature_ndims + 1)])\n if num_index_points is None:\n warnings.warn(\n 'Unable to detect statically whether the number of index_points is '\n '1. As a result, defaulting to treating the marginal Student T '\n 'Process at `index_points` as a multivariate Student T. This makes '\n 'some methods, like `cdf` unavailable.')\n return num_index_points == 1\n\n def _compute_covariance(self, index_points):\n kernel_matrix = self.kernel.matrix(index_points, index_points)\n if self._is_univariate_marginal(index_points):\n # kernel_matrix thus has shape [..., 1, 1]; squeeze off the last dims and\n # tack on the observation noise variance.\n return (tf.squeeze(kernel_matrix, axis=[-2, -1]) +\n self.observation_noise_variance)\n else:\n observation_noise_variance = tf.convert_to_tensor(\n self.observation_noise_variance)\n # We are compute K + obs_noise_variance * I. The shape of this matrix\n # is going to be a broadcast of the shapes of K and obs_noise_variance *\n # I.\n broadcast_shape = distribution_util.get_broadcast_shape(\n kernel_matrix,\n # We pad with two single dimension since this represents a batch of\n # scaled identity matrices.\n observation_noise_variance[..., tf.newaxis, tf.newaxis])\n\n kernel_matrix = tf.broadcast_to(kernel_matrix, broadcast_shape)\n return _add_diagonal_shift(\n kernel_matrix, observation_noise_variance[..., tf.newaxis])\n\n def get_marginal_distribution(self, index_points=None):\n \"\"\"Compute the marginal over function values at `index_points`.\n\n Args:\n index_points: `float` `Tensor` representing finite (batch of) vector(s) of\n points in the index set over which the TP is defined. Shape has the form\n `[b1, ..., bB, e, f1, ..., fF]` where `F` is the number of feature\n dimensions and must equal `kernel.feature_ndims` and `e` is the number\n (size) of index points in each batch. Ultimately this distribution\n corresponds to a `e`-dimensional multivariate student t. The batch shape\n must be broadcastable with `kernel.batch_shape` and any batch dims\n yielded by `mean_fn`.\n\n Returns:\n marginal: a `StudentT` or `MultivariateStudentT` distribution,\n according to whether `index_points` consists of one or many index\n points, respectively.\n \"\"\"\n with self._name_and_control_scope('get_marginal_distribution'):\n df = tf.convert_to_tensor(self.df)\n index_points = self._get_index_points(index_points)\n covariance = self._compute_covariance(index_points)\n loc = self._mean_fn(index_points)\n\n # If we're sure the number of index points is 1, we can just construct a\n # scalar Normal. This has computational benefits and supports things like\n # CDF that aren't otherwise straightforward to provide.\n if self._is_univariate_marginal(index_points):\n squared_scale = (df - 2.) 
/ df * covariance\n scale = tf.sqrt(squared_scale)\n # `loc` has a trailing 1 in the shape; squeeze it.\n loc = tf.squeeze(loc, axis=-1)\n return student_t.StudentT(\n df=df,\n loc=loc,\n scale=scale,\n validate_args=self.validate_args,\n allow_nan_stats=self.allow_nan_stats,\n name='marginal_distribution')\n else:\n return self._marginal_fn(\n df=df,\n loc=loc,\n covariance=covariance,\n validate_args=self.validate_args,\n allow_nan_stats=self.allow_nan_stats,\n name='marginal_distribution')\n\n @property\n def df(self):\n return self._df\n\n @property\n def observation_noise_variance(self):\n return self._observation_noise_variance\n\n @property\n def mean_fn(self):\n return self._mean_fn\n\n @property\n def kernel(self):\n return self._kernel\n\n @property\n def index_points(self):\n return self._index_points\n\n @property\n def marginal_fn(self):\n return self._marginal_fn\n\n @property\n def jitter(self):\n return self._jitter\n\n def _get_index_points(self, index_points=None):\n \"\"\"Return `index_points` if not None, else `self._index_points`.\n\n Args:\n index_points: if given, this is what is returned; else,\n `self._index_points`\n\n Returns:\n index_points: the given arg, if not None, else the class member\n `self._index_points`.\n\n Rases:\n ValueError: if `index_points` and `self._index_points` are both `None`.\n \"\"\"\n if self._index_points is None and index_points is None:\n raise ValueError(\n 'This StudentTProcess instance was not instantiated with a value for '\n 'index_points. One must therefore be provided when calling sample, '\n 'log_prob, and other such methods.')\n return (index_points if index_points is not None\n else tf.convert_to_tensor(self._index_points))\n\n def _log_prob(self, value, index_points=None):\n return self.get_marginal_distribution(index_points).log_prob(value)\n\n def _batch_shape_tensor(self, index_points=None):\n index_points = self._get_index_points(index_points)\n return functools.reduce(tf.broadcast_dynamic_shape, [\n tf.shape(index_points)[:-(self.kernel.feature_ndims + 1)],\n self.kernel.batch_shape_tensor(),\n tf.shape(self.observation_noise_variance),\n tf.shape(self.df)\n ])\n\n def _batch_shape(self, index_points=None):\n index_points = (\n index_points if index_points is not None else self._index_points)\n return functools.reduce(\n tf.broadcast_static_shape,\n [index_points.shape[:-(self.kernel.feature_ndims + 1)],\n self.kernel.batch_shape,\n self.observation_noise_variance.shape,\n self.df.shape])\n\n def _event_shape_tensor(self, index_points=None):\n index_points = self._get_index_points(index_points)\n if self._is_univariate_marginal(index_points):\n return tf.constant([], dtype=tf.int32)\n else:\n # The examples index is one position to the left of the feature dims.\n examples_index = -(self.kernel.feature_ndims + 1)\n return tf.shape(index_points)[examples_index:examples_index + 1]\n\n def _event_shape(self, index_points=None):\n index_points = (\n index_points if index_points is not None else self._index_points)\n if self._is_univariate_marginal(index_points):\n return tf.TensorShape([])\n else:\n # The examples index is one position to the left of the feature dims.\n examples_index = -(self.kernel.feature_ndims + 1)\n shape = index_points.shape[examples_index:examples_index + 1]\n if tensorshape_util.rank(shape) is None:\n return tf.TensorShape([None])\n return shape\n\n def _sample_n(self, n, seed=None, index_points=None):\n return self.get_marginal_distribution(index_points).sample(n, seed=seed)\n\n def 
_log_survival_function(self, value, index_points=None):\n return self.get_marginal_distribution(\n index_points).log_survival_function(value)\n\n def _survival_function(self, value, index_points=None):\n return self.get_marginal_distribution(index_points).survival_function(value)\n\n def _log_cdf(self, value, index_points=None):\n return self.get_marginal_distribution(index_points).log_cdf(value)\n\n def _entropy(self, index_points=None):\n return self.get_marginal_distribution(index_points).entropy()\n\n def _mean(self, index_points=None):\n return self.get_marginal_distribution(index_points).mean()\n\n def _quantile(self, value, index_points=None):\n return self.get_marginal_distribution(index_points).quantile(value)\n\n def _stddev(self, index_points=None):\n return tf.sqrt(self._variance(index_points=index_points))\n\n def _variance(self, index_points=None):\n index_points = self._get_index_points(index_points)\n\n kernel_diag = self.kernel.apply(index_points, index_points, example_ndims=1)\n if self._is_univariate_marginal(index_points):\n return (tf.squeeze(kernel_diag, axis=[-1]) +\n self.observation_noise_variance)\n else:\n # We are computing diag(K + obs_noise_variance * I) = diag(K) +\n # obs_noise_variance. We pad obs_noise_variance with a dimension in order\n # to broadcast batch shapes of kernel_diag and obs_noise_variance (since\n # kernel_diag has an extra dimension corresponding to the number of index\n # points).\n return kernel_diag + self.observation_noise_variance[..., tf.newaxis]\n\n def _covariance(self, index_points=None):\n # Using the result of get_marginal_distribution would involve an extra\n # matmul, and possibly even an unnecessary cholesky first. We can avoid that\n # by going straight through the kernel function.\n return self._compute_covariance(self._get_index_points(index_points))\n\n def _mode(self, index_points=None):\n return self.get_marginal_distribution(index_points).mode()\n\n def _default_event_space_bijector(self):\n return identity_bijector.Identity(validate_args=self.validate_args)\n\n def _parameter_control_dependencies(self, is_init):\n if not self.validate_args:\n return []\n assertions = []\n if is_init != tensor_util.is_ref(self.df):\n assertions.append(\n assert_util.assert_greater(\n self.df, dtype_util.as_numpy_dtype(self.df.dtype)(2.),\n message='`df` must be greater than 2.'))\n return assertions\n",
"# Copyright 2020 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"`JointDistribution` mixin class implementing automatic vectorization.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow_probability.python.bijectors import bijector as bijector_lib\nfrom tensorflow_probability.python.distributions import joint_distribution as joint_distribution_lib\nfrom tensorflow_probability.python.internal import prefer_static as ps\nfrom tensorflow_probability.python.internal import samplers\nfrom tensorflow_probability.python.internal import vectorization_util\n\n\nJAX_MODE = False\n\n\ndef _might_have_nonzero_size(sample_shape):\n static_size = tf.get_static_value(tf.size(sample_shape))\n return (static_size is None) or static_size >= 1\n\n\ndef _might_have_excess_ndims(flat_value, flat_core_ndims):\n for v, nd in zip(flat_value, flat_core_ndims):\n static_excess_ndims = (\n 0 if v is None else\n tf.get_static_value(ps.convert_to_shape_tensor(ps.rank(v) - nd)))\n if static_excess_ndims is None or static_excess_ndims > 0:\n return True\n return False\n\n\ndef _pad_value_to_full_length(value, dtype):\n \"\"\"Fills a partial `value` structure with `None`s for any unspecified RVs.\"\"\"\n # If dtype is dict-like, set missing values to `None`.\n if hasattr(dtype, 'keys'):\n return type(dtype)({k: value.get(k, None) for k in dtype.keys()})\n\n # Otherwise, dtype is a sequence, so append `None`s.\n return tf.nest.pack_sequence_as(dtype,\n [value[i] if i < len(value) else None\n for i in range(len(dtype))])\n\n\n# Lint doesn't know that docstrings are defined in the base JD class.\n# pylint: disable=missing-docstring\nclass JointDistributionVmapMixin(object):\n \"\"\"A joint distribution with automatically vectorized sample and log-prob.\n\n Auto-vectorized variants of JointDistribution treat the underlying\n model as describing a single possible world, or equivalently, as\n specifying the process of generating a single sample from the model.\n Drawing multiple samples, and computing batched log-probs, is accomplished\n using `tf.vectorized_map`. In many cases this allows for significant\n simplication of the model. 
For example, the following\n manually-vectorized `tfd.JointDistributionCoroutine` model:\n\n ```python\n def model_fn():\n x = yield tfd.JointDistributionCoroutine.Root(\n tfd.Normal(0., tf.ones([3])))\n y = yield tfd.JointDistributionCoroutine.Root(\n tfd.Normal(0., 1.)))\n z = yield tfd.Normal(x[..., :2] + y[..., tf.newaxis], 1.)\n\n can be written in auto-vectorized form as\n\n ```python\n def model_fn():\n x = yield tfd.Normal(0., tf.ones([3]))\n y = yield tfd.Normal(0., 1.))\n z = yield tfd.Normal(x[:2] + y, 1.)\n ```\n\n in which we were able to drop the specification of `Root` nodes and to\n avoid explicitly accounting for batch dimensions when indexing and slicing\n computed quantities in the third line.\n\n Note: auto-vectorization is still experimental and some TensorFlow ops may\n be unsupported.\n\n A limitation relative to standard `JointDistribution`s is that the\n `sample_distributions()` method does not currently support (nontrivial) sample\n shapes.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self._use_vectorized_map = kwargs.pop('use_vectorized_map', True)\n super(JointDistributionVmapMixin, self).__init__(*args, **kwargs)\n\n # TODO(b/166658748): Drop this (make it always True).\n _stateful_to_stateless = JAX_MODE\n\n @property\n def use_vectorized_map(self):\n return self._use_vectorized_map\n\n @property\n def _single_sample_ndims(self):\n \"\"\"Computes the rank of values produced by executing the base model.\"\"\"\n result = []\n for d in self._get_single_sample_distributions():\n batch_ndims = ps.rank_from_shape(d.batch_shape_tensor, d.batch_shape)\n result.append(tf.nest.map_structure(\n lambda a, b, nd=batch_ndims: nd + ps.rank_from_shape(a, b),\n d.event_shape_tensor(),\n d.event_shape))\n return result\n\n def sample_distributions(self, sample_shape=(), seed=None, value=None,\n name='sample_distributions', **kwargs):\n with self._name_and_control_scope(name):\n\n value_might_have_sample_dims = False\n if (value is None) and kwargs:\n value = self._resolve_value_from_kwargs(**kwargs)\n if value is not None:\n value = _pad_value_to_full_length(value, self.dtype)\n value = tf.nest.map_structure(\n lambda v: v if v is None else tf.convert_to_tensor(v), value)\n value_might_have_sample_dims = _might_have_excess_ndims(\n flat_value=self._model_flatten(value),\n flat_core_ndims=self._single_sample_ndims)\n\n # TODO(b/157953455): Return distributions as CompositeTensors once\n # vectorized_map supports this.\n if self.use_vectorized_map and (\n _might_have_nonzero_size(sample_shape) or\n value_might_have_sample_dims):\n raise NotImplementedError('`sample_distributions` with nontrivial '\n 'sample shape is not yet supported '\n 'for autovectorized JointDistributions.')\n else:\n ds, xs = self._call_flat_sample_distributions(\n sample_shape=sample_shape, seed=seed, value=value)\n return self._model_unflatten(ds), self._model_unflatten(xs)\n\n def _sample_n(self, sample_shape, seed, value=None, **kwargs):\n\n value_might_have_sample_dims = False\n if (value is None) and kwargs:\n value = self._resolve_value_from_kwargs(**kwargs)\n if value is not None:\n value = _pad_value_to_full_length(value, self.dtype)\n value = tf.nest.map_structure(\n lambda v: v if v is None else tf.convert_to_tensor(v), value)\n value_might_have_sample_dims = _might_have_excess_ndims(\n flat_value=self._model_flatten(value),\n flat_core_ndims=self._single_sample_ndims)\n\n if not self.use_vectorized_map or not (\n _might_have_nonzero_size(sample_shape) or\n value_might_have_sample_dims):\n 
# No need to auto-vectorize.\n xs = self._call_flat_sample_distributions(\n sample_shape=sample_shape, seed=seed, value=value)[1]\n return self._model_unflatten(xs)\n\n # Set up for autovectorized sampling. To support the `value` arg, we need to\n # first understand which dims are from the model itself, then wrap\n # `_call_flat_sample_distributions` to batch over all remaining dims.\n value_core_ndims = None\n if value is not None:\n value_core_ndims = tf.nest.map_structure(\n lambda v, nd: None if v is None else nd,\n value, self._model_unflatten(self._single_sample_ndims),\n check_types=False)\n batch_flat_sample = vectorization_util.make_rank_polymorphic(\n lambda v, seed: self._call_flat_sample_distributions( # pylint: disable=g-long-lambda\n sample_shape=(), seed=seed, value=v)[1],\n core_ndims=[value_core_ndims, None],\n validate_args=self.validate_args)\n\n # Draw samples.\n vectorized_flat_sample = vectorization_util.iid_sample(\n # Redefine the polymorphic fn to hack around `make_rank_polymorphic`\n # not currently supporting keyword args.\n lambda v, seed: batch_flat_sample(v, seed), sample_shape) # pylint: disable=unnecessary-lambda\n xs = vectorized_flat_sample(value, seed=seed)\n return self._model_unflatten(xs)\n\n # Redefine `_map_measure_over_dists` to autovectorize the measure if needed.\n def _map_measure_over_dists(self, attr, value):\n if any(x is None for x in self._model_flatten(value)):\n raise ValueError('No `value` part can be `None`; saw: {}.'.format(value))\n if value is not None:\n value = self._model_flatten(value)\n\n def map_measure_fn(value):\n # We always provide a seed, since _flat_sample_distributions will\n # unconditionally split the seed.\n with tf.name_scope('map_measure_fn'):\n constant_seed = samplers.zeros_seed()\n return [getattr(d, attr)(x) for (d, x) in zip(\n *self._flat_sample_distributions(value=value, seed=constant_seed))]\n if self.use_vectorized_map:\n map_measure_fn = vectorization_util.make_rank_polymorphic(\n map_measure_fn,\n core_ndims=[self._single_sample_ndims],\n validate_args=self.validate_args)\n\n return map_measure_fn(value)\n\n def _default_event_space_bijector(self, *args, **kwargs):\n bijector_class = joint_distribution_lib._DefaultJointBijector # pylint: disable=protected-access\n if self.use_vectorized_map:\n bijector_class = _DefaultJointBijectorAutoBatched\n if bool(args) or bool(kwargs):\n return self.experimental_pin(\n *args, **kwargs).experimental_default_event_space_bijector()\n return bijector_class(self)\n\n\nclass _DefaultJointBijectorAutoBatched(bijector_lib.Bijector):\n \"\"\"Automatically vectorized support bijector for autobatched JDs.\"\"\"\n\n def __init__(self, jd, **kwargs):\n parameters = dict(locals())\n self._jd = jd\n self._bijector_kwargs = kwargs\n self._joint_bijector = joint_distribution_lib._DefaultJointBijector(\n jd=self._jd, **self._bijector_kwargs)\n super(_DefaultJointBijectorAutoBatched, self).__init__(\n forward_min_event_ndims=self._joint_bijector.forward_min_event_ndims,\n inverse_min_event_ndims=self._joint_bijector.inverse_min_event_ndims,\n validate_args=self._joint_bijector.validate_args,\n parameters=parameters,\n name=self._joint_bijector.name)\n # Wrap the non-batched `joint_bijector` to take batched args.\n # pylint: disable=protected-access\n self._forward = self._vectorize_member_fn(\n lambda bij, x: bij._forward(x),\n core_ndims=[self._joint_bijector.forward_min_event_ndims])\n self._inverse = self._vectorize_member_fn(\n lambda bij, y: bij._inverse(y),\n 
core_ndims=[self._joint_bijector.inverse_min_event_ndims])\n self._forward_log_det_jacobian = self._vectorize_member_fn(\n lambda bij, x: bij._forward_log_det_jacobian( # pylint: disable=g-long-lambda\n x, event_ndims=bij.forward_min_event_ndims),\n core_ndims=[self._joint_bijector.forward_min_event_ndims])\n self._inverse_log_det_jacobian = self._vectorize_member_fn(\n lambda bij, y: bij._inverse_log_det_jacobian( # pylint: disable=g-long-lambda\n y, event_ndims=bij.inverse_min_event_ndims),\n core_ndims=[self._joint_bijector.inverse_min_event_ndims])\n for attr in ('_forward_event_shape',\n '_forward_event_shape_tensor',\n '_inverse_event_shape',\n '_inverse_event_shape_tensor',\n '_forward_dtype',\n '_inverse_dtype',\n 'forward_event_ndims',\n 'inverse_event_ndims',):\n setattr(self, attr, getattr(self._joint_bijector, attr))\n # pylint: enable=protected-access\n\n def _vectorize_member_fn(self, member_fn, core_ndims):\n return vectorization_util.make_rank_polymorphic(\n lambda x: member_fn(self._joint_bijector, x),\n core_ndims=core_ndims)\n"
] | [
[
"tensorflow.compat.v2.name_scope",
"tensorflow.compat.v2.squeeze",
"tensorflow.compat.v2.shape",
"tensorflow.compat.v2.sqrt",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.zeros",
"tensorflow.compat.v2.broadcast_to",
"tensorflow.compat.v2.TensorShape",
"tensorflow.python.util.deprecation.deprecated_args",
"tensorflow.compat.v2.compat.dimension_value",
"tensorflow.compat.v2.constant",
"tensorflow.compat.v2.linalg.diag_part"
],
[
"tensorflow.compat.v2.name_scope",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.size"
]
] |
eberharf/cfl | [
"077b99a05824f1371ac47d76dfed6bb160222668"
] | [
"testing/test_cde_io.py"
] | [
"import os\nimport shutil\nfrom shutil import Error\nimport unittest\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom cdes_for_testing import all_cdes \nfrom cfl.dataset import Dataset\n\n''' The following code runs all tests in CondExpInputTests on all implemented\n CondExpXxxx classes.\n'''\n\n\ndef make_cde_io_tests(cond_exp_class):\n\n # generic test class for any CondExpBase descendant \n # (passed in as cond_exp_class)\n class CondExpIOTests(unittest.TestCase):\n def setUp(self): # overriden unittest.TestCase method that will be\n # called in initializaiton\n self.data_info = { 'X_dims' : (10,3), \n 'Y_dims' : (10,2), \n 'Y_type' : 'continuous'}\n self.params = { 'show_plot' : False,\n 'n_epochs' : 2}\n self.ceb = cond_exp_class(self.data_info, self.params)\n\n ## INIT ###############################################################\n def test_init_wrong_input_types(self):\n data_info = 'str is bad'\n params = 'these are not params'\n self.assertRaises(AssertionError, cond_exp_class, data_info, params)\n\n def test_init_wrong_data_info_keys(self):\n data_info = {}\n params = {}\n self.assertRaises(AssertionError, cond_exp_class, data_info, \n params)\n\n def test_init_wrong_data_info_value_types(self):\n data_info = {'X_dims' : None, 'Y_dims' : None, 'Y_type' : None}\n params = {}\n self.assertRaises(AssertionError, cond_exp_class, data_info, \n params)\n\n def test_init_wrong_data_info_values(self):\n data_info = { 'X_dims' : (0,0), \n 'Y_dims' : (0,0), \n 'Y_type' : 'continuous'}\n params = {}\n self.assertRaises(AssertionError, cond_exp_class, data_info, \n params)\n \n data_info = { 'X_dims' : (10,3), \n 'Y_dims' : (12,2), \n 'Y_type' : 'continuous'}\n params = {}\n self.assertRaises(AssertionError, cond_exp_class, data_info, \n params)\n\n def test_init_correct_inputs(self):\n data_info = {'X_dims' : (10,3), \n 'Y_dims' : (10,2), \n 'Y_type' : 'continuous'}\n params = {}\n ceb = cond_exp_class(data_info, params)\n\n ## SAVE_BLOCK #########################################################\n def test_save_block_wrong_input_type(self):\n path = 123\n self.assertRaises(AssertionError, self.ceb.save_block, path)\n\n def test_save_block_correct_input_type(self):\n path = 'not/a/real/path'\n self.ceb.save_block(path)\n shutil.rmtree('not')\n\n ## LOAD_BLOCK #########################################################\n def test_load_block_wrong_input_type(self):\n path = 123\n self.assertRaises(AssertionError, self.ceb.load_block, path)\n\n def test_load_block_correct_input_type(self):\n # should only be run after test_save_block_correct_input_type so \n # there is something to load\n path = 'not/a/real/path'\n self.ceb.save_block(path)\n self.ceb.load_block(path)\n shutil.rmtree('not')\n # check and reset state\n assert self.ceb.trained, 'CDE should be trained after loading'\n self.ceb.trained = False\n\n\n ### TRAIN ############################################################\n def test_train_wrong_input_type(self):\n dataset = 'this is not a Dataset'\n prev_results = 'this is not a dict'\n self.assertRaises(AssertionError, self.ceb.train, dataset, \n prev_results)\n\n def test_train_correct_input_type(self):\n dataset = Dataset(X=np.ones(self.data_info['X_dims']), \n Y=np.zeros(self.data_info['Y_dims']))\n\n # what we expect from train outputs\n tkeys = ['train_loss','val_loss','loss_plot','model_weights','pyx']\n tshapes = {'train_loss' : (self.params['n_epochs'],),\n 'val_loss' : (self.params['n_epochs'],),\n 'pyx' : (self.data_info['Y_dims'])\n }\n\n for prev_results in [None, 
{}]:\n # reset\n self.ceb.trained = False\n\n train_results = self.ceb.train(dataset, prev_results)\n\n # check state\n assert self.ceb.trained, 'CDE should be trained after loading'\n\n # check outputs\n assert set(train_results.keys())==set(tkeys), \\\n f'train should return dict with keys: {tkeys}'\n for k in tshapes.keys():\n assert tshapes[k]==np.array(train_results[k]).shape, \\\n f'expected {k} to have shape {tshapes[k]} but got \\\n {train_results[k].shape}'\n\n def test_train_twice(self):\n dataset = Dataset(X=np.ones(self.data_info['X_dims']), \n Y=np.zeros(self.data_info['Y_dims']))\n prev_results = None\n\n # reset\n self.ceb.trained = False\n\n # what we expect from train outputs first time\n tkeys = ['train_loss','val_loss','loss_plot','model_weights','pyx']\n \n train_results = self.ceb.train(dataset, prev_results)\n\n # check state and outputs\n assert self.ceb.trained, 'CDE should be trained after loading'\n assert set(train_results.keys())==set(tkeys), \\\n f'train should return dict with keys: {tkeys}'\n\n # what we expect from train outputs second time\n tkeys = ['pyx']\n \n train_results = self.ceb.train(dataset, prev_results)\n\n # check state and outputs\n assert self.ceb.trained, 'CDE should be trained after loading'\n assert set(train_results.keys())==set(tkeys), \\\n f'train should return dict with keys: {tkeys}'\n\n\n ### PREDICT ##########################################################\n def test_predict_wrong_input_type(self):\n # artifically set CDE trained = True\n self.ceb.trained = True\n\n dataset = 'this is not a Dataset'\n prev_results = 'this is not a dict'\n self.assertRaises(AssertionError, self.ceb.predict, dataset, \n prev_results)\n\n def test_predict_correct_input_type(self):\n\n dataset = Dataset(X=np.ones(self.data_info['X_dims']), \n Y=np.zeros(self.data_info['Y_dims']))\n prev_results = None\n\n for prev_results in [None, {}]:\n self.ceb.train(dataset, prev_results)\n pred_results = self.ceb.predict(dataset, prev_results)\n\n # check output\n assert set(pred_results.keys())==set(['pyx']), f'pred_results \\\n keys should contain pyx, but contains {pred_results.keys()}'\n assert pred_results['pyx'].shape==self.data_info['Y_dims'], \\\n f\"expected {self.data_info['Y_dims']} but got \\\n {pred_results['pyx'].shape}\"\n \n ### EVALUATE #########################################################\n def test_evaluate_wrong_input_type(self):\n # artifically set CDE trained = True\n self.ceb.trained = True\n \n dataset = 'this is not a Dataset'\n prev_results = 'this is not a dict'\n self.assertRaises(AssertionError, self.ceb.evaluate, dataset)\n\n def test_evaluate_correct_input_type(self):\n\n dataset = Dataset(X=np.ones(self.data_info['X_dims']), \n Y=np.zeros(self.data_info['Y_dims']))\n prev_results = None\n\n self.ceb.train(dataset, prev_results)\n score = self.ceb.evaluate(dataset)\n assert score.shape==()\n assert score.dtype==np.float32\n\n ### BUILD_MODEL ######################################################\n\n def test_build_model(self):\n assert isinstance(self.ceb._build_model(), tf.keras.Sequential)\n\n\n return CondExpIOTests\n\n\nfor cond_exp_class in all_cdes:\n class ConcreteIOTests(make_cde_io_tests(cond_exp_class)):\n pass\n\n"
] | [
[
"numpy.array",
"numpy.ones",
"numpy.zeros"
]
] |
shengxinhu/tvm | [
"06c443e9959452c6da3a911fe0c11e08c5554477"
] | [
"tests/python/unittest/test_tir_ptx_ldmatrix.py"
] | [
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport tvm\nfrom tvm.script import tir as T\nimport numpy as np\nimport tvm.testing\n\n\[email protected]_func\ndef ptx_ldmatrix(\n A: T.Buffer[(16, 16), \"float16\"], B: T.Buffer[(16, 16), \"float16\"], num: T.int32, trans: T.uint8\n) -> None:\n T.func_attr({\"global_symbol\": \"default_function\", \"tir.noalias\": True})\n bx = T.env_thread(\"blockIdx.x\")\n tx = T.env_thread(\"threadIdx.x\")\n T.launch_thread(bx, 1)\n T.launch_thread(tx, 32)\n with T.block():\n A_shared = T.alloc_buffer([16, 16], \"float16\", scope=\"shared\")\n A_local = T.alloc_buffer([8], \"float16\", scope=\"local\")\n\n for i in range(8):\n A_shared[i * 2 + tx // 16, tx % 16] = A[i * 2 + tx // 16, tx % 16]\n\n T.evaluate(\n T.ptx_ldmatrix(\n trans,\n num,\n \".b16\",\n A_local.data,\n 0,\n A_shared.data,\n 16 * (tx % 16) + 8 * (tx // 16),\n dtype=\"float16\",\n )\n )\n\n for k in range(2):\n for j in range(2):\n for i in range(2):\n B[8 * j + tx // 4, 8 * k + (tx % 4) * 2 + i] = A_local[4 * k + 2 * j + i]\n\n\[email protected]_cuda\ndef test_ptx_ldmatrix():\n f = ptx_ldmatrix\n _, _, param_num, param_trans = f.params\n arch = tvm.contrib.nvcc.get_target_compute_version()\n major, minor = tvm.contrib.nvcc.parse_compute_version(arch)\n if major * 10 + minor < 75:\n # Require at least SM75\n return\n for num in [1, 2, 4]:\n for trans in [False, True]:\n mod = tvm.build(f.specialize({param_num: num, param_trans: trans}), target=\"cuda\")\n A_np = np.random.rand(16, 16).astype(\"float16\")\n A_mask_np = np.zeros_like(A_np)\n if num == 1:\n if trans:\n A_mask_np[:8, :8] = A_np[:8, :8].T\n else:\n A_mask_np[:8, :8] = A_np[:8, :8]\n elif num == 2:\n if trans:\n A_mask_np[:8, :8] = A_np[:8, :8].T\n A_mask_np[8:16, :8] = A_np[8:16, :8].T\n else:\n A_mask_np[:16, :8] = A_np[:16, :8]\n else: # num == 4\n if trans:\n A_mask_np[:8, :8] = A_np[:8, :8].T\n A_mask_np[8:16, :8] = A_np[8:16, :8].T\n A_mask_np[:8, 8:16] = A_np[:8, 8:16].T\n A_mask_np[8:16, 8:16] = A_np[8:16, 8:16].T\n else:\n A_mask_np[:16, :16] = A_np[:16, :16]\n B_np = np.zeros((16, 16)).astype(\"float16\")\n dev = tvm.cuda(0)\n A_nd = tvm.nd.array(A_np, device=dev)\n B_nd = tvm.nd.array(B_np, device=dev)\n mod(A_nd, B_nd)\n tvm.testing.assert_allclose(B_nd.numpy(), A_mask_np)\n\n\nif __name__ == \"__main__\":\n test_ptx_ldmatrix()\n"
] | [
[
"numpy.zeros_like",
"numpy.random.rand",
"numpy.zeros"
]
] |
brandontrabucco/playground | [
"069be961aaecb45d75f12f4a71cfa65d7152ea8a"
] | [
"playground/algorithms/ddpg.py"
] | [
"\"\"\"Author: Brandon Trabucco, Copyright 2019, MIT License\"\"\"\n\n\nfrom playground.algorithms.algorithm import Algorithm\nimport tensorflow as tf\n\n\nclass DDPG(Algorithm):\n\n def __init__(\n self,\n policy,\n target_policy,\n qf,\n target_qf,\n replay_buffer,\n reward_scale=1.0,\n discount=0.99,\n observation_key=\"observation\",\n batch_size=32,\n update_every=1,\n update_after=0,\n logger=None,\n logging_prefix=\"ddpg/\"\n ):\n # train a policy using the deep deterministic policy gradient\n Algorithm.__init__(\n self,\n replay_buffer,\n batch_size=batch_size,\n update_every=update_every,\n update_after=update_after,\n logger=logger,\n logging_prefix=logging_prefix)\n\n # each neural network is probabilistic\n self.policy = policy\n self.target_policy = target_policy\n self.qf = qf\n self.target_qf = target_qf\n\n # select into the observation dictionary\n self.observation_key = observation_key\n\n # control some parameters that are important for ddpg\n self.reward_scale = reward_scale\n self.discount = discount\n\n def update_algorithm(\n self,\n observations,\n actions,\n rewards,\n next_observations,\n terminals\n ):\n # select from the observation dictionary\n observations = observations[self.observation_key]\n next_observations = next_observations[self.observation_key]\n\n # build a tape to collect gradients from the policy and critics\n with tf.GradientTape(persistent=True) as tape:\n mean_actions, log_pi = self.policy.expected_value(observations)\n next_mean_actions, next_log_pi = self.target_policy.expected_value(\n next_observations)\n\n # build the q function target value\n inputs = tf.concat([next_observations, next_mean_actions], -1)\n target_qf_value = self.target_qf(inputs)[..., 0]\n self.record(\"target_qf_value\", tf.reduce_mean(target_qf_value).numpy())\n qf_targets = tf.stop_gradient(\n self.reward_scale * rewards + terminals * self.discount * (\n target_qf_value))\n self.record(\"qf_targets\", tf.reduce_mean(qf_targets).numpy())\n\n # build the q function loss\n inputs = tf.concat([observations, actions], -1)\n qf_value = self.qf(inputs)[..., 0]\n self.record(\"qf_value\", tf.reduce_mean(qf_value).numpy())\n qf_loss = tf.reduce_mean(tf.keras.losses.logcosh(qf_targets, qf_value))\n self.record(\"qf_loss\", qf_loss.numpy())\n\n # build the policy loss\n inputs = tf.concat([observations, mean_actions], -1)\n policy_qf_value = self.qf(inputs)[..., 0]\n self.record(\"policy_qf_value\", tf.reduce_mean(policy_qf_value).numpy())\n policy_loss = -tf.reduce_mean(policy_qf_value)\n self.record(\"policy_loss\", policy_loss.numpy())\n\n # back prop gradients\n self.policy.apply_gradients(\n self.policy.compute_gradients(policy_loss, tape))\n self.qf.apply_gradients(\n self.qf.compute_gradients(qf_loss, tape))\n\n # soft update target parameters\n self.target_policy.soft_update(self.policy.get_weights())\n self.target_qf.soft_update(self.qf.get_weights())\n"
] | [
[
"tensorflow.keras.losses.logcosh",
"tensorflow.reduce_mean",
"tensorflow.stop_gradient",
"tensorflow.GradientTape",
"tensorflow.concat"
]
] |
folk85/gen_turb | [
"4390938c4cefae334e95414f83b9c484991bff67"
] | [
"tests/plot_time_space.py"
] | [
"# -*- coding: utf-8 -*-\nimport os\nimport numpy as np\nimport matplotlib as m\nimport matplotlib.pyplot as plt\nfrom scipy.fftpack import *\n\nfrom plot_spectr import *\n\ndef main_routine():\n print(os.getcwd())\n nfile = './store.dat'\n #Read the file by blocks to reduce required memory\n with open(nfile,'r') as f:\n nel = sum(1 for _ in f)\n f.close()\n #repeat for each timesteps\n nk = 64*64 *64\n ntimes = nel / nk\n\ndef get_nel(nfile):\n with open(nfile,'r') as f:\n nel = sum(1 for _ in f)\n f.close()\n return nel\n \ndef plot_spectr(uin,vin,win):\n\n alpha = 1.339e0\n L = 1.0e-1\n sigma = 1.0e+1\n\n # x,y,z = np.genfromtxt('tests/spectr.dat',unpack=True)\n # x,y,z = np.genfromtxt('../hita/spectrum.dat',unpack=True)\n # x1,y1,z1 = np.genfromtxt('../hita/spectrum_32.dat',unpack=True)\n \n uvel,vvel,wvel = np.genfromtxt('./store.dat',unpack=True)\n nk = int(round(np.size(uvel)**(1./3.)))\n nel = nk\n ufft = fftn(uvel.reshape(nk,nk,nk))\n vfft = fftn(vvel.reshape(nk,nk,nk))\n wfft = fftn(wvel.reshape(nk,nk,nk))\n muu = ufft*np.conj(ufft) / nel**6\n mvv = vfft*np.conj(vfft) / nel**6\n mww = wfft*np.conj(wfft) / nel**6\n\n # calc std\n umean = np.array([np.mean(uvel),np.mean(vvel),np.mean(wvel)])\n std_i = np.array([np.std(uvel),np.std(vvel),np.std(wvel)])\n sigma = np.sqrt(np.sum(std_i[:]**2))\n print(std_i[0],np.sqrt(np.mean((uvel[:]-umean[0])**2)), sigma)\n dx = 10.\n k = np.arange(-nk//2,nk//2)*dx\n k = np.roll(k,nk//2)\n spectrum = np.zeros(nk)\n count = np.zeros(nk)\n # ?np.meshgrid(k,k,k)\n X,Y,Z = np.meshgrid(k,k,k)\n r = np.sqrt(X**2+Y**2+Z**2) #*dx\n # print(np.shape(r),r.min(),r.max(),k.max(),r[:,0,0])\n for i,ki in enumerate(k[:nk//2]):\n t = np.where((r<=ki+dx/2)&(r>ki-dx/2))\n spectrum[i] = np.sum(muu[t].real) + np.sum(mvv[t].real) + np.sum(mww[t].real)\n count[i] = np.size(t[0]) \n spectrum[i] *= 2.*np.pi*k[i]**2/dx**3/(count[i]+1.0e-30)\n\n font = {'family': 'Droid Sans',\n 'weight': 'normal',\n 'size': 12}\n m.rc('axes',linewidth=2)\n m.rc('font',**font)\n m.rc('lines',markeredgewidth=1.0)\n f,ax = plt.subplots()\n xf = np.linspace(np.log(k[1]/2),np.log(k[nk//2-1]*2.),100)\n xf = np.exp(xf)\n ax.loglog(xf,Ek(xf,alpha,L,sigma),c='g',lw=2)\n ax.loglog(k[:nk//2],spectrum[:nk//2],'bx-',lw=0.5,ms=8)\n # ax.loglog(x,y,'bx')\n # ax.loglog(x1,y1,'ro')\n ax.set_xlabel(u'$k, 1/м$',size='large')\n ax.set_ylabel(u'$E(k), м^3/с^2$',size='large')\n plt.grid()\n plt.tight_layout()\n plt.show()\n del(f)\n del(ax)\n plt.clf()\n\n Rij_x=(ufft*np.conj(ufft)) # compute velo. 
correlation tensor\n Rij_y=(vfft*np.conj(vfft))\n Rij_z=(wfft*np.conj(wfft))\n\n R1=ifftn(Rij_x)/np.std((uvel))**2/nel**3;\n R2=ifftn(Rij_y)/np.std((vvel))**2/nel**3;\n R3=ifftn(Rij_z)/np.std((wvel))**2/nel**3;\n \n NFFT=np.size(ufft,1)\n R11 = (R3[0,0,:]+R2[0,:,0]+R1[:,0,0])/3.\n # R11 = R11[:np.size(ufft)//2+1]\n R1_22 = (R1[0,:,0]+R3[0,:,0])/2.0e0\n R2_22 = (R2[:,0,0]+R3[:,0,0])/2.0e0\n R3_22 = (R1[0,0,:]+R2[0,0,:])/2.0e0\n\n R22 = (R1_22+R2_22+R3_22)/3.0e0\n # R22 = R22(1:size(u_fft)/2+1);\n Lx = 2.0*np.pi*1.0e-1\n r = np.linspace(0,Lx,NFFT)/(Lx/2);\n\n l11 = np.trapz(np.real(R11[:NFFT//2+1]),dx=r[1]-r[0])\n l22 = np.trapz(np.real(R22[:NFFT//2+1]),dx=r[1]-r[0])\n print(\"Integral Length Scale Longitudal: %g\"%(l11))\n print(\"Integral Length Scale Tangent: %g\"%(l22))\n\n f,ax = plt.subplots(1)\n ax.plot(r[:NFFT//2+1],R11[:NFFT//2+1],marker='>',mfc='w',lw=2,label=u'$R_{11}$')\n ax.plot(r[:NFFT//2+1],R22[:NFFT//2+1],marker='s',markerfacecolor='w',lw=2,label=u'$R_{22}$')\n ax.plot(r[:NFFT//2],np.exp(-r[:NFFT//2]/l11))\n ax.plot(r[:NFFT//2],1.e0+(1.0e0-R22[NFFT//2])*(np.exp(-r[:NFFT//2]/(l22-R22[NFFT//2]))-1.0e0))\n plt.legend()\n plt.tight_layout()\n ax.set_xlabel(u'$r$')\n ax.set_ylabel(u'$R_{11}, R_{22}$')\n plt.grid()\n plt.show()\n return [k[:nk//2],spectrum[:nk//2],r[:NFFT//2+1],R11[:NFFT//2+1],R22[:NFFT//2+1]]\n\ndef Ek(k,alpha=1.339,L=0.01,sigma=10.):\n tmp = (alpha * L * k) **2\n tmp = sigma*sigma*L * tmp * tmp * 5.5e+1/ (27.0 * np.pi * (1.0 + tmp)**(1.7e+1/6.0e0))\n return tmp\n\nif __name__ == '__main__':\n main_routine()\n\n"
] | [
[
"numpy.sum",
"matplotlib.pyplot.tight_layout",
"numpy.size",
"numpy.log",
"numpy.meshgrid",
"matplotlib.rc",
"numpy.where",
"numpy.linspace",
"numpy.mean",
"numpy.zeros",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.clf",
"numpy.arange",
"numpy.std",
"numpy.roll",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.grid",
"numpy.conj",
"numpy.exp",
"matplotlib.pyplot.show",
"numpy.sqrt",
"numpy.genfromtxt",
"numpy.real"
]
] |
sagnik1511/U-Net-Lowered-with-keras | [
"364336b244ece288a52cf76df451501a665e745a"
] | [
"code/UNET_lowered.py"
] | [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nUNET LOwered Model :\r\n \r\n This customized UNet Model has been generated lowering the filters to their 25% .\r\n \r\n\"\"\"\r\n\r\nimport tensorflow as tf\r\nfrom tensorflow import keras\r\nfrom tensorflow.keras import layers \r\nfrom tensorflow.keras.layers import Input , Conv2D , MaxPooling2D , Dropout , concatenate , UpSampling2D\r\nfrom tensorflow.keras import models\r\nfrom tensorflow.keras import losses\r\nfrom tensorflow.keras import optimizers\r\nimport numpy as np\r\n\r\n\r\ndef UNet(input_shape):\r\n keras.backend.clear_session()\r\n inputs = Input(input_shape)\r\n conv1 = Conv2D(16, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(inputs)\r\n conv1 = Conv2D(16, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv1)\r\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\r\n\r\n conv2 = Conv2D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool1)\r\n conv2 = Conv2D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv2)\r\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\r\n\r\n conv3 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool2)\r\n conv3 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv3)\r\n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\r\n \r\n conv4 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool3)\r\n conv4 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv4)\r\n drop4 = Dropout(0.5)(conv4)\r\n pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)\r\n\r\n conv5 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool4)\r\n conv5 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv5)\r\n drop5 = Dropout(0.5)(conv5)\r\n\r\n up6 = Conv2D(128, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(drop5))\r\n merge6 = concatenate([drop4,up6], axis = 3)\r\n conv6 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge6)\r\n conv6 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv6)\r\n\r\n up7 = Conv2D(64, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv6))\r\n merge7 = concatenate([conv3,up7], axis = 3)\r\n conv7 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge7)\r\n conv7 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv7)\r\n\r\n up8 = Conv2D(32, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv7))\r\n merge8 = concatenate([conv2,up8], axis = 3)\r\n conv8 = Conv2D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge8)\r\n conv8 = Conv2D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv8)\r\n\r\n up9 = Conv2D(16, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv8))\r\n merge9 = concatenate([conv1,up9], axis = 3)\r\n conv9 = Conv2D(16, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge9)\r\n conv9 = Conv2D(16, 3, activation = 'relu', padding = 'same', kernel_initializer = 
'he_normal')(conv9)\r\n conv9 = Conv2D(2, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)\r\n\r\n outputs = layers.Conv2D(1, 1, activation = 'sigmoid')(conv9)\r\n\r\n model = keras.Model(inputs = inputs , outputs = outputs,name = 'UNet')\r\n\r\n return model"
] | [
[
"tensorflow.keras.layers.UpSampling2D",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.layers.concatenate",
"tensorflow.keras.backend.clear_session",
"tensorflow.keras.Model",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.Input"
]
] |
Rippling/modin | [
"b2cf1d5fc704803a1ce6699e9a373dc7abeb409e"
] | [
"modin/experimental/engines/omnisci_on_ray/frame/calcite_builder.py"
] | [
"# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\nfrom .expr import (\n InputRefExpr,\n LiteralExpr,\n OpExpr,\n AggregateExpr,\n build_if_then_else,\n build_row_idx_filter_expr,\n)\nfrom .calcite_algebra import (\n CalciteBaseNode,\n CalciteInputRefExpr,\n CalciteInputIdxExpr,\n CalciteScanNode,\n CalciteProjectionNode,\n CalciteFilterNode,\n CalciteAggregateNode,\n CalciteCollation,\n CalciteSortNode,\n CalciteJoinNode,\n CalciteUnionNode,\n)\nfrom .df_algebra import (\n FrameNode,\n MaskNode,\n GroupbyAggNode,\n TransformNode,\n JoinNode,\n UnionNode,\n SortNode,\n FilterNode,\n)\n\nfrom collections import abc\nfrom pandas.core.dtypes.common import _get_dtype\n\n\nclass CalciteBuilder:\n class CompoundAggregate:\n def __init__(self, builder, arg):\n self._builder = builder\n self._arg = arg\n\n def gen_proj_exprs(self):\n return []\n\n def gen_agg_exprs(self):\n pass\n\n def gen_reduce_expr(self):\n pass\n\n class StdAggregate(CompoundAggregate):\n def __init__(self, builder, arg):\n assert isinstance(arg, InputRefExpr)\n super().__init__(builder, arg)\n\n self._quad_name = self._arg.column + \"__quad__\"\n self._sum_name = self._arg.column + \"__sum__\"\n self._quad_sum_name = self._arg.column + \"__quad_sum__\"\n self._count_name = self._arg.column + \"__count__\"\n\n def gen_proj_exprs(self):\n expr = self._builder._translate(self._arg.mul(self._arg))\n return {self._quad_name: expr}\n\n def gen_agg_exprs(self):\n count_expr = self._builder._translate(AggregateExpr(\"count\", self._arg))\n sum_expr = self._builder._translate(AggregateExpr(\"sum\", self._arg))\n self._sum_dtype = sum_expr._dtype\n qsum_expr = AggregateExpr(\n \"SUM\",\n self._builder._ref_idx(self._arg.modin_frame, self._quad_name),\n dtype=sum_expr._dtype,\n )\n\n return {\n self._sum_name: sum_expr,\n self._quad_sum_name: qsum_expr,\n self._count_name: count_expr,\n }\n\n def gen_reduce_expr(self):\n count_expr = self._builder._ref(self._arg.modin_frame, self._count_name)\n count_expr._dtype = _get_dtype(int)\n sum_expr = self._builder._ref(self._arg.modin_frame, self._sum_name)\n sum_expr._dtype = self._sum_dtype\n qsum_expr = self._builder._ref(self._arg.modin_frame, self._quad_sum_name)\n qsum_expr._dtype = self._sum_dtype\n\n null_expr = LiteralExpr(None)\n count_or_null = build_if_then_else(\n count_expr.eq(LiteralExpr(0)), null_expr, count_expr, count_expr._dtype\n )\n count_m_1_or_null = build_if_then_else(\n count_expr.eq(LiteralExpr(1)),\n null_expr,\n count_expr.sub(LiteralExpr(1)),\n count_expr._dtype,\n )\n\n # sqrt((sum(x * x) - sum(x) * sum(x) / n) / (n - 1))\n return (\n qsum_expr.sub(sum_expr.mul(sum_expr).truediv(count_or_null))\n .truediv(count_m_1_or_null)\n .pow(LiteralExpr(0.5))\n )\n\n class SkewAggregate(CompoundAggregate):\n def __init__(self, builder, arg):\n 
assert isinstance(arg, InputRefExpr)\n super().__init__(builder, arg)\n\n self._quad_name = self._arg.column + \"__quad__\"\n self._cube_name = self._arg.column + \"__cube__\"\n self._sum_name = self._arg.column + \"__sum__\"\n self._quad_sum_name = self._arg.column + \"__quad_sum__\"\n self._cube_sum_name = self._arg.column + \"__cube_sum__\"\n self._count_name = self._arg.column + \"__count__\"\n\n def gen_proj_exprs(self):\n quad_expr = self._builder._translate(self._arg.mul(self._arg))\n cube_expr = self._builder._translate(\n self._arg.mul(self._arg).mul(self._arg)\n )\n return {self._quad_name: quad_expr, self._cube_name: cube_expr}\n\n def gen_agg_exprs(self):\n count_expr = self._builder._translate(AggregateExpr(\"count\", self._arg))\n sum_expr = self._builder._translate(AggregateExpr(\"sum\", self._arg))\n self._sum_dtype = sum_expr._dtype\n qsum_expr = AggregateExpr(\n \"SUM\",\n self._builder._ref_idx(self._arg.modin_frame, self._quad_name),\n dtype=sum_expr._dtype,\n )\n csum_expr = AggregateExpr(\n \"SUM\",\n self._builder._ref_idx(self._arg.modin_frame, self._cube_name),\n dtype=sum_expr._dtype,\n )\n\n return {\n self._sum_name: sum_expr,\n self._quad_sum_name: qsum_expr,\n self._cube_sum_name: csum_expr,\n self._count_name: count_expr,\n }\n\n def gen_reduce_expr(self):\n count_expr = self._builder._ref(self._arg.modin_frame, self._count_name)\n count_expr._dtype = _get_dtype(int)\n sum_expr = self._builder._ref(self._arg.modin_frame, self._sum_name)\n sum_expr._dtype = self._sum_dtype\n qsum_expr = self._builder._ref(self._arg.modin_frame, self._quad_sum_name)\n qsum_expr._dtype = self._sum_dtype\n csum_expr = self._builder._ref(self._arg.modin_frame, self._cube_sum_name)\n csum_expr._dtype = self._sum_dtype\n\n mean_expr = sum_expr.truediv(count_expr)\n\n # n * sqrt(n - 1) / (n - 2)\n # * (sum(x ** 3) - 3 * mean * sum(x * x) + 2 * mean * mean * sum(x))\n # / (sum(x * x) - mean * sum(x)) ** 1.5\n part1 = count_expr.mul(\n count_expr.sub(LiteralExpr(1)).pow(LiteralExpr(0.5))\n ).truediv(count_expr.sub(LiteralExpr(2)))\n part2 = csum_expr.sub(mean_expr.mul(qsum_expr).mul(LiteralExpr(3.0))).add(\n mean_expr.mul(mean_expr).mul(sum_expr).mul(LiteralExpr(2.0))\n )\n part3 = qsum_expr.sub(mean_expr.mul(sum_expr)).pow(LiteralExpr(1.5))\n skew_expr = part1.mul(part2).truediv(part3)\n\n # The result is NULL if n <= 2\n return build_if_then_else(\n count_expr.le(LiteralExpr(2)),\n LiteralExpr(None),\n skew_expr,\n skew_expr._dtype,\n )\n\n _compound_aggregates = {\"std\": StdAggregate, \"skew\": SkewAggregate}\n\n class InputContext:\n _simple_aggregates = {\n \"sum\": \"SUM\",\n \"mean\": \"AVG\",\n \"max\": \"MAX\",\n \"min\": \"MIN\",\n \"size\": \"COUNT\",\n \"count\": \"COUNT\",\n }\n _no_arg_aggregates = {\"size\"}\n\n def __init__(self, input_frames, input_nodes):\n self.input_nodes = input_nodes\n self.frame_to_node = {x: y for x, y in zip(input_frames, input_nodes)}\n self.input_offsets = {}\n self.replacements = {}\n offs = 0\n for frame in input_frames:\n self.input_offsets[frame] = offs\n offs += len(frame._table_cols)\n # Materialized frames have additional 'rowid' column\n if isinstance(frame._op, FrameNode):\n offs += 1\n\n def replace_input_node(self, frame, node, new_cols):\n self.replacements[frame] = new_cols\n\n def _idx(self, frame, col):\n assert (\n frame in self.input_offsets\n ), f\"unexpected reference to {frame.id_str()}\"\n\n offs = self.input_offsets[frame]\n\n if frame in self.replacements:\n return self.replacements[frame].index(col) + offs\n\n if col 
== \"__rowid__\":\n if not isinstance(self.frame_to_node[frame], CalciteScanNode):\n raise NotImplementedError(\n \"rowid can be accessed in materialized frames only\"\n )\n return len(frame._table_cols) + offs\n\n assert (\n col in frame._table_cols\n ), f\"unexpected reference to '{col}' in {frame.id_str()}\"\n return frame._table_cols.index(col) + offs\n\n def ref(self, frame, col):\n return CalciteInputRefExpr(self._idx(frame, col))\n\n def ref_idx(self, frame, col):\n return CalciteInputIdxExpr(self._idx(frame, col))\n\n def input_ids(self):\n return [x.id for x in self.input_nodes]\n\n def translate(self, expr):\n \"\"\"Copy those parts of expr tree that have input references\n and translate all references into CalciteInputRefExr\"\"\"\n return self._maybe_copy_and_translate_expr(expr)\n\n def _maybe_copy_and_translate_expr(self, expr, ref_idx=False):\n if isinstance(expr, InputRefExpr):\n if ref_idx:\n return self.ref_idx(expr.modin_frame, expr.column)\n else:\n return self.ref(expr.modin_frame, expr.column)\n\n if isinstance(expr, AggregateExpr):\n expr = expr.copy()\n if expr.agg in self._no_arg_aggregates:\n expr.operands = []\n else:\n expr.operands[0] = self._maybe_copy_and_translate_expr(\n expr.operands[0], True\n )\n expr.agg = self._simple_aggregates[expr.agg]\n return expr\n\n copied = False\n for i, op in enumerate(getattr(expr, \"operands\", [])):\n new_op = self._maybe_copy_and_translate_expr(op)\n if new_op != op:\n if not copied:\n expr = expr.copy()\n expr.operands[i] = new_op\n return expr\n\n class InputContextMgr:\n def __init__(self, builder, input_frames, input_nodes):\n self.builder = builder\n self.input_frames = input_frames\n self.input_nodes = input_nodes\n\n def __enter__(self):\n self.builder._input_ctx_stack.append(\n self.builder.InputContext(self.input_frames, self.input_nodes)\n )\n return self.builder._input_ctx_stack[-1]\n\n def __exit__(self, type, value, traceback):\n self.builder._input_ctx_stack.pop()\n\n type_strings = {\n int: \"INTEGER\",\n bool: \"BOOLEAN\",\n }\n\n def __init__(self):\n self._input_ctx_stack = []\n\n def build(self, op):\n CalciteBaseNode.reset_id()\n self.res = []\n self._to_calcite(op)\n return self.res\n\n def _input_ctx(self):\n return self._input_ctx_stack[-1]\n\n def _set_input_ctx(self, op):\n input_frames = getattr(op, \"input\", [])\n input_nodes = [self._to_calcite(x._op) for x in input_frames]\n return self.InputContextMgr(self, input_frames, input_nodes)\n\n def _set_tmp_ctx(self, input_frames, input_nodes):\n return self.InputContextMgr(self, input_frames, input_nodes)\n\n def _ref(self, frame, col):\n return self._input_ctx().ref(frame, col)\n\n def _ref_idx(self, frame, col):\n return self._input_ctx().ref_idx(frame, col)\n\n def _translate(self, exprs):\n if isinstance(exprs, abc.Iterable):\n return [self._input_ctx().translate(x) for x in exprs]\n return self._input_ctx().translate(exprs)\n\n def _push(self, node):\n self.res.append(node)\n\n def _last(self):\n return self.res[-1]\n\n def _input_nodes(self):\n return self._input_ctx().input_nodes\n\n def _input_node(self, idx):\n return self._input_nodes()[idx]\n\n def _input_ids(self):\n return self._input_ctx().input_ids()\n\n def _to_calcite(self, op):\n # This context translates input operands and setup current\n # input context to translate input references (recursion\n # over tree happens here).\n with self._set_input_ctx(op):\n if isinstance(op, FrameNode):\n self._process_frame(op)\n elif isinstance(op, MaskNode):\n self._process_mask(op)\n elif 
isinstance(op, GroupbyAggNode):\n self._process_groupby(op)\n elif isinstance(op, TransformNode):\n self._process_transform(op)\n elif isinstance(op, JoinNode):\n self._process_join(op)\n elif isinstance(op, UnionNode):\n self._process_union(op)\n elif isinstance(op, SortNode):\n self._process_sort(op)\n elif isinstance(op, FilterNode):\n self._process_filter(op)\n else:\n raise NotImplementedError(\n f\"CalciteBuilder doesn't support {type(op).__name__}\"\n )\n return self.res[-1]\n\n def _process_frame(self, op):\n self._push(CalciteScanNode(op.modin_frame))\n\n def _process_mask(self, op):\n if op.row_indices is not None:\n raise NotImplementedError(\"row indices masking is not yet supported\")\n\n frame = op.input[0]\n\n # select rows by rowid\n rowid_col = self._ref(frame, \"__rowid__\")\n condition = build_row_idx_filter_expr(op.row_numeric_idx, rowid_col)\n self._push(CalciteFilterNode(condition))\n\n # mask is currently always applied over scan, it means\n # we need additional projection to remove rowid column\n fields = frame._table_cols\n exprs = [self._ref(frame, col) for col in frame._table_cols]\n self._push(CalciteProjectionNode(fields, exprs))\n\n def _process_groupby(self, op):\n frame = op.input[0]\n\n # Aggregation's input should always be a projection and\n # group key columns should always go first\n proj_cols = op.by.copy()\n for col in frame._table_cols:\n if col not in op.by:\n proj_cols.append(col)\n proj_exprs = [self._ref(frame, col) for col in proj_cols]\n # Add expressions required for compound aggregates\n compound_aggs = {}\n for agg, expr in op.agg_exprs.items():\n if expr.agg in self._compound_aggregates:\n compound_aggs[agg] = self._compound_aggregates[expr.agg](\n self, expr.operands[0]\n )\n extra_exprs = compound_aggs[agg].gen_proj_exprs()\n proj_cols.extend(extra_exprs.keys())\n proj_exprs.extend(extra_exprs.values())\n proj = CalciteProjectionNode(proj_cols, proj_exprs)\n self._push(proj)\n\n self._input_ctx().replace_input_node(frame, proj, proj_cols)\n\n group = [self._ref_idx(frame, col) for col in op.by]\n fields = op.by.copy()\n aggs = []\n for agg, expr in op.agg_exprs.items():\n if agg in compound_aggs:\n extra_aggs = compound_aggs[agg].gen_agg_exprs()\n fields.extend(extra_aggs.keys())\n aggs.extend(extra_aggs.values())\n else:\n fields.append(agg)\n aggs.append(self._translate(expr))\n node = CalciteAggregateNode(fields, group, aggs)\n self._push(node)\n\n if compound_aggs:\n self._input_ctx().replace_input_node(frame, node, fields)\n proj_cols = op.by.copy()\n proj_exprs = [self._ref(frame, col) for col in proj_cols]\n proj_cols.extend(op.agg_exprs.keys())\n for agg in op.agg_exprs:\n if agg in compound_aggs:\n proj_exprs.append(compound_aggs[agg].gen_reduce_expr())\n else:\n proj_exprs.append(self._ref(frame, agg))\n proj = CalciteProjectionNode(proj_cols, proj_exprs)\n self._push(proj)\n\n if op.groupby_opts[\"sort\"]:\n collation = [CalciteCollation(col) for col in group]\n self._push(CalciteSortNode(collation))\n\n def _process_transform(self, op):\n fields = list(op.exprs.keys())\n exprs = self._translate(op.exprs.values())\n self._push(CalciteProjectionNode(fields, exprs))\n\n def _process_join(self, op):\n left = op.input[0]\n right = op.input[1]\n\n assert (\n op.on is not None\n ), \"Merge with unspecified 'on' parameter is not supported in the engine\"\n\n for col in op.on:\n assert (\n col in left._table_cols and col in right._table_cols\n ), f\"Column '{col}'' is missing in one of merge operands\"\n\n \"\"\" Join, only equal-join 
supported \"\"\"\n cmps = [self._ref(left, c).eq(self._ref(right, c)) for c in op.on]\n if len(cmps) > 1:\n condition = OpExpr(\"AND\", cmps, _get_dtype(bool))\n else:\n condition = cmps[0]\n node = CalciteJoinNode(\n left_id=self._input_node(0).id,\n right_id=self._input_node(1).id,\n how=op.how,\n condition=condition,\n )\n self._push(node)\n\n \"\"\"Projection for both frames\"\"\"\n fields = []\n exprs = []\n conflicting_cols = set(left.columns) & set(right.columns) - set(op.on)\n \"\"\"First goes 'on' column then all left columns(+suffix for conflicting names)\n but 'on' then all right columns(+suffix for conflicting names) but 'on'\"\"\"\n on_idx = [-1] * len(op.on)\n for c in left.columns:\n if c in op.on:\n on_idx[op.on.index(c)] = len(fields)\n suffix = op.suffixes[0] if c in conflicting_cols else \"\"\n fields.append(c + suffix)\n exprs.append(self._ref(left, c))\n\n for c in right.columns:\n if c not in op.on:\n suffix = op.suffixes[1] if c in conflicting_cols else \"\"\n fields.append(c + suffix)\n exprs.append(self._ref(right, c))\n\n self._push(CalciteProjectionNode(fields, exprs))\n\n # TODO: current input translation system doesn't work here\n # because there is no frame to reference for index computation.\n # We should build calcite tree to keep references to input\n # nodes and keep scheme in calcite nodes. For now just use\n # known index on_idx.\n if op.sort is True:\n \"\"\"Sort by key column\"\"\"\n collation = [CalciteCollation(CalciteInputIdxExpr(x)) for x in on_idx]\n self._push(CalciteSortNode(collation))\n\n def _process_union(self, op):\n self._push(CalciteUnionNode(self._input_ids(), True))\n\n def _process_sort(self, op):\n frame = op.input[0]\n\n # Sort should be applied to projections.\n if not isinstance(self._input_node(0), CalciteProjectionNode):\n proj = CalciteProjectionNode(\n frame._table_cols, [self._ref(frame, col) for col in frame._table_cols]\n )\n self._push(proj)\n self._input_ctx().replace_input_node(frame, proj, frame._table_cols)\n\n nulls = op.na_position.upper()\n collations = []\n for col, asc in zip(op.columns, op.ascending):\n ascending = \"ASCENDING\" if asc else \"DESCENDING\"\n collations.append(\n CalciteCollation(self._ref_idx(frame, col), ascending, nulls)\n )\n self._push(CalciteSortNode(collations))\n\n def _process_filter(self, op):\n condition = self._translate(op.condition)\n self._push(CalciteFilterNode(condition))\n"
] | [
[
"pandas.core.dtypes.common._get_dtype"
]
] |
lucadiliello/metrics | [
"e98fbafd2af5d217596958f9cfe6152543a00b7f"
] | [
"torchmetrics/regression/pearson.py"
] | [
"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, List, Optional, Tuple\n\nimport torch\nfrom torch import Tensor\n\nfrom torchmetrics.functional.regression.pearson import _pearson_corrcoef_compute, _pearson_corrcoef_update\nfrom torchmetrics.metric import Metric\n\n\ndef _final_aggregation(\n means_x: Tensor,\n means_y: Tensor,\n vars_x: Tensor,\n vars_y: Tensor,\n corrs_xy: Tensor,\n nbs: Tensor,\n) -> Tuple[Tensor, Tensor, Tensor, Tensor]:\n \"\"\"Aggregate the statistics from multiple devices.\n\n Formula taken from here: `Aggregate the statistics from multiple devices`_\n \"\"\"\n # assert len(means_x) > 1 and len(means_y) > 1 and len(vars_x) > 1 and len(vars_y) > 1 and len(corrs_xy) > 1\n mx1, my1, vx1, vy1, cxy1, n1 = means_x[0], means_y[0], vars_x[0], vars_y[0], corrs_xy[0], nbs[0]\n for i in range(1, len(means_x)):\n mx2, my2, vx2, vy2, cxy2, n2 = means_x[i], means_y[i], vars_x[i], vars_y[i], corrs_xy[i], nbs[i]\n\n nb = n1 + n2\n mean_x = (n1 * mx1 + n2 * mx2) / nb\n mean_y = (n1 * my1 + n2 * my2) / nb\n var_x = 1 / (n1 + n2 - 1) * ((n1 - 1) * vx1 + (n2 - 1) * vx2 + ((n1 * n2) / (n1 + n2)) * (mx1 - mx2) ** 2)\n var_y = 1 / (n1 + n2 - 1) * ((n1 - 1) * vy1 + (n2 - 1) * vy2 + ((n1 * n2) / (n1 + n2)) * (my1 - my2) ** 2)\n\n corr1 = n1 * cxy1 + n1 * (mx1 - mean_x) * (my1 - mean_y)\n corr2 = n2 * cxy2 + n2 * (mx2 - mean_x) * (my2 - mean_y)\n corr_xy = (corr1 + corr2) / (n1 + n2)\n\n mx1, my1, vx1, vy1, cxy1, n1 = mean_x, mean_y, var_x, var_y, corr_xy, nb\n\n return var_x, var_y, corr_xy, nb\n\n\nclass PearsonCorrcoef(Metric):\n r\"\"\"\n Computes `Pearson Correlation Coefficient`_:\n\n .. math::\n P_{corr}(x,y) = \\frac{cov(x,y)}{\\sigma_x \\sigma_y}\n\n Where :math:`y` is a tensor of target values, and :math:`x` is a\n tensor of predictions.\n\n Forward accepts\n\n - ``preds`` (float tensor): ``(N,)``\n - ``target``(float tensor): ``(N,)``\n\n Args:\n compute_on_step:\n Forward only calls ``update()`` and return None if this is set to False. default: True\n dist_sync_on_step:\n Synchronize metric state across processes at each ``forward()``\n before returning the value at the step. default: False\n process_group:\n Specify the process group on which synchronization is called. 
default: None (which selects the entire world)\n\n Example:\n >>> from torchmetrics import PearsonCorrcoef\n >>> target = torch.tensor([3, -0.5, 2, 7])\n >>> preds = torch.tensor([2.5, 0.0, 2, 8])\n >>> pearson = PearsonCorrcoef()\n >>> pearson(preds, target)\n tensor(0.9849)\n\n \"\"\"\n is_differentiable = True\n higher_is_better = None # both -1 and 1 are optimal\n preds: List[Tensor]\n target: List[Tensor]\n mean_x: Tensor\n mean_y: Tensor\n var_x: Tensor\n var_y: Tensor\n corr_xy: Tensor\n n_total: Tensor\n\n def __init__(\n self,\n compute_on_step: bool = True,\n dist_sync_on_step: bool = False,\n process_group: Optional[Any] = None,\n ) -> None:\n super().__init__(\n compute_on_step=compute_on_step,\n dist_sync_on_step=dist_sync_on_step,\n process_group=process_group,\n )\n\n self.add_state(\"mean_x\", default=torch.tensor(0.0), dist_reduce_fx=None)\n self.add_state(\"mean_y\", default=torch.tensor(0.0), dist_reduce_fx=None)\n self.add_state(\"var_x\", default=torch.tensor(0.0), dist_reduce_fx=None)\n self.add_state(\"var_y\", default=torch.tensor(0.0), dist_reduce_fx=None)\n self.add_state(\"corr_xy\", default=torch.tensor(0.0), dist_reduce_fx=None)\n self.add_state(\"n_total\", default=torch.tensor(0.0), dist_reduce_fx=None)\n\n def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore\n \"\"\"Update state with predictions and targets.\n\n Args:\n preds: Predictions from model\n target: Ground truth values\n \"\"\"\n self.mean_x, self.mean_y, self.var_x, self.var_y, self.corr_xy, self.n_total = _pearson_corrcoef_update(\n preds, target, self.mean_x, self.mean_y, self.var_x, self.var_y, self.corr_xy, self.n_total\n )\n\n def compute(self) -> Tensor:\n \"\"\"Computes pearson correlation coefficient over state.\"\"\"\n if self.mean_x.numel() > 1: # multiple devices, need further reduction\n var_x, var_y, corr_xy, n_total = _final_aggregation(\n self.mean_x, self.mean_y, self.var_x, self.var_y, self.corr_xy, self.n_total\n )\n else:\n var_x = self.var_x\n var_y = self.var_y\n corr_xy = self.corr_xy\n n_total = self.n_total\n\n return _pearson_corrcoef_compute(var_x, var_y, corr_xy, n_total)\n"
] | [
[
"torch.tensor"
]
] |
Lizhi-sjtu/DRL-code-pytorch | [
"2ca05f4ed64d2d032e161fc3a2d2a68c818c4337"
] | [
"8.SAC/SAC-continuous.py"
] | [
"import gym\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport numpy as np\r\nimport copy\r\nfrom torch.utils.tensorboard import SummaryWriter\r\nfrom torch.distributions import Normal\r\n\r\n\r\nclass Actor(nn.Module):\r\n def __init__(self, state_dim, action_dim, hidden_width, max_action):\r\n super(Actor, self).__init__()\r\n self.max_action = max_action\r\n self.l1 = nn.Linear(state_dim, hidden_width)\r\n self.l2 = nn.Linear(hidden_width, hidden_width)\r\n self.mean_layer = nn.Linear(hidden_width, action_dim)\r\n self.log_std_layer = nn.Linear(hidden_width, action_dim)\r\n\r\n def forward(self, x, deterministic=False, with_logprob=True):\r\n x = F.relu(self.l1(x))\r\n x = F.relu(self.l2(x))\r\n mean = self.mean_layer(x)\r\n log_std = self.log_std_layer(x) # We output the log_std to ensure that std=exp(log_std)>0\r\n log_std = torch.clamp(log_std, -20, 2)\r\n std = torch.exp(log_std)\r\n\r\n dist = Normal(mean, std) # Generate a Gaussian distribution\r\n if deterministic: # When evaluating,we use the deterministic policy\r\n a = mean\r\n else:\r\n a = dist.rsample() # reparameterization trick: mean+std*N(0,1)\r\n\r\n if with_logprob: # The method refers to Open AI Spinning up, which is more stable.\r\n log_pi = dist.log_prob(a).sum(dim=1, keepdim=True)\r\n log_pi -= (2 * (np.log(2) - a - F.softplus(-2 * a))).sum(dim=1, keepdim=True)\r\n else:\r\n log_pi = None\r\n\r\n a = self.max_action * torch.tanh(a) # Use tanh to compress the unbounded Gaussian distribution into a bounded action interval.\r\n\r\n return a, log_pi\r\n\r\n\r\nclass Critic(nn.Module): # According to (s,a), directly calculate Q(s,a)\r\n def __init__(self, state_dim, action_dim, hidden_width):\r\n super(Critic, self).__init__()\r\n # Q1\r\n self.l1 = nn.Linear(state_dim + action_dim, hidden_width)\r\n self.l2 = nn.Linear(hidden_width, hidden_width)\r\n self.l3 = nn.Linear(hidden_width, 1)\r\n # Q2\r\n self.l4 = nn.Linear(state_dim + action_dim, hidden_width)\r\n self.l5 = nn.Linear(hidden_width, hidden_width)\r\n self.l6 = nn.Linear(hidden_width, 1)\r\n\r\n def forward(self, s, a):\r\n s_a = torch.cat([s, a], 1)\r\n q1 = F.relu(self.l1(s_a))\r\n q1 = F.relu(self.l2(q1))\r\n q1 = self.l3(q1)\r\n\r\n q2 = F.relu(self.l4(s_a))\r\n q2 = F.relu(self.l5(q2))\r\n q2 = self.l6(q2)\r\n\r\n return q1, q2\r\n\r\n\r\nclass ReplayBuffer(object):\r\n def __init__(self, state_dim, action_dim):\r\n self.max_size = int(1e6)\r\n self.count = 0\r\n self.size = 0\r\n self.s = np.zeros((self.max_size, state_dim))\r\n self.a = np.zeros((self.max_size, action_dim))\r\n self.r = np.zeros((self.max_size, 1))\r\n self.s_ = np.zeros((self.max_size, state_dim))\r\n self.dw = np.zeros((self.max_size, 1))\r\n\r\n def store(self, s, a, r, s_, dw):\r\n self.s[self.count] = s\r\n self.a[self.count] = a\r\n self.r[self.count] = r\r\n self.s_[self.count] = s_\r\n self.dw[self.count] = dw\r\n self.count = (self.count + 1) % self.max_size # When the 'count' reaches max_size, it will be reset to 0.\r\n self.size = min(self.size + 1, self.max_size) # Record the number of transitions\r\n\r\n def sample(self, batch_size):\r\n index = np.random.choice(self.size, size=batch_size) # Randomly sampling\r\n batch_s = torch.tensor(self.s[index], dtype=torch.float)\r\n batch_a = torch.tensor(self.a[index], dtype=torch.float)\r\n batch_r = torch.tensor(self.r[index], dtype=torch.float)\r\n batch_s_ = torch.tensor(self.s_[index], dtype=torch.float)\r\n batch_dw = torch.tensor(self.dw[index], dtype=torch.float)\r\n\r\n return batch_s, 
batch_a, batch_r, batch_s_, batch_dw\r\n\r\n\r\nclass SAC(object):\r\n def __init__(self, state_dim, action_dim, max_action):\r\n self.max_action = max_action\r\n self.hidden_width = 256 # The number of neurons in hidden layers of the neural network\r\n self.batch_size = 256 # batch size\r\n self.GAMMA = 0.99 # discount factor\r\n self.TAU = 0.005 # Softly update the target network\r\n self.lr = 3e-4 # learning rate\r\n self.adaptive_alpha = True # Whether to automatically learn the temperature alpha\r\n if self.adaptive_alpha:\r\n # Target Entropy = −dim(A) (e.g. , -6 for HalfCheetah-v2) as given in the paper\r\n self.target_entropy = -action_dim\r\n # We learn log_alpha instead of alpha to ensure that alpha=exp(log_alpha)>0\r\n self.log_alpha = torch.zeros(1, requires_grad=True)\r\n self.alpha = self.log_alpha.exp()\r\n self.alpha_optimizer = torch.optim.Adam([self.log_alpha], lr=self.lr)\r\n else:\r\n self.alpha = 0.2\r\n\r\n self.actor = Actor(state_dim, action_dim, self.hidden_width, max_action)\r\n self.critic = Critic(state_dim, action_dim, self.hidden_width)\r\n self.critic_target = copy.deepcopy(self.critic)\r\n\r\n self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=self.lr)\r\n self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=self.lr)\r\n\r\n def choose_action(self, s, deterministic=False):\r\n s = torch.unsqueeze(torch.tensor(s, dtype=torch.float), 0)\r\n a, _ = self.actor(s, deterministic, False) # When choosing actions, we do not need to compute log_pi\r\n return a.data.numpy().flatten()\r\n\r\n def learn(self, relay_buffer):\r\n batch_s, batch_a, batch_r, batch_s_, batch_dw = relay_buffer.sample(self.batch_size) # Sample a batch\r\n\r\n with torch.no_grad():\r\n batch_a_, log_pi_ = self.actor(batch_s_) # a' from the current policy\r\n # Compute target Q\r\n target_Q1, target_Q2 = self.critic_target(batch_s_, batch_a_)\r\n target_Q = batch_r + self.GAMMA * (1 - batch_dw) * (torch.min(target_Q1, target_Q2) - self.alpha * log_pi_)\r\n\r\n # Compute current Q\r\n current_Q1, current_Q2 = self.critic(batch_s, batch_a)\r\n # Compute critic loss\r\n critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(current_Q2, target_Q)\r\n # Optimize the critic\r\n self.critic_optimizer.zero_grad()\r\n critic_loss.backward()\r\n self.critic_optimizer.step()\r\n\r\n # Freeze critic networks so you don't waste computational effort\r\n for params in self.critic.parameters():\r\n params.requires_grad = False\r\n\r\n # Compute actor loss\r\n a, log_pi = self.actor(batch_s)\r\n Q1, Q2 = self.critic(batch_s, a)\r\n Q = torch.min(Q1, Q2)\r\n actor_loss = (self.alpha * log_pi - Q).mean()\r\n\r\n # Optimize the actor\r\n self.actor_optimizer.zero_grad()\r\n actor_loss.backward()\r\n self.actor_optimizer.step()\r\n\r\n # Unfreeze critic networks\r\n for params in self.critic.parameters():\r\n params.requires_grad = True\r\n\r\n # Update alpha\r\n if self.adaptive_alpha:\r\n # We learn log_alpha instead of alpha to ensure that alpha=exp(log_alpha)>0\r\n alpha_loss = -(self.log_alpha.exp() * (log_pi + self.target_entropy).detach()).mean()\r\n self.alpha_optimizer.zero_grad()\r\n alpha_loss.backward()\r\n self.alpha_optimizer.step()\r\n self.alpha = self.log_alpha.exp()\r\n\r\n # Softly update target networks\r\n for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):\r\n target_param.data.copy_(self.TAU * param.data + (1 - self.TAU) * target_param.data)\r\n\r\n\r\ndef evaluate_policy(env, agent):\r\n times = 3 # Perform three 
evaluations and calculate the average\r\n evaluate_reward = 0\r\n for _ in range(times):\r\n s = env.reset()\r\n done = False\r\n episode_reward = 0\r\n while not done:\r\n a = agent.choose_action(s, deterministic=True) # We use the deterministic policy during the evaluating\r\n s_, r, done, _ = env.step(a)\r\n episode_reward += r\r\n s = s_\r\n evaluate_reward += episode_reward\r\n\r\n return int(evaluate_reward / times)\r\n\r\n\r\ndef reward_adapter(r, env_index):\r\n if env_index == 0: # Pendulum-v1\r\n r = (r + 8) / 8\r\n elif env_index == 1: # BipedalWalker-v3\r\n if r <= -100:\r\n r = -1\r\n return r\r\n\r\n\r\nif __name__ == '__main__':\r\n env_name = ['Pendulum-v1', 'BipedalWalker-v3', 'HalfCheetah-v2', 'Hopper-v2', 'Walker2d-v2']\r\n env_index = 0\r\n env = gym.make(env_name[env_index])\r\n env_evaluate = gym.make(env_name[env_index]) # When evaluating the policy, we need to rebuild an environment\r\n number = 1\r\n seed = 0\r\n # Set random seed\r\n env.seed(seed)\r\n env.action_space.seed(seed)\r\n env_evaluate.seed(seed)\r\n env_evaluate.action_space.seed(seed)\r\n np.random.seed(seed)\r\n torch.manual_seed(seed)\r\n\r\n state_dim = env.observation_space.shape[0]\r\n action_dim = env.action_space.shape[0]\r\n max_action = float(env.action_space.high[0])\r\n max_episode_steps = env._max_episode_steps # Maximum number of steps per episode\r\n print(\"env={}\".format(env_name[env_index]))\r\n print(\"state_dim={}\".format(state_dim))\r\n print(\"action_dim={}\".format(action_dim))\r\n print(\"max_action={}\".format(max_action))\r\n print(\"max_episode_steps={}\".format(max_episode_steps))\r\n\r\n agent = SAC(state_dim, action_dim, max_action)\r\n replay_buffer = ReplayBuffer(state_dim, action_dim)\r\n # Build a tensorboard\r\n writer = SummaryWriter(log_dir='runs/SAC/SAC_env_{}_number_{}_seed_{}'.format(env_name[env_index], number, seed))\r\n\r\n max_train_steps = 3e6 # Maximum number of training steps\r\n random_steps = 25e3 # Take the random actions in the beginning for the better exploration\r\n evaluate_freq = 5e3 # Evaluate the policy every 'evaluate_freq' steps\r\n evaluate_num = 0 # Record the number of evaluations\r\n evaluate_rewards = [] # Record the rewards during the evaluating\r\n total_steps = 0 # Record the total steps during the training\r\n\r\n while total_steps < max_train_steps:\r\n s = env.reset()\r\n episode_steps = 0\r\n done = False\r\n while not done:\r\n episode_steps += 1\r\n if total_steps < random_steps: # Take the random actions in the beginning for the better exploration\r\n a = env.action_space.sample()\r\n else:\r\n a = agent.choose_action(s)\r\n s_, r, done, _ = env.step(a)\r\n r = reward_adapter(r, env_index) # Adjust rewards for better performance\r\n # When dead or win or reaching the max_episode_steps, done will be Ture, we need to distinguish them;\r\n # dw means dead or win,there is no next state s';\r\n # but when reaching the max_episode_steps,there is a next state s' actually.\r\n if done and episode_steps != max_episode_steps:\r\n dw = True\r\n else:\r\n dw = False\r\n replay_buffer.store(s, a, r, s_, dw) # Store the transition\r\n s = s_\r\n\r\n if total_steps >= random_steps:\r\n agent.learn(replay_buffer)\r\n\r\n # Evaluate the policy every 'evaluate_freq' steps\r\n if (total_steps + 1) % evaluate_freq == 0:\r\n evaluate_num += 1\r\n evaluate_reward = evaluate_policy(env_evaluate, agent)\r\n evaluate_rewards.append(evaluate_reward)\r\n print(\"evaluate_num:{} \\t evaluate_reward:{}\".format(evaluate_num, evaluate_reward))\r\n 
writer.add_scalar('step_rewards_{}'.format(env_name[env_index]), evaluate_reward, global_step=total_steps)\r\n # Save the rewards\r\n if evaluate_num % 10 == 0:\r\n np.save('./data_train/SAC_env_{}_number_{}_seed_{}.npy'.format(env_name[env_index], number, seed), np.array(evaluate_rewards))\r\n\r\n total_steps += 1\r\n"
] | [
[
"torch.min",
"torch.nn.functional.mse_loss",
"torch.no_grad",
"numpy.random.seed",
"numpy.log",
"torch.nn.functional.softplus",
"torch.cat",
"torch.distributions.Normal",
"numpy.random.choice",
"torch.optim.Adam",
"torch.tanh",
"numpy.zeros",
"torch.manual_seed",
"torch.tensor",
"torch.zeros",
"torch.nn.Linear",
"torch.exp",
"numpy.array",
"torch.clamp"
]
] |
tliu68/graspologic | [
"d1cf7678bc63ab9769828a82a90f66bf1dfa0eff"
] | [
"graspologic/layouts/render.py"
] | [
"# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\nimport networkx as nx\nfrom typing import Any, Dict, List, Optional, Tuple\nfrom graspologic.layouts.classes import NodePosition\nimport matplotlib.pyplot as plt\n\n\ndef _calculate_x_y_domain(\n positions: List[NodePosition],\n) -> Tuple[Tuple[float, float], Tuple[float, float]]:\n \"\"\"calculate the overall x/y domain, converting to a square\n so we can have a consistent scale\n \"\"\"\n min_x = min_y = float(\"inf\")\n max_x = max_y = float(\"-inf\")\n for node_position in positions:\n min_x = min(min_x, node_position.x - node_position.size)\n max_x = max(max_x, node_position.x + node_position.size)\n min_y = min(min_y, node_position.y - node_position.size)\n max_y = max(max_y, node_position.y + node_position.size)\n\n x_delta = max_x - min_x\n y_delta = max_y - min_y\n max_delta = max(x_delta, y_delta)\n\n if max_delta == x_delta:\n difference = (max_delta - y_delta) / 2\n min_y = min_y - difference\n max_y = max_y + difference\n elif max_delta == y_delta:\n difference = (max_delta - x_delta) / 2\n min_x = min_x - difference\n max_x = max_x + difference\n\n return (min_x, max_x), (min_y, max_y)\n\n\ndef _scale_value(\n domain: Tuple[float, float], data_range: Tuple[float, float], value: float\n) -> float:\n return data_range[0] + (data_range[1] - data_range[0]) * (\n (value - domain[0]) / (domain[1] - domain[0])\n )\n\n\ndef _scale_node_sizes_for_rendering(\n sizes: List[float],\n spatial_domain: Tuple[float, float],\n spatial_range: Tuple[float, float],\n dpi: float,\n):\n \"\"\"scale the size again to match the rendered pixel range\n we would expect this to be handled by the underlying viz framework, but it isn't, size is specified\n as the bounding box in points of the rendered output, so we need to transform our size to match.\n\n There are 72 points per inch. 
Multiplying by 72 / dpi converts from pixels to points.\n \"\"\"\n spatial_domain = (0, spatial_domain[1] - spatial_domain[0])\n return list(\n map(\n lambda s: _scale_value(spatial_domain, spatial_range, s * 2 * 72.0 / dpi)\n ** 2,\n sizes,\n )\n )\n\n\ndef _draw_graph(\n graph: nx.Graph,\n positions: List[NodePosition],\n node_colors: Dict[Any, str],\n vertex_alpha: float,\n edge_line_width: float,\n edge_alpha: float,\n figure_width: float,\n figure_height: float,\n vertex_line_width: float = 0.01,\n vertex_shape: str = \"o\",\n arrows: bool = False,\n dpi: int = 100,\n):\n if len(positions) != len(graph.nodes()):\n raise ValueError(\n f\"The number of positions provided {len(positions)} is not the same as the \"\n f\"number of nodes in the graph {len(graph.nodes())}\"\n )\n for position in positions:\n if position.node_id not in graph:\n raise ValueError(\n f\"The node position provided for {position.node_id} references a node \"\n f\"not found in our graph\"\n )\n\n plt.rcParams[\"figure.dpi\"] = dpi # TODO, test at different dpi\n\n plt.clf()\n figure = plt.gcf()\n ax = plt.gca()\n ax.set_axis_off()\n figure.set_size_inches(figure_width, figure_height)\n window_extent_width = ax.get_window_extent().width\n\n x_domain, y_domain = _calculate_x_y_domain(positions)\n\n position_map = {position.node_id: position for position in positions}\n node_positions = {\n position.node_id: (position.x, position.y) for position in positions\n }\n\n vertices = []\n vertex_sizes = []\n node_color_list = []\n edge_color_list = []\n\n for node in graph.nodes():\n vertices.append(node)\n vertex_sizes.append(position_map[node].size)\n node_color_list.append(node_colors[node])\n\n vertex_sizes = _scale_node_sizes_for_rendering(\n vertex_sizes, x_domain, (0, window_extent_width), dpi\n )\n\n for source, target in graph.edges():\n edge_color_list.append(node_colors[source])\n\n ax.set_xbound(x_domain)\n ax.set_xlim(x_domain)\n ax.set_ybound(y_domain)\n ax.set_ylim(y_domain)\n\n nx.draw_networkx_edges(\n graph,\n pos=node_positions,\n alpha=edge_alpha,\n width=edge_line_width,\n edge_color=edge_color_list,\n arrows=arrows,\n ax=ax,\n )\n\n nx.draw_networkx_nodes(\n graph,\n pos=node_positions,\n nodelist=vertices,\n node_color=node_color_list,\n alpha=vertex_alpha,\n linewidths=vertex_line_width,\n node_size=vertex_sizes,\n node_shape=vertex_shape,\n ax=ax,\n )\n\n\ndef show_graph(\n graph: nx.Graph,\n positions: List[NodePosition],\n node_colors: Dict[Any, str],\n vertex_line_width: float = 0.01,\n vertex_alpha: float = 0.55,\n edge_line_width: float = 0.5,\n edge_alpha: float = 0.02,\n figure_width: float = 15.0,\n figure_height: float = 15.0,\n light_background: bool = True,\n vertex_shape: str = \"o\",\n arrows: bool = False,\n dpi: int = 500,\n):\n \"\"\"\n Renders and displays a graph.\n\n Attempts to display it via the platform-specific display library such as TkInter\n\n Edges will be displayed with the same color as the source node.\n\n Parameters\n ----------\n graph : nx.Graph\n The graph to be displayed. If the networkx Graph contains only nodes, no\n edges will be displayed.\n positions : List[:class:`graspologic.layouts.NodePosition`]\n The positionsfor every node in the graph.\n node_colors : Dict[Any, str]\n A mapping of node id to colors. Must contain an entry for every node in the\n graph.\n vertex_line_width : float\n Line width of vertex outline. Default is``0.01``.\n vertex_alpha : float\n Alpha (transparency) of vertices in visualization. 
Default is``0.55``.\n edge_line_width : float\n Line width of edge. Default is``0.5``.\n edge_alpha : float\n Alpha (transparency) of edges in visualization. Default is``0.02``.\n figure_width : float\n Width of figure. Default is ``15.0``.\n figure_height : float\n eight of figure. Default is``15.0``.\n light_background : bool\n Light background or dark background. Default is``True``.\n vertex_shape : str\n Matplotlib Marker for the vertex shape. See\n `https://matplotlib.org/api/markers_api.html <https://matplotlib.org/api/markers_api.html>`_\n for a list of allowed values . Default is ``o`` (i.e: a circle)\n arrows : bool\n For directed graphs, if ``True``, draw arrow heads. Default is ``False``\n dpi : float\n Dots per inch of the figure. Default is ``500``.\n \"\"\"\n ax = plt.gca()\n if light_background:\n facecolor = ax.get_facecolor()\n else:\n facecolor = \"#030303\"\n\n _draw_graph(\n graph=graph,\n positions=positions,\n node_colors=node_colors,\n vertex_line_width=vertex_line_width,\n vertex_alpha=vertex_alpha,\n edge_line_width=edge_line_width,\n edge_alpha=edge_alpha,\n figure_width=figure_width,\n figure_height=figure_height,\n vertex_shape=vertex_shape,\n arrows=arrows,\n dpi=dpi,\n )\n plt.gcf().set_facecolor(facecolor)\n plt.show()\n plt.close(\"all\")\n\n\ndef save_graph(\n output_path: str,\n graph: nx.Graph,\n positions: List[NodePosition],\n node_colors: Dict[Any, str],\n vertex_line_width: float = 0.01,\n vertex_alpha: float = 0.55,\n edge_line_width: float = 0.5,\n edge_alpha: float = 0.02,\n figure_width: float = 15.0,\n figure_height: float = 15.0,\n light_background: bool = True,\n vertex_shape: str = \"o\",\n arrows: bool = False,\n dpi: int = 100,\n):\n \"\"\"\n Renders a graph to file.\n\n Edges will be displayed with the same color as the source node.\n\n Parameters\n ----------\n output_path : str\n The output path to write the rendered graph to. Suggested file extension is\n ``.png``.\n graph : nx.Graph\n The graph to be displayed. If the networkx Graph contains only nodes, no\n edges will be displayed.\n positions : List[:class:`graspologic.layouts.NodePosition`]\n The positionsfor every node in the graph.\n node_colors : Dict[Any, str]\n A mapping of node id to colors. Must contain an entry for every node in the\n graph.\n vertex_line_width : float\n Line width of vertex outline. Default is``0.01``.\n vertex_alpha : float\n Alpha (transparency) of vertices in visualization. Default is``0.55``.\n edge_line_width : float\n Line width of edge. Default is``0.5``.\n edge_alpha : float\n Alpha (transparency) of edges in visualization. Default is``0.02``.\n figure_width : float\n Width of figure. Default is ``15.0``.\n figure_height : float\n eight of figure. Default is``15.0``.\n light_background : bool\n Light background or dark background. Default is``True``.\n vertex_shape : str\n Matplotlib Marker for the vertex shape. See\n `https://matplotlib.org/api/markers_api.html <https://matplotlib.org/api/markers_api.html>`_\n for a list of allowed values . Default is ``o`` (i.e: a circle)\n arrows : bool\n For directed graphs, if ``True``, draw arrow heads. Default is ``False``\n dpi : float\n Dots per inch of the figure. 
Default is ``100``.\n\n Returns\n -------\n\n \"\"\"\n _draw_graph(\n graph=graph,\n positions=positions,\n node_colors=node_colors,\n vertex_line_width=vertex_line_width,\n vertex_alpha=vertex_alpha,\n edge_line_width=edge_line_width,\n edge_alpha=edge_alpha,\n figure_width=figure_width,\n figure_height=figure_height,\n vertex_shape=vertex_shape,\n arrows=arrows,\n dpi=dpi,\n )\n ax = plt.gca()\n if light_background:\n facecolor = ax.get_facecolor()\n else:\n facecolor = \"#030303\"\n plt.savefig(output_path, facecolor=facecolor)\n plt.close(\"all\")\n"
] | [
[
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.show",
"matplotlib.pyplot.close"
]
] |
playma/stockAI-trading_calendars | [
"97aa9451961b000ef38e791c394c450015f4724d"
] | [
"trading_calendars/exchange_calendar_twse.py"
] | [
"from datetime import time\nimport pandas as pd\nfrom pytz import timezone\nfrom .precomputed_trading_calendar import PrecomputedTradingCalendar\n\nprecomputed_taiwan_holidays = pd.to_datetime([\n \"1999-01-01\",\n \"1999-02-10\",\n \"1999-02-11\",\n \"1999-02-12\",\n \"1999-02-15\",\n \"1999-02-16\"\n # TODO\n])\n\n\nclass TWSEExchangeCalendar(PrecomputedTradingCalendar):\n \"\"\"\n Exchange calendar for the Taiwan Stock Exchange (TWSE).\n\n Open time: 9:00 Asia/Taipei\n Close time: 13:30 Asia/Taipei\n\n Due to the complexity around the Taiwan exchange holidays, we are\n hardcoding a list of holidays covering 1999-2025, inclusive. There are\n no known early closes or late opens.\n \"\"\"\n\n name = \"TWSE\"\n tz = timezone(\"Asia/Taipei\")\n open_times = (\n (None, time(9, 1)),\n )\n close_times = (\n (None, time(13, 30)),\n )\n\n @property\n def precomputed_holidays(self):\n return precomputed_taiwan_holidays\n"
] | [
[
"pandas.to_datetime"
]
] |
lucgiffon/psm-nets | [
"dec43c26281febf6e5c8b8f42bfb78098ae7101d"
] | [
"code/scripts/2020/04/11_12_fine_tune_palminized.py"
] | [
"\"\"\"\nThis script finds a palminized model with given arguments then finetune it.\n\nUsage:\n script.py --input-dir path [-h] [-v|-vv] [--seed int] [--train-val-split float] [--keep-last-layer] [--lr float] [--use-clr policy] [--min-lr float --max-lr float] [--epoch-step-size int] [--nb-epoch int] [--only-mask] [--tb] (--mnist|--svhn|--cifar10|--cifar100|--test-data) [--cifar100-resnet50|--cifar100-resnet20|--mnist-500|--mnist-lenet|--test-model|--cifar10-vgg19|--cifar100-vgg19|--svhn-vgg19] --sparsity-factor=int [--nb-iteration-palm=int] [--delta-threshold=float] [--hierarchical] [--nb-factor=int]\n\nOptions:\n -h --help Show this screen.\n -vv Set verbosity to debug.\n -v Set verbosity to info.\n --seed int The seed for the experiments\n --input-dir path Path to input directory where to find previously generated results.\n --tb Tell if tensorboard should be printed.\n --lr float Flat lr to be used (Overidable)\n --min-lr float Tells the min reasonable lr (Overide everything else).\n --max-lr float Tells the max reasonable lr (Overide everything else).\n --nb-epoch int Number of epochs of training (Overide everything else).\n --epoch-step-size int Number of epochs for an half cycle of CLR.\n --use-clr policy Tell to use clr. Policy can be \"triangular\" or \"triangular2\" (see Cyclical learning rate)\n --keep-last-layer Do not compress classification layer.\n --train-val-split float Tells the proportion of validation data. If not specified, validation data is test data.\n\n\nDataset:\n --mnist Use Mnist dataset.\n --svhn Use svhn dataset.\n --cifar10 Use cifar10 dataset.\n --cifar100 Use cifar100 dataset.\n --test-data Use test datasset (that is actually mnist).\n\nModel:\n --mnist-lenet Use model lenet pretrained for mnist.\n --test-model Use test, small, model.\n --cifar10-vgg19 Use model vgg19 pretrained on cifar10.\n --cifar100-vgg19 Use model vgg19 pretrained on cifar100.\n --svhn-vgg19 Use model vgg19 pretrained on svhn.\n --mnist-500 Use model fc 500 hidden units pretrained on mnist.\n --cifar100-resnet50 Use model resnet50 pretrained on cifar100.\n --cifar100-resnet20 Use model resnet20 pretrained on cifar100.\n\nPalm-Specifc options:\n --sparsity-factor=int Integer coefficient from which is computed the number of value in each factor.\n --nb-iteration-palm=int Number of iterations in the inner palm4msa calls. [default: 300]\n --delta-threshold=float Threshold value before stopping palm iterations. [default: 1e-6]\n --hierarchical Tells if palm should use the hierarchical euristic or not. 
Muhc longer but better approximation results.\n --nb-factor=int Tells the number of sparse factor for palm\n --only-mask Use only sparsity mask given by palm but re-initialize weights.\n\"\"\"\nimport logging\nimport os\nimport pickle\nimport pandas as pd\nimport sys\nfrom collections import defaultdict\nfrom sklearn.model_selection import train_test_split\nimport time\nfrom copy import deepcopy\nimport keras\nfrom keras.engine import Model, InputLayer\nimport signal\nimport docopt\nfrom scipy.sparse import coo_matrix\nfrom palmnet.utils import CyclicLR\n\nfrom palmnet.core.palminizer import Palminizer\nfrom palmnet.core.palminizable import Palminizable\nfrom palmnet.data import Mnist, Test, Svhn, Cifar100, Cifar10\n# from palmnet.layers.sparse_tensor import SparseFactorisationDense#, SparseFactorisationConv2DDensify\nfrom palmnet.layers.sparse_facto_conv2D_masked import SparseFactorisationConv2D\nfrom palmnet.layers.sparse_facto_dense_masked import SparseFactorisationDense\nfrom palmnet.utils import get_sparsity_pattern, insert_layer_nonseq, timeout_signal_handler, get_lr_metric, CSVLoggerByBatch\nfrom palmnet.experiments.utils import ParameterManagerPalminize, ParameterManagerPalminizeFinetune, ResultPrinter\nfrom skluc.utils import logger, log_memory_usage\nfrom keras.layers import Dense, Conv2D\nimport numpy as np\nimport keras.backend as K\nfrom palmnet.core import palminizable\nfrom palmnet.core.palminizer import Palminizer\npalminizable.Palminizer = Palminizer\nimport sys\nsys.modules[\"palmnet.core.palminize\"] = palminizable\nlst_results_header = [\n \"test_accuracy_finetuned_model\"\n]\n\ndef get_idx_last_dense_layer(model):\n idx_last_dense_layer = -1\n for i, layer in enumerate(model.layers):\n if isinstance(layer, Dense):\n idx_last_dense_layer = i\n if idx_last_dense_layer == -1:\n logger.warning(\"No dense layer found\")\n return idx_last_dense_layer\n\ndef replace_layers_with_sparse_facto(model, dct_name_facto):\n new_model = deepcopy(model)\n log_memory_usage(\"After copy model\")\n lst_tpl_str_bool_new_model_layers = []\n dct_new_layer_attr = defaultdict(lambda: {})\n\n idx_last_dense_layer = get_idx_last_dense_layer(new_model) if paraman[\"--keep-last-layer\"] else -1\n\n for i, layer in enumerate(new_model.layers):\n layer_name = layer.name\n sparse_factorization = dct_name_facto[layer_name]\n logger.info('Prepare layer {}'.format(layer.name))\n # if sparse_factorization != (None, None) and (i != idx_last_dense_layer and paraman[\"--keep-last-layer\"]):\n if sparse_factorization != (None, None) and not (i == idx_last_dense_layer and paraman[\"--keep-last-layer\"]):\n # scaling = 1.\n if paraman[\"--only-mask\"]:\n scaling = []\n else:\n scaling = [np.array(sparse_factorization[0])[None]]\n # factors_sparse = [coo_matrix(fac.toarray()) for fac in sparse_factorization[1].get_list_of_factors()]\n factors = [fac.toarray() for fac in sparse_factorization[1].get_list_of_factors()]\n # sparsity_patterns = [get_sparsity_pattern(w.toarray()) for w in factors]\n sparsity_patterns = [get_sparsity_pattern(w) for w in factors]\n nb_val_sparse_factors = np.sum([np.sum(fac) for fac in sparsity_patterns])\n # factor_data_sparse = [f.data for f in factors_sparse]\n factor_data = factors\n reconstructed_matrix = np.linalg.multi_dot(factors) * scaling[0]\n nb_val_full_matrix = np.prod(reconstructed_matrix.shape)\n\n if nb_val_full_matrix <= nb_val_sparse_factors:\n logger.info(\"Less values in full matrix than factorization. Keep full matrix. 
{} <= {}\".format(nb_val_full_matrix, nb_val_sparse_factors))\n dct_new_layer_attr[layer_name][\"modified\"] = False\n lst_tpl_str_bool_new_model_layers.append((layer_name, False))\n dct_new_layer_attr[layer_name][\"layer_obj\"] = layer\n continue\n\n base_palminized_matrix = np.reshape(layer.get_weights()[0], reconstructed_matrix.shape)\n diff = np.linalg.norm(base_palminized_matrix - reconstructed_matrix) / np.linalg.norm(base_palminized_matrix)\n # assert np.allclose(diff, 0, atol=1e-5), \"Reconstructed is different than base\"\n\n # create new layer\n if isinstance(layer, Dense):\n logger.debug(\"Dense layer treatment\")\n hidden_layer_dim = layer.units\n activation = layer.activation\n regularizer = layer.kernel_regularizer\n replacing_layer = SparseFactorisationDense(use_scaling=not paraman[\"--only-mask\"], units=hidden_layer_dim, sparsity_patterns=sparsity_patterns, use_bias=layer.use_bias, activation=activation, kernel_regularizer=regularizer)\n replacing_weights = scaling + factor_data + [layer.get_weights()[-1]] if layer.use_bias else []\n # new_model = insert_layer_nonseq(new_model, layer_name, lambda: replacing_layer, position=\"replace\")\n # replacing_layer.set_weights(replacing_weights)\n\n elif isinstance(layer, Conv2D):\n logger.debug(\"Conv2D layer treatment\")\n nb_filters = layer.filters\n strides = layer.strides\n kernel_size = layer.kernel_size\n activation = layer.activation\n padding = layer.padding\n regularizer = layer.kernel_regularizer\n replacing_layer = SparseFactorisationConv2D(use_scaling=not paraman[\"--only-mask\"], strides=strides, filters=nb_filters, kernel_size=kernel_size, sparsity_patterns=sparsity_patterns, use_bias=layer.use_bias, activation=activation, padding=padding, kernel_regularizer=regularizer)\n replacing_weights = scaling + factor_data + [layer.get_weights()[-1]] if layer.use_bias else []\n # new_model = insert_layer_nonseq(new_model, layer_name, lambda: replacing_layer, position=\"replace\")\n # replacing_layer.set_weights(replacing_weights)\n\n else:\n raise ValueError(\"unknown layer class\")\n\n dct_new_layer_attr[layer_name][\"layer_weights\"] = replacing_weights\n dct_new_layer_attr[layer_name][\"sparsity_pattern\"] = sparsity_patterns\n dct_new_layer_attr[layer_name][\"layer_obj\"] = replacing_layer\n dct_new_layer_attr[layer_name][\"modified\"] = True\n\n lst_tpl_str_bool_new_model_layers.append((layer_name, True))\n else:\n dct_new_layer_attr[layer_name][\"modified\"] = False\n lst_tpl_str_bool_new_model_layers.append((layer_name, False))\n dct_new_layer_attr[layer_name][\"layer_obj\"] = layer\n\n log_memory_usage(\"After prepare all sparse layers \")\n\n network_dict = {'input_layers_of': defaultdict(lambda: []), 'new_output_tensor_of': defaultdict(lambda: [])}\n\n if not isinstance(new_model.layers[0], InputLayer):\n new_model = Model(input=new_model.input, output=new_model.output)\n\n # Set the input layers of each layer\n for layer in new_model.layers:\n # each layer is set as `input` layer of all its outbound layers\n for node in layer._outbound_nodes:\n outbound_layer_name = node.outbound_layer.name\n # if outbound_layer_name not in network_dict\n # network_dict['input_layers_of'].update({outbound_layer_name: [layer.name]})\n network_dict['input_layers_of'][outbound_layer_name].append(layer.name)\n\n # Set the output tensor of the input layer\n network_dict['new_output_tensor_of'].update(\n {new_model.layers[0].name: new_model.input})\n\n for layer in new_model.layers[1:]:\n log_memory_usage(\"Before layer 
{}\".format(layer.name))\n layer_name = layer.name\n\n layer_input = [network_dict['new_output_tensor_of'][layer_aux] for layer_aux in network_dict['input_layers_of'][layer.name]]\n\n if len(layer_input) == 1:\n layer_input = layer_input[0]\n\n proxy_new_layer_attr = dct_new_layer_attr[layer_name]\n\n if proxy_new_layer_attr[\"modified\"]:\n x = layer_input\n\n new_layer = proxy_new_layer_attr[\"layer_obj\"] # type: keras.layers.Layer\n new_layer.name = '{}_{}'.format(layer.name,\n new_layer.name)\n x = new_layer(x)\n\n if not paraman[\"--only-mask\"]:\n if layer.use_bias:\n reconstructed_matrix = np.linalg.multi_dot(proxy_new_layer_attr[\"layer_weights\"][1:-1]) * proxy_new_layer_attr[\"layer_weights\"][0]\n else:\n reconstructed_matrix = np.linalg.multi_dot(proxy_new_layer_attr[\"layer_weights\"][1:]) * proxy_new_layer_attr[\"layer_weights\"][0]\n\n base_palminized_matrix = np.reshape(layer.get_weights()[0], reconstructed_matrix.shape)\n diff = np.linalg.norm(base_palminized_matrix - reconstructed_matrix) / np.linalg.norm(base_palminized_matrix)\n # assert np.allclose(diff, 0, atol=1e-5), \"Reconstructed is different than base\"\n del base_palminized_matrix\n\n new_layer.set_weights(proxy_new_layer_attr[\"layer_weights\"])\n\n else:\n masked_weights = []\n i = 0\n for w in new_layer.get_weights():\n if len(w.shape) > 1:\n new_weight = w * proxy_new_layer_attr[\"sparsity_pattern\"][i]\n i += 1\n else:\n new_weight = w\n masked_weights.append(new_weight)\n new_layer.set_weights(masked_weights)\n\n logger.info('Layer {} modified into {}'.format(layer.name, new_layer.name))\n else:\n x = layer(layer_input)\n logger.info('Layer {} unmodified'.format(layer.name))\n\n network_dict['new_output_tensor_of'].update({layer.name: x})\n\n del dct_new_layer_attr[layer_name]\n\n new_model = Model(inputs=new_model.inputs, outputs=x)\n\n return new_model\n\ndef main():\n\n if paraman[\"--mnist-lenet\"]:\n param_train_dataset = Mnist.get_model_param_training()\n elif paraman[\"--mnist-500\"]:\n param_train_dataset = Mnist.get_model_param_training(\"mnist_500\")\n elif paraman[\"--cifar10-vgg19\"]:\n param_train_dataset = Cifar10.get_model_param_training()\n elif paraman[\"--cifar100-vgg19\"]:\n param_train_dataset = Cifar100.get_model_param_training()\n elif paraman[\"--cifar100-resnet20\"] or paraman[\"--cifar100-resnet50\"]:\n param_train_dataset = Cifar100.get_model_param_training(\"cifar100_resnet\")\n elif paraman[\"--svhn-vgg19\"]:\n param_train_dataset = Svhn.get_model_param_training()\n elif paraman[\"--test-model\"]:\n param_train_dataset = Test.get_model_param_training()\n else:\n raise NotImplementedError(\"No dataset specified.\")\n\n (x_train, y_train), (x_test, y_test) = paraman.get_dataset().load_data()\n\n if paraman[\"--mnist-500\"]:\n x_test = np.reshape(x_test, (-1, 784))\n x_train = np.reshape(x_train, (-1, 784))\n\n if paraman[\"--train-val-split\"] is not None:\n x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=paraman[\"--train-val-split\"], random_state=paraman[\"--seed\"])\n\n else:\n x_val, y_val = x_test, y_test\n\n # noinspection PyUnreachableCode\n if os.path.exists(paraman[\"output_file_notfinishedprinter\"]):\n df = pd.read_csv(paraman[\"output_file_resprinter\"])\n init_nb_epoch = pd.read_csv(paraman[\"output_file_csvcbprinter\"])[\"epoch\"].max() -1\n logger.debug(\"Loaded results \" + str(df))\n base_score = float(df[\"base_score\"])\n before_finetuned_score = float(df[\"before_finetuned_score\"])\n palminized_score = 
float(df[\"palminized_score\"])\n actual_learning_rate = float(df[\"actual-lr\"])\n fine_tuned_model = keras.models.load_model(paraman[\"output_file_modelprinter\"],custom_objects={'SparseFactorisationConv2D':SparseFactorisationConv2D,\n \"SparseFactorisationDense\": SparseFactorisationDense})\n else:\n init_nb_epoch = 0\n\n mypalminizedmodel = pickle.load(open(paraman[\"input_model_path\"], \"rb\"))\n log_memory_usage(\"After load mypalminized model\")\n base_model = mypalminizedmodel.base_model\n dct_name_facto = mypalminizedmodel.sparsely_factorized_layers\n base_score = base_model.evaluate(x_test, y_test, verbose=0)[1]\n print(base_score)\n palminized_model = mypalminizedmodel.compressed_model\n palminized_score = palminized_model.evaluate(x_test, y_test, verbose=1)[1]\n print(palminized_score)\n fine_tuned_model = replace_layers_with_sparse_facto(palminized_model, dct_name_facto)\n log_memory_usage(\"After get_finetuned_model\")\n # fine_tuned_model = palminized_model\n\n input_by_shape = {(32,32,3): x_test[:3]}\n\n # for i, layer in enumerate(palminized_model.layers[1:]):\n # i = i+1\n # print(\"Start with layer {}\".format(layer.name))\n # dense_palm_layer = layer\n # sparsefacto_palm_layer = fine_tuned_model.layers[i]\n #\n # dense_layer_output_function = K.function([dense_palm_layer.input],\n # [dense_palm_layer.output])\n #\n # sparsefacto_layer_outut_function = K.function([sparsefacto_palm_layer.get_input_at(-1)],\n # [sparsefacto_palm_layer.get_output_at(-1)])\n #\n # necessary_input_shapes = [tuple(inpt.shape.as_list()[1:]) for inpt in dense_layer_output_function.inputs]\n # input_data_layer = [input_by_shape[shap] for shap in necessary_input_shapes]\n #\n # dense_layer_output = dense_layer_output_function(input_data_layer)[0]\n # sparsefacto_layer_output = sparsefacto_layer_outut_function(input_data_layer)[0]\n #\n # # try:\n # assert np.allclose(np.linalg.norm(dense_layer_output - sparsefacto_layer_output) / np.linalg.norm(dense_layer_output), 0, atol=1e-5)\n # # except:\n # # print(\"error\")\n # input_by_shape[dense_layer_output.shape[1:]] = dense_layer_output\n\n params_optimizer = param_train_dataset.params_optimizer\n\n params_optimizer[\"lr\"] = paraman[\"--lr\"] if paraman[\"--lr\"] is not None else params_optimizer[\"lr\"]\n\n fine_tuned_model.compile(loss=param_train_dataset.loss,\n optimizer=param_train_dataset.optimizer(**params_optimizer),\n metrics=['categorical_accuracy'])\n # metrics=['categorical_accuracy', get_lr_metric(param_train_dataset.optimizer)])\n\n before_finetuned_score = fine_tuned_model.evaluate(x_test, y_test, verbose=1)[1]\n print(before_finetuned_score)\n actual_learning_rate = K.eval(fine_tuned_model.optimizer.lr)\n\n # results must be already printed once in case process is killed afterward\n dct_results = {\n \"actual-lr\": actual_learning_rate,\n \"finetuned_score\": None,\n \"before_finetuned_score\": before_finetuned_score,\n \"base_score\": base_score,\n \"palminized_score\": palminized_score,\n }\n resprinter.add(dct_results)\n resprinter.print()\n\n # if paraman[\"--hierarchical\"]:\n # if not paraman[\"--only-mask\"]:\n # assert before_finetuned_score == palminized_score, \\\n # \"the reconstructed model with sparse facto should equal in perf to the reconstructed model with dense product. 
{} != {}\".format(before_finetuned_score, palminized_score)\n # else: # small fix for a bug where when I wasn't using hierarchical palm returned a matrix that wasn't multiplied by lambda\n # # this should pass until results are generated without bug..\n # assert before_finetuned_score != palminized_score, \\\n # \"the reconstructed model with sparse facto should equal in perf to the reconstructed model with dense product. {} != {}\".format(before_finetuned_score, palminized_score)\n fine_tuned_model.summary()\n\n call_backs = []\n\n model_checkpoint_callback = keras.callbacks.ModelCheckpoint(str(paraman[\"output_file_modelprinter\"]),\n monitor='val_loss',\n verbose=0, save_best_only=False,\n save_weights_only=False, mode='auto', period=1)\n call_backs.append(model_checkpoint_callback)\n if paraman[\"--tb\"]:\n tbCallBack = keras.callbacks.TensorBoard(log_dir=str(paraman[\"output_file_tensorboardprinter\"]), histogram_freq=20, write_graph=False, write_images=False, batch_size=param_train_dataset.batch_size, write_grads=True, update_freq=\"epoch\")\n call_backs.append(tbCallBack)\n\n actual_min_lr = param_train_dataset.min_lr if paraman[\"--min-lr\"] is None else paraman[\"--min-lr\"]\n actual_max_lr = param_train_dataset.max_lr if paraman[\"--max-lr\"] is None else paraman[\"--max-lr\"]\n if paraman[\"--use-clr\"] is not None:\n clr_cb = CyclicLR(base_lr=actual_min_lr,\n max_lr=actual_max_lr,\n step_size=(paraman[\"--epoch-step-size\"]*(x_train.shape[0] // param_train_dataset.batch_size)),\n logrange=True,\n mode=paraman[\"--use-clr\"])\n call_backs.append(clr_cb)\n\n csvcallback = CSVLoggerByBatch(str(paraman[\"output_file_csvcbprinter\"]), n_batch_between_display=100, separator=',', append=True)\n call_backs.append(csvcallback)\n\n finetuned_score = None\n\n open(paraman[\"output_file_notfinishedprinter\"], 'w').close()\n actual_number_of_epochs = (param_train_dataset.epochs if paraman[\"--nb-epoch\"] is None else paraman[\"--nb-epoch\"])\n actual_batch_size = param_train_dataset.batch_size\n history = fine_tuned_model.fit(param_train_dataset.image_data_generator.flow(x_train, y_train, batch_size=param_train_dataset.batch_size),\n epochs= actual_number_of_epochs - init_nb_epoch,\n # epochs=2 - init_nb_epoch,\n verbose=2,\n validation_data=(x_val, y_val),\n callbacks=param_train_dataset.callbacks + call_backs)\n\n finetuned_score = fine_tuned_model.evaluate(x_test, y_test, verbose=1)[1]\n print(finetuned_score)\n\n if os.path.exists(paraman[\"output_file_notfinishedprinter\"]):\n os.remove(paraman[\"output_file_notfinishedprinter\"])\n\n\n dct_results = {\n \"actual-batch-size\": actual_batch_size,\n \"actual-nb-epochs\": actual_number_of_epochs,\n \"actual-min-lr\":actual_min_lr,\n \"actual-max-lr\":actual_max_lr,\n \"actual-lr\": actual_learning_rate,\n \"finetuned_score\": finetuned_score,\n \"before_finetuned_score\": before_finetuned_score,\n \"base_score\": base_score,\n \"palminized_score\": palminized_score,\n }\n fine_tuned_model.save(str(paraman[\"output_file_modelprinter\"]))\n resprinter.add(dct_results)\n\n\nif __name__ == \"__main__\":\n logger.info(\"Command line: \" + \" \".join(sys.argv))\n log_memory_usage(\"Memory at startup\")\n arguments = docopt.docopt(__doc__)\n paraman = ParameterManagerPalminizeFinetune(arguments)\n initialized_results = dict((v, None) for v in lst_results_header)\n resprinter = ResultPrinter(output_file=paraman[\"output_file_resprinter\"])\n resprinter.add(initialized_results)\n resprinter.add(paraman)\n if paraman[\"-v\"] >= 2:\n 
logger.setLevel(level=logging.DEBUG)\n elif paraman[\"-v\"] >= 1:\n logger.setLevel(level=logging.INFO)\n else:\n logger.setLevel(level=logging.WARNING)\n\n logger.warning(\"Verbosity set to warning\")\n logger.info(\"Verbosity set to info\")\n logger.debug(\"Verbosity set to debug\")\n\n if not os.path.exists(paraman[\"output_file_notfinishedprinter\"]) and \\\n os.path.exists(paraman[\"output_file_resprinter\"]) and \\\n os.path.exists(paraman[\"output_file_modelprinter\"]):\n sys.exit(\"Expe {} already executed. Exit\".format(paraman[\"hash\"]))\n\n has_failed = False\n try:\n main()\n except Exception as e:\n has_failed = True\n raise e\n\n finally:\n failure_dict = {\n \"failure\": has_failed\n }\n\n resprinter.add(failure_dict)\n resprinter.print()"
] | [
[
"numpy.sum",
"pandas.read_csv",
"numpy.reshape",
"numpy.linalg.multi_dot",
"numpy.prod",
"numpy.array",
"numpy.linalg.norm",
"sklearn.model_selection.train_test_split"
]
] |
dgwakeman/mne-python | [
"3cc7a3f8456d78c828355f1860dd7e0297e59c73"
] | [
"mne/forward/tests/test_forward.py"
] | [
"import os\nimport os.path as op\nimport warnings\nimport gc\n\nfrom nose.tools import assert_true, assert_raises\nimport numpy as np\nfrom numpy.testing import (assert_array_almost_equal, assert_equal,\n assert_array_equal, assert_allclose)\n\nfrom mne.datasets import testing\nfrom mne.io import Raw\nfrom mne import (read_forward_solution, apply_forward, apply_forward_raw,\n average_forward_solutions, write_forward_solution,\n convert_forward_solution)\nfrom mne import SourceEstimate, pick_types_forward, read_evokeds\nfrom mne.label import read_label\nfrom mne.utils import (requires_mne, run_subprocess, _TempDir,\n run_tests_if_main, slow_test)\nfrom mne.forward import (restrict_forward_to_stc, restrict_forward_to_label,\n Forward)\n\ndata_path = testing.data_path(download=False)\nfname_meeg = op.join(data_path, 'MEG', 'sample',\n 'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')\nfname_meeg_grad = op.join(data_path, 'MEG', 'sample',\n 'sample_audvis_trunc-meg-eeg-oct-2-grad-fwd.fif')\n\nfname_raw = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data',\n 'test_raw.fif')\n\nfname_evoked = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',\n 'data', 'test-ave.fif')\nfname_mri = op.join(data_path, 'MEG', 'sample',\n 'sample_audvis_trunc-trans.fif')\nsubjects_dir = os.path.join(data_path, 'subjects')\nfname_src = op.join(subjects_dir, 'sample', 'bem', 'sample-oct-4-src.fif')\n\n\ndef compare_forwards(f1, f2):\n \"\"\"Helper to compare two potentially converted forward solutions\"\"\"\n assert_allclose(f1['sol']['data'], f2['sol']['data'])\n assert_equal(f1['sol']['ncol'], f2['sol']['ncol'])\n assert_allclose(f1['source_nn'], f2['source_nn'])\n if f1['sol_grad'] is not None:\n assert_true(f2['sol_grad'] is not None)\n assert_allclose(f1['sol_grad']['data'], f2['sol_grad']['data'])\n assert_equal(f1['sol_grad']['ncol'], f2['sol_grad']['ncol'])\n else:\n assert_true(f2['sol_grad'] is None)\n assert_equal(f1['source_ori'], f2['source_ori'])\n assert_equal(f1['surf_ori'], f2['surf_ori'])\n\n\[email protected]_testing_data\ndef test_convert_forward():\n \"\"\"Test converting forward solution between different representations\n \"\"\"\n fwd = read_forward_solution(fname_meeg_grad)\n assert_true(repr(fwd))\n assert_true(isinstance(fwd, Forward))\n # look at surface orientation\n fwd_surf = convert_forward_solution(fwd, surf_ori=True)\n fwd_surf_io = read_forward_solution(fname_meeg_grad, surf_ori=True)\n compare_forwards(fwd_surf, fwd_surf_io)\n del fwd_surf_io\n gc.collect()\n # go back\n fwd_new = convert_forward_solution(fwd_surf, surf_ori=False)\n assert_true(repr(fwd_new))\n assert_true(isinstance(fwd_new, Forward))\n compare_forwards(fwd, fwd_new)\n # now go to fixed\n fwd_fixed = convert_forward_solution(fwd_surf, surf_ori=False,\n force_fixed=True)\n del fwd_surf\n gc.collect()\n assert_true(repr(fwd_fixed))\n assert_true(isinstance(fwd_fixed, Forward))\n fwd_fixed_io = read_forward_solution(fname_meeg_grad, surf_ori=False,\n force_fixed=True)\n compare_forwards(fwd_fixed, fwd_fixed_io)\n del fwd_fixed_io\n gc.collect()\n # now go back to cartesian (original condition)\n fwd_new = convert_forward_solution(fwd_fixed)\n assert_true(repr(fwd_new))\n assert_true(isinstance(fwd_new, Forward))\n compare_forwards(fwd, fwd_new)\n del fwd, fwd_new, fwd_fixed\n gc.collect()\n\n\n@slow_test\[email protected]_testing_data\ndef test_io_forward():\n \"\"\"Test IO for forward solutions\n \"\"\"\n temp_dir = _TempDir()\n # do extensive tests with MEEG + grad\n n_channels, n_src = 366, 108\n fwd = 
read_forward_solution(fname_meeg_grad)\n assert_true(isinstance(fwd, Forward))\n fwd = read_forward_solution(fname_meeg_grad, surf_ori=True)\n leadfield = fwd['sol']['data']\n assert_equal(leadfield.shape, (n_channels, n_src))\n assert_equal(len(fwd['sol']['row_names']), n_channels)\n fname_temp = op.join(temp_dir, 'test-fwd.fif')\n write_forward_solution(fname_temp, fwd, overwrite=True)\n\n fwd = read_forward_solution(fname_meeg_grad, surf_ori=True)\n fwd_read = read_forward_solution(fname_temp, surf_ori=True)\n leadfield = fwd_read['sol']['data']\n assert_equal(leadfield.shape, (n_channels, n_src))\n assert_equal(len(fwd_read['sol']['row_names']), n_channels)\n assert_equal(len(fwd_read['info']['chs']), n_channels)\n assert_true('dev_head_t' in fwd_read['info'])\n assert_true('mri_head_t' in fwd_read)\n assert_array_almost_equal(fwd['sol']['data'], fwd_read['sol']['data'])\n\n fwd = read_forward_solution(fname_meeg_grad, force_fixed=True)\n leadfield = fwd['sol']['data']\n assert_equal(leadfield.shape, (n_channels, n_src / 3))\n assert_equal(len(fwd['sol']['row_names']), n_channels)\n assert_equal(len(fwd['info']['chs']), n_channels)\n assert_true('dev_head_t' in fwd['info'])\n assert_true('mri_head_t' in fwd)\n assert_true(fwd['surf_ori'])\n\n # test warnings on bad filenames\n fwd = read_forward_solution(fname_meeg_grad)\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter('always')\n fwd_badname = op.join(temp_dir, 'test-bad-name.fif.gz')\n write_forward_solution(fwd_badname, fwd)\n read_forward_solution(fwd_badname)\n assert_true(len(w) == 2)\n\n fwd = read_forward_solution(fname_meeg)\n write_forward_solution(fname_temp, fwd, overwrite=True)\n fwd_read = read_forward_solution(fname_temp)\n compare_forwards(fwd, fwd_read)\n\n\[email protected]_testing_data\ndef test_apply_forward():\n \"\"\"Test projection of source space data to sensor space\n \"\"\"\n start = 0\n stop = 5\n n_times = stop - start - 1\n sfreq = 10.0\n t_start = 0.123\n\n fwd = read_forward_solution(fname_meeg, force_fixed=True)\n fwd = pick_types_forward(fwd, meg=True)\n assert_true(isinstance(fwd, Forward))\n\n vertno = [fwd['src'][0]['vertno'], fwd['src'][1]['vertno']]\n stc_data = np.ones((len(vertno[0]) + len(vertno[1]), n_times))\n stc = SourceEstimate(stc_data, vertno, tmin=t_start, tstep=1.0 / sfreq)\n\n gain_sum = np.sum(fwd['sol']['data'], axis=1)\n\n # Evoked\n with warnings.catch_warnings(record=True) as w:\n evoked = read_evokeds(fname_evoked, condition=0)\n evoked = apply_forward(fwd, stc, evoked, start=start, stop=stop)\n assert_equal(len(w), 2)\n data = evoked.data\n times = evoked.times\n\n # do some tests\n assert_array_almost_equal(evoked.info['sfreq'], sfreq)\n assert_array_almost_equal(np.sum(data, axis=1), n_times * gain_sum)\n assert_array_almost_equal(times[0], t_start)\n assert_array_almost_equal(times[-1], t_start + (n_times - 1) / sfreq)\n\n # Raw\n raw = Raw(fname_raw)\n raw_proj = apply_forward_raw(fwd, stc, raw, start=start, stop=stop)\n data, times = raw_proj[:, :]\n\n # do some tests\n assert_array_almost_equal(raw_proj.info['sfreq'], sfreq)\n assert_array_almost_equal(np.sum(data, axis=1), n_times * gain_sum)\n atol = 1. 
/ sfreq\n assert_allclose(raw_proj.first_samp / sfreq, t_start, atol=atol)\n assert_allclose(raw_proj.last_samp / sfreq,\n t_start + (n_times - 1) / sfreq, atol=atol)\n\n\[email protected]_testing_data\ndef test_restrict_forward_to_stc():\n \"\"\"Test restriction of source space to source SourceEstimate\n \"\"\"\n start = 0\n stop = 5\n n_times = stop - start - 1\n sfreq = 10.0\n t_start = 0.123\n\n fwd = read_forward_solution(fname_meeg, force_fixed=True)\n fwd = pick_types_forward(fwd, meg=True)\n\n vertno = [fwd['src'][0]['vertno'][0:15], fwd['src'][1]['vertno'][0:5]]\n stc_data = np.ones((len(vertno[0]) + len(vertno[1]), n_times))\n stc = SourceEstimate(stc_data, vertno, tmin=t_start, tstep=1.0 / sfreq)\n\n fwd_out = restrict_forward_to_stc(fwd, stc)\n assert_true(isinstance(fwd_out, Forward))\n\n assert_equal(fwd_out['sol']['ncol'], 20)\n assert_equal(fwd_out['src'][0]['nuse'], 15)\n assert_equal(fwd_out['src'][1]['nuse'], 5)\n assert_equal(fwd_out['src'][0]['vertno'], fwd['src'][0]['vertno'][0:15])\n assert_equal(fwd_out['src'][1]['vertno'], fwd['src'][1]['vertno'][0:5])\n\n fwd = read_forward_solution(fname_meeg, force_fixed=False)\n fwd = pick_types_forward(fwd, meg=True)\n\n vertno = [fwd['src'][0]['vertno'][0:15], fwd['src'][1]['vertno'][0:5]]\n stc_data = np.ones((len(vertno[0]) + len(vertno[1]), n_times))\n stc = SourceEstimate(stc_data, vertno, tmin=t_start, tstep=1.0 / sfreq)\n\n fwd_out = restrict_forward_to_stc(fwd, stc)\n\n assert_equal(fwd_out['sol']['ncol'], 60)\n assert_equal(fwd_out['src'][0]['nuse'], 15)\n assert_equal(fwd_out['src'][1]['nuse'], 5)\n assert_equal(fwd_out['src'][0]['vertno'], fwd['src'][0]['vertno'][0:15])\n assert_equal(fwd_out['src'][1]['vertno'], fwd['src'][1]['vertno'][0:5])\n\n\[email protected]_testing_data\ndef test_restrict_forward_to_label():\n \"\"\"Test restriction of source space to label\n \"\"\"\n fwd = read_forward_solution(fname_meeg, force_fixed=True)\n fwd = pick_types_forward(fwd, meg=True)\n\n label_path = op.join(data_path, 'MEG', 'sample', 'labels')\n labels = ['Aud-lh', 'Vis-rh']\n label_lh = read_label(op.join(label_path, labels[0] + '.label'))\n label_rh = read_label(op.join(label_path, labels[1] + '.label'))\n\n fwd_out = restrict_forward_to_label(fwd, [label_lh, label_rh])\n\n src_sel_lh = np.intersect1d(fwd['src'][0]['vertno'], label_lh.vertices)\n src_sel_lh = np.searchsorted(fwd['src'][0]['vertno'], src_sel_lh)\n\n src_sel_rh = np.intersect1d(fwd['src'][1]['vertno'], label_rh.vertices)\n src_sel_rh = (np.searchsorted(fwd['src'][1]['vertno'], src_sel_rh) +\n len(fwd['src'][0]['vertno']))\n\n assert_equal(fwd_out['sol']['ncol'], len(src_sel_lh) + len(src_sel_rh))\n assert_equal(fwd_out['src'][0]['nuse'], len(src_sel_lh))\n assert_equal(fwd_out['src'][1]['nuse'], len(src_sel_rh))\n assert_equal(fwd_out['src'][0]['vertno'], src_sel_lh)\n assert_equal(fwd_out['src'][1]['vertno'], src_sel_rh)\n\n fwd = read_forward_solution(fname_meeg, force_fixed=False)\n fwd = pick_types_forward(fwd, meg=True)\n\n label_path = op.join(data_path, 'MEG', 'sample', 'labels')\n labels = ['Aud-lh', 'Vis-rh']\n label_lh = read_label(op.join(label_path, labels[0] + '.label'))\n label_rh = read_label(op.join(label_path, labels[1] + '.label'))\n\n fwd_out = restrict_forward_to_label(fwd, [label_lh, label_rh])\n\n src_sel_lh = np.intersect1d(fwd['src'][0]['vertno'], label_lh.vertices)\n src_sel_lh = np.searchsorted(fwd['src'][0]['vertno'], src_sel_lh)\n\n src_sel_rh = np.intersect1d(fwd['src'][1]['vertno'], label_rh.vertices)\n src_sel_rh = 
(np.searchsorted(fwd['src'][1]['vertno'], src_sel_rh) +\n len(fwd['src'][0]['vertno']))\n\n assert_equal(fwd_out['sol']['ncol'],\n 3 * (len(src_sel_lh) + len(src_sel_rh)))\n assert_equal(fwd_out['src'][0]['nuse'], len(src_sel_lh))\n assert_equal(fwd_out['src'][1]['nuse'], len(src_sel_rh))\n assert_equal(fwd_out['src'][0]['vertno'], src_sel_lh)\n assert_equal(fwd_out['src'][1]['vertno'], src_sel_rh)\n\n\[email protected]_testing_data\n@requires_mne\ndef test_average_forward_solution():\n \"\"\"Test averaging forward solutions\n \"\"\"\n temp_dir = _TempDir()\n fwd = read_forward_solution(fname_meeg)\n # input not a list\n assert_raises(TypeError, average_forward_solutions, 1)\n # list is too short\n assert_raises(ValueError, average_forward_solutions, [])\n # negative weights\n assert_raises(ValueError, average_forward_solutions, [fwd, fwd], [-1, 0])\n # all zero weights\n assert_raises(ValueError, average_forward_solutions, [fwd, fwd], [0, 0])\n # weights not same length\n assert_raises(ValueError, average_forward_solutions, [fwd, fwd], [0, 0, 0])\n # list does not only have all dict()\n assert_raises(TypeError, average_forward_solutions, [1, fwd])\n\n # try an easy case\n fwd_copy = average_forward_solutions([fwd])\n assert_true(isinstance(fwd_copy, Forward))\n assert_array_equal(fwd['sol']['data'], fwd_copy['sol']['data'])\n\n # modify a fwd solution, save it, use MNE to average with old one\n fwd_copy['sol']['data'] *= 0.5\n fname_copy = op.join(temp_dir, 'copy-fwd.fif')\n write_forward_solution(fname_copy, fwd_copy, overwrite=True)\n cmd = ('mne_average_forward_solutions', '--fwd', fname_meeg, '--fwd',\n fname_copy, '--out', fname_copy)\n run_subprocess(cmd)\n\n # now let's actually do it, with one filename and one fwd\n fwd_ave = average_forward_solutions([fwd, fwd_copy])\n assert_array_equal(0.75 * fwd['sol']['data'], fwd_ave['sol']['data'])\n # fwd_ave_mne = read_forward_solution(fname_copy)\n # assert_array_equal(fwd_ave_mne['sol']['data'], fwd_ave['sol']['data'])\n\n # with gradient\n fwd = read_forward_solution(fname_meeg_grad)\n fwd_ave = average_forward_solutions([fwd, fwd])\n compare_forwards(fwd, fwd_ave)\n\nrun_tests_if_main()\n"
] | [
[
"numpy.sum",
"numpy.searchsorted",
"numpy.intersect1d",
"numpy.testing.assert_equal",
"numpy.testing.assert_array_equal",
"numpy.testing.assert_array_almost_equal",
"numpy.testing.assert_allclose"
]
] |
christophbrgr/ood_detection_framework | [
"c3b7e3064ed8ee4aeb112cd2ab946ee41636f79f"
] | [
"models/wide_resnet.py"
] | [
"import sys\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.nn.init as init\nfrom torch.autograd import Variable\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=True)\n\n\ndef conv_init(m):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n init.xavier_uniform_(m.weight, gain=np.sqrt(2))\n init.constant_(m.bias, 0)\n elif classname.find('BatchNorm') != -1:\n init.constant_(m.weight, 1)\n init.constant_(m.bias, 0)\n\n\nclass wide_basic(nn.Module):\n def __init__(self, in_planes, planes, dropout_rate, stride=1):\n super(wide_basic, self).__init__()\n self.bn1 = nn.BatchNorm2d(in_planes)\n self.conv1 = nn.Conv2d(\n in_planes, planes, kernel_size=3, padding=1, bias=True)\n self.dropout = nn.Dropout(p=dropout_rate)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,\n stride=stride, padding=1, bias=True)\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_planes, planes, kernel_size=1,\n stride=stride, bias=True),\n )\n\n def forward(self, x):\n out = self.dropout(self.conv1(F.relu(self.bn1(x))))\n out = self.conv2(F.relu(self.bn2(out)))\n out += self.shortcut(x)\n\n return out\n\n\nclass Wide_ResNet(nn.Module):\n def __init__(self, depth, widen_factor, dropout_rate, num_classes):\n super(Wide_ResNet, self).__init__()\n self.in_planes = 16\n\n assert ((depth-4) % 6 == 0), 'Wide-resnet depth should be 6n+4'\n n = (depth-4)/6\n k = widen_factor\n\n print('Wide-Resnet %dx%d' % (depth, k))\n nStages = [16, 16*k, 32*k, 64*k]\n\n self.conv1 = conv3x3(3, nStages[0])\n self.layer1 = self._wide_layer(\n wide_basic, nStages[1], n, dropout_rate, stride=1)\n self.layer2 = self._wide_layer(\n wide_basic, nStages[2], n, dropout_rate, stride=2)\n self.layer3 = self._wide_layer(\n wide_basic, nStages[3], n, dropout_rate, stride=2)\n self.bn1 = nn.BatchNorm2d(nStages[3], momentum=0.9)\n self.linear = nn.Linear(nStages[3], num_classes)\n\n def _wide_layer(self, block, planes, num_blocks, dropout_rate, stride):\n strides = [stride] + [1]*(int(num_blocks)-1)\n layers = []\n\n for stride in strides:\n layers.append(block(self.in_planes, planes, dropout_rate, stride))\n self.in_planes = planes\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = F.relu(self.bn1(out))\n # print(f'Shape before avg pooling: {out.shape}')\n out = F.avg_pool2d(out, int(out.shape[3]))\n # print(f'Shape after avg pooling: {out.shape}')\n out = out.view(out.size(0), -1)\n penultimate = out\n out = self.linear(out)\n\n return out, penultimate\n\n # feature extraction for Mahalanobis\n def feature_list(self, x):\n out_list = []\n out = self.conv1(x)\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = F.relu(self.bn1(out))\n # print shape\n # print(f'Shape: {out.shape}')\n # out2 = F.max_pool3d(out, (4,4,4))\n out2 = F.max_pool2d(out, (8,8))\n out_list.append(out2)\n print(f'Shape: {out2.shape}')\n out = F.avg_pool2d(out, int(out.shape[3]))\n out = out.view(out.size(0), -1)\n\n return self.linear(out), out_list\n\n def intermediate_forward(self, x, layer_index):\n out = self.conv1(x)\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = F.relu(self.bn1(out))\n return F.max_pool2d(out, (8,8))# 
F.max_pool3d(out, (4,4,4))\n\n # function to extract the penultimate features\n def penultimate_forward(self, x):\n out = self.conv1(x)\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n penultimate = F.relu(self.bn1(out))\n penultimate = F.max_pool2d(penultimate, (8,8))\n # penultimate = F.max_pool3d(penultimate, (4,4,4))\n out = F.avg_pool2d(penultimate, int(out.shape[3]))\n out = out.view(out.size(0), -1)\n\n return self.linear(out), penultimate\n"
] | [
[
"torch.nn.BatchNorm2d",
"torch.nn.Linear",
"torch.nn.init.constant_",
"torch.nn.functional.max_pool2d",
"torch.nn.Conv2d",
"torch.nn.Sequential",
"numpy.sqrt",
"torch.nn.Dropout"
]
] |
esoha-nvidia/cudf | [
"663457b186bbf27ea2926e08438b8c01b5c7633e"
] | [
"python/cudf/cudf/tests/test_binops.py"
] | [
"# Copyright (c) 2018-2021, NVIDIA CORPORATION.\n\nfrom __future__ import division\n\nimport decimal\nimport operator\nimport random\nfrom itertools import product\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nimport cudf\nfrom cudf.core import Series\nfrom cudf.core.index import as_index\nfrom cudf.tests import utils\nfrom cudf.utils.dtypes import (\n BOOL_TYPES,\n DATETIME_TYPES,\n FLOAT_TYPES,\n INTEGER_TYPES,\n NUMERIC_TYPES,\n TIMEDELTA_TYPES,\n)\n\nSTRING_TYPES = {\"str\"}\n\n_binops = [\n operator.add,\n operator.sub,\n operator.mul,\n operator.floordiv,\n operator.truediv,\n operator.mod,\n operator.pow,\n]\n\n\[email protected](\"obj_class\", [\"Series\", \"Index\"])\[email protected](\"binop\", _binops)\ndef test_series_binop(binop, obj_class):\n nelem = 1000\n arr1 = utils.gen_rand(\"float64\", nelem) * 10000\n # Keeping a low value because CUDA 'pow' has 2 full range error\n arr2 = utils.gen_rand(\"float64\", nelem) * 10\n\n sr1 = Series(arr1)\n sr2 = Series(arr2)\n\n if obj_class == \"Index\":\n sr1 = as_index(sr1)\n sr2 = as_index(sr2)\n\n result = binop(sr1, sr2)\n expect = binop(pd.Series(arr1), pd.Series(arr2))\n\n if obj_class == \"Index\":\n result = Series(result)\n\n utils.assert_eq(result, expect)\n\n\[email protected](\"binop\", _binops)\ndef test_series_binop_concurrent(binop):\n def func(index):\n arr = np.random.random(100) * 10\n sr = Series(arr)\n\n result = binop(sr.astype(\"int32\"), sr)\n expect = binop(arr.astype(\"int32\"), arr)\n\n np.testing.assert_almost_equal(result.to_array(), expect, decimal=5)\n\n from concurrent.futures import ThreadPoolExecutor\n\n indices = range(10)\n with ThreadPoolExecutor(4) as e: # four processes\n list(e.map(func, indices))\n\n\[email protected](\"use_cudf_scalar\", [False, True])\[email protected](\"obj_class\", [\"Series\", \"Index\"])\[email protected](\"nelem,binop\", list(product([1, 2, 100], _binops)))\ndef test_series_binop_scalar(nelem, binop, obj_class, use_cudf_scalar):\n arr = np.random.random(nelem)\n rhs = random.choice(arr).item()\n\n sr = Series(arr)\n if obj_class == \"Index\":\n sr = as_index(sr)\n\n if use_cudf_scalar:\n result = binop(sr, rhs)\n else:\n result = binop(sr, cudf.Scalar(rhs))\n\n if obj_class == \"Index\":\n result = Series(result)\n\n np.testing.assert_almost_equal(result.to_array(), binop(arr, rhs))\n\n\n_bitwise_binops = [operator.and_, operator.or_, operator.xor]\n\n\n_int_types = [\n \"int8\",\n \"int16\",\n \"int32\",\n \"int64\",\n \"uint8\",\n \"uint16\",\n \"uint32\",\n]\n\n\[email protected](\"obj_class\", [\"Series\", \"Index\"])\[email protected](\"binop\", _bitwise_binops)\[email protected](\n \"lhs_dtype,rhs_dtype\", list(product(_int_types, _int_types))\n)\ndef test_series_bitwise_binop(binop, obj_class, lhs_dtype, rhs_dtype):\n arr1 = (np.random.random(100) * 100).astype(lhs_dtype)\n sr1 = Series(arr1)\n\n arr2 = (np.random.random(100) * 100).astype(rhs_dtype)\n sr2 = Series(arr2)\n\n if obj_class == \"Index\":\n sr1 = as_index(sr1)\n sr2 = as_index(sr2)\n\n result = binop(sr1, sr2)\n\n if obj_class == \"Index\":\n result = Series(result)\n\n np.testing.assert_almost_equal(result.to_array(), binop(arr1, arr2))\n\n\n_logical_binops = [\n (operator.and_, operator.and_),\n (operator.or_, operator.or_),\n (np.logical_and, cudf.logical_and),\n (np.logical_or, cudf.logical_or),\n]\n\n\[email protected](\"lhstype\", _int_types + [np.bool_])\[email protected](\"rhstype\", _int_types + [np.bool_])\[email protected](\"binop,cubinop\", _logical_binops)\ndef 
test_series_logical_binop(lhstype, rhstype, binop, cubinop):\n arr1 = pd.Series(np.random.choice([True, False], 10))\n if lhstype is not np.bool_:\n arr1 = arr1 * (np.random.random(10) * 100).astype(lhstype)\n sr1 = Series(arr1)\n\n arr2 = pd.Series(np.random.choice([True, False], 10))\n if rhstype is not np.bool_:\n arr2 = arr2 * (np.random.random(10) * 100).astype(rhstype)\n sr2 = Series(arr2)\n\n result = cubinop(sr1, sr2)\n expect = binop(arr1, arr2)\n\n utils.assert_eq(result, expect)\n\n\n_cmpops = [\n operator.lt,\n operator.gt,\n operator.le,\n operator.ge,\n operator.eq,\n operator.ne,\n]\n\n\[email protected](\"obj_class\", [\"Series\", \"Index\"])\[email protected](\"cmpop\", _cmpops)\[email protected](\n \"dtype\", [\"int8\", \"int32\", \"int64\", \"float32\", \"float64\", \"datetime64[ms]\"]\n)\ndef test_series_compare(cmpop, obj_class, dtype):\n arr1 = np.random.randint(0, 100, 100).astype(dtype)\n arr2 = np.random.randint(0, 100, 100).astype(dtype)\n sr1 = Series(arr1)\n sr2 = Series(arr2)\n\n if obj_class == \"Index\":\n sr1 = as_index(sr1)\n sr2 = as_index(sr2)\n\n result1 = cmpop(sr1, sr1)\n result2 = cmpop(sr2, sr2)\n result3 = cmpop(sr1, sr2)\n\n if obj_class == \"Index\":\n result1 = Series(result1)\n result2 = Series(result2)\n result3 = Series(result3)\n\n np.testing.assert_equal(result1.to_array(), cmpop(arr1, arr1))\n np.testing.assert_equal(result2.to_array(), cmpop(arr2, arr2))\n np.testing.assert_equal(result3.to_array(), cmpop(arr1, arr2))\n\n\ndef _series_compare_nulls_typegen():\n tests = []\n tests += list(product(DATETIME_TYPES, DATETIME_TYPES))\n tests += list(product(TIMEDELTA_TYPES, TIMEDELTA_TYPES))\n tests += list(product(NUMERIC_TYPES, NUMERIC_TYPES))\n tests += list(product(STRING_TYPES, STRING_TYPES))\n\n return tests\n\n\[email protected](\"cmpop\", _cmpops)\[email protected](\"dtypes\", _series_compare_nulls_typegen())\ndef test_series_compare_nulls(cmpop, dtypes):\n ltype, rtype = dtypes\n\n ldata = [1, 2, None, None, 5]\n rdata = [2, 1, None, 4, None]\n\n lser = Series(ldata, dtype=ltype)\n rser = Series(rdata, dtype=rtype)\n\n lmask = ~lser.isnull()\n rmask = ~rser.isnull()\n\n expect_mask = np.logical_and(lmask, rmask)\n expect = cudf.Series([None] * 5, dtype=\"bool\")\n expect[expect_mask] = cmpop(lser[expect_mask], rser[expect_mask])\n\n got = cmpop(lser, rser)\n utils.assert_eq(expect, got)\n\n\[email protected](\n \"obj\", [pd.Series([\"a\", \"b\", None, \"d\", \"e\", None], dtype=\"string\"), \"a\"]\n)\[email protected](\"cmpop\", _cmpops)\[email protected](\n \"cmp_obj\",\n [pd.Series([\"b\", \"a\", None, \"d\", \"f\", None], dtype=\"string\"), \"a\"],\n)\ndef test_string_series_compare(obj, cmpop, cmp_obj):\n\n g_obj = obj\n if isinstance(g_obj, pd.Series):\n g_obj = Series.from_pandas(g_obj)\n g_cmp_obj = cmp_obj\n if isinstance(g_cmp_obj, pd.Series):\n g_cmp_obj = Series.from_pandas(g_cmp_obj)\n got = cmpop(g_obj, g_cmp_obj)\n expected = cmpop(obj, cmp_obj)\n\n if isinstance(expected, pd.Series):\n expected = cudf.from_pandas(expected)\n\n utils.assert_eq(expected, got)\n\n\[email protected](\"obj_class\", [\"Series\", \"Index\"])\[email protected](\"nelem\", [1, 2, 100])\[email protected](\"cmpop\", _cmpops)\[email protected](\"dtype\", utils.NUMERIC_TYPES + [\"datetime64[ms]\"])\[email protected](\"use_cudf_scalar\", [True, False])\ndef test_series_compare_scalar(\n nelem, cmpop, obj_class, dtype, use_cudf_scalar\n):\n arr1 = np.random.randint(0, 100, 100).astype(dtype)\n sr1 = Series(arr1)\n rhs = random.choice(arr1).item()\n\n if 
use_cudf_scalar:\n rhs = cudf.Scalar(rhs)\n\n if obj_class == \"Index\":\n sr1 = as_index(sr1)\n\n result1 = cmpop(sr1, rhs)\n result2 = cmpop(rhs, sr1)\n\n if obj_class == \"Index\":\n result1 = Series(result1)\n result2 = Series(result2)\n\n np.testing.assert_equal(result1.to_array(), cmpop(arr1, rhs))\n np.testing.assert_equal(result2.to_array(), cmpop(rhs, arr1))\n\n\n_nulls = [\"none\", \"some\"]\n\n\[email protected](\"nelem\", [1, 7, 8, 9, 32, 64, 128])\[email protected](\"lhs_nulls,rhs_nulls\", list(product(_nulls, _nulls)))\ndef test_validity_add(nelem, lhs_nulls, rhs_nulls):\n np.random.seed(0)\n # LHS\n lhs_data = np.random.random(nelem)\n if lhs_nulls == \"some\":\n lhs_mask = utils.random_bitmask(nelem)\n lhs_bitmask = utils.expand_bits_to_bytes(lhs_mask)[:nelem]\n lhs_null_count = utils.count_zero(lhs_bitmask)\n assert lhs_null_count >= 0\n lhs = Series.from_masked_array(lhs_data, lhs_mask)\n assert lhs.null_count == lhs_null_count\n else:\n lhs = Series(lhs_data)\n # RHS\n rhs_data = np.random.random(nelem)\n if rhs_nulls == \"some\":\n rhs_mask = utils.random_bitmask(nelem)\n rhs_bitmask = utils.expand_bits_to_bytes(rhs_mask)[:nelem]\n rhs_null_count = utils.count_zero(rhs_bitmask)\n assert rhs_null_count >= 0\n rhs = Series.from_masked_array(rhs_data, rhs_mask)\n assert rhs.null_count == rhs_null_count\n else:\n rhs = Series(rhs_data)\n # Result\n res = lhs + rhs\n if lhs_nulls == \"some\" and rhs_nulls == \"some\":\n res_mask = np.asarray(\n utils.expand_bits_to_bytes(lhs_mask & rhs_mask), dtype=np.bool_\n )[:nelem]\n if lhs_nulls == \"some\" and rhs_nulls == \"none\":\n res_mask = np.asarray(\n utils.expand_bits_to_bytes(lhs_mask), dtype=np.bool_\n )[:nelem]\n if lhs_nulls == \"none\" and rhs_nulls == \"some\":\n res_mask = np.asarray(\n utils.expand_bits_to_bytes(rhs_mask), dtype=np.bool_\n )[:nelem]\n # Fill NA values\n na_value = -10000\n got = res.fillna(na_value).to_array()\n expect = lhs_data + rhs_data\n if lhs_nulls == \"some\" or rhs_nulls == \"some\":\n expect[~res_mask] = na_value\n\n np.testing.assert_array_equal(expect, got)\n\n\[email protected](\"obj_class\", [\"Series\", \"Index\"])\[email protected](\n \"binop,lhs_dtype,rhs_dtype\",\n list(\n product(\n [operator.add, operator.mul],\n utils.NUMERIC_TYPES,\n utils.NUMERIC_TYPES,\n )\n ),\n)\ndef test_series_binop_mixed_dtype(binop, lhs_dtype, rhs_dtype, obj_class):\n nelem = 10\n lhs = (np.random.random(nelem) * nelem).astype(lhs_dtype)\n rhs = (np.random.random(nelem) * nelem).astype(rhs_dtype)\n\n sr1 = Series(lhs)\n sr2 = Series(rhs)\n\n if obj_class == \"Index\":\n sr1 = as_index(sr1)\n sr2 = as_index(sr2)\n\n result = binop(Series(sr1), Series(sr2))\n\n if obj_class == \"Index\":\n result = Series(result)\n\n np.testing.assert_almost_equal(result.to_array(), binop(lhs, rhs))\n\n\[email protected](\"obj_class\", [\"Series\", \"Index\"])\[email protected](\n \"cmpop,lhs_dtype,rhs_dtype\",\n list(product(_cmpops, utils.NUMERIC_TYPES, utils.NUMERIC_TYPES)),\n)\ndef test_series_cmpop_mixed_dtype(cmpop, lhs_dtype, rhs_dtype, obj_class):\n nelem = 5\n lhs = (np.random.random(nelem) * nelem).astype(lhs_dtype)\n rhs = (np.random.random(nelem) * nelem).astype(rhs_dtype)\n\n sr1 = Series(lhs)\n sr2 = Series(rhs)\n\n if obj_class == \"Index\":\n sr1 = as_index(sr1)\n sr2 = as_index(sr2)\n\n result = cmpop(Series(sr1), Series(sr2))\n\n if obj_class == \"Index\":\n result = Series(result)\n\n np.testing.assert_array_equal(result.to_array(), cmpop(lhs, rhs))\n\n\n_reflected_ops = [\n lambda x: 1 + x,\n lambda x: 2 
* x,\n lambda x: 2 - x,\n lambda x: 2 // x,\n lambda x: 2 / x,\n lambda x: 3 + x,\n lambda x: 3 * x,\n lambda x: 3 - x,\n lambda x: 3 // x,\n lambda x: 3 / x,\n lambda x: 3 % x,\n lambda x: -1 + x,\n lambda x: -2 * x,\n lambda x: -2 - x,\n lambda x: -2 // x,\n lambda x: -2 / x,\n lambda x: -3 + x,\n lambda x: -3 * x,\n lambda x: -3 - x,\n lambda x: -3 // x,\n lambda x: -3 / x,\n lambda x: -3 % x,\n lambda x: 0 + x,\n lambda x: 0 * x,\n lambda x: 0 - x,\n lambda x: 0 // x,\n lambda x: 0 / x,\n]\n\n\[email protected](\"obj_class\", [\"Series\", \"Index\"])\[email protected](\n \"func, dtype\", list(product(_reflected_ops, utils.NUMERIC_TYPES))\n)\ndef test_reflected_ops_scalar(func, dtype, obj_class):\n # create random series\n np.random.seed(12)\n random_series = utils.gen_rand(dtype, 100, low=10)\n\n # gpu series\n gs = Series(random_series)\n\n # class typing\n if obj_class == \"Index\":\n gs = as_index(gs)\n\n gs_result = func(gs)\n\n # class typing\n if obj_class == \"Index\":\n gs = Series(gs)\n\n # pandas\n ps_result = func(random_series)\n\n # verify\n np.testing.assert_allclose(ps_result, gs_result.to_array())\n\n\n_cudf_scalar_reflected_ops = [\n lambda x: cudf.Scalar(1) + x,\n lambda x: cudf.Scalar(2) * x,\n lambda x: cudf.Scalar(2) - x,\n lambda x: cudf.Scalar(2) // x,\n lambda x: cudf.Scalar(2) / x,\n lambda x: cudf.Scalar(3) + x,\n lambda x: cudf.Scalar(3) * x,\n lambda x: cudf.Scalar(3) - x,\n lambda x: cudf.Scalar(3) // x,\n lambda x: cudf.Scalar(3) / x,\n lambda x: cudf.Scalar(3) % x,\n lambda x: cudf.Scalar(-1) + x,\n lambda x: cudf.Scalar(-2) * x,\n lambda x: cudf.Scalar(-2) - x,\n lambda x: cudf.Scalar(-2) // x,\n lambda x: cudf.Scalar(-2) / x,\n lambda x: cudf.Scalar(-3) + x,\n lambda x: cudf.Scalar(-3) * x,\n lambda x: cudf.Scalar(-3) - x,\n lambda x: cudf.Scalar(-3) // x,\n lambda x: cudf.Scalar(-3) / x,\n lambda x: cudf.Scalar(-3) % x,\n lambda x: cudf.Scalar(0) + x,\n lambda x: cudf.Scalar(0) * x,\n lambda x: cudf.Scalar(0) - x,\n lambda x: cudf.Scalar(0) // x,\n lambda x: cudf.Scalar(0) / x,\n]\n\n\[email protected](\"obj_class\", [\"Series\", \"Index\"])\[email protected](\n \"funcs, dtype\",\n list(\n product(\n list(zip(_reflected_ops, _cudf_scalar_reflected_ops)),\n utils.NUMERIC_TYPES,\n )\n ),\n)\ndef test_reflected_ops_cudf_scalar(funcs, dtype, obj_class):\n cpu_func, gpu_func = funcs\n\n # create random series\n np.random.seed(12)\n random_series = utils.gen_rand(dtype, 100, low=10)\n\n # gpu series\n gs = Series(random_series)\n\n # class typing\n if obj_class == \"Index\":\n gs = as_index(gs)\n\n gs_result = gpu_func(gs)\n\n # class typing\n if obj_class == \"Index\":\n gs = Series(gs)\n\n # pandas\n ps_result = cpu_func(random_series)\n\n # verify\n np.testing.assert_allclose(ps_result, gs_result.to_array())\n\n\[email protected](\"binop\", _binops)\ndef test_different_shapes_and_columns(binop):\n\n # TODO: support `pow()` on NaN values. 
Particularly, the cases:\n # `pow(1, NaN) == 1` and `pow(NaN, 0) == 1`\n if binop is operator.pow:\n return\n\n # Empty frame on the right side\n pd_frame = binop(pd.DataFrame({\"x\": [1, 2]}), pd.DataFrame({}))\n cd_frame = binop(cudf.DataFrame({\"x\": [1, 2]}), cudf.DataFrame({}))\n utils.assert_eq(cd_frame, pd_frame)\n\n # Empty frame on the left side\n pd_frame = pd.DataFrame({}) + pd.DataFrame({\"x\": [1, 2]})\n cd_frame = cudf.DataFrame({}) + cudf.DataFrame({\"x\": [1, 2]})\n utils.assert_eq(cd_frame, pd_frame)\n\n # Note: the below rely on a discrepancy between cudf and pandas\n # While pandas inserts columns in alphabetical order, cudf inserts in the\n # order of whichever column comes first. So the following code will not\n # work if the names of columns are reversed i.e. ('y', 'x') != ('x', 'y')\n\n # More rows on the left side\n pd_frame = pd.DataFrame({\"x\": [1, 2, 3]}) + pd.DataFrame({\"y\": [1, 2]})\n cd_frame = cudf.DataFrame({\"x\": [1, 2, 3]}) + cudf.DataFrame({\"y\": [1, 2]})\n utils.assert_eq(cd_frame, pd_frame)\n\n # More rows on the right side\n pd_frame = pd.DataFrame({\"x\": [1, 2]}) + pd.DataFrame({\"y\": [1, 2, 3]})\n cd_frame = cudf.DataFrame({\"x\": [1, 2]}) + cudf.DataFrame({\"y\": [1, 2, 3]})\n utils.assert_eq(cd_frame, pd_frame)\n\n\[email protected](\"binop\", _binops)\ndef test_different_shapes_and_same_columns(binop):\n\n # TODO: support `pow()` on NaN values. Particularly, the cases:\n # `pow(1, NaN) == 1` and `pow(NaN, 0) == 1`\n if binop is operator.pow:\n return\n\n pd_frame = binop(\n pd.DataFrame({\"x\": [1, 2]}), pd.DataFrame({\"x\": [1, 2, 3]})\n )\n cd_frame = binop(\n cudf.DataFrame({\"x\": [1, 2]}), cudf.DataFrame({\"x\": [1, 2, 3]})\n )\n # cast x as float64 so it matches pandas dtype\n cd_frame[\"x\"] = cd_frame[\"x\"].astype(np.float64)\n utils.assert_eq(cd_frame, pd_frame)\n\n\[email protected](\"binop\", _binops)\ndef test_different_shapes_and_columns_with_unaligned_indices(binop):\n\n # TODO: support `pow()` on NaN values. 
Particularly, the cases:\n # `pow(1, NaN) == 1` and `pow(NaN, 0) == 1`\n if binop is operator.pow:\n return\n\n # Test with a RangeIndex\n pdf1 = pd.DataFrame({\"x\": [4, 3, 2, 1], \"y\": [7, 3, 8, 6]})\n # Test with a GenericIndex\n pdf2 = pd.DataFrame(\n {\"x\": [1, 2, 3, 7], \"y\": [4, 5, 6, 7]}, index=[0, 1, 3, 4]\n )\n # Test with a GenericIndex in a different order\n pdf3 = pd.DataFrame(\n {\"x\": [4, 5, 6, 7], \"y\": [1, 2, 3, 7], \"z\": [0, 5, 3, 7]},\n index=[0, 3, 5, 3],\n )\n gdf1 = cudf.DataFrame.from_pandas(pdf1)\n gdf2 = cudf.DataFrame.from_pandas(pdf2)\n gdf3 = cudf.DataFrame.from_pandas(pdf3)\n\n pd_frame = binop(binop(pdf1, pdf2), pdf3)\n cd_frame = binop(binop(gdf1, gdf2), gdf3)\n # cast x and y as float64 so it matches pandas dtype\n cd_frame[\"x\"] = cd_frame[\"x\"].astype(np.float64)\n cd_frame[\"y\"] = cd_frame[\"y\"].astype(np.float64)\n utils.assert_eq(cd_frame, pd_frame)\n\n\[email protected](\n \"df2\",\n [\n cudf.DataFrame({\"a\": [3, 2, 1]}, index=[3, 2, 1]),\n cudf.DataFrame([3, 2]),\n ],\n)\[email protected](\"binop\", [operator.eq, operator.ne])\ndef test_df_different_index_shape(df2, binop):\n df1 = cudf.DataFrame([1, 2, 3], index=[1, 2, 3])\n\n pdf1 = df1.to_pandas()\n pdf2 = df2.to_pandas()\n\n utils.assert_exceptions_equal(\n lfunc=binop,\n rfunc=binop,\n lfunc_args_and_kwargs=([pdf1, pdf2],),\n rfunc_args_and_kwargs=([df1, df2],),\n )\n\n\[email protected](\"op\", [operator.eq, operator.ne])\ndef test_boolean_scalar_binop(op):\n psr = pd.Series(np.random.choice([True, False], 10))\n gsr = cudf.from_pandas(psr)\n utils.assert_eq(op(psr, True), op(gsr, True))\n utils.assert_eq(op(psr, False), op(gsr, False))\n\n # cuDF scalar\n utils.assert_eq(op(psr, True), op(gsr, cudf.Scalar(True)))\n utils.assert_eq(op(psr, False), op(gsr, cudf.Scalar(False)))\n\n\n_operators_arithmetic = [\n \"add\",\n \"radd\",\n \"sub\",\n \"rsub\",\n \"mul\",\n \"rmul\",\n \"mod\",\n \"rmod\",\n \"pow\",\n \"rpow\",\n \"floordiv\",\n \"rfloordiv\",\n \"truediv\",\n \"rtruediv\",\n]\n\n_operators_comparison = [\"eq\", \"ne\", \"lt\", \"le\", \"gt\", \"ge\"]\n\n\[email protected](\"func\", _operators_arithmetic)\[email protected](\"has_nulls\", [True, False])\[email protected](\"fill_value\", [None, 27])\[email protected](\"dtype\", [\"float32\", \"float64\"])\ndef test_operator_func_between_series(dtype, func, has_nulls, fill_value):\n count = 1000\n gdf_series_a = utils.gen_rand_series(\n dtype, count, has_nulls=has_nulls, stride=10000\n )\n gdf_series_b = utils.gen_rand_series(\n dtype, count, has_nulls=has_nulls, stride=100\n )\n pdf_series_a = gdf_series_a.to_pandas()\n pdf_series_b = gdf_series_b.to_pandas()\n\n gdf_result = getattr(gdf_series_a, func)(\n gdf_series_b, fill_value=fill_value\n )\n pdf_result = getattr(pdf_series_a, func)(\n pdf_series_b, fill_value=fill_value\n )\n\n utils.assert_eq(pdf_result, gdf_result)\n\n\[email protected](\"func\", _operators_arithmetic)\[email protected](\"has_nulls\", [True, False])\[email protected](\"fill_value\", [None, 27])\[email protected](\"dtype\", [\"float32\", \"float64\"])\[email protected](\"use_cudf_scalar\", [False, True])\ndef test_operator_func_series_and_scalar(\n dtype, func, has_nulls, fill_value, use_cudf_scalar\n):\n count = 1000\n scalar = 59\n gdf_series = utils.gen_rand_series(\n dtype, count, has_nulls=has_nulls, stride=10000\n )\n pdf_series = gdf_series.to_pandas()\n\n gdf_series_result = getattr(gdf_series, func)(\n cudf.Scalar(scalar) if use_cudf_scalar else scalar,\n fill_value=fill_value,\n )\n 
pdf_series_result = getattr(pdf_series, func)(\n scalar, fill_value=fill_value\n )\n\n utils.assert_eq(pdf_series_result, gdf_series_result)\n\n\n_permu_values = [0, 1, None, np.nan]\n\n\[email protected](\"fill_value\", _permu_values)\[email protected](\"scalar_a\", _permu_values)\[email protected](\"scalar_b\", _permu_values)\[email protected](\"func\", _operators_comparison)\[email protected](\"dtype\", [\"float32\", \"float64\"])\ndef test_operator_func_between_series_logical(\n dtype, func, scalar_a, scalar_b, fill_value\n):\n\n gdf_series_a = Series([scalar_a], nan_as_null=False).astype(dtype)\n gdf_series_b = Series([scalar_b], nan_as_null=False).astype(dtype)\n\n pdf_series_a = gdf_series_a.to_pandas(nullable=True)\n pdf_series_b = gdf_series_b.to_pandas(nullable=True)\n\n gdf_series_result = getattr(gdf_series_a, func)(\n gdf_series_b, fill_value=fill_value\n )\n pdf_series_result = getattr(pdf_series_a, func)(\n pdf_series_b, fill_value=fill_value\n )\n expect = pdf_series_result\n got = gdf_series_result.to_pandas(nullable=True)\n\n # If fill_value is np.nan, things break down a bit,\n # because setting a NaN into a pandas nullable float\n # array still gets transformed to <NA>. As such,\n # pd_series_with_nulls.fillna(np.nan) has no effect.\n if (\n (pdf_series_a.isnull().sum() != pdf_series_b.isnull().sum())\n and np.isscalar(fill_value)\n and np.isnan(fill_value)\n ):\n with pytest.raises(AssertionError):\n utils.assert_eq(expect, got)\n return\n utils.assert_eq(expect, got)\n\n\[email protected](\"dtype\", [\"float32\", \"float64\"])\[email protected](\"func\", _operators_comparison)\[email protected](\"has_nulls\", [True, False])\[email protected](\"scalar\", [-59.0, np.nan, 0, 59.0])\[email protected](\"fill_value\", [None, True, False, 1.0])\[email protected](\"use_cudf_scalar\", [False, True])\ndef test_operator_func_series_and_scalar_logical(\n dtype, func, has_nulls, scalar, fill_value, use_cudf_scalar\n):\n gdf_series = utils.gen_rand_series(\n dtype, 1000, has_nulls=has_nulls, stride=10000\n )\n pdf_series = gdf_series.to_pandas(nullable=True)\n gdf_series_result = getattr(gdf_series, func)(\n cudf.Scalar(scalar) if use_cudf_scalar else scalar,\n fill_value=fill_value,\n )\n pdf_series_result = getattr(pdf_series, func)(\n scalar, fill_value=fill_value\n )\n\n expect = pdf_series_result\n got = gdf_series_result.to_pandas(nullable=True)\n\n utils.assert_eq(expect, got)\n\n\[email protected](\"func\", _operators_arithmetic)\[email protected](\"nulls\", _nulls)\[email protected](\"fill_value\", [None, 27])\[email protected](\"other\", [\"df\", \"scalar\"])\ndef test_operator_func_dataframe(func, nulls, fill_value, other):\n num_rows = 100\n num_cols = 3\n\n def gen_df():\n pdf = pd.DataFrame()\n from string import ascii_lowercase\n\n cols = np.random.choice(num_cols + 5, num_cols, replace=False)\n\n for i in range(num_cols):\n colname = ascii_lowercase[cols[i]]\n data = utils.gen_rand(\"float64\", num_rows) * 10000\n if nulls == \"some\":\n idx = np.random.choice(\n num_rows, size=int(num_rows / 2), replace=False\n )\n data[idx] = np.nan\n pdf[colname] = data\n return pdf\n\n pdf1 = gen_df()\n pdf2 = gen_df() if other == \"df\" else 59.0\n gdf1 = cudf.DataFrame.from_pandas(pdf1)\n gdf2 = cudf.DataFrame.from_pandas(pdf2) if other == \"df\" else 59.0\n\n got = getattr(gdf1, func)(gdf2, fill_value=fill_value)\n expect = getattr(pdf1, func)(pdf2, fill_value=fill_value)[list(got._data)]\n\n utils.assert_eq(expect, got)\n\n\[email protected](\"func\", _operators_arithmetic + 
_operators_comparison)\[email protected](\"rhs\", [0, 1, 2, 128])\ndef test_binop_bool_uint(func, rhs):\n # TODO: remove this once issue #2172 is resolved\n if func == \"rmod\" or func == \"rfloordiv\":\n return\n psr = pd.Series([True, False, False])\n gsr = cudf.from_pandas(psr)\n utils.assert_eq(\n getattr(psr, func)(rhs), getattr(gsr, func)(rhs), check_dtype=False\n )\n\n\ndef test_series_misc_binop():\n pds = pd.Series([1, 2, 4], name=\"abc xyz\")\n gds = cudf.Series([1, 2, 4], name=\"abc xyz\")\n\n utils.assert_eq(pds + 1, gds + 1)\n utils.assert_eq(1 + pds, 1 + gds)\n\n utils.assert_eq(pds + pds, gds + gds)\n\n pds1 = pd.Series([1, 2, 4], name=\"hello world\")\n gds1 = cudf.Series([1, 2, 4], name=\"hello world\")\n\n utils.assert_eq(pds + pds1, gds + gds1)\n utils.assert_eq(pds1 + pds, gds1 + gds)\n\n utils.assert_eq(pds1 + pds + 5, gds1 + gds + 5)\n\n\ndef test_int8_float16_binop():\n a = cudf.Series([1], dtype=\"int8\")\n b = np.float16(2)\n expect = cudf.Series([0.5])\n got = a / b\n utils.assert_eq(expect, got, check_dtype=False)\n\n\[email protected](\"dtype\", [\"int64\", \"float64\", \"str\"])\ndef test_vector_to_none_binops(dtype):\n data = Series([1, 2, 3, None], dtype=dtype)\n\n expect = Series([None] * 4).astype(dtype)\n got = data + None\n\n utils.assert_eq(expect, got)\n\n\[email protected](\n \"lhs\",\n [\n 1,\n 3,\n 4,\n pd.Series([5, 6, 2]),\n pd.Series([0, 10, 20, 30, 3, 4, 5, 6, 2]),\n 6,\n ],\n)\[email protected](\"rhs\", [1, 3, 4, pd.Series([5, 6, 2])])\[email protected](\n \"ops\",\n [\n (np.remainder, cudf.remainder),\n (np.floor_divide, cudf.floor_divide),\n (np.subtract, cudf.subtract),\n (np.add, cudf.add),\n (np.true_divide, cudf.true_divide),\n (np.multiply, cudf.multiply),\n ],\n)\ndef test_ufunc_ops(lhs, rhs, ops):\n np_op, cu_op = ops\n\n if isinstance(lhs, pd.Series):\n culhs = cudf.from_pandas(lhs)\n else:\n culhs = lhs\n\n if isinstance(rhs, pd.Series):\n curhs = cudf.from_pandas(rhs)\n else:\n curhs = rhs\n\n expect = np_op(lhs, rhs)\n got = cu_op(culhs, curhs)\n if np.isscalar(expect):\n assert got == expect\n else:\n utils.assert_eq(\n expect, got,\n )\n\n\ndef dtype_scalar(val, dtype):\n if dtype == \"str\":\n return str(val)\n dtype = np.dtype(dtype)\n if dtype.type in {np.datetime64, np.timedelta64}:\n res, _ = np.datetime_data(dtype)\n return dtype.type(val, res)\n else:\n return dtype.type(val)\n\n\ndef make_valid_scalar_add_data():\n valid = set()\n\n # to any int, we may add any kind of\n # other int, float, datetime timedelta, or bool\n valid |= set(\n product(\n INTEGER_TYPES,\n FLOAT_TYPES | DATETIME_TYPES | TIMEDELTA_TYPES | BOOL_TYPES,\n )\n )\n\n # to any float, we may add any int, float, or bool\n valid |= set(\n product(FLOAT_TYPES, INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES)\n )\n\n # to any datetime, we may add any int, timedelta, or bool\n valid |= set(\n product(DATETIME_TYPES, INTEGER_TYPES | TIMEDELTA_TYPES | BOOL_TYPES)\n )\n\n # to any timedelta, we may add any int, datetime, other timedelta, or bool\n valid |= set(\n product(TIMEDELTA_TYPES, INTEGER_TYPES | DATETIME_TYPES | BOOL_TYPES)\n )\n\n # to any bool, we may add any int, float, datetime, timedelta, or bool\n valid |= set(\n product(\n BOOL_TYPES,\n INTEGER_TYPES\n | FLOAT_TYPES\n | DATETIME_TYPES\n | TIMEDELTA_TYPES\n | BOOL_TYPES,\n )\n )\n\n # to any string, we may add any other string\n valid |= {(\"str\", \"str\")}\n\n return sorted(list(valid))\n\n\ndef make_invalid_scalar_add_data():\n invalid = set()\n\n # we can not add a datetime to a float\n invalid |= 
set(product(FLOAT_TYPES, DATETIME_TYPES))\n\n # We can not add a timedelta to a float\n invalid |= set(product(FLOAT_TYPES, TIMEDELTA_TYPES))\n\n # we can not add a float to any datetime\n invalid |= set(product(DATETIME_TYPES, FLOAT_TYPES))\n\n # can can not add a datetime to a datetime\n invalid |= set(product(DATETIME_TYPES, DATETIME_TYPES))\n\n # can not add a timedelta to a float\n invalid |= set(product(FLOAT_TYPES, TIMEDELTA_TYPES))\n\n return sorted(list(invalid))\n\n\[email protected](\"dtype_l,dtype_r\", make_valid_scalar_add_data())\ndef test_scalar_add(dtype_l, dtype_r):\n test_value = 1\n\n lval_host = dtype_scalar(test_value, dtype=dtype_l)\n rval_host = dtype_scalar(test_value, dtype=dtype_r)\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n # expect = np.add(lval_host, rval_host)\n expect = lval_host + rval_host\n got = lval_gpu + rval_gpu\n\n assert expect == got.value\n if not dtype_l == dtype_r == \"str\":\n assert expect.dtype == got.dtype\n\n\[email protected](\"dtype_l,dtype_r\", make_invalid_scalar_add_data())\ndef test_scalar_add_invalid(dtype_l, dtype_r):\n test_value = 1\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n with pytest.raises(TypeError):\n lval_gpu + rval_gpu\n\n\ndef make_scalar_difference_data():\n valid = set()\n\n # from an int, we may subtract any int, float, timedelta,\n # or boolean value\n valid |= set(\n product(\n INTEGER_TYPES,\n INTEGER_TYPES | FLOAT_TYPES | TIMEDELTA_TYPES | BOOL_TYPES,\n )\n )\n\n # from any float, we may subtract any int, float, or bool\n valid |= set(\n product(FLOAT_TYPES, INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES)\n )\n\n # from any datetime we may subtract any int, datetime, timedelta, or bool\n valid |= set(\n product(\n DATETIME_TYPES,\n INTEGER_TYPES | DATETIME_TYPES | TIMEDELTA_TYPES | BOOL_TYPES,\n )\n )\n\n # from any timedelta we may subtract any int, timedelta, or bool\n valid |= set(\n product(TIMEDELTA_TYPES, INTEGER_TYPES | TIMEDELTA_TYPES | BOOL_TYPES)\n )\n\n # from any bool we may subtract any int, float or timedelta\n valid |= set(\n product(BOOL_TYPES, INTEGER_TYPES | FLOAT_TYPES | TIMEDELTA_TYPES)\n )\n\n return sorted(list(valid))\n\n\ndef make_scalar_difference_data_invalid():\n invalid = set()\n\n # we can't subtract a datetime from an int\n invalid |= set(product(INTEGER_TYPES, DATETIME_TYPES))\n\n # we can't subtract a datetime or timedelta from a float\n invalid |= set(product(FLOAT_TYPES, DATETIME_TYPES | TIMEDELTA_TYPES))\n\n # we can't subtract a float from a datetime or timedelta\n invalid |= set(product(DATETIME_TYPES | TIMEDELTA_TYPES, FLOAT_TYPES))\n\n # We can't subtract a datetime from a timedelta\n invalid |= set(product(TIMEDELTA_TYPES, DATETIME_TYPES))\n\n # we can't subtract a datetime or bool from a bool\n invalid |= set(product(BOOL_TYPES, BOOL_TYPES | DATETIME_TYPES))\n\n return sorted(list(invalid))\n\n\[email protected](\"dtype_l,dtype_r\", make_scalar_difference_data())\ndef test_scalar_difference(dtype_l, dtype_r):\n test_value = 1\n\n lval_host = dtype_scalar(test_value, dtype=dtype_l)\n rval_host = dtype_scalar(test_value, dtype=dtype_r)\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n expect = lval_host - rval_host\n got = lval_gpu - rval_gpu\n\n assert expect == got.value\n assert expect.dtype == got.dtype\n\n\[email protected](\n \"dtype_l,dtype_r\", 
make_scalar_difference_data_invalid()\n)\ndef test_scalar_difference_invalid(dtype_l, dtype_r):\n test_value = 1\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n with pytest.raises(TypeError):\n lval_gpu - rval_gpu\n\n\ndef make_scalar_product_data():\n valid = set()\n\n # we can multiply an int, or bool by any int, float, timedelta, or bool\n valid |= set(\n product(\n INTEGER_TYPES | BOOL_TYPES,\n INTEGER_TYPES | FLOAT_TYPES | TIMEDELTA_TYPES | BOOL_TYPES,\n )\n )\n\n # we can muliply any timedelta by any int, or bool\n valid |= set(product(TIMEDELTA_TYPES, INTEGER_TYPES | BOOL_TYPES))\n\n # we can multiply a float by any int, float, or bool\n valid |= set(\n product(FLOAT_TYPES, INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES)\n )\n\n return sorted(list(valid))\n\n\ndef make_scalar_product_data_invalid():\n invalid = set()\n\n # can't multiply a ints, floats, datetimes, timedeltas,\n # or bools by datetimes\n invalid |= set(\n product(\n INTEGER_TYPES\n | FLOAT_TYPES\n | DATETIME_TYPES\n | TIMEDELTA_TYPES\n | BOOL_TYPES,\n DATETIME_TYPES,\n )\n )\n\n # can't multiply datetimes with anything really\n invalid |= set(\n product(\n DATETIME_TYPES,\n INTEGER_TYPES\n | FLOAT_TYPES\n | DATETIME_TYPES\n | TIMEDELTA_TYPES\n | BOOL_TYPES,\n )\n )\n\n # can't multiply timedeltas by timedeltas\n invalid |= set(product(TIMEDELTA_TYPES, TIMEDELTA_TYPES))\n\n return sorted(list(invalid))\n\n\[email protected](\"dtype_l,dtype_r\", make_scalar_product_data())\ndef test_scalar_product(dtype_l, dtype_r):\n test_value = 1\n\n lval_host = dtype_scalar(test_value, dtype=dtype_l)\n rval_host = dtype_scalar(test_value, dtype=dtype_r)\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n expect = lval_host * rval_host\n got = lval_gpu * rval_gpu\n\n assert expect == got.value\n assert expect.dtype == got.dtype\n\n\[email protected](\"dtype_l,dtype_r\", make_scalar_product_data_invalid())\ndef test_scalar_product_invalid(dtype_l, dtype_r):\n test_value = 1\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n with pytest.raises(TypeError):\n lval_gpu * rval_gpu\n\n\ndef make_scalar_floordiv_data():\n valid = set()\n\n # we can divide ints and floats by other ints, floats, or bools\n valid |= set(\n product(\n INTEGER_TYPES | FLOAT_TYPES,\n INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,\n )\n )\n\n # we can divide timedeltas by ints, floats or other timedeltas\n valid |= set(\n product(TIMEDELTA_TYPES, INTEGER_TYPES | FLOAT_TYPES | TIMEDELTA_TYPES)\n )\n\n # we can divide bools by ints, floats or bools\n valid |= set(product(BOOL_TYPES, INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES))\n\n return sorted(list(valid))\n\n\ndef make_scalar_floordiv_data_invalid():\n invalid = set()\n\n # we can't numeric types into datelike types\n invalid |= set(\n product(\n INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,\n DATETIME_TYPES | TIMEDELTA_TYPES,\n )\n )\n\n # we can't divide datetime types into anything\n invalid |= set(\n product(\n DATETIME_TYPES,\n INTEGER_TYPES\n | FLOAT_TYPES\n | DATETIME_TYPES\n | TIMEDELTA_TYPES\n | BOOL_TYPES,\n )\n )\n\n # we can't divide timedeltas into bools, or datetimes\n invalid |= set(product(TIMEDELTA_TYPES, BOOL_TYPES | DATETIME_TYPES))\n\n return sorted(list(invalid))\n\n\[email protected](\"dtype_l,dtype_r\", make_scalar_floordiv_data())\ndef test_scalar_floordiv(dtype_l, dtype_r):\n test_value = 1\n\n lval_host = dtype_scalar(test_value, 
dtype=dtype_l)\n rval_host = dtype_scalar(test_value, dtype=dtype_r)\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n expect = lval_host // rval_host\n got = lval_gpu // rval_gpu\n\n assert expect == got.value\n assert expect.dtype == got.dtype\n\n\[email protected](\n \"dtype_l,dtype_r\", make_scalar_floordiv_data_invalid()\n)\ndef test_scalar_floordiv_invalid(dtype_l, dtype_r):\n test_value = 1\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n with pytest.raises(TypeError):\n lval_gpu // rval_gpu\n\n\ndef make_scalar_truediv_data():\n valid = set()\n\n # we can true divide ints, floats, or bools by other\n # ints, floats or bools\n valid |= set(\n product(\n INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,\n INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,\n )\n )\n\n # we can true divide timedeltas by ints floats or timedeltas\n valid |= set(product(TIMEDELTA_TYPES, INTEGER_TYPES | TIMEDELTA_TYPES))\n\n return sorted(list(valid))\n\n\ndef make_scalar_truediv_data_invalid():\n invalid = set()\n\n # we can't divide ints, floats or bools by datetimes\n # or timedeltas\n invalid |= set(\n product(\n INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,\n DATETIME_TYPES | TIMEDELTA_TYPES,\n )\n )\n\n # we cant true divide datetime types by anything\n invalid |= set(\n product(\n DATETIME_TYPES,\n INTEGER_TYPES\n | FLOAT_TYPES\n | DATETIME_TYPES\n | TIMEDELTA_TYPES\n | BOOL_TYPES,\n )\n )\n\n # we cant true divide timedeltas by datetimes or bools or floats\n invalid |= set(\n product(TIMEDELTA_TYPES, DATETIME_TYPES | BOOL_TYPES | FLOAT_TYPES)\n )\n\n return sorted(list(invalid))\n\n\[email protected](\"dtype_l,dtype_r\", make_scalar_truediv_data())\ndef test_scalar_truediv(dtype_l, dtype_r):\n test_value = 1\n\n lval_host = dtype_scalar(test_value, dtype=dtype_l)\n rval_host = dtype_scalar(test_value, dtype=dtype_r)\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n expect = np.true_divide(lval_host, rval_host)\n got = lval_gpu / rval_gpu\n\n assert expect == got.value\n\n # numpy bug\n\n if np.dtype(dtype_l).itemsize <= 2 and np.dtype(dtype_r).itemsize <= 2:\n assert expect.dtype == \"float64\" and got.dtype == \"float32\"\n else:\n assert expect.dtype == got.dtype\n # assert expect.dtype == got.dtype\n\n\[email protected](\"dtype_l,dtype_r\", make_scalar_truediv_data_invalid())\ndef test_scalar_truediv_invalid(dtype_l, dtype_r):\n test_value = 1\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n with pytest.raises(TypeError):\n lval_gpu / rval_gpu\n\n\ndef make_scalar_remainder_data():\n valid = set()\n\n # can mod numeric types with each other\n valid |= set(\n product(\n INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,\n INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,\n )\n )\n\n # can mod timedeltas by other timedeltas\n valid |= set(product(TIMEDELTA_TYPES, TIMEDELTA_TYPES))\n\n return sorted(list(valid))\n\n\ndef make_scalar_remainder_data_invalid():\n invalid = set()\n\n # numeric types cant be modded against timedeltas\n # or datetimes. 
Also, datetimes can't be modded\n # against datetimes or timedeltas\n invalid |= set(\n product(\n INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES | DATETIME_TYPES,\n DATETIME_TYPES | TIMEDELTA_TYPES,\n )\n )\n\n # datetime and timedelta types cant be modded against\n # any numeric types\n invalid |= set(\n product(\n DATETIME_TYPES | TIMEDELTA_TYPES,\n INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,\n )\n )\n\n # timedeltas cant mod with datetimes\n invalid |= set(product(TIMEDELTA_TYPES, DATETIME_TYPES))\n\n return sorted(list(invalid))\n\n\[email protected](\"dtype_l,dtype_r\", make_scalar_remainder_data())\ndef test_scalar_remainder(dtype_l, dtype_r):\n test_value = 1\n\n lval_host = dtype_scalar(test_value, dtype=dtype_l)\n rval_host = dtype_scalar(test_value, dtype=dtype_r)\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n expect = lval_host % rval_host\n got = lval_gpu % rval_gpu\n\n assert expect == got.value\n assert expect.dtype == got.dtype\n\n\[email protected](\n \"dtype_l,dtype_r\", make_scalar_remainder_data_invalid()\n)\ndef test_scalar_remainder_invalid(dtype_l, dtype_r):\n test_value = 1\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n with pytest.raises(TypeError):\n lval_gpu % rval_gpu\n\n\ndef make_scalar_power_data():\n # only numeric values form valid operands for power\n return sorted(\n product(\n INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,\n INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES,\n )\n )\n\n\ndef make_scalar_power_data_invalid():\n invalid = set()\n\n # datetimes and timedeltas cant go in exponents\n invalid |= set(\n product(\n INTEGER_TYPES\n | FLOAT_TYPES\n | TIMEDELTA_TYPES\n | DATETIME_TYPES\n | BOOL_TYPES,\n DATETIME_TYPES | TIMEDELTA_TYPES,\n )\n )\n\n # datetimes and timedeltas may not be raised to\n # any exponent of any dtype\n invalid |= set(\n product(\n DATETIME_TYPES | TIMEDELTA_TYPES,\n DATETIME_TYPES\n | TIMEDELTA_TYPES\n | INTEGER_TYPES\n | FLOAT_TYPES\n | BOOL_TYPES,\n )\n )\n\n return sorted(list(invalid))\n\n\[email protected](\"dtype_l,dtype_r\", make_scalar_power_data())\ndef test_scalar_power(dtype_l, dtype_r):\n test_value = 1\n\n lval_host = dtype_scalar(test_value, dtype=dtype_l)\n rval_host = dtype_scalar(test_value, dtype=dtype_r)\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n expect = lval_host ** rval_host\n got = lval_gpu ** rval_gpu\n\n assert expect == got.value\n assert expect.dtype == got.dtype\n\n\[email protected](\"dtype_l,dtype_r\", make_scalar_power_data_invalid())\ndef test_scalar_power_invalid(dtype_l, dtype_r):\n test_value = 1\n\n lval_gpu = cudf.Scalar(test_value, dtype=dtype_l)\n rval_gpu = cudf.Scalar(test_value, dtype=dtype_r)\n\n with pytest.raises(TypeError):\n lval_gpu ** rval_gpu\n\n\[email protected](\n \"date_col\",\n [\n [\n \"2000-01-01 00:00:00.012345678\",\n \"2000-01-31 00:00:00.012345678\",\n \"2000-02-29 00:00:00.012345678\",\n ]\n ],\n)\[email protected](\"n_periods\", [0, 1, -1, 12, -12])\[email protected](\n \"frequency\",\n [\n \"months\",\n \"years\",\n \"days\",\n \"hours\",\n \"minutes\",\n \"seconds\",\n \"microseconds\",\n pytest.param(\n \"nanoseconds\",\n marks=pytest.mark.xfail(\n reason=\"https://github.com/pandas-dev/pandas/issues/36589\"\n ),\n ),\n ],\n)\[email protected](\n \"dtype\",\n [\"datetime64[ns]\", \"datetime64[us]\", \"datetime64[ms]\", \"datetime64[s]\"],\n)\[email protected](\"op\", [operator.add, 
operator.sub])\ndef test_datetime_dateoffset_binaryop(\n date_col, n_periods, frequency, dtype, op\n):\n gsr = cudf.Series(date_col, dtype=dtype)\n psr = gsr.to_pandas() # converts to nanos\n\n kwargs = {frequency: n_periods}\n\n goffset = cudf.DateOffset(**kwargs)\n poffset = pd.DateOffset(**kwargs)\n\n expect = op(psr, poffset)\n got = op(gsr, goffset)\n\n utils.assert_eq(expect, got)\n\n expect = op(psr, -poffset)\n got = op(gsr, -goffset)\n\n utils.assert_eq(expect, got)\n\n\[email protected](\n \"date_col\",\n [\n [\n \"2000-01-01 00:00:00.012345678\",\n \"2000-01-31 00:00:00.012345678\",\n \"2000-02-29 00:00:00.012345678\",\n ]\n ],\n)\[email protected](\n \"kwargs\",\n [\n {\"months\": 2, \"years\": 5},\n {\"microseconds\": 1, \"seconds\": 1},\n {\"months\": 2, \"years\": 5, \"seconds\": 923, \"microseconds\": 481},\n pytest.param(\n {\"milliseconds\": 4},\n marks=pytest.mark.xfail(\n reason=\"Pandas gets the wrong answer for milliseconds\"\n ),\n ),\n pytest.param(\n {\"milliseconds\": 4, \"years\": 2},\n marks=pytest.mark.xfail(\n reason=\"Pandas construction fails with these keywords\"\n ),\n ),\n pytest.param(\n {\"nanoseconds\": 12},\n marks=pytest.mark.xfail(\n reason=\"Pandas gets the wrong answer for nanoseconds\"\n ),\n ),\n ],\n)\[email protected](\"op\", [operator.add, operator.sub])\ndef test_datetime_dateoffset_binaryop_multiple(date_col, kwargs, op):\n\n gsr = cudf.Series(date_col, dtype=\"datetime64[ns]\")\n psr = gsr.to_pandas()\n\n poffset = pd.DateOffset(**kwargs)\n goffset = cudf.DateOffset(**kwargs)\n\n expect = op(psr, poffset)\n got = op(gsr, goffset)\n\n utils.assert_eq(expect, got)\n\n\[email protected](\n \"date_col\",\n [\n [\n \"2000-01-01 00:00:00.012345678\",\n \"2000-01-31 00:00:00.012345678\",\n \"2000-02-29 00:00:00.012345678\",\n ]\n ],\n)\[email protected](\"n_periods\", [0, 1, -1, 12, -12])\[email protected](\n \"frequency\",\n [\n \"months\",\n \"years\",\n \"days\",\n \"hours\",\n \"minutes\",\n \"seconds\",\n \"microseconds\",\n pytest.param(\n \"nanoseconds\",\n marks=pytest.mark.xfail(\n reason=\"https://github.com/pandas-dev/pandas/issues/36589\"\n ),\n ),\n ],\n)\[email protected](\n \"dtype\",\n [\"datetime64[ns]\", \"datetime64[us]\", \"datetime64[ms]\", \"datetime64[s]\"],\n)\ndef test_datetime_dateoffset_binaryop_reflected(\n date_col, n_periods, frequency, dtype\n):\n gsr = cudf.Series(date_col, dtype=dtype)\n psr = gsr.to_pandas() # converts to nanos\n\n kwargs = {frequency: n_periods}\n\n goffset = cudf.DateOffset(**kwargs)\n poffset = pd.DateOffset(**kwargs)\n\n expect = poffset + psr\n got = goffset + gsr\n\n utils.assert_eq(expect, got)\n\n with pytest.raises(TypeError):\n poffset - psr\n\n with pytest.raises(TypeError):\n goffset - gsr\n\n\[email protected](\"frame\", [cudf.Series, cudf.Index, cudf.DataFrame])\[email protected](\n \"dtype\", [\"int\", \"str\", \"datetime64[s]\", \"timedelta64[s]\", \"category\"]\n)\ndef test_binops_with_lhs_numpy_scalar(frame, dtype):\n data = [1, 2, 3, 4, 5]\n\n data = (\n frame({\"a\": data}, dtype=dtype)\n if isinstance(frame, cudf.DataFrame)\n else frame(data, dtype=dtype)\n )\n\n if dtype == \"datetime64[s]\":\n val = np.dtype(dtype).type(4, \"s\")\n elif dtype == \"timedelta64[s]\":\n val = np.dtype(dtype).type(4, \"s\")\n elif dtype == \"category\":\n val = np.int64(4)\n else:\n val = np.dtype(dtype).type(4)\n\n expected = val == data.to_pandas()\n got = val == data\n\n # In case of index, expected would be a numpy array\n if isinstance(data, cudf.Index):\n expected = pd.Index(expected)\n\n 
utils.assert_eq(expected, got)\n\n\[email protected](\n \"dtype\",\n [\n \"int8\",\n \"int16\",\n \"int32\",\n \"int64\",\n \"uint8\",\n \"uint16\",\n \"uint32\",\n \"uint64\",\n \"float32\",\n \"float64\",\n \"datetime64[ns]\",\n \"datetime64[us]\",\n \"datetime64[ms]\",\n \"datetime64[s]\",\n \"timedelta64[ns]\",\n \"timedelta64[us]\",\n \"timedelta64[ms]\",\n \"timedelta64[s]\",\n ],\n)\[email protected](\"op\", _operators_comparison)\ndef test_binops_with_NA_consistent(dtype, op):\n data = [1, 2, 3]\n sr = cudf.Series(data, dtype=dtype)\n\n result = getattr(sr, op)(cudf.NA)\n if dtype in NUMERIC_TYPES:\n if op == \"ne\":\n expect_all = True\n else:\n expect_all = False\n assert (result == expect_all).all()\n elif dtype in DATETIME_TYPES & TIMEDELTA_TYPES:\n assert result._column.null_count == len(data)\n\n\ndef _decimal_series(input, dtype):\n return cudf.Series(\n [x if x is None else decimal.Decimal(x) for x in input], dtype=dtype,\n )\n\n\[email protected](\n \"args\",\n [\n (\n operator.add,\n [\"1.5\", \"2.0\"],\n cudf.Decimal64Dtype(scale=2, precision=2),\n [\"1.5\", \"2.0\"],\n cudf.Decimal64Dtype(scale=2, precision=2),\n [\"3.0\", \"4.0\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n ),\n (\n operator.add,\n [\"1.5\", \"2.0\"],\n cudf.Decimal64Dtype(scale=2, precision=2),\n [\"2.25\", \"1.005\"],\n cudf.Decimal64Dtype(scale=3, precision=4),\n [\"3.75\", \"3.005\"],\n cudf.Decimal64Dtype(scale=3, precision=5),\n ),\n (\n operator.add,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n [\"0.1\", \"0.2\"],\n cudf.Decimal64Dtype(scale=3, precision=4),\n [\"100.1\", \"200.2\"],\n cudf.Decimal64Dtype(scale=3, precision=9),\n ),\n (\n operator.sub,\n [\"1.5\", \"2.0\"],\n cudf.Decimal64Dtype(scale=2, precision=2),\n [\"2.25\", \"1.005\"],\n cudf.Decimal64Dtype(scale=3, precision=4),\n [\"-0.75\", \"0.995\"],\n cudf.Decimal64Dtype(scale=3, precision=5),\n ),\n (\n operator.sub,\n [\"1.5\", \"2.0\"],\n cudf.Decimal64Dtype(scale=2, precision=2),\n [\"2.25\", \"1.005\"],\n cudf.Decimal64Dtype(scale=3, precision=4),\n [\"-0.75\", \"0.995\"],\n cudf.Decimal64Dtype(scale=3, precision=5),\n ),\n (\n operator.sub,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n [\"0.1\", \"0.2\"],\n cudf.Decimal64Dtype(scale=3, precision=4),\n [\"99.9\", \"199.8\"],\n cudf.Decimal64Dtype(scale=3, precision=9),\n ),\n (\n operator.mul,\n [\"1.5\", \"2.0\"],\n cudf.Decimal64Dtype(scale=2, precision=2),\n [\"1.5\", \"3.0\"],\n cudf.Decimal64Dtype(scale=3, precision=4),\n [\"2.25\", \"6.0\"],\n cudf.Decimal64Dtype(scale=5, precision=7),\n ),\n (\n operator.mul,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n [\"0.1\", \"0.2\"],\n cudf.Decimal64Dtype(scale=3, precision=4),\n [\"10.0\", \"40.0\"],\n cudf.Decimal64Dtype(scale=1, precision=8),\n ),\n (\n operator.mul,\n [\"1000\", \"2000\"],\n cudf.Decimal64Dtype(scale=-3, precision=4),\n [\"0.343\", \"0.500\"],\n cudf.Decimal64Dtype(scale=3, precision=3),\n [\"343.0\", \"1000.0\"],\n cudf.Decimal64Dtype(scale=0, precision=8),\n ),\n (\n operator.add,\n [\"1.5\", None, \"2.0\"],\n cudf.Decimal64Dtype(scale=2, precision=2),\n [\"1.5\", None, \"2.0\"],\n cudf.Decimal64Dtype(scale=2, precision=2),\n [\"3.0\", None, \"4.0\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n ),\n (\n operator.add,\n [\"1.5\", None],\n cudf.Decimal64Dtype(scale=2, precision=2),\n [\"2.25\", \"1.005\"],\n cudf.Decimal64Dtype(scale=3, precision=4),\n [\"3.75\", None],\n cudf.Decimal64Dtype(scale=3, precision=5),\n ),\n (\n 
operator.sub,\n [\"1.5\", None],\n cudf.Decimal64Dtype(scale=2, precision=2),\n [\"2.25\", None],\n cudf.Decimal64Dtype(scale=3, precision=4),\n [\"-0.75\", None],\n cudf.Decimal64Dtype(scale=3, precision=5),\n ),\n (\n operator.sub,\n [\"1.5\", \"2.0\"],\n cudf.Decimal64Dtype(scale=2, precision=2),\n [\"2.25\", None],\n cudf.Decimal64Dtype(scale=3, precision=4),\n [\"-0.75\", None],\n cudf.Decimal64Dtype(scale=3, precision=5),\n ),\n (\n operator.mul,\n [\"1.5\", None],\n cudf.Decimal64Dtype(scale=2, precision=2),\n [\"1.5\", None],\n cudf.Decimal64Dtype(scale=3, precision=4),\n [\"2.25\", None],\n cudf.Decimal64Dtype(scale=5, precision=7),\n ),\n (\n operator.mul,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n [\"0.1\", None],\n cudf.Decimal64Dtype(scale=3, precision=4),\n [\"10.0\", None],\n cudf.Decimal64Dtype(scale=1, precision=8),\n ),\n (\n operator.eq,\n [\"0.18\", \"0.42\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [\"0.18\", \"0.21\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [True, False],\n bool,\n ),\n (\n operator.eq,\n [\"0.18\", \"0.42\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [\"0.1800\", \"0.2100\"],\n cudf.Decimal64Dtype(scale=4, precision=5),\n [True, False],\n bool,\n ),\n (\n operator.eq,\n [\"100\", None],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-1, precision=4),\n [True, None],\n bool,\n ),\n (\n operator.lt,\n [\"0.18\", \"0.42\", \"1.00\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [\"0.10\", \"0.87\", \"1.00\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [False, True, False],\n bool,\n ),\n (\n operator.lt,\n [\"0.18\", \"0.42\", \"1.00\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [\"0.1000\", \"0.8700\", \"1.0000\"],\n cudf.Decimal64Dtype(scale=4, precision=5),\n [False, True, False],\n bool,\n ),\n (\n operator.lt,\n [\"200\", None, \"100\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n [\"100\", \"200\", \"100\"],\n cudf.Decimal64Dtype(scale=-1, precision=4),\n [False, None, False],\n bool,\n ),\n (\n operator.gt,\n [\"0.18\", \"0.42\", \"1.00\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [\"0.10\", \"0.87\", \"1.00\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [True, False, False],\n bool,\n ),\n (\n operator.gt,\n [\"0.18\", \"0.42\", \"1.00\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [\"0.1000\", \"0.8700\", \"1.0000\"],\n cudf.Decimal64Dtype(scale=4, precision=5),\n [True, False, False],\n bool,\n ),\n (\n operator.gt,\n [\"300\", None, \"100\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n [\"100\", \"200\", \"100\"],\n cudf.Decimal64Dtype(scale=-1, precision=4),\n [True, None, False],\n bool,\n ),\n (\n operator.le,\n [\"0.18\", \"0.42\", \"1.00\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [\"0.10\", \"0.87\", \"1.00\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [False, True, True],\n bool,\n ),\n (\n operator.le,\n [\"0.18\", \"0.42\", \"1.00\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [\"0.1000\", \"0.8700\", \"1.0000\"],\n cudf.Decimal64Dtype(scale=4, precision=5),\n [False, True, True],\n bool,\n ),\n (\n operator.le,\n [\"300\", None, \"100\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n [\"100\", \"200\", \"100\"],\n cudf.Decimal64Dtype(scale=-1, precision=4),\n [False, None, True],\n bool,\n ),\n (\n operator.ge,\n [\"0.18\", \"0.42\", \"1.00\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [\"0.10\", \"0.87\", \"1.00\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [True, False, 
True],\n bool,\n ),\n (\n operator.ge,\n [\"0.18\", \"0.42\", \"1.00\"],\n cudf.Decimal64Dtype(scale=2, precision=3),\n [\"0.1000\", \"0.8700\", \"1.0000\"],\n cudf.Decimal64Dtype(scale=4, precision=5),\n [True, False, True],\n bool,\n ),\n (\n operator.ge,\n [\"300\", None, \"100\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n [\"100\", \"200\", \"100\"],\n cudf.Decimal64Dtype(scale=-1, precision=4),\n [True, None, True],\n bool,\n ),\n ],\n)\ndef test_binops_decimal(args):\n op, lhs, l_dtype, rhs, r_dtype, expect, expect_dtype = args\n\n a = _decimal_series(lhs, l_dtype)\n b = _decimal_series(rhs, r_dtype)\n expect = (\n _decimal_series(expect, expect_dtype)\n if isinstance(expect_dtype, cudf.Decimal64Dtype)\n else cudf.Series(expect, dtype=expect_dtype)\n )\n\n got = op(a, b)\n assert expect.dtype == got.dtype\n utils.assert_eq(expect, got)\n\n\[email protected](\n \"args\",\n [\n (\n operator.eq,\n [\"100\", \"41\", None],\n cudf.Decimal64Dtype(scale=0, precision=5),\n [100, 42, 12],\n cudf.Series([True, False, None], dtype=bool),\n cudf.Series([True, False, None], dtype=bool),\n ),\n (\n operator.eq,\n [\"100.000\", \"42.001\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n [100, 42, 12],\n cudf.Series([True, False, None], dtype=bool),\n cudf.Series([True, False, None], dtype=bool),\n ),\n (\n operator.eq,\n [\"100\", \"40\", None],\n cudf.Decimal64Dtype(scale=-1, precision=3),\n [100, 42, 12],\n cudf.Series([True, False, None], dtype=bool),\n cudf.Series([True, False, None], dtype=bool),\n ),\n (\n operator.lt,\n [\"100\", \"40\", \"28\", None],\n cudf.Decimal64Dtype(scale=0, precision=3),\n [100, 42, 24, 12],\n cudf.Series([False, True, False, None], dtype=bool),\n cudf.Series([False, False, True, None], dtype=bool),\n ),\n (\n operator.lt,\n [\"100.000\", \"42.002\", \"23.999\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n [100, 42, 24, 12],\n cudf.Series([False, False, True, None], dtype=bool),\n cudf.Series([False, True, False, None], dtype=bool),\n ),\n (\n operator.lt,\n [\"100\", \"40\", \"10\", None],\n cudf.Decimal64Dtype(scale=-1, precision=3),\n [100, 42, 8, 12],\n cudf.Series([False, True, False, None], dtype=bool),\n cudf.Series([False, False, True, None], dtype=bool),\n ),\n (\n operator.gt,\n [\"100\", \"42\", \"20\", None],\n cudf.Decimal64Dtype(scale=0, precision=3),\n [100, 40, 24, 12],\n cudf.Series([False, True, False, None], dtype=bool),\n cudf.Series([False, False, True, None], dtype=bool),\n ),\n (\n operator.gt,\n [\"100.000\", \"42.002\", \"23.999\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n [100, 42, 24, 12],\n cudf.Series([False, True, False, None], dtype=bool),\n cudf.Series([False, False, True, None], dtype=bool),\n ),\n (\n operator.gt,\n [\"100\", \"40\", \"10\", None],\n cudf.Decimal64Dtype(scale=-1, precision=3),\n [100, 42, 8, 12],\n cudf.Series([False, False, True, None], dtype=bool),\n cudf.Series([False, True, False, None], dtype=bool),\n ),\n (\n operator.le,\n [\"100\", \"40\", \"28\", None],\n cudf.Decimal64Dtype(scale=0, precision=3),\n [100, 42, 24, 12],\n cudf.Series([True, True, False, None], dtype=bool),\n cudf.Series([True, False, True, None], dtype=bool),\n ),\n (\n operator.le,\n [\"100.000\", \"42.002\", \"23.999\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n [100, 42, 24, 12],\n cudf.Series([True, False, True, None], dtype=bool),\n cudf.Series([True, True, False, None], dtype=bool),\n ),\n (\n operator.le,\n [\"100\", \"40\", \"10\", None],\n cudf.Decimal64Dtype(scale=-1, precision=3),\n [100, 
42, 8, 12],\n cudf.Series([True, True, False, None], dtype=bool),\n cudf.Series([True, False, True, None], dtype=bool),\n ),\n (\n operator.ge,\n [\"100\", \"42\", \"20\", None],\n cudf.Decimal64Dtype(scale=0, precision=3),\n [100, 40, 24, 12],\n cudf.Series([True, True, False, None], dtype=bool),\n cudf.Series([True, False, True, None], dtype=bool),\n ),\n (\n operator.ge,\n [\"100.000\", \"42.002\", \"23.999\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n [100, 42, 24, 12],\n cudf.Series([True, True, False, None], dtype=bool),\n cudf.Series([True, False, True, None], dtype=bool),\n ),\n (\n operator.ge,\n [\"100\", \"40\", \"10\", None],\n cudf.Decimal64Dtype(scale=-1, precision=3),\n [100, 42, 8, 12],\n cudf.Series([True, False, True, None], dtype=bool),\n cudf.Series([True, True, False, None], dtype=bool),\n ),\n ],\n)\[email protected](\"integer_dtype\", cudf.tests.utils.INTEGER_TYPES)\[email protected](\"reflected\", [True, False])\ndef test_binops_decimal_comp_mixed_integer(args, integer_dtype, reflected):\n \"\"\"\n Tested compare operations:\n eq, lt, gt, le, ge\n Each operation has 3 decimal data setups, with scale from {==0, >0, <0}.\n Decimal precisions are sufficient to hold the digits.\n For each decimal data setup, there is at least one row that lead to one\n of the following compare results: {True, False, None}.\n \"\"\"\n if not reflected:\n op, ldata, ldtype, rdata, expected, _ = args\n else:\n op, ldata, ldtype, rdata, _, expected = args\n\n lhs = _decimal_series(ldata, ldtype)\n rhs = cudf.Series(rdata, dtype=integer_dtype)\n\n if reflected:\n rhs, lhs = lhs, rhs\n\n actual = op(lhs, rhs)\n\n utils.assert_eq(expected, actual)\n\n\[email protected](\n \"args\",\n [\n (\n operator.add,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n decimal.Decimal(1),\n [\"101\", \"201\"],\n cudf.Decimal64Dtype(scale=0, precision=6),\n False,\n ),\n (\n operator.add,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n 1,\n [\"101\", \"201\"],\n cudf.Decimal64Dtype(scale=0, precision=6),\n False,\n ),\n (\n operator.add,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n decimal.Decimal(\"1.5\"),\n [\"101.5\", \"201.5\"],\n cudf.Decimal64Dtype(scale=1, precision=7),\n False,\n ),\n (\n operator.add,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n cudf.Scalar(decimal.Decimal(\"1.5\")),\n [\"101.5\", \"201.5\"],\n cudf.Decimal64Dtype(scale=1, precision=7),\n False,\n ),\n (\n operator.add,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n decimal.Decimal(1),\n [\"101\", \"201\"],\n cudf.Decimal64Dtype(scale=0, precision=6),\n True,\n ),\n (\n operator.add,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n 1,\n [\"101\", \"201\"],\n cudf.Decimal64Dtype(scale=0, precision=6),\n True,\n ),\n (\n operator.add,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n decimal.Decimal(\"1.5\"),\n [\"101.5\", \"201.5\"],\n cudf.Decimal64Dtype(scale=1, precision=7),\n True,\n ),\n (\n operator.add,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n cudf.Scalar(decimal.Decimal(\"1.5\")),\n [\"101.5\", \"201.5\"],\n cudf.Decimal64Dtype(scale=1, precision=7),\n True,\n ),\n (\n operator.mul,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n 1,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=5),\n False,\n ),\n (\n operator.mul,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n 
decimal.Decimal(2),\n [\"200\", \"400\"],\n cudf.Decimal64Dtype(scale=-2, precision=5),\n False,\n ),\n (\n operator.mul,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n decimal.Decimal(\"1.5\"),\n [\"150\", \"300\"],\n cudf.Decimal64Dtype(scale=-1, precision=6),\n False,\n ),\n (\n operator.mul,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n cudf.Scalar(decimal.Decimal(\"1.5\")),\n [\"150\", \"300\"],\n cudf.Decimal64Dtype(scale=-1, precision=6),\n False,\n ),\n (\n operator.mul,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n 1,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=5),\n True,\n ),\n (\n operator.mul,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n decimal.Decimal(2),\n [\"200\", \"400\"],\n cudf.Decimal64Dtype(scale=-2, precision=5),\n True,\n ),\n (\n operator.mul,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n decimal.Decimal(\"1.5\"),\n [\"150\", \"300\"],\n cudf.Decimal64Dtype(scale=-1, precision=6),\n True,\n ),\n (\n operator.mul,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n cudf.Scalar(decimal.Decimal(\"1.5\")),\n [\"150\", \"300\"],\n cudf.Decimal64Dtype(scale=-1, precision=6),\n True,\n ),\n (\n operator.sub,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n decimal.Decimal(2),\n [\"98\", \"198\"],\n cudf.Decimal64Dtype(scale=0, precision=6),\n False,\n ),\n (\n operator.sub,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n decimal.Decimal(\"2.5\"),\n [\"97.5\", \"197.5\"],\n cudf.Decimal64Dtype(scale=1, precision=7),\n False,\n ),\n (\n operator.sub,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n 4,\n [\"96\", \"196\"],\n cudf.Decimal64Dtype(scale=0, precision=6),\n False,\n ),\n (\n operator.sub,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n cudf.Scalar(decimal.Decimal(\"2.5\")),\n [\"97.5\", \"197.5\"],\n cudf.Decimal64Dtype(scale=1, precision=7),\n False,\n ),\n (\n operator.sub,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n decimal.Decimal(2),\n [\"-98\", \"-198\"],\n cudf.Decimal64Dtype(scale=0, precision=6),\n True,\n ),\n (\n operator.sub,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n 4,\n [\"-96\", \"-196\"],\n cudf.Decimal64Dtype(scale=0, precision=6),\n True,\n ),\n (\n operator.sub,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n decimal.Decimal(\"2.5\"),\n [\"-97.5\", \"-197.5\"],\n cudf.Decimal64Dtype(scale=1, precision=7),\n True,\n ),\n (\n operator.sub,\n [\"100\", \"200\"],\n cudf.Decimal64Dtype(scale=-2, precision=3),\n cudf.Scalar(decimal.Decimal(\"2.5\")),\n [\"-97.5\", \"-197.5\"],\n cudf.Decimal64Dtype(scale=1, precision=7),\n True,\n ),\n ],\n)\ndef test_binops_decimal_scalar(args):\n op, lhs, l_dtype, rhs, expect, expect_dtype, reflect = args\n\n def decimal_series(input, dtype):\n return cudf.Series(\n [x if x is None else decimal.Decimal(x) for x in input],\n dtype=dtype,\n )\n\n lhs = decimal_series(lhs, l_dtype)\n expect = decimal_series(expect, expect_dtype)\n\n if reflect:\n lhs, rhs = rhs, lhs\n\n got = op(lhs, rhs)\n assert expect.dtype == got.dtype\n utils.assert_eq(expect, got)\n\n\[email protected](\n \"args\",\n [\n (\n operator.eq,\n [\"100.00\", \"41\", None],\n cudf.Decimal64Dtype(scale=0, precision=5),\n 100,\n cudf.Series([True, False, None], dtype=bool),\n cudf.Series([True, False, None], dtype=bool),\n ),\n (\n 
operator.eq,\n [\"100.123\", \"41\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n decimal.Decimal(\"100.123\"),\n cudf.Series([True, False, None], dtype=bool),\n cudf.Series([True, False, None], dtype=bool),\n ),\n (\n operator.eq,\n [\"100.123\", \"41\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n cudf.Scalar(decimal.Decimal(\"100.123\")),\n cudf.Series([True, False, None], dtype=bool),\n cudf.Series([True, False, None], dtype=bool),\n ),\n (\n operator.gt,\n [\"100.00\", \"41\", \"120.21\", None],\n cudf.Decimal64Dtype(scale=2, precision=5),\n 100,\n cudf.Series([False, False, True, None], dtype=bool),\n cudf.Series([False, True, False, None], dtype=bool),\n ),\n (\n operator.gt,\n [\"100.123\", \"41\", \"120.21\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n decimal.Decimal(\"100.123\"),\n cudf.Series([False, False, True, None], dtype=bool),\n cudf.Series([False, True, False, None], dtype=bool),\n ),\n (\n operator.gt,\n [\"100.123\", \"41\", \"120.21\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n cudf.Scalar(decimal.Decimal(\"100.123\")),\n cudf.Series([False, False, True, None], dtype=bool),\n cudf.Series([False, True, False, None], dtype=bool),\n ),\n (\n operator.ge,\n [\"100.00\", \"41\", \"120.21\", None],\n cudf.Decimal64Dtype(scale=2, precision=5),\n 100,\n cudf.Series([True, False, True, None], dtype=bool),\n cudf.Series([True, True, False, None], dtype=bool),\n ),\n (\n operator.ge,\n [\"100.123\", \"41\", \"120.21\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n decimal.Decimal(\"100.123\"),\n cudf.Series([True, False, True, None], dtype=bool),\n cudf.Series([True, True, False, None], dtype=bool),\n ),\n (\n operator.ge,\n [\"100.123\", \"41\", \"120.21\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n cudf.Scalar(decimal.Decimal(\"100.123\")),\n cudf.Series([True, False, True, None], dtype=bool),\n cudf.Series([True, True, False, None], dtype=bool),\n ),\n (\n operator.lt,\n [\"100.00\", \"41\", \"120.21\", None],\n cudf.Decimal64Dtype(scale=2, precision=5),\n 100,\n cudf.Series([False, True, False, None], dtype=bool),\n cudf.Series([False, False, True, None], dtype=bool),\n ),\n (\n operator.lt,\n [\"100.123\", \"41\", \"120.21\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n decimal.Decimal(\"100.123\"),\n cudf.Series([False, True, False, None], dtype=bool),\n cudf.Series([False, False, True, None], dtype=bool),\n ),\n (\n operator.lt,\n [\"100.123\", \"41\", \"120.21\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n cudf.Scalar(decimal.Decimal(\"100.123\")),\n cudf.Series([False, True, False, None], dtype=bool),\n cudf.Series([False, False, True, None], dtype=bool),\n ),\n (\n operator.le,\n [\"100.00\", \"41\", \"120.21\", None],\n cudf.Decimal64Dtype(scale=2, precision=5),\n 100,\n cudf.Series([True, True, False, None], dtype=bool),\n cudf.Series([True, False, True, None], dtype=bool),\n ),\n (\n operator.le,\n [\"100.123\", \"41\", \"120.21\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n decimal.Decimal(\"100.123\"),\n cudf.Series([True, True, False, None], dtype=bool),\n cudf.Series([True, False, True, None], dtype=bool),\n ),\n (\n operator.le,\n [\"100.123\", \"41\", \"120.21\", None],\n cudf.Decimal64Dtype(scale=3, precision=6),\n cudf.Scalar(decimal.Decimal(\"100.123\")),\n cudf.Series([True, True, False, None], dtype=bool),\n cudf.Series([True, False, True, None], dtype=bool),\n ),\n ],\n)\[email protected](\"reflected\", [True, False])\ndef test_binops_decimal_scalar_compare(args, 
reflected):\n \"\"\"\n Tested compare operations:\n eq, lt, gt, le, ge\n Each operation has 3 data setups: pyints, Decimal, and\n decimal cudf.Scalar\n For each data setup, there is at least one row that lead to one of the\n following compare results: {True, False, None}.\n \"\"\"\n if not reflected:\n op, ldata, ldtype, rdata, expected, _ = args\n else:\n op, ldata, ldtype, rdata, _, expected = args\n\n lhs = _decimal_series(ldata, ldtype)\n rhs = rdata\n\n if reflected:\n rhs, lhs = lhs, rhs\n\n actual = op(lhs, rhs)\n\n utils.assert_eq(expected, actual)\n\n\[email protected](\n \"dtype\",\n [\n \"uint8\",\n \"uint16\",\n \"uint32\",\n \"uint64\",\n \"int8\",\n \"int16\",\n \"int32\",\n \"int64\",\n \"float32\",\n \"float64\",\n \"str\",\n \"datetime64[ns]\",\n \"datetime64[us]\",\n \"datetime64[ms]\",\n \"datetime64[s]\",\n \"timedelta64[ns]\",\n \"timedelta64[us]\",\n \"timedelta64[ms]\",\n \"timedelta64[s]\",\n ],\n)\[email protected](\"null_scalar\", [None, cudf.NA, np.datetime64(\"NaT\")])\[email protected](\"cmpop\", _cmpops)\ndef test_column_null_scalar_comparison(dtype, null_scalar, cmpop):\n # This test is meant to validate that comparing\n # a series of any dtype with a null scalar produces\n # a new series where all the elements are <NA>.\n\n if isinstance(null_scalar, np.datetime64):\n if np.dtype(dtype).kind not in \"mM\":\n pytest.skip()\n null_scalar = null_scalar.astype(dtype)\n\n dtype = np.dtype(dtype)\n\n data = [1, 2, 3, 4, 5]\n sr = cudf.Series(data, dtype=dtype)\n result = cmpop(sr, null_scalar)\n\n assert result.isnull().all()\n\n\[email protected](\"fn\", [\"eq\", \"ne\", \"lt\", \"gt\", \"le\", \"ge\"])\ndef test_equality_ops_index_mismatch(fn):\n a = cudf.Series(\n [1, 2, 3, None, None, 4], index=[\"a\", \"b\", \"c\", \"d\", \"e\", \"f\"]\n )\n b = cudf.Series(\n [-5, 4, 3, 2, 1, 0, 19, 11],\n index=[\"aa\", \"b\", \"c\", \"d\", \"e\", \"f\", \"y\", \"z\"],\n )\n\n pa = a.to_pandas(nullable=True)\n pb = b.to_pandas(nullable=True)\n expected = getattr(pa, fn)(pb)\n actual = getattr(a, fn)(b).to_pandas(nullable=True)\n\n utils.assert_eq(expected, actual)\n\n\ndef generate_test_null_equals_columnops_data():\n # Generate tuples of:\n # (left_data, right_data, compare_bool\n # where compare_bool is the correct answer to\n # if the columns should compare as null equals\n\n def set_null_cases(column_l, column_r, case):\n if case == \"neither\":\n return column_l, column_r\n elif case == \"left\":\n column_l[1] = None\n elif case == \"right\":\n column_r[1] = None\n elif case == \"both\":\n column_l[1] = None\n column_r[1] = None\n else:\n raise ValueError(\"Unknown null case\")\n return column_l, column_r\n\n null_cases = [\"neither\", \"left\", \"right\", \"both\"]\n data = [1, 2, 3]\n\n results = []\n # TODO: Numeric types can be cross compared as null equal\n for dtype in (\n list(NUMERIC_TYPES)\n + list(DATETIME_TYPES)\n + list(TIMEDELTA_TYPES)\n + list(STRING_TYPES)\n + [\"category\"]\n ):\n for case in null_cases:\n left = cudf.Series(data, dtype=dtype)\n right = cudf.Series(data, dtype=dtype)\n if case in {\"left\", \"right\"}:\n answer = False\n else:\n answer = True\n left, right = set_null_cases(left, right, case)\n results.append((left._column, right._column, answer, case))\n\n return results\n\n\[email protected](\n \"lcol,rcol,ans,case\", generate_test_null_equals_columnops_data()\n)\ndef test_null_equals_columnops(lcol, rcol, ans, case):\n assert lcol._null_equals(rcol).all() == ans\n"
] | [
[
"pandas.Series",
"numpy.true_divide",
"numpy.dtype",
"numpy.logical_and",
"pandas.DataFrame",
"numpy.random.seed",
"numpy.testing.assert_array_equal",
"numpy.random.choice",
"numpy.datetime_data",
"numpy.random.random",
"numpy.float16",
"pandas.Index",
"numpy.int64",
"numpy.datetime64",
"pandas.DateOffset",
"numpy.isscalar",
"numpy.isnan",
"numpy.random.randint"
]
] |
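The row above embeds a cudf binary-operations test module whose parametrization helpers (make_valid_scalar_add_data, make_scalar_difference_data, and so on) all follow one pattern: take unions of dtype-name sets and enumerate operand pairs with itertools.product. The standalone sketch below illustrates that pattern using only NumPy and the standard library; the trimmed dtype sets and the helper names make_valid_numeric_add_pairs / check_pair are simplified stand-ins introduced here for illustration, not part of the repository recorded above.

from itertools import product

import numpy as np

# Simplified stand-ins for the dtype-name sets used by the test module above.
INTEGER_TYPES = {"int8", "int16", "int32", "int64"}
FLOAT_TYPES = {"float32", "float64"}
BOOL_TYPES = {"bool"}


def make_valid_numeric_add_pairs():
    """Every numeric/bool dtype may be added to every other numeric/bool dtype."""
    return sorted(product(INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES, repeat=2))


def check_pair(dtype_l, dtype_r, value=1):
    """Build one host scalar per dtype and let NumPy's promotion rules add them."""
    lval = np.dtype(dtype_l).type(value)
    rval = np.dtype(dtype_r).type(value)
    return lval + rval


if __name__ == "__main__":
    for dtype_l, dtype_r in make_valid_numeric_add_pairs():
        result = check_pair(dtype_l, dtype_r)
        print(f"{dtype_l} + {dtype_r} -> {np.asarray(result).dtype}")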
pedbrgs/anomaly-detection-tool | [
"1b5d89eb1287eb13849d87851a8c3c4cc708a93e"
] | [
"utils.py"
] | [
"import cv2\nimport numpy as np\nfrom PIL import Image\nfrom matplotlib import pyplot as plt\n\nimport torch\nimport torchvision.models as models\nfrom torch.autograd import Variable\nimport torchvision.transforms as transforms\n\ndef plot_image(image, figsize):\n\n \"\"\" Display an image \"\"\"\n\n fig = plt.figure(figsize = figsize)\n plt.imshow(image, cmap = 'gray')\n plt.title(''), plt.xticks([]), plt.yticks([])\n plt.show()\n\ndef pattern_detection(img, figsize):\n \n \"\"\" Performs object segmentation by morphological filtering \"\"\"\n\n # BGR to grayscale\n imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n \n img_backup = img.copy()\n\n # Get image size\n height, width, _ = np.array(img).shape\n\n # Erosion morphological filter\n kernel = np.ones((3,3), np.uint8)\n erosion = cv2.erode(imgGray, kernel, iterations = 2)\n th = cv2.threshold(erosion, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)\n\n # Image binarization \n th = erosion.mean()\n imBin = erosion > th\n \n # Finding contours\n ret, thresh = cv2.threshold(erosion, 127, 255, 0)\n contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n \n # Compute contour areas for noise filtering\n areas = [cv2.contourArea(cnt) for cnt in contours]\n\n patterns, objects = [], []\n \n # Drawing bounding boxes around the contours\n for cnt in contours:\n # Filtering large and small bounding boxes\n if (cv2.contourArea(cnt) > 50 and cv2.contourArea(cnt) < np.max(areas)):\n # Get bounding box coordinates\n x, y, w, h = cv2.boundingRect(cnt)\n patterns.append([x, y, w, h])\n objects.append(cv2.cvtColor(img_backup[y:(y + h), x:(x+w)], cv2.COLOR_BGR2RGB))\n # Draw bounding box\n img_backup = cv2.rectangle(img_backup, (x, y),(x+w, y+h),(255, 0, 0), 1)\n\n return patterns, objects\n\ndef image_loader(image):\n \n \"\"\" Load image and returns pytorch tensor \"\"\"\n\n imsize = 256\n loader = transforms.Compose([transforms.Resize(imsize), transforms.ToTensor()])\n\n image = Image.fromarray(image)\n image = loader(image).float()\n image = Variable(image, requires_grad = True)\n image = image.unsqueeze(0)\n # .cuda() assumes that you are using GPU\n return image\n\ndef build_model():\n\n \"\"\" Build feature extractor based on ResNet-34 \"\"\"\n\n # If True, returns a model pre-trained on ImageNet\n convnet = models.resnet34(pretrained = True)\n convnet = list(convnet.children())[:-2]\n convnet = torch.nn.Sequential(*convnet, torch.nn.AdaptiveAvgPool2d(output_size = (4, 4)))\n \n return convnet\n\ndef feature_extraction(model, objects, patterns):\n\n \"\"\" Feature extraction from all detected patterns \"\"\"\n\n feature_vectors = []\n\n for i in range(len(patterns)):\n\n x_min, y_min, width, height = patterns[i][0], patterns[i][1], patterns[i][2], patterns[i][3]\n image = image_loader(objects[i])\n # Forward pass in each pattern\n features = model.forward(image)\n features = features.flatten().detach().numpy()\n feature_vectors.append(features)\n\n return feature_vectors\n\ndef pairwise_matrix(feature_vectors):\n\n \"\"\" Compute cosine similarity between feature vectors \"\"\"\n\n cosine_similarity = np.ones((len(feature_vectors[0]), len(feature_vectors[0])))\n\n for i in range(len(feature_vectors)-1):\n for j in range(len(feature_vectors)-1):\n cosine_similarity[i,j] = np.dot(feature_vectors[i], feature_vectors[j]) / (np.linalg.norm(feature_vectors[i]) * np.linalg.norm(feature_vectors[j]))\n\n return cosine_similarity"
] | [
[
"numpy.ones",
"numpy.linalg.norm",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.figure",
"torch.nn.AdaptiveAvgPool2d",
"torch.autograd.Variable",
"matplotlib.pyplot.title",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.show",
"numpy.max",
"numpy.array",
"numpy.dot",
"matplotlib.pyplot.yticks"
]
] |
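The utils.py row above ends with a pairwise_matrix function whose nested loops are sized by len(feature_vectors[0]) (the feature dimension) and range over len(feature_vectors) - 1, so the last vector never enters the comparison. The sketch below is not the repository's implementation; it is a NumPy-only illustration of the same cosine-similarity step in vectorized form, with the matrix sized by the number of vectors. The name cosine_similarity_matrix is introduced here for illustration.

import numpy as np


def cosine_similarity_matrix(feature_vectors):
    """Return the (n, n) cosine-similarity matrix for n feature vectors."""
    X = np.asarray(feature_vectors, dtype=np.float64)    # shape (n, d)
    norms = np.linalg.norm(X, axis=1, keepdims=True)     # shape (n, 1)
    X_unit = X / np.clip(norms, 1e-12, None)             # guard against zero vectors
    return X_unit @ X_unit.T                             # unit-vector dot products


if __name__ == "__main__":
    rng = np.random.default_rng(0)
    vectors = rng.normal(size=(5, 512))   # e.g. five pooled CNN feature vectors
    S = cosine_similarity_matrix(vectors)
    print(S.shape)                        # (5, 5)
    print(np.allclose(np.diag(S), 1.0))   # each vector is maximally similar to itself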
leonavery/KSFD | [
"090e388df13a2674676cbaa53171f2a87291ba9b"
] | [
"KSFD/ksfdtimeseries.py"
] | [
"\"\"\"\nMPI-aware read and write PETSc Vec to HDF5\n\nThe goal of this module is to save snapshots of a PETSc Vec to HDF5\nfiles, and obviously to read them again later. The obvious way to do\nthis is parallel HDF5. Unfortunately, distributions of HDF5 and h5py\nmay be built without support for parallel operation. (In particular,\nthe conda-forge version doesn't have it.) This is accomplished through\nthe following kludge:\n\nWhen a KSFD.TimeSeries is created with name tsname and argument mpiok\nTrue, the runtime envirnoment is checked to find out if parallel HDF5\nis enabled (using h5py.getconfig().mpi). If so, the data are stored in\nan HDF5 file named\n\n'{name}MPI.h5'.format(name=tsname). \n\nNote: there is a serious problem with parallel HDF5: variable length\nrecords can't be written. If you try, you get this exception:\n\nOSError: Can't write data (Parallel IO does not support writing VL\ndatatypes yet)\n\nSince that makes parallel HDF5 a nonstarter for my purposes, mpiok\ndefaults to False. You won't get parallel MPI unless you specifically\nask for it, and then dealing with the lack of VL records is your\nproblem.\n\nIf not, each process stores the data it owns in a file named\n\n'{name}s{size}r{rank}.h5'.format(name=tsname, size=comm.size, rank=comm.rank)\n\nwhere comm is the MPI communicator. If run sequentially the data will\nall be stored in a file called '{name}s1r0.h5'. It is intended that\nthe *MPI.h5 file created using parallele HDF5 and the *s1r0.h5 file\ncreated when running sequentially and parallel HDF5 is not available\nwill be the same. \n\nThe same procedure is used for finding the filename when opening in\nread/write mode ('r+' or 'a'). \n\nWhen opening a TimeSeries for read (mode 'r') TimeSeries checks (in\norder) for the *s<size>r<rank>.h5 file, then the *MPI.h5 file ,and\nfinally a *s1r0.h5 file, and opens the first it finds. In this case\nthe retrieve methods will only return the components of the vector\nowned by the local process. \n\nFinally, I will write a simple script to merge all the files of\n*s<size>r<rank>.h5 series into a single *MPI.h5 file. In this way an\nMPi process group of any size will be able to retrieve data written by\na process group of any size. \n\"\"\"\nimport h5py, os, re, gc, time\nimport traceback as tb\nimport numpy as np\nimport petsc4py\nfrom mpi4py import MPI\n#\n# These imports are placed inside a try/except so that this script can\n# be executed standalone to check for syntax errors.\n#\ntry:\n from .ksfddebug import log\n from .ksfdgrid import Grid\nexcept ImportError:\n from ksfddebug import log\n from ksfdgrid import Grid\n\ndef logSERIES(*args, **kwargs):\n log(*args, system='SERIES', **kwargs)\n\n\nclass KSFDTimeSeries:\n \"\"\"\n Base class for TimeSeries\n\n KSFDTimeSeries is intended as an abstract base class for reading and\n writing time series from KSFD solutions to HDF5 files. It is not\n formally defined as an ABC: you can instantiate it if you really\n wish, but it is not designed to make that a useful thing to do.\n \"\"\"\n def __init__(\n self,\n basename,\n size=1,\n rank=0,\n mpiok=False,\n mode='r+',\n retries=0,\n retry_interval=60\n ):\n \"\"\"\n Required parameter:\n\n basename: the prefix of the filename.\n\n Optional keyword parameters:\n size=1: Number of MPI processes. This typically corresponds to\n comm.size for an MPI communicator comm.\n rank=0: Number of the MPI process that created this\n file. 
Typically comm.rank.\n mpiok=True: Whether parallel HDF5 should be used to store to\n store all the data from all MPI processes in a single\n file.\n mode='r+': The file mode for opening the h5py.File.\n retries=0. If nonzero, retry faile dopens this many times.\n retry_interval=60: time (in secodns) between successive\n retries. Note: the open will block while waiting for a\n successful retry.\n\n size, rank, and mpiok are used mostly to figure out what\n filename to use. They need not correspond to the actual\n current MPU configuration. For instance, they may correspond\n to the config when the time series was created.\n \"\"\"\n self.get_filename(basename, size, rank, mpiok, mode)\n self.retries = retries\n self.retry_interval = retry_interval\n self._size = size\n self._rank = rank\n self._mode = mode\n self._tsf = self.open_with_retry()\n _ = self.info # make sure '/info' exists\n self.try_to_set('size', self.size)\n self.try_to_set('rank', self.rank)\n if 'times' in self.tsf:\n self.ts = np.array(self.tsf['times'][()])\n try:\n self.ks = np.array(self.tsf['ks'][()])\n except KeyError:\n self.ks = np.arange(len(self.ts))\n self.order = np.array(self.tsf['order'][()])\n else:\n self.ts = np.array([], dtype=float)\n self.ks = np.array([], dtype=int)\n self.order = np.array([], dtype=int)\n self.lastk = self.ks.size - 1\n self.sorted = False\n self.tsf.flush()\n\n def parse_filename(filename):\n \"\"\"\n filename is a name like 'bases2r1.h5'. parse_filename returns\n (basename, size, rank, mpi) (('base', 2, 1, False) for the\n example). For a filename like 'tests/test1mpi.h5', returns\n ('base', 1, 0, True). \n \"\"\"\n mpipat = '(.*)MPI\\.h5'\n nompi_pat = '(.*)s(\\d+)r(\\d+)\\.h5'\n res = re.fullmatch(mpipat, filename)\n if res:\n return (res[1], 1, 0, True)\n res = re.fullmatch(nompi_pat, filename)\n if res:\n return (res[1], res[2], res[3], False)\n raise ValueError(\n \"Couldn't parse filename {fname}\".format(fname=filename)\n )\n\n def set_grid(self, grid):\n self._grid = grid\n self._dim = grid.dim\n self._dof = grid.dof\n if self.rank_owns_file:\n self._ranges = grid.ranges\n # if (\n # 'ranges' in self.tsf and\n # not np.all(self.tsf['ranges'][()] == self.ranges)\n # ):\n # raise ValueError(\n # \"data ranges {filerange} in {file} doesn't \" +\n # \"match grid range {gridrange}\".format(\n # filerange=str(self.tsf['ranges'][()]),\n # file=self.filename,\n # gridrange=str(grid.ranges)\n # )\n # )\n self.myslice = (slice(0, None),)*(self.dim + 1)\n else:\n self._ranges = tuple((0, np) for np in grid.nps)\n #\n # Slice of the global array belonging to this process:\n self.myslice = (slice(0, None),) + tuple(\n slice(*r) for r in grid.ranges\n )\n self.try_to_set('ranges', self.ranges)\n \n def get_filename(self, basename, size=1, rank=0, mpiok=True,\n mode='r+'):\n \"\"\"\n Get name of file to be opened by this process\n\n self.filename is set to the name of the HDF5 file to be\n opened. This is also returned as the function value. 
In\n addition, the following flags are set:\n self.creating: True if creating a new file.\n self.rank_owns_file: True if the file will be exclusively\n owned by this process.\n \"\"\"\n self.usempi = mpiok and h5py.get_config().mpi\n name_nompi = '{name}s{size}r{rank}.h5'.format(\n name=basename,\n size=size,\n rank=rank\n )\n name_mpi = '{name}MPI.h5'.format(name=basename)\n name_seq = '{name}s1r0.h5'.format(name=basename)\n self.driver = None\n if self.usempi and os.path.isfile(name_mpi):\n self.creating = mode[0] == 'w' or mode[0] == 'x'\n self.rank_owns_file = size == 1\n self.filename = name_mpi\n elif self.usempi and (mode[0] == 'w' or mode[0] == 'x'):\n self.creating = True\n self.rank_owns_file = size == 1\n self.filename = name_mpi\n elif os.path.isfile(name_nompi):\n self.creating = mode[0] == 'w' or mode[0] == 'x'\n self.rank_owns_file = True\n self.filename = name_nompi\n elif (mode == 'r' or mode == 'a') and os.path.isfile(name_seq):\n self.creating = False\n self.rank_owns_file = size == 1\n self.filename = name_seq\n # Allow reading from MPi file even if we're not using MPI:\n elif (mode == 'r' or mode == 'a') and os.path.isfile(name_mpi):\n self.creating = False\n self.rank_owns_file = size == 1\n self.filename = name_mpi\n else:\n self.creating = mode != 'r'\n self.rank_owns_file = not self.usempi\n self.filename = name_mpi if self.usempi else name_nompi\n if self.creating and not self.rank_owns_file and self.usempi:\n self.driver = 'mpio'\n if self.creating:\n os.makedirs(os.path.dirname(self.filename), exist_ok=True)\n logSERIES('self.filename', self.filename)\n logSERIES('self.creating', self.creating)\n logSERIES('self.rank_owns_file', self.rank_owns_file)\n logSERIES('self.driver', self.driver)\n logSERIES('self.usempi', self.usempi)\n return self.filename\n\n def open(self, filename, usempi, mode):\n if mode in ['w', 'w-', 'x', 'a']:\n dirname = os.path.dirname(os.path.abspath(filename))\n try:\n os.makedirs(dirname, exist_ok=True)\n except FileExistsError:\n pass\n\n def grid_save(self):\n grid = self.grid\n attrs = ['dim', 'dof', 'nps', 'bounds', 'spacing', 'order',\n 'stencil_width', 'stencil_type', 'boundary_type',\n 'globalSshape', 'globalVshape', 'globalCshape', 'Slshape',\n 'Vlshape', 'ranges', 'Clshape', 'Cashape',\n 'coordsNoGhosts', 'coordsWithGhosts',\n ]\n for a in attrs:\n self.try_to_set('/grid/' + a, getattr(grid, a))\n\n def grid_read(self):\n \"\"\"Reads grid params from open file, returns dict\"\"\"\n ggroup = self.tsf['grid']\n gd = {}\n attrs = ['dim', 'dof', 'nps', 'bounds', 'spacing', 'order',\n 'stencil_width', 'stencil_type', 'boundary_type',\n 'globalSshape', 'globalVshape', 'globalCshape', 'Slshape',\n 'Vlshape', 'ranges', 'Clshape', 'Cashape',\n 'coordsNoGhosts', 'coordsWithGhosts',\n ]\n for a in attrs:\n try:\n val = ggroup[a][()]\n if a.endswith('shape'):\n gd[a] = tuple(val)\n elif np.isscalar(val):\n gd[a] = val.item()\n else:\n gd[a] = val\n except KeyError:\n gd[a] = None\n gd['width'] = gd['bounds'][0]\n gd['height'] = gd['bounds'][1] if gd['dim'] > 1 else 1.0\n gd['depth'] = gd['bounds'][2] if gd['dim'] > 2 else 1.0\n gd['nx'] = gd['nps'][0]\n gd['ny'] = gd['nps'][1] if gd['dim'] > 1 else 8\n gd['nz'] = gd['nps'][2] if gd['dim'] > 2 else 8\n return gd\n\n def grid_load(self, gd=None):\n \"\"\"Reads grid params from open file and creates new Grid.\"\"\"\n if gd is None:\n gd = self.grid_read()\n grid = Grid(\n dim=gd['dim'],\n width=gd['width'],\n height=gd['height'],\n depth=gd['depth'],\n nx=gd['nx'],\n ny=gd['ny'],\n 
nz=gd['nz'],\n dof=gd['dof'],\n order=gd['order'],\n stencil_width=gd['stencil_width'],\n stencil_type=gd['stencil_type'],\n boundary_type=gd['boundary_type']\n )\n self.set_grid(grid)\n\n #\n # info is a place for caller to store stuff\n @property\n def info(self):\n \"\"\"Place for caller to store extra stuff\"\"\"\n if not hasattr(self, '_info') or not self._info:\n self._info = self.tsf.require_group('/info')\n return self._info\n\n @property\n def tsFile(self):\n \"\"\"The open h5File object\"\"\"\n return self._tsf\n\n @property\n def tsf(self):\n return self._tsf\n\n @property\n def size(self):\n return self._size\n\n @property\n def rank(self):\n return self._rank\n\n @property\n def mode(self):\n return self._mode\n\n @property\n def ranges(self):\n return self._ranges\n\n @property\n def comm(self):\n return self._comm\n\n @property\n def grid(self):\n return self._grid\n\n @property\n def dim(self):\n return self._dim\n\n @property\n def dof(self):\n return self._dof\n\n def try_to_set(self, key, val):\n \"\"\"Try to set self.tsf[key] to val, but ignore exceptions\"\"\"\n if (self.mode == 'r'): return\n try:\n del self.tsf[key]\n except KeyError:\n pass\n try:\n self.tsf[key] = val\n except ValueError:\n pass\n \n def _sort(self):\n if getattr(self, 'sorted', False): return\n ts = getattr(self, 'ts', np.array([]))\n self.try_to_set('times', ts)\n self.order = ts.argsort()\n self.try_to_set('order', self.order)\n self.sts = ts\n self.sts.sort()\n ks = getattr(self, 'ks', [])\n lastk = getattr(self, 'lastk', -1)\n self.try_to_set('ks', ks)\n self.try_to_set('lastk', lastk)\n self.sorted = True\n\n def flush(self):\n self._sort()\n self.tsf.flush()\n\n def temp_close(self):\n \"\"\"\n temp_close closes the HDF5 file in which the TimeSeries is\n stored without destroying associated information. The file\n can be reopened with little loss of time. temp_close and\n reopen are intended for use during long solutions. 
If there is\n a crash during solution, a temp-closed TimeSeries will be left\n in a valid state for later use.\n \"\"\"\n self._sort()\n self.tsf.close()\n\n def open_with_retry(\n self,\n fname=None,\n mode=None,\n driver=None,\n comm=None\n ):\n if fname is None:\n fname = self.filename\n if mode is None:\n mode = self.mode\n if driver is None:\n driver = self.driver\n if comm is None:\n comm = self.comm\n if isinstance(comm, petsc4py.PETSc.Comm):\n comm = comm.tompi4py()\n logSERIES('fname, mode, driver, comm', fname, mode, driver, comm)\n try:\n if driver == 'mpio':\n logSERIES('trying 4-argument open')\n comm.Barrier()\n logSERIES('comm.rank, comm.size', comm.rank, comm.size)\n tsf = h5py.File(fname, mode=mode,\n driver=driver, comm=comm)\n else:\n logSERIES('trying 3-argument open')\n tsf = h5py.File(fname, mode=mode,\n driver=driver)\n except OSError:\n retries_left = self.retries\n if retries_left <= 0:\n logSERIES('open failed: re-raising exception')\n raise\n while retries_left > 0:\n logSERIES('reopen failed with OSError: {n} retries left'.format(\n n=retries_left\n ))\n logSERIES('tb.format_exc()', tb.format_exc())\n time.sleep(self.retry_interval)\n try: \n if driver == 'mpio':\n logSERIES('trying 4-argument open')\n comm.Barrier()\n logSERIES('comm.rank, comm.size', comm.rank, comm.size)\n tsf = h5py.File(fname, mode=mode,\n driver=driver, comm=comm)\n else:\n logSERIES('trying 3-argument open')\n tsf = h5py.File(fname, mode=mode,\n driver=driver)\n failed = False\n except OSError:\n failed = True\n if retries_left <= 1:\n raise\n if not failed:\n break\n retries_left -= 1\n return tsf\n \n def reopen(self):\n \"\"\"\n Reopen a temp_closed TimeSeries\n \"\"\"\n mode = self.mode if self.mode == 'r' else 'r+'\n self._tsf = self.open_with_retry(mode=mode)\n\n def close(self):\n if not hasattr(self, '_tsf') or not self._tsf:\n self.reopen()\n self._sort()\n self.tsf.close()\n del self._tsf\n gc.collect()\n \n # def __del__(self):\n # self.close()\n\n def store(self, data, t, k=None):\n if isinstance(data, petsc4py.PETSc.Vec):\n vals = data.array.reshape(self.grid.Vlshape, order='F')\n else:\n vals = data.reshape(self.grid.Vlshape, order='F')\n logSERIES('k, t', k, t)\n if k is None:\n k = self.lastk + 1\n self.lastk = k\n self.ks = np.append(self.ks, k)\n self.ts = np.append(self.ts, t)\n key = 'data' + str(k)\n try:\n dset = self.tsf.create_dataset(key, self.grid.Vlshape,\n dtype=vals.dtype)\n except OSError:\n dset = self.tsf[key] # dset already exists\n Cvals = vals.copy(order='C') # h5py requires C order\n if self.rank_owns_file:\n dset.write_direct(Cvals)\n else:\n dset[self.myslice] = Cvals \n dset.attrs['k'] = k\n dset.attrs['t'] = t\n self.sorted = False\n self.tsf.flush()\n\n def store_slice(self, ranges, data, t, tol=1e-7):\n shape = (self.grid.dof,) + tuple(\n r[1] - r[0] for r in ranges\n )\n slc = (slice(0, None),) + tuple(\n slice(*r) for r in ranges\n )\n vals = data.reshape(shape, order='F')\n na, nb, ta, tb = self.find_time(t)\n logSERIES('na, nb, ta, tb', na, nb, ta, tb)\n if abs(t-ta) <= abs(tb-t):\n n, tn = na, ta\n else:\n n, tn = nb, tb\n if (\n (not (t == 0.0 and tn == 0.0)) and\n ((self.sts.size <= n) or\n (abs(t-tn)/max(abs(t), abs(tn)) > tol))\n ):\n #\n # New time point: append it to the lists\n #\n k = self.lastk + 1\n self.lastk = k\n self.ks = np.append(self.ks, k)\n self.ts = np.append(self.ts, t)\n key = 'data' + str(k)\n dset = self.tsf.create_dataset(key, self.grid.Vlshape,\n dtype=vals.dtype)\n logSERIES('k, t', k, t)\n dset.attrs['k'] = k\n 
dset.attrs['t'] = t\n self.sorted = False\n else:\n k = n\n key = 'data' + str(k)\n dset = self.tsf[key]\n dset[slc] = vals \n self.tsf.flush()\n\n def times(self):\n self._sort()\n return self.ts\n\n def steps(self):\n self._sort()\n return self.ks\n\n def sorted_times(self):\n self._sort()\n return self.sts\n\n def sorted_steps(self):\n self._sort()\n return self.order\n\n def retrieve_by_number(self, k):\n key = 'data' + str(k)\n dset = self.tsf[key]\n if self.rank_owns_file:\n return np.array(dset)\n else:\n return np.array(dset)[self.myslice]\n\n def find_time(self, t):\n \"\"\"\n Find the time points closest to t\n\n Returns tuple (a, b, ta, tb)\n a and b are the numbers (ints) of the points flanking t. ta\n and tb (floats) are the corresponding times. If there is a\n time point exactly matchig nt, than a == b, ta == tb == t.\n \"\"\"\n self._sort()\n if self.sts.size == 0:\n return (0, 0, t - 1.0, t - 1.0)\n if (t <= self.sts[0]):\n a = 0\n return (self.ks[a], self.ks[a], self.sts[a], self.sts[a])\n elif (t >= self.sts[-1]):\n a = len(self.sts) - 1\n return (self.ks[a], self.ks[a], self.sts[a], self.sts[a])\n else:\n b = self.sts.searchsorted(t)\n nb = self.order[b]\n tb = self.sts[b]\n if (b >= len(self.order) - 1):\n return(b, b, self.sts[b], self.sts[b])\n elif tb == t:\n return(b, b, tb, tb)\n a = b - 1\n na = self.order[a]\n ta = self.sts[a]\n return (a, b, ta, tb)\n\n def retrieve_by_time(self, t):\n \"\"\"\n Retrieve a time point.\n \n Arguments:\n t: the time to be retrieved.\n \"\"\"\n na, nb, ta, tb = self.find_time(t)\n adata = self.retrieve_by_number(na)\n if na == nb:\n return adata\n bdata = self.retrieve_by_number(nb)\n data = ((t-ta)*bdata + (tb-t)*adata)/(tb-ta)\n return(data)\n\nclass TimeSeries(KSFDTimeSeries):\n\n def __init__(\n self,\n basename,\n grid=None,\n comm=None,\n mpiok=False,\n mode='r+',\n retries=0,\n retry_interval=60\n ):\n \"\"\"\n Open a KSFD.TimeSeries\n\n Required parameters:\n basename: the name of the TimeSeries. (This is a prefix of the\n names of the HDF5 files in which data are stored.)\n\n Optional parameters:\n grid: The KSFD.Grid on which the PETSc Vecs to be saved are\n defined. This must be supplied when creating a new\n TimeSeries. When opening an existig nseries, it will be\n read from the file if not supplied.\n comm: the MPI communicator. (If not supplied, grid.comm is\n used.)\n mpiok=False: whether it is Ok to use parallel HDF5.\n mode: the file mode (See h5py.h5File.)\n retries=0. If nonzero, retry faile dopens this many times.\n retry_interval=60: time (in secodns) between successive\n retries. Note: the open will block while waiting for a\n successful retry.\n \"\"\"\n if comm:\n self._comm = comm\n elif grid:\n self._comm = grid.comm\n else:\n self._comm = MPI.COMM_SELF\n self._mode = mode\n self._size = self.comm.size\n self._rank = self.comm.rank\n self.mpiok = mpiok\n super().__init__(basename, size=self.size, rank=self.rank,\n mpiok=mpiok, mode=mode, retries=retries,\n retry_interval=retry_interval)\n if (grid):\n self.set_grid(grid)\n self.grid_save()\n else:\n self.grid_load()\n\n\nclass Gatherer(KSFDTimeSeries):\n \"\"\"\n Gatherer is a special-purpose iterator to allow a single\n sequential process to read the separate files written by a\n TimeSeries run under MPI. 
For instance, to reconstruct the global\n vector at the last time (assuming it fits in memory in a single\n process):\n\n gather = Gatherer(basename='base', size=4)\n grid = gather.grid\n lastk = gather.sorted_steps()[-1]\n vec = grid.Vdmda.createGlobalVec()\n vecarray = vec.array.reshape(grid.globalVshape, order='F')\n for series in gather:\n vec = grid.Vdmda.createGlobalVec()\n rank = series.rank\n vecarray[series.slice] = series.retrieve_by_number(lastk)\n \n <do something with vec...>\n\n This gatherer would iterate through files bases4r0.h5,\n bases4r1.h5, bases4r2.h5, and bases4r3.h5. Note that with every\n iteration it closes the last file and opens the next. Thus, if you\n want to iterate over all times, it is more efficient to nest the\n loops like this:\n\n for series in gather:\n for t in series.times():\n <do something for this file at this time)\n\n than the other way. (The other way would be more intuitive, but my\n expectation is that this class will be used mostly to gather all\n TimeSeries files into a single file, which then can be processed\n efficiently as a TimeSeries.)\n \"\"\"\n \n def __init__(\n self,\n basename,\n size=None,\n retries=0,\n retry_interval=60\n ):\n \"\"\"\n Required positional parameter\n \n basename: the prefix of the filenames for the TimeSeries being\n read. As a convenience, this can be a special filename\n that matches the regular expression '(.+)s(\\d+)@.*' (That\n is a literal '@'. Then the basename is the (.+) and the\n size is the (\\d+) following the 's' and preceding\n '@'. For example, \"bases4@' or '[email protected]' would both\n serve for a series with basename 'base' and size 4.\n\n Optional keyword parameter:\n size=None: This argument can be omitted only if the basename\n has the special @ filename format. Otherwise, it must be\n supplied.\n\n Gatherer is read-only (mode 'r'). \n \"\"\"\n self._comm = MPI.COMM_SELF\n self.retries = retries\n self.retry_interval = retry_interval\n gatherre = '(.+)s(\\d+)@.*'\n fname_match = re.fullmatch(gatherre, basename)\n if fname_match:\n base = fname_match[1]\n size = int(fname_match[2])\n else:\n base = basename\n size = size\n self.basename = base\n if not isinstance(size, int) or size <= 0:\n raise ValueError(\n 'size {size} is not a positive int'\n )\n #\n # This opens the first file. We have to do that so as to read\n # and initialize things like grid, times, etc.\n #\n super().__init__(\n basename=base,\n size=size,\n rank=0,\n mpiok=False,\n mode='r',\n retries=retries,\n retry_interval=retry_interval\n )\n self.set_ranges()\n #\n # Since we have to open the rank 0 file before startig\n # iteration, the following flag is used to determine whether\n # to open a new file when __iter__ is called\n #\n self.iter_started = False\n self.iter_stopped = False\n\n def set_ranges(self):\n self.rank_owns_file = True\n gd = self.grid_read()\n self.grid_load(gd)\n self._ranges = gd['ranges']\n self._shape = (self.dof,) + tuple(\n r[1] - r[0] for r in self.ranges\n )\n self._slice = (slice(0, None),) + tuple(\n slice(*r) for r in self.ranges\n )\n \n @property\n def slice(self):\n return self._slice\n\n @property\n def shape(self):\n return self._shape\n\n def __iter__(self):\n return self\n \n def __next__(self):\n if self.iter_stopped:\n #\n # We previously exhausted the iteration. 
Restart it\n #\n self.tsf.close()\n self.__init__(self.basename,\n self.size,\n retries=self.retries,\n retry_interval=self.retry_interval\n )\n elif self.iter_started:\n #\n # We're not just starting: move on to next file\n #\n self.tsf.close()\n self._rank = self.rank + 1\n if self.rank >= self.size:\n self.iter_stopped = True\n raise StopIteration\n super().__init__(\n basename=self.basename,\n size=self.size,\n rank=self.rank,\n mpiok=False,\n mode='r',\n retries=self.retries,\n retry_interval=self.retry_interval\n )\n self.set_ranges()\n self.iter_started = True\n self.iter_stopped = False\n return self\n\n"
] | [
[
"numpy.array",
"numpy.isscalar",
"numpy.append"
]
] |
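The `find_time`/`retrieve_by_time` pair in the KSFD `TimeSeries` record above boils down to: sort the stored times, locate the two stored points flanking the requested time, and linearly interpolate between their arrays. Below is a minimal editorial sketch of that idea in plain numpy; the class and variable names are invented for illustration and are not part of the KSFD package, which additionally handles HDF5 storage, MPI ranks, retries and duplicate time points.

```python
import numpy as np

class TinySeries:
    """Toy time-indexed store: keep (t, array) pairs, retrieve by time with
    linear interpolation between the two flanking stored points."""

    def __init__(self):
        self.ts = []     # times, in insertion order
        self.data = []   # arrays, same order

    def store(self, t, values):
        self.ts.append(float(t))
        self.data.append(np.asarray(values, dtype=float))

    def retrieve_by_time(self, t):
        order = np.argsort(self.ts)
        sts = np.asarray(self.ts)[order]        # sorted times
        if t <= sts[0]:
            return self.data[order[0]]
        if t >= sts[-1]:
            return self.data[order[-1]]
        b = int(np.searchsorted(sts, t))        # first index with sts[b] >= t
        a = b - 1
        ta, tb = sts[a], sts[b]
        da, db = self.data[order[a]], self.data[order[b]]
        # same weighting as retrieve_by_time in the record above
        return ((t - ta) * db + (tb - t) * da) / (tb - ta)

series = TinySeries()
series.store(0.0, [0.0, 0.0])
series.store(1.0, [2.0, 4.0])
print(series.retrieve_by_time(0.25))            # -> [0.5, 1.0]
```

The interpolation weight mirrors the expression in the record's `retrieve_by_time`: `((t-ta)*bdata + (tb-t)*adata)/(tb-ta)`.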
didichuxing/delta | [
"31dfebc8f20b7cb282b62f291ff25a87e403cc86"
] | [
"utils/avg_checkpoints.py"
] | [
"#!/usr/bin/env python3\n# Copyright 2019 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Script to average values of variables in a list of checkpoint files.\"\"\"\nimport os\nimport six\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\nfrom six.moves import zip # pylint: disable=redefined-builtin\nimport numpy as np\nimport delta.compat as tf\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string(\"checkpoints\", \"\",\n \"Comma-separated list of checkpoints to average.\")\nflags.DEFINE_integer(\n \"num_last_checkpoints\", 0, \"Averages the last N saved checkpoints.\"\n \" If the checkpoints flag is set, this is ignored.\")\nflags.DEFINE_string(\"prefix\", \"\",\n \"Prefix (e.g., directory) to append to each checkpoint.\")\nflags.DEFINE_string(\"output_path\", \"/tmp/averaged.ckpt\",\n \"Path to output the averaged checkpoint to.\")\n\n\ndef checkpoint_exists(path):\n return (tf.io.gfile.exists(path) or tf.io.gfile.exists(path + \".meta\") or\n tf.io.gfile.exists(path + \".index\"))\n\n\ndef main(_):\n if FLAGS.checkpoints:\n # Get the checkpoints list from flags and run some basic checks.\n checkpoints = [c.strip() for c in FLAGS.checkpoints.split(\",\")]\n checkpoints = [c for c in checkpoints if c]\n if not checkpoints:\n raise ValueError(\"No checkpoints provided for averaging.\")\n if FLAGS.prefix:\n checkpoints = [FLAGS.prefix + c for c in checkpoints]\n else:\n assert FLAGS.num_last_checkpoints >= 1, \"Must average at least one model\"\n assert FLAGS.prefix, (\"Prefix must be provided when averaging last\"\n \" N checkpoints\")\n checkpoint_state = tf.train.get_checkpoint_state(\n os.path.dirname(FLAGS.prefix))\n # Checkpoints are ordered from oldest to newest.\n checkpoints = checkpoint_state.all_model_checkpoint_paths[\n -FLAGS.num_last_checkpoints:]\n\n checkpoints = [c for c in checkpoints if checkpoint_exists(c)]\n if not checkpoints:\n if FLAGS.checkpoints:\n raise ValueError(\"None of the provided checkpoints exist. 
%s\" %\n FLAGS.checkpoints)\n else:\n raise ValueError(\"Could not find checkpoints at %s\" %\n os.path.dirname(FLAGS.prefix))\n\n # Read variables from all checkpoints and average them.\n logging.info(\"Reading variables and averaging checkpoints:\")\n for c in checkpoints:\n logging.info(\"%s \", c)\n var_list = tf.train.list_variables(checkpoints[0])\n var_values, var_dtypes = {}, {}\n for (name, shape) in var_list:\n if not name.startswith(\"global_step\"):\n var_values[name] = np.zeros(shape)\n for checkpoint in checkpoints:\n reader = tf.train.load_checkpoint(checkpoint)\n for name in var_values:\n tensor = reader.get_tensor(name)\n var_dtypes[name] = tensor.dtype\n var_values[name] += tensor\n logging.info(\"Read from checkpoint %s\", checkpoint)\n for name in var_values: # Average.\n var_values[name] /= len(checkpoints)\n\n with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):\n tf_vars = [\n tf.get_variable(v, shape=var_values[v].shape, dtype=var_dtypes[v])\n for v in var_values\n ]\n placeholders = [tf.placeholder(v.dtype, shape=v.shape) for v in tf_vars]\n assign_ops = [tf.assign(v, p) for (v, p) in zip(tf_vars, placeholders)]\n global_step = tf.Variable(\n 0, name=\"global_step\", trainable=False, dtype=tf.int64)\n saver = tf.train.Saver(tf.all_variables())\n\n # Build a model consisting only of variables, set them to the average values.\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for p, assign_op, (name, value) in zip(placeholders, assign_ops,\n six.iteritems(var_values)):\n sess.run(assign_op, {p: value})\n # Use the built saver to save the averaged checkpoint.\n saver.save(sess, FLAGS.output_path, global_step=global_step)\n\n logging.info(\"Averaged checkpoints saved in %s\", FLAGS.output_path)\n\n\nif __name__ == \"__main__\":\n app.run(main)\n"
] | [
[
"numpy.zeros"
]
] |
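The core of the `avg_checkpoints.py` record above is simple arithmetic: create one accumulator per variable (skipping `global_step`), sum that variable's tensor across all checkpoints, then divide by the number of checkpoints. Here is a minimal sketch of that loop using plain dictionaries and numpy; the toy in-memory "checkpoints" and variable names stand in for the real TensorFlow checkpoint files the script reads with `tf.train.load_checkpoint`.

```python
import numpy as np

# Each toy "checkpoint" is a dict mapping variable names to arrays.
checkpoints = [
    {"dense/kernel": np.array([[1.0, 2.0]]), "global_step": np.array(100)},
    {"dense/kernel": np.array([[3.0, 6.0]]), "global_step": np.array(200)},
]

# Initialize accumulators from the first checkpoint, excluding global_step.
var_values = {}
for name, value in checkpoints[0].items():
    if not name.startswith("global_step"):
        var_values[name] = np.zeros_like(value, dtype=np.float64)

# Sum each variable across checkpoints ...
for ckpt in checkpoints:
    for name in var_values:
        var_values[name] += ckpt[name]

# ... then divide by the checkpoint count to get the average.
for name in var_values:
    var_values[name] /= len(checkpoints)

print(var_values["dense/kernel"])   # [[2. 4.]]
```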
TemsyChen/Spotifinder | [
"b069ffcd63bd7654e1afd51cde3288c9678d121a"
] | [
"app/app_3rdtry.py"
] | [
"import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport plotly.express as px\nfrom dash.dependencies import Input, Output\nimport pandas as pd\nimport pickle\n# from os.path import dirname\n\n# DIR = dirname(__file__)\n# MODELS_DIR = DIR + '/../models/'\n# DATA_DIR = DIR + '/../data/'\n\n# data_filename = DATA_DIR + 'NLP_songs_data.zip'\n# model_filename = MODELS_DIR + 'nlp_model.pkl'\n# dtm_filename = MODELS_DIR + 'nlp_dtm.pkl'\n\n# df = None\n# loaded_model = None\n# dtm = None\n\n# def load_files():\n# global df, loaded_model, dtm\n\n# df = pd.read_csv(data_filename)\n# loaded_model = pickle.load(open(model_filename, 'rb'))\n# dtm = pickle.load(open(dtm_filename, 'rb'))\n\n# load_files()\n\ndata_filename = r'C:\\Users\\temsy\\Documents\\GitHub\\Spotifinder\\data\\NLP_songs_data.zip'\n\ndf = pd.read_csv(data_filename)\nloaded_model = pickle.load(open(r'C:\\Users\\temsy\\Documents\\GitHub\\Spotifinder\\models\\nlp_model.pkl', 'rb'))\ndtm = pickle.load(open(r'C:\\Users\\temsy\\Documents\\GitHub\\Spotifinder\\models\\nlp_dtm.pkl', 'rb'))\n\n#Plotly Dash\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets, requests_pathname_prefix = '/dash/')\n\napp.layout = html.Div([\n html.Label(\"Artist:\", style={'fontSize':30, 'textAlign':'center'}),\n dcc.Dropdown(\n id='Artist',\n options=[{\n 'label': c,\n 'value': c}\n for c in df['track_artist']],\n value = df['track_artist'][0]\n ),\n html.Label(\"Songs:\", style={'fontSize':30, 'textAlign':'center'}),\n dcc.Dropdown(id='Songs',\n multi=False),\n html.Label(\"Recommendations:\", style={'fontSize':30, 'textAlign':'center'}),\n html.Div(id='Recommendations')\n])\n\[email protected](\n Output('Songs', 'options'),\n [Input('Artist', 'value')]\n)\ndef set_options(artist):\n dff = df[df.track_artist == artist]\n dicosongs = [{'label': c, 'value': c} for c in sorted(dff.track_name.unique())]\n return dicosongs\n\[email protected](\n Output('Recommendations', 'dicorecs')\n [Input('Songs', 'value')],\n [Input('Artist', 'value')]\n)\ndef predict(artist, song):\n # if dtm is None:\n # load_files()\n #translate artist, song into doc dtm.iloc[x].values\n artist_songs = df.loc[df['track_artist'] == artist]\n selected_song = artist_songs.loc[artist_songs['track_name'] == song]\n x = selected_song.index\n x = x[0]\n x = x.item()\n \n doc = dtm.loc[x].values\n result = loaded_model.kneighbors([doc], n_neighbors=6)\n\n songs = []\n # rec_songs = {\"artist\": [], \"song\": []};\n\n for i in range(5):\n song = result[1][0][1 + i]\n\n # translate the loc into an artist and song title\n artist = df.loc[song]['track_artist']\n song = df.loc[song]['track_name']\n\n # rec_songs['artist'].append(artist)\n # rec_songs['song'].append(song)\n songs.append(song)\n\n return result[1][0]\n\nif __name__ == '__main__':\n app.run_server(debug=True)"
] | [
[
"pandas.read_csv"
]
] |
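The prediction path in the Spotifinder record above maps an (artist, song) selection to a row of a document-term matrix and asks a fitted `NearestNeighbors` model for the closest rows. Note that, as written in the record, the second `@app.callback` is missing a comma after `Output('Recommendations', 'dicorecs')`, and its `Input` order (`Songs`, then `Artist`) does not match the `predict(artist, song)` signature, so a working callback would look slightly different. The sketch below shows only the lookup logic with invented toy data; the column names `track_artist`/`track_name` come from the record, everything else is illustrative.

```python
import numpy as np
import pandas as pd
from sklearn.neighbors import NearestNeighbors

# Toy metadata table and an aligned (songs x features) matrix; values invented.
df = pd.DataFrame({
    "track_artist": ["A", "A", "B", "C"],
    "track_name":   ["song1", "song2", "song3", "song4"],
})
dtm = pd.DataFrame(np.array([
    [1.0, 0.0, 0.0],
    [0.9, 0.1, 0.0],
    [0.0, 1.0, 0.0],
    [0.0, 0.9, 0.1],
]))

model = NearestNeighbors(n_neighbors=3).fit(dtm.values)

def recommend(artist, song, k=2):
    # Translate (artist, song) into a row index, then query its neighbours.
    row = df[(df.track_artist == artist) & (df.track_name == song)].index[0]
    _, indices = model.kneighbors([dtm.loc[row].values], n_neighbors=k + 1)
    neighbours = [i for i in indices[0] if i != row][:k]   # drop the song itself
    return df.loc[neighbours, ["track_artist", "track_name"]]

print(recommend("A", "song1"))
```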
alifianmahardhika/galaxy_simpy | [
"799d11b00a3b14991d89ddac0aabf0bcd447b800"
] | [
"two-body-mond.py"
] | [
"import matplotlib.pyplot as plt\nfrom numpy import sin,cos,pi,sqrt,exp,floor,zeros,copy,array\nfrom numpy.random import normal\nfrom numpy.linalg import norm\nfrom random import uniform\nfrom time import time\n\nstart = time()\ndef euler(x,v):\n for i in range(n_particles):\n sigmaF = zeros(2)\n for j in range(n_particles):\n if(i!=j): \n sigmaF += f(x[i],x[j])\n x[i] += v[i]*dt\n v[i] += a_0*phi_inv(norm(sigmaF)/a_0)*(sigmaF/norm(sigmaF))*dt\ndef symplectic(x,v):\n for i in range(n_particles):\n sigmaF = zeros(2)\n for j in range(n_particles):\n if(i!=j): \n sigmaF += f(x[i],x[j])\n v[i] += G*sigmaF*dt\n x[i] += v[i]*dt\ndef f(xi,xj):\n rij = xj-xi\n return (G*m*rij)/(norm(rij)+epsilon)**3\ndef init_two():\n x1 = ([R*cos(omega*0),R*sin(omega*0)])\n x2 = -copy(x1)\n v1 = ([omega*x1[1],omega*x1[0]])\n v2 = -copy(v1)\n x = array([x1,x2])\n v = array([v1,v2])\n return x,v\ndef kinetic_energy():\n sigmaN = 0.0\n for i in range(n_particles):\n sigmaN += 0.5*m*norm(v[i])**2\n return sigmaN\ndef phi_inv(q):\n return sqrt(q)*sqrt((1.0+sqrt(1.0+(4.0/r**2)))/2.0)\n#Global parameter\nn_particles = 2 #particles\nd = 2 #dimension\nm = 10e11/n_particles #[MO]\nR = 2.9 #[kpc]\nG = 13.34*10e-11 #[kpc^3 MO^-1 gy^-2]\nomega = sqrt((G*m)/(4*R**3)) #velocities\nepsilon = 1e-3\nT = 100\ndt = 0.001\nN = int(floor(T/dt))\nscale = 30.0\na_0 = 10e-1\n#initial condition\nx,v = init_two()\n#x = get_init_coordinates()\n#v = get_init_velocities()\nprint(x)\n#main loop\nplt.plot(x[:,0],x[:,1], 'ro')\nfor k in range(N):\n euler(x,v)\n #print(kinetic_energy())\n #plt.plot(xe[:,0],xe[:,1], 'b.')\n #plt.xlim(right=scale,left=-scale)\n #plt.ylim(top=scale,bottom=-scale)\n #plt.axes(aspect='equal')\n if(k%100==0):\n plt.plot(x[:,0],x[:,1], 'b.')\n#filename='./figures/plot.png'\n#plt.savefig(filename)\nprint(\"Time for running \", N, \"iteration :\", time()-start, \"seconds\")\nprint(x)\nplt.show()"
] | [
[
"numpy.linalg.norm",
"numpy.sin",
"numpy.zeros",
"numpy.floor",
"numpy.cos",
"numpy.copy",
"matplotlib.pyplot.show",
"numpy.sqrt",
"matplotlib.pyplot.plot",
"numpy.array"
]
] |
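In the two-body MOND record above, `phi_inv(q)` refers to a lowercase `r` that is never defined (only the radius `R` exists), so the first call from `euler` would raise a `NameError`, and the intended interpolating function cannot be recovered from the source alone. For illustration only, here is a self-contained sketch of one common choice: the inverse of the "standard" interpolating function mu(x) = x/sqrt(1+x^2), for which the MOND acceleration is a = a_N * nu(a_N/a_0) with nu(y) = sqrt((1 + sqrt(1 + 4/y^2))/2). This is an editorial example, not necessarily the formula the repository intended.

```python
import numpy as np
from numpy.linalg import norm

def nu_standard(y):
    """Inverse of the 'standard' MOND interpolating function mu(x)=x/sqrt(1+x^2);
    the MOND acceleration magnitude is |a| = a_N * nu(a_N / a_0)."""
    return np.sqrt((1.0 + np.sqrt(1.0 + 4.0 / y**2)) / 2.0)

def mond_acceleration(newtonian_force, a_0):
    aN = norm(newtonian_force)
    if aN == 0.0:
        return np.zeros_like(newtonian_force)
    return aN * nu_standard(aN / a_0) * (newtonian_force / aN)

a_0 = 1e-1
strong = np.array([10.0, 0.0])   # |a_N| >> a_0: essentially Newtonian
weak = np.array([1e-4, 0.0])     # |a_N| << a_0: deep-MOND regime
print(norm(mond_acceleration(strong, a_0)))   # ~10.0
print(norm(mond_acceleration(weak, a_0)))     # ~sqrt(1e-4 * 1e-1) ~ 3.2e-3
```

The two printed values check the limits: far above a_0 the result is essentially the Newtonian acceleration, far below it the magnitude approaches sqrt(a_N * a_0).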
sagartomar/aesara | [
"477f4e5dd757b1ccd3deaf59bf75fc27d7ab9cf6",
"477f4e5dd757b1ccd3deaf59bf75fc27d7ab9cf6"
] | [
"aesara/scan/op.py",
"aesara/tensor/math.py"
] | [
"\"\"\"This module provides the `Scan` `Op`.\n\nMemory reuse in scan\n--------------------\n\nTo reduce the number of memory allocations and copies associated with calling\nthe inner function and recovering the outputs at every iteration, Scan uses a\nmemory pre-allocation mechanism for some of its outputs. Instead of repeatedly\ncalling the inner function and copying the outputs to designated locations,\nit tries to make the inner function write the outputs directly to the\ndesignated locations.\n\nThis is achieved by initializing, at every iteration, the output storage\nof the inner function with references to previously allocated memory. Other\nthan the code in the Python and Cython backends to do this and to ensure that\nthe pre-allocated memory has been used, the memory pre-allocation mechanism\nrelies on the following elements to work properly :\n- In make_thunk(), when compiling the inner function, the borrow flag must\n be set to False for the inputs. This will prevent aliasing between the\n inputs and the outputs of the inner function which could lead to invalid\n results.\n- In make_thunk(), again, the borrow flag must be set to True for the outputs.\n This will make Aesara consider the output storages as persistent and make\n Aesara provide them as pre-allocated storage to the ops that compute the\n outputs of the inner function instead of letting these ops allocate their\n own output storage.\n- The ops that produce the outputs of the inner function must be prevented\n from working inplace because if they do, they're not using the pre-allocated\n storage. This is achieved by including the optimization\n 'add_no_output_from_inplace' to the compilation mode used by scan. It\n prevents other optimizations from altering the graph such that outputs are\n produced by inplace operations.\n- The ScanSaveMem optimization, whose goal is to limit the amount of memory\n used by scan, needs to allocate buffers large enough to be able, at every\n iteration, to simultaneously read the needed previous states and storing\n the new states. Before the memory reuse feature, the buffers could be\n smaller because, often, Scan only needed buffers large enough to read the\n needed previous states. This is because all the outputs of the inner\n function were computed before any of them was stored in the buffers. 
Now,\n the outputs are stored as they are computed which means that, if the buffer\n is too small, computing an output can overwrite an input that is still\n needed to compute another output.\n\n\"\"\"\n\n\nimport copy\nimport itertools\nimport logging\nimport time\nfrom collections import OrderedDict\n\nimport numpy as np\n\nimport aesara\nfrom aesara import tensor as aet\nfrom aesara.compile.builders import infer_shape\nfrom aesara.compile.function import function\nfrom aesara.compile.io import In, Out\nfrom aesara.compile.mode import AddFeatureOptimizer, get_mode\nfrom aesara.compile.profiling import ScanProfileStats, register_profiler_printer\nfrom aesara.configdefaults import config\nfrom aesara.gradient import DisconnectedType, NullType, Rop, grad, grad_undefined\nfrom aesara.graph.basic import (\n Apply,\n Constant,\n Variable,\n clone_replace,\n equal_computations,\n graph_inputs,\n io_connection_pattern,\n)\nfrom aesara.graph.features import NoOutputFromInplace\nfrom aesara.graph.fg import MissingInputError\nfrom aesara.graph.op import Op, ops_with_inner_function\nfrom aesara.link.c.basic import CLinker\nfrom aesara.link.c.exceptions import MissingGXX\nfrom aesara.link.utils import raise_with_op\nfrom aesara.scan.utils import Validator, forced_replace, hash_listsDictsTuples, safe_new\nfrom aesara.tensor.basic import as_tensor_variable\nfrom aesara.tensor.math import minimum\nfrom aesara.tensor.shape import Shape_i\nfrom aesara.tensor.type import TensorType, integer_dtypes\nfrom aesara.tensor.var import TensorVariable\n\n\n__docformat__ = \"restructedtext en\"\n__authors__ = (\n \"Razvan Pascanu \"\n \"Frederic Bastien \"\n \"James Bergstra \"\n \"Pascal Lamblin \"\n \"PyMC Developers \"\n \"Aesara Developers \"\n)\n__copyright__ = \"(c) 2010, Universite de Montreal\"\n\n# Logging function for sending warning or info\n_logger = logging.getLogger(\"aesara.scan.op\")\n\n\nclass Scan(Op):\n \"\"\"\n\n Parameters\n ----------\n inputs\n Inputs of the inner function of scan.\n outputs\n Outputs of the inner function of scan.\n info\n Dictionary containing different properties of the scan op (like number\n of different types of arguments, name, mode, if it should run on GPU or\n not, etc.).\n typeConstructor\n Function that constructs an equivalent to Aesara TensorType.\n\n Notes\n -----\n ``typeConstructor`` had been added to refactor how\n Aesara deals with the GPU. If it runs on the GPU, scan needs\n to construct certain outputs (those who reside in the GPU\n memory) as the GPU-specific type. However we can not import\n gpu code in this file (as it is in sandbox, and not available\n on each machine) so the workaround is that the GPU\n optimization passes to the constructor of this class a\n function that is able to construct a GPU type. 
This way the\n class Scan does not need to be aware of the details for the\n GPU, it just constructs any tensor using this function (which\n by default constructs normal tensors).\n\n \"\"\"\n\n def __init__(\n self,\n inputs,\n outputs,\n info,\n typeConstructor=None,\n ):\n # adding properties into self\n self.inputs = inputs\n self.outputs = outputs\n self.__dict__.update(info)\n # I keep a version of info in self, to use in __eq__ and __hash__,\n # since info contains all tunable parameters of the op, so for two\n # scan to be equal this tunable parameters should be the same\n self.info = info\n # build a list of output types for any Apply node using this op.\n self.output_types = []\n idx = 0\n jdx = 0\n\n def tensorConstructor(broadcastable, dtype):\n return TensorType(broadcastable=broadcastable, dtype=dtype)\n\n if typeConstructor is None:\n typeConstructor = tensorConstructor\n\n while idx < self.n_mit_mot_outs:\n # Not that for mit_mot there are several output slices per\n # output sequence\n o = outputs[idx]\n self.output_types.append(\n typeConstructor(\n broadcastable=(False,) + o.type.broadcastable, dtype=o.type.dtype\n )\n )\n\n idx += len(self.mit_mot_out_slices[jdx])\n jdx += 1\n\n # mit_sot / sit_sot / nit_sot\n end = idx + self.n_mit_sot + self.n_sit_sot + self.n_nit_sot\n\n for o in outputs[idx:end]:\n self.output_types.append(\n typeConstructor(\n broadcastable=(False,) + o.type.broadcastable, dtype=o.type.dtype\n )\n )\n\n # shared outputs + possibly the ending condition\n for o in outputs[end:]:\n self.output_types.append(o.type)\n\n if self.as_while:\n self.output_types = self.output_types[:-1]\n\n mode_instance = get_mode(self.mode)\n # Clone mode_instance, altering \"allow_gc\" for the linker,\n # and adding a message if we profile\n if self.name:\n message = self.name + \" sub profile\"\n else:\n message = \"Scan sub profile\"\n\n self.mode_instance = mode_instance.clone(\n link_kwargs=dict(allow_gc=self.allow_gc), message=message\n )\n\n if not hasattr(self, \"name\") or self.name is None:\n self.name = \"scan_fn\"\n # to have a fair __eq__ comparison later on, we update the info with\n # the actual mode used to compile the function and the name of the\n # function that we set in case none was given\n self.info[\"name\"] = self.name\n\n # Pre-computing some values to speed up perform\n self.mintaps = [np.min(x) for x in self.tap_array]\n self.mintaps += [0 for x in range(self.n_nit_sot)]\n self.seqs_arg_offset = 1 + self.n_seqs\n self.shared_arg_offset = (\n self.seqs_arg_offset + self.n_mit_mot + self.n_mit_sot + self.n_sit_sot\n )\n self.nit_sot_arg_offset = self.shared_arg_offset + self.n_shared_outs\n self.n_outs = self.n_mit_mot + self.n_mit_sot + self.n_sit_sot\n self.n_tap_outs = self.n_mit_mot + self.n_mit_sot\n if self.info[\"gpua\"]:\n self._hash_inner_graph = self.info[\"gpu_hash\"]\n else:\n # Do the missing inputs check here to have the error early.\n for var in graph_inputs(self.outputs, self.inputs):\n if var not in self.inputs and not isinstance(var, Constant):\n raise MissingInputError(f\"ScanOp is missing an input: {repr(var)}\")\n self._cmodule_key = CLinker().cmodule_key_variables(\n self.inputs, self.outputs, []\n )\n self._hash_inner_graph = hash(self._cmodule_key)\n\n # Compute mappings between outer inputs, outer outputs, inner\n # inputs and inner outputs to determine with variables are associated\n # with the same states.\n self.var_mappings = self.get_oinp_iinp_iout_oout_mappings()\n\n def validate_inner_graph(self):\n \"\"\"\n Perform 
some elementary validations on the inner graph to ensure\n that it is coherent.\n\n \"\"\"\n\n # For every recurrent output, iterate over the associated inner\n # inputs and output and ensure that they have the same dtype\n nb_recurr_outputs = self.n_mit_mot + self.n_mit_sot + self.n_sit_sot\n\n for outer_oidx in range(nb_recurr_outputs):\n\n inner_iidxs = self.var_mappings[\"inner_inp_from_outer_out\"][outer_oidx]\n inner_oidxs = self.var_mappings[\"inner_out_from_outer_out\"][outer_oidx]\n\n for (inner_iidx, inner_oidx) in itertools.product(inner_iidxs, inner_oidxs):\n\n type_input = self.inputs[inner_iidx].type\n type_output = self.outputs[inner_oidx].type\n if type_input != type_output:\n raise TypeError(\n \"Inconsistency in the inner graph of \"\n f\"scan '{self.name}' : an input and an output are \"\n \"associated with the same recurrent state \"\n \"and should have the same type but have \"\n f\"type '{type_input}' and '{type_output}' respectively.\"\n )\n\n # If scan has the flag 'gpua' set to false (meaning that is shouldn't\n # use the gpuarray gpu backend ), ensure that is has no input and no\n # output with type GpuArrayType\n from aesara.gpuarray import GpuArrayType\n\n if not self.info.get(\"gpua\", False):\n for inp in self.inputs:\n if isinstance(inp.type, GpuArrayType):\n raise TypeError(\n \"Inconsistency in the inner graph of \"\n f\"scan '{self.name}' : one of the inputs to the \"\n \"inner graph is of type GpuArrayType but \"\n \"the attributes of the scan op indicate \"\n \"that it shouldn't be the case\"\n )\n\n for out in self.outputs:\n if isinstance(out.type, GpuArrayType):\n raise TypeError(\n \"Inconsistency in the inner graph of \"\n f\"scan '{self.name}' : one of the outputs to the \"\n \"inner graph is of type GpuArrayType but \"\n \"the attributes of the scan op indicate \"\n \"that it shouldn't be the case\"\n )\n\n def __setstate__(self, d):\n self.__dict__.update(d)\n if \"allow_gc\" not in self.__dict__:\n self.allow_gc = True\n self.info[\"allow_gc\"] = True\n if not hasattr(self, \"var_mappings\"):\n # Generate the mappings between inner and outer inputs and outputs\n # if they haven't already been generated.\n self.var_mappings = self.get_oinp_iinp_iout_oout_mappings()\n if hasattr(self, \"fn\"):\n if not hasattr(self, \"thunk_mit_mot_out_slices\"):\n # The thunk has been compiled before mit_mot preallocation\n # feature was implemented. Mark every mit_mot output tap as\n # not having been preallocated\n self.mitmots_preallocated = [False] * self.n_mit_mot_outs\n\n if not hasattr(self, \"outs_is_tensor\"):\n # The thunk has been compiled before the analysis, at\n # compilation time, of the location of the inputs and outputs.\n # Perform this analysis here.\n self.inps_is_tensor = [\n isinstance(out, TensorVariable)\n for out in self.fn.maker.fgraph.inputs\n ]\n self.outs_is_tensor = [\n isinstance(out, TensorVariable)\n for out in self.fn.maker.fgraph.outputs\n ]\n\n # Ensure that the graph associated with the inner function is valid.\n self.validate_inner_graph()\n\n def make_node(self, *inputs):\n \"\"\"\n Conventions:\n inner_X - the variable corresponding to X in the inner function\n of scan (the lambda function executed at every time\n step)\n outer_X - the variable corresponding to X in the outer graph,\n i.e. the main graph (where the scan op lives)\n inner_X_out - the variable representing the new value of X after\n executing one step of scan (i.e. 
outputs given by\n the inner function)\n\n \"\"\"\n assert np.all(isinstance(i, Variable) for i in inputs)\n # Check that the number of inputs to the Scan node corresponds to\n # the number of inputs of the inner function of scan\n n_outer_ins = len(inputs) - len(self.outer_nitsot(inputs)) - 1\n n_inner_ins = (\n len(self.inner_seqs(self.inputs))\n + len(self.mitmot_taps())\n + len(self.mitsot_taps())\n + len(self.inner_sitsot(self.inputs))\n + len(self.inner_shared(self.inputs))\n + len(self.inner_non_seqs(self.inputs))\n )\n assert n_outer_ins == n_inner_ins, (\n \"The number of inputs given to the inner function of scan\"\n \" does not match the number of inputs given to scan.\"\n )\n # Force the inputs to be on the CPU\n new_inputs = [as_tensor_variable(inputs[0])]\n # assert dtype is consistent\n err_msg1 = (\n \"When compiling the inner function of scan (the \"\n \"function called by scan in each of its iterations) \"\n \"the following error has been encountered: The \"\n \"%s %s (argument number %d) has dtype \"\n \"%s and %d dimension(s). The corresponding variable \"\n \"in the inner function of scan %s \"\n \"however has dtype %s and %d dimension(s). This \"\n \"variable in the inner function of scan should \"\n \"have the same dtype and one fewer dimension \"\n \"compared to its corresponding variable in the initial \"\n \"state (outputs_info in scan nomenclature). For example, \"\n \"if the inner function of scan returns a vector \"\n \"of size d and scan uses the values of \"\n \"the previous time-step, then the initial state in scan \"\n \"should be a matrix of shape (1, d). \"\n \"The first dimension of this \"\n \"matrix corresponds to the number of previous time-steps \"\n \"that scan uses in each of its iterations. \"\n \"In order to solve this issue if the two variable currently \"\n \"have the same dimensionality, you can increase the \"\n \"dimensionality of the varialbe in the initial state of scan \"\n \"by using dimshuffle or shape_padleft. \"\n )\n err_msg2 = (\n \"When compiling the inner function of scan the \"\n \"following error has been encountered: The \"\n \"initial state (`outputs_info` in scan nomenclature) \"\n \"of variable %s (argument number %d) \"\n \"has dtype %s, while the result of the inner function \"\n \"(`fn`) has dtype %s. This can happen if the inner \"\n \"function of scan results in an upcast or downcast.\"\n )\n err_msg3 = (\n \"When compiling the inner function of scan (the \"\n \"function called by scan in each of its iterations) \"\n \"the following error has been encountered: The \"\n \"initial state (`outputs_info` in scan nomenclature) \"\n \"of variable %s (argument number %d) has %d dimension(s), \"\n \"while the corresponding variable in the result of the inner \"\n \"function of scan (`fn`) has %d dimension(s) (it should \"\n \"be one less than the initial state). For example, \"\n \"if the inner function of scan returns a vector \"\n \"of size d and scan uses the values of \"\n \"the previous time-step, then the initial state in scan \"\n \"should be a matrix of shape (1, d). \"\n \"The first dimension of this \"\n \"matrix corresponds to the number of previous time-steps \"\n \"that scan uses in each of its iterations. \"\n \"In order to solve this issue if the two varialbe currently \"\n \"have the same dimensionality, you can increase the \"\n \"dimensionality of the variable in the initial state of scan \"\n \"by using dimshuffle or shape_padleft. 
\"\n )\n\n def check_broadcast(v1, v2):\n \"\"\"Checks that the broadcast pattern of v1 and v2.\n\n Controls that the broadcast pattern of the variable provided as\n input to `scan` matches the broadcast pattern provided in\n `output_info`. It raises an error when they don't match. The\n typical case is when the user provides either the input or the\n `output_info` (but not both) with a dimension fixed to 1,\n which may wrongly be interpreted as broadcastable.\n\n \"\"\"\n if not hasattr(v1, \"broadcastable\") and not hasattr(v2, \"broadcastable\"):\n return\n msg = (\n \"The broadcast pattern of the output of scan (%s) is \"\n \"inconsistent with the one provided in `output_info` \"\n \"(%s). The output on axis %d is `%r`, but it is `%r` on \"\n \"axis %d in `output_info`. This can happen if one of the \"\n \"dimension is fixed to 1 in the input, while it is still \"\n \"variable in the output, or vice-verca. You have to make \"\n \"them consistent, e.g. using aesara.tensor.\"\n \"{patternbroadcast,unbroadcast,addbroadcast}.\"\n )\n size = min(len(v1.broadcastable), len(v2.broadcastable))\n for n, (b1, b2) in enumerate(\n zip(v1.broadcastable[-size:], v2.broadcastable[-size:])\n ):\n if b1 != b2:\n a1 = n + size - len(v1.broadcastable) + 1\n a2 = n + size - len(v2.broadcastable) + 1\n raise TypeError(msg % (v1.type, v2.type, a1, b1, b2, a2))\n\n def format(var, as_var):\n \"\"\"\n This functions ensures that ``out`` has the same dtype as\n ``inp`` as well as calling filter_variable to make sure\n they are both TensorType or GpuArrayType. It internally\n deals with the corner case where inp.ndim + 1 = out.ndim\n\n \"\"\"\n if not hasattr(var, \"dtype\"):\n return var\n rval = var\n if rval.type.dtype != as_var.type.dtype:\n rval = rval.astype(as_var.type.dtype)\n if rval.ndim == as_var.ndim:\n rval = as_var.type.filter_variable(rval)\n else:\n tmp = as_var.type.clone(\n broadcastable=(\n tuple(var.broadcastable[:1]) + tuple(as_var.broadcastable)\n )\n )\n rval = tmp.filter_variable(rval)\n return rval\n\n # Check if input sequences and variables representing a slice of\n # them have the same dtype\n argoffset = 0\n for inner_seq, outer_seq in zip(\n self.inner_seqs(self.inputs), self.outer_seqs(inputs)\n ):\n check_broadcast(outer_seq, inner_seq)\n new_inputs.append(format(outer_seq, as_var=inner_seq))\n\n argoffset += len(self.outer_seqs(inputs))\n # Check that this 3 things have the same dtype for mit_mot:\n # - initial state of the output\n # - variable representing an input slice of the output\n # - variable representing an output slice of the output\n ipos = 0\n opos = 0\n inner_mitmot = self.inner_mitmot(self.inputs)\n inner_mitmot_outs = self.inner_mitmot_outs(self.outputs)\n for idx, (itaps, otaps, _outer_mitmot) in enumerate(\n zip(self.mitmot_taps(), self.mitmot_out_taps(), self.outer_mitmot(inputs))\n ):\n outer_mitmot = format(_outer_mitmot, as_var=inner_mitmot[ipos])\n new_inputs.append(outer_mitmot)\n for k in range(len(itaps)):\n if (\n inner_mitmot[ipos + k].type.dtype != outer_mitmot.type.dtype\n or inner_mitmot[ipos + k].ndim != outer_mitmot.ndim - 1\n ):\n raise ValueError(\n err_msg1\n % (\n \"initial state (outputs_info\" \" in scan nomenclature) \",\n str(outer_mitmot),\n argoffset + idx,\n outer_mitmot.type.dtype,\n outer_mitmot.type.ndim,\n str(inner_mitmot[ipos + k]),\n inner_mitmot[ipos + k].type.dtype,\n inner_mitmot[ipos + k].type.ndim,\n )\n )\n ipos += len(itaps)\n for k in range(len(otaps)):\n if inner_mitmot_outs[opos + k].type.dtype != 
outer_mitmot.type.dtype:\n raise ValueError(\n err_msg2\n % (\n str(outer_mitmot),\n argoffset + idx,\n outer_mitmot.type.dtype,\n inner_mitmot_outs[opos + k].type.dtype,\n )\n )\n if inner_mitmot_outs[opos + k].ndim != outer_mitmot.ndim - 1:\n raise ValueError(\n err_msg3\n % (\n str(outer_mitmot),\n argoffset + idx,\n outer_mitmot.ndim,\n inner_mitmot_outs[opos + k].ndim,\n )\n )\n opos += len(otaps)\n argoffset += len(self.outer_mitmot(inputs))\n # Same checks as above but for outputs of type mit_sot\n ipos = 0\n inner_mitsots = self.inner_mitsot(self.inputs)\n for idx, (itaps, _outer_mitsot, inner_mitsot_out) in enumerate(\n zip(\n self.mitsot_taps(),\n self.outer_mitsot(inputs),\n self.inner_mitsot_outs(self.outputs),\n )\n ):\n outer_mitsot = format(_outer_mitsot, as_var=inner_mitsots[ipos])\n new_inputs.append(outer_mitsot)\n\n for k in range(len(itaps)):\n if (\n inner_mitsots[ipos + k].type.dtype != outer_mitsot.type.dtype\n or inner_mitsots[ipos + k].ndim != outer_mitsot.ndim - 1\n ):\n raise ValueError(\n err_msg1\n % (\n \"initial state (outputs_info\" \" in scan nomenclature) \",\n str(outer_mitsot),\n argoffset + idx,\n outer_mitsot.type.dtype,\n outer_mitsot.type.ndim,\n str(inner_mitsots[ipos + k]),\n inner_mitsots[ipos + k].type.dtype,\n inner_mitsots[ipos + k].type.ndim,\n )\n )\n ipos += len(itaps)\n if inner_mitsot_out.type.dtype != outer_mitsot.type.dtype:\n raise ValueError(\n err_msg2\n % (\n str(outer_mitsot),\n argoffset + idx,\n outer_mitsot.type.dtype,\n inner_mitsot_out.type.dtype,\n )\n )\n if inner_mitsot_out.ndim != outer_mitsot.ndim - 1:\n raise ValueError(\n err_msg3\n % (\n str(outer_mitsot),\n argoffset + idx,\n outer_mitsot.ndim,\n inner_mitsot_out.ndim,\n )\n )\n\n argoffset += len(self.outer_mitsot(inputs))\n # Same checks as above but for outputs of type sit_sot\n for idx, (inner_sitsot, _outer_sitsot, inner_sitsot_out) in enumerate(\n zip(\n self.inner_sitsot(self.inputs),\n self.outer_sitsot(inputs),\n self.inner_sitsot_outs(self.outputs),\n )\n ):\n outer_sitsot = format(_outer_sitsot, as_var=inner_sitsot)\n new_inputs.append(outer_sitsot)\n if inner_sitsot.ndim != outer_sitsot.ndim - 1:\n raise ValueError(\n err_msg1\n % (\n \"initial state (outputs_info\" \" in scan nomenclature) \",\n str(outer_sitsot),\n argoffset + idx,\n outer_sitsot.type.dtype,\n outer_sitsot.type.ndim,\n str(inner_sitsot),\n inner_sitsot.type.dtype,\n inner_sitsot.type.ndim,\n )\n )\n if inner_sitsot_out.type.dtype != outer_sitsot.type.dtype:\n raise ValueError(\n err_msg2\n % (\n str(outer_sitsot),\n argoffset + idx,\n outer_sitsot.type.dtype,\n inner_sitsot_out.type.dtype,\n )\n )\n if inner_sitsot_out.ndim != outer_sitsot.ndim - 1:\n raise ValueError(\n err_msg3\n % (\n str(outer_sitsot),\n argoffset + idx,\n outer_sitsot.type.ndim,\n inner_sitsot_out.type.ndim,\n )\n )\n\n argoffset += len(self.outer_sitsot(inputs))\n # Check that the shared variable and their update rule have the same\n # dtype. 
Maybe even same type ?!\n for idx, (inner_shared, inner_shared_out, _outer_shared) in enumerate(\n zip(\n self.inner_shared(self.inputs),\n self.inner_shared_outs(self.outputs),\n self.outer_shared(inputs),\n )\n ):\n outer_shared = format(_outer_shared, as_var=inner_shared)\n new_inputs.append(outer_shared)\n if (\n hasattr(outer_shared, \"dtype\")\n and outer_shared.dtype != inner_shared_out.dtype\n ):\n raise ValueError(\n err_msg2\n % (\n str(outer_shared),\n idx + argoffset,\n outer_shared.dtype,\n inner_shared_out.dtype,\n )\n )\n if (\n hasattr(outer_shared, \"dtype\")\n and outer_shared.ndim != inner_shared_out.ndim\n ):\n raise ValueError(\n err_msg3\n % (\n str(outer_shared),\n idx + argoffset,\n outer_shared.ndim,\n inner_shared_out.ndim,\n )\n )\n\n if hasattr(outer_shared, \"dtype\") and (\n outer_shared.dtype != inner_shared.dtype\n or outer_shared.ndim != inner_shared.ndim\n ):\n raise ValueError(\n err_msg1\n % (\n \"initial state (outputs_info\" \" in scan nomenclature) \",\n str(outer_shared),\n argoffset + idx,\n outer_shared.dtype,\n outer_shared.ndim,\n str(inner_shared),\n inner_shared.dtype,\n inner_shared.ndim,\n )\n )\n # We do not need to call `format` on outer_nisot arguments.\n # outer_nitsot stands for no input tap single output tap. This means\n # these are states that do not feed anything back in the recurrent\n # computation, and hence they do not have an initial state. The scan\n # node however receives an input for each such argument, the input\n # in this case is just a int saying how many steps of this output we\n # need to store. This input does not have the same dtype, nor is it the same\n # type of tensor as the output, it is always a scalar int.\n new_inputs += [as_tensor_variable(ons) for ons in self.outer_nitsot(inputs)]\n for inner_nonseq, _outer_nonseq in zip(\n self.inner_non_seqs(self.inputs), self.outer_non_seqs(inputs)\n ):\n outer_nonseq = format(_outer_nonseq, as_var=inner_nonseq)\n new_inputs.append(outer_nonseq)\n if inner_nonseq.type != outer_nonseq.type:\n raise ValueError(\n (\n \"Argument %s given to scan node does not\"\n \" match its correspondence %s\"\n )\n % (str(outer_nonseq), str(inner_nonseq))\n )\n\n for outer_nitsot in self.outer_nitsot(inputs):\n # For every nit_sot input we get as input a int/uint that\n # depicts the size in memory for that sequence. 
This feature is\n # used by truncated BPTT and by scan space optimization\n if (\n str(outer_nitsot.type.dtype) not in integer_dtypes\n or outer_nitsot.ndim != 0\n ):\n raise ValueError(\n \"For output %s you need to provide a \" \"scalar int !\",\n str(outer_nitsot),\n )\n assert len(new_inputs) == len(inputs)\n\n # The vector_seqs and vector_outs are just a workaround\n # strange NumPy behavior: vector_ndarray[int] return a NumPy\n # scalar and not a NumPy ndarray of 0 dimensions.\n def is_cpu_vector(s):\n return isinstance(s.type, TensorType) and s.ndim == 1\n\n self.vector_seqs = [\n is_cpu_vector(seq) for seq in new_inputs[1 : 1 + self.n_seqs]\n ]\n self.vector_outs = [\n is_cpu_vector(arg)\n for arg in new_inputs[1 + self.n_seqs : (1 + self.n_seqs + self.n_outs)]\n ]\n self.vector_outs += [\n isinstance(t.type, TensorType) and t.ndim == 0\n for t in self.outer_nitsot_outs(self.outputs)\n ]\n\n apply_node = Apply(self, new_inputs, [t() for t in self.output_types])\n return apply_node\n\n def __eq__(self, other):\n # Check if we are dealing with same type of objects\n if not type(self) == type(other):\n return False\n if \"destroy_map\" not in self.info:\n self.info[\"destroy_map\"] = OrderedDict()\n if \"destroy_map\" not in other.info:\n other.info[\"destroy_map\"] = OrderedDict()\n keys_to_check = [\n \"truncate_gradient\",\n \"profile\",\n \"n_seqs\",\n \"tap_array\",\n \"as_while\",\n \"n_mit_sot\",\n \"destroy_map\",\n \"n_nit_sot\",\n \"n_shared_outs\",\n \"n_sit_sot\",\n \"gpua\",\n \"n_mit_mot_outs\",\n \"n_mit_mot\",\n \"mit_mot_out_slices\",\n ]\n # This are some safety checks ( namely that the inner graph has the\n # same number of inputs and same number of outputs )\n if not len(self.inputs) == len(other.inputs):\n return False\n elif not len(self.outputs) == len(other.outputs):\n return False\n for key in keys_to_check:\n if self.info[key] != other.info[key]:\n return False\n # If everything went OK up to here, there is still one thing to\n # check. Namely, do the internal graph represent same\n # computations\n for self_in, other_in in zip(self.inputs, other.inputs):\n if self_in.type != other_in.type:\n return False\n\n return equal_computations(\n self.outputs, other.outputs, self.inputs, other.inputs\n )\n\n def __str__(self):\n if self.gpua:\n gpu_str = \"gpu\"\n else:\n gpu_str = \"cpu\"\n if self.as_while:\n name = \"do_while\"\n else:\n name = \"for\"\n aux_txt = \"%s\"\n if len(self.destroy_map.keys()) > 0:\n # Check if all outputs are inplace\n if sorted(self.destroy_map.keys()) == sorted(\n range(self.n_mit_mot + self.n_mit_sot + self.n_sit_sot)\n ):\n aux_txt += \"all_inplace,%s,%s}\"\n else:\n aux_txt += \"{inplace{\"\n for k in self.destroy_map.keys():\n aux_txt += str(k) + \",\"\n aux_txt += \"},%s,%s}\"\n else:\n aux_txt += \"{%s,%s}\"\n aux_txt = aux_txt % (name, gpu_str, str(self.name))\n return aux_txt\n\n def __hash__(self):\n return hash(\n (\n type(self),\n # and a hash representing the inner graph using the\n # CLinker.cmodule_key_\n self._hash_inner_graph,\n hash_listsDictsTuples(self.info),\n )\n )\n\n def make_thunk(self, node, storage_map, compute_map, no_recycling, impl=None):\n \"\"\"\n\n Parameters\n ----------\n node\n Something previously returned by self.make_node.\n storage_map\n dict variable -> one-element-list where a computed\n value for this variable may be found.\n compute_map\n dict variable -> one-element-list where a boolean\n value will be found. 
The boolean indicates whether the\n variable's storage_map container contains a valid value (True)\n or if it has not been computed yet (False).\n no_recycling\n List of variables for which it is forbidden to reuse memory\n allocated by a previous call.\n impl\n Use 'py' if we want python execution.\n Notes\n -----\n If the thunk consults the storage_map on every call, it is safe\n for it to ignore the no_recycling argument, because elements of the\n no_recycling list will have a value of None in the storage map. If\n the thunk can potentially cache return values (like CLinker does),\n then it must not do so for variables in the no_recycling list.\n\n \"\"\"\n\n # Before building the thunk, validate that the inner graph is\n # coherent\n self.validate_inner_graph()\n\n # Setting up all my variables in what I believe is a more Cython\n # friendly form\n\n node_input_storage = [storage_map[r] for r in node.inputs]\n node_output_storage = [storage_map[r] for r in node.outputs]\n # If a shared variable is the result of a ViewOp it is a clear\n # indication that we need to copy that value after the perform of\n # scan is done\n slices = self.n_mit_mot_outs + self.n_mit_sot + self.n_sit_sot + self.n_nit_sot\n\n if config.scan__allow_output_prealloc:\n\n # Go through the mitmots. Whenever a mitmot has a tap both as an\n # input and an output, wrap the input such that the corresponding\n # output variable becomes an update to be performed on it, possibly\n # inplace at the end of the functions's execution.\n wrapped_inputs = [In(x, borrow=False) for x in self.inputs[: self.n_seqs]]\n new_outputs = [x for x in self.outputs]\n preallocated_mitmot_outs = []\n new_mit_mot_out_slices = copy.deepcopy(self.mit_mot_out_slices)\n\n input_idx = self.n_seqs\n for mitmot_idx in range(self.n_mit_mot):\n for inp_tap in self.tap_array[mitmot_idx]:\n if inp_tap in self.mit_mot_out_slices[mitmot_idx]:\n inp = self.inputs[input_idx]\n\n # Figure out the index of the corresponding output\n output_idx = sum(\n [len(m) for m in self.mit_mot_out_slices[:mitmot_idx]]\n )\n output_idx += self.mit_mot_out_slices[mitmot_idx].index(inp_tap)\n\n # Make it so the input is automatically updated to the\n # output value, possibly inplace, at the end of the\n # function execution. Also, since an update is\n # defined, a default value must also be (this is\n # verified by DebugMode). Use an array of size 0 but\n # the right ndim and dtype (use a shape of 1 on\n # broadcastable dimensions, 0 on the others).\n default_shape = [1 if _b else 0 for _b in inp.broadcastable]\n default_val = inp.type.value_zeros(default_shape)\n wrapped_inp = In(\n variable=inp,\n value=default_val,\n update=self.outputs[output_idx],\n )\n wrapped_inputs.append(wrapped_inp)\n preallocated_mitmot_outs.append(output_idx)\n new_mit_mot_out_slices[mitmot_idx].remove(inp_tap)\n else:\n # Wrap the corresponding input as usual. 
Leave the\n # output as-is.\n wrapped_inputs.append(In(self.inputs[input_idx], borrow=False))\n input_idx += 1\n\n # Wrap the inputs not associated to mitmots and wrap the remaining\n # outputs\n wrapped_inputs += [In(x, borrow=False) for x in self.inputs[input_idx:]]\n wrapped_outputs = [Out(x, borrow=True) for x in new_outputs[:slices]]\n wrapped_outputs += new_outputs[slices:]\n\n # Remove now useless outputs from the output list (start from the\n # end to avoid altering the indices of the other outputs to be\n # deleted.\n preallocated_mitmot_outs.sort()\n for p in preallocated_mitmot_outs[::-1]:\n del wrapped_outputs[p]\n\n # Store the list of mitmot output taps that have been altered\n # so they can be preallocated\n self.mitmots_preallocated = [\n i in preallocated_mitmot_outs for i in range(self.n_mit_mot_outs)\n ]\n\n # Add an optimization to the compilation mode to attach a feature\n # to the function graph just before the inplace optimizations are\n # applied (inplace optimizations start at position 50 so the\n # optimization to attach the feature is registered at position 49.9\n # so that it runs before them). This feature will prevent mitsot,\n # sitsot and nitsot outputs from being computed inplace (to allow\n # their preallocation).\n mitsot_start = self.n_mit_mot_outs - len(preallocated_mitmot_outs)\n nitsot_end = mitsot_start + self.n_mit_sot + self.n_sit_sot + self.n_nit_sot\n feature = NoOutputFromInplace(mitsot_start, nitsot_end)\n opt = AddFeatureOptimizer(feature)\n compilation_mode = self.mode_instance.register((opt, 49.9))\n\n else:\n # Output preallocation is not activated. Mark every mitmot output\n # tap as not being preallocated\n self.mitmots_preallocated = [False] * self.n_mit_mot_outs\n\n wrapped_inputs = [In(x, borrow=True) for x in self.inputs]\n wrapped_outputs = [Out(x, borrow=False) for x in self.outputs[:slices]]\n wrapped_outputs += self.outputs[slices:]\n\n compilation_mode = self.mode_instance\n\n profile = None\n if config.profile or (\n isinstance(self.profile, (str, bool, (int,))) and self.profile\n ):\n if isinstance(self.profile, str):\n profile = ScanProfileStats(name=self.profile)\n else:\n profile = ScanProfileStats(name=self.name)\n elif self.profile:\n profile = self.profile\n # make_thunk can be called many times on the same op\n # we do not want to recompile the inner fct every time.\n if not getattr(self, \"fn\", None):\n self.fn = function(\n wrapped_inputs,\n wrapped_outputs,\n mode=compilation_mode,\n name=self.name,\n profile=profile,\n on_unused_input=\"ignore\",\n )\n\n # Analyse the compile inner function to determine which inputs and\n # outputs are on the gpu and speed up some checks during the execution\n self.inps_is_tensor = [\n isinstance(out, TensorVariable) for out in self.fn.maker.fgraph.inputs\n ]\n self.outs_is_tensor = [\n isinstance(out, TensorVariable) for out in self.fn.maker.fgraph.outputs\n ]\n\n try:\n if impl == \"py\":\n raise MissingGXX\n cython_mintaps = np.asarray(self.mintaps, dtype=\"int32\")\n cython_tap_array_len = np.asarray(\n [len(x) for x in self.tap_array], dtype=\"int32\"\n )\n if len(self.tap_array) == 0:\n d1 = 0\n else:\n d1 = np.max(cython_tap_array_len)\n d0 = len(self.tap_array)\n cython_tap_array = np.zeros((d0, d1), dtype=\"int32\")\n for _d0 in range(d0):\n for _d1 in range(cython_tap_array_len[_d0]):\n cython_tap_array[_d0, _d1] = self.tap_array[_d0][_d1]\n cython_mit_mot_out_nslices = np.asarray(\n [len(x) for x in self.mit_mot_out_slices], dtype=\"int32\"\n )\n if 
len(self.mit_mot_out_slices) == 0:\n d1 = 0\n else:\n d1 = np.max(cython_mit_mot_out_nslices)\n d0 = len(self.mit_mot_out_slices)\n cython_mit_mot_out_slices = np.zeros((d0, d1), dtype=\"int32\")\n for _d0 in range(d0):\n for _d1 in range(cython_mit_mot_out_nslices[_d0]):\n cython_mit_mot_out_slices[_d0, _d1] = self.mit_mot_out_slices[_d0][\n _d1\n ]\n\n cython_vector_seqs = np.asarray(self.vector_seqs, dtype=\"int32\")\n cython_vector_outs = np.asarray(self.vector_outs, dtype=\"int32\")\n cython_mitmots_preallocated = np.asarray(\n self.mitmots_preallocated, dtype=\"int32\"\n )\n\n cython_inps_is_tensor = np.asarray(self.inps_is_tensor, dtype=\"int32\")\n cython_outs_is_tensor = np.asarray(self.outs_is_tensor, dtype=\"int32\")\n\n if self.destroy_map:\n cython_destroy_map = [\n x in self.destroy_map for x in range(len(node.outputs))\n ]\n else:\n cython_destroy_map = [0 for x in range(len(node.outputs))]\n cython_destroy_map = np.asarray(cython_destroy_map, dtype=\"int32\")\n from . import scan_perform_ext\n\n def p(node, args, outs):\n return scan_perform_ext.perform(\n self.n_shared_outs,\n self.n_mit_mot_outs,\n self.n_seqs,\n self.n_mit_mot,\n self.n_mit_sot,\n self.n_sit_sot,\n self.n_nit_sot,\n args[0],\n self.as_while,\n cython_mintaps,\n cython_tap_array,\n cython_tap_array_len,\n cython_vector_seqs,\n cython_vector_outs,\n cython_mit_mot_out_slices,\n cython_mit_mot_out_nslices,\n cython_mitmots_preallocated,\n cython_inps_is_tensor,\n cython_outs_is_tensor,\n self.fn.fn,\n self.fn,\n cython_destroy_map,\n args,\n outs,\n self,\n node,\n )\n\n except (ImportError, MissingGXX):\n p = self.perform\n\n # default arguments are stored in the closure of `rval`\n\n # Big ugly hack since we can't get the real value of allow_gc\n # for the englobing function.\n allow_gc = config.allow_gc and not self.allow_gc\n\n def rval(\n p=p, i=node_input_storage, o=node_output_storage, n=node, allow_gc=allow_gc\n ):\n r = p(n, [x[0] for x in i], o)\n for o in node.outputs:\n compute_map[o][0] = True\n if allow_gc:\n self.fn.free()\n return r\n\n rval.inputs = node_input_storage\n rval.outputs = node_output_storage\n rval.perform = p\n rval.lazy = False\n return rval\n\n def inner_seqs(self, list_inputs):\n # Given the list of inner inputs this function grabs those\n # corresponding to sequences\n return list_inputs[: self.n_seqs]\n\n def outer_seqs(self, list_inputs):\n if isinstance(list_inputs, Apply):\n list_inputs = list_inputs.inputs\n # Given the list of outer inputs this function grabs those\n # corresponding to sequences\n return list_inputs[1 : 1 + self.n_seqs]\n\n def inner_mitmot(self, list_inputs):\n n_taps = sum(len(x) for x in self.tap_array[: self.n_mit_mot])\n return list_inputs[self.n_seqs : self.n_seqs + n_taps]\n\n def outer_mitmot(self, list_inputs):\n if isinstance(list_inputs, Apply):\n list_inputs = list_inputs.inputs\n return list_inputs[1 + self.n_seqs : 1 + self.n_seqs + self.n_mit_mot]\n\n def inner_mitmot_outs(self, list_outputs):\n n_taps = sum(len(x) for x in self.mit_mot_out_slices)\n return list_outputs[:n_taps]\n\n def outer_mitmot_outs(self, list_outputs):\n if isinstance(list_outputs, Apply):\n list_outputs = list_outputs.outputs\n return list_outputs[: self.n_mit_mot]\n\n def mitmot_taps(self):\n return self.tap_array[: self.n_mit_mot]\n\n def mitmot_out_taps(self):\n return self.mit_mot_out_slices[: self.n_mit_mot]\n\n def inner_mitsot(self, list_inputs):\n n_mitmot_taps = sum(len(x) for x in self.tap_array[: self.n_mit_mot])\n ntaps_upto_sit_sot = sum(\n len(x) 
for x in self.tap_array[: (self.n_mit_mot + self.n_mit_sot)]\n )\n return list_inputs[\n self.n_seqs + n_mitmot_taps : self.n_seqs + ntaps_upto_sit_sot\n ]\n\n def outer_mitsot(self, list_inputs):\n if isinstance(list_inputs, Apply):\n list_inputs = list_inputs.inputs\n offset = 1 + self.n_seqs + self.n_mit_mot\n return list_inputs[offset : offset + self.n_mit_sot]\n\n def inner_mitsot_outs(self, list_outputs):\n n_taps = sum(len(x) for x in self.mit_mot_out_slices)\n return list_outputs[n_taps : n_taps + self.n_mit_sot]\n\n def outer_mitsot_outs(self, list_outputs):\n if isinstance(list_outputs, Apply):\n list_outputs = list_outputs.outputs\n return list_outputs[self.n_mit_mot : self.n_mit_mot + self.n_mit_sot]\n\n def mitsot_taps(self):\n return self.tap_array[self.n_mit_mot : self.n_mit_mot + self.n_mit_sot]\n\n def inner_sitsot(self, list_inputs):\n n_taps_upto_sit_sot = sum(\n len(x) for x in self.tap_array[: (self.n_mit_mot + self.n_mit_sot)]\n )\n offset = self.n_seqs + n_taps_upto_sit_sot\n return list_inputs[offset : offset + self.n_sit_sot]\n\n def outer_sitsot(self, list_inputs):\n if isinstance(list_inputs, Apply):\n list_inputs = list_inputs.inputs\n offset = 1 + self.n_seqs + self.n_mit_mot + self.n_mit_sot\n return list_inputs[offset : offset + self.n_sit_sot]\n\n def inner_sitsot_outs(self, list_outputs):\n n_taps = sum(len(x) for x in self.mit_mot_out_slices)\n offset = self.n_mit_sot + n_taps\n return list_outputs[offset : offset + self.n_sit_sot]\n\n def outer_sitsot_outs(self, list_outputs):\n if isinstance(list_outputs, Apply):\n list_outputs = list_outputs.outputs\n offset = self.n_mit_mot + self.n_mit_sot\n return list_outputs[offset : offset + self.n_sit_sot]\n\n def outer_nitsot(self, list_inputs):\n if isinstance(list_inputs, Apply):\n list_inputs = list_inputs.inputs\n offset = (\n 1\n + self.n_seqs\n + self.n_mit_mot\n + self.n_mit_sot\n + self.n_sit_sot\n + self.n_shared_outs\n )\n return list_inputs[offset : offset + self.n_nit_sot]\n\n def inner_nitsot_outs(self, list_outputs):\n n_taps = sum(len(x) for x in self.mit_mot_out_slices)\n offset = self.n_mit_sot + n_taps + self.n_sit_sot\n return list_outputs[offset : offset + self.n_nit_sot]\n\n def outer_nitsot_outs(self, list_outputs):\n if isinstance(list_outputs, Apply):\n list_outputs = list_outputs.outputs\n offset = self.n_mit_mot + self.n_mit_sot + self.n_sit_sot\n return list_outputs[offset : offset + self.n_nit_sot]\n\n def inner_shared(self, list_inputs):\n n_taps_upto_sit_sot = sum(\n len(x) for x in self.tap_array[: (self.n_mit_mot + self.n_mit_sot)]\n )\n offset = self.n_seqs + n_taps_upto_sit_sot + self.n_sit_sot\n return list_inputs[offset : offset + self.n_shared_outs]\n\n def outer_shared(self, list_inputs):\n if isinstance(list_inputs, Apply):\n list_inputs = list_inputs.inputs\n offset = 1 + self.n_seqs + self.n_mit_mot + self.n_mit_sot + self.n_sit_sot\n return list_inputs[offset : offset + self.n_shared_outs]\n\n def inner_shared_outs(self, list_outputs):\n n_taps = sum(len(x) for x in self.mit_mot_out_slices)\n offset = self.n_mit_sot + n_taps + self.n_sit_sot + self.n_nit_sot\n return list_outputs[offset : offset + self.n_shared_outs]\n\n def outer_shared_outs(self, list_outputs):\n if isinstance(list_outputs, Apply):\n list_outputs = list_outputs.outputs\n offset = self.n_mit_mot + self.n_mit_sot + self.n_sit_sot + self.n_nit_sot\n return list_outputs[offset : offset + self.n_shared_outs]\n\n def inner_non_seqs(self, list_inputs):\n n_taps_upto_sit_sot = sum(\n len(x) for x in 
self.tap_array[: (self.n_mit_mot + self.n_mit_sot)]\n )\n offset = self.n_seqs + n_taps_upto_sit_sot + self.n_sit_sot + self.n_shared_outs\n return list_inputs[offset:]\n\n def outer_non_seqs(self, list_inputs):\n if isinstance(list_inputs, Apply):\n list_inputs = list_inputs.inputs\n offset = (\n 1\n + self.n_seqs\n + self.n_mit_mot\n + self.n_mit_sot\n + self.n_sit_sot\n + self.n_nit_sot\n + self.n_shared_outs\n )\n return list_inputs[offset:]\n\n def perform(self, node, inputs, output_storage, params=None):\n \"\"\"Compute the scan operation in Python.\n\n The `inputs` are packed like this:\n\n n_steps\n\n X sequence inputs x_1, x_2, ... x_<self.n_seqs>\n\n Y initial states (u_1, u_2, ... u_<self.n_outs>) for our\n outputs. Each must have appropriate length (T_1, T_2, ..., T_Y).\n\n W other inputs w_1, w_2, ... w_W\n\n There are at least ``1 + self.n_seqs + self.n_outs`` inputs, and the\n ones above this number are passed to the scanned function as\n non-sequential inputs.\n\n The outputs are more straightforward:\n\n Y sequence outputs y_1, y_2, ... y_<self.n_outs>\n\n \"\"\"\n # 1. Unzip the number of steps and sequences. If number of steps is\n # negative flip sequences around, and make n_steps positive\n t0_call = time.time()\n t_fn = 0\n n_steps = inputs[0]\n seqs = []\n if n_steps < 0:\n # History, in the past, this was used for backward\n # scan. Now we reverse the inputs outside of scan.\n raise IndexError(\n f\"Scan was asked to run for negative number of step {int(n_steps)}\"\n )\n elif n_steps == 0:\n raise NotImplementedError(\n \"We didn't implemented yet the case where scan do 0 iteration\"\n )\n else:\n for idx, seq in enumerate(inputs[1 : self.seqs_arg_offset]):\n if seq.shape[0] < n_steps:\n raise ValueError(\n (\n \"Sequence is shorter then the required \"\n \"number of steps : (n_steps, seq, \"\n \"seq.shape):\"\n ),\n n_steps,\n node.inputs[1 + idx],\n seq.shape,\n )\n seqs.append(seq)\n\n # 2. Allocate memory for the outputs. Construct the list:\n # store_steps -- map containing the length of each output\n # pos -- map containing the current position of each\n # output\n\n store_steps = [\n arg.shape[0]\n for arg in inputs[self.seqs_arg_offset : self.shared_arg_offset]\n ]\n store_steps += [\n arg\n for arg in inputs[\n self.nit_sot_arg_offset : self.nit_sot_arg_offset + self.n_nit_sot\n ]\n ]\n\n pos = [\n (-self.mintaps[idx]) % store_steps[idx]\n for idx in range(self.n_outs + self.n_nit_sot)\n ]\n # 2.1 Create storage space for outputs\n for idx in range(self.n_outs):\n if idx in self.destroy_map:\n # ^ Case 1. 
Outputs should be computed inplace of their\n # initial state\n output_storage[idx][0] = inputs[self.seqs_arg_offset + idx]\n elif (\n output_storage[idx][0] is not None\n and output_storage[idx][0].shape[1:]\n == inputs[self.seqs_arg_offset + idx].shape[1:]\n and output_storage[idx][0].shape[0] >= store_steps[idx]\n ):\n # Put in the values of the initial state\n output_storage[idx][0] = output_storage[idx][0][: store_steps[idx]]\n if idx > self.n_mit_mot:\n l = -self.mintaps[idx]\n output_storage[idx][0][:l] = inputs[self.seqs_arg_offset + idx][:l]\n else:\n output_storage[idx][0][:] = inputs[self.seqs_arg_offset + idx]\n else:\n output_storage[idx][0] = inputs[self.seqs_arg_offset + idx].copy()\n\n offset = self.nit_sot_arg_offset + self.n_nit_sot\n other_args = inputs[offset:]\n inner_input_storage = self.fn.input_storage\n nb_mitmot_in = sum(map(len, self.tap_array[: self.n_mit_mot]))\n old_mitmot_input_storage = [None] * nb_mitmot_in\n old_mitmot_input_data = [None] * nb_mitmot_in\n inner_output_storage = self.fn.output_storage\n old_inner_output_storage = [None] * len(inner_output_storage)\n old_inner_output_data = [None] * len(inner_output_storage)\n fn = self.fn.fn\n offset = (\n self.n_seqs\n + sum(map(len, self.tap_array[: self.n_outs]))\n + self.n_shared_outs\n )\n for idx in range(len(other_args)):\n inner_input_storage[idx + offset].storage[0] = other_args[idx]\n\n i = 0\n cond = True\n # ############# THE MAIN LOOP ##############\n # for i in range(n_steps):\n while (i < n_steps) and cond:\n # sequences over which scan iterates\n # 3. collect input slices\n for idx in range(self.n_seqs):\n if self.vector_seqs[idx]:\n inner_input_storage[idx].storage[0] = seqs[idx][i : i + 1].reshape(\n ()\n )\n else:\n inner_input_storage[idx].storage[0] = seqs[idx][i]\n\n offset = self.n_seqs\n for idx in range(self.n_outs):\n if self.vector_outs[idx]:\n for tap in self.tap_array[idx]:\n _idx = (pos[idx] + tap) % store_steps[idx]\n inner_input_storage[offset].storage[0] = output_storage[idx][0][\n _idx : _idx + 1\n ].reshape(())\n offset += 1\n else:\n for tap in self.tap_array[idx]:\n _idx = (pos[idx] + tap) % store_steps[idx]\n inner_input_storage[offset].storage[0] = output_storage[idx][0][\n _idx\n ]\n offset += 1\n\n a_offset = self.shared_arg_offset\n o_offset = self.n_outs + self.n_nit_sot\n if i == 0:\n for j in range(self.n_shared_outs):\n inner_input_storage[offset].storage[0] = inputs[a_offset + j]\n offset += 1\n else:\n for j in range(self.n_shared_outs):\n inner_input_storage[offset].storage[0] = output_storage[\n o_offset + j\n ][0]\n offset += 1\n\n # 4. collecting slices where the output should be stored\n\n # 4.1. Collect slices for mitmots\n offset = 0\n for idx in range(self.n_mit_mot_outs):\n if not self.mitmots_preallocated[idx]:\n inner_output_storage[offset].storage[0] = None\n offset += 1\n\n # 4.2. Collect slices for mitsots, sitsots and nitsots\n if i != 0:\n for idx in range(self.n_outs + self.n_nit_sot - self.n_mit_mot):\n if (\n store_steps[idx + self.n_mit_mot] == 1\n or self.vector_outs[idx + self.n_mit_mot]\n ):\n inner_output_storage[idx + offset].storage[0] = None\n else:\n _pos0 = idx + self.n_mit_mot\n inner_output_storage[idx + offset].storage[0] = output_storage[\n _pos0\n ][0][pos[_pos0]]\n else:\n for idx in range(self.n_outs + self.n_nit_sot - self.n_mit_mot):\n inner_output_storage[idx + offset].storage[0] = None\n\n # 4.3. 
Collect slices for shared outputs\n offset += self.n_outs + self.n_nit_sot - self.n_mit_mot\n for idx in range(self.n_shared_outs):\n inner_output_storage[idx + offset].storage[0] = None\n\n # 4.4. If there is a condition add it to the mix\n if self.as_while:\n pdx = offset + self.n_shared_outs\n inner_output_storage[pdx].storage[0] = None\n\n # 4.5. Keep a reference to the variables (ndarrays, GpuArrays,\n # etc) currently in the output_storage to be able to compare them\n # with the actual outputs of the inner function after its\n # execution. Also keep pointers to their data to be able to detect\n # cases where outputs reused the allocated object but alter the\n # memory region they refer to.\n for idx in range(len(inner_output_storage)):\n\n var = inner_output_storage[idx].storage[0]\n old_inner_output_storage[idx] = var\n\n if var is None:\n old_inner_output_data[idx] = None\n elif self.outs_is_tensor[idx]:\n old_inner_output_data[idx] = var.data\n else:\n old_inner_output_data[idx] = var.gpudata\n\n # 4.6. Keep a reference to the variables (ndarrays, GpuArrays,\n # etc) associated with mitmot inputs currently in the\n # input_storage to be able to compare them with the content of the\n # input_storage after the execution of the function. Also keep\n # pointers to their data to be able to detect cases where outputs\n # reused the allocated object but alter the memory region they\n # refer to.\n for idx in range(nb_mitmot_in):\n var = inner_input_storage[idx + self.n_seqs].storage[0]\n old_mitmot_input_storage[idx] = var\n\n if var is None:\n old_mitmot_input_data[idx] = None\n elif self.inps_is_tensor[idx + self.n_seqs]:\n old_mitmot_input_data[idx] = var.data\n else:\n old_mitmot_input_data[idx] = var.gpudata\n\n # 5.1 compute outputs\n t0_fn = time.time()\n\n try:\n fn()\n except Exception:\n if hasattr(fn, \"position_of_error\"):\n # this is a new vm-provided function or c linker\n # they need this because the exception manipulation\n # done by raise_with_op is not implemented in C.\n if hasattr(fn, \"thunks\"):\n # For the CVM\n raise_with_op(\n self.fn.maker.fgraph,\n fn.nodes[fn.position_of_error],\n fn.thunks[fn.position_of_error],\n )\n else:\n # For the c linker\n # We don't have access from python to all the\n # temps values So for now, we just don't print\n # the extra shapes/strides info\n raise_with_op(\n self.fn.maker.fgraph, fn.nodes[fn.position_of_error]\n )\n else:\n # old-style linkers raise their own exceptions\n raise\n\n dt_fn = time.time() - t0_fn\n if self.as_while:\n pdx = offset + self.n_shared_outs\n cond = inner_output_storage[pdx].storage[0] == 0\n\n # 5.2. By calling fn() directly instead of calling the aesara\n # function, it is possible that the updates have not been\n # performed. 
Perform the updates if needed.\n offset_out = len(inner_output_storage) - 1\n if getattr(fn, \"need_update_inputs\", True):\n # Update the inputs that have an update function\n for inp, storage in zip(\n self.fn.maker.expanded_inputs[::-1], self.fn.input_storage[::-1]\n ):\n if inp.update is not None:\n storage.data = inner_output_storage[offset_out].data\n offset_out -= 1\n\n t_fn += dt_fn\n offset_out = 0\n\n # 5.3 Copy over the values for mit_mot outputs\n mitmot_inp_offset = 0\n mitmot_out_idx = 0\n for j in range(self.n_mit_mot):\n for k in self.mit_mot_out_slices[j]:\n if self.mitmots_preallocated[mitmot_out_idx]:\n # This output tap has been preallocated.\n inp_idx = mitmot_inp_offset + self.tap_array[j].index(k)\n\n # Verify whether the input points to the same data as\n # it did before the execution of the inner function.\n old_var = old_mitmot_input_storage[inp_idx]\n new_var = inner_input_storage[self.n_seqs + inp_idx].storage[0]\n if old_var is new_var:\n old_data = old_mitmot_input_data[inp_idx]\n if self.inps_is_tensor[self.n_seqs + inp_idx]:\n same_data = new_var.data == old_data\n else:\n same_data = new_var.gpudata == old_data\n else:\n same_data = False\n\n # If the corresponding input storage still points to\n # the same data, it has been modified inplace and\n # nothing needs to be done. Otherwise, recover the\n # and store it in `outs` as usual\n if not same_data:\n output_storage[j][0][k + pos[j]] = inner_input_storage[\n self.n_seqs + inp_idx\n ].storage[0]\n\n else:\n # This output tap has not been preallocated, recover\n # its value as usual\n output_storage[j][0][k + pos[j]] = inner_output_storage[\n offset_out\n ].storage[0]\n offset_out += 1\n\n mitmot_out_idx += 1\n\n mitmot_inp_offset += len(self.tap_array[j])\n\n # 5.4 Copy over the values for mit_sot/sit_sot outputs\n begin = self.n_mit_mot\n end = self.n_outs\n offset_out -= self.n_mit_mot\n\n for j in range(begin, end):\n\n # Copy the output value to `outs`, if necessary\n if store_steps[j] == 1 or self.vector_outs[j]:\n output_storage[j][0][pos[j]] = inner_output_storage[\n offset_out + j\n ].storage[0]\n else:\n # Check whether the initialization of the output storage\n # map for this output has been reused.\n old_var = old_inner_output_storage[offset_out + j]\n new_var = inner_output_storage[offset_out + j].storage[0]\n if old_var is new_var:\n old_data = old_inner_output_data[offset_out + j]\n if old_data is None:\n output_reused = False\n elif self.outs_is_tensor[offset_out + j]:\n output_reused = new_var.data == old_data\n else:\n output_reused = new_var.gpudata == old_data\n else:\n output_reused = False\n\n if not output_reused:\n try:\n output_storage[j][0][pos[j]] = inner_output_storage[\n offset_out + j\n ].storage[0]\n except ValueError as e:\n if i == 0:\n # First iteration, so don't change the\n # error message as it can't be the\n # case we write about.\n raise\n ne = ValueError(\n \"An output of the scan has changed shape. 
\"\n \"This may be caused by a pushout optimization.\"\n \" Try adding \"\n \"'optimizer_excluding=scanOp_pushout_output' \"\n \"to your Aesara flags.\"\n )\n raise ne from e\n\n # 5.5 Copy over the values for nit_sot outputs\n begin = end\n end += self.n_nit_sot\n for j in range(begin, end):\n\n if i == 0:\n jout = j + offset_out\n shape = (store_steps[j],) + inner_output_storage[jout].storage[\n 0\n ].shape\n dtype = inner_output_storage[jout].storage[0].dtype\n if (\n output_storage[j][0] is None\n or output_storage[j][0].shape[0] < store_steps[j]\n or output_storage[j][0].shape[1:] != shape[1:]\n or output_storage[j][0].dtype != dtype\n ):\n output_storage[j][0] = node.outputs[j].type.value_zeros(shape)\n elif output_storage[j][0].shape[0] != store_steps[j]:\n output_storage[j][0] = output_storage[j][0][: store_steps[j]]\n output_storage[j][0][pos[j]] = inner_output_storage[jout].storage[0]\n elif store_steps[j] == 1 or self.vector_outs[j]:\n output_storage[j][0][pos[j]] = inner_output_storage[\n j + offset_out\n ].storage[0]\n else:\n # Check whether the initialization of the output storage map\n # for this output has been reused.\n old_var = old_inner_output_storage[offset_out + j]\n old_data = old_inner_output_data[offset_out + j]\n new_var = inner_output_storage[offset_out + j].storage[0]\n if old_var is new_var:\n if old_data is None:\n output_reused = False\n elif self.outs_is_tensor[offset_out + j]:\n output_reused = new_var.data == old_data\n else:\n output_reused = new_var.gpudata == old_data\n else:\n output_reused = False\n\n if not output_reused:\n output_storage[j][0][pos[j]] = inner_output_storage[\n j + offset_out\n ].storage[0]\n\n # 5.6 Copy over the values for outputs corresponding to shared\n # variables\n begin = end\n end += self.n_shared_outs\n for j in range(begin, end):\n jout = j + offset_out\n output_storage[j][0] = inner_output_storage[jout].storage[0]\n\n pos = [(idx + 1) % store for idx, store in zip(pos, store_steps)]\n i = i + 1\n\n # 6. Check if you need to re-order output buffers\n begin = self.n_mit_mot\n end = self.n_outs + self.n_nit_sot\n for idx in range(begin, end):\n if store_steps[idx] < i - self.mintaps[idx] and pos[idx] < store_steps[idx]:\n\n pdx = pos[idx]\n if pdx >= store_steps[idx] // 2:\n # It seems inefficient to copy the bigger part of the\n # array over, and back, but it is the only way that\n # there is no overlap in the areas of out[idx][0] that\n # are read and written.\n # This way, there will be no information overwritten\n # before it is read (as it used to happen).\n shape = (pdx,) + output_storage[idx][0].shape[1:]\n tmp = node.outputs[idx].type.value_zeros(shape)\n tmp[:] = output_storage[idx][0][:pdx]\n output_storage[idx][0][: store_steps[idx] - pdx] = output_storage[\n idx\n ][0][pdx:]\n output_storage[idx][0][store_steps[idx] - pdx :] = tmp\n del tmp\n else:\n shape = (store_steps[idx] - pdx,) + output_storage[idx][0].shape[1:]\n tmp = node.outputs[idx].type.value_zeros(shape)\n tmp[:] = output_storage[idx][0][pdx:]\n output_storage[idx][0][store_steps[idx] - pdx :] = output_storage[\n idx\n ][0][:pdx]\n output_storage[idx][0][: store_steps[idx] - pdx] = tmp\n del tmp\n # This would normally happen only when doing truncated\n # backpropagation through time. 
In such a scenario Scan is\n # expected to return 0 for all entries for which the gradient is\n # not actually computed\n elif store_steps[idx] > i - self.mintaps[idx]:\n output_storage[idx][0][i - self.mintaps[idx] :] = 0\n # This is a fix for a bug introduced by while. If you say\n # you want to loop up to a condition, you expect the output\n # to have that length ( and not the maximal length possible)\n #\n # Without this the behaviour of a scan op is not consistent\n # if optimization gets applied compared to when optimization\n # do not get applied\n if i < n_steps:\n # The reason I don't use out[idx][0][:i] is because for\n # certain outputs (those with multiple taps),\n # outs[idx][0] has more than n_steps entries, with the\n # initial state at the beginning. When indexing in it I\n # usually have to do something like\n # outs[idx][0][i+offset]. To do something similar here,\n # I would have first to compute the maximal tap for\n # every output and then do outs[0][:i+maximal_tap],\n # which implies I think more computations then this\n # little trick that I used\n output_storage[idx][0] = output_storage[idx][0][: -(n_steps - i)]\n\n # We never reuse the input or output storage of the\n # inner function so we clear it.\n for i_s in inner_input_storage:\n i_s.storage[0] = None\n for o_s in inner_output_storage:\n o_s.storage[0] = None\n\n t_call = time.time() - t0_call\n # NOTE: make this match what's in function.types.Function\n # and this little string helps us to find this spot:\n # \"PROFILE_CODE\"\n\n if hasattr(self.fn.maker, \"profile\") and self.fn.maker.profile:\n profile = self.fn.maker.profile\n profile.callcount += 1\n profile.nbsteps += n_steps\n profile.call_time += t_call\n profile.vm_call_time += t_fn\n if hasattr(self.fn.fn, \"update_profile\"):\n self.fn.fn.update_profile(profile)\n\n self.t_call = t_call\n self.t_fn = t_fn\n\n def infer_shape(self, fgraph, node, input_shapes):\n # input_shapes correspond to the shapes of node.inputs\n for inp, inp_shp in zip(node.inputs, input_shapes):\n assert inp_shp is None or len(inp_shp) == inp.type.ndim\n\n # Here we build 2 variables;\n # - A list `inner_ins_shapes`, such that inner_ins_shapes[i] is the\n # shape of self.inputs[i]\n # - A dictionary `out_equivalent` containing, for every inner input,\n # an equivalent variable computed from the outer inputs.\n # NOTE : For non-sequences, this equivalence is trivial. For\n # sequences and recurrent states, there is no direct equivalence\n # between outer and inner inputs. However, because every iteration\n # of the Scan needs to give the same output shapes, we can give an\n # equivalence between these inner inputs and the subelements of the\n # corresponding outer inputs that the Scan would use as input for\n # any given iteration. For simplicity, we use iteration 0.\n inner_ins_shapes = []\n out_equivalent = OrderedDict()\n\n # The two following blocks are commented as it cause in some\n # cases extra scans in the graph. See gh-XXX for the\n # investigation.\n\n # We skip the first outer input as it is the total or current number\n # of iterations.\n # sequences\n seqs_shape = [x[1:] for x in input_shapes[1 : 1 + self.n_seqs]]\n # We disable extra infer_shape for now. 
See gh-3765.\n extra_infer_shape = False\n\n if extra_infer_shape:\n inner_seqs = self.inputs[: self.n_seqs]\n outer_seqs = node.inputs[1 : 1 + self.n_seqs]\n for in_s, out_s in zip(inner_seqs, outer_seqs):\n out_equivalent[in_s] = out_s[0]\n\n # mit_mot, mit_sot, sit_sot\n outer_inp_idx = 1 + self.n_seqs\n inner_inp_idx = self.n_seqs\n else:\n outer_inp_idx = 0\n n_outs = self.n_mit_mot + self.n_mit_sot + self.n_sit_sot\n outs_shape = []\n for idx in range(n_outs):\n mintap = abs(min(self.tap_array[idx]))\n for k in self.tap_array[idx]:\n outs_shape += [input_shapes[idx + self.n_seqs + 1][1:]]\n if extra_infer_shape:\n corresponding_tap = node.inputs[outer_inp_idx][mintap + k]\n out_equivalent[self.inputs[inner_inp_idx]] = corresponding_tap\n inner_inp_idx += 1\n outer_inp_idx += 1\n\n # shared_outs\n offset = 1 + self.n_seqs + n_outs\n for idx in range(self.n_shared_outs):\n outs_shape += [input_shapes[idx + offset]]\n\n # non_sequences\n offset += self.n_nit_sot + self.n_shared_outs\n inner_ins_shapes = seqs_shape + outs_shape + input_shapes[offset:]\n assert len(inner_ins_shapes) == len(self.inputs)\n\n # Non-sequences have a direct equivalent from self.inputs in\n # node.inputs\n inner_non_sequences = self.inputs[len(seqs_shape) + len(outs_shape) :]\n for in_ns, out_ns in zip(inner_non_sequences, node.inputs[offset:]):\n out_equivalent[in_ns] = out_ns\n\n if self.as_while:\n self_outs = self.outputs[:-1]\n else:\n self_outs = self.outputs\n outs_shape = infer_shape(\n outs=self_outs, inputs=self.inputs, input_shapes=inner_ins_shapes\n )\n # Will be used to check if outs_shape can be expressed without using\n # variables in self.inputs.\n # The shapes of node.inputs are valid.\n validator = Validator(\n valid=input_shapes, invalid=self.inputs, valid_equivalent=out_equivalent\n )\n\n offset = 1 + self.n_seqs\n scan_outs = [x for x in input_shapes[offset : offset + n_outs]]\n offset += n_outs\n outs_shape_n = self.n_mit_mot_outs + self.n_mit_sot + self.n_sit_sot\n for x in range(self.n_nit_sot):\n out_shape_x = outs_shape[outs_shape_n + x]\n if out_shape_x is None:\n # This output is not a tensor, and has no shape\n scan_outs.append(None)\n else:\n # We need to make sure that we can compute the shapes from\n # node.inputs, and constants, without using the variables\n # in the inner function.\n r = node.outputs[n_outs + x]\n assert r.ndim == 1 + len(out_shape_x)\n shp = [node.inputs[offset + self.n_shared_outs + x]]\n for i, shp_i in zip(range(1, r.ndim), out_shape_x):\n # Validate shp_i. v_shape_i is either None (if invalid),\n # or a (variable, Boolean) tuple. The Boolean indicates\n # whether variable is shp_i (if True), or an valid\n # equivalent (if False). 
Here, we only need the variable.\n v_shp_i = validator.check(shp_i)\n if v_shp_i is None:\n if hasattr(r, \"broadcastable\") and r.broadcastable[i]:\n shp.append(1)\n else:\n shp.append(Shape_i(i)(r))\n else:\n # It can (or at least, an equivalent variable can)\n shp.append(v_shp_i[0])\n scan_outs.append(tuple(shp))\n\n scan_outs += [x for x in input_shapes[offset : offset + self.n_shared_outs]]\n # if we are dealing with a repeat-until, then we do not know the\n # leading dimension so we replace it for every entry with Shape_i\n if self.as_while:\n scan_outs_init = scan_outs\n scan_outs = []\n for o, x in zip(node.outputs, scan_outs_init):\n if x is None:\n scan_outs.append(None)\n else:\n scan_outs.append((Shape_i(0)(o),) + x[1:])\n return scan_outs\n\n def connection_pattern(self, node):\n\n # We cache the result of this function because, with a previous\n # implementation that repeatedly called grad, there were cases\n # where calls to aesara.grad() took as much as 4h for functions\n # containing many nested scans.\n if hasattr(node.tag, \"connection_pattern\"):\n return node.tag.connection_pattern\n\n # Obtain the connection pattern of the inner function.\n inner_connect_pattern = io_connection_pattern(self.inputs, self.outputs)\n\n # Initially assume no outer input is connected to any outer output\n connection_pattern = [[False for output in node.outputs] for x in node.inputs]\n\n # For every possible pair of outer input and outer output, iterate\n # over every possible pairing of their corresponding inner inputs\n # and inner outputs and, if one such pair of inner variables is\n # connected than the pair of outer variables is connected.\n for outer_oidx in range(len(node.outputs)):\n inner_oidxs = self.var_mappings[\"inner_out_from_outer_out\"][outer_oidx]\n\n for outer_iidx in range(len(node.inputs)):\n inner_iidxs = self.var_mappings[\"inner_inp_from_outer_inp\"][outer_iidx]\n\n for inner_oidx in inner_oidxs:\n for inner_iidx in inner_iidxs:\n\n if inner_connect_pattern[inner_iidx][inner_oidx]:\n connection_pattern[outer_iidx][outer_oidx] = True\n break\n\n if connection_pattern[outer_iidx][outer_oidx]:\n break\n\n # Applying Floyd-Warshall to find all paths connecting inputs to\n # outputs. Note that if `x` is an input to `y_t` and `y_tm1` is an\n # input to `z_t` then `x` is an input to `z_t`.\n\n n_outs = len(node.outputs)\n\n for steps in range(n_outs):\n for iidx in range(n_outs):\n for jidx in range(n_outs):\n\n # Get the idx of the outer input corresponding to that\n # outer output\n j_inp_idx = self.var_mappings[\"outer_inp_from_outer_out\"][jidx]\n\n if j_inp_idx != -1:\n if connection_pattern[j_inp_idx][iidx] is True:\n for k in range(len(connection_pattern)):\n if connection_pattern[k][jidx]:\n connection_pattern[k][iidx] = True\n\n node.tag.connection_pattern = connection_pattern\n return connection_pattern\n\n def get_oinp_iinp_iout_oout_mappings(self):\n \"\"\"\n Compute and return dictionary mappings between the inputs and\n outputs of the inner function and the inputs and outputs of the Scan\n node in the outer graph.\n\n The return value is a dictionary in which the keys are the names of\n the individual mappings and the values are the mapping dictionaries\n themselves. In dictionaries representing mappings to outer variables,\n the values are individual integer indices. 
In dictionaries\n representing mappings to inner variables, the values are sequences of\n indices because multiple inner variables can be associated with the\n same state.\n\n \"\"\"\n # Lists for outer variables contain individual indices, lists for\n # inner variables contain sequences of indices because many inner\n # variables can be associated with the same outer variable. The list\n # and indices are initialized already containing the data associated\n # with the timestep index, the first outer input.\n outer_input_indices = [0]\n inner_input_indices = [[]]\n inner_output_indices = [[]]\n outer_output_indices = [-1]\n\n outer_iidx = 1\n inner_iidx = 0\n inner_oidx = 0\n outer_oidx = 0\n\n # Handle sequences inputs\n for i in range(self.info[\"n_seqs\"]):\n outer_input_indices.append(outer_iidx)\n inner_input_indices.append([inner_iidx])\n inner_output_indices.append([])\n outer_output_indices.append(-1)\n\n outer_iidx += 1\n inner_iidx += 1\n inner_oidx += 0\n outer_oidx += 0\n\n # Handle mitmots, mitsots and sitsots variables\n for i in range(len(self.info[\"tap_array\"])):\n nb_input_taps = len(self.info[\"tap_array\"][i])\n\n if i < self.n_mit_mot:\n nb_output_taps = len(self.mit_mot_out_slices[i])\n else:\n nb_output_taps = 1\n\n outer_input_indices.append(outer_iidx)\n inner_input_indices.append(\n list(range(inner_iidx, inner_iidx + nb_input_taps))\n )\n inner_output_indices.append(\n list(range(inner_oidx, inner_oidx + nb_output_taps))\n )\n outer_output_indices.append(outer_oidx)\n\n outer_iidx += 1\n inner_iidx += nb_input_taps\n inner_oidx += nb_output_taps\n outer_oidx += 1\n\n # This is needed because, for outer inputs (and for outer inputs only)\n # nitsots come *after* shared variables.\n outer_iidx += self.info[\"n_shared_outs\"]\n\n # Handle nitsots variables\n for i in range(self.n_nit_sot):\n outer_input_indices.append(outer_iidx)\n inner_input_indices.append([])\n inner_output_indices.append([inner_oidx])\n outer_output_indices.append(outer_oidx)\n\n outer_iidx += 1\n inner_iidx += 0\n inner_oidx += 1\n outer_oidx += 1\n\n # This is needed because, for outer inputs (and for outer inputs only)\n # nitsots come *after* shared variables.\n outer_iidx -= self.info[\"n_shared_outs\"] + self.n_nit_sot\n\n # Handle shared states\n for i in range(self.info[\"n_shared_outs\"]):\n outer_input_indices.append(outer_iidx)\n inner_input_indices.append([inner_iidx])\n inner_output_indices.append([inner_oidx])\n outer_output_indices.append(outer_oidx)\n\n outer_iidx += 1\n inner_iidx += 1\n inner_oidx += 1\n outer_oidx += 1\n\n # This is needed because, for outer inputs (and for outer inputs only)\n # nitsots come *after* shared variables.\n outer_iidx += self.n_nit_sot\n\n # Handle non-sequence inputs\n # Note : the number of non-sequence inputs is not stored in self.info\n # so it has to be inferred from the number of inner inputs that remain\n # to be handled\n for i in range(len(self.inputs) - inner_iidx):\n outer_input_indices.append(outer_iidx)\n inner_input_indices.append([inner_iidx])\n inner_output_indices.append([])\n outer_output_indices.append(-1)\n\n outer_iidx += 1\n inner_iidx += 1\n inner_oidx += 0\n outer_oidx += 0\n\n # With the global mapping inferred, the individual mappings\n # can be produced\n mappings = {\n \"outer_inp_from_outer_out\": {},\n \"inner_inp_from_outer_out\": {},\n \"inner_out_from_outer_out\": {},\n \"inner_inp_from_outer_inp\": {},\n \"inner_out_from_outer_inp\": {},\n \"outer_out_from_outer_inp\": {},\n \"outer_inp_from_inner_inp\": {},\n 
\"inner_out_from_inner_inp\": {},\n \"outer_out_from_inner_inp\": {},\n \"outer_inp_from_inner_out\": {},\n \"inner_inp_from_inner_out\": {},\n \"outer_out_from_inner_out\": {},\n }\n\n for (oinp, iinp, iout, oout) in zip(\n outer_input_indices,\n inner_input_indices,\n inner_output_indices,\n outer_output_indices,\n ):\n\n if oout != -1:\n mappings[\"outer_inp_from_outer_out\"][oout] = oinp\n mappings[\"inner_inp_from_outer_out\"][oout] = iinp\n mappings[\"inner_out_from_outer_out\"][oout] = iout\n\n if oinp != -1:\n mappings[\"inner_inp_from_outer_inp\"][oinp] = iinp\n mappings[\"inner_out_from_outer_inp\"][oinp] = iout\n mappings[\"outer_out_from_outer_inp\"][oinp] = oout\n\n for idx in iinp:\n mappings[\"outer_inp_from_inner_inp\"][idx] = oinp\n mappings[\"inner_out_from_inner_inp\"][idx] = iout\n mappings[\"outer_out_from_inner_inp\"][idx] = oout\n\n for idx in iout:\n mappings[\"outer_inp_from_inner_out\"][idx] = oinp\n mappings[\"inner_inp_from_inner_out\"][idx] = iinp\n mappings[\"outer_out_from_inner_out\"][idx] = oout\n\n return mappings\n\n def L_op(self, inputs, outs, dC_douts):\n if not isinstance(outs, (list, tuple)):\n outs = [outs]\n # `grad_step` equals the number of steps the original scan node has\n # done (if the original scan is a while loop than this number is the\n # length of the output sequence)\n # We do not know what kind of outputs the original scan has, so we\n # try first to see if it has a nit_sot output, then a sit_sot and\n # then a mit_sot\n if self.n_nit_sot > 0:\n grad_steps = self.outer_nitsot_outs(outs)[0].shape[0]\n elif self.n_sit_sot > 0:\n grad_steps = self.outer_sitsot_outs(outs)[0].shape[0] - 1\n elif self.n_mit_sot > 0:\n grad_steps = (\n self.outer_mitsot_outs(outs)[0].shape[0] + self.mintaps[self.n_mit_mot]\n )\n else:\n grad_steps = inputs[0]\n if self.as_while:\n n_steps = outs[0].shape[0]\n\n # Restrict the number of grad steps according to\n # self.truncate_gradient\n if self.truncate_gradient != -1:\n grad_steps = minimum(grad_steps, self.truncate_gradient)\n\n self_inputs = self.inputs\n self_outputs = self.outputs\n # differentiable inputs\n diff_inputs = (\n self.inner_seqs(self_inputs)\n + self.inner_mitmot(self_inputs)\n + self.inner_mitsot(self_inputs)\n + self.inner_sitsot(self_inputs)\n + self.inner_non_seqs(self_inputs)\n )\n diff_outputs = (\n self.inner_mitmot_outs(self_outputs)\n + self.inner_mitsot_outs(self_outputs)\n + self.inner_sitsot_outs(self_outputs)\n + self.inner_nitsot_outs(self_outputs)\n )\n scan_node = outs[0].owner\n connection_pattern = self.connection_pattern(scan_node)\n\n def get_inp_idx(iidx):\n if iidx < self.n_seqs:\n return 1 + iidx\n oidx = 1 + self.n_seqs\n iidx = iidx - self.n_seqs\n for taps in self.mitmot_taps():\n if len(taps) > iidx:\n return oidx\n else:\n oidx += 1\n iidx -= len(taps)\n for taps in self.mitsot_taps():\n if len(taps) > iidx:\n return oidx\n else:\n oidx += 1\n iidx -= len(taps)\n\n if iidx < self.info[\"n_sit_sot\"]:\n return oidx + iidx\n else:\n return oidx + iidx + self.info[\"n_nit_sot\"]\n\n def get_out_idx(iidx):\n oidx = 0\n for taps in self.mitmot_out_taps():\n if len(taps) > iidx:\n return oidx\n else:\n oidx += 1\n iidx -= len(taps)\n return oidx + iidx\n\n def compute_all_gradients(known_grads):\n y_s = known_grads.keys()\n g_y_s = known_grads.values()\n\n for g_y in g_y_s:\n if str(g_y.dtype) in integer_dtypes:\n raise TypeError(\n \"Gradients may never be integers but g_y \"\n \"has type \" + str(g_y.type)\n )\n\n out_indices = [get_out_idx(self_outputs.index(y)) for 
y in y_s]\n\n connected_inputs = [\n i\n for i in range(len(scan_node.inputs))\n if any([connection_pattern[i][odx] for odx in out_indices])\n ]\n\n wrt = [\n x\n for x in graph_inputs(y_s)\n if (x in diff_inputs)\n and get_inp_idx(self_inputs.index(x)) in connected_inputs\n ]\n gmp = OrderedDict()\n\n # Required in case there is a pair of variables X and Y, with X\n # used to compute Y, for both of which there is an external\n # gradient signal. Without this, the total gradient signal on X\n # will be the external gradient signalknown_grads[X]. With this,\n # it will be the sum of the external gradient signal and the\n # gradient obtained by propagating Y's external gradient signal\n # to X.\n known_grads = OrderedDict([(k.copy(), v) for (k, v) in known_grads.items()])\n\n grads = grad(\n cost=None,\n known_grads=known_grads,\n wrt=wrt,\n consider_constant=wrt,\n disconnected_inputs=\"ignore\",\n return_disconnected=\"None\",\n null_gradients=\"return\",\n )\n\n for i in range(len(wrt)):\n gmp[wrt[i]] = grads[i]\n\n rval = [gmp.get(p, None) for p in diff_inputs]\n return rval\n\n dC_dinps_t = [None for inp in diff_inputs]\n disconnected_dC_dinps_t = [True for inp in diff_inputs]\n dC_dXts = []\n Xts = []\n for idx, Xt in enumerate(diff_outputs):\n\n # We are looking for x[t-1] for a given x[t]\n if idx >= self.n_mit_mot_outs:\n Xt_placeholder = safe_new(Xt)\n Xts.append(Xt_placeholder)\n\n # Different processing based on whether Xt is a nitsot output\n # or not. NOTE : This cannot be done by using\n # \"if Xt not in self.inner_nitsot_outs(self_outputs)\" because\n # the exact same variable can be used as multiple outputs.\n idx_nitsot_start = (\n self.info[\"n_mit_mot\"] + self.info[\"n_mit_sot\"] + self.info[\"n_sit_sot\"]\n )\n idx_nitsot_end = idx_nitsot_start + self.info[\"n_nit_sot\"]\n if idx < idx_nitsot_start or idx >= idx_nitsot_end:\n # What we do here is loop through dC_douts and collect all\n # those that are connected to the specific one and do an\n # upcast on all of their dtypes to get the dtype for this\n # specific output. 
Deciding if the gradient with this\n # specific previous step is defined or not is done somewhere\n # else.\n dtypes = []\n states = (\n self.inner_mitmot(self_inputs)\n + self.inner_mitsot(self_inputs)\n + self.inner_sitsot(self_inputs)\n )\n\n for pos, inp in enumerate(states):\n if inp in graph_inputs([Xt]):\n # Get the index of the outer output that to which\n # the state variable 'inp' corresponds.\n outer_oidx = self.var_mappings[\"outer_out_from_inner_inp\"][\n self.n_seqs + pos\n ]\n\n if not isinstance(dC_douts[outer_oidx].type, DisconnectedType):\n dtypes.append(dC_douts[outer_oidx].dtype)\n if dtypes:\n new_dtype = aesara.scalar.upcast(*dtypes)\n else:\n new_dtype = config.floatX\n dC_dXt = safe_new(Xt, dtype=new_dtype)\n else:\n if isinstance(dC_douts[idx].type, DisconnectedType):\n continue\n dC_dXt = safe_new(dC_douts[idx][0])\n dC_dXts.append(dC_dXt)\n\n known_grads = OrderedDict()\n dc_dxts_idx = 0\n for i in range(len(diff_outputs)):\n if i < idx_nitsot_start or i >= idx_nitsot_end:\n if diff_outputs[i] in known_grads:\n known_grads[diff_outputs[i]] += dC_dXts[dc_dxts_idx]\n else:\n known_grads[diff_outputs[i]] = dC_dXts[dc_dxts_idx]\n dc_dxts_idx += 1\n else:\n if isinstance(dC_douts[i].type, DisconnectedType):\n continue\n else:\n if diff_outputs[i] in known_grads:\n known_grads[diff_outputs[i]] += dC_dXts[dc_dxts_idx]\n else:\n known_grads[diff_outputs[i]] = dC_dXts[dc_dxts_idx]\n dc_dxts_idx += 1\n dC_dinps_t = compute_all_gradients(known_grads)\n\n # mask inputs that get no gradients\n for dx in range(len(dC_dinps_t)):\n if not dC_dinps_t[dx]:\n dC_dinps_t[dx] = aet.zeros_like(diff_inputs[dx])\n else:\n disconnected_dC_dinps_t[dx] = False\n for Xt, Xt_placeholder in zip(diff_outputs[self.n_mit_mot_outs :], Xts):\n tmp = forced_replace(dC_dinps_t[dx], Xt, Xt_placeholder)\n dC_dinps_t[dx] = tmp\n\n # construct dX_dtm1\n dC_dXtm1s = []\n for pos, x in enumerate(dC_dinps_t[self.n_seqs :]):\n\n # Get the index of the first inner input corresponding to the\n # pos-ieth inner input state\n idxs = self.var_mappings[\"inner_out_from_inner_inp\"][self.n_seqs + pos]\n\n # Check if the pos-th input is associated with one of the\n # recurrent states\n x_is_state = pos < sum([len(t) for t in self.tap_array])\n\n if x_is_state and len(idxs) > 0:\n opos = idxs[0]\n dC_dXtm1s.append(safe_new(dC_dXts[opos]))\n if hasattr(x, \"dtype\") and x.dtype != dC_dXts[opos].dtype:\n dC_dinps_t[pos + self.n_seqs] = x.astype(dC_dXts[opos].dtype)\n else:\n dC_dXtm1s.append(safe_new(x))\n\n for dx, dC_dXtm1 in enumerate(dC_dXtm1s):\n if isinstance(dC_dinps_t[dx + self.n_seqs].type, NullType):\n # The accumulated gradient is undefined\n pass\n elif isinstance(dC_dXtm1.type, NullType):\n # The new gradient is undefined, this makes the accumulated\n # gradient undefined as weell\n dC_dinps_t[dx + self.n_seqs] = dC_dXtm1\n else:\n dC_dinps_t[dx + self.n_seqs] += dC_dXtm1\n # Construct scan op\n # Seqs\n if self.as_while:\n # equivalent to x[:n_steps][::-1]\n outer_inp_seqs = [x[n_steps - 1 :: -1] for x in inputs[1 : 1 + self.n_seqs]]\n else:\n outer_inp_seqs = [x[::-1] for x in inputs[1 : 1 + self.n_seqs]]\n for idx in range(self.n_mit_mot + self.n_mit_sot):\n mintap = np.min(self.tap_array[idx])\n if idx < self.n_mit_mot:\n outmaxtap = np.max(self.mitmot_out_taps()[idx])\n else:\n outmaxtap = 0\n seq = outs[idx]\n for k in self.tap_array[idx]:\n if outmaxtap - k != 0:\n nw_seq = seq[k - mintap : -(outmaxtap - k)][::-1]\n else:\n nw_seq = seq[k - mintap :][::-1]\n outer_inp_seqs.append(nw_seq)\n 
outer_inp_seqs += [x[:-1][::-1] for x in self.outer_sitsot_outs(outs)]\n for x in self.outer_nitsot_outs(dC_douts):\n if not isinstance(x.type, DisconnectedType):\n if self.as_while:\n # equivalent to x[:n_steps][::-1]\n outer_inp_seqs.append(x[n_steps - 1 :: -1])\n else:\n outer_inp_seqs.append(x[::-1])\n\n if hasattr(inputs[0].tag, \"test_value\"):\n # Here we tests that the new scan input sequence all have\n # the same shape[0]. This is a properties that the scan()\n # fct add and we want to keep it for all Scan op. This is\n # used in T_Scan.test_grad_multiple_outs_taps to test\n # that.\n if self.as_while:\n n = n_steps.tag.test_value\n else:\n n = inputs[0].tag.test_value\n for taps, x in zip(self.mitsot_taps(), self.outer_mitsot_outs(outs)):\n mintap = np.min(taps)\n if hasattr(x[::-1][:mintap], \"test_value\"):\n assert x[::-1][:mintap].tag.test_value.shape[0] == n\n for x in self.outer_sitsot_outs(outs):\n if hasattr(x[::-1][:-1].tag, \"test_value\"):\n assert x[::-1][:-1].tag.test_value.shape[0] == n\n for x in self.outer_nitsot_outs(outs):\n if hasattr(x[::-1].tag, \"test_value\"):\n if self.as_while:\n assert x[n_steps - 1 :: -1].tag.test_value.shape[0] == n\n else:\n assert x[::-1].tag.test_value.shape[0] == n\n outer_inp_seqs += [\n x[::-1][: np.min(taps)]\n for taps, x in zip(self.mitsot_taps(), self.outer_mitsot_outs(outs))\n ]\n outer_inp_seqs += [x[::-1][:-1] for x in self.outer_sitsot_outs(outs)]\n outer_inp_seqs += [x[::-1] for x in self.outer_nitsot_outs(outs)]\n\n # Restrict the length of the outer sequences to the number of grad\n # steps\n outer_inp_seqs = [s_[:grad_steps] for s_ in outer_inp_seqs]\n\n inner_inp_seqs = self.inner_seqs(self_inputs)\n inner_inp_seqs += self.inner_mitmot(self_inputs)\n inner_inp_seqs += self.inner_mitsot(self_inputs)\n inner_inp_seqs += self.inner_sitsot(self_inputs)\n inner_inp_seqs += self.inner_nitsot_outs(dC_dXts)\n inner_inp_seqs += Xts\n # mitmot\n outer_inp_mitmot = []\n inner_inp_mitmot = []\n inner_out_mitmot = []\n mitmot_inp_taps = []\n mitmot_out_taps = []\n type_outs = []\n out_pos = 0\n ins_pos = self.n_seqs\n n_mitmot_outs = 0\n n_mitmot_inps = 0\n\n for idx in range(self.n_mit_mot):\n if isinstance(dC_douts[idx].type, DisconnectedType):\n out = outs[idx]\n outer_inp_mitmot.append(aet.zeros_like(out))\n else:\n outer_inp_mitmot.append(dC_douts[idx][::-1])\n mitmot_inp_taps.append([])\n mitmot_out_taps.append([])\n undefined_msg = None\n through_shared = False\n disconnected = True\n\n for jdx in range(len(self.mit_mot_out_slices[idx])):\n inner_inp_mitmot.append(dC_dXts[out_pos])\n mitmot_inp_taps[idx].append(-self.mit_mot_out_slices[idx][jdx])\n n_mitmot_inps += 1\n out_pos += 1\n\n for jdx in range(len(self.tap_array[idx])):\n tap = -self.tap_array[idx][jdx]\n\n # Only create a new inner input if there is not already one\n # associated with this input tap\n if tap not in mitmot_inp_taps[idx]:\n inner_inp_mitmot.append(dC_dXtm1s[ins_pos - self.n_seqs])\n\n if isinstance(dC_dinps_t[ins_pos].type, NullType):\n # We cannot use Null in the inner graph, so we\n # use a zero tensor of the appropriate shape instead.\n inner_out_mitmot.append(\n aet.zeros(diff_inputs[ins_pos].shape, dtype=config.floatX)\n )\n undefined_msg = dC_dinps_t[ins_pos].type.why_null\n else:\n new_inner_out_mitmot = dC_dinps_t[ins_pos]\n\n # If there is already an inner input associated with that\n # input tap, make sure the computation of the new output\n # uses it instead of the input it's currently using\n if tap in mitmot_inp_taps[idx]:\n to_replace 
= dC_dXtm1s[ins_pos - self.n_seqs]\n replacement_idx = len(mitmot_inp_taps[idx]) - mitmot_inp_taps[\n idx\n ].index(tap)\n replacement = inner_inp_mitmot[-replacement_idx]\n\n self.tap_array[idx]\n new_inner_out_mitmot = clone_replace(\n new_inner_out_mitmot, replace=[(to_replace, replacement)]\n )\n\n inner_out_mitmot.append(new_inner_out_mitmot)\n\n if not disconnected_dC_dinps_t[ins_pos]:\n disconnected = False\n\n for _sh in self.inner_shared(self_inputs):\n if _sh in graph_inputs([dC_dinps_t[ins_pos]]):\n through_shared = True\n\n ins_pos += 1\n n_mitmot_outs += 1\n mitmot_out_taps[idx].append(-self.tap_array[idx][jdx])\n\n # Only add the tap as a new input tap if needed\n if tap not in mitmot_inp_taps[idx]:\n n_mitmot_inps += 1\n mitmot_inp_taps[idx].append(-self.tap_array[idx][jdx])\n\n if undefined_msg:\n type_outs.append(undefined_msg)\n elif through_shared:\n type_outs.append(\"through_shared\")\n elif disconnected:\n type_outs.append(\"disconnected\")\n else:\n type_outs.append(\"connected\")\n\n offset = self.n_mit_mot\n for idx in range(self.n_mit_sot):\n if isinstance(dC_douts[idx + offset].type, DisconnectedType):\n outer_inp_mitmot.append(outs[idx + offset].zeros_like())\n else:\n outer_inp_mitmot.append(dC_douts[idx + offset][::-1])\n mitmot_inp_taps.append([])\n mitmot_out_taps.append([])\n idx_tap = idx + self.n_mit_mot\n inner_inp_mitmot.append(dC_dXts[out_pos])\n out_pos += 1\n n_mitmot_inps += 1\n undefined_msg = None\n through_shared = False\n disconnected = True\n mitmot_inp_taps[idx + offset].append(0)\n for jdx in range(len(self.tap_array[idx_tap])):\n inner_inp_mitmot.append(dC_dXtm1s[ins_pos - self.n_seqs])\n\n if isinstance(dC_dinps_t[ins_pos].type, NullType):\n # We cannot use Null in the inner graph, so we\n # use a zero tensor of the appropriate shape instead.\n inner_out_mitmot.append(\n aet.zeros(diff_inputs[ins_pos].shape, dtype=config.floatX)\n )\n undefined_msg = dC_dinps_t[ins_pos].type.why_null\n else:\n inner_out_mitmot.append(dC_dinps_t[ins_pos])\n\n mitmot_inp_taps[idx + offset].append(-self.tap_array[idx_tap][jdx])\n mitmot_out_taps[idx].append(-self.tap_array[idx_tap][jdx])\n if not disconnected_dC_dinps_t[ins_pos]:\n disconnected = False\n for _sh in self.inner_shared(self_inputs):\n if _sh in graph_inputs([dC_dinps_t[ins_pos]]):\n through_shared = True\n\n n_mitmot_inps += 1\n ins_pos += 1\n n_mitmot_outs += 1\n\n if undefined_msg:\n type_outs.append(undefined_msg)\n elif through_shared:\n type_outs.append(\"through_shared\")\n elif disconnected:\n type_outs.append(\"disconnected\")\n else:\n type_outs.append(\"connected\")\n\n offset += self.n_mit_sot\n for idx in range(self.n_sit_sot):\n mitmot_inp_taps.append([0, 1])\n mitmot_out_taps.append([1])\n through_shared = False\n if not isinstance(dC_douts[idx + offset].type, DisconnectedType):\n outer_inp_mitmot.append(dC_douts[idx + offset][::-1])\n else:\n if isinstance(dC_dinps_t[ins_pos].type, NullType):\n # Cannot use dC_dinps_t[ins_pos].dtype, so we use\n # floatX instead, as it is a dummy value that will not\n # be used anyway.\n outer_inp_mitmot.append(\n aet.zeros(outs[idx + offset].shape, dtype=config.floatX)\n )\n else:\n outer_inp_mitmot.append(\n aet.zeros(\n outs[idx + offset].shape, dtype=dC_dinps_t[ins_pos].dtype\n )\n )\n\n if isinstance(dC_dinps_t[ins_pos].type, NullType):\n # We cannot use Null in the inner graph, so we\n # use a zero tensor of the appropriate shape instead.\n inner_out_mitmot.append(\n aet.zeros(diff_inputs[ins_pos].shape, dtype=config.floatX)\n )\n else:\n 
inner_out_mitmot.append(dC_dinps_t[ins_pos])\n\n for _sh in self.inner_shared(self_inputs):\n if _sh in graph_inputs([dC_dinps_t[ins_pos]]):\n through_shared = True\n\n if isinstance(dC_dinps_t[ins_pos].type, NullType):\n type_outs.append(dC_dinps_t[ins_pos].type.why_null)\n elif through_shared:\n type_outs.append(\"through_shared\")\n elif disconnected_dC_dinps_t[ins_pos]:\n type_outs.append(\"disconnected\")\n else:\n type_outs.append(\"connected\")\n\n inner_inp_mitmot += [dC_dXts[out_pos], dC_dXtm1s[ins_pos - self.n_seqs]]\n n_mitmot_outs += 1\n out_pos += 1\n ins_pos += 1\n n_mitmot_inps += 2\n\n n_nit_sot = self.n_seqs\n inner_out_nitsot = dC_dinps_t[: self.n_seqs]\n inner_out_sitsot = dC_dinps_t[ins_pos:]\n for _p, vl in enumerate(inner_out_sitsot):\n through_shared = False\n for _sh in self.inner_shared(self_inputs):\n if _sh in graph_inputs([vl]):\n through_shared = True\n if isinstance(vl.type, NullType):\n type_outs.append(vl.type.why_null)\n # Replace the inner output with a zero tensor of\n # the right shape\n inner_out_sitsot[_p] = aet.zeros(\n diff_inputs[ins_pos + _p].shape, dtype=config.floatX\n )\n elif through_shared:\n type_outs.append(\"through_shared\")\n elif disconnected_dC_dinps_t[_p + ins_pos]:\n type_outs.append(\"disconnected\")\n else:\n type_outs.append(\"connected\")\n\n for _p, vl in enumerate(inner_out_nitsot):\n through_shared = False\n for _sh in self.inner_shared(self_inputs):\n if _sh in graph_inputs([vl]):\n through_shared = True\n if isinstance(vl.type, NullType):\n type_outs.append(vl.type.why_null)\n # Replace the inner output with a zero tensor of\n # the right shape\n inner_out_nitsot[_p] = aet.zeros(\n diff_inputs[_p].shape, dtype=config.floatX\n )\n\n if through_shared:\n type_outs.append(\"through_shared\")\n elif disconnected_dC_dinps_t[_p]:\n type_outs.append(\"disconnected\")\n else:\n type_outs.append(\"connected\")\n\n inner_inp_sitsot = dC_dXtm1s[ins_pos - self.n_seqs :]\n outer_inp_sitsot = []\n for _idx, y in enumerate(inner_inp_sitsot):\n x = self.outer_non_seqs(inputs)[_idx]\n if isinstance(y.type, NullType):\n # Cannot use dC_dXtm1s.dtype, so we use floatX instead.\n outer_inp_sitsot.append(\n aet.zeros(\n [grad_steps + 1] + [x.shape[i] for i in range(x.ndim)],\n dtype=config.floatX,\n )\n )\n # replace y by a zero tensor of the right shape\n inner_inp_sitsot[_idx] = aet.zeros(\n diff_inputs[ins_pos + _idx].shape, dtype=config.floatX\n )\n\n else:\n outer_inp_sitsot.append(\n aet.zeros(\n [grad_steps + 1] + [x.shape[i] for i in range(x.ndim)],\n dtype=y.dtype,\n )\n )\n\n n_sitsot_outs = len(outer_inp_sitsot)\n new_tap_array = mitmot_inp_taps + [[-1] for k in range(n_sitsot_outs)]\n\n info = OrderedDict()\n info[\"n_seqs\"] = len(outer_inp_seqs)\n info[\"n_mit_sot\"] = 0\n info[\"tap_array\"] = new_tap_array\n info[\"gpua\"] = False\n info[\"n_mit_mot\"] = len(outer_inp_mitmot)\n info[\"n_mit_mot_outs\"] = n_mitmot_outs\n info[\"mit_mot_out_slices\"] = mitmot_out_taps\n info[\"truncate_gradient\"] = self.truncate_gradient\n info[\"n_sit_sot\"] = n_sitsot_outs\n info[\"n_shared_outs\"] = 0\n info[\"n_nit_sot\"] = n_nit_sot\n info[\"as_while\"] = False\n info[\"profile\"] = self.profile\n info[\"destroy_map\"] = OrderedDict()\n if self.name:\n info[\"name\"] = \"grad_of_\" + self.name\n else:\n info[\"name\"] = None\n info[\"mode\"] = self.mode\n info[\"allow_gc\"] = self.allow_gc\n\n outer_inputs = (\n [grad_steps]\n + outer_inp_seqs\n + outer_inp_mitmot\n + outer_inp_sitsot\n + [n_steps if self.as_while else inputs[0] for _ in 
range(n_nit_sot)]\n + self.outer_shared(inputs)\n + self.outer_non_seqs(inputs)\n )\n\n inner_gfn_ins = (\n inner_inp_seqs\n + inner_inp_mitmot\n + inner_inp_sitsot\n + self.inner_shared(self_inputs)\n + self.inner_non_seqs(self_inputs)\n )\n inner_gfn_outs = inner_out_mitmot + inner_out_sitsot + inner_out_nitsot\n\n local_op = Scan(inner_gfn_ins, inner_gfn_outs, info)\n outputs = local_op(*outer_inputs)\n if type(outputs) not in (list, tuple):\n outputs = [outputs]\n # Re-order the gradients correctly\n gradients = [DisconnectedType()()]\n\n offset = self.n_mit_mot + self.n_mit_sot + self.n_sit_sot + n_sitsot_outs\n for p, (x, t) in enumerate(\n zip(\n outputs[offset : offset + self.n_seqs],\n type_outs[offset : offset + self.n_seqs],\n )\n ):\n if t == \"connected\":\n # If the forward scan is in as_while mode, we need to pad\n # the gradients, so that they match the size of the input\n # sequences.\n if self.as_while:\n n_zeros = inputs[0] - n_steps\n shp = (n_zeros,)\n if x.ndim > 1:\n shp = shp + tuple(x.shape[i] for i in range(1, x.ndim))\n z = aet.zeros(shp, dtype=x.dtype)\n x = aet.concatenate([x[::-1], z], axis=0)\n gradients.append(x)\n else:\n gradients.append(x[::-1])\n elif t == \"disconnected\":\n gradients.append(DisconnectedType()())\n elif t == \"through_shared\":\n gradients.append(\n grad_undefined(\n self, p + 1, inputs[p + 1], \"Depends on a shared variable\"\n )\n )\n else:\n # t contains the \"why_null\" string of a NullType\n gradients.append(NullType(t)())\n\n end = self.n_mit_mot + self.n_mit_sot + self.n_sit_sot\n for p, (x, t) in enumerate(zip(outputs[:end], type_outs[:end])):\n if t == \"connected\":\n # If the forward scan is in as_while mode, we need to pad\n # the gradients, so that they match the size of the input\n # sequences.\n if self.as_while:\n n_zeros = inputs[0] - grad_steps\n shp = (n_zeros,)\n if x.ndim > 1:\n shp = shp + tuple(x.shape[i] for i in range(1, x.ndim))\n z = aet.zeros(shp, dtype=x.dtype)\n x = aet.concatenate([x[::-1], z], axis=0)\n gradients.append(x)\n else:\n gradients.append(x[::-1])\n elif t == \"disconnected\":\n gradients.append(DisconnectedType()())\n elif t == \"through_shared\":\n gradients.append(\n grad_undefined(\n self,\n p + 1 + self.n_seqs,\n inputs[p + 1 + self.n_seqs],\n \"Depends on a shared variable\",\n )\n )\n else:\n # t contains the \"why_null\" string of a NullType\n gradients.append(NullType(t)())\n\n start = len(gradients)\n node = outs[0].owner\n for idx in range(self.n_shared_outs):\n disconnected = True\n connected_flags = self.connection_pattern(node)[idx + start]\n for dC_dout, connected in zip(dC_douts, connected_flags):\n if not isinstance(dC_dout.type, DisconnectedType) and connected:\n disconnected = False\n if disconnected:\n gradients.append(DisconnectedType()())\n else:\n gradients.append(\n grad_undefined(\n self, idx, inputs[idx], \"Shared Variable with update\"\n )\n )\n\n start = len(gradients)\n gradients += [DisconnectedType()() for _ in range(self.n_nit_sot)]\n begin = end\n\n end = begin + n_sitsot_outs\n for p, (x, t) in enumerate(zip(outputs[begin:end], type_outs[begin:end])):\n if t == \"connected\":\n gradients.append(x[-1])\n elif t == \"disconnected\":\n gradients.append(DisconnectedType()())\n elif t == \"through_shared\":\n gradients.append(\n grad_undefined(\n self,\n p + begin + 1,\n inputs[p + begin + 1],\n \"Depends on a shared variable\",\n )\n )\n else:\n # t contains the \"why_null\" string of a NullType\n gradients.append(NullType(t)())\n\n # Mask disconnected gradients\n 
# Ideally we would want to assert that the gradients we are\n # replacing do indeed evaluate to 0, though that is not practical\n # from a computational point of view\n # The gradients of scan are computed replacing Disconnected with 0,\n # because through the recurrence they can become nonzero\n for idx in range(len(gradients)):\n disconnected = True\n for kdx in range(len(node.outputs)):\n if connection_pattern[idx][kdx] and not isinstance(\n dC_douts[kdx].type, DisconnectedType\n ):\n disconnected = False\n if disconnected:\n gradients[idx] = DisconnectedType()()\n return gradients\n\n def R_op(self, inputs, eval_points):\n # Step 0. Prepare some shortcut variable\n self_inputs = self.inputs\n rop_of_inputs = (\n self_inputs[: self.n_seqs + self.n_outs]\n + self_inputs[self.n_seqs + self.n_outs + self.n_shared_outs :]\n )\n self_outputs = self.outputs\n\n # Step 1. Compute the R_op of the inner function\n inner_eval_points = [safe_new(x, \"_evalpoint\") for x in rop_of_inputs]\n if self.as_while:\n rop_self_outputs = self_outputs[:-1]\n else:\n rop_self_outputs = self_outputs\n if self.info[\"n_shared_outs\"] > 0:\n rop_self_outputs = rop_self_outputs[: -self.info[\"n_shared_outs\"]]\n rop_outs = Rop(rop_self_outputs, rop_of_inputs, inner_eval_points)\n if type(rop_outs) not in (list, tuple):\n rop_outs = [rop_outs]\n # Step 2. Figure out what corresponds to what in the scan\n\n # When doing the R-op of scan, you end up having double of each type of\n # input, because for each sequence you need also its eval point, for\n # each mit_mot, mit_sot, sit_sot or other type of inputs the same.\n # Interestingly enough, all these types of eval points behave the same\n # way as the input to which they correspond\n # The only exception is the eval point for the number of sequences, and\n # evan point for the number of nit_sot which I think should just be\n # ignored (?)\n info = OrderedDict()\n info[\"n_seqs\"] = self.n_seqs * 2\n info[\"n_mit_sot\"] = self.n_mit_sot * 2\n info[\"n_sit_sot\"] = self.n_sit_sot * 2\n info[\"n_mit_mot\"] = self.n_mit_mot * 2\n info[\"n_nit_sot\"] = self.n_nit_sot * 2\n info[\"n_shared_outs\"] = self.n_shared_outs\n info[\"gpua\"] = False\n info[\"as_while\"] = self.as_while\n info[\"profile\"] = self.profile\n info[\"truncate_gradient\"] = self.truncate_gradient\n if self.name:\n info[\"name\"] = \"rop_of_\" + self.name\n else:\n info[\"name\"] = None\n info[\"mode\"] = self.mode\n info[\"allow_gc\"] = self.allow_gc\n info[\"mit_mot_out_slices\"] = self.mit_mot_out_slices * 2\n info[\"destroy_map\"] = OrderedDict()\n new_tap_array = []\n b = 0\n e = self.n_mit_mot\n new_tap_array += self.tap_array[b:e] * 2\n b = e\n e += self.n_mit_sot\n new_tap_array += self.tap_array[b:e] * 2\n b = e\n e += self.n_sit_sot\n new_tap_array += self.tap_array[b:e] * 2\n info[\"tap_array\"] = new_tap_array\n\n # Sequences ...\n b = 1\n ib = 0\n e = 1 + self.n_seqs\n ie = self.n_seqs\n clean_eval_points = []\n for inp, evp in zip(inputs[b:e], eval_points[b:e]):\n if evp is not None:\n clean_eval_points.append(evp)\n else:\n clean_eval_points.append(inp.zeros_like())\n\n scan_seqs = inputs[b:e] + clean_eval_points\n inner_seqs = self_inputs[ib:ie] + inner_eval_points[ib:ie]\n\n # MIT_MOT sequences ...\n b = e\n e = e + self.n_mit_mot\n ib = ie\n ie = ie + int(np.sum([len(x) for x in self.tap_array[: self.n_mit_mot]]))\n clean_eval_points = []\n for inp, evp in zip(inputs[b:e], eval_points[b:e]):\n if evp is not None:\n clean_eval_points.append(evp)\n else:\n 
clean_eval_points.append(inp.zeros_like())\n\n scan_mit_mot = inputs[b:e] + clean_eval_points\n inner_mit_mot = self_inputs[ib:ie] + inner_eval_points[ib:ie]\n\n # MIT_SOT sequences ...\n b = e\n e = e + self.n_mit_sot\n ib = ie\n ie = ie + int(\n np.sum(\n [\n len(x)\n for x in self.tap_array[\n self.n_mit_mot : self.n_mit_mot + self.n_mit_sot\n ]\n ]\n )\n )\n clean_eval_points = []\n for inp, evp in zip(inputs[b:e], eval_points[b:e]):\n if evp is not None:\n clean_eval_points.append(evp)\n else:\n clean_eval_points.append(inp.zeros_like())\n\n scan_mit_sot = inputs[b:e] + eval_points[b:e]\n inner_mit_sot = self_inputs[ib:ie] + inner_eval_points[ib:ie]\n\n # SIT_SOT sequences ...\n b = e\n e = e + self.n_sit_sot\n ib = ie\n ie = ie + self.n_sit_sot\n clean_eval_points = []\n for inp, evp in zip(inputs[b:e], eval_points[b:e]):\n if evp is not None:\n clean_eval_points.append(evp)\n else:\n clean_eval_points.append(inp.zeros_like())\n\n scan_sit_sot = inputs[b:e] + clean_eval_points\n inner_sit_sot = self_inputs[ib:ie] + inner_eval_points[ib:ie]\n\n # Shared outs ...\n b = e\n e = e + self.n_shared_outs\n ib = ie\n ie = ie + self.n_shared_outs\n scan_shared = inputs[b:e]\n inner_shared = self_inputs[ib:ie]\n\n # NIT_SOT sequences\n b = e\n e = e + self.n_nit_sot\n scan_nit_sot = inputs[b:e] * 2\n\n # All other arguments\n clean_eval_points = []\n for inp, evp in zip(inputs[e:], eval_points[e:]):\n if evp is not None:\n clean_eval_points.append(evp)\n else:\n clean_eval_points.append(inp.zeros_like())\n scan_other = inputs[e:] + clean_eval_points\n # inner_eval_points do not have entries for shared variables\n inner_other = self_inputs[ie:] + inner_eval_points[ib:]\n\n # Outputs\n n_mit_mot_outs = int(np.sum([len(x) for x in self.mit_mot_out_slices]))\n info[\"n_mit_mot_outs\"] = n_mit_mot_outs * 2\n b = 0\n e = n_mit_mot_outs\n inner_out_mit_mot = self_outputs[b:e] + rop_outs[b:e]\n b = e\n e = e + self.n_mit_sot\n inner_out_mit_sot = self_outputs[b:e] + rop_outs[b:e]\n b = e\n e = e + self.n_sit_sot\n inner_out_sit_sot = self_outputs[b:e] + rop_outs[b:e]\n b = e\n e = e + self.n_nit_sot\n inner_out_nit_sot = self_outputs[b:e] + rop_outs[b:e]\n b = e\n e = e + self.n_shared_outs\n inner_out_shared = self_outputs[b:e]\n\n inner_ins = (\n inner_seqs\n + inner_mit_mot\n + inner_mit_sot\n + inner_sit_sot\n + inner_shared\n + inner_other\n )\n inner_outs = (\n inner_out_mit_mot\n + inner_out_mit_sot\n + inner_out_sit_sot\n + inner_out_nit_sot\n + inner_out_shared\n )\n\n if self.as_while:\n inner_outs += [self_outputs[-1]]\n scan_inputs = (\n [inputs[0]]\n + scan_seqs\n + scan_mit_mot\n + scan_mit_sot\n + scan_sit_sot\n + scan_shared\n + scan_nit_sot\n + scan_other\n )\n\n local_op = Scan(inner_ins, inner_outs, info)\n outputs = local_op(*scan_inputs)\n if type(outputs) not in (list, tuple):\n outputs = [outputs]\n # Select only the result of the R_op results\n final_outs = []\n b = self.n_mit_mot\n e = self.n_mit_mot * 2\n final_outs += outputs[b:e]\n b = e + self.n_mit_sot\n e = e + self.n_mit_sot * 2\n final_outs += outputs[b:e]\n b = e + self.n_sit_sot\n e = e + self.n_sit_sot * 2\n final_outs += outputs[b:e]\n b = e + self.n_nit_sot\n e = e + self.n_nit_sot * 2\n final_outs += outputs[b:e]\n final_outs += [None] * self.n_shared_outs\n\n return final_outs\n\n\n# Since Scan is an op that contains an Aesara compiled function, it is\n# useful to let DebugMode know about it.\nops_with_inner_function[Scan] = \"fn\"\n\n\n@register_profiler_printer\ndef profile_printer(\n message, compile_time, 
fct_call_time, apply_time, apply_cimpl, outputs_size, file\n):\n # Scan overhead profile\n if any(\n [\n isinstance(node.op, Scan) and v > 0\n for (fgraph, node), v in apply_time.items()\n ]\n ):\n print(\"\", file=file)\n print(\"Scan overhead:\", file=file)\n print(\n \"<Scan op time(s)> <sub scan fct time(s)> <sub scan op \"\n \"time(s)> <sub scan fct time(% scan op time)> <sub scan \"\n \"op time(% scan op time)> <node>\",\n file=file,\n )\n\n total_super_scan_time = 0\n total_scan_fct_time = 0\n total_scan_op_time = 0\n for (fgraph, node), v in apply_time.items():\n if isinstance(node.op, Scan) and not node.op.fn.profile:\n print(\n \" One scan node do not have its inner profile enabled. \"\n \"If you enable Aesara profiler with \"\n \"'aesara.function(..., profile=True)', you must manually\"\n \" enable the profiling for each scan too: \"\n \"'aesara.scan(...,profile=True)'.\"\n \" Or use Aesara flag 'profile=True'.\",\n file=file,\n )\n elif isinstance(node.op, Scan) and node.op.fn.profile:\n if v > 0:\n scan_fct_time = node.op.fn.profile.call_time\n scan_op_time = sum(node.op.fn.profile.apply_time.values())\n total_super_scan_time += v\n total_scan_fct_time += scan_fct_time\n total_scan_op_time += scan_op_time\n print(\n \" %5.1fs %5.1fs %5.1fs %5.1f%% %5.1f%%\"\n % (\n v,\n scan_fct_time,\n scan_op_time,\n scan_fct_time / v * 100,\n scan_op_time / v * 100,\n ),\n node,\n file=file,\n )\n else:\n print(\n (\" The node took 0s, so we can not \" \"compute the overhead\"),\n node,\n file=file,\n )\n if total_super_scan_time == 0:\n print(\" No scan have its inner profile enabled.\", file=file)\n else:\n print(\n \"total %5.1fs %5.1fs %5.1fs %5.1f%% %5.1f%%\"\n % (\n total_super_scan_time,\n total_scan_fct_time,\n total_scan_op_time,\n total_scan_fct_time / total_super_scan_time * 100,\n total_scan_op_time / total_super_scan_time * 100,\n ),\n file=file,\n )\n",
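A minimal sketch of how the "Scan overhead" table printed by `profile_printer` gets populated, assuming the standard `aesara.scan` and `aesara.function` entry points: as the warning text in that function notes, profiling has to be enabled both on the outer function and on each inner scan. The `f.profile.summary()` call is assumed here as the usual way to print the collected stats.

    import numpy as np
    import aesara
    import aesara.tensor as at

    x = at.dvector("x")
    # The inner scan must be profiled explicitly, in addition to the outer function.
    ys, updates = aesara.scan(fn=lambda v: 2 * v, sequences=x, profile=True)
    f = aesara.function([x], ys, updates=updates, profile=True)

    f(np.arange(4, dtype="float64"))
    # Prints the regular profile plus the "Scan overhead:" section emitted by
    # profile_printer for every Scan node with a non-zero runtime.
    f.profile.summary()

If only the outer function is profiled, the printer instead emits the "One scan node do not have its inner profile enabled" notice seen in the code above.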
"import builtins\nimport warnings\n\nimport numpy as np\n\nfrom aesara import config, printing\nfrom aesara import scalar as aes\nfrom aesara.gradient import DisconnectedType\nfrom aesara.graph.basic import Apply, Variable\nfrom aesara.graph.op import COp, Op\nfrom aesara.graph.params_type import ParamsType\nfrom aesara.graph.type import Generic\nfrom aesara.misc.safe_asarray import _asarray\nfrom aesara.printing import pprint\nfrom aesara.scalar.basic import BinaryScalarOp\nfrom aesara.tensor.basic import (\n alloc,\n arange,\n as_tensor_variable,\n cast,\n concatenate,\n constant,\n patternbroadcast,\n stack,\n switch,\n)\nfrom aesara.tensor.elemwise import (\n CAReduce,\n CAReduceDtype,\n DimShuffle,\n Elemwise,\n scalar_elemwise,\n)\nfrom aesara.tensor.shape import shape\nfrom aesara.tensor.type import (\n complex_dtypes,\n continuous_dtypes,\n discrete_dtypes,\n int_dtypes,\n integer_dtypes,\n tensor,\n uint_dtypes,\n)\nfrom aesara.tensor.type_other import NoneConst\nfrom aesara.tensor.utils import as_list\nfrom aesara.tensor.var import TensorConstant, _tensor_py_operators\n\n\n# We capture the builtins that we are going to replace to follow the numpy API\n_abs = builtins.abs\n\n\nif int(config.tensor__cmp_sloppy) > 1:\n # This config variable is a quick-and-dirty way to get low-precision\n # comparisons. For a more precise setting of these tolerances set\n # them explicitly in your user code by assigning, for example,\n # \"aesara.tensor.math.float32_atol = ...\"\n\n # When config.tensor__cmp_sloppy>1 we are even more sloppy. This is\n # useful to test the GPU as they don't use extended precision and\n # this cause some difference bigger then the normal sloppy.\n float16_atol = 1e-2\n float16_rtol = 5e-2\n\n float32_atol = 5e-4\n float32_rtol = 1e-3\n\n float64_rtol = 1e-4\n float64_atol = 1e-3\nelif int(config.tensor__cmp_sloppy):\n float16_atol = 5e-3\n float16_rtol = 1e-2\n\n float32_atol = 1e-4\n float32_rtol = 1e-3\n\n float64_rtol = 1e-4\n float64_atol = 1e-3\nelse:\n # If you change those value in test don't forget to put them back\n # when the test end. 
Don't forget the case when the test fail.\n float16_atol = 1e-3\n float16_rtol = 1e-3\n\n float32_atol = 1e-5\n float32_rtol = 1e-5\n\n # defaults in numpy.allclose\n # Don't be more strict then numpy rtol\n # It cause useless error.\n float64_rtol = 1.0000000000000001e-05\n float64_atol = 1e-8\n\n\ndef _get_atol_rtol(a, b):\n tiny = (\"float16\",)\n narrow = (\"float32\", \"complex64\")\n if (str(a.dtype) in tiny) or (str(b.dtype) in tiny):\n atol = float16_atol\n rtol = float16_rtol\n elif (str(a.dtype) in narrow) or (str(b.dtype) in narrow):\n atol = float32_atol\n rtol = float32_rtol\n else:\n atol = float64_atol\n rtol = float64_rtol\n return atol, rtol\n\n\ndef _allclose(a, b, rtol=None, atol=None):\n a = np.asarray(a)\n b = np.asarray(b)\n atol_, rtol_ = _get_atol_rtol(a, b)\n if rtol is not None:\n rtol_ = rtol\n if atol is not None:\n atol_ = atol\n\n return np.allclose(a, b, atol=atol_, rtol=rtol_)\n\n\nclass MaxAndArgmax(COp):\n \"\"\"\n Calculate the max and argmax over a given axis or over all axes.\n\n \"\"\"\n\n nin = 2 # tensor, axis\n nout = 2 # max val, max idx\n E_axis = \"invalid axis\"\n params_type = Generic()\n __props__ = (\"axis\",)\n _f16_ok = True\n\n def __init__(self, axis):\n assert isinstance(axis, list)\n self.axis = tuple(axis)\n\n def get_params(self, node):\n return self.axis\n\n def make_node(self, x):\n x = as_tensor_variable(x)\n\n # We keep the original broadcastable flags for dimensions on which\n # we do not perform the max / argmax.\n all_axes = set(self.axis)\n broadcastable = [\n b for i, b in enumerate(x.type.broadcastable) if i not in all_axes\n ]\n inputs = [x]\n outputs = [\n tensor(x.type.dtype, broadcastable, name=\"max\"),\n tensor(\"int64\", broadcastable, name=\"argmax\"),\n ]\n return Apply(self, inputs, outputs)\n\n def perform(self, node, inp, outs, params):\n x = inp[0]\n axes = params\n max, max_idx = outs\n if axes is None:\n axes = tuple(range(x.ndim))\n else:\n axes = tuple(int(ax) for ax in axes)\n max[0] = _asarray(np.max(x, axes), dtype=node.outputs[0].dtype)\n # Numpy does not support multiple axes for argmax\n # Work around\n keep_axes = np.array([i for i in range(x.ndim) if i not in axes], dtype=\"int64\")\n # Not-reduced axes in front\n transposed_x = np.transpose(x, np.concatenate((keep_axes, axes)))\n kept_shape = transposed_x.shape[: len(keep_axes)]\n reduced_shape = transposed_x.shape[len(keep_axes) :]\n\n # Numpy.prod returns 1.0 when arg is empty, so we cast it to int64\n # Otherwise reshape would complain citing float arg\n new_shape = kept_shape + (np.prod(reduced_shape, dtype=\"int64\"),)\n reshaped_x = transposed_x.reshape(new_shape)\n\n max_idx[0] = _asarray(np.argmax(reshaped_x, axis=-1), dtype=\"int64\")\n\n def c_code(self, node, name, inp, out, sub):\n if len(self.axis) != 1 and len(self.axis) != node.inputs[0].ndim:\n raise NotImplementedError(\n \"NumPy C-API can compute max and argmax only for 1 axis or for all axes.\"\n )\n x = inp[0]\n axis = sub[\"params\"]\n max, argmax = out\n fail = sub[\"fail\"]\n ret = \"\"\"\n #if PY_MAJOR_VERSION >= 3\n #ifndef PyInt_AS_LONG\n #define PyInt_AS_LONG PyLong_AS_LONG\n #endif\n #endif\n\n int axis;\n\n if (PyTuple_GET_SIZE(%(axis)s) == PyArray_NDIM(%(x)s)) {\n axis = NPY_MAXDIMS;\n } else if(PyTuple_GET_SIZE(%(axis)s) == 1) {\n PyObject* axis_object = PyTuple_GET_ITEM(%(axis)s, 0);\n axis = (int)PyInt_AS_LONG(axis_object);\n if (axis > PyArray_NDIM(%(x)s)-1 || axis < -PyArray_NDIM(%(x)s)) {\n PyErr_SetString(PyExc_ValueError,\n \"MaxAndArgmax: bad axis 
argument\");\n %(fail)s\n }\n } else {\n PyErr_SetString(PyExc_NotImplementedError,\n \"MaxAndArgmax: NumPy C-API can compute max and argmax only for 1 axis or for all axes.\");\n %(fail)s\n }\n\n Py_CLEAR(%(max)s);\n Py_CLEAR(%(argmax)s);//todo pass them as out parameter.\n\n %(max)s = (PyArrayObject*)PyArray_Max(%(x)s, axis, NULL);\n if (%(max)s == NULL) {\n %(fail)s;\n }\n if (!PyArray_CheckExact(%(max)s)) {\n %(max)s = (PyArrayObject*)PyArray_FromAny((PyObject*)%(max)s, NULL, 0, 0, NPY_ARRAY_ENSUREARRAY, NULL);\n if(%(max)s == NULL){\n %(fail)s;\n }\n }\n\n %(argmax)s = (PyArrayObject*)PyArray_ArgMax(%(x)s, axis, NULL);\n if (%(argmax)s == NULL) {\n Py_CLEAR(%(max)s);\n %(fail)s;\n }\n if (!PyArray_CheckExact(%(argmax)s)) {\n %(argmax)s = (PyArrayObject*)PyArray_FromAny((PyObject*)%(argmax)s, NULL, 0, 0, NPY_ARRAY_ENSUREARRAY, NULL);\n if(%(argmax)s == NULL){\n %(fail)s;\n }\n }\n if (PyArray_TYPE(%(argmax)s) != NPY_INT64) {\n PyObject * tmp = PyArray_Cast(%(argmax)s, NPY_INT64);\n if (NULL == tmp){\n %(fail)s;\n }\n Py_DECREF(%(argmax)s);\n %(argmax)s = (PyArrayObject*)tmp;\n }\n \"\"\"\n return ret % locals()\n\n def c_code_cache_version(self):\n return (5,)\n\n def infer_shape(self, fgraph, node, shapes):\n ishape = shapes[0]\n rval = tuple(\n ishape[i]\n for (i, b) in enumerate(node.inputs[0].type.broadcastable)\n if i not in self.axis\n )\n return [rval, rval]\n\n def R_op(self, inputs, eval_points):\n if eval_points[0] is None:\n return [None, None]\n if len(self.axis) != 1:\n raise ValueError(\"R_op supported for arg_max only for \" \"one axis!\")\n if self.axis[0] > 1:\n raise ValueError(\"R_op supported for arg_max only when \" \" axis is 0 or 1\")\n if inputs[0].ndim != 2:\n raise ValueError(\n \"R_op supported for arg_max only when \" \" input is a matrix\"\n )\n max_vals, max_pos = self.make_node(*inputs).outputs\n if self.axis[0] == 0:\n return [eval_points[0][max_pos, arange(eval_points[0].shape[1])], None]\n else:\n return [eval_points[0][arange(eval_points[0].shape[0]), max_pos], None]\n\n def grad(self, inp, grads):\n # The strict sense mathematical gradient of the maximum function is\n # not calculated here for it is not defined at every point where some\n # coordinates are identical. 
However, since the latter set has null\n # Lebesgue measure, the result may be interpreted as weak gradient.\n\n # @note: This function should work correctly for L{vector}s.\n # (x, y), (gz, gw)\n # gz*dz/dx + gw*dw/dx, gz*dz/dy + gw*dw/dy\n # gMax * dMax/dx + gArgMax * dArgMax/dx,\n # gMax * dMax/daxis + gArgMax * dArgMax/daxis\n # g_max has one less dimension than x, so you need to complete\n # g_max to x's shape when axis=0 the broadcasting mechanism\n # does it automatically\n x = inp[0]\n axis = as_tensor_variable(self.axis)\n g_max, g_max_idx = grads\n\n g_max_disconnected = isinstance(g_max.type, DisconnectedType)\n g_max_idx_disconnected = isinstance(g_max_idx.type, DisconnectedType)\n\n # if the op is totally disconnected, so are its inputs\n if g_max_disconnected and g_max_idx_disconnected:\n return [DisconnectedType()(), DisconnectedType()()]\n\n # if the max is disconnected but the argmax is not,\n # the gradient on its inputs is zero\n if g_max_disconnected:\n return [x.zeros_like()]\n if NoneConst.equals(axis):\n axis_ = list(range(x.ndim))\n else:\n axis_ = axis\n xmax = max(x, axis_)\n\n # Raise the g_max and xmax to the same number of dim as the input.\n pattern = []\n out_dim = 0\n if NoneConst.equals(axis):\n # We are taking the max/argmax over all dimensions.\n axis = None\n for i in range(x.ndim):\n if axis is None or i in axis.data:\n pattern.append(\"x\")\n else:\n pattern.append(out_dim)\n out_dim += 1\n g_max_pad = DimShuffle(g_max.broadcastable, pattern)(g_max)\n xmax_pad = DimShuffle(xmax.broadcastable, pattern)(xmax)\n\n # Set the grad to the correct position.\n g_x = eq(xmax_pad, x) * g_max_pad\n return (g_x,)\n\n\nclass Argmax(COp):\n \"\"\"\n Calculate the argmax over a given axis or over all axes.\n \"\"\"\n\n nin = 2 # tensor, axis\n nout = 1\n E_axis = \"invalid axis\"\n __props__ = (\"axis\",)\n _f16_ok = True\n\n params_type = ParamsType(c_axis=aes.int64)\n\n def __init__(self, axis):\n if axis is not None:\n axis = tuple(axis)\n self.axis = tuple(axis)\n\n def get_params(self, node):\n if self.axis is not None and len(self.axis) == 1:\n c_axis = np.int64(self.axis[0])\n else:\n # The value here doesn't matter, it won't be used\n c_axis = np.int64(-1)\n return self.params_type.get_params(c_axis=c_axis)\n\n def make_node(self, x, axis=None):\n x = as_tensor_variable(x)\n if self.axis is None:\n all_axes = list(range(x.ndim))\n else:\n all_axes = self.axis\n inputs = [x]\n\n # We keep the original broadcastable flags for dimensions on which\n # we do not perform the argmax.\n broadcastable = [\n b for i, b in enumerate(x.type.broadcastable) if i not in all_axes\n ]\n outputs = [tensor(\"int64\", broadcastable, name=\"argmax\")]\n return Apply(self, inputs, outputs)\n\n def prepare_node(self, node, storage_map, compute_map, impl):\n if len(node.inputs) == 2:\n raise ValueError(\n \"You are trying to compile a graph with an old Argmax node. 
Either reoptimize your graph or rebuild it to get the new node format.\"\n )\n\n def perform(self, node, inp, outs, params):\n (x,) = inp\n axes = self.axis\n (max_idx,) = outs\n if axes is None:\n axes = tuple(range(x.ndim))\n\n # Numpy does not support multiple axes for argmax\n # Work around\n keep_axes = np.array([i for i in range(x.ndim) if i not in axes], dtype=\"int64\")\n # Not-reduced axes in front\n transposed_x = np.transpose(x, np.concatenate((keep_axes, axes)))\n kept_shape = transposed_x.shape[: len(keep_axes)]\n reduced_shape = transposed_x.shape[len(keep_axes) :]\n new_shape = kept_shape + (np.prod(reduced_shape),)\n reshaped_x = transposed_x.reshape(new_shape)\n\n max_idx[0] = _asarray(np.argmax(reshaped_x, axis=-1), dtype=\"int64\")\n\n def c_code(self, node, name, inp, out, sub):\n (x,) = inp\n (argmax,) = out\n fail = sub[\"fail\"]\n params = sub[\"params\"]\n if self.axis is None:\n axis_code = \"axis = NPY_MAXDIMS;\"\n else:\n if len(self.axis) > 1:\n raise NotImplementedError()\n # params is only used here for now\n axis_code = (\n \"\"\"\n axis = %(params)s->c_axis;\n if(axis > PyArray_NDIM(%(x)s)-1 || axis < -PyArray_NDIM(%(x)s)){\n PyErr_SetString(PyExc_ValueError,\n \"Argmax, bad axis argument\");\n %(fail)s\n }\n \"\"\"\n % locals()\n )\n ret = \"\"\"\n int axis;\n\n Py_CLEAR(%(argmax)s);//todo pass them as out parameter.\n %(axis_code)s\n\n %(argmax)s = (PyArrayObject*)PyArray_ArgMax(%(x)s, axis, NULL);\n if(%(argmax)s == NULL){\n %(fail)s;\n }\n if(!PyArray_CheckExact(%(argmax)s)){\n %(argmax)s = (PyArrayObject*)PyArray_FromAny((PyObject*)%(argmax)s, NULL, 0, 0, NPY_ARRAY_ENSUREARRAY, NULL);\n if(%(argmax)s == NULL){\n %(fail)s;\n }\n }\n if(PyArray_TYPE(%(argmax)s) != NPY_INT64){\n PyObject * tmp = PyArray_Cast(%(argmax)s, NPY_INT64);\n if (NULL == tmp){\n %(fail)s;\n }\n Py_DECREF(%(argmax)s);\n %(argmax)s = (PyArrayObject*)tmp;\n }\n \"\"\"\n return ret % locals()\n\n def c_code_cache_version(self):\n return (1,)\n\n def infer_shape(self, fgraph, node, shapes):\n (ishape,) = shapes\n if self.axis is None:\n return [()]\n rval = tuple(\n [\n ishape[i]\n for (i, b) in enumerate(node.inputs[0].type.broadcastable)\n if i not in self.axis\n ]\n )\n return [rval]\n\n def grad(self, inp, grads):\n (x,) = inp\n\n return [x.zeros_like()]\n\n\ndef makeKeepDims(x, y, axis):\n \"\"\"\n Reintroduces in y with length one the axes of x which have been left out\n in a prior reduction of x. 
With this option, the resulting tensor will\n broadcast correctly against the original tensor x.\n\n \"\"\"\n x = as_tensor_variable(x)\n y = as_tensor_variable(y)\n\n if axis is None:\n axis = list(range(x.type.ndim))\n elif isinstance(axis, (int, np.integer)):\n axis = [axis]\n elif isinstance(axis, np.ndarray) and axis.ndim == 0:\n axis = [int(axis)]\n else:\n axis = [int(a) for a in axis]\n newaxis = []\n for a in axis:\n if not isinstance(a, int):\n raise ValueError(\"keepdims option can be used only with constant axis\")\n if a < 0:\n a += x.type.ndim\n newaxis.append(a)\n i = 0\n new_dims = []\n for j, _ in enumerate(x.type.broadcastable):\n if j in newaxis:\n new_dims.append(\"x\")\n else:\n new_dims.append(i)\n i += 1\n return DimShuffle(y.type.broadcastable, new_dims)(y)\n\n\ndef check_and_normalize_axes(x, axis):\n \"\"\"Check axes, normalize and convert them to a Python list of integers.\n\n Parameters\n ----------\n x: TensorVariable\n axis: int, tuple or list of integers\n\n Returns\n -------\n axis: list of integers\n Return an empty list if argument is None.\n\n \"\"\"\n x = as_tensor_variable(x)\n if axis is None:\n axis = []\n elif isinstance(axis, (int, np.integer)) or (\n isinstance(axis, np.ndarray) and axis.ndim == 0\n ):\n axis = [int(axis)]\n elif isinstance(axis, (tuple, list, np.ndarray)):\n axis = [int(i) for i in axis]\n elif isinstance(axis, Variable):\n if NoneConst.equals(axis):\n axis = []\n elif not isinstance(axis, TensorConstant):\n raise TypeError(f\"Computation needs a constant axis. Got {axis}\")\n else:\n assert axis.dtype in integer_dtypes\n if isinstance(axis.data, (int, np.integer)) or (\n isinstance(axis.data, np.ndarray) and axis.data.ndim == 0\n ):\n axis = [int(axis.data)]\n elif isinstance(axis.data, (list, np.ndarray)):\n axis = [int(i) for i in axis.data]\n else:\n raise TypeError(\n f\"Axis must be an integer, tuple, list of integers or a TensorVariable. Got {axis}\"\n )\n if len(axis) > 0:\n for i in range(len(axis)):\n if axis[i] < 0:\n axis[i] += x.type.ndim\n if axis[i] < 0 or axis[i] >= x.type.ndim:\n raise ValueError(\n f\"Computation needs a valid axis number for {int(x.type.ndim)}-D tensor. Got {int(axis[i])}\"\n )\n axis = list(set(axis))\n axis.sort()\n return axis\n\n\ndef max_and_argmax(a, axis=None, keepdims=False):\n \"\"\"\n Returns maximum elements and their indices obtained by iterating over\n given axis.\n\n When axis is None (the default value), the max is performed\n over the flattened tensor.\n\n Parameters\n ----------\n keepdims : bool\n If this is set to True, the axes which are reduced are left in\n the result as dimensions with size one. 
With this option, the result\n will broadcast correctly against the original tensor.\n\n \"\"\"\n # Check axis and convert it to a Python list of integers.\n # Axis will be used as an op param of MaxAndArgmax.\n a = as_tensor_variable(a)\n axis = check_and_normalize_axes(a, axis)\n if len(axis) == 0:\n axis = list(range(a.type.ndim))\n out, argout = MaxAndArgmax(axis)(a)\n\n if keepdims:\n out = makeKeepDims(a, out, axis)\n argout = makeKeepDims(a, argout, axis)\n return [out, argout]\n\n\nclass NonZeroCAReduce(CAReduce):\n def _c_all(self, node, name, inames, onames, sub):\n decl, checks, alloc, loop, end = super()._c_all(node, name, inames, onames, sub)\n\n # We add an additional check for zero-sized dimensions (This seems like\n # something that could enabled in `elemwise_cgen.make_checks`.)\n iname = inames[0]\n\n axis = self.axis\n if axis is None:\n axis = list(range(len(node.inputs[0].type.broadcastable)))\n\n pattern = [0] * len(node.inputs[0].broadcastable)\n for i in axis:\n pattern[i] = 1\n\n pattern_ = str(pattern)[1:-1]\n\n decl += f\"\"\"int tosum[]={{{pattern_}}};\"\"\"\n alloc += f\"\"\"\n for(int i=0;i<PyArray_NDIM({iname});i++){{\n if(PyArray_DIMS({iname})[i]==0 && tosum[i]){{\n PyErr_Format(PyExc_ValueError,\n \"Input of CAReduce{{{node.op.scalar_op}}} has zero-size on axis %%d\",i);\n {sub[\"fail\"]};\n }}\n }}\n \"\"\"\n return decl, checks, alloc, loop, end\n\n\nclass Max(NonZeroCAReduce):\n nfunc_spec = (\"max\", 1, 1)\n\n def __init__(self, axis):\n super().__init__(aes.scalar_maximum, axis)\n\n\nclass Min(NonZeroCAReduce):\n nfunc_spec = (\"min\", 1, 1)\n\n def __init__(self, axis):\n super().__init__(aes.scalar_minimum, axis)\n\n\ndef max(x, axis=None, keepdims=False):\n \"\"\"\n Returns maximum elements obtained by iterating over given axis.\n\n When axis is None (the default value), the max is performed\n over the flattened tensor.\n\n Parameters\n ----------\n keepdims: bool\n If this is set to True, the axes which are reduced are left in\n the result as dimensions with size one. With this option, the result\n will broadcast correctly against the original tensor.\n\n Notes\n -----\n We return an error as numpy when we reduce a dim with a shape of 0.\n\n \"\"\"\n\n # We have a choice of implementing this call with the\n # CAReduce op or the MaxAndArgmax op.\n\n # MaxAndArgmax supports grad and Rop, so we prefer to use that.\n # CAReduce is faster, but optimizations will replace MaxAndArgmax[0]\n # with CAReduce at compile time, so at this stage the important\n # thing is supporting all user interface features, not speed.\n # Some cases can be implemented only with CAReduce.\n\n # We thus prefer to use MaxAndArgmax, if possible. It does not\n # support all axis arguments, so we may need to fall back to CAReduce.\n\n try:\n out = max_and_argmax(x, axis)[0]\n except Exception:\n out = Max(axis)(x)\n\n if keepdims:\n out = makeKeepDims(x, out, axis)\n return out\n\n\ndef argmax(x, axis=None, keepdims=False):\n \"\"\"\n Returns indices of maximum elements obtained by iterating over given axis.\n\n When axis is None (the default value), the argmax is performed\n over the flattened tensor.\n\n Parameters\n ----------\n keepdims : bool\n If this is set to True, the axes which are reduced are left in\n the result as dimensions with size one. 
With this option, the result\n will broadcast correctly against the original tensor.\n\n \"\"\"\n argout = max_and_argmax(x, axis)[1]\n\n if keepdims:\n argout = makeKeepDims(x, argout, axis)\n return argout\n\n\ndef min(x, axis=None, keepdims=False):\n \"\"\"\n Returns minimum elements obtained by iterating over given axis.\n\n When axis is None (the default value), the min is performed\n over the flattened tensor.\n\n Parameters\n ----------\n keepdims: bool\n If this is set to True, the axes which are reduced are left in\n the result as dimensions with size one. With this option, the result\n will broadcast correctly against the original tensor.\n\n \"\"\"\n x = as_tensor_variable(x)\n str_x_type = str(x.dtype)\n if str_x_type.startswith(\"float\") or str_x_type in int_dtypes:\n return -max(-x, axis=axis, keepdims=keepdims)\n elif str_x_type in uint_dtypes:\n itype = np.iinfo(x.dtype)\n max_val = np.array(itype.max, dtype=itype.dtype)\n return max_val - max(max_val - x, axis=axis, keepdims=keepdims)\n elif str_x_type == \"bool\":\n return ~max(~x, axis=axis, keepdims=keepdims)\n else:\n # Be careful about unsigned integers, complex\n raise NotImplementedError()\n\n\ndef argmin(x, axis=None, keepdims=False):\n \"\"\"\n Returns indices of minimum elements obtained by iterating over given axis.\n\n When axis is None (the default value), the argmin is performed\n over the flattened tensor.\n\n Parameters\n ----------\n keepdims: bool\n If this is set to True, the axes which are reduced are left in\n the result as dimensions with size one. With this option, the result\n will broadcast correctly against the original tensor.\n\n \"\"\"\n x = as_tensor_variable(x)\n str_x_type = str(x.dtype)\n if str_x_type.startswith(\"float\") or str_x_type in int_dtypes:\n return argmax(-x, axis=axis, keepdims=keepdims)\n elif str_x_type in uint_dtypes:\n itype = np.iinfo(x.dtype)\n return argmax(itype.max - x, axis=axis, keepdims=keepdims)\n elif str_x_type == \"bool\":\n return argmax(~x, axis=axis, keepdims=keepdims)\n else:\n # Be careful about unsigned integers, complex\n raise NotImplementedError()\n\n\ndef smallest(*args):\n \"\"\"\n Return the [elementwise] smallest of a variable number of arguments.\n\n Like python's min.\n\n \"\"\"\n if len(args) == 2:\n a, b = args\n return switch(a < b, a, b)\n else:\n return min(stack(args), axis=0)\n\n\ndef largest(*args):\n \"\"\"\n Return the [elementwise] largest of a variable number of arguments.\n\n Like python's max.\n\n \"\"\"\n if len(args) == 2:\n a, b = args\n return switch(a > b, a, b)\n else:\n return max(stack(args), axis=0)\n\n\n@scalar_elemwise\ndef lt(a, b):\n \"\"\"a < b\"\"\"\n\n\n@scalar_elemwise\ndef gt(a, b):\n \"\"\"a > b\"\"\"\n\n\n@scalar_elemwise\ndef le(a, b):\n \"\"\"a <= b\"\"\"\n\n\n@scalar_elemwise\ndef ge(a, b):\n \"\"\"a >= b\"\"\"\n\n\n@scalar_elemwise\ndef eq(a, b):\n \"\"\"a == b\"\"\"\n\n\n@scalar_elemwise\ndef neq(a, b):\n \"\"\"a != b\"\"\"\n\n\n@scalar_elemwise\ndef isnan(a):\n \"\"\"isnan(a)\"\"\"\n\n\n# Rename isnan to isnan_ to allow to bypass it when not needed.\n# glibc 2.23 don't allow isnan on int, so we remove it from the graph.\nisnan_ = isnan\n\n\ndef isnan(a):\n \"\"\"isnan(a)\"\"\"\n a = as_tensor_variable(a)\n if a.dtype in discrete_dtypes:\n return alloc(\n np.asarray(False, dtype=\"bool\"), *[a.shape[i] for i in range(a.ndim)]\n )\n return isnan_(a)\n\n\n@scalar_elemwise\ndef isinf(a):\n \"\"\"isinf(a)\"\"\"\n\n\n# Rename isnan to isnan_ to allow to bypass it when not needed.\n# glibc 2.23 don't allow isnan 
on int, so we remove it from the graph.\nisinf_ = isinf\n\n\ndef isinf(a):\n \"\"\"isinf(a)\"\"\"\n a = as_tensor_variable(a)\n if a.dtype in discrete_dtypes:\n return alloc(\n np.asarray(False, dtype=\"bool\"), *[a.shape[i] for i in range(a.ndim)]\n )\n return isinf_(a)\n\n\ndef allclose(a, b, rtol=1.0e-5, atol=1.0e-8, equal_nan=False):\n \"\"\"\n Implement Numpy's ``allclose`` on tensors.\n\n ``absolute(a - b) <= (atol + rtol * absolute(b))``\n\n Parameters\n ----------\n a : tensor\n Input to compare.\n b : tensor\n Input to compare.\n rtol : float\n The relative tolerance parameter.\n atol : float\n The absolute tolerance parameter.\n equal_nan: bool\n Whether to consider nan's in the same place to be close.\n\n Returns\n -------\n bool\n A boolean value (of type int8 returned by the tensor elementwise `all`\n function) whether all elements in a and b are in the tolerance range\n defined above.\n\n Notes\n -----\n Not a symmetric equation. See Numpy's documentation.\n\n \"\"\"\n return all(isclose(a, b, rtol, atol, equal_nan))\n\n\ndef isclose(a, b, rtol=1.0e-5, atol=1.0e-8, equal_nan=False):\n \"\"\"\n Implements Numpy's ``isclose`` on tensors.\n\n The tolerance values are positive, typically very small numbers. The\n relative difference (`rtol` * abs(`b`)) and the absolute difference\n `atol` are added together to compare against the absolute difference\n between `a` and `b`.\n\n ``absolute(a - b) <= (atol + rtol * absolute(b))``\n\n Parameters\n ----------\n a : tensor\n Input to compare.\n b : tensor\n Input to compare.\n rtol : float\n The relative tolerance parameter.\n atol : float\n The absolute tolerance parameter.\n equal_nan : bool\n Whether to consider nan's in the same place to be close\n\n Returns\n -------\n int8\n A boolean (int8) array where two arrays are element-wise equal\n within a tolerance.\n\n Notes\n -----\n Not a symmetric equation. 
See Numpy's documentation.\n\n Examples\n --------\n >>> import aesara\n >>> import numpy as np\n >>> a = _asarray([1e10, 1e-7], dtype=\"float64\")\n >>> b = _asarray([1.00001e10, 1e-8], dtype=\"float64\")\n >>> aesara.tensor.isclose(a, b).eval()\n array([1, 0], dtype=int8)\n >>> a = _asarray([1e10, 1e-8], dtype=\"float64\")\n >>> b = _asarray([1.00001e10, 1e-9], dtype=\"float64\")\n >>> aesara.tensor.isclose(a, b).eval()\n array([1, 1], dtype=int8)\n >>> a = _asarray([1e10, 1e-8], dtype=\"float64\")\n >>> b = _asarray([1.0001e10, 1e-9], dtype=\"float64\")\n >>> aesara.tensor.isclose(a, b).eval()\n array([0, 1], dtype=int8)\n >>> a = _asarray([1.0, np.nan], dtype=\"float64\")\n >>> b = _asarray([1.0, np.nan], dtype=\"float64\")\n >>> aesara.tensor.isclose(a, b).eval()\n array([1, 0], dtype==int8)\n >>> a = _asarray([1.0, np.nan], dtype=\"float64\")\n >>> b = _asarray([1.0, np.nan], dtype=\"float64\")\n >>> aesara.tensor.isclose(a, b, equal_nan=True).eval()\n array([1, 1], dtype==int8)\n >>> a = _asarray([1.0, np.inf], dtype=\"float64\")\n >>> b = _asarray([1.0, -np.inf], dtype=\"float64\")\n >>> aesara.tensor.isclose(a, b).eval()\n array([1, 0], dtype==int8)\n >>> a = _asarray([1.0, np.inf], dtype=\"float64\")\n >>> b = _asarray([1.0, np.inf], dtype=\"float64\")\n >>> aesara.tensor.isclose(a, b).eval()\n array([1, 1], dtype==int8)\n\n \"\"\"\n # close will be an int8 array of 1 where within tolerance\n # and 0 where not within tolerance or there was a nan or inf value.\n diff = _abs(a - b)\n tolerance = atol + rtol * _abs(b)\n close_prelim = le(diff, tolerance)\n\n a_nan = isnan(a)\n b_nan = isnan(b)\n nans = bitwise_or(a_nan, b_nan)\n\n a_inf = isinf(a)\n b_inf = isinf(b)\n infs = bitwise_or(a_inf, b_inf)\n\n nans_or_infs = bitwise_or(nans, infs)\n\n # close is now an array of 0's except where elements are not nan or inf\n # and are within the tolerance.\n close = bitwise_and(close_prelim, bitwise_not(nans_or_infs))\n\n # deal with signed inf values. 
this will make an array inf_eq of 0's\n # except where inf values have the same sign.\n both_infs = bitwise_and(a_inf, b_inf)\n inf_signs_eq = eq(a_inf * sgn(a), b_inf * sgn(b))\n inf_eq = bitwise_and(both_infs, inf_signs_eq)\n\n # now create the potential result combining close and inf_eq\n close_with_infs = bitwise_or(close, inf_eq)\n\n # deal with comparing nan's.\n if equal_nan:\n both_nans = bitwise_and(a_nan, b_nan)\n return bitwise_or(close_with_infs, both_nans)\n # otherwise nan's aren't considered close.\n else:\n return close_with_infs\n\n\n##########################\n# Bit-wise\n##########################\n\n\n@scalar_elemwise\ndef and_(a, b):\n \"\"\"bitwise a & b\"\"\"\n\n\nbitwise_and = and_ # numpy name for it\n\n\n@scalar_elemwise\ndef or_(a, b):\n \"\"\"bitwise a | b\"\"\"\n\n\nbitwise_or = or_ # numpy name for it\n\n\n@scalar_elemwise\ndef xor(a, b):\n \"\"\"bitwise a ^ b\"\"\"\n\n\nbitwise_xor = xor # numpy name for it\n\n\n@scalar_elemwise\ndef invert(a):\n \"\"\"bitwise ~a\"\"\"\n\n\nbitwise_not = invert # numpy alias for it\n\n##########################\n# Math\n##########################\n\n\n@scalar_elemwise\ndef abs(a):\n \"\"\"|`a`|\"\"\"\n\n\n# These are deprecated and will be removed\nabs_ = abs\n\n\npprint.assign(abs, printing.PatternPrinter((\"|%(0)s|\", -1000)))\n\n\n@scalar_elemwise\ndef exp(a):\n \"\"\"e^`a`\"\"\"\n\n\n@scalar_elemwise\ndef exp2(a):\n \"\"\"2^`a`\"\"\"\n\n\n@scalar_elemwise\ndef expm1(a):\n \"\"\"e^`a` - 1\"\"\"\n\n\n@scalar_elemwise\ndef neg(a):\n \"\"\"-a\"\"\"\n\n\n@scalar_elemwise\ndef reciprocal(a):\n \"\"\"1.0/a\"\"\"\n\n\n# This is deprecated and will be removed\ninv = reciprocal\n\n\n@scalar_elemwise\ndef log(a):\n \"\"\"base e logarithm of a\"\"\"\n\n\n@scalar_elemwise\ndef log2(a):\n \"\"\"base 2 logarithm of a\"\"\"\n\n\n@scalar_elemwise\ndef log10(a):\n \"\"\"base 10 logarithm of a\"\"\"\n\n\n@scalar_elemwise\ndef log1p(a):\n \"\"\"log(1+a)\"\"\"\n\n\n@scalar_elemwise\ndef sgn(a):\n \"\"\"sign of a\"\"\"\n\n\n@scalar_elemwise\ndef ceil(a):\n \"\"\"ceiling of a\"\"\"\n\n\n@scalar_elemwise\ndef floor(a):\n \"\"\"floor of a\"\"\"\n\n\n@scalar_elemwise\ndef trunc(a):\n \"\"\"trunc of a\"\"\"\n\n\ndef iround(a, mode=None):\n \"\"\"cast(round(a,mode),'int64')\"\"\"\n return cast(round(a, mode), \"int64\")\n\n\ndef round(a, mode=None):\n \"\"\"round_mode(a) with mode in [half_away_from_zero, half_to_even].\n Default to half_to_even.\"\"\"\n if mode is None:\n mode = \"half_to_even\"\n if config.warn__round:\n warnings.warn(\n \"aesara.tensor.round() changed its default from\"\n \" `half_away_from_zero` to `half_to_even` to have\"\n \" the same default as NumPy. 
Use the Aesara flag\"\n \" `warn__round=False` to disable this warning.\"\n )\n if mode == \"half_away_from_zero\":\n return round_half_away_from_zero(a)\n elif mode == \"half_to_even\":\n return round_half_to_even(a)\n else:\n raise Exception(f\"round mode {mode} is not implemented.\")\n\n\n@scalar_elemwise\ndef round_half_to_even(a):\n \"\"\"round_half_to_even(a)\"\"\"\n\n\n@scalar_elemwise\ndef round_half_away_from_zero(a):\n \"\"\"round_half_away_from_zero(a)\"\"\"\n\n\n@scalar_elemwise\ndef sqr(a):\n \"\"\"square of a\"\"\"\n\n\n# alias to sqr, included to maintain similarity with numpy interface\nsquare = sqr\n\n\ndef cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, aweights=None):\n \"\"\"Calculate the covariance matrix.\n\n Covariance indicates the level to which two variables vary together.\n If we examine N-dimensional samples, :math:`m = [x_1, x_2, ... x_N]^T`,\n then the covariance matrix element :math:`C_{ij}` is the covariance of\n :math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance\n of :math:`x_i`. Code and docstring ported from numpy.\n\n Parameters\n ==========\n m : array_like\n A 2-D array containing multiple variables and observations.\n Each row of `m` represents a variable, and each column is\n observations of all those variables.\n y : array_like, optional\n An additional set of variables and observations. `y` has the same form\n as that of `m`.\n rowvar : bool, optional\n If `rowvar` is True (default), then each row represents a\n variable, with observations in the columns. Otherwise, the relationship\n is transposed: each column represents a variable, while the rows\n contain observations.\n bias : bool, optional\n Default normalization (False) is by ``(N - 1)``, where ``N`` is the\n number of observations given (unbiased estimate). If `bias` is True, then\n normalization is by ``N``. 
These values can be overridden by using the\n keyword ``ddof``.\n ddof : int, optional\n If not ``None`` the default value implied by `bias` is overridden.\n The default value is ``None``.\n\n Returns\n =======\n out : The covariance matrix of the variables.\n\n \"\"\"\n\n if fweights is not None:\n raise NotImplementedError(\"fweights are not implemented\")\n if aweights is not None:\n raise NotImplementedError(\"aweights are not implemented\")\n\n if not rowvar and m.shape[0] != 1:\n m = m.T\n\n if y is not None:\n if not rowvar and y.shape[0] != 1:\n y = y.T\n m = concatenate((m, y), axis=0)\n\n if ddof is None:\n if not bias:\n ddof = 1\n else:\n ddof = 0\n\n # Determine the normalization\n fact = m.shape[1] - ddof\n\n m -= m.mean(axis=1, keepdims=1)\n c = m.dot(m.T)\n c *= constant(1) / fact\n return c.squeeze()\n\n\n@scalar_elemwise\ndef sqrt(a):\n \"\"\"square root of a\"\"\"\n\n\n@scalar_elemwise\ndef deg2rad(a):\n \"\"\"convert degree a to radian\"\"\"\n\n\n@scalar_elemwise\ndef rad2deg(a):\n \"\"\"convert radian a to degree\"\"\"\n\n\n@scalar_elemwise\ndef cos(a):\n \"\"\"cosine of a\"\"\"\n\n\n@scalar_elemwise\ndef arccos(a):\n \"\"\"arccosine of a\"\"\"\n\n\n@scalar_elemwise\ndef sin(a):\n \"\"\"sine of a\"\"\"\n\n\n@scalar_elemwise\ndef arcsin(a):\n \"\"\"arcsine of a\"\"\"\n\n\n@scalar_elemwise\ndef tan(a):\n \"\"\"tangent of a\"\"\"\n\n\n@scalar_elemwise\ndef arctan(a):\n \"\"\"arctangent of a\"\"\"\n\n\n@scalar_elemwise\ndef arctan2(a, b):\n \"\"\"arctangent of a / b\"\"\"\n\n\n@scalar_elemwise\ndef cosh(a):\n \"\"\"hyperbolic cosine of a\"\"\"\n\n\n@scalar_elemwise\ndef arccosh(a):\n \"\"\"hyperbolic arc cosine of a\"\"\"\n\n\n@scalar_elemwise\ndef sinh(a):\n \"\"\"hyperbolic sine of a\"\"\"\n\n\n@scalar_elemwise\ndef arcsinh(a):\n \"\"\"hyperbolic arc sine of a\"\"\"\n\n\n@scalar_elemwise\ndef tanh(a):\n \"\"\"hyperbolic tangent of a\"\"\"\n\n\n@scalar_elemwise\ndef arctanh(a):\n \"\"\"hyperbolic arc tangent of a\"\"\"\n\n\n@scalar_elemwise\ndef erf(a):\n \"\"\"error function\"\"\"\n\n\n@scalar_elemwise\ndef erfc(a):\n \"\"\"complementary error function\"\"\"\n\n\n@scalar_elemwise\ndef erfcx(a):\n \"\"\"scaled complementary error function\"\"\"\n\n\n@scalar_elemwise\ndef erfinv(a):\n \"\"\"inverse error function\"\"\"\n\n\n@scalar_elemwise\ndef erfcinv(a):\n \"\"\"inverse complementary error function\"\"\"\n\n\n@scalar_elemwise\ndef gamma(a):\n \"\"\"gamma function\"\"\"\n\n\n@scalar_elemwise\ndef gammaln(a):\n \"\"\"log gamma function\"\"\"\n\n\n@scalar_elemwise\ndef psi(a):\n \"\"\"derivative of log gamma function\"\"\"\n\n\n@scalar_elemwise\ndef tri_gamma(a):\n \"\"\"second derivative of the log gamma function\"\"\"\n\n\n@scalar_elemwise\ndef chi2sf(x, k):\n \"\"\"chi squared survival function\"\"\"\n\n\n@scalar_elemwise\ndef gammainc(k, x):\n \"\"\"Regularized lower gamma function\"\"\"\n\n\n@scalar_elemwise\ndef gammaincc(k, x):\n \"\"\"Regularized upper gamma function\"\"\"\n\n\n@scalar_elemwise\ndef gammau(k, x):\n \"\"\"Upper incomplete gamma function.\"\"\"\n\n\n@scalar_elemwise\ndef gammal(k, x):\n \"\"\"Lower incomplete gamma function.\"\"\"\n\n\n@scalar_elemwise\ndef j0(x):\n \"\"\"Bessel function of the first kind of order 0.\"\"\"\n\n\n@scalar_elemwise\ndef j1(x):\n \"\"\"Bessel function of the first kind of order 1.\"\"\"\n\n\n@scalar_elemwise\ndef jv(v, x):\n \"\"\"Bessel function of the first kind of order v (real).\"\"\"\n\n\n@scalar_elemwise\ndef i0(x):\n \"\"\"Modified Bessel function of the first kind of order 0.\"\"\"\n\n\n@scalar_elemwise\ndef 
i1(x):\n \"\"\"Modified Bessel function of the first kind of order 1.\"\"\"\n\n\n@scalar_elemwise\ndef iv(v, x):\n \"\"\"Modified Bessel function of the first kind of order v (real).\"\"\"\n\n\n@scalar_elemwise\ndef sigmoid(x):\n \"\"\"Logistic sigmoid function (1 / (1 + exp(x)), also known as expit or inverse logit\"\"\"\n\n\nexpit = sigmoid\n\n\n@scalar_elemwise\ndef softplus(x):\n \"\"\"Compute log(1 + exp(x)), also known as softplus or log1pexp\"\"\"\n\n\nlog1pexp = softplus\n\n\n@scalar_elemwise\ndef log1mexp(x):\n \"\"\"Compute log(1 - exp(x)), also known as log1mexp\"\"\"\n\n\n@scalar_elemwise\ndef betainc(a, b, x):\n \"\"\"Regularized incomplete beta function\"\"\"\n\n\n@scalar_elemwise\ndef real(z):\n \"\"\"Return real component of complex-valued tensor `z`\"\"\"\n\n\n_tensor_py_operators.real = property(real)\n\n\n@scalar_elemwise\ndef imag(z):\n \"\"\"Return imaginary component of complex-valued tensor `z`\"\"\"\n\n\n_tensor_py_operators.imag = property(imag)\n\n\n@scalar_elemwise\ndef angle(z):\n \"\"\"Return polar-coordinate angle of complex-valued tensor `z`\"\"\"\n\n\n@scalar_elemwise # numpy.complex cannot build tensors\ndef complex(real, imag):\n \"\"\"Return complex-valued tensor with `real` and `imag` components\"\"\"\n\n\n@scalar_elemwise\ndef conj(z):\n \"\"\"Return the complex conjugate of `z`.\"\"\"\n\n\n@scalar_elemwise\ndef complex_from_polar(abs, angle):\n \"\"\"Return complex-valued tensor from polar coordinate specification.\"\"\"\n\n\nclass Mean(CAReduce):\n def __init__(self, axis=None):\n super().__init__(aes.add, axis)\n assert self.axis is None or len(self.axis) == 1\n\n def __str__(self):\n if self.axis is not None:\n return \"Mean{%s}\" % (\", \".join(str(x) for x in self.axis))\n else:\n return \"Mean\"\n\n def _output_dtype(self, idtype):\n # we want to protect against overflow\n return \"float64\"\n\n def perform(self, node, inp, out):\n (input,) = inp\n (output,) = out\n if self.axis is None:\n axis = None\n else:\n axis = self.axis[0]\n # numpy.asarray is needed as otherwise we can end up with a\n # numpy scalar.\n output[0] = np.asarray(np.mean(input, dtype=\"float64\", axis=axis))\n\n def c_code(self, node, name, inames, onames, sub):\n\n ret = super().c_code(node, name, inames, onames, sub)\n\n if self.axis is not None:\n return ret\n\n # TODO: c_code perform support only axis is None\n return (\n ret\n + f\"\"\"\n *((double *)PyArray_DATA({onames[0]})) /= PyArray_SIZE({inames[0]});\n \"\"\"\n )\n\n\n# TODO: implement the grad. When done and tested, you can make this the default\n# version.\n# def grad(self, (x,), (gout,)):\n# import pdb;pdb.set_trace()\n# return grad(mean(x, self.axis, op=False),[x])\n\n\ndef mean(input, axis=None, dtype=None, op=False, keepdims=False, acc_dtype=None):\n \"\"\"\n Computes the mean value along the given axis(es) of a tensor `input`.\n\n Parameters\n ----------\n axis : None or int or (list of int) (see `Sum`)\n Compute the mean along this axis of the tensor.\n None means all axes (like numpy).\n dtype: None or string\n Dtype to cast the result of the inner summation into.\n For instance, by default, a sum of a float32 tensor will be\n done in float64 (acc_dtype would be float64 by default),\n but that result will be casted back in float32.\n keepdims: bool\n If this is set to True, the axes which are reduced are\n left in the result as dimensions with size one. With this option,\n the result will broadcast correctly against the original tensor.\n acc_dtype: None or string\n Dtype to use for the inner summation. 
This will not\n necessarily be the dtype of the output (in particular\n if it is a discrete (int/uint) dtype, the output will\n be in a float type). If None, then we use the same rules as `sum()`.\n\n Notes\n -----\n For gpu, if you specify dtype=float32, everything will be done on the gpu.\n\n \"\"\"\n input = as_tensor_variable(input)\n if op:\n if dtype not in (None, \"float64\"):\n raise NotImplementedError(\n \"The Mean op does not support the dtype argument, \"\n \"and will always use float64. If you want to specify \"\n \"the dtype, call tensor.mean(..., op=False).\",\n dtype,\n )\n if acc_dtype not in (None, \"float64\"):\n raise NotImplementedError(\n \"The Mean op does not support the acc_dtype argument, \"\n \"and will always use float64. If you want to specify \"\n \"acc_dtype, call tensor.mean(..., op=False).\",\n dtype,\n )\n out = Mean(axis)(input)\n if keepdims:\n out = makeKeepDims(input, out, axis)\n return out\n\n if dtype is not None:\n # The summation will be done with the specified dtype.\n # sum() will complain if it is not suitable.\n sum_dtype = dtype\n else:\n sum_dtype = None\n # float16 overflows on the cast way too often\n if input.dtype == \"float16\":\n sum_dtype = \"float32\"\n\n s = sum(input, axis=axis, dtype=sum_dtype, keepdims=keepdims, acc_dtype=acc_dtype)\n shp = shape(input)\n\n # Cast shp into a float type\n # TODO Once we have a consistent casting policy, we could simply\n # use true_div.\n if s.dtype in (\"float16\", \"float32\", \"complex64\"):\n shp = cast(shp, \"float32\")\n else:\n shp = cast(shp, \"float64\")\n\n if axis is None:\n axis = list(range(input.ndim))\n elif isinstance(axis, (int, np.integer)):\n axis = [axis]\n elif isinstance(axis, np.ndarray) and axis.ndim == 0:\n axis = [int(axis)]\n else:\n axis = [int(a) for a in axis]\n\n # This sequential division will possibly be optimized by Aesara:\n for i in axis:\n s = true_div(s, shp[i])\n\n # This can happen when axis is an empty list/tuple\n if s.dtype != shp.dtype and s.dtype in discrete_dtypes:\n s = cast(s, shp.dtype)\n\n if dtype == \"float16\" or (dtype is None and input.dtype == \"float16\"):\n s = cast(s, \"float16\")\n s.name = \"mean\"\n return s\n\n\ndef var(input, axis=None, ddof=0, keepdims=False, corrected=False):\n \"\"\"\n Computes the variance along the given axis(es) of a tensor `input`.\n\n Parameters\n ----------\n axis: None or int or (list of int) (see `Sum`)\n Compute the variance along this axis of the tensor.\n None means all axes (like numpy).\n ddof: Degrees of freedom; 0 would compute the ML estimate, 1 would compute\n the unbiased estimate.\n keepdims : bool\n If this is set to True, the axes which are reduced are\n left in the result as dimensions with size one. With this option,\n the result will broadcast correctly against the original tensor.\n corrected : bool\n If this is set to True, the 'corrected_two_pass' algorithm is\n used to compute the variance.\n Refer : http://www.cs.yale.edu/publications/techreports/tr222.pdf\n\n Notes\n -----\n Default uses the two-pass algorithm (reference below).\n https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Two-pass_algorithm\n Also supports 'corrected_two_pass' algorithm (using the 'corrected' flag)\n which is numerically more stable. 
There exist other implementations that\n offer better stability, but probably slower.\n\n \"\"\"\n\n if isinstance(ddof, (bool)):\n raise ValueError(\n \"Parameter keepdims is now at index 3: (input, \\\n axis=None, ddof=0, keepdims=False, corrected=False)\"\n )\n\n input_ndim = input.type.ndim\n if axis is None:\n axis = list(range(input_ndim))\n elif isinstance(axis, (int, np.integer)):\n axis = [axis]\n elif isinstance(axis, np.ndarray) and axis.ndim == 0:\n axis = [int(axis)]\n else:\n axis = [int(a) for a in axis]\n\n # compute the axis-wise mean\n mean_input = mean(input, axis, keepdims=True)\n\n # center the input\n centered_input = input - mean_input\n\n # return the mean sqr\n two = constant(2, dtype=centered_input.dtype)\n if ddof == 0:\n v = mean((centered_input ** two), axis, keepdims=keepdims)\n else:\n shp = shape(input) - ddof\n v = sum((centered_input ** two), axis=axis, keepdims=keepdims)\n for i in axis:\n v = true_div(v, shp[i])\n\n # use 'corrected_two_pass' algorithm\n if corrected:\n if ddof == 0:\n error = mean(centered_input, axis, keepdims=keepdims) ** 2\n else:\n shp = shape(input) - ddof\n shp_inp = shape(input)\n error = sum(centered_input, axis=axis, keepdims=keepdims) ** 2\n for i in axis:\n error = true_div(error, shp[i] * shp_inp[i])\n v = v - error\n\n v.name = \"var\"\n return v\n\n\ndef std(input, axis=None, ddof=0, keepdims=False, corrected=False):\n \"\"\"\n Computes the standard deviation along the given axis(es) of a tensor `input`.\n\n Parameters\n ----------\n axis: None or int or (list of int) (see `Sum`)\n Compute the variance along this axis of the tensor.\n None means all axes (like numpy).\n ddof: Degrees of freedom; 0 would compute the ML estimate, 1 would compute\n the unbiased estimate.\n keepdims : bool\n If this is set to True, the axes which are reduced are\n left in the result as dimensions with size one. With this option,\n the result will broadcast correctly against the original tensor.\n corrected : bool\n If this is set to True, the 'corrected_two_pass' algorithm is\n used to compute the variance.\n Refer : http://www.cs.yale.edu/publications/techreports/tr222.pdf\n\n Notes\n -----\n It calls 'var()' and 'var()' uses the two-pass algorithm (reference below).\n https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Two-pass_algorithm\n Function 'var()' also supports 'corrected_two_pass' algorithm (using the\n 'corrected' flag) which is numerically more stable. There exist other\n implementations that offer better stability, but probably slower.\n\n \"\"\"\n\n if isinstance(ddof, (bool)):\n raise ValueError(\n \"Parameter keepdims is now at index 3: (input, \\\n axis=None, ddof=0, keepdims=False, corrected=False)\"\n )\n\n ret = sqrt(\n var(input=input, axis=axis, ddof=ddof, keepdims=keepdims, corrected=corrected)\n )\n ret.name = \"std\"\n return ret\n\n\n@scalar_elemwise(symbolname=\"scalar_maximum\")\ndef maximum(x, y):\n \"\"\"elemwise maximum. See max for the maximum in one tensor\"\"\"\n # see decorator for function body\n\n\n@scalar_elemwise(symbolname=\"scalar_minimum\")\ndef minimum(x, y):\n \"\"\"elemwise minimum. 
See min for the minimum in one tensor\"\"\"\n # see decorator for function body\n\n\ndef divmod(x, y):\n \"\"\"elementvise divmod, using floor_div and mod_check\"\"\"\n return floor_div(x, y), mod_check(x, y)\n\n\n@scalar_elemwise\ndef add(a, *other_terms):\n \"\"\"elementwise addition\"\"\"\n # see decorator for function body\n\n\n@scalar_elemwise\ndef sub(a, b):\n \"\"\"elementwise subtraction\"\"\"\n # see decorator for function body\n\n\n@scalar_elemwise\ndef mul(a, *other_terms):\n \"\"\"elementwise multiplication\"\"\"\n # see decorator for function body\n\n\n@scalar_elemwise\ndef true_div(a, b):\n \"\"\"elementwise [true] division (inverse of multiplication)\"\"\"\n # see decorator for function body\n\n\n@scalar_elemwise\ndef int_div(a, b):\n \"\"\"elementwise [floor] division (inverse of multiplication)\"\"\"\n # see decorator for function body\n\n\n# floor_div and int_div are the same thing\nfloor_div = int_div\n\n\ndef ceil_intdiv(a, b):\n \"\"\"\n Safely compute ceil(float_division(a, b)).\n\n Works for all dtypes, but mostly useful when a and b are int.\n\n \"\"\"\n # If a and b are int with not many significant bits, we could\n # cast them to float to avoid doing the modulo. We do not know if this\n # is faster or not. But this is not safe for int64 as the cast will\n # lose precision.\n # e.g.: cast(cast(a, scalar.upcast(a, 'float32')) / b, aes.upcast(a, b))\n\n # We cast for the case when a and b are uint*. Otherwise neq will\n # force their upcast to int.\n div = int_div(a, b)\n ret = cast(neq(a % b, 0), div.dtype) + div\n assert ret.dtype == aes.upcast(div.owner.inputs[0], div.owner.inputs[1])\n return ret\n\n\ndef mod_check(x, y):\n \"\"\"Make sure we do not try to use complex numbers.\"\"\"\n if (\n as_tensor_variable(x).dtype in complex_dtypes\n or as_tensor_variable(y).dtype in complex_dtypes\n ):\n # Currently forbidden.\n raise aes.Mod.complex_error\n else:\n return mod(x, y)\n\n\n@scalar_elemwise\ndef mod(a, b):\n \"\"\"elementwise modulo\"\"\"\n # see decorator for function body\n\n\n@scalar_elemwise\ndef pow(a, b):\n \"\"\"elementwise power\"\"\"\n # see decorator for function body\n\n\n@scalar_elemwise\ndef clip(x, min, max):\n \"\"\"\n Clip x to be between min and max.\n\n Note that when `x` is equal to the boundaries, the output is considered\n to be `x`, so at these points, the gradient of the cost wrt the output\n will be propagated to `x`, not to `min` nor `max`. In other words,\n on these points, the gradient wrt `x` will be equal to the gradient wrt\n the output, and the gradient wrt `min` and `max` will be zero.\n\n \"\"\"\n # see decorator for function body\n # for grep: clamp, bound\n\n\npprint.assign(add, printing.OperatorPrinter(\"+\", -2, \"either\"))\npprint.assign(mul, printing.OperatorPrinter(\"*\", -1, \"either\"))\npprint.assign(sub, printing.OperatorPrinter(\"-\", -2, \"left\"))\npprint.assign(neg, printing.OperatorPrinter(\"-\", 0, \"either\"))\npprint.assign(true_div, printing.OperatorPrinter(\"/\", -1, \"left\"))\npprint.assign(int_div, printing.OperatorPrinter(\"//\", -1, \"left\"))\npprint.assign(pow, printing.OperatorPrinter(\"**\", 1, \"right\"))\n\n\nclass Dot(Op):\n \"\"\"\n Computes the dot product of two variables. For two matrices, this is\n equivalent to matrix multiplication. 
For two vectors, this is the inner\n product.\n\n Notes\n -----\n Matrix-matrix products are sometimes optimized to Dot22 or Gemm ops\n (see tensor.blas).\n Vector-vector products are sometimes optimized to Ger or CGer (see\n tensor.blas).\n Matrix-vector products are sometimes optimized to Gemv, CGemv (see\n tensor.blas).\n\n \"\"\"\n\n __props__ = ()\n\n # the rationale for Dot22 is related to getting GEMM Ops into the\n # graph. See Dot22 in tensor.blas for details.\n\n def make_node(self, *inputs):\n inputs = list(map(as_tensor_variable, inputs))\n\n if len(inputs) != 2:\n raise TypeError(f\"Two arguments required, {len(inputs)} given \")\n if inputs[0].ndim not in (1, 2):\n raise TypeError(\n \"Input 0 (0-indexed) must have ndim of \"\n f\"1 or 2, {int(inputs[0].ndim)} given. Consider calling \"\n \"aesara.tensor.dot instead.\"\n )\n if inputs[1].ndim not in (1, 2):\n raise TypeError(\n \"Input 1 (0-indexed) must have ndim of \"\n f\"1 or 2, {int(inputs[1].ndim)} given. Consider calling \"\n \"aesara.tensor.dot instead.\"\n )\n\n i_broadcastables = [input.type.broadcastable for input in inputs]\n bx, by = i_broadcastables\n if len(by) == 2: # y is a matrix\n bz = bx[:-1] + by[-1:]\n elif len(by) == 1: # y is vector\n bz = bx[:-1]\n\n i_dtypes = [input.type.dtype for input in inputs]\n outputs = [tensor(aes.upcast(*i_dtypes), bz)]\n return Apply(self, inputs, outputs)\n\n def perform(self, node, inp, out):\n x, y = inp\n (z,) = out\n\n # the asarray is here because dot between two vectors\n # gives a numpy float object but we need to return a 0d\n # ndarray\n z[0] = np.asarray(np.dot(x, y))\n\n def grad(self, inp, grads):\n\n x, y = inp\n (gz,) = grads\n xdim, ydim, gdim = x.type.ndim, y.type.ndim, gz.type.ndim\n\n # grad is scalar, so x is vector and y is vector\n if gdim == 0:\n xgrad = gz * y\n ygrad = gz * x\n\n # x is vector, y is matrix, grad is vector\n elif xdim == 1 and ydim == 2:\n xgrad = dot(gz, y.T)\n ygrad = outer(x.T, gz)\n\n # x is matrix, y is vector, grad is vector\n elif xdim == 2 and ydim == 1:\n xgrad = outer(gz, y.T)\n ygrad = dot(x.T, gz)\n\n # x is matrix, y is matrix, grad is matrix\n elif xdim == ydim == 2:\n xgrad = dot(gz, y.T)\n ygrad = dot(x.T, gz)\n\n # If x or y contain broadcastable dimensions but only one of\n # them know that a matching dimensions is broadcastable, the\n # above code don't always return the right broadcast pattern.\n # This cause problem down the road. 
See gh-1461.\n if xgrad.broadcastable != x.broadcastable:\n xgrad = patternbroadcast(xgrad, x.broadcastable)\n if ygrad.broadcastable != y.broadcastable:\n ygrad = patternbroadcast(ygrad, y.broadcastable)\n\n rval = xgrad, ygrad\n\n for elem in rval:\n assert elem.dtype.find(\"float\") != -1\n\n return rval\n\n def R_op(self, inputs, eval_points):\n # R_op for a \\dot b evaluated at c for a and d for b is\n # simply c \\dot b + a \\dot d\n\n assert len(inputs) == 2\n assert len(eval_points) == 2\n if eval_points[0] is None and eval_points[1] is None:\n return [None]\n\n if eval_points[0]:\n t1 = self(eval_points[0], inputs[1])\n if eval_points[1]:\n t2 = self(inputs[0], eval_points[1])\n\n if eval_points[0] and eval_points[1]:\n return [t1 + t2]\n elif eval_points[0]:\n return [t1]\n else:\n return [t2]\n\n def infer_shape(self, fgraph, node, shapes):\n xshp, yshp = shapes\n x, y = node.inputs\n\n # vector / vector\n if x.ndim == 1 and y.ndim == 1:\n return [()]\n # matrix / vector\n if x.ndim == 2 and y.ndim == 1:\n return [xshp[:-1]]\n # vector / matrix\n if x.ndim == 1 and y.ndim == 2:\n return [yshp[-1:]]\n # matrix / matrix\n if x.ndim == 2 and y.ndim == 2:\n return [xshp[:-1] + yshp[-1:]]\n raise NotImplementedError()\n\n def __str__(self):\n return \"dot\"\n\n\n_dot = Dot()\npprint.assign(\n _dot, printing.OperatorPrinter(printing.special[\"middle_dot\"], -1, \"left\")\n)\n\n\ndef dot(l, r):\n \"\"\"Return a symbolic dot product.\n\n This is designed to work with both sparse and dense tensors types.\n \"\"\"\n\n if not isinstance(l, Variable):\n l = as_tensor_variable(l)\n\n if not isinstance(r, Variable):\n r = as_tensor_variable(r)\n\n try:\n res = l.__dot__(r)\n if res is NotImplemented:\n raise NotImplementedError\n except (NotImplementedError, AttributeError, TypeError):\n res = r.__rdot__(l)\n if res is NotImplemented:\n raise NotImplementedError()\n\n return res\n\n\ndef dense_dot(a, b):\n \"\"\"\n Computes the dot product of two variables.\n\n For two matrices, this is equivalent to matrix multiplication.\n For two vectors, this is the inner product.\n When one variable is a scalar, this is like elementwise multiplication.\n For N dimensions, this is a sum product over the last axis\n of the first array and the second-to-last axis of the second array:\n\n dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m])\n\n Note that this dot function does one of three things, in the following\n sequence:\n\n 1. If either a or b is scalar, it returns the elementwise product\n without calling the Aesara Dot op.\n\n 2. If either a or b has more than 2 dimensions, it calls Aesara's\n tensordot function with appropriate axes. The tensordot function\n expresses high-dimensional dot products in terms of 2D matrix\n multiplications, so it may be possible to further optimize for\n performance.\n\n 3. 
If both a and b have either 1 or 2 dimensions, it calls Aesara's\n Dot op on a and b.\n\n Notes\n -----\n Matrix-matrix products are sometimes optimized to Dot22 or Gemm ops\n (see tensor.blas).\n Vector-vector products are sometimes optimized to Ger or CGer (see\n tensor.blas).\n Matrix-vector products are sometimes optimized to Gemv, CGemv (see\n tensor.blas).\n\n \"\"\"\n a, b = as_tensor_variable(a), as_tensor_variable(b)\n\n if a.ndim == 0 or b.ndim == 0:\n return a * b\n elif a.ndim > 2 or b.ndim > 2:\n return tensordot(a, b, [[a.ndim - 1], [np.maximum(0, b.ndim - 2)]])\n else:\n return _dot(a, b)\n\n\ndef _tensordot_as_dot(a, b, axes, dot, batched):\n \"\"\"\n Reduces a tensor dot product to a matrix or vector dot product. Based\n on code from Tijmen Tieleman's gnumpy\n (http://www.cs.toronto.edu/~tijmen/gnumpy.html).\n\n Please see the documentation of tensordot for the meaning of the a, b\n and axes arguments.\n\n :param dot: a function that accepts two symbolic variables and computes\n the appropriate dot product (e.g. dot, batched_dot)\n :type dot: function\n\n :param batched: whether to treat the first axis of a and b as a batch\n axis. If so, this axis will be preserved in the output,\n allowing this function to be used also for batched\n tensor dot products.\n :type batched: boolean\n\n :returns: a tensor with shape equal to the concatenation of a's shape\n (less any dimensions that were summed over) and b's shape\n (less the first dimension and any dimensions that were summed\n over).\n :rtype: symbolic tensor\n \"\"\"\n a, b = as_tensor_variable(a), as_tensor_variable(b)\n\n if not np.isscalar(axes) and len(axes) != 2:\n raise ValueError(\n \"Axes should be an integer or a \"\n \"list/tuple of len 2 ({axes} was provided)\"\n )\n\n # if 'axes' is a number of axes to multiply and sum over (trailing axes\n # of a, leading axes of b), we can just reshape and use dot.\n elif np.isscalar(axes):\n axes = int(axes)\n\n for operand_name, operand in ((\"a\", a), (\"b\", b)):\n if axes > operand.ndim:\n raise ValueError(\n f\"axes can not be larger than the dimension of {operand_name} \"\n f\"({operand_name}.ndim={operand.ndim}, axes={axes})\"\n )\n if batched and axes == operand.ndim:\n raise ValueError(\n \"axes to sum over must not include the batch axis \"\n f\"of {operand_name} ({operand_name}.ndim={operand.ndim}, axes={axes})\"\n )\n\n batch_axes = 1 if batched else 0\n a_outaxes = slice(0, a.ndim - axes)\n b_outaxes = slice(batch_axes + axes, b.ndim)\n outshape = concatenate([a.shape[a_outaxes], b.shape[b_outaxes]])\n outbcast = a.broadcastable[a_outaxes] + b.broadcastable[b_outaxes]\n outndim = len(outbcast)\n\n a_shape = [1] * 2\n b_shape = [1] * 2\n\n # compute total size of summed axes\n for i in range(0, axes):\n a_shape[1] *= a.shape[-(i + 1)]\n b_shape[0] *= b.shape[batch_axes + i]\n # compute total size of other axes\n for i in range(0, a.ndim - axes - batch_axes):\n a_shape[0] *= a.shape[batch_axes + i]\n for i in range(0, b.ndim - axes - batch_axes):\n b_shape[1] *= b.shape[-(i + 1)]\n\n if batched:\n a_shape.insert(0, a.shape[0])\n b_shape.insert(0, b.shape[0])\n\n a_reshaped = a.reshape(a_shape)\n b_reshaped = b.reshape(b_shape)\n\n out_reshaped = dot(a_reshaped, b_reshaped)\n out = out_reshaped.reshape(outshape, outndim)\n # Make sure the broadcastable pattern of the result is correct,\n # since some shape information can be lost in the reshapes.\n return patternbroadcast(out, outbcast)\n\n # if 'axes' is a list, transpose a and b such that the summed axes of a\n # 
are last and the summed axes of b are first.\n else:\n axes = [as_list(axes_) for axes_ in axes]\n\n if len(axes[0]) != len(axes[1]):\n raise ValueError(\"Axes elements must have the same length.\")\n\n for i, (operand_name, operand) in enumerate(((\"a\", a), (\"b\", b))):\n if len(axes[i]) > operand.ndim:\n raise ValueError(\n f\"axes[{i}] should be array_like with length less than \"\n f\"the dimensions of {operand_name} ({operand_name}.ndim={operand.ndim}, len(axes[0])={len(axes[i])}).\"\n )\n if len(axes[i]) > 0 and np.max(axes[i]) >= operand.ndim:\n raise ValueError(\n f\"axes[{i}] contains dimensions greater than or equal \"\n f\"to {operand_name}.ndim ({operand_name}.ndim={operand.ndim}, max(axes[0])={np.max(np.array(axes[i]))}).\"\n )\n if batched and 0 in axes[i]:\n raise ValueError(\n \"axes to sum over must not contain the batch axis \"\n f\"(axes[{i}]={axes[i]})\"\n )\n\n batch_axes = [0] if batched else []\n other_axes = [\n [x for x in range(operand.ndim) if x not in axes[i] and x not in batch_axes]\n for i, operand in enumerate((a, b))\n ]\n\n a_shuffled = a.dimshuffle(batch_axes + other_axes[0] + axes[0])\n b_shuffled = b.dimshuffle(batch_axes + axes[1] + other_axes[1])\n\n # now that a and b are in the right order, recur with integer axes\n return _tensordot_as_dot(\n a_shuffled, b_shuffled, len(axes[0]), dot=dot, batched=batched\n )\n\n\ndef tensordot(a, b, axes=2):\n \"\"\"\n Compute a generalized dot product over provided axes.\n\n Given two tensors a and b, tensordot computes a generalized dot product over\n the provided axes. Aesara's implementation reduces all expressions to\n matrix or vector dot products and is based on code from Tijmen Tieleman's\n gnumpy (http://www.cs.toronto.edu/~tijmen/gnumpy.html).\n\n Parameters\n ----------\n a: symbolic tensor\n The first tensor variable.\n b: symbolic tensor\n The second tensor variable\n axes: int or array-like of length 2\n If an integer, the number of axes to sum over.\n If an array, it must have two array elements containing the axes\n to sum over in each tensor.\n\n Note that the default value of 2 is not guaranteed to work\n for all values of a and b, and an error will be raised if\n that is the case. The reason for keeping the default is to\n maintain the same signature as numpy's tensordot function\n (and np.tensordot raises analogous errors for non-compatible\n inputs).\n\n If an integer i, it is converted to an array containing\n the last i dimensions of the first tensor and the first\n i dimensions of the second tensor:\n axes = [list(range(a.ndim - i, b.ndim)), list(range(i))]\n\n If an array, its two elements must contain compatible axes\n of the two tensors. For example, [[1, 2], [2, 0]] means sum\n over the 2nd and 3rd axes of a and the 3rd and 1st axes of b.\n (Remember axes are zero-indexed!) The 2nd axis of a and the\n 3rd axis of b must have the same shape; the same is true for\n the 3rd axis of a and the 1st axis of b.\n\n Returns\n -------\n symbolic tensor\n A tensor with shape equal to the concatenation of a's shape\n (less any dimensions that were summed over) and b's shape\n (less any dimensions that were summed over).\n\n Examples\n --------\n It may be helpful to consider an example to see what tensordot does.\n Aesara's implementation is identical to NumPy's. Here a has shape (2, 3, 4)\n and b has shape (5, 6, 4, 3). The axes to sum over are [[1, 2], [3, 2]] --\n note that a.shape[1] == b.shape[3] and a.shape[2] == b.shape[2]; these axes\n are compatible. 
The resulting tensor will have shape (2, 5, 6) -- the\n dimensions that are not being summed:\n\n >>> a = np.random.random((2,3,4))\n >>> b = np.random.random((5,6,4,3))\n\n #tensordot\n >>> c = np.tensordot(a, b, [[1,2],[3,2]])\n\n #loop replicating tensordot\n >>> a0, a1, a2 = a.shape\n >>> b0, b1, _, _ = b.shape\n >>> cloop = np.zeros((a0,b0,b1))\n\n #loop over non-summed indices -- these exist\n #in the tensor product.\n >>> for i in range(a0):\n ... for j in range(b0):\n ... for k in range(b1):\n ... #loop over summed indices -- these don't exist\n ... #in the tensor product.\n ... for l in range(a1):\n ... for m in range(a2):\n ... cloop[i,j,k] += a[i,l,m] * b[j,k,m,l]\n\n >>> np.allclose(c, cloop)\n true\n\n This specific implementation avoids a loop by transposing a and b such that\n the summed axes of a are last and the summed axes of b are first. The\n resulting arrays are reshaped to 2 dimensions (or left as vectors, if\n appropriate) and a matrix or vector dot product is taken. The result is\n reshaped back to the required output dimensions.\n\n In an extreme case, no axes may be specified. The resulting tensor\n will have shape equal to the concatenation of the shapes of a and b:\n\n >>> c = np.tensordot(a, b, 0)\n >>> print(a.shape)\n (2,3,4)\n >>> print(b.shape)\n (5,6,4,3)\n >>> print(c.shape)\n (2,3,4,5,6,4,3)\n\n See the documentation of numpy.tensordot for more examples.\n\n \"\"\"\n return _tensordot_as_dot(a, b, axes, dot=dot, batched=False)\n\n\ndef outer(x, y):\n \"\"\"Return vector-vector outer product.\n\n If an input isn't a vector, we flatten it first.\n\n \"\"\"\n if x.ndim != 1:\n x = x.flatten()\n if y.ndim != 1:\n y = y.flatten()\n return dot(x.dimshuffle(0, \"x\"), y.dimshuffle(\"x\", 0))\n\n\nclass All(CAReduce):\n \"\"\"Applies `logical and` to all the values of a tensor along the\n specified axis(es).\n\n \"\"\"\n\n __props__ = (\"axis\",)\n nfunc_spec = (\"all\", 1, 1)\n\n def __init__(self, axis=None):\n super().__init__(aes.and_, axis)\n\n def _output_dtype(self, idtype):\n return \"bool\"\n\n def __str__(self):\n if self.axis is None:\n return \"All\"\n else:\n return \"All{%s}\" % \", \".join(map(str, self.axis))\n\n def make_node(self, input):\n input = as_tensor_variable(input)\n if input.dtype != \"bool\":\n input = neq(input, 0)\n ret = super().make_node(input)\n return ret\n\n def grad(self, inp, grads):\n (x,) = inp\n return [x.zeros_like(config.floatX)]\n\n\nclass Any(CAReduce):\n \"\"\"Applies `bitwise or` to all the values of a tensor along the\n specified axis(es).\n\n \"\"\"\n\n __props__ = (\"axis\",)\n nfunc_spec = (\"any\", 1, 1)\n\n def __init__(self, axis=None):\n super().__init__(aes.or_, axis)\n\n def _output_dtype(self, idtype):\n return \"bool\"\n\n def __str__(self):\n if self.axis is None:\n return \"Any\"\n else:\n return \"Any{%s}\" % \", \".join(map(str, self.axis))\n\n def make_node(self, input):\n input = as_tensor_variable(input)\n if input.dtype != \"bool\":\n input = neq(input, 0)\n ret = super().make_node(input)\n return ret\n\n def grad(self, inp, grads):\n (x,) = inp\n return [x.zeros_like(config.floatX)]\n\n\nclass Sum(CAReduceDtype):\n \"\"\"\n Sums all the values of a tensor along the specified axis(es).\n\n Equivalent to `CAReduceDtype(scalar.add, axis=axis, dtype=dtype)`,\n with the difference that this defines the gradient of sum wrt its\n tensor input.\n\n Parameters\n ----------\n axis\n Axis(es) along which the tensor should be summed\n (use None to sum over all axes, and a list or tuple to sum along more\n 
than one axis).\n\n dtype\n The dtype of the internal accumulator and returned\n tensor. If None, then we use the default dtype which is the same as the\n input tensor's dtype except when:\n - the input dtype is a signed integer of precision < 64 bit, in\n which case we use int64\n - the input dtype is an unsigned integer of precision < 64 bit, in\n which case we use uint64\n This value does not depend on the value of \"acc_dtype\".\n\n acc_dtype\n The dtype of the internal accumulator.\n If None (default), we use the dtype in the list below,\n or the input dtype if its precision is higher:\n - for int dtypes, we use at least int64;\n - for uint dtypes, we use at least uint64;\n - for float dtypes, we use at least float64;\n - for complex dtypes, we use at least complex128.\n\n \"\"\"\n\n __props__ = (\"axis\", \"dtype\", \"acc_dtype\")\n nfunc_spec = (\"sum\", 1, 1)\n\n def __init__(self, axis=None, dtype=None, acc_dtype=None):\n super().__init__(aes.add, axis=axis, dtype=dtype, acc_dtype=acc_dtype)\n\n def __str__(self):\n name = self.__class__.__name__\n axis = \"\"\n if self.axis is not None:\n axis = \", \".join(str(x) for x in self.axis)\n axis = f\"axis=[{axis}], \"\n return f\"{name}{{{axis}acc_dtype={self.acc_dtype}}}\"\n\n def L_op(self, inp, out, grads):\n (x,) = inp\n\n if out[0].dtype not in continuous_dtypes:\n return [x.zeros_like(dtype=config.floatX)]\n\n (gz,) = grads\n gz = as_tensor_variable(gz)\n axis = self.axis\n if axis is None:\n axis = list(range(x.type.ndim))\n if axis == ():\n return (gz,)\n new_dims = []\n i = 0\n for j, _ in enumerate(x.type.broadcastable):\n if j in axis:\n new_dims.append(\"x\")\n else:\n new_dims.append(i)\n i += 1\n ds_op = DimShuffle(gz.type.broadcastable, new_dims)\n gx = Elemwise(aes.second)(x, ds_op(gz))\n return [gx]\n\n def R_op(self, inputs, eval_points):\n # There is just one element in inputs and eval_points, the axis are\n # part of self\n if None in eval_points:\n return [None]\n return self(*eval_points, **dict(return_list=True))\n\n\ndef sum(input, axis=None, dtype=None, keepdims=False, acc_dtype=None):\n \"\"\"\n Computes the sum along the given axis(es) of a tensor `input`.\n\n When axis is None (the default value), the sum is performed\n over the flattened tensor.\n\n For full documentation see `Sum`.\n In particular please pay attention to the important warning when using\n a custom acc_dtype.\n\n Parameters\n ----------\n keepdims: bool\n If this is set to True, the axes which are reduced are left in\n the result as dimensions with size one. 
With this option, the result\n will broadcast correctly against the original tensor.\n\n \"\"\"\n\n out = Sum(axis=axis, dtype=dtype, acc_dtype=acc_dtype)(input)\n\n if keepdims:\n out = makeKeepDims(input, out, axis)\n return out\n\n\npprint.assign(Sum(), printing.FunctionPrinter(\"sum\"))\n\n\nclass Prod(CAReduceDtype):\n \"\"\"\n Multiplies all the values of a tensor along the specified axis(es).\n\n Equivalent to `CAReduce(scalar.mul, axis = axis)`, with the\n difference that this defines the gradient of prod wrt its tensor\n input.\n\n \"\"\"\n\n __props__ = (\"axis\", \"dtype\", \"acc_dtype\")\n nfunc_spec = (\"prod\", 1, 1)\n\n def __init__(self, axis=None, dtype=None, acc_dtype=None, no_zeros_in_input=False):\n super().__init__(aes.mul, axis=axis, dtype=dtype, acc_dtype=acc_dtype)\n self.no_zeros_in_input = no_zeros_in_input\n\n def __setstate__(self, dct):\n super().__setstate__(dct)\n # Add default value to be able to reload old pickled objects.\n if \"no_zeros_in_input\" not in dct:\n self.no_zeros_in_input = False\n\n def L_op(self, inp, out, grads):\n \"\"\"\n The grad of this Op could be very easy, if it is was not for the case\n where zeros are present in a given \"group\" (ie. elements reduced\n together to form the product).\n\n If no zeros are found in the elements of the product, then the\n partial derivative of the product relative to one of the elements\n (one of the inputs) is simply the product of the other elements.\n That's easy to see from the chain rule.\n\n Now the trick (with no zeros) is to take the overall product, then\n for every original element, the partial derivative is given by\n this product divided by the element itself (which equals the product\n of the other terms). This is easy to do by broadcasting the original\n product.\n\n (Note that we also need to broadcast-multiply by the\n \"incoming gradient\", ie. the gradient of the cost relative to the\n output/product).\n\n With zeros, things get more complicated. For a given group, we have 3\n cases:\n\n * No zeros in the group. Use previous trick.\n * If only one zero is present, then the gradient for that element is\n non-zero, but is zero for all others.\n * If more than one zero is present, then all the derivatives are zero.\n\n For the last two cases (with 1 or more zeros), we can't use the\n division trick, as this gives divisions by 0.\n\n Implementing that case-by-case logic is not as trivial, so a bunch of\n hacks are piled down here to do it. Notably, for the \"only one zero\"\n case, there's a special Op that computes the product of the elements\n in the group, minus the zero (see `ProdWithoutZeros`). The trick is then\n to use the division trick for groups with no zero, to use the\n `ProdWithoutZeros` op where there's only one zero, and to output a\n derivative of zero for any element part of a group with more than\n one zero.\n\n I do this by first counting the number of zeros in each group (see the\n `aet.eq` bits), then taking this or that behavior (see `aet.switch`)\n based on the result of this count.\n\n \"\"\"\n (prod_in,) = inp\n (gz,) = grads\n\n if out[0].dtype in discrete_dtypes or self.acc_dtype in discrete_dtypes:\n # There is an int conversion in the way\n return [prod_in.zeros_like(dtype=config.floatX)]\n\n # Prepare the broadcasting that is used everywhere to broadcast\n # over the original groups (ie. 
broadcast over the elements of a given\n # product)\n gz = as_tensor_variable(gz)\n axis = self.axis\n if axis is None:\n axis = list(range(prod_in.type.ndim))\n if axis == ():\n return (gz,)\n new_dims = []\n i = 0\n for j, _ in enumerate(prod_in.type.broadcastable):\n if j in axis:\n new_dims.append(\"x\")\n else:\n new_dims.append(i)\n i += 1\n\n # result of the product, broadcastable over groups\n prod_out = self(prod_in).dimshuffle(new_dims)\n # incoming gradient, broadcastable over groups\n gz = gz.dimshuffle(new_dims)\n\n # division trick if we don't have zeros. This will contain\n # NaNs to be eliminated in the `aet.switch` if we do have zeros.\n grad_case_without_zeros = gz * prod_out / prod_in\n\n if self.no_zeros_in_input:\n # this handles inputs with zeros, but only certain input shapes\n return [grad_case_without_zeros]\n else:\n\n where_zeros = eq(prod_in, 0.0)\n sum_where_zeros = sum(where_zeros, axis=self.axis)\n groups_with_single_zero = eq(sum_where_zeros, 1).dimshuffle(new_dims)\n # tensor with 0 everywhere except for those places where\n # a 0 part of a group with a single zero was to be found\n where_single_zero = groups_with_single_zero * where_zeros\n # further optimization to avoid computing ProdWithoutZeros\n # if the incoming gradient is 0\n where_gz_not_zero = neq(gz, 0.0)\n # only take ProdWithoutZeros for the groups with single zeros\n # with non-null incoming gradient\n where_to_take_prod_without_zeros = (\n groups_with_single_zero * where_gz_not_zero\n )\n # preprocess the original input so that we set 0 everywhere\n # except for groups that contain a single zero, to avoid computing\n # multiplications on other groups\n prod_without_zeros_in = where_to_take_prod_without_zeros * prod_in\n # TODO: put lazy switch here, if it'd work\n # this is pretty efficient already (no multiplication if 0), but\n # it'd be even better if we had a lazy if per element\n prod_without_zeros = ProdWithoutZeros(axis=self.axis)(prod_without_zeros_in)\n prod_without_zeros = prod_without_zeros.dimshuffle(new_dims)\n\n groups_without_zeros = eq(sum_where_zeros, 0).dimshuffle(new_dims)\n\n final_grad = switch(\n groups_without_zeros,\n grad_case_without_zeros,\n switch(where_single_zero, prod_without_zeros, 0.0) * gz,\n )\n\n return [final_grad]\n\n def c_code_cache_version(self):\n return (1,)\n\n\ndef prod(\n input,\n axis=None,\n dtype=None,\n keepdims=False,\n acc_dtype=None,\n no_zeros_in_input=False,\n):\n \"\"\"\n Computes the product along the given axis(es) of a tensor `input`.\n\n When axis is None (the default value), the product is performed\n over the flattened tensor.\n\n For full documentation see ``tensor.elemwise.Prod``.\n\n Parameters\n ----------\n keepdims: bool\n If this is set to True, the axes which are reduced are left in\n the result as dimensions with size one. With this option, the result\n will broadcast correctly against the original tensor.\n\n \"\"\"\n\n out = Prod(\n axis, dtype=dtype, acc_dtype=acc_dtype, no_zeros_in_input=no_zeros_in_input\n )(input)\n\n if keepdims:\n out = makeKeepDims(input, out, axis)\n return out\n\n\nclass MulWithoutZeros(BinaryScalarOp):\n # \"identity\" here is zero, as in Reduce we don't want to start\n # with reducing (1, something_else): this leads to the erroneous\n # case where a vector of zeros is reduced by binary reductions\n # of (1, 0), which always ends up as 1 (ie. 
the result for\n # the c version, for the product of [0,0,0], is 1.0)\n\n identity = 0.0\n commutative = True\n associative = True\n\n def impl(self, x, y):\n if x == 0:\n return y\n if y == 0:\n return x\n return x * y\n\n def c_code(self, node, name, inp, out, sub):\n x, y = inp\n (z,) = out\n return (\n \"%(z)s = ((%(x)s == 0) ? (%(y)s) : \"\n + \"((%(y)s == 0) ? (%(x)s) : ((%(y)s)*(%(x)s))) );\"\n ) % locals()\n\n def c_code_cache_version(self):\n return (1,)\n\n\nmul_without_zeros = MulWithoutZeros(aes.upcast_out, name=\"mul_without_zeros\")\n\n\nclass ProdWithoutZeros(CAReduceDtype):\n\n __props__ = (\"axis\", \"dtype\", \"acc_dtype\")\n\n def __init__(self, axis=None, dtype=None, acc_dtype=None):\n super().__init__(mul_without_zeros, axis=axis, dtype=dtype, acc_dtype=acc_dtype)\n\n def grad(self, inp, grads):\n from aesara.gradient import grad_not_implemented\n\n (a,) = inp\n a_grad = grad_not_implemented(\n self,\n 0,\n a,\n \"2nd derivatives of `product(a)` is not currently supported.\"\n \"If `a` is guaranteed to contains no zeros, use \"\n \"`product(a, no_zeros_in_input=True)`.\",\n )\n return [a_grad]\n\n\ndef any(x, axis=None, keepdims=False):\n out = Any(axis)(x)\n\n if keepdims:\n out = makeKeepDims(x, out, axis)\n return out\n\n\ndef all(x, axis=None, keepdims=False):\n out = All(axis)(x)\n\n if keepdims:\n out = makeKeepDims(x, out, axis)\n return out\n\n\ndef ptp(a, axis=None):\n \"\"\"\n Range of values (maximum - minimum) along an axis.\n\n The name of the function comes from the acronym for peak to peak.\n\n Parameters\n ----------\n a\n Input tensor.\n axis\n Axis along which to find the peaks. By default, flatten the array.\n\n Returns\n -------\n array\n A new array holding the result.\n\n \"\"\"\n\n a = as_tensor_variable(a)\n\n out = max(a, axis) - min(a, axis)\n\n return out\n\n\ndef power(x, y):\n return x ** y\n\n\ndef logaddexp(*xs):\n \"\"\"Logarithm of the sum of exponentiations of the inputs.\n\n See ``numpy.logaddexp``.\n\n Parameters\n ----------\n xs : symbolic tensors\n Input\n\n Returns\n -------\n tensor\n\n \"\"\"\n\n return log(add(*[exp(x) for x in xs]))\n\n\ndef logsumexp(x, axis=None, keepdims=False):\n \"\"\"Compute the log of the sum of exponentials of input elements.\n\n See ``scipy.special.logsumexp``.\n\n Parameters\n ----------\n x : symbolic tensor\n Input\n\n axis : None or int or tuple of ints, optional\n Axis or axes over which the sum is taken. By default axis is None,\n and all elements are summed.\n\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left in the\n result as dimensions with size one. 
With this option, the result will\n broadcast correctly against the original array.\n\n Returns\n -------\n tensor\n\n \"\"\"\n\n return log(sum(exp(x), axis=axis, keepdims=keepdims))\n\n\n__all__ = [\n \"max_and_argmax\",\n \"max\",\n \"argmax\",\n \"min\",\n \"argmin\",\n \"smallest\",\n \"largest\",\n \"lt\",\n \"gt\",\n \"le\",\n \"ge\",\n \"eq\",\n \"neq\",\n \"isnan\",\n \"isinf\",\n \"allclose\",\n \"isclose\",\n \"and_\",\n \"bitwise_and\",\n \"or_\",\n \"bitwise_or\",\n \"xor\",\n \"bitwise_xor\",\n \"invert\",\n \"bitwise_not\",\n \"abs\",\n \"abs_\",\n \"exp\",\n \"exp2\",\n \"expm1\",\n \"neg\",\n \"reciprocal\",\n \"inv\",\n \"log\",\n \"log2\",\n \"log10\",\n \"log1p\",\n \"sgn\",\n \"ceil\",\n \"floor\",\n \"trunc\",\n \"iround\",\n \"round\",\n \"round_half_to_even\",\n \"round_half_away_from_zero\",\n \"sqr\",\n \"square\",\n \"cov\",\n \"sqrt\",\n \"deg2rad\",\n \"rad2deg\",\n \"cos\",\n \"arccos\",\n \"sin\",\n \"arcsin\",\n \"tan\",\n \"arctan\",\n \"arctan2\",\n \"cosh\",\n \"arccosh\",\n \"sinh\",\n \"arcsinh\",\n \"tanh\",\n \"arctanh\",\n \"erf\",\n \"erfc\",\n \"erfcx\",\n \"erfinv\",\n \"erfcinv\",\n \"gamma\",\n \"gammaln\",\n \"psi\",\n \"tri_gamma\",\n \"chi2sf\",\n \"gammainc\",\n \"gammaincc\",\n \"gammau\",\n \"gammal\",\n \"j0\",\n \"j1\",\n \"jv\",\n \"i0\",\n \"i1\",\n \"iv\",\n \"sigmoid\",\n \"expit\",\n \"softplus\",\n \"log1pexp\",\n \"log1mexp\",\n \"betainc\",\n \"real\",\n \"imag\",\n \"angle\",\n \"complex\",\n \"conj\",\n \"complex_from_polar\",\n \"sum\",\n \"prod\",\n \"mean\",\n \"var\",\n \"std\",\n \"std\",\n \"maximum\",\n \"minimum\",\n \"divmod\",\n \"add\",\n \"sub\",\n \"mul\",\n \"true_div\",\n \"int_div\",\n \"floor_div\",\n \"ceil_intdiv\",\n \"mod\",\n \"pow\",\n \"clip\",\n \"dot\",\n \"dense_dot\",\n \"tensordot\",\n \"outer\",\n \"any\",\n \"all\",\n \"ptp\",\n \"power\",\n \"logaddexp\",\n \"logsumexp\",\n]\n"
] | [
[
"numpy.asarray",
"numpy.max",
"numpy.min",
"numpy.zeros"
],
[
"numpy.allclose",
"numpy.dot",
"numpy.maximum",
"numpy.asarray",
"numpy.argmax",
"numpy.int64",
"numpy.iinfo",
"numpy.max",
"numpy.prod",
"numpy.array",
"numpy.concatenate",
"numpy.isscalar",
"numpy.mean"
]
] |
APS-XSD-OPT-Group/wavepytools | [
"25397c099e86a8939cc4ee3a2d266e4f809a1d18"
] | [
"wavepytools/optics/fourierOptics/exampleCircularLens2Steps.py"
] | [
"# -*- coding: utf-8 -*- #\n\"\"\"\nCreated on Tue Mar 3 11:18:30 2015\n\n@author: wcgrizolli\n\"\"\"\n\nimport sys\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom myFourierLib import *\n\n\nsys.path.append('/home/wcgrizolli/pythonWorkspace/wgTools')\nimport wgTools as wgt\n\nsys.path.append('/home/wcgrizolli/pythonWorkspace/srw/wgTools4srw')\nfrom wgTools4srw import *\n\n##=========================================================#\n# %% sampling definition\n##=========================================================#\nwavelength = 1.2398e-9 # 1KeV\n[Lx, Ly] = [2.5e-3, 2.5e-3]\n# Mx = Lx^2/wavelength/z\n[Mx, My] = [1001, 1001]\ndx = Lx/Mx\ndy = Ly/My\n\n#zz = 1.00 # XXX: dist to propag\n#Lx2 = Lx\n\nzz = .00322808 # XXX: dist to propag\nLx2 = Lx/2500.0\n\nprint('WG: sampling x=' + str(Mx))\nprint('WG: sampling y=' + str(My))\n\n# %%\nif Mx > 1001 or My > 1001:\n wgt.color_print('WG: Sampling bigger than 1001^2, stoping the program')\n# sys.exit()\n\n##=========================================================#\n# %% 2D u1 function\n##=========================================================#\n\n\ndef circ(X, Y, wx, wy, Xo=0.0, Yo=0.0): # circular\n out = X*0.0\n out[abs(((X-Xo)/wx)**2 + ((Y-Yo)/wy)**2) < 0.5**2] = 1.0\n out[abs(((X-Xo)/wx)**2 + ((Y-Yo)/wy)**2) == 0.5**2] = .50\n return out\n\n\ndef tFuncLens(X, Y, wavelength, fx=1e23, fy=1e23):\n return np.exp(-1j*2*np.pi/wavelength/2/fx*(X**2+Y**2))\n\n\ndef tFuncZP(X, Y, wavelength, fx=1e23, fy=1e23):\n return .5*(1.0 + np.sign(np.cos(np.pi/wavelength/fx*(X**2 + Y**2))))\n\n\nwx = 200e-6\nwy = 200e-6\nX, Y = np.meshgrid(np.linspace(-Lx/2, Lx/2, Mx), np.linspace(-Ly/2, Ly/2, My))\n\nprint('WG: Creating Source Wave u1...')\n\n#u1_xy = circ(X, Y, wx, wy)*tFuncZP(X, Y, wavelength, fx=zz)\nu1_xy = circ(X, Y, wx, wy)*tFuncLens(X, Y, wavelength, fx=zz)\n\n#u1_xy = circ(X, Y, wx, wy, 0, 80e-6) + circ(X, Y, wx, wy, 0,-80e-6) # double slit\n\nprint('WG: Creating Source Wave u1: DONE!')\n\n##=========================================================#\n# %% Propagation\n##=========================================================#\n\nprint('WG: Propagation...')\n\n\nif Lx == Lx2:\n u2_xy = propTForIR(u1_xy, Lx, Ly, wavelength, zz)\n X2, Y2 = X, Y\nelse:\n u2_xy = prop2step(u1_xy, Lx, Lx2, wavelength, zz)\n X2, Y2 = np.meshgrid(np.linspace(-Lx2/2, Lx2/2, Mx),\n np.linspace(-Lx2/2, Lx2/2, My))\n\nprint('WG: Propagation: DONE!')\n\n##=========================================================#\n# %% Plot u1\n##=========================================================#\n\n\n\n\nsaveFigure = 0\n\nprint('WG: Plot u1...')\n\n\n\nfactorX, unitStrX = wgt.chooseUnit(X)\nfactorY, unitStrY = wgt.chooseUnit(Y)\n\nunitStrX = unitStrX + ' m'\nunitStrY = unitStrY + ' m'\n\n# %% U1\n\n\n\nwgt.plotProfile(X*factorX, Y*factorY, np.abs(u1_xy),\n r'$x [' + unitStrX +']$',\n r'$y [' + unitStrY + ']$',\n r'Intensity [a.u.]',\n xo=0.0, yo=0.0,\n unitX=unitStrX, unitY=unitStrY)\n\n\n# %% U1\n\n#wgt.plotProfile(X*factorX, Y*factorY, np.abs(u1_xy),\n# r'$x [' + unitStrX +']$',\n# r'$y [' + unitStrY + ']$',\n# r'Intensity [a.u.]',\n# xo=0.0, yo=0.0,\n# unitX=unitStrX, unitY=unitStrY)\nif saveFigure:\n outputFigureName = wgt.datetimeNowStr() + '_u1.png'\n plt.savefig(outputFigureName)\n print('WG: Figure saved at %s!\\n' % (outputFigureName))\n plt.close()\nelse:\n plt.show(block=True)\n\nprint('WG: Plot u1: DONE!')\n\n##=========================================================#\n# %% Plot 
u2\n##=========================================================#\n\nprint('WG: Plot u2...')\n\nfactorX2, unitStrX2 = wgt.chooseUnit(X2)\nfactorY2, unitStrY2 = wgt.chooseUnit(Y2)\n\nunitStrX2 = unitStrX2 + ' m'\nunitStrY2 = unitStrY2 + ' m'\n\n\n## U1\n\nwgt.plotProfile(X2*factorX2, Y2*factorY2, np.abs(u2_xy),\n r'$x [' + unitStrX2 + ']$',\n r'$y [' + unitStrY2 + ']$',\n r'Intensity [a.u.]',\n unitX=unitStrX2, unitY=unitStrY2)\n\nif saveFigure:\n outputFigureName = wgt.datetimeNowStr() + '_u2.png'\n plt.savefig(outputFigureName)\n print('WG: Figure saved at %s!\\n' % (outputFigureName))\n plt.close()\nelse:\n plt.show(block=True)\n\nprint('WG: Plot u2: DONE!')\n# %%\n"
] | [
[
"matplotlib.pyplot.savefig",
"numpy.abs",
"numpy.cos",
"numpy.exp",
"matplotlib.pyplot.show",
"matplotlib.pyplot.close",
"numpy.linspace"
]
] |
George-Jiao/pytorch-toolbelt | [
"920e03876805351ed5645e439a64074cb4f37589"
] | [
"pytorch_toolbelt/modules/encoders/timm/common.py"
] | [
"import math\nimport warnings\nimport torch\n\nfrom typing import List, Union\nfrom torch import Tensor, nn\n\nfrom ..common import EncoderModule, _take\n\n__all__ = [\"GenericTimmEncoder\", \"make_n_channel_input_std_conv\"]\n\n\nclass GenericTimmEncoder(EncoderModule):\n def __init__(self, timm_encoder: Union[nn.Module, str], layers: List[int] = None):\n strides = []\n channels = []\n default_layers = []\n if isinstance(timm_encoder, str):\n import timm.models.factory\n\n timm_encoder = timm.models.factory.create_model(timm_encoder, pretrained=True)\n\n for i, oi in enumerate(timm_encoder.feature_info.out_indices):\n fi = timm_encoder.feature_info.info[i]\n strides.append(fi[\"reduction\"])\n channels.append(fi[\"num_chs\"])\n default_layers.append(i)\n\n if layers is None:\n layers = default_layers\n\n super().__init__(channels, strides, layers)\n self.encoder = timm_encoder\n\n def forward(self, x: Tensor) -> List[Tensor]:\n return _take(self.encoder(x), self._layers)\n\n\ndef make_n_channel_input_std_conv(conv: nn.Module, in_channels: int, mode=\"auto\", **kwargs) -> nn.Module:\n \"\"\"\n Return the same convolution class but with desired number of channels\n\n Args:\n conv: Input nn.Conv2D object to copy settings/weights from\n in_channels: Desired number of input channels\n mode:\n **kwargs: Optional overrides for Conv2D parameters\n \"\"\"\n conv_cls = conv.__class__\n\n if conv.in_channels == in_channels:\n warnings.warn(\"make_n_channel_input call is spurious\")\n return conv\n\n new_conv = conv_cls(\n in_channels,\n out_channels=conv.out_channels,\n kernel_size=kwargs.get(\"kernel_size\", conv.kernel_size),\n stride=kwargs.get(\"stride\", conv.stride),\n padding=kwargs.get(\"padding\", conv.padding),\n dilation=kwargs.get(\"dilation\", conv.dilation),\n groups=kwargs.get(\"groups\", conv.groups),\n bias=kwargs.get(\"bias\", conv.bias is not None),\n eps=kwargs.get(\"eps\", conv.eps),\n )\n\n w = conv.weight\n if in_channels > conv.in_channels:\n n = math.ceil(in_channels / float(conv.in_channels))\n w = torch.cat([w] * n, dim=1)\n w = w[:, :in_channels, ...]\n new_conv.weight = nn.Parameter(w, requires_grad=True)\n else:\n w = w[:, 0:in_channels, ...]\n new_conv.weight = nn.Parameter(w, requires_grad=True)\n\n return new_conv\n"
] | [
[
"torch.cat",
"torch.nn.Parameter"
]
] |
goldtime1987/pyQTGraph | [
"97193758d9f8f57f304f95959403f1db84c3c0b0"
] | [
"go.py"
] | [
"from PyQt4 import QtGui,QtCore\r\nimport sys\r\nimport ui_main\r\nimport numpy as np\r\nimport pylab\r\nimport time\r\nimport pyqtgraph\r\n\r\nclass ExampleApp(QtGui.QMainWindow, ui_main.Ui_MainWindow):\r\n def __init__(self, parent=None):\r\n pyqtgraph.setConfigOption('background', 'w') #before loading widget\r\n super(ExampleApp, self).__init__(parent)\r\n self.setupUi(self)\r\n self.btnAdd.clicked.connect(self.update)\r\n self.grPlot.plotItem.showGrid(True, True, 0.7)\r\n\r\n def update(self):\r\n t1=time.clock()\r\n points=100 #number of data points\r\n X=np.arange(points)\r\n Y=np.sin(np.arange(points)/points*3*np.pi+time.time())\r\n C=pyqtgraph.hsvColor(time.time()/5%1,alpha=.5)\r\n pen=pyqtgraph.mkPen(color=C,width=10)\r\n self.grPlot.plot(X,Y,pen=pen,clear=True)\r\n print(\"update took %.02f ms\"%((time.clock()-t1)*1000))\r\n if self.chkMore.isChecked():\r\n QtCore.QTimer.singleShot(1, self.update) # QUICKLY repeat\r\n\r\nif __name__==\"__main__\":\r\n app = QtGui.QApplication(sys.argv)\r\n form = ExampleApp()\r\n form.show()\r\n form.update() #start with something\r\n app.exec_()\r\n print(\"DONE\")"
] | [
[
"numpy.arange"
]
] |
ImmortalSdm/Speech-Emotion-Recognition-1 | [
"c5f766a0f66c77df30c6d75e86d97c27c2bbb240"
] | [
"extract_feats/opensmile.py"
] | [
"import os\nimport csv\nimport sys\nimport time\nimport pandas as pd\nfrom sklearn.preprocessing import StandardScaler\nfrom typing import Tuple\nfrom sklearn.externals import joblib\nfrom sklearn.model_selection import train_test_split\n\n# 每个特征集的特征数量\nFEATURE_NUM = {\n 'IS09_emotion': 384,\n 'IS10_paraling': 1582,\n 'IS11_speaker_state': 4368,\n 'IS12_speaker_trait': 6125,\n 'IS13_ComParE': 6373,\n 'ComParE_2016': 6373\n}\n\n\n'''\nget_feature_opensmile(): Opensmile 提取一个音频的特征\n\n输入:\n config(Class)\n file_path: 音频路径\n\n输出:\n 该音频的特征向量\n'''\n\ndef get_feature_opensmile(config, filepath: str):\n # 项目路径\n BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))\n # single_feature.csv 路径\n single_feat_path = os.path.join(BASE_DIR, config.feature_path, 'single_feature.csv')\n # Opensmile 配置文件路径\n opensmile_config_path = os.path.join(config.opensmile_path, 'config', config.opensmile_config + '.conf')\n\n # Opensmile 命令\n cmd = 'cd ' + config.opensmile_path + ' && ./SMILExtract -C ' + opensmile_config_path + ' -I ' + filepath + ' -O ' + single_feat_path\n print(\"Opensmile cmd: \", cmd)\n os.system(cmd)\n \n reader = csv.reader(open(single_feat_path,'r'))\n rows = [row for row in reader]\n last_line = rows[-1]\n return last_line[1: FEATURE_NUM[config.opensmile_config] + 1]\n\n\n'''\nload_feature(): 从 .csv 文件中加载特征数据\n\n输入:\n config(Class)\n feature_path: 特征文件路径\n train: 是否为训练数据\n\n输出:\n 训练数据、测试数据和对应的标签\n'''\n\ndef load_feature(config, feature_path: str, train: bool):\n # 加载特征数据\n df = pd.read_csv(feature_path)\n features = [str(i) for i in range(1, FEATURE_NUM[config.opensmile_config] + 1)]\n\n X = df.loc[:,features].values\n Y = df.loc[:,'label'].values\n\n # 标准化模型路径\n scaler_path = os.path.join(config.checkpoint_path, 'SCALER_OPENSMILE.m')\n\n if train == True:\n # 标准化数据 \n scaler = StandardScaler().fit(X)\n # 保存标准化模型\n joblib.dump(scaler, scaler_path)\n X = scaler.transform(X)\n\n # 划分训练集和测试集\n x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size = 0.2, random_state = 42)\n return x_train, x_test, y_train, y_test\n else:\n # 标准化数据\n # 加载标准化模型\n scaler = joblib.load(scaler_path)\n X = scaler.transform(X)\n return X\n\n\n'''\nget_data(): \n 提取所有音频的特征: 遍历所有文件夹, 读取每个文件夹中的音频, 提取每个音频的特征,把所有特征保存在 feature_path 中\n\n输入:\n config(Class)\n data_path: 数据集文件夹/测试文件路径\n feature_path: 保存特征的路径\n train: 是否为训练数据\n\n输出:\n train = True: 训练数据、测试数据特征和对应的标签\n train = False: 预测数据特征\n'''\n\n# Opensmile 提取特征\ndef get_data(config, data_path, feature_path: str, train: bool):\n\n writer = csv.writer(open(feature_path, 'w'))\n first_row = ['label']\n for i in range(1, FEATURE_NUM[config.opensmile_config] + 1):\n first_row.append(str(i))\n writer.writerow(first_row)\n\n writer = csv.writer(open(feature_path, 'a+'))\n print('Opensmile extracting...')\n\n if train == True:\n cur_dir = os.getcwd()\n sys.stderr.write('Curdir: %s\\n' % cur_dir)\n os.chdir(data_path)\n # 遍历文件夹\n for i, directory in enumerate(config.class_labels):\n sys.stderr.write(\"Started reading folder %s\\n\" % directory)\n os.chdir(directory)\n\n # label_name = directory\n label = config.class_labels.index(directory)\n\n # 读取该文件夹下的音频\n for filename in os.listdir('.'):\n if not filename.endswith('wav'):\n continue\n filepath = os.path.join(os.getcwd(), filename)\n \n # 提取该音频的特征\n feature_vector = get_feature_opensmile(config, filepath)\n feature_vector.insert(0, label)\n # 把每个音频的特征整理到一个 csv 文件中\n writer.writerow(feature_vector)\n\n sys.stderr.write(\"Ended reading folder %s\\n\" % directory)\n os.chdir('..')\n 
os.chdir(cur_dir)\n \n else:\n feature_vector = get_feature_opensmile(config, data_path)\n feature_vector.insert(0, '-1')\n writer.writerow(feature_vector)\n\n print('Opensmile extract done.')\n\n # 一个玄学 bug 的暂时性解决方案\n # 这里无法直接加载除了 IS10_paraling 以外的其他特征集的预测数据特征,非常玄学\n if(train == True):\n return load_feature(config, feature_path, train = train)"
] | [
[
"pandas.read_csv",
"sklearn.externals.joblib.load",
"sklearn.preprocessing.StandardScaler",
"sklearn.externals.joblib.dump",
"sklearn.model_selection.train_test_split"
]
] |
kpflugshaupt/pandas | [
"a1fee9199eba7ebf423880243936b9f1501d3d3a"
] | [
"pandas/tests/series/test_replace.py"
] | [
"# coding=utf-8\n# pylint: disable-msg=E1101,W0612\n\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nimport pandas.util.testing as tm\n\nfrom .common import TestData\n\n\nclass TestSeriesReplace(TestData):\n def test_replace(self):\n N = 100\n ser = pd.Series(np.random.randn(N))\n ser[0:4] = np.nan\n ser[6:10] = 0\n\n # replace list with a single value\n ser.replace([np.nan], -1, inplace=True)\n\n exp = ser.fillna(-1)\n tm.assert_series_equal(ser, exp)\n\n rs = ser.replace(0., np.nan)\n ser[ser == 0.] = np.nan\n tm.assert_series_equal(rs, ser)\n\n ser = pd.Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N),\n dtype=object)\n ser[:5] = np.nan\n ser[6:10] = 'foo'\n ser[20:30] = 'bar'\n\n # replace list with a single value\n rs = ser.replace([np.nan, 'foo', 'bar'], -1)\n\n assert (rs[:5] == -1).all()\n assert (rs[6:10] == -1).all()\n assert (rs[20:30] == -1).all()\n assert (pd.isna(ser[:5])).all()\n\n # replace with different values\n rs = ser.replace({np.nan: -1, 'foo': -2, 'bar': -3})\n\n assert (rs[:5] == -1).all()\n assert (rs[6:10] == -2).all()\n assert (rs[20:30] == -3).all()\n assert (pd.isna(ser[:5])).all()\n\n # replace with different values with 2 lists\n rs2 = ser.replace([np.nan, 'foo', 'bar'], [-1, -2, -3])\n tm.assert_series_equal(rs, rs2)\n\n # replace inplace\n ser.replace([np.nan, 'foo', 'bar'], -1, inplace=True)\n\n assert (ser[:5] == -1).all()\n assert (ser[6:10] == -1).all()\n assert (ser[20:30] == -1).all()\n\n ser = pd.Series([np.nan, 0, np.inf])\n tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))\n\n ser = pd.Series([np.nan, 0, 'foo', 'bar', np.inf, None, pd.NaT])\n tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))\n filled = ser.copy()\n filled[4] = 0\n tm.assert_series_equal(ser.replace(np.inf, 0), filled)\n\n ser = pd.Series(self.ts.index)\n tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))\n\n # malformed\n msg = r\"Replacement lists must match in length\\. 
Expecting 3 got 2\"\n with pytest.raises(ValueError, match=msg):\n ser.replace([1, 2, 3], [np.nan, 0])\n\n # make sure that we aren't just masking a TypeError because bools don't\n # implement indexing\n with pytest.raises(TypeError, match='Cannot compare types .+'):\n ser.replace([1, 2], [np.nan, 0])\n\n ser = pd.Series([0, 1, 2, 3, 4])\n result = ser.replace([0, 1, 2, 3, 4], [4, 3, 2, 1, 0])\n tm.assert_series_equal(result, pd.Series([4, 3, 2, 1, 0]))\n\n def test_replace_gh5319(self):\n # API change from 0.12?\n # GH 5319\n ser = pd.Series([0, np.nan, 2, 3, 4])\n expected = ser.ffill()\n result = ser.replace([np.nan])\n tm.assert_series_equal(result, expected)\n\n ser = pd.Series([0, np.nan, 2, 3, 4])\n expected = ser.ffill()\n result = ser.replace(np.nan)\n tm.assert_series_equal(result, expected)\n # GH 5797\n ser = pd.Series(pd.date_range('20130101', periods=5))\n expected = ser.copy()\n expected.loc[2] = pd.Timestamp('20120101')\n result = ser.replace({pd.Timestamp('20130103'):\n pd.Timestamp('20120101')})\n tm.assert_series_equal(result, expected)\n result = ser.replace(pd.Timestamp('20130103'),\n pd.Timestamp('20120101'))\n tm.assert_series_equal(result, expected)\n\n # GH 11792: Test with replacing NaT in a list with tz data\n ts = pd.Timestamp('2015/01/01', tz='UTC')\n s = pd.Series([pd.NaT, pd.Timestamp('2015/01/01', tz='UTC')])\n result = s.replace([np.nan, pd.NaT], pd.Timestamp.min)\n expected = pd.Series([pd.Timestamp.min, ts], dtype=object)\n tm.assert_series_equal(expected, result)\n\n def test_replace_with_single_list(self):\n ser = pd.Series([0, 1, 2, 3, 4])\n result = ser.replace([1, 2, 3])\n tm.assert_series_equal(result, pd.Series([0, 0, 0, 0, 4]))\n\n s = ser.copy()\n s.replace([1, 2, 3], inplace=True)\n tm.assert_series_equal(s, pd.Series([0, 0, 0, 0, 4]))\n\n # make sure things don't get corrupted when fillna call fails\n s = ser.copy()\n msg = (r\"Invalid fill method\\. Expecting pad \\(ffill\\) or backfill\"\n r\" \\(bfill\\)\\. 
Got crash_cymbal\")\n with pytest.raises(ValueError, match=msg):\n s.replace([1, 2, 3], inplace=True, method='crash_cymbal')\n tm.assert_series_equal(s, ser)\n\n def test_replace_with_empty_list(self):\n # GH 21977\n s = pd.Series([[1], [2, 3], [], np.nan, [4]])\n expected = s\n result = s.replace([], np.nan)\n tm.assert_series_equal(result, expected)\n\n # GH 19266\n with pytest.raises(ValueError, match=\"cannot assign mismatch\"):\n s.replace({np.nan: []})\n with pytest.raises(ValueError, match=\"cannot assign mismatch\"):\n s.replace({np.nan: ['dummy', 'alt']})\n\n def test_replace_mixed_types(self):\n s = pd.Series(np.arange(5), dtype='int64')\n\n def check_replace(to_rep, val, expected):\n sc = s.copy()\n r = s.replace(to_rep, val)\n sc.replace(to_rep, val, inplace=True)\n tm.assert_series_equal(expected, r)\n tm.assert_series_equal(expected, sc)\n\n # MUST upcast to float\n e = pd.Series([0., 1., 2., 3., 4.])\n tr, v = [3], [3.0]\n check_replace(tr, v, e)\n\n # MUST upcast to float\n e = pd.Series([0, 1, 2, 3.5, 4])\n tr, v = [3], [3.5]\n check_replace(tr, v, e)\n\n # casts to object\n e = pd.Series([0, 1, 2, 3.5, 'a'])\n tr, v = [3, 4], [3.5, 'a']\n check_replace(tr, v, e)\n\n # again casts to object\n e = pd.Series([0, 1, 2, 3.5, pd.Timestamp('20130101')])\n tr, v = [3, 4], [3.5, pd.Timestamp('20130101')]\n check_replace(tr, v, e)\n\n # casts to object\n e = pd.Series([0, 1, 2, 3.5, True], dtype='object')\n tr, v = [3, 4], [3.5, True]\n check_replace(tr, v, e)\n\n # test an object with dates + floats + integers + strings\n dr = pd.date_range('1/1/2001', '1/10/2001',\n freq='D').to_series().reset_index(drop=True)\n result = dr.astype(object).replace(\n [dr[0], dr[1], dr[2]], [1.0, 2, 'a'])\n expected = pd.Series([1.0, 2, 'a'] + dr[3:].tolist(), dtype=object)\n tm.assert_series_equal(result, expected)\n\n def test_replace_bool_with_string_no_op(self):\n s = pd.Series([True, False, True])\n result = s.replace('fun', 'in-the-sun')\n tm.assert_series_equal(s, result)\n\n def test_replace_bool_with_string(self):\n # nonexistent elements\n s = pd.Series([True, False, True])\n result = s.replace(True, '2u')\n expected = pd.Series(['2u', False, '2u'])\n tm.assert_series_equal(expected, result)\n\n def test_replace_bool_with_bool(self):\n s = pd.Series([True, False, True])\n result = s.replace(True, False)\n expected = pd.Series([False] * len(s))\n tm.assert_series_equal(expected, result)\n\n def test_replace_with_dict_with_bool_keys(self):\n s = pd.Series([True, False, True])\n with pytest.raises(TypeError, match='Cannot compare types .+'):\n s.replace({'asdf': 'asdb', True: 'yes'})\n\n def test_replace2(self):\n N = 100\n ser = pd.Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N),\n dtype=object)\n ser[:5] = np.nan\n ser[6:10] = 'foo'\n ser[20:30] = 'bar'\n\n # replace list with a single value\n rs = ser.replace([np.nan, 'foo', 'bar'], -1)\n\n assert (rs[:5] == -1).all()\n assert (rs[6:10] == -1).all()\n assert (rs[20:30] == -1).all()\n assert (pd.isna(ser[:5])).all()\n\n # replace with different values\n rs = ser.replace({np.nan: -1, 'foo': -2, 'bar': -3})\n\n assert (rs[:5] == -1).all()\n assert (rs[6:10] == -2).all()\n assert (rs[20:30] == -3).all()\n assert (pd.isna(ser[:5])).all()\n\n # replace with different values with 2 lists\n rs2 = ser.replace([np.nan, 'foo', 'bar'], [-1, -2, -3])\n tm.assert_series_equal(rs, rs2)\n\n # replace inplace\n ser.replace([np.nan, 'foo', 'bar'], -1, inplace=True)\n assert (ser[:5] == -1).all()\n assert (ser[6:10] == -1).all()\n assert (ser[20:30] 
== -1).all()\n\n def test_replace_with_empty_dictlike(self):\n # GH 15289\n s = pd.Series(list('abcd'))\n tm.assert_series_equal(s, s.replace(dict()))\n tm.assert_series_equal(s, s.replace(pd.Series([])))\n\n def test_replace_string_with_number(self):\n # GH 15743\n s = pd.Series([1, 2, 3])\n result = s.replace('2', np.nan)\n expected = pd.Series([1, 2, 3])\n tm.assert_series_equal(expected, result)\n\n def test_replace_replacer_equals_replacement(self):\n # GH 20656\n # make sure all replacers are matching against original values\n s = pd.Series(['a', 'b'])\n expected = pd.Series(['b', 'a'])\n result = s.replace({'a': 'b', 'b': 'a'})\n tm.assert_series_equal(expected, result)\n\n def test_replace_unicode_with_number(self):\n # GH 15743\n s = pd.Series([1, 2, 3])\n result = s.replace('2', np.nan)\n expected = pd.Series([1, 2, 3])\n tm.assert_series_equal(expected, result)\n\n def test_replace_mixed_types_with_string(self):\n # Testing mixed\n s = pd.Series([1, 2, 3, '4', 4, 5])\n result = s.replace([2, '4'], np.nan)\n expected = pd.Series([1, np.nan, 3, np.nan, 4, 5])\n tm.assert_series_equal(expected, result)\n\n @pytest.mark.parametrize(\"categorical, numeric\", [\n (pd.Categorical('A', categories=['A', 'B']), [1]),\n (pd.Categorical(('A', ), categories=['A', 'B']), [1]),\n (pd.Categorical(('A', 'B'), categories=['A', 'B']), [1, 2]),\n ])\n def test_replace_categorical(self, categorical, numeric):\n # GH 24971\n # Do not check if dtypes are equal due to a known issue that\n # Categorical.replace sometimes coerces to object (GH 23305)\n s = pd.Series(categorical)\n result = s.replace({'A': 1, 'B': 2})\n expected = pd.Series(numeric)\n tm.assert_series_equal(expected, result, check_dtype=False)\n\n def test_replace_with_no_overflowerror(self):\n # GH 25616\n # casts to object without Exception from OverflowError\n s = pd.Series([0, 1, 2, 3, 4])\n result = s.replace([3], ['100000000000000000000'])\n expected = pd.Series([0, 1, 2, '100000000000000000000', 4])\n tm.assert_series_equal(result, expected)\n\n s = pd.Series([0, '100000000000000000000',\n '100000000000000000001'])\n result = s.replace(['100000000000000000000'], [1])\n expected = pd.Series([0, 1, '100000000000000000001'])\n tm.assert_series_equal(result, expected)\n"
] | [
[
"pandas.Series",
"pandas.date_range",
"numpy.random.randn",
"pandas.Categorical",
"numpy.arange",
"pandas.util.testing.assert_series_equal",
"pandas.Timestamp",
"pandas.isna",
"pandas.util.testing.makeDateIndex"
]
] |
Shashi456/transformers | [
"0f43e742d908772733870730dbddd8e00e0253ef"
] | [
"src/transformers/models/pegasus/modeling_tf_pegasus.py"
] | [
"# coding=utf-8\n# Copyright 2021, Google Inc. and The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" TF 2.0 Pegasus model. \"\"\"\n\n\nimport random\nfrom typing import Dict, Optional, Tuple, Union\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom ...activations_tf import get_tf_activation\nfrom ...file_utils import (\n add_code_sample_docstrings,\n add_end_docstrings,\n add_start_docstrings,\n add_start_docstrings_to_model_forward,\n replace_return_docstrings,\n)\nfrom ...modeling_tf_outputs import (\n TFBaseModelOutput,\n TFBaseModelOutputWithPastAndCrossAttentions,\n TFSeq2SeqLMOutput,\n TFSeq2SeqModelOutput,\n)\n\n# Public API\nfrom ...modeling_tf_utils import (\n DUMMY_INPUTS,\n TFCausalLanguageModelingLoss,\n TFPreTrainedModel,\n TFSharedEmbeddings,\n TFWrappedEmbeddings,\n input_processing,\n keras_serializable,\n shape_list,\n)\nfrom ...utils import logging\nfrom .configuration_pegasus import PegasusConfig\n\n\nlogger = logging.get_logger(__name__)\n\n_CHECKPOINT_FOR_DOC = \"google/pegasus-large\"\n_CONFIG_FOR_DOC = \"PegasusConfig\"\n_TOKENIZER_FOR_DOC = \"PegasusTokenizer\"\n\n\nLARGE_NEGATIVE = -1e8\n\n\n# Copied from transformers.models.bart.modeling_tf_bart.shift_tokens_right\ndef shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int):\n start_tokens = tf.fill((shape_list(input_ids)[0], 1), decoder_start_token_id)\n shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1)\n # replace possible -100 values in labels by `pad_token_id`\n shifted_input_ids = tf.where(\n shifted_input_ids == -100, tf.fill(shape_list(shifted_input_ids), pad_token_id), shifted_input_ids\n )\n\n if tf.executing_eagerly():\n # \"Verify that `labels` has only positive values and -100\"\n assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0))\n\n # Make sure the assertion op is called by wrapping the result in an identity no-op\n with tf.control_dependencies([assert_gte0]):\n shifted_input_ids = tf.identity(shifted_input_ids)\n\n return shifted_input_ids\n\n\n# Copied from transformers.models.bart.modeling_tf_bart._make_causal_mask\ndef _make_causal_mask(input_ids_shape: tf.TensorShape, past_key_values_length: int = 0):\n \"\"\"\n Make causal mask used for bi-directional self-attention.\n \"\"\"\n bsz, tgt_len = input_ids_shape\n mask = tf.ones((tgt_len, tgt_len)) * LARGE_NEGATIVE\n mask_cond = tf.range(shape_list(mask)[-1])\n\n mask = tf.where(mask_cond < tf.reshape(mask_cond + 1, (shape_list(mask)[-1], 1)), 0.0, mask)\n\n if past_key_values_length > 0:\n mask = tf.concat([tf.zeros((tgt_len, past_key_values_length)), mask], axis=-1)\n\n return tf.tile(mask[None, None, :, :], (bsz, 1, 1, 1))\n\n\n# Copied from transformers.models.bart.modeling_tf_bart._expand_mask\ndef _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None, past_key_values_length: int = 0):\n \"\"\"\n Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.\n \"\"\"\n 
src_len = shape_list(mask)[1]\n tgt_len = tgt_len if tgt_len is not None else src_len\n one_cst = tf.constant(1.0)\n mask = tf.cast(mask, dtype=one_cst.dtype)\n expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1))\n\n return (one_cst - expanded_mask) * LARGE_NEGATIVE\n\n\n# Copied from transformers.models.marian.modeling_tf_marian.TFMarianSinusoidalPositionalEmbedding with Marian->Pegasus\nclass TFPegasusSinusoidalPositionalEmbedding(tf.keras.layers.Layer):\n \"\"\"This module produces sinusoidal positional embeddings of any length.\"\"\"\n\n def __init__(self, num_positions: int, embedding_dim: int, **kwargs):\n super().__init__(**kwargs)\n\n if embedding_dim % 2 != 0:\n raise NotImplementedError(f\"odd embedding_dim {embedding_dim} not supported\")\n\n self.embedding_dim = embedding_dim\n self.num_positions = num_positions\n\n def build(self, input_shape: tf.TensorShape):\n \"\"\"\n Build shared token embedding layer Shared weights logic adapted from\n https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24\n \"\"\"\n\n weight = self._init_weight(self.num_positions, self.embedding_dim)\n\n self.weight = self.add_weight(\n name=\"embeddings\",\n shape=[self.num_positions, self.embedding_dim],\n )\n weight = tf.cast(weight, dtype=self.weight.dtype)\n\n self.weight.assign(weight)\n\n super().build(input_shape)\n\n @staticmethod\n def _init_weight(n_pos: int, dim: int):\n \"\"\"\n Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in\n the 2nd half of the vector. [dim // 2:]\n \"\"\"\n position_enc = np.array(\n [[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]\n )\n # index 0 is all zero\n position_enc[:, 0 : dim // 2] = np.sin(position_enc[:, 0::2])\n position_enc[:, dim // 2 :] = np.cos(position_enc[:, 1::2])\n # convert to tensor\n table = tf.convert_to_tensor(position_enc)\n tf.stop_gradient(table)\n return table\n\n def call(self, input_shape: tf.TensorShape, past_key_values_length: int = 0):\n \"\"\"Input is expected to be of size [bsz x seqlen].\"\"\"\n bsz, seq_len = input_shape[:2]\n\n positions = tf.range(past_key_values_length, seq_len + past_key_values_length, delta=1, name=\"range\")\n return tf.gather(self.weight, positions)\n\n\n# Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention with Bart->Pegasus\nclass TFPegasusAttention(tf.keras.layers.Layer):\n \"\"\"Multi-headed attention from \"Attention Is All You Need\"\"\"\n\n def __init__(\n self,\n embed_dim: int,\n num_heads: int,\n dropout: float = 0.0,\n is_decoder: bool = False,\n bias: bool = True,\n **kwargs,\n ):\n super().__init__(**kwargs)\n self.embed_dim = embed_dim\n\n self.num_heads = num_heads\n self.dropout = tf.keras.layers.Dropout(dropout)\n self.head_dim = embed_dim // num_heads\n assert self.head_dim * num_heads == self.embed_dim, \"embed_dim must be divisible by num_heads\"\n self.scaling = self.head_dim ** -0.5\n self.is_decoder = is_decoder\n\n self.k_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name=\"k_proj\")\n self.q_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name=\"q_proj\")\n self.v_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name=\"v_proj\")\n self.out_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name=\"out_proj\")\n\n def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int):\n return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, 
self.head_dim)), (0, 2, 1, 3))\n\n def call(\n self,\n hidden_states: tf.Tensor,\n key_value_states: Optional[tf.Tensor] = None,\n past_key_value: Optional[Tuple[Tuple[tf.Tensor]]] = None,\n attention_mask: Optional[tf.Tensor] = None,\n layer_head_mask: Optional[tf.Tensor] = None,\n training=False,\n ) -> Tuple[tf.Tensor, Optional[tf.Tensor]]:\n \"\"\"Input shape: Batch x Time x Channel\"\"\"\n\n # if key_value_states are provided this layer is used as a cross-attention layer\n # for the decoder\n is_cross_attention = key_value_states is not None\n bsz, tgt_len, embed_dim = shape_list(hidden_states)\n\n # get query proj\n query_states = self.q_proj(hidden_states) * self.scaling\n # get key, value proj\n if is_cross_attention and past_key_value is not None:\n # reuse k,v, cross_attentions\n key_states = past_key_value[0]\n value_states = past_key_value[1]\n elif is_cross_attention:\n # cross_attentions\n key_states = self._shape(self.k_proj(key_value_states), -1, bsz)\n value_states = self._shape(self.v_proj(key_value_states), -1, bsz)\n elif past_key_value is not None:\n # reuse k, v, self_attention\n key_states = self._shape(self.k_proj(hidden_states), -1, bsz)\n value_states = self._shape(self.v_proj(hidden_states), -1, bsz)\n key_states = tf.concat([past_key_value[0], key_states], axis=2)\n value_states = tf.concat([past_key_value[1], value_states], axis=2)\n else:\n # self_attention\n key_states = self._shape(self.k_proj(hidden_states), -1, bsz)\n value_states = self._shape(self.v_proj(hidden_states), -1, bsz)\n\n if self.is_decoder:\n # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.\n # Further calls to cross_attention layer can then reuse all cross-attention\n # key/value_states (first \"if\" case)\n # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of\n # all previous decoder key/value_states. 
Further calls to uni-directional self-attention\n # can concat previous decoder key/value_states to current projected key/value_states (third \"elif\" case)\n # if encoder bi-directional self-attention `past_key_value` is always `None`\n past_key_value = (key_states, value_states)\n\n proj_shape = (bsz * self.num_heads, -1, self.head_dim)\n query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape)\n key_states = tf.reshape(key_states, proj_shape)\n value_states = tf.reshape(value_states, proj_shape)\n\n src_len = shape_list(key_states)[1]\n attn_weights = tf.matmul(query_states, key_states, transpose_b=True)\n\n # The tf.debugging asserts are not compliant with XLA then they\n # have to be disabled in other modes than eager.\n if tf.executing_eagerly():\n tf.debugging.assert_equal(\n shape_list(attn_weights),\n [bsz * self.num_heads, tgt_len, src_len],\n message=f\"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {shape_list(attn_weights)}\",\n )\n\n if attention_mask is not None:\n # The tf.debugging asserts are not compliant with XLA then they\n # have to be disabled in other modes than eager.\n if tf.executing_eagerly():\n tf.debugging.assert_equal(\n shape_list(attention_mask),\n [bsz, 1, tgt_len, src_len],\n message=f\"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {shape_list(attention_mask)}\",\n )\n\n attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype)\n attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask\n attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))\n\n attn_weights = tf.nn.softmax(attn_weights, axis=-1)\n\n if layer_head_mask is not None:\n # The tf.debugging asserts are not compliant with XLA then they\n # have to be disabled in other modes than eager.\n if tf.executing_eagerly():\n tf.debugging.assert_equal(\n shape_list(layer_head_mask),\n [self.num_heads],\n message=f\"Head mask for a single layer should be of size {(self.num_heads)}, but is {shape_list(layer_head_mask)}\",\n )\n\n attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape(\n attn_weights, (bsz, self.num_heads, tgt_len, src_len)\n )\n attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))\n\n attn_probs = self.dropout(attn_weights, training=training)\n attn_output = tf.matmul(attn_probs, value_states)\n\n # The tf.debugging asserts are not compliant with XLA then they\n # have to be disabled in other modes than eager.\n if tf.executing_eagerly():\n tf.debugging.assert_equal(\n shape_list(attn_output),\n [bsz * self.num_heads, tgt_len, self.head_dim],\n message=f\"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {shape_list(attn_output)}\",\n )\n\n attn_output = tf.transpose(\n tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3)\n )\n attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim))\n\n attn_output = self.out_proj(attn_output)\n attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len))\n\n return attn_output, attn_weights, past_key_value\n\n\n# Copied from transformers.models.mbart.modeling_tf_mbart.TFMBartEncoderLayer with MBart->Pegasus\nclass TFPegasusEncoderLayer(tf.keras.layers.Layer):\n def __init__(self, config: PegasusConfig, **kwargs):\n super().__init__(**kwargs)\n self.embed_dim = config.d_model\n self.self_attn = TFPegasusAttention(\n self.embed_dim, 
config.encoder_attention_heads, dropout=config.attention_dropout, name=\"self_attn\"\n )\n self.self_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name=\"self_attn_layer_norm\")\n self.dropout = tf.keras.layers.Dropout(config.dropout)\n self.activation_fn = get_tf_activation(config.activation_function)\n self.activation_dropout = tf.keras.layers.Dropout(config.activation_dropout)\n self.fc1 = tf.keras.layers.Dense(config.encoder_ffn_dim, name=\"fc1\")\n self.fc2 = tf.keras.layers.Dense(self.embed_dim, name=\"fc2\")\n self.final_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name=\"final_layer_norm\")\n\n def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, layer_head_mask: tf.Tensor, training=False):\n \"\"\"\n Args:\n hidden_states (:obj:`tf.Tensor`): input to the layer of shape `(seq_len, batch, embed_dim)`\n attention_mask (:obj:`tf.Tensor`): attention mask of size\n `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\n layer_head_mask (:obj:`tf.Tensor`): mask for attention heads in a given layer of size\n `(encoder_attention_heads,)`\n \"\"\"\n residual = hidden_states\n hidden_states = self.self_attn_layer_norm(hidden_states)\n hidden_states, self_attn_weights, _ = self.self_attn(\n hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask\n )\n\n # The tf.debugging asserts are not compliant with XLA then they\n # have to be disabled in other modes than eager.\n if tf.executing_eagerly():\n tf.debugging.assert_equal(\n shape_list(hidden_states),\n shape_list(residual),\n message=f\"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}\",\n )\n\n hidden_states = self.dropout(hidden_states, training=training)\n hidden_states = residual + hidden_states\n\n residual = hidden_states\n hidden_states = self.final_layer_norm(hidden_states)\n hidden_states = self.activation_fn(self.fc1(hidden_states))\n hidden_states = self.activation_dropout(hidden_states, training=training)\n hidden_states = self.fc2(hidden_states)\n hidden_states = self.dropout(hidden_states, training=training)\n hidden_states = residual + hidden_states\n\n return hidden_states, self_attn_weights\n\n\n# Copied from transformers.models.mbart.modeling_tf_mbart.TFMBartDecoderLayer with MBart->Pegasus\nclass TFPegasusDecoderLayer(tf.keras.layers.Layer):\n def __init__(self, config: PegasusConfig, **kwargs):\n super().__init__(**kwargs)\n self.embed_dim = config.d_model\n self.self_attn = TFPegasusAttention(\n embed_dim=self.embed_dim,\n num_heads=config.decoder_attention_heads,\n dropout=config.attention_dropout,\n name=\"self_attn\",\n is_decoder=True,\n )\n self.dropout = tf.keras.layers.Dropout(config.dropout)\n self.activation_fn = get_tf_activation(config.activation_function)\n self.activation_dropout = tf.keras.layers.Dropout(config.activation_dropout)\n\n self.self_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name=\"self_attn_layer_norm\")\n self.encoder_attn = TFPegasusAttention(\n self.embed_dim,\n config.decoder_attention_heads,\n dropout=config.attention_dropout,\n name=\"encoder_attn\",\n is_decoder=True,\n )\n self.encoder_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name=\"encoder_attn_layer_norm\")\n self.fc1 = tf.keras.layers.Dense(config.decoder_ffn_dim, name=\"fc1\")\n self.fc2 = tf.keras.layers.Dense(self.embed_dim, name=\"fc2\")\n self.final_layer_norm = 
tf.keras.layers.LayerNormalization(epsilon=1e-5, name=\"final_layer_norm\")\n\n def call(\n self,\n hidden_states,\n attention_mask: Optional[tf.Tensor] = None,\n encoder_hidden_states: Optional[tf.Tensor] = None,\n encoder_attention_mask: Optional[tf.Tensor] = None,\n layer_head_mask: Optional[tf.Tensor] = None,\n cross_attn_layer_head_mask: Optional[tf.Tensor] = None,\n past_key_value: Optional[Tuple[tf.Tensor]] = None,\n training=False,\n ) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]:\n \"\"\"\n Args:\n hidden_states (:obj:`tf.Tensor`): input to the layer of shape `(seq_len, batch, embed_dim)`\n attention_mask (:obj:`tf.Tensor`): attention mask of size\n `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\n encoder_hidden_states (:obj:`tf.Tensor`): cross attention input to the layer of shape `(seq_len, batch, embed_dim)`\n encoder_attention_mask (:obj:`tf.Tensor`): encoder attention mask of size\n `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\n layer_head_mask (:obj:`tf.Tensor`): mask for attention heads in a given layer of size\n `(decoder_attention_heads,)`\n cross_attn_layer_head_mask (:obj:`tf.Tensor`): mask for heads of the cross-attention module.\n `(decoder_attention_heads,)`\n past_key_value (:obj:`Tuple(tf.Tensor)`): cached past key and value projection states\n \"\"\"\n residual = hidden_states\n hidden_states = self.self_attn_layer_norm(hidden_states)\n\n # Self Attention\n # decoder uni-directional self-attention cached key/values tuple is at positions 1,2\n self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None\n # add present self-attn cache to positions 1,2 of present_key_value tuple\n hidden_states, self_attn_weights, present_key_value = self.self_attn(\n hidden_states=hidden_states,\n past_key_value=self_attn_past_key_value,\n attention_mask=attention_mask,\n layer_head_mask=layer_head_mask,\n )\n hidden_states = self.dropout(hidden_states, training=training)\n hidden_states = residual + hidden_states\n\n # Cross-Attention Block\n cross_attn_present_key_value = None\n cross_attn_weights = None\n if encoder_hidden_states is not None:\n residual = hidden_states\n hidden_states = self.encoder_attn_layer_norm(hidden_states)\n\n # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple\n cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None\n hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(\n hidden_states=hidden_states,\n key_value_states=encoder_hidden_states,\n attention_mask=encoder_attention_mask,\n layer_head_mask=cross_attn_layer_head_mask,\n past_key_value=cross_attn_past_key_value,\n )\n hidden_states = self.dropout(hidden_states, training=training)\n hidden_states = residual + hidden_states\n\n # add cross-attn to positions 3,4 of present_key_value tuple\n present_key_value = present_key_value + cross_attn_present_key_value\n\n # Fully Connected\n residual = hidden_states\n hidden_states = self.final_layer_norm(hidden_states)\n hidden_states = self.activation_fn(self.fc1(hidden_states))\n hidden_states = self.activation_dropout(hidden_states, training=training)\n hidden_states = self.fc2(hidden_states)\n hidden_states = self.dropout(hidden_states, training=training)\n hidden_states = residual + hidden_states\n\n return (\n hidden_states,\n self_attn_weights,\n cross_attn_weights,\n present_key_value,\n )\n\n\nclass 
TFPegasusPreTrainedModel(TFPreTrainedModel):\n config_class = PegasusConfig\n base_model_prefix = \"model\"\n\n @property\n def dummy_inputs(self):\n pad_token = 1\n input_ids = tf.cast(tf.convert_to_tensor(DUMMY_INPUTS), tf.int32)\n decoder_input_ids = tf.cast(tf.convert_to_tensor(DUMMY_INPUTS), tf.int32)\n dummy_inputs = {\n \"decoder_input_ids\": decoder_input_ids,\n \"attention_mask\": tf.math.not_equal(input_ids, pad_token),\n \"input_ids\": input_ids,\n }\n return dummy_inputs\n\n @tf.function(\n input_signature=[\n {\n \"input_ids\": tf.TensorSpec((None, None), tf.int32, name=\"input_ids\"),\n \"attention_mask\": tf.TensorSpec((None, None), tf.int32, name=\"attention_mask\"),\n \"decoder_input_ids\": tf.TensorSpec((None, None), tf.int32, name=\"decoder_input_ids\"),\n \"decoder_attention_mask\": tf.TensorSpec((None, None), tf.int32, name=\"decoder_attention_mask\"),\n }\n ]\n )\n # Copied from transformers.models.bart.modeling_tf_bart.TFBartPretrainedModel.serving\n def serving(self, inputs):\n output = self.call(inputs)\n\n return self.serving_output(output)\n\n\nPEGASUS_START_DOCSTRING = r\"\"\"\n This model inherits from :class:`~transformers.TFPreTrainedModel`. Check the superclass documentation for the\n generic methods the library implements for all its model (such as downloading or saving, resizing the input\n embeddings, pruning heads etc.)\n\n This model is also a `tf.keras.Model <https://www.tensorflow.org/api_docs/python/tf/keras/Model>`__ subclass. Use\n it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage\n and behavior.\n\n .. note::\n\n TF 2.0 models accepts two formats as inputs:\n\n - having all inputs as keyword arguments (like PyTorch models), or\n - having all inputs as a list, tuple or dict in the first positional arguments.\n\n This second option is useful when using :meth:`tf.keras.Model.fit` method which currently requires having all\n the tensors in the first argument of the model call function: :obj:`model(inputs)`.\n\n If you choose this second option, there are three possibilities you can use to gather all the input Tensors in\n the first positional argument :\n\n - a single Tensor with :obj:`input_ids` only and nothing else: :obj:`model(input_ids)`\n - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:\n :obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])`\n - a dictionary with one or several input Tensors associated to the input names given in the docstring:\n :obj:`model({\"input_ids\": input_ids, \"token_type_ids\": token_type_ids})`\n\n Args:\n config (:class:`~transformers.PegasusConfig`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the :meth:`~transformers.TFPreTrainedModel.from_pretrained` method to load the\n model weights.\n\"\"\"\n\nPEGASUS_GENERATION_EXAMPLE = r\"\"\"\n Summarization example::\n\n >>> from transformers import PegasusTokenizer, TFPegasusForConditionalGeneration\n\n >>> model = TFPegasusForConditionalGeneration.from_pretrained('google/pegasus-xsum')\n >>> tokenizer = PegasusTokenizer.from_pretrained('google/pegasus-xsum')\n\n >>> ARTICLE_TO_SUMMARIZE = (\n ... \"PG&E stated it scheduled the blackouts in response to forecasts for high winds \"\n ... \"amid dry conditions. The aim is to reduce the risk of wildfires. 
Nearly 800 thousand customers were \"\n ... \"scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.\"\n ... )\n >>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors='tf')\n\n >>> # Generate Summary\n >>> summary_ids = model.generate(inputs['input_ids'])\n >>> print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in summary_ids])\n\"\"\"\n\nPEGASUS_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`tf.Tensor` of shape :obj:`({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using :class:`~transformers.PegasusTokenizer`. See\n :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for\n details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`tf.Tensor` of shape :obj:`({0})`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n decoder_input_ids (:obj:`tf.Tensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):\n Indices of decoder input sequence tokens in the vocabulary.\n\n Indices can be obtained using :class:`~transformers.PegasusTokenizer`. See\n :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for\n details.\n\n `What are decoder input IDs? <../glossary.html#decoder-input-ids>`__\n\n Pegasus uses the :obj:`pad_token_id` as the starting token for :obj:`decoder_input_ids` generation. If\n :obj:`past_key_values` is used, optionally only the last :obj:`decoder_input_ids` have to be input (see\n :obj:`past_key_values`).\n decoder_attention_mask (:obj:`tf.Tensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):\n will be made by default and ignore pad tokens. It is not recommended to set this for most use cases.\n head_mask (:obj:`tf.Tensor` of shape :obj:`(encoder_layers, encoder_attention_heads)`, `optional`):\n Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in ``[0, 1]``:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n decoder_head_mask (:obj:`tf.Tensor` of shape :obj:`(decoder_layers, decoder_attention_heads)`, `optional`):\n Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in ``[0, 1]``:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n cross_attn_head_mask (:obj:`tf.Tensor` of shape :obj:`(decoder_layers, decoder_attention_heads)`, `optional`):\n Mask to nullify selected heads of the cross-attention modules. Mask values selected in ``[0, 1]``:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n encoder_outputs (:obj:`tf.FloatTensor`, `optional`):\n hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.\n of shape :obj:`(batch_size, sequence_length, hidden_size)` is a sequence of\n past_key_values (:obj:`Tuple[Tuple[tf.Tensor]]` of length :obj:`config.n_layers`)\n contains precomputed key and value hidden states of the attention blocks. 
Can be used to speed up decoding.\n If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`\n (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`\n instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.\n use_cache (:obj:`bool`, `optional`, defaults to :obj:`True`):\n If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up\n decoding (see :obj:`past_key_values`). Set to :obj:`False` during training, :obj:`True` during generation\n output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all\n attention layers. See ``attentions`` under returned tensors for more detail. This argument can be used only\n in eager mode, in graph mode the value in the config will be used instead.\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned\n tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the\n config will be used instead.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for\n more detail. This argument can be used only in eager mode, in graph mode the value in the config will be\n used instead.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. This\n argument can be used in eager mode, in graph mode the value will always be set to True.\n training (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Whether or not to use the model in training mode (some modules like dropout modules have different\n behaviors between training and evaluation).\n\"\"\"\n\n\n@keras_serializable\nclass TFPegasusEncoder(tf.keras.layers.Layer):\n config_class = PegasusConfig\n \"\"\"\n Transformer encoder consisting of *config.encoder_layers* self attention layers. 
Each layer is a\n :class:`TFPegasusEncoderLayer`.\n\n Args:\n config: PegasusConfig\n \"\"\"\n\n def __init__(self, config: PegasusConfig, embed_tokens: Optional[TFSharedEmbeddings] = None, **kwargs):\n super().__init__(**kwargs)\n self.config = config\n self.dropout = tf.keras.layers.Dropout(config.dropout)\n self.layerdrop = config.encoder_layerdrop\n self.padding_idx = config.pad_token_id\n self.max_source_positions = config.max_position_embeddings\n self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0\n\n self.embed_tokens = embed_tokens\n self.embed_positions = TFPegasusSinusoidalPositionalEmbedding(\n config.max_position_embeddings,\n config.d_model,\n name=\"embed_positions\",\n )\n self.layers = [TFPegasusEncoderLayer(config, name=f\"layers.{i}\") for i in range(config.encoder_layers)]\n self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name=\"layer_norm\")\n\n def get_embed_tokens(self):\n return self.embed_tokens\n\n def set_embed_tokens(self, embed_tokens):\n self.embed_tokens = embed_tokens\n\n def call(\n self,\n input_ids=None,\n inputs_embeds=None,\n attention_mask=None,\n head_mask=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n training=False,\n **kwargs,\n ):\n \"\"\"\n Args:\n input_ids (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you\n provide it.\n\n Indices can be obtained using :class:`~transformers.PegasusTokenizer`. See\n :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`\n for details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n head_mask (:obj:`tf.Tensor` of shape :obj:`(encoder_layers, encoder_attention_heads)`, `optional):\n Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded\n representation. This is useful if you want more control over how to convert :obj:`input_ids` indices\n into associated vectors than the model's internal embedding lookup matrix.\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under\n returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value\n in the config will be used instead.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors\n for more detail. This argument can be used only in eager mode, in graph mode the value in the config\n will be used instead.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. 
This\n argument can be used in eager mode, in graph mode the value will always be set to True.\n training (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Whether or not to use the model in training mode (some modules like dropout modules have different\n behaviors between training and evaluation).\n \"\"\"\n inputs = input_processing(\n func=self.call,\n config=self.config,\n input_ids=input_ids,\n attention_mask=attention_mask,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n training=training,\n kwargs_call=kwargs,\n )\n\n if inputs[\"input_ids\"] is not None and inputs[\"inputs_embeds\"] is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif inputs[\"input_ids\"] is not None:\n input_shape = shape_list(inputs[\"input_ids\"])\n elif inputs[\"inputs_embeds\"] is not None:\n input_shape = shape_list(inputs[\"inputs_embeds\"])[:-1]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n if inputs[\"inputs_embeds\"] is None:\n inputs[\"inputs_embeds\"] = self.embed_tokens(inputs[\"input_ids\"]) * self.embed_scale\n\n embed_pos = self.embed_positions(input_shape)\n hidden_states = inputs[\"inputs_embeds\"] + embed_pos\n hidden_states = self.dropout(hidden_states, training=inputs[\"training\"])\n\n # check attention mask and invert\n if inputs[\"attention_mask\"] is not None:\n # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]\n attention_mask = _expand_mask(inputs[\"attention_mask\"])\n else:\n attention_mask = None\n\n encoder_states = () if inputs[\"output_hidden_states\"] else None\n all_attentions = () if inputs[\"output_attentions\"] else None\n\n # check if head_mask has a correct number of layers specified if desired\n # The tf.debugging asserts are not compliant with XLA then they\n # have to be disabled in other modes than eager.\n if inputs[\"head_mask\"] is not None and tf.executing_eagerly():\n tf.debugging.assert_equal(\n shape_list(inputs[\"head_mask\"])[0],\n len(self.layers),\n message=f\"The head_mask should be specified for {len(self.layers)} layers, but it is for {shape_list(inputs['head_mask'])[0]}.\",\n )\n\n # encoder layers\n for idx, encoder_layer in enumerate(self.layers):\n\n if inputs[\"output_hidden_states\"]:\n encoder_states = encoder_states + (hidden_states,)\n # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)\n dropout_probability = random.uniform(0, 1)\n if inputs[\"training\"] and (dropout_probability < self.layerdrop): # skip the layer\n continue\n\n hidden_states, attn = encoder_layer(\n hidden_states,\n attention_mask,\n inputs[\"head_mask\"][idx] if inputs[\"head_mask\"] is not None else None,\n )\n\n if inputs[\"output_attentions\"]:\n all_attentions += (attn,)\n\n hidden_states = self.layer_norm(hidden_states)\n\n if inputs[\"output_hidden_states\"]:\n encoder_states = encoder_states + (hidden_states,)\n\n if not inputs[\"return_dict\"]:\n return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)\n return TFBaseModelOutput(\n last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions\n )\n\n\n@keras_serializable\nclass TFPegasusDecoder(tf.keras.layers.Layer):\n config_class = PegasusConfig\n \"\"\"\n Transformer decoder consisting of *config.decoder_layers* layers. 
Each layer is a :class:`TFPegasusDecoderLayer`\n\n Args:\n config: PegasusConfig\n embed_tokens: output embedding\n \"\"\"\n\n def __init__(self, config: PegasusConfig, embed_tokens: Optional[TFSharedEmbeddings] = None, **kwargs):\n super().__init__(**kwargs)\n self.config = config\n self.padding_idx = config.pad_token_id\n self.embed_tokens = embed_tokens\n self.layerdrop = config.decoder_layerdrop\n self.embed_positions = TFPegasusSinusoidalPositionalEmbedding(\n config.max_position_embeddings,\n config.d_model,\n name=\"embed_positions\",\n )\n self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0\n self.layers = [TFPegasusDecoderLayer(config, name=f\"layers.{i}\") for i in range(config.decoder_layers)]\n self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name=\"layer_norm\")\n\n self.dropout = tf.keras.layers.Dropout(config.dropout)\n\n def get_embed_tokens(self):\n return self.embed_tokens\n\n def set_embed_tokens(self, embed_tokens):\n self.embed_tokens = embed_tokens\n\n def call(\n self,\n input_ids=None,\n inputs_embeds=None,\n attention_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n head_mask=None,\n cross_attn_head_mask=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n training=False,\n **kwargs,\n ):\n r\"\"\"\n Args:\n input_ids (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you\n provide it.\n\n Indices can be obtained using :class:`~transformers.PegasusTokenizer`. See\n :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`\n for details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n encoder_hidden_states (:obj:`tf.Tensor` of shape :obj:`(batch_size, encoder_sequence_length, hidden_size)`, `optional`):\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention\n of the decoder.\n encoder_attention_mask (:obj:`tf.Tensor` of shape :obj:`(batch_size, encoder_sequence_length)`, `optional`):\n Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values\n selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n head_mask (:obj:`tf.Tensor` of shape :obj:`(decoder_layers, decoder_attention_heads)`, `optional`):\n Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n cross_attn_head_mask (:obj:`tf.Tensor` of shape :obj:`(decoder_layers, decoder_attention_heads)`, `optional`):\n Mask to nullify selected heads of the cross-attention modules. 
Mask values selected in ``[0, 1]``:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n past_key_values (:obj:`Tuple[Tuple[tf.Tensor]]` of length :obj:`config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up\n decoding.\n\n If :obj:`past_key_values` are used, the user can optionally input only the last\n :obj:`decoder_input_ids` (those that don't have their past key value states given to this model) of\n shape :obj:`(batch_size, 1)` instead of all :obj:`decoder_input_ids`` of shape :obj:`(batch_size,\n sequence_length)`.\n inputs_embeds (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded\n representation. This is useful if you want more control over how to convert :obj:`input_ids` indices\n into associated vectors than the model's internal embedding lookup matrix.\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under\n returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value\n in the config will be used instead.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors\n for more detail. This argument can be used only in eager mode, in graph mode the value in the config\n will be used instead.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. 
This\n argument can be used in eager mode, in graph mode the value will always be set to True.\n training (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Whether or not to use the model in training mode (some modules like dropout modules have different\n behaviors between training and evaluation).\n \"\"\"\n inputs = input_processing(\n func=self.call,\n config=self.config,\n input_ids=input_ids,\n attention_mask=attention_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n head_mask=head_mask,\n cross_attn_head_mask=cross_attn_head_mask,\n inputs_embeds=inputs_embeds,\n past_key_values=past_key_values,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n training=training,\n kwargs_call=kwargs,\n )\n\n if inputs[\"input_ids\"] is not None and inputs[\"inputs_embeds\"] is not None:\n raise ValueError(\"You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time\")\n elif inputs[\"input_ids\"] is not None:\n input_shape = shape_list(inputs[\"input_ids\"])\n elif inputs[\"inputs_embeds\"] is not None:\n input_shape = shape_list(inputs[\"inputs_embeds\"])[:-1]\n else:\n raise ValueError(\"You have to specify either decoder_input_ids or decoder_inputs_embeds\")\n\n past_key_values_length = (\n shape_list(inputs[\"past_key_values\"][0][0])[2] if inputs[\"past_key_values\"] is not None else 0\n )\n\n # embed positions\n positions = self.embed_positions(input_shape, past_key_values_length)\n\n if inputs[\"inputs_embeds\"] is None:\n inputs[\"inputs_embeds\"] = self.embed_tokens(inputs[\"input_ids\"]) * self.embed_scale\n\n hidden_states = inputs[\"inputs_embeds\"]\n\n # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]\n if input_shape[-1] > 1:\n combined_attention_mask = _make_causal_mask(input_shape, past_key_values_length=past_key_values_length)\n else:\n combined_attention_mask = _expand_mask(\n tf.ones((input_shape[0], input_shape[1] + past_key_values_length)), tgt_len=input_shape[-1]\n )\n\n if inputs[\"attention_mask\"] is not None:\n combined_attention_mask = combined_attention_mask + _expand_mask(\n inputs[\"attention_mask\"], tgt_len=input_shape[-1]\n )\n\n if inputs[\"encoder_hidden_states\"] is not None and inputs[\"encoder_attention_mask\"] is not None:\n # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]\n inputs[\"encoder_attention_mask\"] = _expand_mask(inputs[\"encoder_attention_mask\"], tgt_len=input_shape[-1])\n\n hidden_states = self.dropout(hidden_states + positions, training=inputs[\"training\"])\n\n # decoder layers\n all_hidden_states = () if inputs[\"output_hidden_states\"] else None\n all_self_attns = () if inputs[\"output_attentions\"] else None\n all_cross_attns = () if (inputs[\"output_attentions\"] and inputs[\"encoder_hidden_states\"] is not None) else None\n present_key_values = () if inputs[\"use_cache\"] else None\n\n # check if head_mask and cross_attn_head_mask have a correct number of layers specified if desired\n # The tf.debugging asserts are not compliant with XLA then they\n # have to be disabled in other modes than eager.\n for attn_mask in [\"head_mask\", \"cross_attn_head_mask\"]:\n if inputs[attn_mask] is not None and tf.executing_eagerly():\n tf.debugging.assert_equal(\n shape_list(inputs[attn_mask])[0],\n len(self.layers),\n message=f\"The {attn_mask} should be specified for {len(self.layers)} layers, but it is for {shape_list(inputs[attn_mask])[0]}.\",\n )\n\n for idx, 
decoder_layer in enumerate(self.layers):\n # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)\n if inputs[\"output_hidden_states\"]:\n all_hidden_states += (hidden_states,)\n dropout_probability = random.uniform(0, 1)\n\n if inputs[\"training\"] and (dropout_probability < self.layerdrop):\n continue\n\n past_key_value = inputs[\"past_key_values\"][idx] if inputs[\"past_key_values\"] is not None else None\n\n hidden_states, layer_self_attn, layer_cross_attn, present_key_value = decoder_layer(\n hidden_states,\n attention_mask=combined_attention_mask,\n encoder_hidden_states=inputs[\"encoder_hidden_states\"],\n encoder_attention_mask=inputs[\"encoder_attention_mask\"],\n layer_head_mask=inputs[\"head_mask\"][idx] if inputs[\"head_mask\"] is not None else None,\n cross_attn_layer_head_mask=inputs[\"cross_attn_head_mask\"][idx]\n if inputs[\"cross_attn_head_mask\"] is not None\n else None,\n past_key_value=past_key_value,\n )\n\n if inputs[\"use_cache\"]:\n present_key_values += (present_key_value,)\n\n if inputs[\"output_attentions\"]:\n all_self_attns += (layer_self_attn,)\n\n if inputs[\"encoder_hidden_states\"] is not None:\n all_cross_attns += (layer_cross_attn,)\n\n hidden_states = self.layer_norm(hidden_states)\n\n if inputs[\"output_hidden_states\"]:\n all_hidden_states += (hidden_states,)\n\n if inputs[\"output_attentions\"]:\n all_self_attns = list(all_self_attns)\n\n if inputs[\"encoder_hidden_states\"] is not None:\n all_cross_attns = list(all_cross_attns)\n\n if inputs[\"use_cache\"]:\n present_key_values = (inputs[\"encoder_hidden_states\"], present_key_values)\n\n if not inputs[\"return_dict\"]:\n return hidden_states, present_key_values, all_hidden_states, all_self_attns, all_cross_attns\n else:\n return TFBaseModelOutputWithPastAndCrossAttentions(\n last_hidden_state=hidden_states,\n past_key_values=present_key_values,\n hidden_states=all_hidden_states,\n attentions=all_self_attns,\n cross_attentions=all_cross_attns,\n )\n\n\n@keras_serializable\nclass TFPegasusMainLayer(tf.keras.layers.Layer):\n config_class = PegasusConfig\n\n def __init__(self, config: PegasusConfig, **kwargs):\n super().__init__(**kwargs)\n\n self.config = config\n self.shared = TFSharedEmbeddings(config.vocab_size, config.d_model, config.pad_token_id, name=\"model.shared\")\n\n with tf.compat.v1.variable_scope(\"model.shared\") as shared_abs_scope_name:\n pass\n\n # Wraps layer to avoid problems with weight restoring and ensuring we're in the correct TF scope.\n embed_tokens = TFWrappedEmbeddings(self.shared, abs_scope_name=shared_abs_scope_name)\n embed_tokens.vocab_size = self.shared.vocab_size\n embed_tokens.hidden_size = self.shared.hidden_size\n\n self.encoder = TFPegasusEncoder(config, embed_tokens, name=\"encoder\")\n self.decoder = TFPegasusDecoder(config, embed_tokens, name=\"decoder\")\n\n def get_input_embeddings(self):\n return self.shared\n\n def set_input_embeddings(self, new_embeddings):\n self.shared.weight = new_embeddings\n self.shared.vocab_size = self.shared.weight.shape[0]\n # retrieve correct absolute scope for embed token wrapper\n with tf.compat.v1.variable_scope(\"model.shared\") as shared_abs_scope_name:\n pass\n # Wraps layer to avoid problems with weight restoring and ensuring we're in the correct TF scope.\n embed_tokens = TFWrappedEmbeddings(self.shared, abs_scope_name=shared_abs_scope_name)\n self.encoder.set_embed_tokens(embed_tokens)\n self.decoder.set_embed_tokens(embed_tokens)\n\n def call(\n self,\n input_ids=None,\n attention_mask=None,\n 
decoder_input_ids=None,\n decoder_attention_mask=None,\n head_mask=None,\n decoder_head_mask=None,\n cross_attn_head_mask=None,\n encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None,\n past_key_values=None,\n inputs_embeds=None,\n decoder_inputs_embeds=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n training=False,\n **kwargs\n ):\n inputs = input_processing(\n func=self.call,\n config=self.config,\n input_ids=input_ids,\n attention_mask=attention_mask,\n decoder_input_ids=decoder_input_ids,\n decoder_attention_mask=decoder_attention_mask,\n head_mask=head_mask,\n decoder_head_mask=decoder_head_mask,\n cross_attn_head_mask=cross_attn_head_mask,\n encoder_outputs=encoder_outputs,\n past_key_values=past_key_values,\n inputs_embeds=inputs_embeds,\n decoder_inputs_embeds=decoder_inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n training=training,\n kwargs_call=kwargs,\n )\n\n if inputs[\"decoder_input_ids\"] is None and inputs[\"decoder_inputs_embeds\"] is None:\n inputs[\"use_cache\"] = False\n\n inputs[\"output_hidden_states\"] = (\n inputs[\"output_hidden_states\"]\n if inputs[\"output_hidden_states\"] is not None\n else self.config.output_hidden_states\n )\n\n if inputs[\"encoder_outputs\"] is None:\n inputs[\"encoder_outputs\"] = self.encoder(\n input_ids=inputs[\"input_ids\"],\n attention_mask=inputs[\"attention_mask\"],\n head_mask=inputs[\"head_mask\"],\n inputs_embeds=inputs[\"inputs_embeds\"],\n output_attentions=inputs[\"output_attentions\"],\n output_hidden_states=inputs[\"output_hidden_states\"],\n return_dict=inputs[\"return_dict\"],\n training=inputs[\"training\"],\n )\n # If the user passed a tuple for encoder_outputs, we wrap it in a TFBaseModelOutput when return_dict=True\n elif inputs[\"return_dict\"] and not isinstance(inputs[\"encoder_outputs\"], TFBaseModelOutput):\n inputs[\"encoder_outputs\"] = TFBaseModelOutput(\n last_hidden_state=inputs[\"encoder_outputs\"][0],\n hidden_states=inputs[\"encoder_outputs\"][1] if len(inputs[\"encoder_outputs\"]) > 1 else None,\n attentions=inputs[\"encoder_outputs\"][2] if len(inputs[\"encoder_outputs\"]) > 2 else None,\n )\n # If the user passed a TFBaseModelOutput for encoder_outputs, we wrap it in a tuple when return_dict=False\n elif not inputs[\"return_dict\"] and not isinstance(inputs[\"encoder_outputs\"], tuple):\n inputs[\"encoder_outputs\"] = inputs[\"encoder_outputs\"].to_tuple()\n\n decoder_outputs = self.decoder(\n inputs[\"decoder_input_ids\"],\n attention_mask=inputs[\"decoder_attention_mask\"],\n encoder_hidden_states=inputs[\"encoder_outputs\"][0],\n encoder_attention_mask=inputs[\"attention_mask\"],\n head_mask=inputs[\"decoder_head_mask\"],\n cross_attn_head_mask=inputs[\"cross_attn_head_mask\"],\n past_key_values=inputs[\"past_key_values\"],\n inputs_embeds=inputs[\"decoder_inputs_embeds\"],\n use_cache=inputs[\"use_cache\"],\n output_attentions=inputs[\"output_attentions\"],\n output_hidden_states=inputs[\"output_hidden_states\"],\n return_dict=inputs[\"return_dict\"],\n training=inputs[\"training\"],\n )\n\n if not inputs[\"return_dict\"]:\n return decoder_outputs + inputs[\"encoder_outputs\"]\n\n return TFSeq2SeqModelOutput(\n last_hidden_state=decoder_outputs.last_hidden_state,\n past_key_values=decoder_outputs.past_key_values,\n decoder_hidden_states=decoder_outputs.hidden_states,\n decoder_attentions=decoder_outputs.attentions,\n 
cross_attentions=decoder_outputs.cross_attentions,\n encoder_last_hidden_state=inputs[\"encoder_outputs\"].last_hidden_state,\n encoder_hidden_states=inputs[\"encoder_outputs\"].hidden_states,\n encoder_attentions=inputs[\"encoder_outputs\"].attentions,\n )\n\n\n@add_start_docstrings(\n \"The bare PEGASUS Model outputting raw hidden-states without any specific head on top.\",\n PEGASUS_START_DOCSTRING,\n)\nclass TFPegasusModel(TFPegasusPreTrainedModel):\n def __init__(self, config: PegasusConfig, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n\n self.model = TFPegasusMainLayer(config, name=\"model\")\n\n def get_encoder(self):\n return self.model.encoder\n\n def get_decoder(self):\n return self.model.decoder\n\n @add_start_docstrings_to_model_forward(PEGASUS_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=TFSeq2SeqModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def call(\n self,\n input_ids=None,\n attention_mask=None,\n decoder_input_ids=None,\n decoder_attention_mask=None,\n head_mask=None,\n decoder_head_mask=None,\n cross_attn_head_mask=None,\n encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None,\n past_key_values=None,\n inputs_embeds=None,\n decoder_inputs_embeds=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n training=False,\n **kwargs\n ):\n inputs = input_processing(\n func=self.call,\n config=self.config,\n input_ids=input_ids,\n attention_mask=attention_mask,\n decoder_input_ids=decoder_input_ids,\n decoder_attention_mask=decoder_attention_mask,\n head_mask=head_mask,\n decoder_head_mask=decoder_head_mask,\n cross_attn_head_mask=cross_attn_head_mask,\n encoder_outputs=encoder_outputs,\n past_key_values=past_key_values,\n inputs_embeds=inputs_embeds,\n decoder_inputs_embeds=decoder_inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n training=training,\n kwargs_call=kwargs,\n )\n\n outputs = self.model(\n input_ids=inputs[\"input_ids\"],\n attention_mask=inputs[\"attention_mask\"],\n decoder_input_ids=inputs[\"decoder_input_ids\"],\n decoder_attention_mask=inputs[\"decoder_attention_mask\"],\n head_mask=inputs[\"head_mask\"],\n decoder_head_mask=inputs[\"decoder_head_mask\"],\n cross_attn_head_mask=inputs[\"cross_attn_head_mask\"],\n encoder_outputs=inputs[\"encoder_outputs\"],\n past_key_values=inputs[\"past_key_values\"],\n inputs_embeds=inputs[\"inputs_embeds\"],\n decoder_inputs_embeds=inputs[\"decoder_inputs_embeds\"],\n use_cache=inputs[\"use_cache\"],\n output_attentions=inputs[\"output_attentions\"],\n output_hidden_states=inputs[\"output_hidden_states\"],\n return_dict=inputs[\"return_dict\"],\n training=inputs[\"training\"],\n )\n\n return outputs\n\n # Copied from transformers.models.bart.modeling_tf_bart.TFBartModel.serving_output\n def serving_output(self, output):\n pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None\n dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None\n dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None\n cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None\n enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else 
None\n enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None\n\n return TFSeq2SeqModelOutput(\n last_hidden_state=output.last_hidden_state,\n past_key_values=pkv,\n decoder_hidden_states=dec_hs,\n decoder_attentions=dec_attns,\n cross_attentions=cross_attns,\n encoder_last_hidden_state=output.encoder_last_hidden_state,\n encoder_hidden_states=enc_hs,\n encoder_attentions=enc_attns,\n )\n\n\n@add_start_docstrings(\n \"The PEGASUS Model with a language modeling head. Can be used for summarization.\",\n PEGASUS_START_DOCSTRING,\n)\nclass TFPegasusForConditionalGeneration(TFPegasusPreTrainedModel, TFCausalLanguageModelingLoss):\n _keys_to_ignore_on_load_unexpected = [\n r\"model.encoder.embed_tokens.weight\",\n r\"model.decoder.embed_tokens.weight\",\n ]\n\n def __init__(self, config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n self.model = TFPegasusMainLayer(config, name=\"model\")\n self.use_cache = config.use_cache\n # final_bias_logits is registered as a buffer in pytorch, so not trainable for the the sake of consistency.\n self.final_logits_bias = self.add_weight(\n name=\"final_logits_bias\", shape=[1, config.vocab_size], initializer=\"zeros\", trainable=False\n )\n\n def get_decoder(self):\n return self.model.decoder\n\n def get_encoder(self):\n return self.model.encoder\n\n def get_output_embeddings(self):\n return self.get_input_embeddings()\n\n def set_output_embeddings(self, value):\n self.set_input_embeddings(value)\n\n def get_bias(self):\n return {\"final_logits_bias\": self.final_logits_bias}\n\n def set_bias(self, value):\n self.final_logits_bias = value[\"final_logits_bias\"]\n\n @add_start_docstrings_to_model_forward(PEGASUS_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)\n @add_end_docstrings(PEGASUS_GENERATION_EXAMPLE)\n def call(\n self,\n input_ids=None,\n attention_mask=None,\n decoder_input_ids=None,\n decoder_attention_mask=None,\n head_mask=None,\n decoder_head_mask=None,\n cross_attn_head_mask=None,\n encoder_outputs: Optional[TFBaseModelOutput] = None,\n past_key_values=None,\n inputs_embeds=None,\n decoder_inputs_embeds=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n labels=None,\n training=False,\n **kwargs,\n ):\n \"\"\"\n labels (:obj:`tf.tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the masked language modeling loss. Indices should either be in ``[0, ...,\n config.vocab_size]`` or -100 (see ``input_ids`` docstring). 
Tokens with indices set to ``-100`` are ignored\n (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``.\n\n Returns:\n\n \"\"\"\n inputs = input_processing(\n func=self.call,\n config=self.config,\n input_ids=input_ids,\n attention_mask=attention_mask,\n decoder_input_ids=decoder_input_ids,\n decoder_attention_mask=decoder_attention_mask,\n head_mask=head_mask,\n decoder_head_mask=decoder_head_mask,\n cross_attn_head_mask=cross_attn_head_mask,\n encoder_outputs=encoder_outputs,\n past_key_values=past_key_values,\n inputs_embeds=inputs_embeds,\n decoder_inputs_embeds=decoder_inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n labels=labels,\n training=training,\n kwargs_call=kwargs,\n )\n\n if inputs[\"labels\"] is not None:\n inputs[\"labels\"] = tf.where(\n inputs[\"labels\"] == self.config.pad_token_id,\n tf.fill(shape_list(inputs[\"labels\"]), -100),\n inputs[\"labels\"],\n )\n inputs[\"use_cache\"] = False\n if inputs[\"decoder_input_ids\"] is None:\n inputs[\"decoder_input_ids\"] = shift_tokens_right(\n inputs[\"labels\"], self.config.pad_token_id, self.config.decoder_start_token_id\n )\n\n outputs = self.model(\n inputs[\"input_ids\"],\n attention_mask=inputs[\"attention_mask\"],\n decoder_input_ids=inputs[\"decoder_input_ids\"],\n encoder_outputs=inputs[\"encoder_outputs\"],\n decoder_attention_mask=inputs[\"decoder_attention_mask\"],\n head_mask=inputs[\"head_mask\"],\n decoder_head_mask=inputs[\"decoder_head_mask\"],\n cross_attn_head_mask=inputs[\"cross_attn_head_mask\"],\n past_key_values=inputs[\"past_key_values\"],\n inputs_embeds=inputs[\"inputs_embeds\"],\n decoder_inputs_embeds=inputs[\"decoder_inputs_embeds\"],\n use_cache=inputs[\"use_cache\"],\n output_attentions=inputs[\"output_attentions\"],\n output_hidden_states=inputs[\"output_hidden_states\"],\n return_dict=inputs[\"return_dict\"],\n training=inputs[\"training\"],\n )\n lm_logits = self.model.shared(outputs[0], mode=\"linear\")\n lm_logits = lm_logits + self.final_logits_bias\n masked_lm_loss = None if inputs[\"labels\"] is None else self.compute_loss(inputs[\"labels\"], lm_logits)\n\n if not inputs[\"return_dict\"]:\n output = (lm_logits,) + outputs[1:]\n return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output\n return TFSeq2SeqLMOutput(\n loss=masked_lm_loss,\n logits=lm_logits,\n past_key_values=outputs.past_key_values, # index 1 of d outputs\n decoder_hidden_states=outputs.decoder_hidden_states, # index 2 of d outputs\n decoder_attentions=outputs.decoder_attentions, # index 3 of d outputs\n cross_attentions=outputs.cross_attentions, # index 4 of d outputs\n encoder_last_hidden_state=outputs.encoder_last_hidden_state, # index 0 of encoder outputs\n encoder_hidden_states=outputs.encoder_hidden_states, # 1 of e out\n encoder_attentions=outputs.encoder_attentions, # 2 of e out\n )\n\n # Copied from transformers.models.bart.modeling_tf_bart.TFBartForConditionalGeneration.serving_output\n def serving_output(self, output):\n pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None\n dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None\n dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None\n cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None\n enc_hs = 
tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None\n enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None\n\n return TFSeq2SeqLMOutput(\n logits=output.logits,\n past_key_values=pkv,\n decoder_hidden_states=dec_hs,\n decoder_attentions=dec_attns,\n cross_attentions=cross_attns,\n encoder_last_hidden_state=output.encoder_last_hidden_state,\n encoder_hidden_states=enc_hs,\n encoder_attentions=enc_attns,\n )\n\n # Copied from transformers.models.bart.modeling_tf_bart.TFBartForConditionalGeneration.prepare_inputs_for_generation\n def prepare_inputs_for_generation(\n self,\n decoder_input_ids,\n past,\n attention_mask,\n head_mask=None,\n decoder_head_mask=None,\n cross_attn_head_mask=None,\n use_cache=None,\n **kwargs,\n ) -> Dict:\n assert past is not None and len(past) in {1, 2}, f\"past has to be an iterable of length 1,2 got {past}\"\n if len(past) == 1:\n assert isinstance(past[0], tf.Tensor), f\"`past[0]` has to be of type `tf.Tensor`, but is {type(past[0])}\"\n encoder_outputs = TFBaseModelOutput(last_hidden_state=past[0])\n past_key_values = None\n else:\n assert (\n len(past) == 2\n ), \"`past` has to be of length 2 with the encoder_outputs at the first position and past_key_values at the second position.\"\n encoder_outputs, past_key_values = past\n if isinstance(encoder_outputs, tuple):\n assert isinstance(\n encoder_outputs[0], tf.Tensor\n ), f\"`encoder_outputs[0]` has to be of type `tf.Tensor`, but is {type(encoder_outputs[0])}\"\n encoder_outputs = TFBaseModelOutput(last_hidden_state=encoder_outputs[0])\n elif isinstance(encoder_outputs, tf.Tensor):\n encoder_outputs = TFBaseModelOutput(last_hidden_state=encoder_outputs)\n assert (\n past_key_values\n ), f\"decoder cached states must be truthy. got {past_key_values} from the 2nd element of past\"\n decoder_input_ids = decoder_input_ids[:, -1:]\n\n assert isinstance(\n encoder_outputs, TFBaseModelOutput\n ), f\"encoder_outputs should be a TFBaseModelOutput, Instead got {type(encoder_outputs)}.\"\n return {\n \"input_ids\": None, # encoder_outputs is defined. input_ids not needed\n \"encoder_outputs\": encoder_outputs,\n \"past_key_values\": past_key_values,\n \"decoder_input_ids\": decoder_input_ids,\n \"attention_mask\": attention_mask,\n \"head_mask\": head_mask,\n \"decoder_head_mask\": decoder_head_mask,\n \"cross_attn_head_mask\": cross_attn_head_mask,\n \"use_cache\": use_cache, # change this to avoid caching (presumably for debugging)\n }\n\n def prepare_decoder_input_ids_from_labels(self, labels: tf.Tensor):\n return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)\n\n @staticmethod\n # Copied from transformers.models.bart.modeling_tf_bart.TFBartForConditionalGeneration._reorder_cache\n def _reorder_cache(past, beam_idx):\n if len(past) == 1:\n return past\n\n past_key_values = past[1]\n\n reordered_past = ()\n for layer_past_key_values in past_key_values:\n reordered_past += (\n tuple(tf.gather(layer_past_key_value, beam_idx) for layer_past_key_value in layer_past_key_values[:2])\n + layer_past_key_values[2:],\n )\n return (past[0], reordered_past)\n"
] | [
[
"tensorflow.reshape",
"tensorflow.ones",
"tensorflow.matmul",
"tensorflow.convert_to_tensor",
"tensorflow.concat",
"tensorflow.identity",
"tensorflow.executing_eagerly",
"tensorflow.nn.softmax",
"tensorflow.tuple",
"numpy.cos",
"tensorflow.keras.layers.Dense",
"tensorflow.constant",
"tensorflow.keras.layers.Dropout",
"tensorflow.cast",
"numpy.power",
"tensorflow.tile",
"tensorflow.control_dependencies",
"tensorflow.keras.layers.LayerNormalization",
"tensorflow.zeros",
"tensorflow.range",
"tensorflow.math.not_equal",
"tensorflow.stop_gradient",
"tensorflow.compat.v1.variable_scope",
"tensorflow.TensorSpec",
"numpy.sin",
"tensorflow.gather"
]
] |
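For context, the `modeling_tf_pegasus.py` cell above already embeds its intended usage in the `PEGASUS_GENERATION_EXAMPLE` docstring. The sketch below is a lightly adapted, runnable version of that docstring example and is not part of the dataset row itself; it assumes the `transformers` and `tensorflow` packages are installed and that the `google/pegasus-xsum` checkpoint named in the docstring is reachable.

```python
# Minimal usage sketch for the TFPegasusForConditionalGeneration class defined in the
# code cell above, following the cell's own PEGASUS_GENERATION_EXAMPLE docstring.
# The checkpoint name and package availability are assumptions for illustration only.
from transformers import PegasusTokenizer, TFPegasusForConditionalGeneration

model = TFPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")

article = (
    "PG&E stated it scheduled the blackouts in response to forecasts for high winds "
    "amid dry conditions. The aim is to reduce the risk of wildfires."
)
inputs = tokenizer([article], max_length=1024, return_tensors="tf")

# generate() returns summary token ids; decode them back to text.
summary_ids = model.generate(inputs["input_ids"])
print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False)
       for g in summary_ids])
```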
mrksbrg/adas-pro-sivic | [
"fb4bbd4f39b58e42c3d47494fb4116a3e7fced0d"
] | [
"scripts/compile_prosivic_results.py"
] | [
"import os\nimport statistics\nimport csv\nfrom collections import Counter\nimport pandas as pd\nimport numpy as np\n\nclass ExpSetup:\n\n def __init__(self, ped_x, ped_y, ped_orient, ped_speed, car_speed, min_dist, min_ttc, min_dist_awa, det, col):\n self.ped_x = ped_x\n self.ped_y = ped_y\n self.ped_orient = ped_orient\n self.ped_speed = ped_speed\n self.car_speed = car_speed\n self.min_dist_counter = Counter([min_dist])\n self.min_dist = [min_dist]\n self.min_ttc = [min_ttc]\n self.min_ttc_counter = Counter([min_ttc])\n self.min_dist_awa = [min_dist_awa]\n self.min_dist_awa_counter = Counter(([min_dist_awa]))\n self.detection = [det]\n self.collision = [col]\n self.nbr_results = 1\n\n self.results = Counter([ExpResult(min_dist, min_ttc, min_dist_awa, det, col)])\n\n def __str__(self):\n return \"### Scenario (x0P=\" + str(self.ped_x) + \", y0P=\" + str(self.ped_y) + \", Th0P=\" + str(self.ped_orient) + \", v0P=\" + str(self.ped_speed) + \", v0C=\" + str(self.car_speed) + \") ###\"\n\n def __eq__(self, other):\n return self.ped_x == other.ped_x and self.ped_y == other.ped_y and self.ped_orient == other.ped_orient \\\n and self.ped_speed == other.ped_speed and self.car_speed == other.car_speed\n\n def __lt__(self, other):\n return self.ped_x < other.ped_x\n\n def add_result(self, min_dist, min_ttc, min_dist_awa, det, col):\n self.min_dist.append(min_dist)\n self.min_dist_counter.update([min_dist])\n self.min_ttc.append(min_ttc)\n self.min_ttc_counter.update([min_ttc])\n self.min_dist_awa.append(min_dist_awa)\n self.min_dist_awa_counter.update([min_dist_awa])\n self.detection.append(det)\n self.collision.append(col)\n self.nbr_results += 1\n\n self.results.update([ExpResult(min_dist, min_ttc, min_dist_awa, det, col)])\n\n def get_nbr_results(self):\n return self.nbr_results\n\n def get_results(self):\n return self.results\n\n def get_nbr_unique_results(self):\n unique_list_of1 = []\n unique_list_of2 = []\n unique_list_of3 = []\n for x in self.min_dist:\n if x not in unique_list_of1:\n unique_list_of1.append(x)\n for y in self.min_ttc:\n if y not in unique_list_of2:\n unique_list_of2.append(y)\n for z in self.min_dist_awa:\n if z not in unique_list_of3:\n unique_list_of3.append(z)\n return {'of1': unique_list_of1, 'of2': unique_list_of2, 'of3': unique_list_of3}\n\n def get_avg_min_dist(self):\n sum = 0\n for res in self.min_dist:\n sum += res\n return sum / len(self.min_dist)\n\n def get_sd_min_dist(self):\n if len(self.min_dist) == 1:\n return 0\n else:\n return statistics.stdev(self.min_dist)\n\n def get_avg_min_ttc(self):\n sum = 0\n for res in self.min_ttc:\n sum += res\n return sum / len(self.min_ttc)\n\n def get_sd_min_ttc(self):\n if len(self.min_ttc) == 1:\n return 0\n else:\n return statistics.stdev(self.min_ttc)\n\n def get_avg_min_dist_awa(self):\n sum = 0\n for res in self.min_dist_awa:\n sum += res\n return sum / len(self.min_dist_awa)\n\n def get_sd_min_dist_awa(self):\n if len(self.min_dist_awa) == 1:\n return 0\n else:\n return statistics.stdev(self.min_dist_awa)\n\n def get_nbr_detections(self):\n sum = 0\n for res in self.detection:\n sum += res\n return sum\n\n def get_nbr_collisions(self):\n sum = 0\n for res in self.collision:\n sum += res\n return sum\n\n @property\n def get_ped_x(self):\n return self.ped_x\n\n @property\n def get_ped_y(self):\n return self.ped_y\n\n @property\n def get_ped_orient(self):\n return self.ped_orient\n\n @property\n def get_ped_speed(self):\n return self.ped_speed\n\n @property\n def get_car_speed(self):\n return self.car_speed\n\n 
@property\n def get_of1_counter(self):\n return self.min_dist_counter\n\n\nclass ExpResult:\n\n def __init__(self, min_dist, min_ttc, min_dist_awa, det, col):\n self.min_dist = min_dist\n self.min_ttc = min_ttc\n self.min_dist_awa = min_dist_awa\n self.detection = det\n self.collision = col\n\n @property\n def get_min_dist(self):\n return self.min_dist\n\n @property\n def get_min_ttc(self):\n return self.min_ttc\n\n @property\n def get_min_dist_awa(self):\n return self.min_dist_awa\n\n @property\n def get_detected(self):\n return self.detection\n\n @property\n def get_collision(self):\n return self.collision\n\n def __str__(self):\n return \"\\tOF1=\" + str(self.min_dist) + \", OF2=\" + str(self.min_ttc) + \", OF3=\" + str(self.min_dist_awa) + \", Detection=\" + str(self.detection) + \", Collision=\" + str(self.collision)\n\n def __eq__(self, other):\n return self.min_dist == other.min_dist and self.min_ttc == other.min_ttc and self.min_dist_awa == other.min_dist_awa \\\n and self.detection == other.detection and self.collision == other.collision\n\n def __lt__(self, other):\n return self.min_dist < other.min_dist\n\n def __hash__(self):\n return hash((self.min_dist, self.min_ttc, self.min_dist_awa, self.detection, self.collision))\n\ndir_name = 'prosivic_results'\nresult_dataframes = []\nscenario_results = []\n\nfor filename in os.listdir(dir_name):\n if filename.endswith(\".csv\"):\n df = pd.read_csv(dir_name + \"\\\\\" + filename)\n for index, row in df.iterrows():\n exp_setup = ExpSetup(row['ped_x'], row['ped_y'], row['ped_orient'], row['ped_speed'], row['car_speed'], row['of1'], row['of2'], row['of3'], row['detection'], row['collision'])\n if exp_setup not in scenario_results:\n scenario_results.append(exp_setup)\n else:\n #print(\"Adding results to: \" + str(conf))\n i = scenario_results.index(exp_setup)\n scenario_results[i].add_result(row['of1'], row['of2'], row['of3'], row['detection'], row['collision'])\n\nwith open('mode_prosivic_results.csv', mode='w') as merged_file:\n mode_writer = csv.writer(merged_file, delimiter=',')\n mode_writer.writerow(['x0P', 'y0P', 'Th0P', 'v0P', 'v0C', 'OF1', 'OF2', 'OF3', 'det', 'col', 'conf'])\n\n #merge_writer.writerow(['x0P', 'y0P', 'Th0P', 'v0P', 'v0C', 'nbr', 'OF1_unique', 'OF1_avg', 'OF1_sd', 'OF2_unique', 'OF2_avg', 'OF2_sd', 'OF3_unique', 'OF3_avg', 'OF3_sd', 'det_true', 'det_false', 'col_true', 'col_false'])\n\n for exp_setup in scenario_results:\n print(\"\\n\" + str(exp_setup))\n print(\"\\tNumber of results: \" + str(exp_setup.get_nbr_results()))\n res = exp_setup.get_results()\n for result, count in res.most_common():\n print(\"\\t\" + str(count) + \"x:\" + str(result))\n\n unique_per_of = exp_setup.get_nbr_unique_results()\n print(\"\\t\\t# Result per objective function #\")\n print(\"\\t\\tmin_dist:\\t\\tUnique = \" + str(len(unique_per_of[\"of1\"])) + \"\\tAvg = \" + str(exp_setup.get_avg_min_dist()) + \"\\tSD = \" + str(exp_setup.get_sd_min_dist()))\n print(\"\\t\\t\\tCounter min_dist: \" + str(exp_setup.min_dist_counter))\n print(\"\\t\\tmin_ttc:\\t\\tUnique = \" + str(len(unique_per_of[\"of2\"])) + \"\\tAvg = \" + str(exp_setup.get_avg_min_ttc()) + \"\\tSD = \" + str(exp_setup.get_sd_min_ttc()))\n print(\"\\t\\t\\tCounter min_ttc: \" + str(exp_setup.min_ttc_counter))\n print(\"\\t\\tmin_dist_awa:\\tUnique = \" + str(len(unique_per_of[\"of3\"])) + \"\\tAvg = \" + str(exp_setup.get_avg_min_dist_awa()) + \"\\tSD = \" + str(exp_setup.get_sd_min_dist_awa()))\n print(\"\\t\\t\\tCounter min_dist_awa: \" + 
str(exp_setup.min_dist_awa_counter))\n print(\"\\t\\tNumber detections: \" + str(exp_setup.get_nbr_detections()) + \" (out of \" + str(exp_setup.get_nbr_results()) + \" = \" + str(100 * (exp_setup.get_nbr_detections()/exp_setup.get_nbr_results())) + \"%)\")\n print(\"\\t\\tNumber collisions: \" + str(exp_setup.get_nbr_collisions()) + \" (out of \" + str(exp_setup.get_nbr_results()) + \" = \" + str(100 * (exp_setup.get_nbr_collisions()/exp_setup.get_nbr_results())) + \"%)\")\n\n mode_result = res.most_common(1)[0][0] # this is the most common ExpResult (first element in first tuple in first element in the Counter)\n conf = (res.most_common(1)[0][1]/exp_setup.get_nbr_results()) # this is the count of the most common results divided by the total number\n\n mode_writer.writerow([exp_setup.ped_x, exp_setup.ped_y, exp_setup.ped_orient, exp_setup.ped_speed, exp_setup.car_speed, mode_result.min_dist, mode_result.min_ttc, mode_result.min_dist_awa, mode_result.detection, mode_result.collision, conf])\n #merge_writer.writerow([exp_setup.ped_x, exp_setup.ped_y, exp_setup.ped_orient, exp_setup.ped_speed, exp_setup.car_speed, exp_setup.get_nbr_results(), len(unique_per_of[\"of1\"]), exp_setup.get_avg_min_dist(), exp_setup.get_sd_min_dist(), len(unique_per_of[\"of2\"]), exp_setup.get_avg_min_ttc(), exp_setup.get_sd_min_ttc(), len(unique_per_of[\"of3\"]), exp_setup.get_avg_min_dist_awa(), exp_setup.get_sd_min_dist_awa(), exp_setup.get_nbr_detections(), (exp_setup.get_nbr_results() - exp_setup.get_nbr_detections()), exp_setup.get_nbr_collisions(), (exp_setup.get_nbr_results() - exp_setup.get_nbr_collisions())])\n"
] | [
[
"pandas.read_csv"
]
] |
PeruBhardwaj/AttributionAttack | [
"0d5ca334c611c5e067029a3f8907f2d91255ddde"
] | [
"KGEAttack/ConvE/l2_del.py"
] | [
"#!/usr/bin/env python\n# coding: utf-8\n\n# In this notebook, I delete a triple from the neighbourhood of the target triple based on the **L2 metric = euclidean distance** between the candidate triple's embedding and the target triple's embedding\n# \n# - 'triple' embedding is computed by applying the model's scoring function to embeddings\n# - neighbourhood refers to the triples that share the entities with target's entities\n# \n# \n\n# In[1]:\n\n\nimport pickle\nfrom typing import Dict, Tuple, List\nimport os\nimport numpy as np\nimport pandas as pd\nfrom collections import defaultdict\nimport operator\n\nimport json\nimport logging\nimport argparse \nimport math\nfrom pprint import pprint\nimport errno\nimport time \n\nimport torch\nfrom torch.utils.data import DataLoader\nimport torch.backends.cudnn as cudnn\n\nfrom torch import nn\nfrom torch.nn import CrossEntropyLoss\nfrom torch.nn import functional as F\nimport torch.autograd as autograd\n\nfrom evaluation import evaluation\nfrom model import Distmult, Complex, Conve, Transe\nimport utils\n\n\ndef generate_nghbrs(test_set, train_set):\n '''\n For every triple in test set, return the index of \n neighbouring triple in training set,\n i.e. indices in training set are returned\n '''\n n_dict = {}\n for t, triple in enumerate(test_set):\n sub = triple[0]\n obj = triple[2]\n mask = (np.isin(train_set[:,0], [sub, obj]) | np.isin(train_set[:,2], [sub, obj]))\n #nghbrs_dict[t] = pro_train[mask]\n mask_idx = np.where(mask)[0]\n n_dict[t] = mask_idx\n \n return n_dict \n\ndef get_deletions(train_data, test_data, neighbours, model, attack_batch_size):\n logger.info('------ Generating edits per target triple ------')\n start_time = time.time()\n logger.info('Start time: {0}'.format(str(start_time)))\n \n triples_to_delete = []\n for test_idx, test_trip in enumerate(test_data):\n test_nghbrs = neighbours[test_idx]\n nghbr_trip = train_data[test_nghbrs]\n test_trip = test_trip[None, :] # add a batch dimension\n test_trip = torch.from_numpy(test_trip).to(device)\n test_s, test_r, test_o = test_trip[:,0], test_trip[:,1], test_trip[:,2]\n test_vec = model.score_triples_vec(test_s, test_r, test_o)\n\n b_begin = 0\n nghbr_dist = []\n if attack_batch_size == -1:\n nghbr_batch = nghbr_trip.shape[0]\n else:\n nghbr_batch = args.attack_batch_size\n\n while b_begin < nghbr_trip.shape[0]:\n b_nghbr_trip = nghbr_trip[b_begin : b_begin+nghbr_batch]\n b_nghbr_trip = torch.from_numpy(b_nghbr_trip).to(device)\n b_nghbr_s, b_nghbr_r, b_nghbr_o = b_nghbr_trip[:,0], b_nghbr_trip[:,1], b_nghbr_trip[:,2]\n b_nghbr_vec = model.score_triples_vec(b_nghbr_s, b_nghbr_r, b_nghbr_o)\n # shape of nghbr_vec is (num_nghbrs x emb_dim) e.g. 
(459 x 100)\n # shape of test vec is (1 x emb_dim)\n #b_dist = -torch.cdist(test_vec, b_nghbr_vec).squeeze() \n b_dist = -torch.norm((b_nghbr_vec-test_vec), p=2, dim=-1)\n b_dist = b_dist.detach().cpu().numpy().tolist()\n nghbr_dist += b_dist\n b_begin += nghbr_batch \n\n nghbr_dist = np.array(nghbr_dist)\n nghbr_dist = torch.from_numpy(nghbr_dist).to(device)\n # we want to remove the neighbour with maximum norm similarity\n max_values, argsort = torch.sort(nghbr_dist, -1, descending=True)\n del_idx = argsort[0]\n triple_to_delete = nghbr_trip[del_idx]\n\n triples_to_delete.append(triple_to_delete)\n if test_idx%100 == 0 or test_idx == test_data.shape[0]-1:\n logger.info('Processed test triple {0}'.format(str(test_idx)))\n logger.info('Time taken: {0}'.format(str(time.time() - start_time)))\n logger.info('Time taken to generate edits: {0}'.format(str(time.time() - start_time))) \n \n return triples_to_delete\n\n\n\nif __name__ == '__main__':\n\n\n parser = utils.get_argument_parser()\n parser.add_argument('--target-split', type=str, default='0_100_1', help='Ranks to use for target set. Values are 0 for ranks==1; 1 for ranks <=10; 2 for ranks>10 and ranks<=100. Default: 1')\n parser.add_argument('--budget', type=int, default=1, help='Budget for each target triple for each corruption side')\n parser.add_argument('--rand-run', type=int, default=1, help='A number assigned to the random run of experiment')\n parser.add_argument('--attack-batch-size', type=int, default=-1, help='Batch size for processing neighbours of target')\n\n args = parser.parse_args()\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n args.device = device\n\n # args.target_split = '0_100_1' # which target split to use \n #Values are 1 for ranks <=10; 2 for ranks>10 and ranks<=100.\n # args.budget = 1 #indicates the num of adversarial edits for each target triple for each corruption side\n # args.rand_run = 1 # a number assigned to the random run of the experiment\n args.seed = args.seed + (args.rand_run - 1) # default seed is 17\n\n # args.model = 'distmult'\n # args.data = 'WN18RR'\n\n if args.reproduce_results:\n args = utils.set_hyperparams(args) \n\n\n # Fixing random seeds for reproducibility -https://pytorch.org/docs/stable/notes/randomness.html\n torch.manual_seed(args.seed)\n cudnn.deterministic = True\n cudnn.benchmark = False\n np.random.seed(args.seed)\n rng = np.random.default_rng(seed=args.seed)\n\n\n args.epochs = -1 #no training here\n model_name = '{0}_{1}_{2}_{3}_{4}'.format(args.model, args.embedding_dim, args.input_drop, args.hidden_drop, args.feat_drop)\n model_path = 'saved_models/{0}_{1}.model'.format(args.data, model_name)\n log_path = 'logs/attack_logs/l2_del_{0}_{1}_{2}_{3}_{4}'.format( args.model, args.data, \n args.target_split, args.budget, args.rand_run)\n\n\n logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt = '%m/%d/%Y %H:%M:%S',\n level = logging.INFO,\n filename = log_path\n )\n logger = logging.getLogger(__name__)\n\n\n data_path = 'data/target_{0}_{1}_{2}'.format(args.model, args.data, args.target_split)\n\n n_ent, n_rel, ent_to_id, rel_to_id = utils.generate_dicts(data_path)\n\n ##### load data####\n data = utils.load_data(data_path)\n train_data, valid_data, test_data = data['train'], data['valid'], data['test']\n\n inp_f = open(os.path.join(data_path, 'to_skip_eval.pickle'), 'rb')\n to_skip_eval: Dict[str, Dict[Tuple[int, int], List[int]]] = pickle.load(inp_f)\n inp_f.close()\n to_skip_eval['lhs'] = {(int(k[0]), 
int(k[1])): v for k,v in to_skip_eval['lhs'].items()}\n to_skip_eval['rhs'] = {(int(k[0]), int(k[1])): v for k,v in to_skip_eval['rhs'].items()}\n\n\n\n model = utils.load_model(model_path, args, n_ent, n_rel, device)\n\n neighbours = generate_nghbrs(test_data, train_data) \n # test set is the target set because we loaded data from target_...\n\n\n triples_to_delete = get_deletions(train_data, test_data, neighbours, \n model, args.attack_batch_size)\n\n\n df = pd.DataFrame(data=triples_to_delete)\n df = df.drop_duplicates()\n # print(df.shape)\n trips_to_delete = df.values\n # print(trips_to_delete.shape)\n num_duplicates = len(triples_to_delete) - trips_to_delete.shape[0]\n # print(num_duplicates)\n\n\n\n per_tr_1, n_ignored_edits = utils.perturb_data(train_data, \n trips_to_delete)\n\n\n # Perturbed dataset\n logger.info('Shape of perturbed training set: {0}'.format(per_tr_1.shape))\n logger.info('Number of adversarial deletions ignored (because of singleton nodes): {0}'.format(n_ignored_edits))\n logger.info('Number of duplicate adversarial deletions : {0}'.format(num_duplicates))\n\n\n\n logger.info ('Length of original training set: ' + str(train_data.shape[0]))\n logger.info ('Length of new poisoned training set: ' + str(per_tr_1.shape[0]))\n\n\n save_path = 'data/l2_del_{0}_{1}_{2}_{3}_{4}'.format( args.model, args.data, \n args.target_split, args.budget, args.rand_run)\n\n\n try :\n os.makedirs(save_path)\n except OSError as e:\n if e.errno == errno.EEXIST:\n logger.info(e)\n logger.info('Using the existing folder {0} for processed data'.format(save_path))\n else:\n raise\n\n\n new_train = per_tr_1\n num_en_or = np.unique(np.concatenate((train_data[:,0], train_data[:,2]))).shape[0]\n num_en_pos = np.unique(np.concatenate((new_train[:,0], new_train[:,2]))).shape[0]\n\n\n with open(os.path.join(save_path, 'train.txt'), 'w') as out:\n for item in new_train:\n out.write(\"%s\\n\" % \"\\t\".join(map(str, item)))\n\n out = open(os.path.join(save_path, 'train.pickle'), 'wb')\n pickle.dump(new_train.astype('uint64'), out)\n out.close()\n\n\n with open(os.path.join(save_path, 'entities_dict.json'), 'w') as f:\n f.write(json.dumps(ent_to_id) + '\\n')\n\n with open(os.path.join(save_path, 'relations_dict.json'), 'w') as f:\n f.write(json.dumps(rel_to_id) + '\\n')\n\n\n with open(os.path.join(save_path, 'valid.txt'), 'w') as out:\n for item in valid_data:\n out.write(\"%s\\n\" % \"\\t\".join(map(str, item)))\n\n out = open(os.path.join(save_path, 'valid.pickle'), 'wb')\n pickle.dump(valid_data.astype('uint64'), out)\n out.close()\n\n\n with open(os.path.join(save_path, 'test.txt'), 'w') as out:\n for item in test_data:\n out.write(\"%s\\n\" % \"\\t\".join(map(str, item)))\n\n out = open(os.path.join(save_path, 'test.pickle'), 'wb')\n pickle.dump(test_data.astype('uint64'), out)\n out.close()\n\n\n with open(os.path.join(save_path, 'stats.txt'), 'w') as f:\n f.write('Model: {0} \\n'.format(args.model))\n f.write('Data: {0} \\n'.format(args.data))\n f.write('Length of original training set: {0} \\n'. format(train_data.shape[0]))\n f.write('Length of new poisoned training set: {0} \\n'. format(new_train.shape[0]))\n f.write('Number of duplicate deletions: {0} \\n'. format(num_duplicates))\n f.write('Number of deletions ignored due to singleton nodes: {0} \\n'. format(n_ignored_edits))\n f.write('Number of entities in original training set: {0} \\n'. format(num_en_or))\n f.write('Number of entities in poisoned training set: {0} \\n'. 
format(num_en_pos))\n f.write('Length of original test set: {0} \\n'. format(test_data.shape[0]))\n f.write('---------------------------------------------------------------------- \\n')\n\n\n\n with open(os.path.join(save_path, 'influential_triples.txt'), 'w') as out:\n for item in triples_to_delete:\n out.write(\"%s\\n\" % \"\\t\".join(map(str, item)))\n\n\n with open(os.path.join(save_path, 'deletions.txt'), 'w') as out:\n for item in trips_to_delete:\n out.write(\"%s\\n\" % \"\\t\".join(map(str, item)))\n\n\n # In[ ]:\n\n\n\n\n\n # In[ ]:\n\n\n\n\n"
] | [
[
"numpy.random.default_rng",
"torch.manual_seed",
"pandas.DataFrame",
"numpy.random.seed",
"numpy.concatenate",
"torch.norm",
"numpy.isin",
"torch.cuda.is_available",
"torch.from_numpy",
"numpy.array",
"numpy.where",
"torch.sort"
]
] |
ydiller/NoMoreNMS | [
"1c1557357e5312c287f0971c840060deb1bcd039"
] | [
"tools/my_runner.py"
] | [
"# Copyright (c) OpenMMLab. All rights reserved.\nimport os.path as osp\nimport platform\nimport shutil\nimport time\nimport warnings\nimport torch\nimport mmcv\nimport wandb\nfrom mmcv.runner.hooks import HOOKS, Hook\nfrom mmcv.runner.base_runner import BaseRunner\nfrom mmcv.runner.builder import RUNNERS\nfrom mmcv.runner.checkpoint import save_checkpoint\nfrom mmcv.runner.utils import get_host_info\nimport copy\nimport logging\nimport os.path as osp\nimport warnings\nfrom abc import ABCMeta, abstractmethod\n\nimport torch\nfrom torch.optim import Optimizer\nimport mmcv\nfrom mmcv.parallel import is_module_wrapper\nfrom mmcv.runner.checkpoint import load_checkpoint\nfrom mmcv.runner.dist_utils import get_dist_info\nfrom mmcv.runner.hooks import HOOKS, Hook\nfrom mmcv.runner.log_buffer import LogBuffer\nfrom mmcv.runner.priority import Priority, get_priority\nfrom mmcv.runner.utils import get_time_str\n\n\[email protected]_module()\nclass MyRunner(BaseRunner):\n \"\"\"Epoch-based Runner.\n\n This runner train models epoch by epoch.\n \"\"\"\n def __init__(self,\n model,\n batch_processor=None,\n optimizer=None,\n work_dir=None,\n logger=None,\n meta=None,\n max_iters=None,\n max_epochs=None,\n with_wandb=None):\n if batch_processor is not None:\n if not callable(batch_processor):\n raise TypeError('batch_processor must be callable, '\n f'but got {type(batch_processor)}')\n warnings.warn(\n 'batch_processor is deprecated, please implement '\n 'train_step() and val_step() in the model instead.',\n DeprecationWarning)\n # raise an error is `batch_processor` is not None and\n # `model.train_step()` exists.\n if is_module_wrapper(model):\n _model = model.module\n else:\n _model = model\n if hasattr(_model, 'train_step') or hasattr(_model, 'val_step'):\n raise RuntimeError(\n 'batch_processor and model.train_step()/model.val_step() '\n 'cannot be both available.')\n else:\n assert hasattr(model, 'train_step')\n\n # check the type of `optimizer`\n if isinstance(optimizer, dict):\n for name, optim in optimizer.items():\n if not isinstance(optim, Optimizer):\n raise TypeError(\n f'optimizer must be a dict of torch.optim.Optimizers, '\n f'but optimizer[\"{name}\"] is a {type(optim)}')\n elif not isinstance(optimizer, Optimizer) and optimizer is not None:\n raise TypeError(\n f'optimizer must be a torch.optim.Optimizer object '\n f'or dict or None, but got {type(optimizer)}')\n\n # check the type of `logger`\n if not isinstance(logger, logging.Logger):\n raise TypeError(f'logger must be a logging.Logger object, '\n f'but got {type(logger)}')\n\n # check the type of `meta`\n if meta is not None and not isinstance(meta, dict):\n raise TypeError(\n f'meta must be a dict or None, but got {type(meta)}')\n\n self.model = model\n self.batch_processor = batch_processor\n self.optimizer = optimizer\n self.logger = logger\n self.meta = meta\n self.with_wandb = with_wandb\n # create work_dir\n if mmcv.is_str(work_dir):\n self.work_dir = osp.abspath(work_dir)\n mmcv.mkdir_or_exist(self.work_dir)\n elif work_dir is None:\n self.work_dir = None\n else:\n raise TypeError('\"work_dir\" must be a str or None')\n\n # get model name from the model class\n if hasattr(self.model, 'module'):\n self._model_name = self.model.module.__class__.__name__\n else:\n self._model_name = self.model.__class__.__name__\n\n self._rank, self._world_size = get_dist_info()\n self.timestamp = get_time_str()\n self.mode = None\n self._hooks = []\n self._epoch = 0\n self._iter = 0\n self._inner_iter = 0\n\n if max_epochs is not None and 
max_iters is not None:\n raise ValueError(\n 'Only one of `max_epochs` or `max_iters` can be set.')\n\n self._max_epochs = max_epochs\n self._max_iters = max_iters\n # TODO: Redesign LogBuffer, it is not flexible and elegant enough\n self.log_buffer = LogBuffer()\n\n def register_optimizer_hook(self, optimizer_config):\n if optimizer_config is None:\n return\n if isinstance(optimizer_config, dict):\n optimizer_config.setdefault('type', 'MyHook')\n hook = mmcv.build_from_cfg(optimizer_config, HOOKS)\n else:\n hook = optimizer_config\n self.register_hook(hook, priority='ABOVE_NORMAL')\n\n\n def run_iter(self, data_batch, train_mode, **kwargs):\n if self.batch_processor is not None:\n outputs = self.batch_processor(\n self.model, data_batch, train_mode=train_mode, **kwargs)\n elif train_mode:\n outputs = self.model.train_step(data_batch, self.optimizer,\n **kwargs)\n else:\n outputs = self.model.val_step(data_batch, self.optimizer, **kwargs)\n if not isinstance(outputs, dict):\n raise TypeError('\"batch_processor()\" or \"model.train_step()\"'\n 'and \"model.val_step()\" must return a dict')\n if 'log_vars' in outputs:\n self.log_buffer.update(outputs['log_vars'], outputs['num_samples'])\n self.outputs = outputs\n\n def train(self, data_loader, **kwargs):\n self.model.train()\n self.mode = 'train'\n self.data_loader = data_loader\n self._max_iters = self._max_epochs * len(self.data_loader)\n self.call_hook('before_train_epoch')\n time.sleep(2) # Prevent possible deadlock during epoch transition\n for i, data_batch in enumerate(self.data_loader):\n self._inner_iter = i\n self.call_hook('before_train_iter')\n self.run_iter(data_batch, train_mode=True, **kwargs)\n self.call_hook('after_train_iter')\n self._iter += 1\n\n self.call_hook('after_train_epoch')\n self._epoch += 1\n\n @torch.no_grad()\n def val(self, data_loader, **kwargs):\n self.model.eval()\n self.mode = 'val'\n self.data_loader = data_loader\n self.call_hook('before_val_epoch')\n time.sleep(2) # Prevent possible deadlock during epoch transition\n for i, data_batch in enumerate(self.data_loader):\n self._inner_iter = i\n self.call_hook('before_val_iter')\n self.run_iter(data_batch, train_mode=False)\n self.call_hook('after_val_iter')\n\n self.call_hook('after_val_epoch')\n if torch.distributed.is_initialized():\n if torch.distributed.get_rank() == 0:\n if self.with_wandb:\n wandb.log({\"CE val loss\": sum(self.log_buffer.val_history['loss_deepsets_ce'])/\n len(self.log_buffer.val_history['loss_deepsets_ce']),\n \"val ds_acc\": sum(self.log_buffer.val_history['ds_acc'])/\n len(self.log_buffer.val_history['ds_acc']),\n \"val iou_error\": sum(self.log_buffer.val_history['iou_error'])/len(self.log_buffer.val_history['iou_error']),\n \"val max score predictions\": sum(self.log_buffer.val_history['ds_pred_on_max'])/\n len(self.log_buffer.val_history['ds_pred_on_max'])\n })\n else: # single gpu\n if self.with_wandb:\n wandb.log({\"CE val loss\": sum(self.log_buffer.val_history['loss_deepsets_ce']) /\n len(self.log_buffer.val_history['loss_deepsets_ce']),\n \"val ds_acc\": sum(self.log_buffer.val_history['ds_acc']) /\n len(self.log_buffer.val_history['ds_acc']),\n \"val iou_error\": sum(self.log_buffer.val_history['iou_error']) / len(\n self.log_buffer.val_history['iou_error']),\n \"val max score predictions\": sum(self.log_buffer.val_history['ds_pred_on_max']) /\n len(self.log_buffer.val_history['ds_pred_on_max'])})\n\n\n def run(self, data_loaders, workflow, max_epochs=None, **kwargs):\n \"\"\"Start running.\n\n Args:\n data_loaders 
(list[:obj:`DataLoader`]): Dataloaders for training\n and validation.\n workflow (list[tuple]): A list of (phase, epochs) to specify the\n running order and epochs. E.g, [('train', 2), ('val', 1)] means\n running 2 epochs for training and 1 epoch for validation,\n iteratively.\n \"\"\"\n assert isinstance(data_loaders, list)\n assert mmcv.is_list_of(workflow, tuple)\n assert len(data_loaders) == len(workflow)\n if max_epochs is not None:\n warnings.warn(\n 'setting max_epochs in run is deprecated, '\n 'please set max_epochs in runner_config', DeprecationWarning)\n self._max_epochs = max_epochs\n\n assert self._max_epochs is not None, (\n 'max_epochs must be specified during instantiation')\n\n for i, flow in enumerate(workflow):\n mode, epochs = flow\n if mode == 'train':\n self._max_iters = self._max_epochs * len(data_loaders[i])\n break\n\n work_dir = self.work_dir if self.work_dir is not None else 'NONE'\n self.logger.info('Start running, host: %s, work_dir: %s',\n get_host_info(), work_dir)\n self.logger.info('Hooks will be executed in the following order:\\n%s',\n self.get_hook_info())\n self.logger.info('workflow: %s, max: %d epochs', workflow,\n self._max_epochs)\n self.call_hook('before_run')\n\n while self.epoch < self._max_epochs:\n for i, flow in enumerate(workflow):\n mode, epochs = flow\n if isinstance(mode, str): # self.train()\n if not hasattr(self, mode):\n raise ValueError(\n f'runner has no method named \"{mode}\" to run an '\n 'epoch')\n epoch_runner = getattr(self, mode)\n else:\n raise TypeError(\n 'mode in workflow must be a str, but got {}'.format(\n type(mode)))\n\n for _ in range(epochs):\n if mode == 'train' and self.epoch >= self._max_epochs:\n break\n epoch_runner(data_loaders[i], **kwargs)\n\n time.sleep(1) # wait for some hooks like loggers to finish\n self.call_hook('after_run')\n\n def save_checkpoint(self,\n out_dir,\n filename_tmpl='end2end_epoch_{}.pth',\n save_optimizer=True,\n meta=None,\n create_symlink=True):\n \"\"\"Save the checkpoint.\n\n Args:\n out_dir (str): The directory that checkpoints are saved.\n filename_tmpl (str, optional): The checkpoint filename template,\n which contains a placeholder for the epoch number.\n Defaults to 'epoch_{}.pth'.\n save_optimizer (bool, optional): Whether to save the optimizer to\n the checkpoint. Defaults to True.\n meta (dict, optional): The meta information to be saved in the\n checkpoint. 
Defaults to None.\n create_symlink (bool, optional): Whether to create a symlink\n \"latest.pth\" to point to the latest checkpoint.\n Defaults to True.\n \"\"\"\n if meta is None:\n meta = {}\n elif not isinstance(meta, dict):\n raise TypeError(\n f'meta should be a dict or None, but got {type(meta)}')\n if self.meta is not None:\n meta.update(self.meta)\n # Note: meta.update(self.meta) should be done before\n # meta.update(epoch=self.epoch + 1, iter=self.iter) otherwise\n # there will be problems with resumed checkpoints.\n # More details in https://github.com/open-mmlab/mmcv/pull/1108\n meta.update(epoch=self.epoch + 1, iter=self.iter)\n\n filename = filename_tmpl.format(self.epoch + 1)\n filepath = osp.join(out_dir, filename)\n optimizer = self.optimizer if save_optimizer else None\n save_checkpoint(self.model, filepath, optimizer=optimizer, meta=meta)\n # in some environments, `os.symlink` is not supported, you may need to\n # set `create_symlink` to False\n if create_symlink:\n dst_file = osp.join(out_dir, 'latest.pth')\n if platform.system() != 'Windows':\n mmcv.symlink(filename, dst_file)\n else:\n shutil.copy(filepath, dst_file)\n\n\n# @RUNNERS.register_module()\n# class Runner(MyRunner):\n# \"\"\"Deprecated name of EpochBasedRunner.\"\"\"\n#\n# def __init__(self, *args, **kwargs):\n# warnings.warn(\n# 'Runner was deprecated, please use EpochBasedRunner instead',\n# DeprecationWarning)\n# super().__init__(*args, **kwargs)\n"
] | [
[
"torch.distributed.is_initialized",
"torch.no_grad",
"torch.distributed.get_rank"
]
] |
MortonWang/geo_IF | [
"4e27aeb9e005cdfb151777bc730de6d8372d1b7f"
] | [
"data_process/kdtree.py"
] | [
"# -*- coding:utf-8 -*-\nimport copy\nimport numpy as np\n\nfrom scipy._lib.six import xrange\n\n\nclass KDTree:\n def __init__(self, bucket_size, dimensions, parent=None):\n self.bucket_size = bucket_size\n self.parent = None\n self.left = None\n self.right = None\n self.split_dimension = None\n self.split_value = None\n self.index_locations = []\n self.location_count = 0\n self.min_limit = [np.Inf] * dimensions \n self.max_limit = [-np.Inf] * dimensions\n self.dimensions = dimensions\n \n def get_leaf(self, location):\n if not self.left and not self.right:\n return self\n elif location[self.split_dimension] <= self.split_value:\n return self.left.get_leaf(location)\n else:\n return self.right.get_leaf(location) \n \n def add_point(self, index_location_tuple):\n self.index_locations.append(index_location_tuple)\n self.location_count += 1\n self.extendBounds(index_location_tuple[1])\n self.min_boundary = copy.deepcopy(self.min_limit)\n self.max_boundary = copy.deepcopy(self.max_limit)\n \n def extendBounds(self, location):\n # empty\n if self.min_limit == None:\n self.min_limit = copy.deepcopy(location)\n self.max_limit = copy.deepcopy(location)\n return\n for i in xrange(self.dimensions):\n self.min_limit[i] = min(self.min_limit[i], location[i])\n self.max_limit[i] = max(self.max_limit[i], location[i])\n \n def findWidestAxis(self):\n widths = [self.max_limit[i] - self.min_limit[i] for i in range(self.dimensions)]\n widest_axis = np.argmax(widths)\n return widest_axis\n\n def getNodes(self):\n nodes = []\n self.getNodesHelper(nodes)\n return nodes\n \n def getNodesHelper(self, nodes):\n nodes.append(self)\n if self.left:\n self.left.getNodesHelper(nodes)\n if self.right:\n self.right.getNodesHelper(nodes)\n \n def getLeaves(self):\n leaves = []\n self.getLeavesHelper(leaves)\n return leaves\n \n def getLeavesHelper(self, leaves):\n if not self.right and not self.left:\n leaves.append(self)\n else:\n if self.left:\n self.left.getLeavesHelper(leaves)\n if self.right:\n self.right.getLeavesHelper(leaves)\n \n def balance(self):\n self.nodeSplit(self)\n \n def nodeSplit(self, cursor, empty_non_leaf=True):\n if cursor.location_count > cursor.bucket_size:\n cursor.split_dimension = cursor.findWidestAxis()\n # the partition method is the median of all values in the widest dimension\n cursor.split_value = np.median([cursor.index_locations[i][1][cursor.split_dimension] for i in range(cursor.location_count)])\n # if width is 0 (all the values are the same) don't partition\n if cursor.min_limit[cursor.split_dimension] == cursor.max_limit[cursor.split_dimension]:\n return\n # Don't let the split value be the same as the upper value as\n # can happen due to rounding errors!\n if cursor.split_value == cursor.max_limit[cursor.split_dimension]:\n cursor.split_value = cursor.min_limit[cursor.split_dimension]\n cursor.left = KDTree(bucket_size=cursor.bucket_size, dimensions=cursor.dimensions, parent=cursor)\n cursor.right = KDTree(bucket_size=cursor.bucket_size, dimensions=cursor.dimensions, parent=cursor)\n \n cursor.left.min_boundary = copy.deepcopy(cursor.min_boundary)\n cursor.left.max_boundary = copy.deepcopy(cursor.max_boundary)\n cursor.right.min_boundary = copy.deepcopy(cursor.min_boundary)\n cursor.right.max_boundary = copy.deepcopy(cursor.max_boundary)\n cursor.left.max_boundary[cursor.split_dimension] = cursor.split_value\n cursor.right.min_boundary[cursor.split_dimension] = cursor.split_value\n \n for index_loc in cursor.index_locations:\n if index_loc[1][cursor.split_dimension] > 
cursor.split_value:\n cursor.right.index_locations.append(index_loc)\n cursor.right.location_count += 1\n cursor.right.extendBounds(index_loc[1])\n else:\n cursor.left.index_locations.append(index_loc)\n cursor.left.location_count += 1\n cursor.left.extendBounds(index_loc[1])\n if empty_non_leaf:\n cursor.index_locations = []\n cursor.nodeSplit(cursor.left)\n cursor.nodeSplit(cursor.right)\n\n\nclass KDTreeClustering:\n def __init__(self, bucket_size=10):\n self.bucket_size = bucket_size\n self.is_fitted = False\n \n def fit(self, X):\n # X is an array\n if hasattr(X, 'shape'):\n n_samples = X.shape[0]\n dimensions = X.shape[1]\n else:\n n_samples = len(X)\n dimensions = len(X[0])\n \n self.kdtree = KDTree(bucket_size=self.bucket_size, dimensions=dimensions, parent=None)\n for i in xrange(n_samples):\n self.kdtree.add_point((i, X[i]))\n self.kdtree.nodeSplit(cursor=self.kdtree, empty_non_leaf=True)\n self.clusters = [leave.index_locations for leave in self.kdtree.getLeaves()]\n clusters = [cluster.index_locations for cluster in self.kdtree.getLeaves()]\n results = np.zeros((n_samples,), dtype=int)\n for i, id_locs in enumerate(clusters):\n for id, l in id_locs:\n results[id] = i\n self.clusters = results\n self.num_clusters = len(clusters)\n self.is_fitted = True\n \n def get_clusters(self):\n if self.is_fitted:\n return self.clusters\n\n\nif __name__ == '__main__':\n # tree = KDTree(300, 2)\n import params\n import geolocate\n geolocate.initialize(granularity=params.BUCKET_SIZE, write=False, readText=True, reload_init=False, regression=False)\n locations = [geolocate.locationStr2Float(loc) for loc in params.trainUsers.values()]\n clusterer = KDTreeClustering(bucket_size=params.BUCKET_SIZE)\n clusterer.fit(locations)\n clusters = clusterer.get_clusters()\n"
] | [
[
"numpy.zeros",
"numpy.argmax",
"scipy._lib.six.xrange"
]
] |
PIN-devel/inside-kids | [
"554e4a0a5654c9a0f5237b904bb2ca6db88a55cb"
] | [
"contents/tts/content/TensorflowTTS/tensorflow_tts/utils/group_conv.py"
] | [
"# -*- coding: utf-8 -*-\n# This code is copy from https://github.com/tensorflow/tensorflow/pull/36773.\n\"\"\"Group Convolution Modules.\"\"\"\n\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.keras import activations\nfrom tensorflow.python.keras import constraints\nfrom tensorflow.python.keras import initializers\nfrom tensorflow.python.keras import regularizers\nfrom tensorflow.python.keras.engine.base_layer import Layer\nfrom tensorflow.python.keras.engine.input_spec import InputSpec\n\nfrom tensorflow.python.keras.utils import conv_utils\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import nn\nfrom tensorflow.python.ops import nn_ops\n\nfrom tensorflow.python.keras.layers import Conv1D\nfrom tensorflow.python.keras.layers import SeparableConv1D\n\n\nclass Convolution(object):\n \"\"\"Helper class for convolution.\n Note that this class assumes that shapes of input and filter passed to\n __call__ are compatible with input_shape and filter_shape passed to the\n constructor.\n Arguments\n input_shape: static shape of input. i.e. input.get_shape().\n filter_shape: static shape of the filter. i.e. filter.get_shape().\n padding: see convolution.\n strides: see convolution.\n dilation_rate: see convolution.\n name: see convolution.\n data_format: see convolution.\n \"\"\"\n\n def __init__(\n self,\n input_shape,\n filter_shape,\n padding,\n strides=None,\n dilation_rate=None,\n name=None,\n data_format=None,\n ):\n \"\"\"Helper function for convolution.\"\"\"\n num_total_dims = filter_shape.ndims\n if num_total_dims is None:\n num_total_dims = input_shape.ndims\n if num_total_dims is None:\n raise ValueError(\"rank of input or filter must be known\")\n\n num_spatial_dims = num_total_dims - 2\n\n try:\n input_shape.with_rank(num_spatial_dims + 2)\n except ValueError:\n raise ValueError(\"input tensor must have rank %d\" % (num_spatial_dims + 2))\n\n try:\n filter_shape.with_rank(num_spatial_dims + 2)\n except ValueError:\n raise ValueError(\"filter tensor must have rank %d\" % (num_spatial_dims + 2))\n\n if data_format is None or not data_format.startswith(\"NC\"):\n input_channels_dim = tensor_shape.dimension_at_index(\n input_shape, num_spatial_dims + 1\n )\n spatial_dims = range(1, num_spatial_dims + 1)\n else:\n input_channels_dim = tensor_shape.dimension_at_index(input_shape, 1)\n spatial_dims = range(2, num_spatial_dims + 2)\n\n filter_dim = tensor_shape.dimension_at_index(filter_shape, num_spatial_dims)\n if not (input_channels_dim % filter_dim).is_compatible_with(0):\n raise ValueError(\n \"number of input channels is not divisible by corresponding \"\n \"dimension of filter, {} % {} != 0\".format(\n input_channels_dim, filter_dim\n )\n )\n\n strides, dilation_rate = nn_ops._get_strides_and_dilation_rate(\n num_spatial_dims, strides, dilation_rate\n )\n\n self.input_shape = input_shape\n self.filter_shape = filter_shape\n self.data_format = data_format\n self.strides = strides\n self.padding = padding\n self.name = name\n self.dilation_rate = dilation_rate\n self.conv_op = nn_ops._WithSpaceToBatch(\n input_shape,\n dilation_rate=dilation_rate,\n padding=padding,\n build_op=self._build_op,\n filter_shape=filter_shape,\n spatial_dims=spatial_dims,\n data_format=data_format,\n )\n\n def _build_op(self, _, padding):\n return nn_ops._NonAtrousConvolution(\n self.input_shape,\n filter_shape=self.filter_shape,\n padding=padding,\n data_format=self.data_format,\n strides=self.strides,\n name=self.name,\n )\n\n def __call__(self, inp, 
filter):\n return self.conv_op(inp, filter)\n\n\nclass Conv(Layer):\n \"\"\"Abstract N-D convolution layer (private, used as implementation base).\n This layer creates a convolution kernel that is convolved\n (actually cross-correlated) with the layer input to produce a tensor of\n outputs. If `use_bias` is True (and a `bias_initializer` is provided),\n a bias vector is created and added to the outputs. Finally, if\n `activation` is not `None`, it is applied to the outputs as well.\n Note: layer attributes cannot be modified after the layer has been called\n once (except the `trainable` attribute).\n Arguments:\n rank: An integer, the rank of the convolution, e.g. \"2\" for 2D convolution.\n filters: Integer, the dimensionality of the output space (i.e. the number\n of filters in the convolution).\n kernel_size: An integer or tuple/list of n integers, specifying the\n length of the convolution window.\n strides: An integer or tuple/list of n integers,\n specifying the stride length of the convolution.\n Specifying any stride value != 1 is incompatible with specifying\n any `dilation_rate` value != 1.\n padding: One of `\"valid\"`, `\"same\"`, or `\"causal\"` (case-insensitive).\n data_format: A string, one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch_size, ..., channels)` while `channels_first` corresponds to\n inputs with shape `(batch_size, channels, ...)`.\n dilation_rate: An integer or tuple/list of n integers, specifying\n the dilation rate to use for dilated convolution.\n Currently, specifying any `dilation_rate` value != 1 is\n incompatible with specifying any `strides` value != 1.\n groups: Integer, the number of channel groups controlling the connections\n between inputs and outputs. Input channels and `filters` must both be\n divisible by `groups`. For example,\n - At `groups=1`, all inputs are convolved to all outputs.\n - At `groups=2`, the operation becomes equivalent to having two\n convolutional layers side by side, each seeing half the input\n channels, and producing half the output channels, and both\n subsequently concatenated.\n - At `groups=input_channels`, each input channel is convolved with its\n own set of filters, of size `input_channels / filters`\n activation: Activation function to use.\n If you don't specify anything, no activation is applied.\n use_bias: Boolean, whether the layer uses a bias.\n kernel_initializer: An initializer for the convolution kernel.\n bias_initializer: An initializer for the bias vector. If None, the default\n initializer will be used.\n kernel_regularizer: Optional regularizer for the convolution kernel.\n bias_regularizer: Optional regularizer for the bias vector.\n activity_regularizer: Optional regularizer function for the output.\n kernel_constraint: Optional projection function to be applied to the\n kernel after being updated by an `Optimizer` (e.g. used to implement\n norm constraints or value constraints for layer weights). The function\n must take as input the unprojected variable and must return the\n projected variable (which must have the same shape). 
Constraints are\n not safe to use when doing asynchronous distributed training.\n bias_constraint: Optional projection function to be applied to the\n bias after being updated by an `Optimizer`.\n trainable: Boolean, if `True` the weights of this layer will be marked as\n trainable (and listed in `layer.trainable_weights`).\n name: A string, the name of the layer.\n \"\"\"\n\n def __init__(\n self,\n rank,\n filters,\n kernel_size,\n strides=1,\n padding=\"valid\",\n data_format=None,\n dilation_rate=1,\n groups=1,\n activation=None,\n use_bias=True,\n kernel_initializer=\"glorot_uniform\",\n bias_initializer=\"zeros\",\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n trainable=True,\n name=None,\n **kwargs\n ):\n super(Conv, self).__init__(\n trainable=trainable,\n name=name,\n activity_regularizer=regularizers.get(activity_regularizer),\n **kwargs\n )\n self.rank = rank\n if filters is not None and not isinstance(filters, int):\n filters = int(filters)\n self.filters = filters\n self.groups = groups or 1\n if filters is not None and filters % self.groups != 0:\n raise ValueError(\n \"The number of filters must be evenly divisible by the number of \"\n \"groups. Received: groups={}, filters={}\".format(groups, filters)\n )\n self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank, \"kernel_size\")\n if not all(self.kernel_size):\n raise ValueError(\n \"The argument `kernel_size` cannot contain 0(s). \"\n \"Received: %s\" % (kernel_size,)\n )\n self.strides = conv_utils.normalize_tuple(strides, rank, \"strides\")\n self.padding = conv_utils.normalize_padding(padding)\n if self.padding == \"causal\" and not isinstance(self, (Conv1D, SeparableConv1D)):\n raise ValueError(\n \"Causal padding is only supported for `Conv1D`\"\n \"and ``SeparableConv1D`.\"\n )\n self.data_format = conv_utils.normalize_data_format(data_format)\n self.dilation_rate = conv_utils.normalize_tuple(\n dilation_rate, rank, \"dilation_rate\"\n )\n self.activation = activations.get(activation)\n self.use_bias = use_bias\n self.kernel_initializer = initializers.get(kernel_initializer)\n self.bias_initializer = initializers.get(bias_initializer)\n self.kernel_regularizer = regularizers.get(kernel_regularizer)\n self.bias_regularizer = regularizers.get(bias_regularizer)\n self.kernel_constraint = constraints.get(kernel_constraint)\n self.bias_constraint = constraints.get(bias_constraint)\n self.input_spec = InputSpec(ndim=self.rank + 2)\n\n def build(self, input_shape):\n input_shape = tensor_shape.TensorShape(input_shape)\n input_channel = self._get_input_channel(input_shape)\n if input_channel % self.groups != 0:\n raise ValueError(\n \"The number of input channels must be evenly divisible by the number \"\n \"of groups. 
Received groups={}, but the input has {} channels \"\n \"(full input shape is {}).\".format(\n self.groups, input_channel, input_shape\n )\n )\n kernel_shape = self.kernel_size + (input_channel // self.groups, self.filters)\n\n self.kernel = self.add_weight(\n name=\"kernel\",\n shape=kernel_shape,\n initializer=self.kernel_initializer,\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint,\n trainable=True,\n dtype=self.dtype,\n )\n if self.use_bias:\n self.bias = self.add_weight(\n name=\"bias\",\n shape=(self.filters,),\n initializer=self.bias_initializer,\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint,\n trainable=True,\n dtype=self.dtype,\n )\n else:\n self.bias = None\n channel_axis = self._get_channel_axis()\n self.input_spec = InputSpec(\n ndim=self.rank + 2, axes={channel_axis: input_channel}\n )\n\n self._build_conv_op_input_shape = input_shape\n self._build_input_channel = input_channel\n self._padding_op = self._get_padding_op()\n self._conv_op_data_format = conv_utils.convert_data_format(\n self.data_format, self.rank + 2\n )\n self._convolution_op = Convolution(\n input_shape,\n filter_shape=self.kernel.shape,\n dilation_rate=self.dilation_rate,\n strides=self.strides,\n padding=self._padding_op,\n data_format=self._conv_op_data_format,\n )\n self.built = True\n\n def call(self, inputs):\n if self._recreate_conv_op(inputs):\n self._convolution_op = Convolution(\n inputs.get_shape(),\n filter_shape=self.kernel.shape,\n dilation_rate=self.dilation_rate,\n strides=self.strides,\n padding=self._padding_op,\n data_format=self._conv_op_data_format,\n )\n self._build_conv_op_input_shape = inputs.get_shape()\n\n # Apply causal padding to inputs for Conv1D.\n if self.padding == \"causal\" and self.__class__.__name__ == \"Conv1D\":\n inputs = array_ops.pad(inputs, self._compute_causal_padding())\n\n outputs = self._convolution_op(inputs, self.kernel)\n\n if self.use_bias:\n if self.data_format == \"channels_first\":\n if self.rank == 1:\n # nn.bias_add does not accept a 1D input tensor.\n bias = array_ops.reshape(self.bias, (1, self.filters, 1))\n outputs += bias\n else:\n outputs = nn.bias_add(outputs, self.bias, data_format=\"NCHW\")\n else:\n outputs = nn.bias_add(outputs, self.bias, data_format=\"NHWC\")\n\n if self.activation is not None:\n return self.activation(outputs)\n return outputs\n\n def compute_output_shape(self, input_shape):\n input_shape = tensor_shape.TensorShape(input_shape).as_list()\n if self.data_format == \"channels_last\":\n space = input_shape[1:-1]\n new_space = []\n for i in range(len(space)):\n new_dim = conv_utils.conv_output_length(\n space[i],\n self.kernel_size[i],\n padding=self.padding,\n stride=self.strides[i],\n dilation=self.dilation_rate[i],\n )\n new_space.append(new_dim)\n return tensor_shape.TensorShape(\n [input_shape[0]] + new_space + [self.filters]\n )\n else:\n space = input_shape[2:]\n new_space = []\n for i in range(len(space)):\n new_dim = conv_utils.conv_output_length(\n space[i],\n self.kernel_size[i],\n padding=self.padding,\n stride=self.strides[i],\n dilation=self.dilation_rate[i],\n )\n new_space.append(new_dim)\n return tensor_shape.TensorShape([input_shape[0], self.filters] + new_space)\n\n def get_config(self):\n config = {\n \"filters\": self.filters,\n \"kernel_size\": self.kernel_size,\n \"strides\": self.strides,\n \"padding\": self.padding,\n \"data_format\": self.data_format,\n \"dilation_rate\": self.dilation_rate,\n \"groups\": self.groups,\n \"activation\": 
activations.serialize(self.activation),\n \"use_bias\": self.use_bias,\n \"kernel_initializer\": initializers.serialize(self.kernel_initializer),\n \"bias_initializer\": initializers.serialize(self.bias_initializer),\n \"kernel_regularizer\": regularizers.serialize(self.kernel_regularizer),\n \"bias_regularizer\": regularizers.serialize(self.bias_regularizer),\n \"activity_regularizer\": regularizers.serialize(self.activity_regularizer),\n \"kernel_constraint\": constraints.serialize(self.kernel_constraint),\n \"bias_constraint\": constraints.serialize(self.bias_constraint),\n }\n base_config = super(Conv, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n def _compute_causal_padding(self):\n \"\"\"Calculates padding for 'causal' option for 1-d conv layers.\"\"\"\n left_pad = self.dilation_rate[0] * (self.kernel_size[0] - 1)\n if self.data_format == \"channels_last\":\n causal_padding = [[0, 0], [left_pad, 0], [0, 0]]\n else:\n causal_padding = [[0, 0], [0, 0], [left_pad, 0]]\n return causal_padding\n\n def _get_channel_axis(self):\n if self.data_format == \"channels_first\":\n return 1\n else:\n return -1\n\n def _get_input_channel(self, input_shape):\n channel_axis = self._get_channel_axis()\n if input_shape.dims[channel_axis].value is None:\n raise ValueError(\n \"The channel dimension of the inputs \"\n \"should be defined. Found `None`.\"\n )\n return int(input_shape[channel_axis])\n\n def _get_padding_op(self):\n if self.padding == \"causal\":\n op_padding = \"valid\"\n else:\n op_padding = self.padding\n if not isinstance(op_padding, (list, tuple)):\n op_padding = op_padding.upper()\n return op_padding\n\n def _recreate_conv_op(self, inputs):\n \"\"\"Recreate conv_op if necessary.\n Check if the input_shape in call() is different from that in build().\n For the values that are not None, if they are different, recreate\n the _convolution_op to avoid the stateful behavior.\n Args:\n inputs: The input data to call() method.\n Returns:\n `True` or `False` to indicate whether to recreate the conv_op.\n \"\"\"\n call_input_shape = inputs.get_shape()\n for axis in range(1, len(call_input_shape)):\n if (\n call_input_shape[axis] is not None\n and self._build_conv_op_input_shape[axis] is not None\n and call_input_shape[axis] != self._build_conv_op_input_shape[axis]\n ):\n return True\n return False\n\n\nclass GroupConv1D(Conv):\n \"\"\"1D convolution layer (e.g. temporal convolution).\n This layer creates a convolution kernel that is convolved\n with the layer input over a single spatial (or temporal) dimension\n to produce a tensor of outputs.\n If `use_bias` is True, a bias vector is created and added to the outputs.\n Finally, if `activation` is not `None`,\n it is applied to the outputs as well.\n When using this layer as the first layer in a model,\n provide an `input_shape` argument\n (tuple of integers or `None`, e.g.\n `(10, 128)` for sequences of 10 vectors of 128-dimensional vectors,\n or `(None, 128)` for variable-length sequences of 128-dimensional vectors.\n Examples:\n >>> # The inputs are 128-length vectors with 10 timesteps, and the batch size\n >>> # is 4.\n >>> input_shape = (4, 10, 128)\n >>> x = tf.random.normal(input_shape)\n >>> y = tf.keras.layers.Conv1D(\n ... 32, 3, activation='relu',input_shape=input_shape)(x)\n >>> print(y.shape)\n (4, 8, 32)\n Arguments:\n filters: Integer, the dimensionality of the output space\n (i.e. 
the number of output filters in the convolution).\n kernel_size: An integer or tuple/list of a single integer,\n specifying the length of the 1D convolution window.\n strides: An integer or tuple/list of a single integer,\n specifying the stride length of the convolution.\n Specifying any stride value != 1 is incompatible with specifying\n any `dilation_rate` value != 1.\n padding: One of `\"valid\"`, `\"causal\"` or `\"same\"` (case-insensitive).\n `\"causal\"` results in causal (dilated) convolutions, e.g. `output[t]`\n does not depend on `input[t+1:]`. Useful when modeling temporal data\n where the model should not violate the temporal order.\n See [WaveNet: A Generative Model for Raw Audio, section\n 2.1](https://arxiv.org/abs/1609.03499).\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n groups: Integer, the number of channel groups controlling the connections\n between inputs and outputs. Input channels and `filters` must both be\n divisible by `groups`. For example,\n - At `groups=1`, all inputs are convolved to all outputs.\n - At `groups=2`, the operation becomes equivalent to having two\n convolutional layers side by side, each seeing half the input\n channels, and producing half the output channels, and both\n subsequently concatenated.\n - At `groups=input_channels`, each input channel is convolved with its\n own set of filters, of size `input_channels / filters`\n dilation_rate: an integer or tuple/list of a single integer, specifying\n the dilation rate to use for dilated convolution.\n Currently, specifying any `dilation_rate` value != 1 is\n incompatible with specifying any `strides` value != 1.\n activation: Activation function to use.\n If you don't specify anything, no activation is applied (\n see `keras.activations`).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix (\n see `keras.initializers`).\n bias_initializer: Initializer for the bias vector (\n see `keras.initializers`).\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix (see `keras.regularizers`).\n bias_regularizer: Regularizer function applied to the bias vector (\n see `keras.regularizers`).\n activity_regularizer: Regularizer function applied to\n the output of the layer (its \"activation\") (\n see `keras.regularizers`).\n kernel_constraint: Constraint function applied to the kernel matrix (\n see `keras.constraints`).\n bias_constraint: Constraint function applied to the bias vector (\n see `keras.constraints`).\n Input shape:\n 3D tensor with shape: `(batch_size, steps, input_dim)`\n Output shape:\n 3D tensor with shape: `(batch_size, new_steps, filters)`\n `steps` value might have changed due to padding or strides.\n Returns:\n A tensor of rank 3 representing\n `activation(conv1d(inputs, kernel) + bias)`.\n Raises:\n ValueError: when both `strides` > 1 and `dilation_rate` > 1.\n \"\"\"\n\n def __init__(\n self,\n filters,\n kernel_size,\n strides=1,\n padding=\"valid\",\n data_format=\"channels_last\",\n dilation_rate=1,\n groups=1,\n activation=None,\n use_bias=True,\n kernel_initializer=\"glorot_uniform\",\n bias_initializer=\"zeros\",\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n **kwargs\n ):\n super().__init__(\n rank=1,\n filters=filters,\n kernel_size=kernel_size,\n strides=strides,\n padding=padding,\n data_format=data_format,\n dilation_rate=dilation_rate,\n 
groups=groups,\n activation=activations.get(activation),\n use_bias=use_bias,\n kernel_initializer=initializers.get(kernel_initializer),\n bias_initializer=initializers.get(bias_initializer),\n kernel_regularizer=regularizers.get(kernel_regularizer),\n bias_regularizer=regularizers.get(bias_regularizer),\n activity_regularizer=regularizers.get(activity_regularizer),\n kernel_constraint=constraints.get(kernel_constraint),\n bias_constraint=constraints.get(bias_constraint),\n **kwargs\n )\n"
] | [
[
"tensorflow.python.ops.nn_ops._WithSpaceToBatch",
"tensorflow.python.keras.constraints.get",
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.keras.initializers.serialize",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.keras.activations.get",
"tensorflow.python.keras.regularizers.serialize",
"tensorflow.python.ops.nn_ops._NonAtrousConvolution",
"tensorflow.python.ops.nn_ops._get_strides_and_dilation_rate",
"tensorflow.python.ops.nn.bias_add",
"tensorflow.python.keras.utils.conv_utils.normalize_padding",
"tensorflow.python.keras.utils.conv_utils.conv_output_length",
"tensorflow.python.keras.utils.conv_utils.convert_data_format",
"tensorflow.python.keras.regularizers.get",
"tensorflow.python.keras.utils.conv_utils.normalize_data_format",
"tensorflow.python.keras.utils.conv_utils.normalize_tuple",
"tensorflow.python.keras.activations.serialize",
"tensorflow.python.keras.engine.input_spec.InputSpec",
"tensorflow.python.keras.constraints.serialize",
"tensorflow.python.framework.tensor_shape.dimension_at_index",
"tensorflow.python.keras.initializers.get"
]
] |
FredericSauv/z-quantum-core | [
"f285b292159fe272d7401ba05baac7bab28475d7"
] | [
"src/python/zquantum/core/utils.py"
] | [
"\"\"\"General-purpose utilities.\"\"\"\n\nimport numpy as np\nfrom scipy.linalg import expm\nimport random\nimport math\nimport operator\nimport sys\nimport json\nimport openfermion\nfrom openfermion import hermitian_conjugated\nfrom openfermion.ops import SymbolicOperator\nfrom networkx.readwrite import json_graph\nimport lea\nimport collections\nimport scipy\nfrom typing import List\nimport importlib\n\nSCHEMA_VERSION = 'zapata-v1'\nRNDSEED = 12345\n\ndef convert_dict_to_array(dictionary: dict) -> np.ndarray:\n \"\"\"Convert a dictionary to a numpy array.\n\n Args:\n dictionary (dict): the dict containing the data\n \n Returns:\n array (numpy.array): a numpy array\n \"\"\"\n \n array = np.array(dictionary['real'])\n\n if dictionary.get('imag'):\n array = array + 1j*np.array(dictionary['imag'])\n\n return array\n\ndef convert_array_to_dict(array: np.ndarray) -> dict:\n \"\"\"Convert a numpy array to a dictionary.\n\n Args:\n array (numpy.array): a numpy array\n \n Returns:\n dictionary (dict): the dict containing the data\n \"\"\"\n\n dictionary = {}\n if np.iscomplexobj(array):\n dictionary['real'] = array.real.tolist()\n dictionary['imag'] = array.imag.tolist()\n else:\n dictionary['real'] = array.tolist()\n\n return dictionary\n\ndef dec2bin(number: int, length: int) -> List[int]:\n \"\"\"Converts a decimal number into a binary representation\n of fixed number of bits.\n\n Args:\n number: (int) the input decimal number\n length: (int) number of bits in the output string\n\n Returns:\n A list of binary numbers\n \"\"\"\n\n if pow(2,length) < number:\n sys.exit('Insufficient number of bits for representing the number {}'.format(number))\n\n bit_str = bin(number)\n bit_str = bit_str[2:len(bit_str)] # chop off the first two chars\n bit_string = [int(x) for x in list(bit_str)]\n if len(bit_string) < length:\n len_zeros = length - len(bit_string)\n bit_string = [int(x) for x in list(np.zeros(len_zeros))] + bit_string\n\n return bit_string\n\ndef bin2dec(x: List[int]) -> int:\n \"\"\"Converts a binary vector to an integer, with the 0-th\n element being the most significant digit.\n\n Args:\n x: (list) a binary vector\n\n Returns:\n An integer\n \"\"\"\n\n dec = 0\n coeff = 1\n for i in range(len(x)):\n dec = dec + coeff * x[len(x)-1-i]\n coeff = coeff * 2\n return dec\n\n\"\"\"\nThe functions PAULI_X, PAULI_Y, PAULI_Z and IDENTITY below are used for \ngenerating the generators of the Pauli group, which include Pauli X, Y, Z \noperators as well as identity operator\n\"\"\"\n\npauli_x = np.array([[0.0,1.0],[1.0,0.0]])\npauli_y = np.array([[0.0,-1.0j],[1.0j,0.0]])\npauli_z = np.array([[1.0,0.0],[0.0,-1.0]])\nidentity = np.array([[1.0,0.0],[0.0,1.0]])\n\ndef is_identity(u, tol=1e-15):\n \"\"\"Test if a matrix is identity.\n\n Args:\n u: np.ndarray\n Matrix to be checked.\n tol: float\n Threshold below which two matrix elements are considered equal.\n \"\"\"\n\n dims = np.array(u).shape\n if dims[0] != dims[1]:\n raise Exception('Input matrix is not square.')\n \n return np.allclose(u, np.eye(u.shape[0]), atol=tol)\n\ndef is_unitary(u, tol = 1e-15):\n \"\"\"Test if a matrix is unitary.\n\n Args:\n u: array\n Matrix to be checked.\n tol: float\n Threshold below which two matrix elements are considered equal.\n \"\"\"\n\n dims = np.array(u).shape\n if dims[0] != dims[1]:\n raise Exception('Input matrix is not square.')\n\n test_matrix = np.dot(hermitian_conjugated(np.array(u)), u)\n return is_identity(test_matrix, tol)\n\ndef compare_unitary(u1: np.ndarray, \n u2: np.ndarray, \n tol: float = 1e-15) 
-> bool:\n \"\"\"Compares two unitary operators to see if they are equal to within a phase.\n\n Args:\n u1 (numpy.ndarray): First unitary operator.\n u2 (numpy.ndarray): Second unitary operator.\n tol (float): Threshold below which two matrix elements are considered equal.\n \n Returns:\n bool: True if the unitaries are equal to within the tolerance, ignoring\n differences in global phase.\n \"\"\"\n\n if is_unitary(u1, tol) == False:\n raise Exception('The first input matrix is not unitary.')\n if is_unitary(u2, tol) == False:\n raise Exception('The second input matrix is not unitary.')\n \n test_matrix = np.dot(u1.conj().T, u2)\n phase = test_matrix.item((0,0))**-1\n return is_identity(phase*test_matrix, tol)\n\ndef sample_from_probability_distribution(probability_distribution: dict, n_samples: int) -> collections.Counter:\n '''\n Samples events from a discrete probability distribution\n\n Args:\n probabilty_distribution: The discrete probability distribution to be used\n for sampling. This should be a dictionary\n \n n_samples (int): The number of samples desired\n\n Returns:\n A dictionary of the outcomes sampled. The key values are the things be sampled\n and values are how many times those things appeared in the sampling\n '''\n if isinstance(probability_distribution, dict):\n prob_pmf = lea.pmf(probability_distribution)\n sampled_dict = collections.Counter(prob_pmf.random(n_samples))\n return sampled_dict\n else:\n raise RuntimeError(\"Probability distribution should be a dictionary with key value \\\n being the thing being sampled and the value being probability of getting \\\n sampled \")\n\n\ndef convert_bitstrings_to_tuples(bitstrings):\n '''Given the measured bitstrings, convert each bitstring to tuple format\n\n Args:\n bitstrings (list of strings): the measured bitstrings\n Returns:\n A list of tuples\n '''\n # Convert from bitstrings to tuple format\n measurements = []\n for bitstring in bitstrings:\n\n measurement = ()\n for char in bitstring:\n measurement = measurement + (int(char),)\n\n measurements.append(measurement)\n return measurements\n \n\ndef convert_tuples_to_bitstrings(tuples):\n '''Given a set of measurement tuples, convert each to bitstring format\n\n Args:\n tuples (list of tuples): the measurement tuples\n Returns:\n A list of bitstrings\n '''\n # Convert from tuples to bitstrings\n bitstrings = []\n for tuple_item in tuples:\n\n bitstring = \"\"\n for bit in tuple_item:\n bitstring = bitstring + str(bit)\n\n bitstrings.append(bitstring)\n return bitstrings\n\n\nclass ValueEstimate:\n \"\"\"A class representing a numerical value and its precision corresponding\n to an observable or an objective function\n\n Args:\n value (np.float): the numerical value\n precision (np.float): its precision\n\n Attributes:\n value (np.float): the numerical value\n precision (np.float): its precision\n \"\"\"\n\n def __init__(self, value, precision=None):\n self.value = value\n self.precision = precision\n \n def to_dict(self):\n \"\"\"Convert to a dictionary\"\"\"\n\n data = {'schema' : SCHEMA_VERSION + '-value_estimate'}\n if type(self.value).__module__ == np.__name__:\n data['value'] = self.value.item()\n else:\n data['value'] = self.value\n\n if type(self.precision).__module__ == np.__name__:\n data['precision'] = self.precision.item()\n else:\n data['precision'] = self.precision\n \n return data\n \n @classmethod\n def from_dict(cls, dictionary):\n \"\"\"Create an ExpectationValues object from a dictionary.\"\"\"\n\n value = dictionary['value']\n if 'precision' in 
dictionary:\n precision = dictionary['precision']\n return cls(value, precision)\n else:\n return cls(value)\n\n\ndef load_value_estimate(file):\n \"\"\"Loads value estimate from a faile.\n\n Args:\n file (str or file-like object): the name of the file, or a file-like object.\n \n Returns:\n array (numpy.array): the array\n \"\"\"\n\n if isinstance(file, str):\n with open(file, 'r') as f:\n data = json.load(f)\n else:\n data = json.load(file)\n \n return ValueEstimate.from_dict(data)\n\n\ndef save_value_estimate(value_estimate, filename):\n \"\"\"Saves value estimate to a file.\n\n Args:\n value_estimate (core.utils.ValueEstimate): the value estimate\n file (str or file-like object): the name of the file, or a file-like object\n \"\"\"\n dictionary = value_estimate.to_dict()\n dictionary['schema'] = SCHEMA_VERSION + '-value_estimate'\n\n with open(filename, 'w') as f:\n f.write(json.dumps(dictionary, indent=2))\n\n\ndef load_list(file):\n \"\"\"Load an array from a file.\n\n Args:\n file (str or file-like object): the name of the file, or a file-like object.\n \n Returns:\n array (list): the list\n \"\"\"\n\n if isinstance(file, str):\n with open(file, 'r') as f:\n data = json.load(f)\n else:\n data = json.load(file)\n \n return data['list']\n\n\ndef save_list(array, filename):\n \"\"\"Save expectation values to a file.\n\n Args:\n array (list): the list to be saved\n file (str or file-like object): the name of the file, or a file-like object\n \"\"\"\n dictionary = {}\n dictionary['schema'] = SCHEMA_VERSION + '-list'\n dictionary['list'] = array\n\n with open(filename, 'w') as f:\n f.write(json.dumps(dictionary, indent=2))\n\n\ndef create_object(specs, **kwargs):\n \"\"\"\n Creates an object based on given specs.\n Specs include information about module and function necessary to create the object, \n as well as any additional input parameters for it.\n\n Args:\n specs (dict): dictionary containing the following keys:\n module_name: specifies from which module an object comes.\n function_name: specifies the name of the function used to create object.\n \n Returns:\n object: object of any type\n \"\"\"\n module_name = specs.pop(\"module_name\")\n module = importlib.import_module(module_name)\n creator_name = specs.pop(\"function_name\")\n creator = getattr(module, creator_name)\n created_object = creator(**specs, **kwargs)\n return created_object"
] | [
[
"numpy.array",
"numpy.iscomplexobj",
"numpy.eye",
"numpy.zeros"
]
] |
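
The `compare_unitary` helper in the row above factors out a global phase by dividing `u1† u2` by its `(0, 0)` element before comparing against the identity. A minimal standalone sketch of the same check, assuming only NumPy and well-conditioned unitary inputs:

```python
import numpy as np

def equal_up_to_phase(u1, u2, tol=1e-7):
    # U1^dagger U2 equals exp(i*theta) * I when the unitaries differ only by a
    # global phase; divide out the phase of the (0, 0) element and compare
    # against the identity.
    m = u1.conj().T @ u2
    phase = m[0, 0] / abs(m[0, 0])
    return np.allclose(m / phase, np.eye(m.shape[0]), atol=tol)

h = np.array([[1, 1], [1, -1]]) / np.sqrt(2)
print(equal_up_to_phase(h, np.exp(0.3j) * h))   # True: same unitary up to phase
print(equal_up_to_phase(h, np.eye(2)))          # False
```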
yuishihara/chainerrl | [
"74901712a8ed8207b9d526d3f45b04bf22996b8d"
] | [
"examples/ale/train_nsq_ale.py"
] | [
"from __future__ import print_function\nfrom __future__ import division\nfrom __future__ import unicode_literals\nfrom __future__ import absolute_import\nfrom builtins import * # NOQA\nfrom future import standard_library\nstandard_library.install_aliases() # NOQA\n\nimport argparse\nimport os\nimport random\n\n# This prevents numpy from using multiple threads\nos.environ['OMP_NUM_THREADS'] = '1' # NOQA\n\nimport gym\ngym.undo_logger_setup() # NOQA\nfrom chainer import links as L\nimport numpy as np\n\nfrom chainerrl.action_value import DiscreteActionValue\nfrom chainerrl.agents import nsq\nfrom chainerrl import experiments\nfrom chainerrl import explorers\nfrom chainerrl import links\nfrom chainerrl import misc\nfrom chainerrl.optimizers import rmsprop_async\nfrom chainerrl import spaces\n\nimport atari_wrappers\n\n\ndef main():\n\n parser = argparse.ArgumentParser()\n parser.add_argument('processes', type=int)\n parser.add_argument('--env', type=str, default='BreakoutNoFrameskip-v4')\n parser.add_argument('--seed', type=int, default=0,\n help='Random seed [0, 2 ** 31)')\n parser.add_argument('--lr', type=float, default=7e-4)\n parser.add_argument('--steps', type=int, default=8 * 10 ** 7)\n parser.add_argument('--max-episode-len', type=int,\n default=5 * 60 * 60 // 4, # 5 minutes with 60/4 fps\n help='Maximum number of steps for each episode.')\n parser.add_argument('--final-exploration-frames',\n type=int, default=4 * 10 ** 6)\n parser.add_argument('--outdir', type=str, default='results',\n help='Directory path to save output files.'\n ' If it does not exist, it will be created.')\n parser.add_argument('--profile', action='store_true')\n parser.add_argument('--eval-interval', type=int, default=10 ** 6)\n parser.add_argument('--eval-n-runs', type=int, default=10)\n parser.add_argument('--demo', action='store_true', default=False)\n parser.add_argument('--load', type=str, default=None)\n parser.add_argument('--logging-level', type=int, default=20,\n help='Logging level. 10:DEBUG, 20:INFO etc.')\n parser.add_argument('--render', action='store_true', default=False,\n help='Render env states in a GUI window.')\n parser.add_argument('--monitor', action='store_true', default=False,\n help='Monitor env. 
Videos and additional information'\n ' are saved as output files.')\n args = parser.parse_args()\n\n import logging\n logging.basicConfig(level=args.logging_level)\n\n # Set a random seed used in ChainerRL.\n # If you use more than one processes, the results will be no longer\n # deterministic even with the same random seed.\n misc.set_random_seed(args.seed)\n\n # Set different random seeds for different subprocesses.\n # If seed=0 and processes=4, subprocess seeds are [0, 1, 2, 3].\n # If seed=1 and processes=4, subprocess seeds are [4, 5, 6, 7].\n process_seeds = np.arange(args.processes) + args.seed * args.processes\n assert process_seeds.max() < 2 ** 31\n\n args.outdir = experiments.prepare_output_dir(args, args.outdir)\n print('Output files are saved in {}'.format(args.outdir))\n\n def make_env(process_idx, test):\n # Use different random seeds for train and test envs\n process_seed = process_seeds[process_idx]\n env_seed = 2 ** 31 - 1 - process_seed if test else process_seed\n env = atari_wrappers.wrap_deepmind(\n atari_wrappers.make_atari(args.env),\n episode_life=not test,\n clip_rewards=not test)\n env.seed(int(env_seed))\n if args.monitor:\n env = gym.wrappers.Monitor(\n env, args.outdir,\n mode='evaluation' if test else 'training')\n if args.render:\n misc.env_modifiers.make_rendered(env)\n return env\n\n sample_env = make_env(0, test=False)\n action_space = sample_env.action_space\n assert isinstance(action_space, spaces.Discrete)\n\n # Define a model and its optimizer\n q_func = links.Sequence(\n links.NIPSDQNHead(),\n L.Linear(256, action_space.n),\n DiscreteActionValue)\n opt = rmsprop_async.RMSpropAsync(lr=args.lr, eps=1e-1, alpha=0.99)\n opt.setup(q_func)\n\n def phi(x):\n # Feature extractor\n return np.asarray(x, dtype=np.float32) / 255\n\n # Make process-specific agents to diversify exploration\n def make_agent(process_idx):\n # Random epsilon assignment described in the original paper\n rand = random.random()\n if rand < 0.4:\n epsilon_target = 0.1\n elif rand < 0.7:\n epsilon_target = 0.01\n else:\n epsilon_target = 0.5\n explorer = explorers.LinearDecayEpsilonGreedy(\n 1, epsilon_target, args.final_exploration_frames,\n action_space.sample)\n # Suppress the explorer logger\n explorer.logger.setLevel(logging.INFO)\n return nsq.NSQ(q_func, opt, t_max=5, gamma=0.99,\n i_target=40000,\n explorer=explorer, phi=phi)\n\n if args.demo:\n env = make_env(0, True)\n agent = make_agent(0)\n eval_stats = experiments.eval_performance(\n env=env,\n agent=agent,\n n_runs=args.eval_n_runs)\n print('n_runs: {} mean: {} median: {} stdev {}'.format(\n args.eval_n_runs, eval_stats['mean'], eval_stats['median'],\n eval_stats['stdev']))\n else:\n explorer = explorers.ConstantEpsilonGreedy(0.05, action_space.sample)\n\n # Linearly decay the learning rate to zero\n def lr_setter(env, agent, value):\n agent.optimizer.lr = value\n\n lr_decay_hook = experiments.LinearInterpolationHook(\n args.steps, args.lr, 0, lr_setter)\n\n experiments.train_agent_async(\n outdir=args.outdir,\n processes=args.processes,\n make_env=make_env,\n make_agent=make_agent,\n profile=args.profile,\n steps=args.steps,\n eval_n_runs=args.eval_n_runs,\n eval_interval=args.eval_interval,\n eval_explorer=explorer,\n max_episode_len=args.max_episode_len,\n global_step_hooks=[lr_decay_hook],\n save_best_so_far_agent=False,\n )\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.arange",
"numpy.asarray"
]
] |
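
Two small pieces of the training script above are easy to check in isolation: the per-process seed assignment and the `phi` feature scaler. A minimal sketch; the 4×84×84 uint8 frame shape is an assumption for illustration, not stated in the row:

```python
import numpy as np

# Per-process seed assignment from the script above:
# seed=1, processes=4 -> subprocess seeds [4, 5, 6, 7].
seed, processes = 1, 4
process_seeds = np.arange(processes) + seed * processes
assert process_seeds.max() < 2 ** 31
print(process_seeds.tolist())                    # [4, 5, 6, 7]

# Feature extractor phi: scale uint8 pixels into [0, 1] as float32.
frame = np.random.randint(0, 256, size=(4, 84, 84), dtype=np.uint8)
phi = np.asarray(frame, dtype=np.float32) / 255
print(phi.dtype, float(phi.max()) <= 1.0)        # float32 True
```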
mdand2000/keras-team-keras | [
"5eecd55a6f1d6d149b42f9b76aa53d4c5ab8d3eb"
] | [
"tests/keras/test_callbacks.py"
] | [
"import os\nimport multiprocessing\n\nimport numpy as np\nimport pytest\nfrom csv import reader\nfrom csv import Sniffer\nimport shutil\nfrom keras import optimizers\nfrom keras import initializers\nfrom keras import callbacks\nfrom keras.models import Sequential, Model\nfrom keras.layers import Input, Dense, Dropout, add, dot, Lambda\nfrom keras.layers.convolutional import Conv2D\nfrom keras.layers.pooling import MaxPooling2D, GlobalAveragePooling1D, GlobalAveragePooling2D\nfrom keras.utils.test_utils import get_test_data\nfrom keras.utils.test_utils import keras_test\nfrom keras import backend as K\nfrom keras.utils import np_utils\ntry:\n from unittest.mock import patch\nexcept:\n from mock import patch\n\n\ninput_dim = 2\nnum_hidden = 4\nnum_classes = 2\nbatch_size = 5\ntrain_samples = 20\ntest_samples = 20\n\n\n@keras_test\ndef test_TerminateOnNaN():\n np.random.seed(1337)\n (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,\n num_test=test_samples,\n input_shape=(input_dim,),\n classification=True,\n num_classes=num_classes)\n\n y_test = np_utils.to_categorical(y_test)\n y_train = np_utils.to_categorical(y_train)\n cbks = [callbacks.TerminateOnNaN()]\n model = Sequential()\n initializer = initializers.Constant(value=1e5)\n for _ in range(5):\n model.add(Dense(num_hidden, input_dim=input_dim, activation='relu',\n kernel_initializer=initializer))\n model.add(Dense(num_classes, activation='linear'))\n model.compile(loss='mean_squared_error',\n optimizer='rmsprop')\n\n # case 1 fit\n history = model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test), callbacks=cbks, epochs=20)\n loss = history.history['loss']\n assert len(loss) == 1\n assert loss[0] == np.inf\n\n # case 2 fit_generator\n def data_generator():\n max_batch_index = len(X_train) // batch_size\n i = 0\n while 1:\n yield (X_train[i * batch_size: (i + 1) * batch_size],\n y_train[i * batch_size: (i + 1) * batch_size])\n i += 1\n i = i % max_batch_index\n history = model.fit_generator(data_generator(),\n len(X_train),\n validation_data=(X_test, y_test),\n callbacks=cbks,\n epochs=20)\n loss = history.history['loss']\n assert len(loss) == 1\n assert loss[0] == np.inf or np.isnan(loss[0])\n\n\n@keras_test\ndef test_stop_training_csv(tmpdir):\n np.random.seed(1337)\n fp = str(tmpdir / 'test.csv')\n (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,\n num_test=test_samples,\n input_shape=(input_dim,),\n classification=True,\n num_classes=num_classes)\n\n y_test = np_utils.to_categorical(y_test)\n y_train = np_utils.to_categorical(y_train)\n cbks = [callbacks.TerminateOnNaN(), callbacks.CSVLogger(fp)]\n model = Sequential()\n for _ in range(5):\n model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))\n model.add(Dense(num_classes, activation='linear'))\n model.compile(loss='mean_squared_error',\n optimizer='rmsprop')\n\n def data_generator():\n i = 0\n max_batch_index = len(X_train) // batch_size\n tot = 0\n while 1:\n if tot > 3 * len(X_train):\n yield np.ones([batch_size, input_dim]) * np.nan, np.ones([batch_size, num_classes]) * np.nan\n else:\n yield (X_train[i * batch_size: (i + 1) * batch_size],\n y_train[i * batch_size: (i + 1) * batch_size])\n i += 1\n tot += 1\n i = i % max_batch_index\n\n history = model.fit_generator(data_generator(),\n len(X_train) // batch_size,\n validation_data=(X_test, y_test),\n callbacks=cbks,\n epochs=20)\n loss = history.history['loss']\n assert len(loss) > 1\n assert loss[-1] == np.inf or 
np.isnan(loss[-1])\n\n values = []\n with open(fp) as f:\n for x in reader(f):\n values.append(x)\n\n assert 'nan' in values[-1], 'The last epoch was not logged.'\n os.remove(fp)\n\n\n@keras_test\ndef test_ModelCheckpoint(tmpdir):\n np.random.seed(1337)\n filepath = str(tmpdir / 'checkpoint.h5')\n (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,\n num_test=test_samples,\n input_shape=(input_dim,),\n classification=True,\n num_classes=num_classes)\n y_test = np_utils.to_categorical(y_test)\n y_train = np_utils.to_categorical(y_train)\n # case 1\n monitor = 'val_loss'\n save_best_only = False\n mode = 'auto'\n\n model = Sequential()\n model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))\n model.add(Dense(num_classes, activation='softmax'))\n model.compile(loss='categorical_crossentropy',\n optimizer='rmsprop',\n metrics=['accuracy'])\n\n cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,\n save_best_only=save_best_only, mode=mode)]\n model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test), callbacks=cbks, epochs=1)\n assert os.path.isfile(filepath)\n os.remove(filepath)\n\n # case 2\n mode = 'min'\n cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,\n save_best_only=save_best_only, mode=mode)]\n model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test), callbacks=cbks, epochs=1)\n assert os.path.isfile(filepath)\n os.remove(filepath)\n\n # case 3\n mode = 'max'\n monitor = 'val_acc'\n cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,\n save_best_only=save_best_only, mode=mode)]\n model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test), callbacks=cbks, epochs=1)\n assert os.path.isfile(filepath)\n os.remove(filepath)\n\n # case 4\n save_best_only = True\n cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,\n save_best_only=save_best_only, mode=mode)]\n model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test), callbacks=cbks, epochs=1)\n assert os.path.isfile(filepath)\n os.remove(filepath)\n\n # case 5\n save_best_only = False\n period = 2\n mode = 'auto'\n filepath = 'checkpoint.{epoch:02d}.h5'\n cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,\n save_best_only=save_best_only, mode=mode,\n period=period)]\n model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test), callbacks=cbks, epochs=4)\n assert os.path.isfile(filepath.format(epoch=2))\n assert os.path.isfile(filepath.format(epoch=4))\n assert not os.path.exists(filepath.format(epoch=1))\n assert not os.path.exists(filepath.format(epoch=3))\n os.remove(filepath.format(epoch=2))\n os.remove(filepath.format(epoch=4))\n assert not tmpdir.listdir()\n\n\n@keras_test\ndef test_EarlyStopping():\n np.random.seed(1337)\n (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,\n num_test=test_samples,\n input_shape=(input_dim,),\n classification=True,\n num_classes=num_classes)\n y_test = np_utils.to_categorical(y_test)\n y_train = np_utils.to_categorical(y_train)\n model = Sequential()\n model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))\n model.add(Dense(num_classes, activation='softmax'))\n model.compile(loss='categorical_crossentropy',\n optimizer='rmsprop',\n metrics=['accuracy'])\n mode = 'max'\n monitor = 'val_acc'\n patience = 0\n cbks = [callbacks.EarlyStopping(patience=patience, monitor=monitor, mode=mode)]\n history = model.fit(X_train, y_train, 
batch_size=batch_size,\n validation_data=(X_test, y_test), callbacks=cbks, epochs=20)\n\n mode = 'auto'\n monitor = 'val_acc'\n patience = 2\n cbks = [callbacks.EarlyStopping(patience=patience, monitor=monitor, mode=mode)]\n history = model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test), callbacks=cbks, epochs=20)\n\n\n@keras_test\ndef test_EarlyStopping_reuse():\n np.random.seed(1337)\n patience = 3\n data = np.random.random((100, 1))\n labels = np.where(data > 0.5, 1, 0)\n model = Sequential((\n Dense(1, input_dim=1, activation='relu'),\n Dense(1, activation='sigmoid'),\n ))\n model.compile(optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])\n stopper = callbacks.EarlyStopping(monitor='acc', patience=patience)\n weights = model.get_weights()\n\n hist = model.fit(data, labels, callbacks=[stopper], epochs=20)\n assert len(hist.epoch) >= patience\n\n # This should allow training to go for at least `patience` epochs\n model.set_weights(weights)\n hist = model.fit(data, labels, callbacks=[stopper], epochs=20)\n assert len(hist.epoch) >= patience\n\n\n@keras_test\ndef test_EarlyStopping_patience():\n class DummyModel(object):\n def __init__(self):\n self.stop_training = False\n\n early_stop = callbacks.EarlyStopping(monitor='val_loss', patience=2)\n early_stop.model = DummyModel()\n\n losses = [0.0860, 0.1096, 0.1040, 0.1019]\n\n # Should stop after epoch 3, as the loss has not improved after patience=2 epochs.\n epochs_trained = 0\n early_stop.on_train_begin()\n\n for epoch in range(len(losses)):\n epochs_trained += 1\n early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})\n\n if early_stop.model.stop_training:\n break\n\n assert epochs_trained == 3\n\n\n@keras_test\ndef test_EarlyStopping_baseline():\n class DummyModel(object):\n def __init__(self):\n self.stop_training = False\n\n def baseline_tester(acc_levels):\n early_stop = callbacks.EarlyStopping(monitor='val_acc', baseline=0.75, patience=2)\n early_stop.model = DummyModel()\n epochs_trained = 0\n early_stop.on_train_begin()\n for epoch in range(len(acc_levels)):\n epochs_trained += 1\n early_stop.on_epoch_end(epoch, logs={'val_acc': acc_levels[epoch]})\n if early_stop.model.stop_training:\n break\n return epochs_trained\n\n acc_levels = [0.55, 0.76, 0.81, 0.81]\n baseline_met = baseline_tester(acc_levels)\n acc_levels = [0.55, 0.74, 0.81, 0.81]\n baseline_not_met = baseline_tester(acc_levels)\n\n # All epochs should run because baseline was met in second epoch\n assert baseline_met == 4\n # Baseline was not met by second epoch and should stop\n assert baseline_not_met == 2\n\n\n@keras_test\ndef test_LearningRateScheduler():\n np.random.seed(1337)\n (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,\n num_test=test_samples,\n input_shape=(input_dim,),\n classification=True,\n num_classes=num_classes)\n y_test = np_utils.to_categorical(y_test)\n y_train = np_utils.to_categorical(y_train)\n model = Sequential()\n model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))\n model.add(Dense(num_classes, activation='softmax'))\n model.compile(loss='categorical_crossentropy',\n optimizer='sgd',\n metrics=['accuracy'])\n\n cbks = [callbacks.LearningRateScheduler(lambda x: 1. / (1. 
+ x))]\n model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test), callbacks=cbks, epochs=5)\n assert (float(K.get_value(model.optimizer.lr)) - 0.2) < K.epsilon()\n\n\n@keras_test\ndef test_ReduceLROnPlateau():\n np.random.seed(1337)\n (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,\n num_test=test_samples,\n input_shape=(input_dim,),\n classification=True,\n num_classes=num_classes)\n y_test = np_utils.to_categorical(y_test)\n y_train = np_utils.to_categorical(y_train)\n\n def make_model():\n np.random.seed(1337)\n model = Sequential()\n model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))\n model.add(Dense(num_classes, activation='softmax'))\n\n model.compile(loss='categorical_crossentropy',\n optimizer=optimizers.SGD(lr=0.1),\n metrics=['accuracy'])\n return model\n\n model = make_model()\n\n # This should reduce the LR after the first epoch (due to high epsilon).\n cbks = [callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, min_delta=10, patience=1, cooldown=5)]\n model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test), callbacks=cbks, epochs=5, verbose=2)\n assert np.allclose(float(K.get_value(model.optimizer.lr)), 0.01, atol=K.epsilon())\n\n model = make_model()\n cbks = [callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, min_delta=0, patience=1, cooldown=5)]\n model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test), callbacks=cbks, epochs=5, verbose=2)\n assert np.allclose(float(K.get_value(model.optimizer.lr)), 0.1, atol=K.epsilon())\n\n\n@keras_test\ndef test_ReduceLROnPlateau_patience():\n class DummyOptimizer(object):\n def __init__(self):\n self.lr = K.variable(1.0)\n\n class DummyModel(object):\n def __init__(self):\n self.optimizer = DummyOptimizer()\n\n reduce_on_plateau = callbacks.ReduceLROnPlateau(monitor='val_loss',\n patience=2)\n reduce_on_plateau.model = DummyModel()\n\n losses = [0.0860, 0.1096, 0.1040]\n lrs = []\n\n for epoch in range(len(losses)):\n reduce_on_plateau.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})\n lrs.append(K.get_value(reduce_on_plateau.model.optimizer.lr))\n\n # The learning rates should be 1.0 except the last one\n assert all([lr == 1.0 for lr in lrs[:-1]]) and lrs[-1] < 1.0\n\n\n@keras_test\ndef test_ReduceLROnPlateau_backwards_compatibility():\n import warnings\n with warnings.catch_warnings(record=True) as ws:\n reduce_on_plateau = callbacks.ReduceLROnPlateau(epsilon=1e-13)\n # Check if warnings are disabled\n if os.environ.get(\"PYTHONWARNINGS\") != \"ignore\":\n assert \"`epsilon` argument is deprecated\" in str(ws[0].message)\n assert not hasattr(reduce_on_plateau, 'epsilon')\n assert hasattr(reduce_on_plateau, 'min_delta')\n assert reduce_on_plateau.min_delta == 1e-13\n\n\n@keras_test\ndef test_CSVLogger(tmpdir):\n np.random.seed(1337)\n filepath = str(tmpdir / 'log.tsv')\n sep = '\\t'\n (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,\n num_test=test_samples,\n input_shape=(input_dim,),\n classification=True,\n num_classes=num_classes)\n y_test = np_utils.to_categorical(y_test)\n y_train = np_utils.to_categorical(y_train)\n\n def make_model():\n np.random.seed(1337)\n model = Sequential()\n model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))\n model.add(Dense(num_classes, activation='softmax'))\n\n model.compile(loss='categorical_crossentropy',\n optimizer=optimizers.SGD(lr=0.1),\n metrics=['accuracy'])\n return model\n\n # case 1, 
create new file with defined separator\n model = make_model()\n cbks = [callbacks.CSVLogger(filepath, separator=sep)]\n model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test), callbacks=cbks, epochs=1)\n\n assert os.path.isfile(filepath)\n with open(filepath) as csvfile:\n dialect = Sniffer().sniff(csvfile.read())\n assert dialect.delimiter == sep\n del model\n del cbks\n\n # case 2, append data to existing file, skip header\n model = make_model()\n cbks = [callbacks.CSVLogger(filepath, separator=sep, append=True)]\n model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test), callbacks=cbks, epochs=1)\n\n # case 3, reuse of CSVLogger object\n model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test), callbacks=cbks, epochs=1)\n\n import re\n with open(filepath) as csvfile:\n output = \" \".join(csvfile.readlines())\n assert len(re.findall('epoch', output)) == 1\n\n os.remove(filepath)\n assert not tmpdir.listdir()\n\n\n@keras_test\ndef test_TensorBoard(tmpdir):\n np.random.seed(np.random.randint(1, 1e7))\n filepath = str(tmpdir / 'logs')\n\n (X_train, y_train), (X_test, y_test) = get_test_data(\n num_train=train_samples,\n num_test=test_samples,\n input_shape=(input_dim,),\n classification=True,\n num_classes=num_classes)\n y_test = np_utils.to_categorical(y_test)\n y_train = np_utils.to_categorical(y_train)\n\n def data_generator(train):\n if train:\n max_batch_index = len(X_train) // batch_size\n else:\n max_batch_index = len(X_test) // batch_size\n i = 0\n while 1:\n if train:\n # simulate multi-input/output models\n yield (X_train[i * batch_size: (i + 1) * batch_size],\n y_train[i * batch_size: (i + 1) * batch_size])\n else:\n yield (X_test[i * batch_size: (i + 1) * batch_size],\n y_test[i * batch_size: (i + 1) * batch_size])\n i += 1\n i = i % max_batch_index\n\n inp = Input((input_dim,))\n hidden = Dense(num_hidden, activation='relu')(inp)\n hidden = Dropout(0.1)(hidden)\n output = Dense(num_classes, activation='softmax')(hidden)\n model = Model(inputs=inp, outputs=output)\n model.compile(loss='categorical_crossentropy',\n optimizer='sgd',\n metrics=['accuracy'])\n\n # we must generate new callbacks for each test, as they aren't stateless\n def callbacks_factory(histogram_freq):\n return [callbacks.TensorBoard(log_dir=filepath,\n histogram_freq=histogram_freq,\n write_images=True, write_grads=True,\n embeddings_freq=1,\n embeddings_layer_names=['dense_1'],\n batch_size=5)]\n\n # fit without validation data\n model.fit(X_train, y_train, batch_size=batch_size,\n callbacks=callbacks_factory(histogram_freq=0), epochs=3)\n\n # fit with validation data and accuracy\n model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test),\n callbacks=callbacks_factory(histogram_freq=0), epochs=2)\n\n # fit generator without validation data\n model.fit_generator(data_generator(True), len(X_train), epochs=2,\n callbacks=callbacks_factory(histogram_freq=0))\n\n # fit generator with validation data and accuracy\n model.fit_generator(data_generator(True), len(X_train), epochs=2,\n validation_data=(X_test, y_test),\n callbacks=callbacks_factory(histogram_freq=1))\n\n assert os.path.isdir(filepath)\n shutil.rmtree(filepath)\n assert not tmpdir.listdir()\n\n\n@keras_test\[email protected]((K.backend() != 'tensorflow'),\n reason='Requires TensorFlow backend')\ndef test_TensorBoard_histogram_freq_must_have_validation_data(tmpdir):\n np.random.seed(np.random.randint(1, 1e7))\n filepath = str(tmpdir / 
'logs')\n\n (X_train, y_train), (X_test, y_test) = get_test_data(\n num_train=train_samples,\n num_test=test_samples,\n input_shape=(input_dim,),\n classification=True,\n num_classes=num_classes)\n y_test = np_utils.to_categorical(y_test)\n y_train = np_utils.to_categorical(y_train)\n\n def data_generator(train):\n if train:\n max_batch_index = len(X_train) // batch_size\n else:\n max_batch_index = len(X_test) // batch_size\n i = 0\n while 1:\n if train:\n # simulate multi-input/output models\n yield (X_train[i * batch_size: (i + 1) * batch_size],\n y_train[i * batch_size: (i + 1) * batch_size])\n else:\n yield (X_test[i * batch_size: (i + 1) * batch_size],\n y_test[i * batch_size: (i + 1) * batch_size])\n i += 1\n i = i % max_batch_index\n\n inp = Input((input_dim,))\n hidden = Dense(num_hidden, activation='relu')(inp)\n hidden = Dropout(0.1)(hidden)\n output = Dense(num_classes, activation='softmax')(hidden)\n model = Model(inputs=inp, outputs=output)\n model.compile(loss='categorical_crossentropy',\n optimizer='sgd',\n metrics=['accuracy'])\n\n # we must generate new callbacks for each test, as they aren't stateless\n def callbacks_factory(histogram_freq):\n return [callbacks.TensorBoard(log_dir=filepath,\n histogram_freq=histogram_freq,\n write_images=True, write_grads=True,\n embeddings_freq=1,\n embeddings_layer_names=['dense_1'],\n batch_size=5)]\n\n # fit without validation data should raise ValueError if histogram_freq > 0\n with pytest.raises(ValueError) as raised_exception:\n model.fit(X_train, y_train, batch_size=batch_size,\n callbacks=callbacks_factory(histogram_freq=1), epochs=3)\n assert 'validation_data must be provided' in str(raised_exception.value)\n\n # fit generator without validation data should raise ValueError if\n # histogram_freq > 0\n with pytest.raises(ValueError) as raised_exception:\n model.fit_generator(data_generator(True), len(X_train), epochs=2,\n callbacks=callbacks_factory(histogram_freq=1))\n assert 'validation_data must be provided' in str(raised_exception.value)\n\n # fit generator with validation data generator should raise ValueError if\n # histogram_freq > 0\n with pytest.raises(ValueError) as raised_exception:\n model.fit_generator(data_generator(True), len(X_train), epochs=2,\n validation_data=data_generator(False),\n validation_steps=1,\n callbacks=callbacks_factory(histogram_freq=1))\n assert 'validation_data must be provided' in str(raised_exception.value)\n\n\n@keras_test\ndef test_TensorBoard_multi_input_output(tmpdir):\n np.random.seed(np.random.randint(1, 1e7))\n filepath = str(tmpdir / 'logs')\n\n (X_train, y_train), (X_test, y_test) = get_test_data(\n num_train=train_samples,\n num_test=test_samples,\n input_shape=(input_dim, input_dim),\n classification=True,\n num_classes=num_classes)\n y_test = np_utils.to_categorical(y_test)\n y_train = np_utils.to_categorical(y_train)\n\n def data_generator(train):\n if train:\n max_batch_index = len(X_train) // batch_size\n else:\n max_batch_index = len(X_test) // batch_size\n i = 0\n while 1:\n if train:\n # simulate multi-input/output models\n yield ([X_train[i * batch_size: (i + 1) * batch_size]] * 2,\n [y_train[i * batch_size: (i + 1) * batch_size]] * 2)\n else:\n yield ([X_test[i * batch_size: (i + 1) * batch_size]] * 2,\n [y_test[i * batch_size: (i + 1) * batch_size]] * 2)\n i += 1\n i = i % max_batch_index\n\n inp1 = Input((input_dim, input_dim))\n inp2 = Input((input_dim, input_dim))\n inp_3d = add([inp1, inp2])\n inp_2d = GlobalAveragePooling1D()(inp_3d)\n inp_pair = Lambda(lambda x: 
x)([inp_3d, inp_2d]) # test a layer with a list of output tensors\n hidden = dot(inp_pair, axes=-1)\n hidden = Dense(num_hidden, activation='relu')(hidden)\n hidden = Dropout(0.1)(hidden)\n output1 = Dense(num_classes, activation='softmax')(hidden)\n output2 = Dense(num_classes, activation='softmax')(hidden)\n model = Model(inputs=[inp1, inp2], outputs=[output1, output2])\n model.compile(loss='categorical_crossentropy',\n optimizer='sgd',\n metrics=['accuracy'])\n\n # we must generate new callbacks for each test, as they aren't stateless\n def callbacks_factory(histogram_freq):\n return [callbacks.TensorBoard(log_dir=filepath,\n histogram_freq=histogram_freq,\n write_images=True, write_grads=True,\n embeddings_freq=1,\n embeddings_layer_names=['dense_1'],\n batch_size=5)]\n\n # fit without validation data\n model.fit([X_train] * 2, [y_train] * 2, batch_size=batch_size,\n callbacks=callbacks_factory(histogram_freq=0), epochs=3)\n\n # fit with validation data and accuracy\n model.fit([X_train] * 2, [y_train] * 2, batch_size=batch_size,\n validation_data=([X_test] * 2, [y_test] * 2),\n callbacks=callbacks_factory(histogram_freq=1), epochs=2)\n\n # fit generator without validation data\n model.fit_generator(data_generator(True), len(X_train), epochs=2,\n callbacks=callbacks_factory(histogram_freq=0))\n\n # fit generator with validation data and accuracy\n model.fit_generator(data_generator(True), len(X_train), epochs=2,\n validation_data=([X_test] * 2, [y_test] * 2),\n callbacks=callbacks_factory(histogram_freq=1))\n\n assert os.path.isdir(filepath)\n shutil.rmtree(filepath)\n assert not tmpdir.listdir()\n\n\n@keras_test\ndef test_TensorBoard_convnet(tmpdir):\n np.random.seed(np.random.randint(1, 1e7))\n filepath = str(tmpdir / 'logs')\n\n input_shape = (16, 16, 3)\n (x_train, y_train), (x_test, y_test) = get_test_data(num_train=500,\n num_test=200,\n input_shape=input_shape,\n classification=True,\n num_classes=num_classes)\n y_train = np_utils.to_categorical(y_train)\n y_test = np_utils.to_categorical(y_test)\n\n model = Sequential([\n Conv2D(filters=8, kernel_size=3,\n activation='relu',\n input_shape=input_shape),\n MaxPooling2D(pool_size=2),\n Conv2D(filters=4, kernel_size=(3, 3),\n activation='relu', padding='same'),\n GlobalAveragePooling2D(),\n Dense(num_classes, activation='softmax')\n ])\n model.compile(loss='categorical_crossentropy',\n optimizer='rmsprop',\n metrics=['accuracy'])\n tsb = callbacks.TensorBoard(log_dir=filepath, histogram_freq=1,\n write_images=True, write_grads=True,\n batch_size=16)\n cbks = [tsb]\n model.summary()\n history = model.fit(x_train, y_train, epochs=2, batch_size=16,\n validation_data=(x_test, y_test),\n callbacks=cbks,\n verbose=0)\n assert os.path.isdir(filepath)\n shutil.rmtree(filepath)\n assert not tmpdir.listdir()\n\n\n@keras_test\ndef test_CallbackValData():\n np.random.seed(1337)\n (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,\n num_test=test_samples,\n input_shape=(input_dim,),\n classification=True,\n num_classes=num_classes)\n y_test = np_utils.to_categorical(y_test)\n y_train = np_utils.to_categorical(y_train)\n model = Sequential()\n model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))\n model.add(Dense(num_classes, activation='softmax'))\n model.compile(loss='categorical_crossentropy',\n optimizer='sgd',\n metrics=['accuracy'])\n\n cbk = callbacks.LambdaCallback(on_train_end=lambda x: 1)\n model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test), callbacks=[cbk], 
epochs=1)\n\n def data_generator(train):\n if train:\n max_batch_index = len(X_train) // batch_size\n else:\n max_batch_index = len(X_test) // batch_size\n i = 0\n while 1:\n if train:\n yield (X_train[i * batch_size: (i + 1) * batch_size],\n y_train[i * batch_size: (i + 1) * batch_size])\n else:\n yield (X_test[i * batch_size: (i + 1) * batch_size],\n y_test[i * batch_size: (i + 1) * batch_size])\n i += 1\n i = i % max_batch_index\n\n cbk2 = callbacks.LambdaCallback(on_train_end=lambda x: 1)\n model.fit_generator(data_generator(True), len(X_train), epochs=1,\n validation_data=(X_test, y_test),\n callbacks=[cbk2])\n\n # callback validation data should always have x, y, and sample weights\n assert len(cbk.validation_data) == len(cbk2.validation_data) == 3\n assert cbk.validation_data[0] is cbk2.validation_data[0]\n assert cbk.validation_data[1] is cbk2.validation_data[1]\n assert cbk.validation_data[2].shape == cbk2.validation_data[2].shape\n\n\n@keras_test\ndef test_LambdaCallback():\n np.random.seed(1337)\n (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,\n num_test=test_samples,\n input_shape=(input_dim,),\n classification=True,\n num_classes=num_classes)\n y_test = np_utils.to_categorical(y_test)\n y_train = np_utils.to_categorical(y_train)\n model = Sequential()\n model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))\n model.add(Dense(num_classes, activation='softmax'))\n model.compile(loss='categorical_crossentropy',\n optimizer='sgd',\n metrics=['accuracy'])\n\n # Start an arbitrary process that should run during model training and be terminated after training has completed.\n def f():\n while True:\n pass\n\n p = multiprocessing.Process(target=f)\n p.start()\n cleanup_callback = callbacks.LambdaCallback(on_train_end=lambda logs: p.terminate())\n\n cbks = [cleanup_callback]\n model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test), callbacks=cbks, epochs=5)\n p.join()\n assert not p.is_alive()\n\n\n@keras_test\ndef test_TensorBoard_with_ReduceLROnPlateau(tmpdir):\n import shutil\n np.random.seed(np.random.randint(1, 1e7))\n filepath = str(tmpdir / 'logs')\n\n (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,\n num_test=test_samples,\n input_shape=(input_dim,),\n classification=True,\n num_classes=num_classes)\n y_test = np_utils.to_categorical(y_test)\n y_train = np_utils.to_categorical(y_train)\n\n model = Sequential()\n model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))\n model.add(Dense(num_classes, activation='softmax'))\n model.compile(loss='binary_crossentropy',\n optimizer='sgd',\n metrics=['accuracy'])\n\n cbks = [\n callbacks.ReduceLROnPlateau(\n monitor='val_loss',\n factor=0.5,\n patience=4,\n verbose=1),\n callbacks.TensorBoard(\n log_dir=filepath)]\n\n model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test), callbacks=cbks, epochs=2)\n\n assert os.path.isdir(filepath)\n shutil.rmtree(filepath)\n assert not tmpdir.listdir()\n\n\n@keras_test\ndef tests_RemoteMonitor():\n (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,\n num_test=test_samples,\n input_shape=(input_dim,),\n classification=True,\n num_classes=num_classes)\n y_test = np_utils.to_categorical(y_test)\n y_train = np_utils.to_categorical(y_train)\n model = Sequential()\n model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))\n model.add(Dense(num_classes, activation='softmax'))\n 
model.compile(loss='categorical_crossentropy',\n optimizer='rmsprop',\n metrics=['accuracy'])\n cbks = [callbacks.RemoteMonitor()]\n\n with patch('requests.post'):\n model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test), callbacks=cbks, epochs=1)\n\n\n@keras_test\ndef tests_RemoteMonitorWithJsonPayload():\n (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,\n num_test=test_samples,\n input_shape=(input_dim,),\n classification=True,\n num_classes=num_classes)\n y_test = np_utils.to_categorical(y_test)\n y_train = np_utils.to_categorical(y_train)\n model = Sequential()\n model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))\n model.add(Dense(num_classes, activation='softmax'))\n model.compile(loss='categorical_crossentropy',\n optimizer='rmsprop',\n metrics=['accuracy'])\n cbks = [callbacks.RemoteMonitor(send_as_json=True)]\n\n with patch('requests.post'):\n model.fit(X_train, y_train, batch_size=batch_size,\n validation_data=(X_test, y_test), callbacks=cbks, epochs=1)\n\n\nif __name__ == '__main__':\n pytest.main([__file__])\n"
] | [
[
"numpy.ones",
"numpy.random.seed",
"numpy.random.random",
"numpy.isnan",
"numpy.where",
"numpy.random.randint"
]
] |
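
The patience behaviour exercised by `test_EarlyStopping_patience` above does not need a real model; the callback only inspects the logs dict and a `stop_training` flag. A condensed sketch of that test, assuming the same standalone `keras` package used in the row:

```python
from keras import callbacks

class DummyModel(object):
    def __init__(self):
        self.stop_training = False

# val_loss stops improving after epoch 0, so with patience=2 the callback
# should request a stop after the third epoch.
early_stop = callbacks.EarlyStopping(monitor='val_loss', patience=2)
early_stop.model = DummyModel()
early_stop.on_train_begin()

epochs_trained = 0
for epoch, val_loss in enumerate([0.0860, 0.1096, 0.1040, 0.1019]):
    epochs_trained += 1
    early_stop.on_epoch_end(epoch, logs={'val_loss': val_loss})
    if early_stop.model.stop_training:
        break
print(epochs_trained)  # 3
```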
Gruschwick/ECG_PLATFORM | [
"4a1ee568e8593938a3b51c595d4834f861a6db6e"
] | [
"Framework/Sketch/Helpers/Metrices.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 11 16:56:51 2019\n\n@author: x\n\"\"\"\n\nimport numpy as np\nfrom collections import Counter\n\nclass MetricesConstants(object):\n #qrs_cutoff_distance = 0.2\n qrs_cutoff_distance = 0.120 #https://www.sciencedirect.com/science/article/abs/pii/S1746809417300216\n\ndef sample_to_time(samples, freq):\n return samples/freq\n\ndef match_peaks( ref_peaks, pred_peaks, cutoff_distance = None):\n '''\n calc best matching between ref_peaks and pred_peaks with cutoff (error time distance no longer than cutoff_distance)\n [(ref_peaks[r], pred_peaks[c]) for r, c in zip(row_ind, col_ind)\n '''\n from scipy.optimize import linear_sum_assignment\n assert np.all(ref_peaks >= 0), \"positive time\"\n assert np.all(pred_peaks >= 0), \"positive time\"\n \n if cutoff_distance is None:\n cutoff_distance = MetricesConstants.qrs_cutoff_distance\n \n max_ref_peaks = np.max(ref_peaks)\n len_ref_peaks = len(ref_peaks)\n max_pred_peaks = np.max(pred_peaks)\n len_pred_peaks = len(pred_peaks)\n \n max_len = max(len_ref_peaks, len_pred_peaks)\n max_peaks = max(max_ref_peaks, max_pred_peaks)\n max_distance = max_peaks*10000 \n \n ref_peaks = np.pad(ref_peaks, ((0,max_len - len_ref_peaks),), 'constant', constant_values=(0, max_distance)) \n pred_peaks = np.pad(pred_peaks, ((0,max_len - len_pred_peaks),), 'constant', constant_values=(0, max_distance)) \n\n distance_matrix = np.abs(ref_peaks[:,np.newaxis] - pred_peaks[np.newaxis,:])\n \n distance_matrix[distance_matrix > cutoff_distance] = max_distance\n \n row_ind, col_ind= linear_sum_assignment(distance_matrix)\n \n matching_filtered = [(r,c) for r, c in zip(row_ind, col_ind) if distance_matrix[r,c] <= cutoff_distance]\n \n #ref_peaks[r], pred_peaks[c]\n return matching_filtered\n\ndef qrs_detection_scores( ref_peaks, pred_peaks, peaks_matching):\n deltas = [(ref_peaks[r] - pred_peaks[c]) for r, c in peaks_matching]\n tpr = len(peaks_matching)/len(ref_peaks)\n ppv = len(peaks_matching)/len(pred_peaks)\n \n return np.mean(deltas), np.std(deltas), tpr, ppv\n\ndef qrs_detection_by_class(ref_peaks_class, peaks_matching):\n ref_counts = Counter(ref_peaks_class)\n detected_counts = Counter(ref_peaks_class[r] for r, c in peaks_matching)\n \n return {(k, detected_counts.get(k,0)/ref_counts[k]) for k in ref_counts.keys()}, ref_counts, detected_counts\n"
] | [
[
"numpy.abs",
"scipy.optimize.linear_sum_assignment",
"numpy.all",
"numpy.max",
"numpy.std",
"numpy.pad",
"numpy.mean"
]
] |
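
`match_peaks` above reduces QRS-peak matching to a linear assignment problem: build the pairwise distance matrix, inflate every entry beyond the cutoff, solve with `linear_sum_assignment`, and keep only pairs within the cutoff. A small usage sketch with made-up peak times:

```python
import numpy as np
from scipy.optimize import linear_sum_assignment

# Toy peak times in seconds; the last predicted peak (2.80 s) is further than
# the 120 ms cutoff from any reference peak, so it stays unmatched.
ref_peaks = np.array([0.80, 1.60, 2.40])
pred_peaks = np.array([0.85, 1.55, 2.80])
cutoff = 0.120

dist = np.abs(ref_peaks[:, np.newaxis] - pred_peaks[np.newaxis, :])
dist[dist > cutoff] = 1e6  # effectively forbid matches beyond the cutoff
rows, cols = linear_sum_assignment(dist)
matches = [(r, c) for r, c in zip(rows, cols) if dist[r, c] <= cutoff]
print(matches)  # [(0, 0), (1, 1)]
```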
Kiiwi/Syssel | [
"83705e3fd0edf40f09df950d5ce91c95586573f5"
] | [
"venv/Lib/site-packages/IPython/lib/latextools.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"Tools for handling LaTeX.\"\"\"\n\n# Copyright (c) IPython Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nfrom io import BytesIO, open\nfrom base64 import encodestring\nimport os\nimport tempfile\nimport shutil\nimport subprocess\n\nfrom IPython.utils.process import find_cmd, FindCmdError\nfrom IPython.config import get_config\nfrom IPython.config.configurable import SingletonConfigurable\nfrom IPython.utils.traitlets import List, Bool, Unicode\nfrom IPython.utils.py3compat import cast_unicode, cast_unicode_py2 as u\n\n\nclass LaTeXTool(SingletonConfigurable):\n \"\"\"An object to store configuration of the LaTeX tool.\"\"\"\n def _config_default(self):\n return get_config()\n \n backends = List(\n Unicode, [\"matplotlib\", \"dvipng\"],\n help=\"Preferred backend to draw LaTeX math equations. \"\n \"Backends in the list are checked one by one and the first \"\n \"usable one is used. Note that `matplotlib` backend \"\n \"is usable only for inline style equations. To draw \"\n \"display style equations, `dvipng` backend must be specified. \",\n # It is a List instead of Enum, to make configuration more\n # flexible. For example, to use matplotlib mainly but dvipng\n # for display style, the default [\"matplotlib\", \"dvipng\"] can\n # be used. To NOT use dvipng so that other repr such as\n # unicode pretty printing is used, you can use [\"matplotlib\"].\n config=True)\n\n use_breqn = Bool(\n True,\n help=\"Use breqn.sty to automatically break long equations. \"\n \"This configuration takes effect only for dvipng backend.\",\n config=True)\n\n packages = List(\n ['amsmath', 'amsthm', 'amssymb', 'bm'],\n help=\"A list of packages to use for dvipng backend. \"\n \"'breqn' will be automatically appended when use_breqn=True.\",\n config=True)\n\n preamble = Unicode(\n help=\"Additional preamble to use when generating LaTeX source \"\n \"for dvipng backend.\",\n config=True)\n\n\ndef latex_to_png(s, encode=False, backend=None, wrap=False):\n \"\"\"Render a LaTeX string to PNG.\n\n Parameters\n ----------\n s : text\n The raw string containing valid inline LaTeX.\n encode : bool, optional\n Should the PNG data base64 encoded to make it JSON'able.\n backend : {matplotlib, dvipng}\n Backend for producing PNG data.\n wrap : bool\n If true, Automatically wrap `s` as a LaTeX equation.\n\n None is returned when the backend cannot be used.\n\n \"\"\"\n s = cast_unicode(s)\n allowed_backends = LaTeXTool.instance().backends\n if backend is None:\n backend = allowed_backends[0]\n if backend not in allowed_backends:\n return None\n if backend == 'matplotlib':\n f = latex_to_png_mpl\n elif backend == 'dvipng':\n f = latex_to_png_dvipng\n else:\n raise ValueError('No such backend {0}'.format(backend))\n bin_data = f(s, wrap)\n if encode and bin_data:\n bin_data = encodestring(bin_data)\n return bin_data\n\n\ndef latex_to_png_mpl(s, wrap):\n try:\n from matplotlib import mathtext\n except ImportError:\n return None\n \n # mpl mathtext doesn't support display math, force inline\n s = s.replace('$$', '$')\n if wrap:\n s = u'${0}$'.format(s)\n \n mt = mathtext.MathTextParser('bitmap')\n f = BytesIO()\n mt.to_png(f, s, fontsize=12)\n return f.getvalue()\n\n\ndef latex_to_png_dvipng(s, wrap):\n try:\n find_cmd('latex')\n find_cmd('dvipng')\n except FindCmdError:\n return None\n try:\n workdir = tempfile.mkdtemp()\n tmpfile = os.path.join(workdir, \"tmp.tex\")\n dvifile = os.path.join(workdir, \"tmp.dvi\")\n outfile = os.path.join(workdir, 
\"tmp.png\")\n\n with open(tmpfile, \"w\", encoding='utf8') as f:\n f.writelines(genelatex(s, wrap))\n\n with open(os.devnull, 'wb') as devnull:\n subprocess.check_call(\n [\"latex\", \"-halt-on-error\", \"-interaction\", \"batchmode\", tmpfile],\n cwd=workdir, stdout=devnull, stderr=devnull)\n\n subprocess.check_call(\n [\"dvipng\", \"-T\", \"tight\", \"-x\", \"1500\", \"-z\", \"9\",\n \"-bg\", \"transparent\", \"-o\", outfile, dvifile], cwd=workdir,\n stdout=devnull, stderr=devnull)\n\n with open(outfile, \"rb\") as f:\n return f.read()\n finally:\n shutil.rmtree(workdir)\n\n\ndef kpsewhich(filename):\n \"\"\"Invoke kpsewhich command with an argument `filename`.\"\"\"\n try:\n find_cmd(\"kpsewhich\")\n proc = subprocess.Popen(\n [\"kpsewhich\", filename],\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n (stdout, stderr) = proc.communicate()\n return stdout.strip().decode('utf8', 'replace')\n except FindCmdError:\n pass\n\n\ndef genelatex(body, wrap):\n \"\"\"Generate LaTeX document for dvipng backend.\"\"\"\n lt = LaTeXTool.instance()\n breqn = wrap and lt.use_breqn and kpsewhich(\"breqn.sty\")\n yield u(r'\\documentclass{article}')\n packages = lt.packages\n if breqn:\n packages = packages + ['breqn']\n for pack in packages:\n yield u(r'\\usepackage{{{0}}}'.format(pack))\n yield u(r'\\pagestyle{empty}')\n if lt.preamble:\n yield lt.preamble\n yield u(r'\\begin{document}')\n if breqn:\n yield u(r'\\begin{dmath*}')\n yield body\n yield u(r'\\end{dmath*}')\n elif wrap:\n yield u'$${0}$$'.format(body)\n else:\n yield body\n yield u'\\end{document}'\n\n\n_data_uri_template_png = u\"\"\"<img src=\"data:image/png;base64,%s\" alt=%s />\"\"\"\n\ndef latex_to_html(s, alt='image'):\n \"\"\"Render LaTeX to HTML with embedded PNG data using data URIs.\n\n Parameters\n ----------\n s : str\n The raw string containing valid inline LateX.\n alt : str\n The alt text to use for the HTML.\n \"\"\"\n base64_data = latex_to_png(s, encode=True).decode('ascii')\n if base64_data:\n return _data_uri_template_png % (base64_data, alt)\n\n\n"
] | [
[
"matplotlib.mathtext.MathTextParser"
]
] |
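
The `kpsewhich` helper above is a generic "run a command and capture stdout" pattern. A standalone sketch, assuming a TeX distribution with `kpsewhich` on the PATH (otherwise the lookup simply returns None); `FileNotFoundError` stands in for the `FindCmdError` used in the row:

```python
import subprocess

def kpsewhich(filename):
    # Run kpsewhich, capture its stdout, and decode it.
    try:
        proc = subprocess.Popen(["kpsewhich", filename],
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except FileNotFoundError:
        return None
    stdout, _ = proc.communicate()
    return stdout.strip().decode('utf8', 'replace')

print(kpsewhich("breqn.sty"))
```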
tropp/ACQ4 | [
"792e05e99cedfc175593d200aeabecd6fa6304ce"
] | [
"acq4/devices/PatchStar/patchstar.py"
] | [
"# -*- coding: utf-8 -*-\nimport time\nimport numpy as np\nfrom PyQt4 import QtGui, QtCore\nfrom ..Stage import Stage, MoveFuture, StageInterface\nfrom acq4.drivers.PatchStar import PatchStar as PatchStarDriver\nfrom acq4.util.Mutex import Mutex\nfrom acq4.util.Thread import Thread\nfrom acq4.pyqtgraph import debug, ptime, SpinBox\n\n\nclass PatchStar(Stage):\n \"\"\"\n A Scientifica PatchStar manipulator.\n\n port: <serial port> # eg. 'COM1' or '/dev/ttyACM0'\n \"\"\"\n def __init__(self, man, config, name):\n self.port = config.pop('port')\n self.scale = config.pop('scale', (1e-7, 1e-7, 1e-7))\n self.dev = PatchStarDriver(self.port)\n self._lastMove = None\n man.sigAbortAll.connect(self.stop)\n\n Stage.__init__(self, man, config, name)\n\n # clear cached position for this device and re-read to generate an initial position update\n self._lastPos = None\n self.getPosition(refresh=True)\n self.setUserSpeed(3e-3)\n\n # Set scaling for each axis\n self.dev.send('UUX 6.4')\n self.dev.send('UUY 6.4')\n self.dev.send('UUZ 6.4')\n\n # makes 1 roe turn == 1 second movement for any speed\n self.dev.send('JS 200')\n\n # Set approach angle\n self.dev.send('ANGLE %f' % self.pitch)\n self.dev.send('APPROACH 0')\n\n # thread for polling position changes\n self.monitor = MonitorThread(self)\n self.monitor.start()\n\n def capabilities(self):\n \"\"\"Return a structure describing the capabilities of this device\"\"\"\n if 'capabilities' in self.config:\n return self.config['capabilities']\n else:\n return {\n 'getPos': (True, True, True),\n 'setPos': (True, True, True),\n 'limits': (False, False, False),\n }\n\n def stop(self):\n \"\"\"Stop the manipulator immediately.\n \"\"\"\n with self.lock:\n self.dev.stop()\n if self._lastMove is not None:\n self._lastMove._stopped()\n self._lastMove = None\n\n def setUserSpeed(self, v):\n \"\"\"Set the speed of the rotary controller (m/turn).\n \"\"\"\n self.userSpeed = v\n self.dev.setSpeed(v / self.scale[0])\n\n def _getPosition(self):\n # Called by superclass when user requests position refresh\n with self.lock:\n pos = self.dev.getPos()\n pos = [pos[i] * self.scale[i] for i in (0, 1, 2)]\n if pos != self._lastPos:\n self._lastPos = pos\n emit = True\n else:\n emit = False\n\n if emit:\n # don't emit signal while locked\n self.posChanged(pos)\n\n return pos\n\n def targetPosition(self):\n with self.lock:\n if self._lastMove is None or self._lastMove.isDone():\n return self.getPosition()\n else:\n return self._lastMove.targetPos\n\n def quit(self):\n self.monitor.stop()\n Stage.quit(self)\n\n def _move(self, abs, rel, speed, linear):\n with self.lock:\n if self._lastMove is not None and not self._lastMove.isDone():\n self.stop()\n pos = self._toAbsolutePosition(abs, rel)\n self._lastMove = PatchStarMoveFuture(self, pos, speed, self.userSpeed)\n return self._lastMove\n\n def deviceInterface(self, win):\n return PatchStarGUI(self, win)\n\n\nclass MonitorThread(Thread):\n \"\"\"Thread to poll for manipulator position changes.\n \"\"\"\n def __init__(self, dev):\n self.dev = dev\n self.lock = Mutex(recursive=True)\n self.stopped = False\n self.interval = 0.3\n \n Thread.__init__(self)\n\n def start(self):\n self.stopped = False\n Thread.start(self)\n\n def stop(self):\n with self.lock:\n self.stopped = True\n\n def setInterval(self, i):\n with self.lock:\n self.interval = i\n \n def run(self):\n minInterval = 100e-3\n interval = minInterval\n lastPos = None\n while True:\n try:\n with self.lock:\n if self.stopped:\n break\n maxInterval = self.interval\n\n pos = 
self.dev._getPosition() # this causes sigPositionChanged to be emitted\n if pos != lastPos:\n # if there was a change, then loop more rapidly for a short time.\n interval = minInterval\n lastPos = pos\n else:\n interval = min(maxInterval, interval*2)\n\n time.sleep(interval)\n except:\n debug.printExc('Error in PatchStar monitor thread:')\n time.sleep(maxInterval)\n \n\nclass PatchStarMoveFuture(MoveFuture):\n \"\"\"Provides access to a move-in-progress on a PatchStar manipulator.\n \"\"\"\n def __init__(self, dev, pos, speed, userSpeed):\n MoveFuture.__init__(self, dev, pos, speed)\n self._interrupted = False\n self._errorMSg = None\n self._finished = False\n pos = (np.array(pos) / np.array(self.dev.scale)).astype(int)\n if speed == 'fast':\n speed = 1e-3\n elif speed == 'slow':\n speed = 1e-6\n with self.dev.dev.lock:\n self.dev.dev.moveTo(pos, speed / self.dev.scale[0])\n # reset to user speed immediately after starting move\n # (the move itself will run with the previous speed)\n self.dev.dev.setSpeed(userSpeed / self.dev.scale[0])\n \n def wasInterrupted(self):\n \"\"\"Return True if the move was interrupted before completing.\n \"\"\"\n return self._interrupted\n\n def isDone(self):\n \"\"\"Return True if the move is complete.\n \"\"\"\n return self._getStatus() != 0\n\n def _getStatus(self):\n # check status of move unless we already know it is complete.\n # 0: still moving; 1: finished successfully; -1: finished unsuccessfully\n if self._finished:\n if self._interrupted:\n return -1\n else:\n return 1\n if self.dev.dev.isMoving():\n # Still moving\n return 0\n # did we reach target?\n pos = self.dev._getPosition()\n if ((np.array(pos) - np.array(self.targetPos))**2).sum()**0.5 < 1e-6:\n # reached target\n self._finished = True\n return 1\n else:\n # missed\n self._finished = True\n self._interrupted = True\n self._errorMsg = \"Move did not complete.\"\n return -1\n\n def _stopped(self):\n # Called when the manipulator is stopped, possibly interrupting this move.\n status = self._getStatus()\n if status == 1:\n # finished; ignore stop\n return\n elif status == -1:\n self._errorMsg = \"Move was interrupted before completion.\"\n elif status == 0:\n # not actually stopped! 
This should not happen.\n raise RuntimeError(\"Interrupted move but manipulator is still running!\")\n else:\n raise Exception(\"Unknown status: %s\" % status)\n\n def errorMessage(self):\n return self._errorMsg\n\n\n\nclass PatchStarGUI(StageInterface):\n def __init__(self, dev, win):\n StageInterface.__init__(self, dev, win)\n\n # Insert patchstar-specific controls into GUI\n self.psGroup = QtGui.QGroupBox('PatchStar Rotary Controller')\n self.layout.addWidget(self.psGroup, self.nextRow, 0, 1, 2)\n self.nextRow += 1\n\n self.psLayout = QtGui.QGridLayout()\n self.psGroup.setLayout(self.psLayout)\n self.speedLabel = QtGui.QLabel('Speed')\n self.speedSpin = SpinBox(value=self.dev.userSpeed, suffix='m/turn', siPrefix=True, dec=True, limits=[1e-6, 10e-3])\n self.revXBtn = QtGui.QPushButton('Reverse X')\n self.revYBtn = QtGui.QPushButton('Reverse Y')\n self.revZBtn = QtGui.QPushButton('Reverse Z')\n self.psLayout.addWidget(self.speedLabel, 0, 0)\n self.psLayout.addWidget(self.speedSpin, 0, 1)\n self.psLayout.addWidget(self.revXBtn, 1, 1)\n self.psLayout.addWidget(self.revYBtn, 2, 1)\n self.psLayout.addWidget(self.revZBtn, 3, 1)\n\n self.revXBtn.clicked.connect(lambda: self.dev.dev.send('JDX'))\n self.revYBtn.clicked.connect(lambda: self.dev.dev.send('JDY'))\n self.revZBtn.clicked.connect(lambda: self.dev.dev.send('JDZ'))\n\n self.speedSpin.valueChanged.connect(lambda v: self.dev.setDefaultSpeed(v))\n\n"
] | [
[
"numpy.array"
]
] |
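
`PatchStarMoveFuture._getStatus` above declares a move finished when the manipulator sits within 1 µm (Euclidean distance) of the target. The check in isolation, with positions already scaled to metres:

```python
import numpy as np

pos = np.array([1.0000e-3, 2.0000e-3, 0.5000e-3])     # metres
target = np.array([1.0005e-3, 2.0000e-3, 0.5000e-3])

# Same test as _getStatus(): Euclidean distance below 1e-6 m counts as "reached".
reached = ((np.array(pos) - np.array(target)) ** 2).sum() ** 0.5 < 1e-6
print(reached)  # True: only 0.5 um away from the target
```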
JiaXingBinggan/MSRL | [
"fcc8b06eb1938a78549868b27f2962cb47b3d866"
] | [
"agent/DQN_agent.py"
] | [
"import numpy as np\nimport mindspore\nfrom mindspore import context, ops, Tensor, nn\nfrom mindspore.common.parameter import Parameter, ParameterTuple\nimport copy\n\n\ncontext.set_context(mode=context.PYNATIVE_MODE, device_target=\"CPU\")\n\n\n_update_op = ops.MultitypeFuncGraph(\"update_op\")\n\n\n@_update_op.register(\"Tensor\", \"Tensor\")\ndef _parameter_update(policy_param, target_param):\n assign = ops.Assign()\n output = assign(target_param, policy_param)\n return output\n\n\nclass DQN(nn.Cell):\n neuron_nums = 16\n\n def __init__(self, n_features, n_actions):\n super(DQN, self).__init__()\n self.net = nn.SequentialCell(\n nn.Dense(n_features, self.neuron_nums),\n nn.ReLU(),\n nn.Dense(self.neuron_nums, n_actions),\n )\n\n def construct(self, s):\n return self.net(s)\n\n\nclass PolicyNetWithLossCell(nn.Cell):\n \"\"\"DQN policy network with loss cell\"\"\"\n\n def __init__(self, backbone, loss_fn):\n super(PolicyNetWithLossCell,\n self).__init__(auto_prefix=False)\n self._backbone = backbone\n self._loss_fn = loss_fn\n self.gather = ops.GatherD()\n\n def construct(self, x, a0, label):\n \"\"\"constructor for Loss Cell\"\"\"\n out = self._backbone(x)\n out = self.gather(out, 1, a0)\n loss = self._loss_fn(out, label)\n return loss\n\n# Deep Q Network off-policy\nclass DeepQNetwork:\n def __init__(\n self,\n n_actions,\n n_features,\n learning_rate=0.01,\n reward_decay=0.9,\n e_greedy=0.9,\n replace_target_iter=300,\n memory_size=500,\n batch_size=3,\n e_greedy_increment=None,\n ):\n self.n_actions = n_actions\n self.n_features = n_features\n self.lr = learning_rate\n self.gamma = reward_decay\n self.epsilon_max = e_greedy\n self.replace_target_iter = replace_target_iter\n self.memory_size = memory_size\n self.batch_size = batch_size\n self.epsilon_increment = e_greedy_increment\n self.epsilon = 0 if e_greedy_increment is not None else self.epsilon_max\n\n # total learning step\n self.learn_step_counter = 0\n\n # initialize zero memory [s, a, r, s_]\n self.memory = np.zeros((self.memory_size, n_features * 2 + 2))\n\n self.eval_net = DQN(self.n_features, self.n_actions)\n self.target_net = copy.deepcopy(self.eval_net)\n self.policy_param = ParameterTuple(\n self.eval_net.get_parameters())\n self.target_param = ParameterTuple(\n self.target_net.get_parameters())\n\n if not hasattr(self, 'memory_counter'):\n self.memory_counter = 0\n\n loss_func = nn.MSELoss()\n opt = nn.Adam(self.eval_net.trainable_params(), learning_rate=self.lr)\n loss_q_net = PolicyNetWithLossCell(self.eval_net, loss_func)\n self.policy_network_train = nn.TrainOneStepCell(loss_q_net, opt)\n self.policy_network_train.set_train(mode=True)\n\n self.hyper_map = ops.HyperMap()\n self.cost_his = []\n\n def store_transition(self, transition):\n index = self.memory_counter % self.memory_size\n self.memory[index, :] = transition\n self.memory_counter += 1\n\n def reset_epsilon(self, epsilon):\n self.epsilon = epsilon\n\n def choose_action(self, observation):\n observation = Tensor(observation[np.newaxis, :], mindspore.float32)\n if np.random.uniform() < self.epsilon:\n self.eval_net.set_train(mode=False)\n action_v = self.eval_net(observation)\n action = np.argmax(action_v)\n else:\n action = np.random.randint(0, self.n_actions)\n return action\n\n def update_param(self):\n assign_result = self.hyper_map(\n _update_op,\n self.policy_param,\n self.target_param\n )\n return assign_result\n\n def learn(self):\n if self.learn_step_counter % self.replace_target_iter == 0:\n self.update_param()\n\n if self.memory_counter > 
self.memory_size:\n sample_index = np.random.choice(self.memory_size, size=self.batch_size, replace=False)\n else:\n sample_index = np.random.choice(self.memory_counter, size=self.batch_size, replace=False)\n\n batch_memory = Tensor(self.memory[sample_index, :], mindspore.float32)\n b_s = batch_memory[:, :self.n_features]\n b_a = ops.ExpandDims()(batch_memory[:, self.n_features], 1).astype(mindspore.int32)\n b_r = ops.ExpandDims()(batch_memory[:, self.n_features + 1], 1)\n b_s_ = batch_memory[:, -self.n_features:]\n\n q_next = self.target_net(b_s_).max(axis=1)\n q_target = b_r + self.gamma * q_next\n\n loss = self.policy_network_train(b_s, b_a, q_target)\n self.cost_his.append(round(float(np.mean(loss.asnumpy())), 3))\n\n # increasing epsilon\n self.epsilon = self.epsilon + self.epsilon_increment if self.epsilon < self.epsilon_max else self.epsilon_max\n self.learn_step_counter += 1\n\n return loss\n\n def plot_cost(self):\n import matplotlib.pyplot as plt\n plt.plot(np.arange(len(self.cost_his)), self.cost_his)\n plt.ylabel('Cost')\n plt.xlabel('training steps')\n plt.show()\n\n\n\n"
] | [
[
"numpy.random.uniform",
"numpy.zeros",
"numpy.random.choice",
"numpy.argmax",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"numpy.random.randint",
"matplotlib.pyplot.xlabel"
]
] |
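
The MindSpore-specific parts aside, two pieces of `DeepQNetwork` above are plain NumPy: epsilon-greedy action selection and minibatch sampling from a partially filled replay buffer. A minimal sketch (the greedy branch uses random Q-values as a stand-in for the network output):

```python
import numpy as np

n_actions, n_features, memory_size, batch_size = 3, 4, 500, 32
memory = np.zeros((memory_size, n_features * 2 + 2))  # rows hold [s, a, r, s_]
memory_counter = 40
epsilon = 0.9

# Epsilon-greedy: exploit with probability epsilon, otherwise explore.
q_values = np.random.uniform(size=n_actions)           # stand-in for eval_net(s)
if np.random.uniform() < epsilon:
    action = np.argmax(q_values)
else:
    action = np.random.randint(0, n_actions)

# Sample only from the filled part of the buffer until it wraps around,
# mirroring the branch at the top of learn().
high = memory_size if memory_counter > memory_size else memory_counter
sample_index = np.random.choice(high, size=batch_size, replace=False)
batch_memory = memory[sample_index, :]
print(int(action), batch_memory.shape)                 # e.g. 1 (32, 10)
```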
AndersDHenriksen/Tensorflow-Project-Template | [
"32dfeaaf1243587af4ceb7b378c135092ddb9258"
] | [
"base/base_train.py"
] | [
"import tensorflow as tf\n\n\nclass BaseTrain:\n def __init__(self, sess, model, data, config, logger):\n self.model = model\n self.logger = logger\n self.config = config\n self.sess = sess\n self.data = data\n self.init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())\n if not self.model.is_loaded:\n self.sess.run(self.init)\n\n def train(self):\n for cur_epoch in range(self.model.cur_epoch_tensor.eval(self.sess), self.config.num_epochs + 1, 1):\n self.train_epoch()\n self.sess.run(self.model.increment_cur_epoch_tensor)\n\n def train_epoch(self):\n \"\"\"\n implement the logic of epoch:\n -loop over the number of iterations in the config and call the train step\n -add any summaries you want using the summary\n \"\"\"\n raise NotImplementedError\n\n def train_step(self):\n \"\"\"\n implement the logic of the train step\n - run the tensorflow session\n - return any metrics you need to summarize\n \"\"\"\n raise NotImplementedError\n"
] | [
[
"tensorflow.local_variables_initializer",
"tensorflow.global_variables_initializer"
]
] |
tjuwlz/MachineTranslation | [
"7335c7e95d2ca23ca7e26c45d4b8b13e2ce96704"
] | [
"modules/nmt.py"
] | [
"from datautil.dataloader import batch_iter\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.nn.utils as nn_utils\nimport time\nimport torch\nimport numpy as np\nfrom config.Const import *\n\n\nclass NMT(object):\n def __init__(self, encoder, decoder):\n super(NMT, self).__init__()\n self.encoder = encoder\n self.decoder = decoder\n\n def summary(self):\n print('encoder:', self.encoder)\n print('decoder:', self.decoder)\n\n # 训练一轮\n def train(self, train_pairs, enc_optimizer, dec_optimizer, args, src_vocab, tgt_vocab):\n train_loss = 0\n for src_batch, tgt_batch in batch_iter(train_pairs, args, src_vocab, tgt_vocab):\n loss = 0\n # enc_out: (batch_size, seq_len, hidden_size * nb_directions)\n # enc_hidden: (num_layers * nb_directions, batch_size, hidden_size)\n enc_out, enc_hidden = self.encoder(src_batch.src_idxs, mask=src_batch.non_pad_mask)\n\n self.encoder.zero_grad()\n self.decoder.zero_grad()\n\n dec_hidden = enc_hidden\n dec_input = tgt_batch.src_idxs[0].unsqueeze(1)\n if np.random.uniform(0, 1) <= args.teacher_force:\n # print('以目标作为下一个输入')\n for i in range(1, tgt_batch.src_idxs.size(0)):\n dec_out, dec_hidden = self.decoder(dec_input, dec_hidden, enc_out)\n dec_hidden *= tgt_batch.non_pad_mask[i].unsqueeze(1).repeat(1, dec_hidden.size(-1))\n loss += self.calc_loss(dec_out, tgt_batch.src_idxs[i])\n train_loss += loss.data.item()\n\n dec_input = tgt_batch.src_idxs[i].unsqueeze(1)\n else:\n # print('以网络的预测输出作为下一个输入')\n for i in range(1, tgt_batch.src_idxs.size(0)):\n dec_out, dec_hidden = self.decoder(dec_input, dec_hidden, enc_out)\n dec_hidden *= tgt_batch.non_pad_mask[i].unsqueeze(1).repeat(1, dec_hidden.size(-1))\n loss += self.calc_loss(dec_out, tgt_batch.src_idxs[i])\n train_loss += loss.data.item()\n\n _, top_i = dec_out.data.topk(1)\n dec_input = top_i # (batch_size, 1)\n\n loss.backward()\n\n nn_utils.clip_grad_norm_(filter(lambda p: p.requires_grad, self.encoder.parameters()), max_norm=5.0)\n nn_utils.clip_grad_norm_(filter(lambda p: p.requires_grad, self.decoder.parameters()), max_norm=5.0)\n\n enc_optimizer.step()\n dec_optimizer.step()\n\n return train_loss / len(train_pairs)\n\n # 训练多轮\n def train_iter(self, train_pairs, args, src_vocab, tgt_vocab):\n self.encoder.train()\n self.decoder.train()\n enc_optimizer = optim.Adam(filter(lambda p: p.requires_grad, self.encoder.parameters()), lr=args.lr)\n dec_optimizer = optim.Adam(filter(lambda p: p.requires_grad, self.decoder.parameters()), lr=args.lr)\n enc_lr_scheduler = optim.lr_scheduler.LambdaLR(enc_optimizer, lambda ep: max(0.95**ep, 1e-4))\n dec_lr_scheduler = optim.lr_scheduler.LambdaLR(dec_optimizer, lambda ep: max(0.95**ep, 1e-4))\n # enc_lr_scheduler = optim.lr_scheduler.LambdaLR(enc_optimizer, lambda ep: max(1 - 0.75 * ep / args.epoch, 1e-4))\n # dec_lr_scheduler = optim.lr_scheduler.LambdaLR(dec_optimizer, lambda ep: max(1 - 0.75 * ep / args.epoch, 1e-4))\n\n for i in range(args.epoch):\n enc_lr_scheduler.step()\n dec_lr_scheduler.step()\n t1 = time.time()\n train_loss = self.train(train_pairs, enc_optimizer, dec_optimizer, args, src_vocab, tgt_vocab)\n t2 = time.time()\n print('[Epoch %d] train loss: %.3f' % (i+1, train_loss))\n print('encoder lr:', enc_lr_scheduler.get_lr())\n print('decoder lr:', dec_lr_scheduler.get_lr())\n print('time cost: %.2fs' % (t2 - t1))\n\n def calc_loss(self, pred, tgt):\n return F.nll_loss(pred, tgt, ignore_index=0)\n\n # def evaluate(self, test_pairs, args, src_vocab, tgt_vocab):\n # self.encoder.eval()\n # self.decoder.eval()\n # pred_wds, tgt_wds = [], 
[]\n # for src_batch, tgt_batch in batch_iter(test_pairs, args, src_vocab, tgt_vocab):\n # batch_pred_wds, batch_tgt_wds = [], []\n # enc_out, enc_hidden = self.encoder(src_batch.src_idxs, mask=src_batch.non_pad_mask)\n #\n # dec_hidden = enc_hidden\n # dec_input = tgt_batch.src_idxs[0]\n # for i in range(1, tgt_batch.src_idxs.size(0)):\n # dec_out, dec_hidden = self.decoder(dec_input, dec_hidden, enc_out)\n #\n # dec_hidden *= tgt_batch.non_pad_mask[i].unsqueeze(1).repeat(1, dec_hidden.size(-1))\n # tgt_idxs = tgt_batch.src_idxs[i]\n # # greedy search\n # pred_idxs = dec_out.data.argmax(dim=1)\n # batch_pred_wds.append(tgt_vocab.index2word(pred_idxs.tolist()))\n # batch_tgt_wds.append(tgt_vocab.index2word(tgt_idxs.tolist()))\n # dec_input = pred_idxs\n #\n # pred_wds.extend(self.extract_valid(np.asarray(batch_pred_wds).T.tolist()))\n # tgt_wds.extend(self.extract_valid(np.asarray(batch_tgt_wds).T.tolist()))\n #\n # print('BLEU:', self.corpus_bleu(pred_wds, tgt_wds))\n\n # beam search\n '''\n 执行过程:设beam size = 3\n 1、选择t1时刻输出的概率分数最大的3个词\n 2、分别将t-1时刻选择的3个词作为当前时刻的输入\n 3、求t时刻累积的(序列)概率分数(历史所选择词的对数似然和),选择分数值最大的3个词\n 4、重复2-3过程,直到到达最大长度(或遇到<eos>)\n '''\n def evaluate(self, test_pairs, args, src_vocab, tgt_vocab):\n self.encoder.eval()\n self.decoder.eval()\n # pred_wds, tgt_wds = [], []\n for src_batch, tgt_batch in batch_iter(test_pairs, args, src_vocab, tgt_vocab):\n # batch_pred_wds, batch_tgt_wds = [], []\n enc_out, enc_hidden = self.encoder(src_batch.src_idxs, mask=src_batch.non_pad_mask)\n\n # 保存历史分数\n seq_len, batch_size = tgt_batch.src_idxs.size()\n # (bz, beam_size)\n hist_score = torch.zeros((batch_size, args.beam_size), device=args.device)\n # (beam_size, bz, vocab_size)\n beam_score = torch.zeros((args.beam_size, batch_size, tgt_vocab.vocab_size), device=args.device)\n # (bz, beam_size, max_len)\n best_paths = torch.zeros((MAX_LEN, batch_size, args.beam_size), device=args.device)\n\n dec_hidden = enc_hidden\n dec_input = tgt_batch.src_idxs[0].unsqueeze(1)\n for i in range(1, min(MAX_LEN, seq_len)):\n if i == 1:\n # dec_input: (bz, 1)\n # dec_out: (bz, vocab_size)\n dec_out, dec_hidden = self.decoder(dec_input, dec_hidden, enc_out)\n dec_hidden *= tgt_batch.non_pad_mask[i].unsqueeze(1).repeat(1, dec_hidden.size(-1))\n # (bz, beam_size)\n top_prob, top_idxs = dec_out.data.topk(args.beam_size, dim=1)\n hist_score = top_prob\n best_paths[i] = top_idxs\n # (bz, beam_size)\n dec_input = top_idxs\n else:\n # dec_input: (bz, beam_size) -> (beam_size, bz)\n dec_input = dec_input.transpose(0, 1)\n for j in range(args.beam_size):\n # dec_out: (bz, vocab_size)\n dec_out, dec_hidden = self.decoder(dec_input[j].unsqueeze(1), dec_hidden, enc_out)\n dec_hidden *= tgt_batch.non_pad_mask[i].unsqueeze(1).repeat(1, dec_hidden.size(-1))\n beam_score[j] = dec_out\n # (bz, beam_size, 1) -> (bz, beam_size, vocab_size)\n hist_score = hist_score.unsqueeze(-1).expand((-1, -1, tgt_vocab.vocab_size))\n hist_score += beam_score.transpose(0, 1) # (bz, beam_size, vocab_size)\n # (bz, beam_size * vocab_size)\n hist_score = hist_score.reshape((batch_size, -1))\n # (bz, beam_size)\n top_prob, top_idxs = hist_score.topk(args.beam_size, dim=1)\n hist_score = top_prob\n top_idxs %= tgt_vocab.vocab_size\n best_paths[i] = top_idxs\n dec_input = top_idxs\n\n # pred_wds.extend(self.extract_valid(np.asarray(batch_pred_wds).T.tolist()))\n # tgt_wds.extend(self.extract_valid(np.asarray(batch_tgt_wds).T.tolist()))\n\n # 提取序列的非填充部分\n def extract_valid(self, seqs: list):\n return list(map(lambda x: x[:x.index(EOS)] if EOS in x else 
x, seqs))\n\n # 统计ngram数目\n def count_ngram(self, cand: list, ref: list, n=1) -> int:\n assert len(cand) != 0 and len(ref) != 0\n\n total_count = 0\n for i in range(len(cand) - n + 1):\n cand_count, ref_count = 1, 0\n ngram = cand[i: i + n]\n # 统计ngram在机器翻译译文中出现的次数\n for j in range(i + n, len(cand) - n + 1):\n if ngram == cand[j: j + n]:\n cand_count += 1\n # 统计ngram在人工译文中出现的次数\n for k in range(len(ref) - n + 1):\n if ngram == ref[k: k + n]:\n ref_count += 1\n total_count += min(cand_count, ref_count)\n\n return total_count\n\n # 计算单句话的BLEU值,取值在[0, 1]之间,越大越好\n def sentence_bleu(self, cand: list, ref: list, N=4) -> float:\n '''\n :param cand: sentence_tokens\n :param ref: sentence_tokens\n :return:\n '''\n assert len(cand) != 0 and len(ref) != 0\n # n-gram中n的取值在[1, 4]之间\n res = 0\n cand_len, ref_len = len(cand), len(ref)\n for n in range(1, N+1):\n cand_gram = max(0, cand_len - n + 1)\n res += 0.25 * np.log(self.count_ngram(cand, ref, n) / cand_gram)\n # 短译句惩罚因子\n # bp = np.exp(1 - max(1., len(ref) / len(cand)))\n return np.exp(res + min(0., 1 - ref_len / cand_len))\n\n # 计算多句话的BLEU值(注:不是直接对sentence bleu求和求平均)\n def corpus_bleu(self, cands: list, refs: list, N=4) -> float:\n '''\n :param cands: [sentence_tokens1, sentence_tokens2]\n :param refs: [sentence_tokens1, sentence_tokens2]\n :return:\n '''\n assert len(cands) != 0 and len(cands) == len(refs)\n\n ref_len, cand_len = 0, 0\n for cand, ref in zip(cands, refs):\n ref_len += len(ref)\n cand_len += len(cand)\n\n res = 0\n for n in range(1, N+1):\n n_match, n_grams = 0, 0\n for cand, ref in zip(cands, refs):\n n_match += self.count_ngram(cand, ref, n)\n n_grams += max(0, len(cand) - n + 1)\n res += 0.25 * np.log(n_match / n_grams + 1e-8)\n\n return np.exp(res + min(0., 1 - ref_len / cand_len))\n"
] | [
[
"torch.zeros",
"torch.nn.functional.nll_loss",
"numpy.random.uniform",
"numpy.log"
]
] |
formalabstracts/CNL-CIC | [
"c857ee0d52b4ba91dd06a51c8f9f3ec2749ca0eb"
] | [
"2parser/sample.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 16 05:48:26 2021\n\n@author: thales\n\nGenerate random samples from parsers\n\n\"\"\"\n\nfrom numpy.random import (poisson , binomial, randint)\n\nfrom tokenlib import (Item , Etok, mk_stream)\n\nimport lib\n\nimport state\n\ndef bernoulli(p):\n return binomial(1,p)\n\ndef ran(ls):\n if not ls:\n raise TypeError(f'ran, expected nonempty list {ls}')\n return ls\n return ls[randint(0,len(ls))]\n\ndef mk_tok(v):\n toks = mk_stream(v)\n try: \n return toks.stream[0]\n except:\n raise IndexError(f'List index out of range. Empty list mk_tok({v})')\n \ndef mk_toks(vs):\n toks = mk_stream(vs)\n return toks.stream\n\ndef next_token():\n return mk_tok('blah')\n\ndef none():\n return None\n\ndef add_sample(self,other):\n def sample():\n try: # debug\n acc1 = self.sample()\n acc2 = other.sample()\n return (acc1,acc2)\n except AttributeError as ex:\n raise AttributeError(f'MyAttributeError {other}')\n return sample\n\ndef or_sample(self,other):\n def sample():\n if bernoulli(0.5):\n return self.sample()\n return other.sample()\n return sample\n\ndef treat_sample(self,treatment):\n def sample():\n return treatment(self.sample())\n return sample \n\ndef some(self,sep,m):\n def sample():\n if sep:\n if m==0:\n return []\n return lib.flatten((self.sample(),sep.sample()) for _ in range(0,m-1))+[self.sample()]\n return [self.sample() for _ in range(0,m-1)]\n return sample\n\ndef plus(self,sep):\n return some(self,sep,1 + poisson(0.5))\n \ndef many(self,sep):\n return some(self,sep,0 + poisson(0.5))\n\ndef atleast(self,n):\n return some(self,None,n + poisson(0.5))\n\ndef possibly(self):\n def sample():\n if state.state.include_possibly:\n return self.sample()\n if bernoulli(0.5):\n return self.sample()\n return None\n return sample\n\ndef if_test(self,p):\n def sample():\n iteration_limit = 10 # arbitrary limit\n for _ in range(0,iteration_limit):\n acc = self.sample() # randomized guess\n if p(acc):\n return acc \n return next_token() # give up on test\n return sample\n\ndef if_value(v):\n def sample():\n return mk_tok(v)\n return sample\n\ndef if_rawvalue(v):\n return if_value(v)\n\ndef type_sample(ty:str):\n \"\"\" \n >>> type_sample('WORD')\n '...'\n \"\"\"\n d = {'STRING': ['\"'+s+'\"' for s in 'hello world so little time'.split()],\n 'CONTROLSEQ':['\\\\'+s for s in 'alpha beta gamma delta sum prod deg circ ast lneg times rtimes'.split()],\n 'DECIMAL':['3.14','2.718','1.0','4.96'],\n 'INTEGER': [str(i) for i in range(0,10)] ,\n 'SYMBOL':['<','>','!=','+','-','*','^'],\n 'SYMBOL_QED':[r'\\qed'],\n 'MAPSTO':[r'\\mapsto'],\n 'MID':[r'\\mid'],\n 'TMID':[r'\\tmid'],\n 'ASSIGN':[':='],\n 'ARROW':[r'\\to'],\n 'BLANK':['_'],\n 'ALT':['|'],\n 'PERIOD':['.'],\n 'COLON':[':'],\n 'APPLYSUB':[r'\\sub'],\n 'COERCION': [r'\\^'],\n 'LAMBDA':[r'\\lambda'],\n 'PITY':[r'\\Pity'],\n 'QUANTIFIER':[r'\\forall',r'\\exists'],\n 'VAR':[ f'{x}{n}' for x in 'b c x y z u v w'.split() for n in range(0,5)],\n 'WORD':\"\"\"estimate equation solution expression inequality random sample \n mean pair ordered function evaluate order operation property divisible \n exponent base multiple square common prime form factorization point \n plane line angle ray parallel intersecting perpendicular regular \n polygon degree circle diameter chord similar congruent symmetry \n leg triangle scalene equilateral trapezoid rotation transformation \n translation polyhedron integer positive opposite value origin \n coordinate area circumference word number blah 
part\"\"\".split(),\n 'ATOMIC_IDENTIFIER':'foo_bar bar3 foo22 sin_ cos_ atan2 ceil_ comb_ fabs_ factorial_ floor_ gcd_ sqrt_ log2 log10 pow_ '.split(),\n 'HIERARCHICAL_IDENTIFIER':['math.pi','math.ceil','math.abs'],\n 'FIELD_ACCESSOR':['.assoc','.distrib'],\n 'UNKNOWN':['?'],\n 'TEX_ERROR':[r'\\error']\n }\n return ran(d[ty])\n\ndef if_types(tys):\n \"\"\" \n >>> if_types(['WORD','INTEGER','DECIMAL'])()\n LexToken(...)\n \"\"\"\n def sample():\n ty = ran(tys)\n return mk_tok(type_sample(ty))\n return sample\n\ndef all_sample(prs):\n def sample():\n return [p.sample() for p in prs]\n return sample\n\ndef first(prs):\n def sample():\n if not prs:\n return None\n i = randint(0,len(prs))\n return prs[i].sample()\n return sample\n\n#def lazy_call(pr):\n# def sample():\n# return pr().sample()\n# return sample\n\ndef first_word(ss):\n #DEBUG if not(ss):\n # raise IndexError(f'Index out of range, split first_word({ss})')\n s = ran(ss.split())\n def sample():\n return mk_tok(s)\n return sample\n\ndef word_net_string(wn):\n s = ran([k for k in wn])\n if not s:\n return ''\n return s + ' ' + word_net_string(wn[s])\n\ndef word_net(wn):\n def sample():\n s = word_net_string(wn)\n return mk_toks(s)\n return sample\n\n\nif __name__ == \"__main__\":\n import doctest\n\n doctest.testmod(optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)\n# doctest.testmod(verbose=True, optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)\n# doctest.testmod()\n\n \n\n"
] | [
[
"numpy.random.binomial",
"numpy.random.poisson"
]
] |